test: Replace RTF with Rust

This commit replaces the existing RTF test runner with a simple Rust
binary package called integration-test.

integration-test depends on integration-ebpf, which contains test eBPF
code written in Rust and C. `cargo xtask build-integration-test-ebpf`
can be used to build this code and suppress rust-analyzer warnings. It
does require `bpf-linker`, but that is highly likely to be available to
developers of Aya. It also requires a checkout of `libbpf` to extract
headers like bpf-helpers.h.

Since everything is compiled into a single binary, it can be run
locally using `cargo xtask integration-test` or remotely using
`./run.sh`, which reuses the bash script from the old test framework
to spawn a VM in which to run the tests.
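
For context, a condensed sketch of how the single test binary hangs
together, distilled from the code added below in this commit (the
`smoke` function name is illustrative; `IntegrationTest`, the
`integration_test` macro, and the `inventory`-based registration all
match the new test/integration-test crates):

```rust
use integration_test_macros::integration_test;

// Every annotated test registers itself with `inventory`; the binary's
// main() iterates the collected tests and runs them in-process.
#[derive(Debug)]
pub struct IntegrationTest {
    pub name: &'static str,
    pub test_fn: fn() -> anyhow::Result<()>,
}
inventory::collect!(IntegrationTest);

#[integration_test]
fn smoke() -> anyhow::Result<()> {
    // assertions go here; returning Err (or panicking) fails the test
    Ok(())
}

fn main() -> anyhow::Result<()> {
    for t in inventory::iter::<IntegrationTest> {
        println!("Running {}", t.name);
        (t.test_fn)()?;
    }
    Ok(())
}
```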

Signed-off-by: Dave Tucker <dave@dtucker.co.uk>
pull/335/head
Dave Tucker 2 years ago
parent 6188c9dee3
commit 79101e748a

@ -38,14 +38,35 @@ jobs:
test:
runs-on: ubuntu-20.04
needs: build
container:
image: ghcr.io/aya-rs/aya-test-rtf:main
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v2
with:
repository: libbpf/libbpf
path: libbpf
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly
components: rustfmt, clippy, rust-src
target: x86_64-unknown-linux-musl
override: true
- uses: Swatinem/rust-cache@v1
- name: Install Pre-requisites
run: |
sudo apt-get -qy install linux-tools-common qemu-system-x86 cloud-image-utils openssh-client libelf-dev gcc-multilib
cargo install bpf-linker
- name: Lint integration tests
run: |
cargo xtask build-integration-test-ebpf --libbpf-dir ./libbpf
cargo clippy -p integration-test -- --deny warnings
cargo clippy -p integration-test-macros -- --deny warnings
- name: Run regression tests
- name: Run integration tests
run: |
ln -s /root/.rustup ${HOME}/.rustup
cd test
rtf -vvv run
(cd test && ./run.sh ../libbpf)

@ -1,53 +0,0 @@
name: aya-test-image
on:
schedule:
- cron: "42 2 * * 0"
push:
branches:
- 'main'
paths:
- 'images/**'
- '.github/workflows/images.yml'
pull_request:
branches:
- 'main'
paths:
- 'images/**'
- '.github/workflows/images.yml'
env:
REGISTRY: ghcr.io
IMAGE_NAME: aya-rs/aya-test-rtf
jobs:
rtf:
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
steps:
- uses: actions/checkout@v2
- name: Log in to the Container registry
uses: docker/login-action@v1
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v3
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
- name: Build and push Docker image
uses: docker/build-push-action@v2
with:
context: images
file: images/Dockerfile.rtf
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}

@ -24,24 +24,22 @@ jobs:
with:
profile: minimal
toolchain: nightly
components: rustfmt, clippy, miri
components: rustfmt, clippy, miri, rust-src
override: true
- name: Check formatting
run: |
cargo fmt --all -- --check
pushd bpf
cargo fmt --all -- --check
popd
(cd bpf && cargo fmt --all -- --check)
(cd test/integration-ebpf && cargo fmt --all -- --check)
- name: Run clippy
run: |
cargo clippy -p aya -- --deny warnings
cargo clippy -p aya-gen -- --deny warnings
cargo clippy -p xtask -- --deny warnings
pushd bpf
cargo clippy -p aya-bpf -- --deny warnings
popd
(cd bpf && cargo clippy -p aya-bpf -- --deny warnings)
(cd test/integration-ebpf && cargo clippy -- --deny warnings)
- name: Run miri
env:

@ -1,3 +1,4 @@
{
"rust-analyzer.linkedProjects": ["Cargo.toml", "bpf/Cargo.toml"]
"rust-analyzer.linkedProjects": ["Cargo.toml", "bpf/Cargo.toml", "test/integration-ebpf/Cargo.toml"],
"rust-analyzer.checkOnSave.allTargets": false
}

@ -1,3 +1,4 @@
{
"rust-analyzer.linkedProjects": ["Cargo.toml", "bpf/Cargo.toml"]
"rust-analyzer.linkedProjects": ["Cargo.toml", "bpf/Cargo.toml", "test/integration-ebpf/Cargo.toml"],
"rust-analyzer.checkOnSave.allTargets": false
}

@ -1,2 +1,3 @@
[workspace]
members = ["aya", "aya-gen", "xtask"]
members = ["aya", "aya-gen", "test/integration-test", "test/integration-test-macros", "xtask"]
default-members = ["aya", "aya-gen"]

@ -0,0 +1 @@
../rustfmt.toml

@ -1,38 +0,0 @@
FROM fedora:35
# Rust Nightly
RUN curl https://sh.rustup.rs -sSf | sh -s -- \
--default-toolchain nightly \
--component rustfmt \
--component clippy \
--component rust-src \
--target x86_64-unknown-linux-musl \
-y
ENV PATH "/root/.cargo/bin:$PATH"
# Pre-requisites
RUN dnf install \
--setopt=install_weak_deps=False --best -qy \
golang \
qemu-system-x86 \
cloud-utils \
genisoimage \
libbpf-devel \
clang \
openssl-devel \
musl-libc \
git && dnf clean all \
&& rm -rf /var/cache/yum
RUN cargo install \
bpf-linker \
rust-script \
sccache
RUN go install github.com/linuxkit/rtf@latest
ENV PATH "/root/go/bin:$PATH"
ENV RUSTC_WRAPPER "sccache"
ENTRYPOINT ["rtf"]
CMD ["-vvv", "run"]

test/.gitignore

@ -1,3 +1 @@
_results
_tmp
_images
.tmp

@ -1,48 +1,52 @@
Aya Regression Tests
====================
Aya Integration Tests
=====================
The aya regression test suite is a set of tests to ensure that
The aya integration test suite is a set of tests to ensure that
common usage behaviours work on real Linux distros
## Prerequisites
This assumes you have a working Rust and Go toolchain on the host machine
### Linux
1. `rustup target add x86_64-unknown-linux-musl`
1. Install [`rtf`](https://github.com/linuxkit/rtf): `go install github.com/linuxkit/rtf@latest`
1. Install rust-script: `cargo install rust-script`
1. Install `qemu` and `cloud-init-utils` package - or any package that provides `cloud-localds`
To run locally all you need is:
It is not required, but the tests run significantly faster if you use `sccache`
1. Rust nightly
1. A checkout of `libbpf`
1. `cargo install bpf-linker`
1. `bpftool`
You may also use the docker image to run the tests:
### Other OSs
```
docker run -it --rm --device /dev/kvm -v/home/dave/dev/aya-rs/aya:/src -w /src/test ghcr.io/aya-rs/aya-test-rtf:main
```
1. A POSIX shell
1. A checkout of `libbpf`
1. `rustup target add x86_64-unknown-linux-musl`
1. `cargo install bpf-linker`
1. Install `qemu` and `cloud-init-utils` package - or any package that provides `cloud-localds`
## Usage
To read more about how to use `rtf`, see the [documentation](https://github.com/linuxkit/rtf/blob/master/docs/USER_GUIDE.md)
From the root of this repository:
### Run the tests with verbose output
### Native
```
rtf -vvv run
cargo xtask integration-test --libbpf-dir /path/to/libbpf
```
### Run the tests using an older kernel
### Virtualized
```
AYA_TEST_IMAGE=centos8 rtf -vvv run
./test/run.sh /path/to/libbpf
```
### Writing a test
Tests should follow this pattern:
- The eBPF code should be in a file named `${NAME}.ebpf.rs`
- The userspace code should be in a file named `${NAME}.rs`
- The userspace program should make assertions and exit with a non-zero return code to signal failure
- VM start and stop is handled by the framework
- Any files copied to the VM should be cleaned up afterwards
Tests should follow these guidelines (a sketch follows the list):
See `./cases` for examples
- Rust eBPF code should live in `integration-ebpf/${NAME}.rs` and be included in `integration-ebpf/Cargo.toml`
- C eBPF code should live in `integration-test/src/bpf/${NAME}.bpf.c`. It's automatically compiled and made available as `${OUT_DIR}/${NAME}.bpf.o`.
- Any bytecode should be included in the integration test binary using `include_bytes_aligned!`
- Tests should be added to `integration-test/src/test`
- You may add a new module, or use an existing one
- Integration tests must use the `#[integration_test]` macro to be included in the build
- Test functions should return `anyhow::Result<()>` since this allows the use of `?` to return errors.
- You may either `panic!` when an assertion fails or `bail!`. The former is preferred since the stack trace will point directly to the failed line.
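A minimal sketch of a test following these guidelines, condensed from the smoke tests added in this commit (the function name is illustrative; `pass` is the Rust eBPF example program in `integration-ebpf`):
```rust
use std::convert::TryInto;

use aya::{
    include_bytes_aligned,
    programs::{Xdp, XdpFlags},
    Bpf,
};

use super::{integration_test, IntegrationTest};

#[integration_test]
fn xdp_pass() -> anyhow::Result<()> {
    // Bytecode produced by `cargo xtask build-integration-test-ebpf`
    let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/debug/pass");
    let mut bpf = Bpf::load(bytes)?;
    let pass: &mut Xdp = bpf.program_mut("pass").unwrap().try_into()?;
    pass.load()?;
    pass.attach("lo", XdpFlags::default())?;
    Ok(())
}
```
Running `cargo xtask integration-test` (or `./run.sh` for the virtualized flow) then executes every function registered this way.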

@ -1,19 +0,0 @@
//! ```cargo
//! [dependencies]
//! aya = { path = "../../../../aya" }
//! ```
use aya::{
Bpf,
programs::{Xdp, XdpFlags},
};
use std::convert::TryInto;
fn main() {
println!("Loading XDP program");
let mut bpf = Bpf::load_file("pass.o").unwrap();
let dispatcher: &mut Xdp = bpf.program_mut("pass").unwrap().try_into().unwrap();
dispatcher.load().unwrap();
dispatcher.attach("eth0", XdpFlags::default()).unwrap();
println!("Success...");
}

@ -1,29 +0,0 @@
#!/bin/sh
# SUMMARY: Check that a simple XDP program an be loaded
# LABELS:
set -e
# Source libraries. Uncomment if needed/defined
#. "${RT_LIB}"
. "${RT_PROJECT_ROOT}/_lib/lib.sh"
NAME=pass
clean_up() {
rm -rf ${NAME}.o ${NAME}
exec_vm rm -f ${NAME} ${NAME}.o
}
trap clean_up EXIT
# Test code goes here
compile_ebpf "$(pwd)/${NAME}.ebpf.rs"
compile_user "$(pwd)/${NAME}.rs"
scp_vm ${NAME}.o
scp_vm ${NAME}
exec_vm sudo ./${NAME}
exit 0

@ -1,24 +0,0 @@
//! ```cargo
//! [dependencies]
//! aya = { path = "../../../../aya" }
//! ```
use aya::{
Bpf, BpfLoader,
programs::{Extension, ProgramFd, Xdp, XdpFlags},
};
use std::convert::TryInto;
fn main() {
println!("Loading Root XDP program");
let mut bpf = Bpf::load_file("main.o").unwrap();
let pass: &mut Xdp = bpf.program_mut("pass").unwrap().try_into().unwrap();
pass.load().unwrap();
pass.attach("lo", XdpFlags::default()).unwrap();
println!("Loading Extension Program");
let mut bpf = BpfLoader::new().extension("drop").load_file("ext.o").unwrap();
let drop_: &mut Extension = bpf.program_mut("drop").unwrap().try_into().unwrap();
drop_.load(pass.fd().unwrap(), "xdp_pass").unwrap();
println!("Success...");
}

@ -1,33 +0,0 @@
#!/bin/sh
# SUMMARY: Check that a simple XDP program an be loaded
# LABELS:
set -e
# Source libraries. Uncomment if needed/defined
#. "${RT_LIB}"
. "${RT_PROJECT_ROOT}/_lib/lib.sh"
NAME=ext
clean_up() {
rm -rf main.o ${NAME}.o ${NAME}
exec_vm rm -f main.o ${NAME}.o ${NAME}
}
trap clean_up EXIT
# Test code goes here
min_kernel_version 5.9
compile_c_ebpf "$(pwd)/main.bpf.c"
compile_c_ebpf "$(pwd)/${NAME}.bpf.c"
compile_user "$(pwd)/${NAME}.rs"
scp_vm main.o
scp_vm ${NAME}.o
scp_vm ${NAME}
exec_vm sudo ./${NAME}
exit 0

@ -1,36 +0,0 @@
#!/bin/sh
# SUMMARY: Smoke tests to check that simple programs can be loaded on a VM
# LABELS:
# Source libraries. Uncomment if needed/defined
# . "${RT_LIB}"
. "${RT_PROJECT_ROOT}/_lib/lib.sh"
set -e
group_init() {
# Group initialisation code goes here
return 0
}
group_deinit() {
# Group de-initialisation code goes here
return 0
}
CMD=$1
case $CMD in
init)
group_init
res=$?
;;
deinit)
group_deinit
res=$?
;;
*)
res=1
;;
esac
exit $res

@ -1,20 +0,0 @@
//! ```cargo
//! [dependencies]
//! aya = { path = "../../../../aya" }
//! ```
use aya::{
Bpf,
programs::{Xdp, XdpFlags},
};
use std::convert::TryInto;
use std::{thread, time};
fn main() {
println!("Loading XDP program");
let mut bpf = Bpf::load_file("name_test.o").unwrap();
let dispatcher: &mut Xdp = bpf.program_mut("ihaveaverylongname").unwrap().try_into().unwrap();
dispatcher.load().unwrap();
dispatcher.attach("eth0", XdpFlags::default()).unwrap();
thread::sleep(time::Duration::from_secs(20));
}

@ -1,32 +0,0 @@
#!/bin/sh
# SUMMARY: Check that long names are properly truncated
# LABELS:
set -e
# Source libraries. Uncomment if needed/defined
#. "${RT_LIB}"
. "${RT_PROJECT_ROOT}/_lib/lib.sh"
NAME=name_test
clean_up() {
rm -rf ebpf user ${NAME}.o ${NAME}
exec_vm sudo pkill -9 ${NAME}
exec_vm rm ${NAME} ${NAME}.o
}
trap clean_up EXIT
# Test code goes here
compile_ebpf ${NAME}.ebpf.rs
compile_user ${NAME}.rs
scp_vm ${NAME}.o
scp_vm ${NAME}
exec_vm sudo ./${NAME}&
prog_list=$(exec_vm sudo bpftool prog)
echo "${prog_list}" | grep -q "xdp name ihaveaverylongn tag"
exit 0

@ -1,33 +0,0 @@
//! ```cargo
//! [dependencies]
//! log = "0.4"
//! simplelog = "0.11"
//! aya = { path = "../../../../aya" }
//! ```
use aya::{
Bpf,
programs::{Xdp, XdpFlags},
};
use log::info;
use std::convert::TryInto;
use simplelog::{ColorChoice, ConfigBuilder, LevelFilter, TermLogger, TerminalMode};
fn main() {
TermLogger::init(
LevelFilter::Debug,
ConfigBuilder::new()
.set_target_level(LevelFilter::Error)
.set_location_level(LevelFilter::Error)
.build(),
TerminalMode::Mixed,
ColorChoice::Auto,
).unwrap();
info!("Loading XDP program");
let mut bpf = Bpf::load_file("multimap.o").unwrap();
let pass: &mut Xdp = bpf.program_mut("stats").unwrap().try_into().unwrap();
pass.load().unwrap();
pass.attach("eth0", XdpFlags::default()).unwrap();
info!("Success...");
}

@ -1,29 +0,0 @@
#!/bin/sh
# SUMMARY: Check that a program with multiple maps in the maps section loads
# LABELS:
set -e
# Source libraries. Uncomment if needed/defined
#. "${RT_LIB}"
. "${RT_PROJECT_ROOT}/_lib/lib.sh"
NAME=multimap
clean_up() {
rm -rf ${NAME}.o ${NAME}
exec_vm rm -f ${NAME}.o ${NAME}
}
trap clean_up EXIT
# Test code goes here
compile_c_ebpf "$(pwd)/${NAME}.bpf.c"
compile_user "$(pwd)/${NAME}.rs"
scp_vm ${NAME}.o
scp_vm ${NAME}
exec_vm sudo ./${NAME}
exit 0

@ -1,57 +0,0 @@
//! ```cargo
//! [dependencies]
//! aya = { path = "../../../../aya" }
//! ```
use aya::{
programs::{Xdp, XdpFlags},
Bpf,
};
use std::convert::TryInto;
use std::process::Command;
fn is_loaded() -> bool {
let output = Command::new("bpftool").args(&["prog"]).output().unwrap();
let stdout = String::from_utf8(output.stdout).unwrap();
stdout.contains("test_unload")
}
fn assert_loaded(loaded: bool) {
let state = is_loaded();
if state == loaded {
return;
}
panic!("Expected loaded: {} but was loaded: {}", loaded, state);
}
fn main() {
println!("Loading XDP program");
let mut bpf = Bpf::load_file("test.o").unwrap();
let dispatcher: &mut Xdp = bpf.program_mut("test_unload").unwrap().try_into().unwrap();
dispatcher.load().unwrap();
let link = dispatcher.attach("eth0", XdpFlags::default()).unwrap();
{
let link_owned = dispatcher.take_link(link);
dispatcher.unload().unwrap();
assert_loaded(true);
};
assert_loaded(false);
dispatcher.load().unwrap();
assert_loaded(true);
dispatcher.attach("eth0", XdpFlags::default()).unwrap();
assert_loaded(true);
dispatcher.unload().unwrap();
assert_loaded(false);
}

@ -1,27 +0,0 @@
#!/bin/sh
# SUMMARY: Check that the program can be unloaded
# LABELS:
set -ex
# Source libraries. Uncomment if needed/defined
#. "${RT_LIB}"
. "${RT_PROJECT_ROOT}/_lib/lib.sh"
NAME=test
clean_up() {
rm -rf ebpf user ${NAME}.o ${NAME}
exec_vm rm ${NAME} ${NAME}.o
}
trap clean_up EXIT
# Test code goes here
compile_ebpf ${NAME}.ebpf.rs
compile_user ${NAME}.rs
scp_vm ${NAME}.o
scp_vm ${NAME}
exec_vm sudo ./${NAME}

@ -1,36 +0,0 @@
#!/bin/sh
# SUMMARY: Tests to check loader features
# LABELS:
# Source libraries. Uncomment if needed/defined
# . "${RT_LIB}"
. "${RT_PROJECT_ROOT}/_lib/lib.sh"
set -e
group_init() {
# Group initialisation code goes here
return 0
}
group_deinit() {
# Group de-initialisation code goes here
return 0
}
CMD=$1
case $CMD in
init)
group_init
res=$?
;;
deinit)
group_deinit
res=$?
;;
*)
res=1
;;
esac
exit $res

@ -1,25 +0,0 @@
#!/bin/sh
# SUMMARY: Check that maps are correctly represented in ELF files
# LABELS:
set -ex
# Source libraries. Uncomment if needed/defined
#. "${RT_LIB}"
. "${RT_PROJECT_ROOT}/_lib/lib.sh"
NAME=map_test
clean_up() {
rm -rf ebpf user ${NAME}.o
}
trap clean_up EXIT
# Test code goes here
compile_ebpf ${NAME}.ebpf.rs
readelf --sections ${NAME}.o | grep -q "maps"
readelf --syms ${NAME}.o | grep -q "BAR"
exit 0

@ -1,36 +0,0 @@
#!/bin/sh
# SUMMARY: Tests to check ELF from aya-bpf
# LABELS:
# Source libraries. Uncomment if needed/defined
# . "${RT_LIB}"
. "${RT_PROJECT_ROOT}/_lib/lib.sh"
set -e
group_init() {
# Group initialisation code goes here
return 0
}
group_deinit() {
# Group de-initialisation code goes here
return 0
}
CMD=$1
case $CMD in
init)
group_init
res=$?
;;
deinit)
group_deinit
res=$?
;;
*)
res=1
;;
esac
exit $res

@ -1,85 +0,0 @@
//! ```cargo
//! [dependencies]
//! libbpf-sys = { version = "0.6.1-1" }
//! anyhow = "1"
//! ```
use std::{
env,
fs::{self, OpenOptions},
io::Write,
path::Path,
process::Command,
string::String,
};
use anyhow::{bail, Context, Result};
static CLANG_DEFAULT: &str = "/usr/bin/clang";
/// Extract vendored libbpf headers from libbpf-sys.
fn extract_libbpf_headers<P: AsRef<Path>>(include_path: P) -> Result<()> {
let dir = include_path.as_ref().join("bpf");
fs::create_dir_all(&dir)?;
for (filename, contents) in libbpf_sys::API_HEADERS.iter() {
let path = dir.as_path().join(filename);
let mut file = OpenOptions::new().write(true).create(true).open(path)?;
file.write_all(contents.as_bytes())?;
}
Ok(())
}
/// Build eBPF programs with clang and libbpf headers.
fn build_ebpf<P: Clone + AsRef<Path>>(in_file: P, out_file: P, include_path: P) -> Result<()> {
extract_libbpf_headers(include_path.clone())?;
let clang = match env::var("CLANG") {
Ok(val) => val,
Err(_) => String::from(CLANG_DEFAULT),
};
let arch = match std::env::consts::ARCH {
"x86_64" => "x86",
"aarch64" => "arm64",
_ => std::env::consts::ARCH,
};
let mut cmd = Command::new(clang);
cmd.arg(format!("-I{}", include_path.as_ref().to_string_lossy()))
.arg("-g")
.arg("-O2")
.arg("-target")
.arg("bpf")
.arg("-c")
.arg(format!("-D__TARGET_ARCH_{}", arch))
.arg(in_file.as_ref().as_os_str())
.arg("-o")
.arg(out_file.as_ref().as_os_str());
let output = cmd.output().context("Failed to execute clang")?;
if !output.status.success() {
bail!(
"Failed to compile eBPF programs\n \
stdout=\n \
{}\n \
stderr=\n \
{}\n",
String::from_utf8(output.stdout).unwrap(),
String::from_utf8(output.stderr).unwrap()
);
}
Ok(())
}
fn main() -> Result<()> {
let args: Vec<String> = env::args().collect();
if args.len() != 3 {
bail!("requires 2 arguments. src and dst")
}
let path = env::current_dir()?;
let src = Path::new(&args[1]);
let dst = Path::new(&args[2]);
let include_path = path.join("include");
fs::create_dir_all(include_path.clone())?;
build_ebpf(src, dst, &include_path)?;
Ok(())
}

@ -1,36 +0,0 @@
#!/bin/sh
# NAME: aya
# SUMMARY: Aya Regression Tests
# Source libraries. Uncomment if needed/defined
# . "${RT_LIB}"
. "${RT_PROJECT_ROOT}/_lib/lib.sh"
group_init() {
# Group initialisation code goes here
[ -r "${AYA_TMPDIR}" ] && rm -rf "${AYA_TMPDIR}"
mkdir "${AYA_TMPDIR}"
start_vm
}
group_deinit() {
# Group de-initialisation code goes here
stop_vm
}
CMD=$1
case $CMD in
init)
group_init
res=$?
;;
deinit)
group_deinit
res=$?
;;
*)
res=1
;;
esac
exit $res

@ -0,0 +1,6 @@
[build]
target-dir = "../../target"
target = "bpfel-unknown-none"
[unstable]
build-std = ["core"]

@ -0,0 +1,36 @@
[package]
name = "integration-ebpf"
version = "0.1.0"
edition = "2018"
publish = false
[dependencies]
aya-bpf = { path = "../../bpf/aya-bpf" }
[[bin]]
name = "map_test"
path = "src/map_test.rs"
[[bin]]
name = "name_test"
path = "src/name_test.rs"
[[bin]]
name = "pass"
path = "src/pass.rs"
[[bin]]
name = "test"
path = "src/test.rs"
[profile.dev]
panic = "abort"
opt-level = 2
overflow-checks = false
[profile.release]
panic = "abort"
debug = 2
[workspace]
members = []

@ -0,0 +1,2 @@
[toolchain]
channel="nightly"

@ -1,6 +1,5 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
SEC("xdp/drop")
int xdp_drop(struct xdp_md *ctx)
@ -8,4 +7,4 @@ int xdp_drop(struct xdp_md *ctx)
return XDP_DROP;
}
char _license[] SEC("license") = "GPL";
char _license[] SEC("license") = "GPL";

@ -1,6 +1,5 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
SEC("xdp/pass")
int xdp_pass(struct xdp_md *ctx)
@ -8,4 +7,4 @@ int xdp_pass(struct xdp_md *ctx)
return XDP_PASS;
}
char _license[] SEC("license") = "GPL";
char _license[] SEC("license") = "GPL";

@ -44,4 +44,4 @@ int xdp_stats(struct xdp_md *ctx)
return XDP_PASS;
}
char _license[] SEC("license") = "GPL";
char _license[] SEC("license") = "GPL";

@ -1,16 +1,11 @@
//! ```cargo
//! [dependencies]
//! aya-bpf = { path = "../../../../bpf/aya-bpf" }
//! ```
#![no_std]
#![no_main]
use aya_bpf::{
bindings::xdp_action,
macros::{map, xdp},
programs::XdpContext,
maps::Array,
programs::XdpContext,
};
#[map]

@ -1,19 +1,10 @@
//! ```cargo
//! [dependencies]
//! aya-bpf = { path = "../../../../bpf/aya-bpf" }
//! ```
#![no_std]
#![no_main]
use aya_bpf::{
bindings::xdp_action,
macros::xdp,
programs::XdpContext,
};
use aya_bpf::{bindings::xdp_action, macros::xdp, programs::XdpContext};
#[xdp(name="ihaveaverylongname")]
pub fn pass(ctx: XdpContext) -> u32 {
#[xdp(name = "ihaveaverylongname")]
pub fn ihaveaverylongname(ctx: XdpContext) -> u32 {
match unsafe { try_pass(ctx) } {
Ok(ret) => ret,
Err(_) => xdp_action::XDP_ABORTED,
@ -27,4 +18,4 @@ unsafe fn try_pass(_ctx: XdpContext) -> Result<u32, u32> {
#[panic_handler]
fn panic(_info: &core::panic::PanicInfo) -> ! {
unsafe { core::hint::unreachable_unchecked() }
}
}

@ -1,18 +1,9 @@
//! ```cargo
//! [dependencies]
//! aya-bpf = { path = "../../../../bpf/aya-bpf" }
//! ```
#![no_std]
#![no_main]
use aya_bpf::{
bindings::xdp_action,
macros::xdp,
programs::XdpContext,
};
use aya_bpf::{bindings::xdp_action, macros::xdp, programs::XdpContext};
#[xdp(name="pass")]
#[xdp(name = "pass")]
pub fn pass(ctx: XdpContext) -> u32 {
match unsafe { try_pass(ctx) } {
Ok(ret) => ret,
@ -27,4 +18,4 @@ unsafe fn try_pass(_ctx: XdpContext) -> Result<u32, u32> {
#[panic_handler]
fn panic(_info: &core::panic::PanicInfo) -> ! {
unsafe { core::hint::unreachable_unchecked() }
}
}

@ -1,8 +1,3 @@
//! ```cargo
//! [dependencies]
//! aya-bpf = { path = "../../../../bpf/aya-bpf" }
//! ```
#![no_std]
#![no_main]

@ -0,0 +1,12 @@
[package]
name = "integration-test-macros"
version = "0.1.0"
edition = "2018"
publish = false
[dependencies]
quote = "1"
syn = {version = "1.0", features = ["full"]}
[lib]
proc-macro = true

@ -0,0 +1,19 @@
use proc_macro::TokenStream;
use quote::quote;
use syn::{parse_macro_input, ItemFn};
#[proc_macro_attribute]
pub fn integration_test(_attr: TokenStream, item: TokenStream) -> TokenStream {
let item = parse_macro_input!(item as ItemFn);
let name = &item.sig.ident;
let name_str = &item.sig.ident.to_string();
let expanded = quote! {
#item
inventory::submit!(IntegrationTest {
name: concat!(module_path!(), "::", #name_str),
test_fn: #name,
});
};
TokenStream::from(expanded)
}

@ -0,0 +1,17 @@
[package]
name = "integration-test"
version = "0.1.0"
edition = "2018"
publish = false
[dependencies]
anyhow = "1"
aya = { path = "../../aya" }
inventory = "0.2"
integration-test-macros = { path = "../integration-test-macros" }
lazy_static = "1"
libc = { version = "0.2.105" }
log = "0.4"
object = { version = "0.29", default-features = false, features = ["std", "read_core", "elf"] }
regex = "1"
simplelog = "0.12"

@ -0,0 +1,27 @@
use log::info;
use simplelog::{ColorChoice, ConfigBuilder, LevelFilter, TermLogger, TerminalMode};
mod tests;
use tests::IntegrationTest;
fn main() -> anyhow::Result<()> {
TermLogger::init(
LevelFilter::Debug,
ConfigBuilder::new()
.set_target_level(LevelFilter::Error)
.set_location_level(LevelFilter::Error)
.build(),
TerminalMode::Mixed,
ColorChoice::Auto,
)?;
// Run the tests
for t in inventory::iter::<IntegrationTest> {
info!("Running {}", t.name);
if let Err(e) = (t.test_fn)() {
panic!("{}", e)
}
}
Ok(())
}

@ -0,0 +1,27 @@
use super::{integration_test, IntegrationTest};
use anyhow::bail;
use aya::include_bytes_aligned;
use object::{Object, ObjectSymbol};
#[integration_test]
fn test_maps() -> anyhow::Result<()> {
let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/debug/map_test");
let obj_file = object::File::parse(bytes)?;
if obj_file.section_by_name("maps").is_none() {
bail!("No 'maps' ELF section");
}
let mut found = false;
for sym in obj_file.symbols() {
if let Ok(name) = sym.name() {
if name == "BAR" {
found = true;
break;
}
}
}
if !found {
bail!("No symbol 'BAR' in ELF file")
}
Ok(())
}

@ -0,0 +1,75 @@
use std::{convert::TryInto, process::Command};
use aya::{
include_bytes_aligned,
programs::{Xdp, XdpFlags},
Bpf,
};
use super::{integration_test, IntegrationTest};
#[integration_test]
fn long_name() -> anyhow::Result<()> {
let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/debug/name_test");
let mut bpf = Bpf::load(bytes)?;
let name_prog: &mut Xdp = bpf.program_mut("ihaveaverylongname").unwrap().try_into()?;
name_prog.load().unwrap();
name_prog.attach("lo", XdpFlags::default())?;
// We used to be able to assert with bpftool that the program name was short.
// It seems, though, that it now uses the name from the ELF symbol table instead.
// Therefore, as long as we were able to load the program, this is good enough.
Ok(())
}
#[integration_test]
fn multiple_maps() -> anyhow::Result<()> {
let bytes =
include_bytes_aligned!("../../../../target/bpfel-unknown-none/debug/multimap.bpf.o");
let mut bpf = Bpf::load(bytes)?;
let pass: &mut Xdp = bpf.program_mut("stats").unwrap().try_into().unwrap();
pass.load().unwrap();
pass.attach("lo", XdpFlags::default()).unwrap();
Ok(())
}
fn is_loaded() -> bool {
let output = Command::new("bpftool").args(&["prog"]).output().unwrap();
let stdout = String::from_utf8(output.stdout).unwrap();
stdout.contains("test_unload")
}
fn assert_loaded(loaded: bool) {
let state = is_loaded();
if state == loaded {
return;
}
panic!("Expected loaded: {} but was loaded: {}", loaded, state);
}
#[integration_test]
fn unload() -> anyhow::Result<()> {
let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/debug/test");
let mut bpf = Bpf::load(bytes)?;
let prog: &mut Xdp = bpf.program_mut("test_unload").unwrap().try_into().unwrap();
prog.load().unwrap();
let link = prog.attach("lo", XdpFlags::default()).unwrap();
{
let _link_owned = prog.take_link(link);
prog.unload().unwrap();
assert_loaded(true);
};
assert_loaded(false);
prog.load().unwrap();
assert_loaded(true);
prog.attach("lo", XdpFlags::default()).unwrap();
assert_loaded(true);
prog.unload().unwrap();
assert_loaded(false);
Ok(())
}

@ -0,0 +1,37 @@
use anyhow::bail;
use lazy_static::lazy_static;
use libc::{uname, utsname};
use regex::Regex;
use std::{ffi::CStr, mem};
pub mod elf;
pub mod load;
pub mod smoke;
pub use integration_test_macros::integration_test;
#[derive(Debug)]
pub struct IntegrationTest {
pub name: &'static str,
pub test_fn: fn() -> anyhow::Result<()>,
}
pub(crate) fn kernel_version() -> anyhow::Result<(u8, u8, u8)> {
lazy_static! {
static ref RE: Regex = Regex::new(r"^([0-9]+)\.([0-9]+)\.([0-9]+)").unwrap();
}
let mut data: utsname = unsafe { mem::zeroed() };
let ret = unsafe { uname(&mut data) };
assert!(ret >= 0, "libc::uname failed.");
let release_cstr = unsafe { CStr::from_ptr(data.release.as_ptr()) };
let release = release_cstr.to_string_lossy();
if let Some(caps) = RE.captures(&release) {
let major = caps.get(1).unwrap().as_str().parse().unwrap();
let minor = caps.get(2).unwrap().as_str().parse().unwrap();
let patch = caps.get(3).unwrap().as_str().parse().unwrap();
Ok((major, minor, patch))
} else {
bail!("no kernel version found");
}
}
inventory::collect!(IntegrationTest);

@ -0,0 +1,45 @@
use std::convert::TryInto;
use aya::{
include_bytes_aligned,
programs::{Extension, Xdp, XdpFlags},
Bpf, BpfLoader,
};
use log::info;
use super::{integration_test, kernel_version, IntegrationTest};
#[integration_test]
fn xdp() -> anyhow::Result<()> {
let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/debug/pass");
let mut bpf = Bpf::load(bytes)?;
let dispatcher: &mut Xdp = bpf.program_mut("pass").unwrap().try_into().unwrap();
dispatcher.load().unwrap();
dispatcher.attach("lo", XdpFlags::default()).unwrap();
Ok(())
}
#[integration_test]
fn extension() -> anyhow::Result<()> {
let (major, minor, _) = kernel_version()?;
if major < 5 || minor < 9 {
info!(
"skipping as {}.{} does not meet version requirement of 5.9",
major, minor
);
return Ok(());
}
// TODO: Check kernel version == 5.9 or later
let main_bytes =
include_bytes_aligned!("../../../../target/bpfel-unknown-none/debug/main.bpf.o");
let mut bpf = Bpf::load(main_bytes)?;
let pass: &mut Xdp = bpf.program_mut("pass").unwrap().try_into().unwrap();
pass.load().unwrap();
pass.attach("lo", XdpFlags::default()).unwrap();
let ext_bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/debug/ext.bpf.o");
let mut bpf = BpfLoader::new().extension("drop").load(ext_bytes).unwrap();
let drop_: &mut Extension = bpf.program_mut("drop").unwrap().try_into().unwrap();
drop_.load(pass.fd().unwrap(), "xdp_pass").unwrap();
Ok(())
}

@ -1,16 +1,12 @@
#!/bin/sh
# Source the main regression test library if present
[ -f "${RT_LIB}" ] && . "${RT_LIB}"
set -e
# Temporary directory for tests to use.
AYA_TMPDIR="${RT_PROJECT_ROOT}/_tmp"
AYA_TMPDIR="$(pwd)/.tmp"
# Directory for VM images
AYA_IMGDIR="${RT_PROJECT_ROOT}/_images"
# Cancel Exit Code
RT_CANCEL=253
AYA_IMGDIR=${AYA_TMPDIR}
# Test Architecture
if [ -z "${AYA_TEST_ARCH}" ]; then
@ -27,71 +23,6 @@ case "${AYA_TEST_IMAGE}" in
centos*) AYA_SSH_USER="centos";;
esac
# compiles the ebpf program by using rust-script to create a temporary
# cargo project in $(pwd)/ebpf. caller must add rm -rf ebpf to the clean_up
# functionAYA_TEST_ARCH
compile_ebpf() {
file=$(basename "$1")
dir=$(dirname "$1")
base=$(echo "${file}" | cut -f1 -d '.')
rm -rf "${dir}/ebpf"
rust-script --pkg-path "${dir}/ebpf" --gen-pkg-only "$1"
artifact=$(sed -n 's/^name = \"\(.*\)\"/\1/p' "${dir}/ebpf/Cargo.toml" | head -n1)
mkdir -p "${dir}/.cargo"
cat > "${dir}/.cargo/config.toml" << EOF
[build]
target = "bpfel-unknown-none"
[unstable]
build-std = ["core"]
EOF
cat >> "${dir}/ebpf/Cargo.toml" << EOF
[workspace]
members = []
EOF
# overwrite the rs file as rust-script adds a main fn
cp "$1" "${dir}/ebpf/${file}"
cargo build -q --manifest-path "${dir}/ebpf/Cargo.toml"
mv "${dir}/ebpf/target/bpfel-unknown-none/debug/${artifact}" "${dir}/${base}.o"
rm -rf "${dir}/.cargo"
rm -rf "${dir}/ebpf"
}
# compile a C BPF file
compile_c_ebpf() {
file=$(basename "$1")
dir=$(dirname "$1")
base=$(echo "${file}" | cut -f1 -d '.')
rust-script "${RT_PROJECT_ROOT}/_lib/compile-ebpf.ers" "${1}" "${dir}/${base}.o"
rm -rf "${dir}/include"
}
# compiles the userspace program by using rust-script to create a temporary
# cargo project in $(pwd)/user. caller must add rm -rf ebpf to the clean_up
# function. this is required since the binary produced has to be run with
# sudo to load an eBPF program
compile_user() {
file=$(basename "$1")
dir=$(dirname "$1")
base=$(echo "${file}" | cut -f1 -d '.')
rm -rf "${dir}/user"
rust-script --pkg-path "${dir}/user" --gen-pkg-only "$1"
artifact=$(sed -n 's/^name = \"\(.*\)\"/\1/p' "${dir}/user/Cargo.toml" | head -n1)
cat >> "${dir}/user/Cargo.toml" << EOF
[workspace]
members = []
EOF
cargo build -q --release --manifest-path "${dir}/user/Cargo.toml" --target=x86_64-unknown-linux-musl
mv "${dir}/user/target/x86_64-unknown-linux-musl/release/${artifact}" "${dir}/${base}"
rm -rf "${dir}/user"
}
download_images() {
mkdir -p "${AYA_IMGDIR}"
case $1 in
@ -214,10 +145,11 @@ EOF
scp_vm() {
local=$1
remote=$(basename "$1")
scp -q -F "${AYA_TMPDIR}/ssh_config" \
-i "${AYA_TMPDIR}/test_rsa" \
-P 2222 "${local}" \
"${AYA_SSH_USER}@localhost:${local}"
"${AYA_SSH_USER}@localhost:${remote}"
}
exec_vm() {
@ -243,19 +175,14 @@ cleanup_vm() {
fi
}
# Check that host machine meets minimum kernel requirement
# Must be in format {major}.{minor}
min_kernel_version() {
target_major=$(echo "$1" | cut -d '.' -f1)
target_minor=$(echo "$1" | cut -d '.' -f2)
if [ -z "$1" ]; then
echo "path to libbpf required"
exit 1
fi
vm_kernel=$(exec_vm uname -r)
vm_major=$(echo "${vm_kernel}" | cut -d '.' -f1)
vm_minor=$(echo "${vm_kernel}" | cut -d '.' -f2)
start_vm
trap stop_vm EXIT
if [ "${vm_major}" -lt "${target_major}" ] || [ "${vm_minor}" -lt "${target_minor}" ]; then
echo "Test not supported on kernel ${vm_major}.${vm_minor}"
return ${RT_CANCEL}
fi
return 0
}
cargo xtask build-integration-test --musl --libbpf-dir "$1"
scp_vm ../target/x86_64-unknown-linux-musl/debug/integration-test
exec_vm sudo ./integration-test

@ -13,3 +13,5 @@ quote = "1"
proc-macro2 = "1"
indexmap = "1.6"
indoc = "1.0"
lazy_static = "1"
serde_json = "1"

@ -0,0 +1,162 @@
use std::{
env, fs,
path::{Path, PathBuf},
process::Command,
};
use anyhow::{bail, Context};
use clap::Parser;
use crate::utils::WORKSPACE_ROOT;
#[derive(Debug, Copy, Clone)]
pub enum Architecture {
BpfEl,
BpfEb,
}
impl std::str::FromStr for Architecture {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(match s {
"bpfel-unknown-none" => Architecture::BpfEl,
"bpfeb-unknown-none" => Architecture::BpfEb,
_ => return Err("invalid target".to_owned()),
})
}
}
impl std::fmt::Display for Architecture {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(match self {
Architecture::BpfEl => "bpfel-unknown-none",
Architecture::BpfEb => "bpfeb-unknown-none",
})
}
}
#[derive(Debug, Parser)]
pub struct Options {
/// Set the endianness of the BPF target
#[clap(default_value = "bpfel-unknown-none", long)]
pub target: Architecture,
/// Build the release target
#[clap(long)]
pub release: bool,
/// Libbpf dir, required for compiling C code
#[clap(long, action)]
pub libbpf_dir: PathBuf,
}
pub fn build_ebpf(opts: Options) -> anyhow::Result<()> {
build_rust_ebpf(&opts)?;
build_c_ebpf(&opts)
}
fn build_rust_ebpf(opts: &Options) -> anyhow::Result<()> {
let mut dir = PathBuf::from(WORKSPACE_ROOT.to_string());
dir.push("test/integration-ebpf");
let target = format!("--target={}", opts.target);
let mut args = vec![
"+nightly",
"build",
"--verbose",
target.as_str(),
"-Z",
"build-std=core",
];
if opts.release {
args.push("--release")
}
let status = Command::new("cargo")
.current_dir(&dir)
.args(&args)
.status()
.expect("failed to build bpf program");
assert!(status.success());
Ok(())
}
fn get_libbpf_headers<P: AsRef<Path>>(libbpf_dir: P, include_path: P) -> anyhow::Result<()> {
let dir = include_path.as_ref();
fs::create_dir_all(&dir)?;
let status = Command::new("make")
.current_dir(libbpf_dir.as_ref().join("src"))
.arg(format!("INCLUDEDIR={}", dir.as_os_str().to_string_lossy()))
.arg("install_headers")
.status()
.expect("failed to build get libbpf headers");
assert!(status.success());
Ok(())
}
fn build_c_ebpf(opts: &Options) -> anyhow::Result<()> {
let mut src = PathBuf::from(WORKSPACE_ROOT.to_string());
src.push("test/integration-ebpf/src/bpf");
let mut out_path = PathBuf::from(WORKSPACE_ROOT.to_string());
out_path.push("target");
out_path.push(opts.target.to_string());
out_path.push(if opts.release { "release " } else { "debug" });
let include_path = out_path.join("include");
get_libbpf_headers(&opts.libbpf_dir, &include_path)?;
let files = fs::read_dir(&src).unwrap();
for file in files {
let p = file.unwrap().path();
if let Some(ext) = p.extension() {
if ext == "c" {
let mut out = PathBuf::from(&out_path);
out.push(p.file_name().unwrap());
out.set_extension("o");
compile_with_clang(&p, &out, &include_path)?;
}
}
}
Ok(())
}
/// Build eBPF programs with clang and libbpf headers.
fn compile_with_clang<P: Clone + AsRef<Path>>(
src: P,
out: P,
include_path: P,
) -> anyhow::Result<()> {
let clang = match env::var("CLANG") {
Ok(val) => val,
Err(_) => String::from("/usr/bin/clang"),
};
let arch = match std::env::consts::ARCH {
"x86_64" => "x86",
"aarch64" => "arm64",
_ => std::env::consts::ARCH,
};
let mut cmd = Command::new(clang);
cmd.arg(format!("-I{}", include_path.as_ref().to_string_lossy()))
.arg("-g")
.arg("-O2")
.arg("-target")
.arg("bpf")
.arg("-c")
.arg(format!("-D__TARGET_ARCH_{}", arch))
.arg(src.as_ref().as_os_str())
.arg("-o")
.arg(out.as_ref().as_os_str());
let output = cmd.output().context("Failed to execute clang")?;
if !output.status.success() {
bail!(
"Failed to compile eBPF programs\n \
stdout=\n \
{}\n \
stderr=\n \
{}\n",
String::from_utf8(output.stdout).unwrap(),
String::from_utf8(output.stderr).unwrap()
);
}
Ok(())
}

@ -0,0 +1,29 @@
use clap::Parser;
use std::process::Command;
use crate::build_ebpf;
#[derive(Parser)]
pub struct Options {
/// Whether to compile for the musl libc target
#[clap(short, long)]
pub musl: bool,
#[clap(flatten)]
pub ebpf_options: build_ebpf::Options,
}
pub fn build_test(opts: Options) -> anyhow::Result<()> {
build_ebpf::build_ebpf(opts.ebpf_options)?;
let mut args = vec!["build", "-p", "integration-test", "--verbose"];
if opts.musl {
args.push("--target=x86_64-unknown-linux-musl");
}
let status = Command::new("cargo")
.args(&args)
.status()
.expect("failed to build bpf program");
assert!(status.success());
Ok(())
}

@ -30,13 +30,7 @@ pub fn docs() -> Result<(), anyhow::Error> {
header.flush().expect("couldn't flush contents");
let abs_header_path = fs::canonicalize(&header_path).unwrap();
let args = vec![
"+nightly",
"doc",
"--workspace",
"--no-deps",
"--all-features",
];
let args = vec!["+nightly", "doc", "--no-deps", "--all-features"];
let status = Command::new("cargo")
.current_dir(&working_dir)

@ -1,5 +1,9 @@
mod build_ebpf;
mod build_test;
mod codegen;
mod docs;
mod run;
pub(crate) mod utils;
use std::process::exit;
@ -14,6 +18,9 @@ pub struct Options {
enum Command {
Codegen(codegen::Options),
Docs,
BuildIntegrationTest(build_test::Options),
BuildIntegrationTestEbpf(build_ebpf::Options),
IntegrationTest(run::Options),
}
fn main() {
@ -23,6 +30,9 @@ fn main() {
let ret = match opts.command {
Codegen(opts) => codegen::codegen(opts),
Docs => docs::docs(),
BuildIntegrationTest(opts) => build_test::build_test(opts),
BuildIntegrationTestEbpf(opts) => build_ebpf::build_ebpf(opts),
IntegrationTest(opts) => run::run(opts),
};
if let Err(e) = ret {

@ -0,0 +1,72 @@
use std::{os::unix::process::CommandExt, path::PathBuf, process::Command};
use anyhow::Context as _;
use clap::Parser;
use crate::build_ebpf::{build_ebpf, Architecture, Options as BuildOptions};
#[derive(Debug, Parser)]
pub struct Options {
/// Set the endianness of the BPF target
#[clap(default_value = "bpfel-unknown-none", long)]
pub bpf_target: Architecture,
/// Build and run the release target
#[clap(long)]
pub release: bool,
/// The command used to wrap your application
#[clap(short, long, default_value = "sudo -E")]
pub runner: String,
/// libbpf directory
#[clap(long, action)]
pub libbpf_dir: String,
/// Arguments to pass to your application
#[clap(name = "args", last = true)]
pub run_args: Vec<String>,
}
/// Build the project
fn build(opts: &Options) -> Result<(), anyhow::Error> {
let mut args = vec!["build"];
if opts.release {
args.push("--release")
}
args.push("--workspace");
let status = Command::new("cargo")
.args(&args)
.status()
.expect("failed to build userspace");
assert!(status.success());
Ok(())
}
/// Build and run the project
pub fn run(opts: Options) -> Result<(), anyhow::Error> {
// build our ebpf program followed by our application
build_ebpf(BuildOptions {
target: opts.bpf_target,
release: opts.release,
libbpf_dir: PathBuf::from(&opts.libbpf_dir),
})
.context("Error while building eBPF program")?;
build(&opts).context("Error while building userspace application")?;
// profile we are building (release or debug)
let profile = if opts.release { "release" } else { "debug" };
let bin_path = format!("target/{}/integration-test", profile);
// arguments to pass to the application
let mut run_args: Vec<_> = opts.run_args.iter().map(String::as_str).collect();
// configure args
let mut args: Vec<_> = opts.runner.trim().split_terminator(' ').collect();
args.push(bin_path.as_str());
args.append(&mut run_args);
// spawn the command
let err = Command::new(args.first().expect("No first argument"))
.args(args.iter().skip(1))
.exec();
// we shouldn't get here unless the command failed to spawn
Err(anyhow::Error::from(err).context(format!("Failed to run `{}`", args.join(" "))))
}

@ -0,0 +1,17 @@
use lazy_static::lazy_static;
use serde_json::Value;
use std::process::Command;
lazy_static! {
pub static ref WORKSPACE_ROOT: String = workspace_root();
}
fn workspace_root() -> String {
let output = Command::new("cargo").arg("metadata").output().unwrap();
if !output.status.success() {
panic!("unable to run cargo metadata")
}
let stdout = String::from_utf8(output.stdout).unwrap();
let v: Value = serde_json::from_str(&stdout).unwrap();
v["workspace_root"].as_str().unwrap().to_string()
}