integration-test: Implement running on VMs

Implements running integration tests on multiple VMs with arbitrary
kernel images using `cargo xtask integration-test vm ...`.

This changes our coverage from 6.2 to 6.1 and 6.4.
Tamir Duberstein
parent b6a6a81f95
commit 82a77bc83d
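For quick reference, a sketch of how the new xtask subcommands are invoked after this change (the kernel image path below is illustrative, not part of the commit):

```
# Run the tests directly on the host, wrapped in `sudo -E` by default.
cargo xtask integration-test local

# Run the tests under QEMU against one or more arbitrary kernel images.
cargo xtask integration-test vm test/.tmp/boot/vmlinuz-6.1.0-10-cloud-amd64
```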

@@ -139,8 +139,14 @@ jobs:
           --target ${{ matrix.target }} \
           -Z build-std=core

-  build-integration-test:
-    runs-on: ubuntu-22.04
+  run-integration-test:
+    strategy:
+      fail-fast: false
+      matrix:
+        runner:
+          - macos-12
+          - ubuntu-22.04
+    runs-on: ${{ matrix.runner }}
     steps:
       - uses: actions/checkout@v3
         with:
@@ -150,13 +156,12 @@ jobs:
         with:
           toolchain: nightly
           components: rust-src
+          targets: aarch64-unknown-linux-musl,x86_64-unknown-linux-musl
       - uses: Swatinem/rust-cache@v2
-      - name: bpf-linker
-        run: cargo install bpf-linker --git https://github.com/aya-rs/bpf-linker.git
-      - name: Install dependencies
+      - name: Install prerequisites
+        if: runner.os == 'Linux'
         # ubuntu-22.04 comes with clang 14[0] which doesn't include support for signed and 64bit
         # enum values which was added in clang 15[1].
         #
@@ -171,63 +176,78 @@ jobs:
           set -euxo pipefail
           wget -qO- https://apt.llvm.org/llvm-snapshot.gpg.key | sudo tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc
           echo deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy main | sudo tee /etc/apt/sources.list.d/llvm.list
-          sudo apt-get update
-          sudo apt-get -y install clang gcc-multilib llvm
-      - name: Build
+          sudo apt update
+          sudo apt -y install clang gcc-multilib llvm locate qemu-system-{arm,x86}
+      - name: bpf-linker
+        if: runner.os == 'Linux'
+        run: cargo install bpf-linker --git https://github.com/aya-rs/bpf-linker.git
+      - name: Install prerequisites
+        if: runner.os == 'macOS'
+        # The clang shipped on macOS doesn't support BPF, so we need LLVM from brew.
+        #
+        # We also need LLVM for bpf-linker, see comment below.
         run: |
           set -euxo pipefail
-          mkdir -p integration-test-binaries
-          # See https://doc.rust-lang.org/cargo/reference/profiles.html for the
-          # names of the builtin profiles. Note that dev builds "debug" targets.
-          cargo xtask build-integration-test --cargo-arg=--profile=dev | xargs -I % cp % integration-test-binaries/dev
-          cargo xtask build-integration-test --cargo-arg=--profile=release | xargs -I % cp % integration-test-binaries/release
-      - uses: actions/upload-artifact@v3
-        with:
-          name: integration-test-binaries
-          path: integration-test-binaries
-
-  run-integration-test:
-    runs-on: macos-latest
-    needs: ["build-integration-test"]
-    steps:
-      - uses: actions/checkout@v3
-        with:
-          sparse-checkout: |
-            test/run.sh
-            test/cloud-localds
-      - name: Install Pre-requisites
+          brew install qemu dpkg pkg-config llvm
+          echo /usr/local/opt/llvm/bin >> $GITHUB_PATH
+      - name: bpf-linker
+        if: runner.os == 'macOS'
+        # NB: rustc doesn't ship libLLVM.so on macOS, so disable proxying (default feature).
+        run: cargo install bpf-linker --git https://github.com/aya-rs/bpf-linker.git --no-default-features
+      - name: Download debian kernels
+        if: runner.arch == 'ARM64'
         run: |
-          brew install qemu gnu-getopt coreutils cdrtools
-      - name: Cache tmp files
-        uses: actions/cache@v3
-        with:
-          path: |
-            .tmp/*.qcow2
-            .tmp/test_rsa
-            .tmp/test_rsa.pub
-          key: tmp-files-${{ hashFiles('test/run.sh') }}
-      - uses: actions/download-artifact@v3
-        with:
-          name: integration-test-binaries
-          path: integration-test-binaries
-      - name: Run integration tests
+          set -euxo pipefail
+          mkdir -p test/.tmp/debian-kernels/arm64
+          # NB: a 4.19 kernel image for arm64 was not available.
+          # TODO: enable tests on kernels before 6.0.
+          # linux-image-5.10.0-23-cloud-arm64-unsigned_5.10.179-3_arm64.deb \
+          printf '%s\0' \
+            linux-image-6.1.0-10-cloud-arm64-unsigned_6.1.38-2_arm64.deb \
+            linux-image-6.4.0-1-cloud-arm64-unsigned_6.4.4-2_arm64.deb \
+            | xargs -0 -t -P0 -I {} wget -nd -q -P test/.tmp/debian-kernels/arm64 ftp://ftp.us.debian.org/debian/pool/main/l/linux/{}
+      - name: Download debian kernels
+        if: runner.arch == 'X64'
         run: |
           set -euxo pipefail
-          find integration-test-binaries -type f -exec chmod +x {} \;
-          test/run.sh integration-test-binaries
+          mkdir -p test/.tmp/debian-kernels/amd64
+          # TODO: enable tests on kernels before 6.0.
+          # linux-image-4.19.0-21-cloud-amd64-unsigned_4.19.249-2_amd64.deb \
+          # linux-image-5.10.0-23-cloud-amd64-unsigned_5.10.179-3_amd64.deb \
+          printf '%s\0' \
+            linux-image-6.1.0-10-cloud-amd64-unsigned_6.1.38-2_amd64.deb \
+            linux-image-6.4.0-1-cloud-amd64-unsigned_6.4.4-2_amd64.deb \
+            | xargs -0 -t -P0 -I {} wget -nd -q -P test/.tmp/debian-kernels/amd64 ftp://ftp.us.debian.org/debian/pool/main/l/linux/{}
+      - name: Alias gtar as tar
+        if: runner.os == 'macOS'
+        # macOS tar doesn't support --wildcards which we use below.
+        run: mkdir tar-is-gtar && ln -s "$(which gtar)" tar-is-gtar/tar && echo "$PWD"/tar-is-gtar >> $GITHUB_PATH
+      - name: Extract debian kernels
+        run: |
+          set -euxo pipefail
+          find test/.tmp -name '*.deb' -print0 | xargs -t -0 -I {} \
+            sh -c "dpkg --fsys-tarfile {} | tar -C test/.tmp --wildcards --extract '*vmlinuz*' --file -"
+      - name: Run integration tests
+        run: find test/.tmp -name 'vmlinuz-*' | xargs -t cargo xtask integration-test vm

   # Provides a single status check for the entire build workflow.
   # This is used for merge automation, like Mergify, since GH actions
   # has no concept of "when all status checks pass".
   # https://docs.mergify.com/conditions/#validating-all-status-checks
   build-workflow-complete:
-    needs: ["lint", "build-test-aya", "build-test-aya-bpf", "run-integration-test"]
+    needs:
+      - lint
+      - build-test-aya
+      - build-test-aya-bpf
+      - run-integration-test
     runs-on: ubuntu-latest
     steps:
       - name: Build Complete

@@ -6,6 +6,7 @@ members = [
     "aya-log-parser",
     "aya-obj",
     "aya-tool",
+    "init",
     "test/integration-test",
     "xtask",
@@ -29,6 +30,7 @@ default-members = [
     "aya-log-parser",
     "aya-obj",
     "aya-tool",
+    "init",
     # test/integration-test is omitted; including it in this list causes `cargo test` to run its
     # tests, and that doesn't work unless they've been built with `cargo xtask`.
     "xtask",
@@ -72,6 +74,7 @@ lazy_static = { version = "1", default-features = false }
 libc = { version = "0.2.105", default-features = false }
 log = { version = "0.4", default-features = false }
 netns-rs = { version = "0.1", default-features = false }
+nix = { version = "0.26.2", default-features = false }
 num_enum = { version = "0.6", default-features = false }
 object = { version = "0.31", default-features = false }
 parking_lot = { version = "0.12.0", default-features = false }

@@ -1,10 +1,9 @@
 #![no_std]
 #![warn(clippy::cast_lossless, clippy::cast_sign_loss)]

-use aya_bpf::{
-    macros::map,
-    maps::{PerCpuArray, PerfEventByteArray},
-};
+#[cfg(target_arch = "bpf")]
+use aya_bpf::macros::map;
+use aya_bpf::maps::{PerCpuArray, PerfEventByteArray};
 pub use aya_log_common::{write_record_header, Level, WriteToBuf, LOG_BUF_CAPACITY};
 pub use aya_log_ebpf_macros::{debug, error, info, log, trace, warn};
@@ -15,11 +14,19 @@ pub struct LogBuf {
 }

 #[doc(hidden)]
-#[map]
+// This cfg_attr prevents compilation failures on macOS where the generated section name doesn't
+// meet mach-o's requirements. We wouldn't ordinarily build this crate for macOS, but we do so
+// because the integration-test crate depends on this crate transitively. See comment in
+// test/integration-test/Cargo.toml.
+#[cfg_attr(target_arch = "bpf", map)]
 pub static mut AYA_LOG_BUF: PerCpuArray<LogBuf> = PerCpuArray::with_max_entries(1, 0);

 #[doc(hidden)]
-#[map]
+// This cfg_attr prevents compilation failures on macOS where the generated section name doesn't
+// meet mach-o's requirements. We wouldn't ordinarily build this crate for macOS, but we do so
+// because the integration-test crate depends on this crate transitively. See comment in
+// test/integration-test/Cargo.toml.
+#[cfg_attr(target_arch = "bpf", map)]
 pub static mut AYA_LOGS: PerfEventByteArray = PerfEventByteArray::new(0);

 #[doc(hidden)]

@@ -0,0 +1,10 @@
[package]
name = "init"
version = "0.1.0"
authors = ["Tamir Duberstein <tamird@gmail.com>"]
edition = "2021"
publish = false
[dependencies]
anyhow = { workspace = true, features = ["std"] }
nix = { workspace = true, features = ["fs", "mount", "reboot"] }

@@ -0,0 +1,166 @@
//! init is the first process started by the kernel.
//!
//! This implementation creates the minimal mounts required to run BPF programs, runs all binaries
//! in /bin, prints a final message ("init: success|failure"), and powers off the machine.
use anyhow::Context as _;
#[derive(Debug)]
struct Errors(Vec<anyhow::Error>);
impl std::fmt::Display for Errors {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let Self(errors) = self;
for (i, error) in errors.iter().enumerate() {
if i != 0 {
writeln!(f)?;
}
write!(f, "{:?}", error)?;
}
Ok(())
}
}
impl std::error::Error for Errors {}
fn run() -> anyhow::Result<()> {
const RXRXRX: nix::sys::stat::Mode = nix::sys::stat::Mode::empty()
.union(nix::sys::stat::Mode::S_IRUSR)
.union(nix::sys::stat::Mode::S_IXUSR)
.union(nix::sys::stat::Mode::S_IRGRP)
.union(nix::sys::stat::Mode::S_IXGRP)
.union(nix::sys::stat::Mode::S_IROTH)
.union(nix::sys::stat::Mode::S_IXOTH);
struct Mount {
source: &'static str,
target: &'static str,
fstype: &'static str,
flags: nix::mount::MsFlags,
data: Option<&'static str>,
target_mode: Option<nix::sys::stat::Mode>,
}
for Mount {
source,
target,
fstype,
flags,
data,
target_mode,
} in [
Mount {
source: "proc",
target: "/proc",
fstype: "proc",
flags: nix::mount::MsFlags::empty(),
data: None,
target_mode: Some(RXRXRX),
},
Mount {
source: "sysfs",
target: "/sys",
fstype: "sysfs",
flags: nix::mount::MsFlags::empty(),
data: None,
target_mode: Some(RXRXRX),
},
Mount {
source: "debugfs",
target: "/sys/kernel/debug",
fstype: "debugfs",
flags: nix::mount::MsFlags::empty(),
data: None,
target_mode: None,
},
Mount {
source: "bpffs",
target: "/sys/fs/bpf",
fstype: "bpf",
flags: nix::mount::MsFlags::empty(),
data: None,
target_mode: None,
},
] {
match target_mode {
None => {
// Must exist.
let nix::sys::stat::FileStat { st_mode, .. } = nix::sys::stat::stat(target)
.with_context(|| format!("stat({target}) failed"))?;
let s_flag = nix::sys::stat::SFlag::from_bits_truncate(st_mode);
if !s_flag.contains(nix::sys::stat::SFlag::S_IFDIR) {
anyhow::bail!("{target} is not a directory");
}
}
Some(target_mode) => {
// Must not exist.
nix::unistd::mkdir(target, target_mode)
.with_context(|| format!("mkdir({target}) failed"))?;
}
}
nix::mount::mount(Some(source), target, Some(fstype), flags, data).with_context(|| {
format!("mount({source}, {target}, {fstype}, {flags:?}, {data:?}) failed")
})?;
}
// By contract we run everything in /bin and assume they're rust test binaries.
//
// If the user requested command line arguments, they're named init.arg={}.
// Read kernel parameters from /proc/cmdline. They're space separated on a single line.
let cmdline = std::fs::read_to_string("/proc/cmdline")
.with_context(|| "read_to_string(/proc/cmdline) failed")?;
let args = cmdline
.split_whitespace()
.filter_map(|parameter| {
parameter
.strip_prefix("init.arg=")
.map(std::ffi::OsString::from)
})
.collect::<Vec<_>>();
// Iterate files in /bin.
let read_dir = std::fs::read_dir("/bin").context("read_dir(/bin) failed")?;
let errors = read_dir
.filter_map(|entry| {
match (|| {
let entry = entry.context("read_dir(/bin) failed")?;
let path = entry.path();
let status = std::process::Command::new(&path)
.args(&args)
.status()
.with_context(|| format!("failed to execute {}", path.display()))?;
if status.code() == Some(0) {
Ok(())
} else {
Err(anyhow::anyhow!("{} failed: {status:?}", path.display()))
}
})() {
Ok(()) => None,
Err(err) => Some(err),
}
})
.collect::<Vec<_>>();
if errors.is_empty() {
Ok(())
} else {
Err(Errors(errors).into())
}
}
fn main() {
match run() {
Ok(()) => {
println!("init: success");
}
Err(err) => {
println!("{err:?}");
println!("init: failure");
}
}
let how = nix::sys::reboot::RebootMode::RB_POWER_OFF;
let _: std::convert::Infallible = nix::sys::reboot::reboot(how)
.unwrap_or_else(|err| panic!("reboot({how:?}) failed: {err:?}"));
}

@@ -3,21 +3,15 @@ Aya Integration Tests
 The aya integration test suite is a set of tests to ensure that
 common usage behaviours work on real Linux distros

 ## Prerequisites

-### Linux
-
-To run locally all you need is:
-
-1. Rust nightly
-1. `cargo install bpf-linker`
-
-### Other OSs
-
-1. A POSIX shell
-1. `rustup target add x86_64-unknown-linux-musl`
+You'll need:
+
+1. `rustup toolchain install nightly`
+1. `rustup target add {aarch64,x86_64}-unknown-linux-musl`
 1. `cargo install bpf-linker`
-1. Install `qemu` and `cloud-init-utils` package - or any package that provides `cloud-localds`
+1. (virtualized only) `qemu`

 ## Usage
@@ -26,15 +20,13 @@ From the root of this repository:

 ### Native

 ```
-cargo xtask integration-test
+cargo xtask integration-test local
 ```

 ### Virtualized

 ```
-mkdir -p integration-test-binaries
-cargo xtask build-integration-test | xargs -I % cp % integration-test-binaries
-./test/run.sh integration-test-binaries
+cargo xtask integration-test vm
 ```
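The `vm` subcommand needs one or more extracted kernel images. A sketch of fetching them, using the same commands as the CI workflow and the xtask help text in this commit (the Debian mirror and kernel versions are illustrative):

```
# Download Debian cloud kernel packages.
wget --accept-regex '.*/linux-image-[0-9\.-]+-cloud-.*-unsigned*' \
  --recursive ftp://ftp.us.debian.org/debian/pool/main/l/linux/

# Extract the vmlinuz images from the .deb packages.
find . -name '*.deb' -print0 \
  | xargs -0 -I {} sh -c "dpkg --fsys-tarfile {} | tar --wildcards --extract '*vmlinuz*' --file -"

# Point the test runner at every extracted image.
find . -name 'vmlinuz-*' | xargs -t cargo xtask integration-test vm
```

Note that macOS `tar` lacks `--wildcards`; the CI workflow aliases GNU `gtar` as `tar` for this step.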
 ### Writing an integration test

@@ -1,264 +0,0 @@
#!/bin/bash
VERBOSITY=0
TEMP_D=""
DEF_DISK_FORMAT="raw"
DEF_FILESYSTEM="iso9660"
CR="
"
error() { echo "$@" 1>&2; }
fail() { [ $# -eq 0 ] || error "$@"; exit 1; }
Usage() {
cat <<EOF
Usage: ${0##*/} [ options ] output user-data [meta-data]
Create a disk for cloud-init to utilize nocloud
options:
-h | --help show usage
-d | --disk-format D disk format to output. default: raw
can be anything supported by qemu-img or
tar, tar-seed-local, tar-seed-net
-H | --hostname H set hostname in metadata to H
-f | --filesystem F filesystem format (vfat or iso), default: iso9660
-i | --interfaces F write network interfaces file into metadata
-N | --network-config F write network config file to local datasource
-m | --dsmode M add 'dsmode' ('local' or 'net') to the metadata
default in cloud-init is 'net', meaning network is
required.
-V | --vendor-data F vendor-data file
-v | --verbose increase verbosity
Note, --dsmode, --hostname, and --interfaces are incompatible
with metadata.
Example:
* cat my-user-data
#cloud-config
password: passw0rd
chpasswd: { expire: False }
ssh_pwauth: True
* echo "instance-id: \$(uuidgen || echo i-abcdefg)" > my-meta-data
* ${0##*/} my-seed.img my-user-data my-meta-data
* kvm -net nic -net user,hostfwd=tcp::2222-:22 \\
-drive file=disk1.img,if=virtio -drive file=my-seed.img,if=virtio
* ssh -p 2222 ubuntu@localhost
EOF
}
bad_Usage() { Usage 1>&2; [ $# -eq 0 ] || error "$@"; exit 1; }
cleanup() {
[ -z "${TEMP_D}" -o ! -d "${TEMP_D}" ] || rm -Rf "${TEMP_D}"
}
debug() {
local level=${1}; shift;
[ "${level}" -gt "${VERBOSITY}" ] && return
error "${@}"
}
has_cmd() {
command -v "$1" >/dev/null 2>&1
}
short_opts="hH:i:d:f:m:N:o:V:v"
long_opts="disk-format:,dsmode:,filesystem:,help,hostname:,interfaces:,"
long_opts="${long_opts}network-config:,output:,vendor-data:,verbose"
getopt_out=$(getopt -n "${0##*/}" \
-o "${short_opts}" -l "${long_opts}" -- "$@") &&
eval set -- "${getopt_out}" ||
bad_Usage
## <<insert default variables here>>
output=""
userdata=""
metadata=""
vendordata=""
filesystem=""
diskformat=$DEF_DISK_FORMAT
interfaces=_unset
dsmode=""
hostname=""
ncname="network-config"
while [ $# -ne 0 ]; do
cur=${1}; next=${2};
case "$cur" in
-h|--help) Usage ; exit 0;;
-d|--disk-format) diskformat=$next; shift;;
-f|--filesystem) filesystem=$next; shift;;
-H|--hostname) hostname=$next; shift;;
-i|--interfaces) interfaces=$next; shift;;
-N|--network-config) netcfg=$next; shift;;
-m|--dsmode) dsmode=$next; shift;;
-v|--verbose) VERBOSITY=$((${VERBOSITY}+1));;
-V|--vendor-data) vendordata="$next";;
--) shift; break;;
esac
shift;
done
## check arguments here
## how many args do you expect?
echo $1
echo $2
echo $3
[ $# -ge 2 ] || bad_Usage "must provide output, userdata"
[ $# -le 3 ] || bad_Usage "confused by additional args"
output=$1
userdata=$2
metadata=$3
if [ -n "$metadata" ]; then
[ "$interfaces" = "_unset" -a -z "$dsmode" -a -z "$hostname" ] ||
fail "metadata is incompatible with:" \
"--interfaces, --hostname, --dsmode"
fi
case "$diskformat" in
tar|tar-seed-local|tar-seed-net)
if [ "${filesystem:-tar}" != "tar" ]; then
fail "diskformat=tar is incompatible with filesystem"
fi
filesystem="$diskformat"
;;
tar*)
fail "supported 'tar' formats are tar, tar-seed-local, tar-seed-net"
esac
if [ -z "$filesystem" ]; then
filesystem="$DEF_FILESYSTEM"
fi
if [ "$filesystem" = "iso" ]; then
filesystem="iso9660"
fi
case "$filesystem" in
tar*)
has_cmd tar ||
fail "missing 'tar'. Required for --filesystem=$filesystem";;
vfat)
has_cmd mkfs.vfat ||
fail "missing 'mkfs.vfat'. Required for --filesystem=vfat."
has_cmd mcopy ||
fail "missing 'mcopy'. Required for --filesystem=vfat."
;;
iso9660)
has_cmd mkisofs ||
fail "missing 'mkisofs'. Required for --filesystem=iso9660."
;;
*) fail "unknown filesystem $filesystem";;
esac
case "$diskformat" in
tar*|raw) :;;
*) has_cmd "qemu-img" ||
fail "missing 'qemu-img'. Required for --disk-format=$diskformat."
esac
[ "$interfaces" = "_unset" -o -r "$interfaces" ] ||
fail "$interfaces: not a readable file"
TEMP_D=$(mktemp -d "${TMPDIR:-/tmp}/${0##*/}.XXXXXX") ||
fail "failed to make tempdir"
trap cleanup EXIT
files=( "${TEMP_D}/user-data" "${TEMP_D}/meta-data" )
if [ -n "$metadata" ]; then
cp "$metadata" "$TEMP_D/meta-data" || fail "$metadata: failed to copy"
else
instance_id="iid-local01"
iface_data=""
[ "$interfaces" != "_unset" ] &&
iface_data=$(sed ':a;N;$!ba;s/\n/\\n/g' "$interfaces")
# write json formatted user-data (json is a subset of yaml)
mdata=""
for kv in "instance-id:$instance_id" "local-hostname:$hostname" \
"interfaces:${iface_data}" "dsmode:$dsmode"; do
key=${kv%%:*}
val=${kv#*:}
[ -n "$val" ] || continue
mdata="${mdata:+${mdata},${CR}}\"$key\": \"$val\""
done
printf "{\n%s\n}\n" "$mdata" > "${TEMP_D}/meta-data"
fi
if [ -n "$netcfg" ]; then
cp "$netcfg" "${TEMP_D}/$ncname" ||
fail "failed to copy network config"
files[${#files[@]}]="$TEMP_D/$ncname"
fi
if [ -n "$vendordata" ]; then
cp "$vendordata" "${TEMP_D}/vendor-data" ||
fail "failed to copy vendor data"
files[${#files[@]}]="$TEMP_D/vendor-data"
fi
files_rel=( )
for f in "${files[@]}"; do
files_rel[${#files_rel[@]}]="${f#${TEMP_D}/}"
done
if [ "$userdata" = "-" ]; then
cat > "$TEMP_D/user-data" || fail "failed to read from stdin"
else
cp "$userdata" "$TEMP_D/user-data" || fail "$userdata: failed to copy"
fi
## alternatively, create a vfat filesystem with same files
img="$TEMP_D/seed-data"
tar_opts=( --owner=root --group=root )
case "$filesystem" in
tar)
tar "${tar_opts[@]}" -C "${TEMP_D}" -cf "$img" "${files_rel[@]}" ||
fail "failed to create tarball of ${files_rel[*]}"
;;
tar-seed-local|tar-seed-net)
if [ "$filesystem" = "tar-seed-local" ]; then
path="var/lib/cloud/seed/nocloud"
else
path="var/lib/cloud/seed/nocloud-net"
fi
mkdir -p "${TEMP_D}/${path}" ||
fail "failed making path for seed files"
mv "${files[@]}" "${TEMP_D}/$path" ||
fail "failed moving files"
tar "${tar_opts[@]}" -C "${TEMP_D}" -cf "$img" "${path}" ||
fail "failed to create tarball with $path"
;;
iso9660)
mkisofs -output "$img" -volid cidata \
-joliet -rock "${files[@]}" > "$TEMP_D/err" 2>&1 ||
{ cat "$TEMP_D/err" 1>&2; fail "failed to mkisofs"; }
;;
vfat)
truncate -s 128K "$img" || fail "failed truncate image"
out=$(mkfs.vfat -n cidata "$img" 2>&1) ||
{ error "failed: mkfs.vfat -n cidata $img"; error "$out"; }
mcopy -oi "$img" "${files[@]}" :: ||
fail "failed to copy user-data, meta-data to img"
;;
esac
[ "$output" = "-" ] && output="$TEMP_D/final"
if [ "${diskformat#tar}" != "$diskformat" -o "$diskformat" = "raw" ]; then
cp "$img" "$output" ||
fail "failed to copy image to $output"
else
qemu-img convert -f raw -O "$diskformat" "$img" "$output" ||
fail "failed to convert to disk format $diskformat"
fi
[ "$output" != "$TEMP_D/final" ] || { cat "$output" && output="-"; } ||
fail "failed to write to -"
debug 1 "wrote ${output} with filesystem=$filesystem and diskformat=$diskformat"
# vi: ts=4 noexpandtab

@@ -1,241 +0,0 @@
#!/usr/bin/env bash
set -ex
if [ "$(uname -s)" = "Darwin" ]; then
PATH="$(dirname "$(brew list gnu-getopt | grep "bin/getopt$")"):$PATH"
export PATH
fi
AYA_SOURCE_DIR="$(realpath "$(dirname "$0")"/..)"
# Temporary directory for tests to use.
AYA_TMPDIR="${AYA_SOURCE_DIR}/.tmp"
# Directory for VM images
AYA_IMGDIR=${AYA_TMPDIR}
if [ -z "${AYA_BUILD_TARGET}" ]; then
AYA_BUILD_TARGET=$(rustc -vV | sed -n 's|host: ||p')
fi
AYA_HOST_ARCH=$(uname -m)
if [ "${AYA_HOST_ARCH}" = "arm64" ]; then
AYA_HOST_ARCH="aarch64"
fi
if [ -z "${AYA_GUEST_ARCH}" ]; then
AYA_GUEST_ARCH="${AYA_HOST_ARCH}"
fi
if [ "${AYA_GUEST_ARCH}" = "aarch64" ]; then
if [ -z "${AARCH64_UEFI}" ]; then
AARCH64_UEFI="$(brew list qemu -1 -v | grep edk2-aarch64-code.fd)"
fi
fi
if [ -z "$AYA_MUSL_TARGET" ]; then
AYA_MUSL_TARGET=${AYA_GUEST_ARCH}-unknown-linux-musl
fi
# Test Image
if [ -z "${AYA_TEST_IMAGE}" ]; then
AYA_TEST_IMAGE="fedora38"
fi
case "${AYA_TEST_IMAGE}" in
fedora*) AYA_SSH_USER="fedora";;
centos*) AYA_SSH_USER="centos";;
esac
download_images() {
mkdir -p "${AYA_IMGDIR}"
case $1 in
fedora37)
if [ ! -f "${AYA_IMGDIR}/fedora37.${AYA_GUEST_ARCH}.qcow2" ]; then
IMAGE="Fedora-Cloud-Base-37-1.7.${AYA_GUEST_ARCH}.qcow2"
IMAGE_URL="https://download.fedoraproject.org/pub/fedora/linux/releases/37/Cloud/${AYA_GUEST_ARCH}/images"
echo "Downloading: ${IMAGE}, this may take a while..."
curl -o "${AYA_IMGDIR}/fedora37.${AYA_GUEST_ARCH}.qcow2" -sSL "${IMAGE_URL}/${IMAGE}"
fi
;;
fedora38)
if [ ! -f "${AYA_IMGDIR}/fedora38.${AYA_GUEST_ARCH}.qcow2" ]; then
IMAGE="Fedora-Cloud-Base-38_Beta-1.3.${AYA_GUEST_ARCH}.qcow2"
IMAGE_URL="https://fr2.rpmfind.net/linux/fedora/linux/releases/test/38_Beta/Cloud/${AYA_GUEST_ARCH}/images"
echo "Downloading: ${IMAGE}, this may take a while..."
curl -o "${AYA_IMGDIR}/fedora38.${AYA_GUEST_ARCH}.qcow2" -sSL "${IMAGE_URL}/${IMAGE}"
fi
;;
centos8)
if [ ! -f "${AYA_IMGDIR}/centos8.${AYA_GUEST_ARCH}.qcow2" ]; then
IMAGE="CentOS-8-GenericCloud-8.4.2105-20210603.0.${AYA_GUEST_ARCH}.qcow2"
IMAGE_URL="https://cloud.centos.org/centos/8/${AYA_GUEST_ARCH}/images"
echo "Downloading: ${IMAGE}, this may take a while..."
curl -o "${AYA_IMGDIR}/centos8.${AYA_GUEST_ARCH}.qcow2" -sSL "${IMAGE_URL}/${IMAGE}"
fi
;;
*)
echo "$1 is not a recognized image name"
return 1
;;
esac
}
start_vm() {
download_images "${AYA_TEST_IMAGE}"
# prepare config
cat > "${AYA_TMPDIR}/metadata.yaml" <<EOF
instance-id: iid-local01
local-hostname: test
EOF
if [ ! -f "${AYA_TMPDIR}/test_rsa" ]; then
ssh-keygen -t rsa -b 4096 -f "${AYA_TMPDIR}/test_rsa" -N "" -C "" -q
pub_key=$(cat "${AYA_TMPDIR}/test_rsa.pub")
fi
if [ ! -f "${AYA_TMPDIR}/ssh_config" ]; then
cat > "${AYA_TMPDIR}/ssh_config" <<EOF
StrictHostKeyChecking=no
UserKnownHostsFile=/dev/null
GlobalKnownHostsFile=/dev/null
EOF
fi
cat > "${AYA_TMPDIR}/user-data.yaml" <<EOF
#cloud-config
ssh_authorized_keys:
- ${pub_key}
EOF
"$AYA_SOURCE_DIR"/test/cloud-localds "${AYA_TMPDIR}/seed.img" "${AYA_TMPDIR}/user-data.yaml" "${AYA_TMPDIR}/metadata.yaml"
case "${AYA_GUEST_ARCH}" in
x86_64)
QEMU=qemu-system-x86_64
machine="q35"
cpu="qemu64"
nr_cpus="$(nproc --all)"
if [ "${AYA_HOST_ARCH}" = "${AYA_GUEST_ARCH}" ]; then
if [ -c /dev/kvm ]; then
machine="${machine},accel=kvm"
cpu="host"
elif [ "$(uname -s)" = "Darwin" ]; then
machine="${machine},accel=hvf"
cpu="host"
fi
fi
;;
aarch64)
QEMU=qemu-system-aarch64
machine="virt"
cpu="cortex-a57"
uefi=("-drive" "file=${AARCH64_UEFI},if=pflash,format=raw,readonly=on")
if [ "${AYA_HOST_ARCH}" = "${AYA_GUEST_ARCH}" ]; then
if [ -c /dev/kvm ]; then
machine="${machine},accel=kvm"
cpu="host"
nr_cpus="$(nproc --all)"
elif [ "$(uname -s)" = "Darwin" ]; then
machine="${machine},accel=hvf,highmem=off"
cpu="cortex-a72"
# nrpoc --all on apple silicon returns the two extra fancy
# cores and then qemu complains that nr_cpus > actual_cores
nr_cpus=8
fi
fi
;;
*)
echo "${AYA_GUEST_ARCH} is not supported"
return 1
;;
esac
if [ ! -f "${AYA_IMGDIR}/vm.qcow2" ]; then
echo "Creating VM image"
qemu-img create -F qcow2 -f qcow2 -o backing_file="${AYA_IMGDIR}/${AYA_TEST_IMAGE}.${AYA_GUEST_ARCH}.qcow2" "${AYA_IMGDIR}/vm.qcow2" || return 1
else
echo "Reusing existing VM image"
fi
$QEMU \
-machine "${machine}" \
-cpu "${cpu}" \
-m 3G \
-smp "${nr_cpus}" \
-display none \
-monitor none \
-daemonize \
-pidfile "${AYA_TMPDIR}/vm.pid" \
-device virtio-net-pci,netdev=net0 \
-netdev user,id=net0,hostfwd=tcp::2222-:22 \
"${uefi[@]}" \
-drive if=virtio,format=qcow2,file="${AYA_IMGDIR}/vm.qcow2" \
-drive if=virtio,format=raw,file="${AYA_TMPDIR}/seed.img" || return 1
trap cleanup_vm EXIT
echo "Waiting for SSH on port 2222..."
retry=0
max_retries=300
while ! ssh -q -F "${AYA_TMPDIR}/ssh_config" -o ConnectTimeout=1 -i "${AYA_TMPDIR}/test_rsa" "${AYA_SSH_USER}"@localhost -p 2222 echo "Hello VM"; do
retry=$((retry+1))
if [ ${retry} -gt ${max_retries} ]; then
echo "Unable to connect to VM"
return 1
fi
sleep 1
done
echo "VM launched"
exec_vm uname -a
echo "Enabling testing repositories"
exec_vm sudo dnf config-manager --set-enabled updates-testing
exec_vm sudo dnf config-manager --set-enabled updates-testing-modular
}
scp_vm() {
local=$1
remote=$(basename "$1")
scp -q -F "${AYA_TMPDIR}/ssh_config" \
-i "${AYA_TMPDIR}/test_rsa" \
-P 2222 "${local}" \
"${AYA_SSH_USER}@localhost:${remote}"
}
rsync_vm() {
rsync -a -e "ssh -p 2222 -F ${AYA_TMPDIR}/ssh_config -i ${AYA_TMPDIR}/test_rsa" "$1" "$AYA_SSH_USER"@localhost:
}
exec_vm() {
ssh -q -F "${AYA_TMPDIR}/ssh_config" \
-i "${AYA_TMPDIR}/test_rsa" \
-p 2222 \
"${AYA_SSH_USER}"@localhost \
"$@"
}
stop_vm() {
if [ -f "${AYA_TMPDIR}/vm.pid" ]; then
echo "Stopping VM forcefully"
kill -9 "$(cat "${AYA_TMPDIR}/vm.pid")"
rm "${AYA_TMPDIR}/vm.pid"
fi
}
cleanup_vm() {
if ! stop_vm; then
rm -f "${AYA_IMGDIR}/vm.qcow2"
fi
}
start_vm
trap cleanup_vm EXIT
# make sure we always use fresh sources (also see comment at the end)
rsync_vm "$*"
exec_vm "find $* -type f -executable -print0 | xargs -0 -I {} sudo {} --test-threads=1"
# we rm and sync but it doesn't seem to work reliably - I guess we could sleep a
# few seconds after but ain't nobody got time for that. Instead we also rm
# before rsyncing.
exec_vm "rm -rf $*; sync"

@@ -19,7 +19,6 @@ pub struct XtaskOptions {
 enum Subcommand {
     Codegen(codegen::Options),
     Docs,
-    BuildIntegrationTest(run::BuildOptions),
     IntegrationTest(run::Options),
     PublicApi(public_api::Options),
 }
@@ -45,17 +44,6 @@ fn main() -> Result<()> {
     match command {
         Subcommand::Codegen(opts) => codegen::codegen(opts, libbpf_dir),
         Subcommand::Docs => docs::docs(metadata),
-        Subcommand::BuildIntegrationTest(opts) => {
-            let binaries = run::build(opts)?;
-            let mut stdout = std::io::stdout();
-            for (_name, binary) in binaries {
-                use std::{io::Write as _, os::unix::ffi::OsStrExt as _};
-                stdout.write_all(binary.as_os_str().as_bytes())?;
-                stdout.write_all("\n".as_bytes())?;
-            }
-            Ok(())
-        }
         Subcommand::IntegrationTest(opts) => run::run(opts),
         Subcommand::PublicApi(opts) => public_api::public_api(opts, metadata),
     }

@@ -1,47 +1,66 @@
 use std::{
+    env::consts::{ARCH, OS},
     ffi::OsString,
     fmt::Write as _,
-    io::BufReader,
-    path::PathBuf,
-    process::{Child, Command, Stdio},
+    fs::{copy, create_dir_all, metadata, File},
+    io::{BufRead as _, BufReader, ErrorKind, Write as _},
+    path::{Path, PathBuf},
+    process::{Child, Command, Output, Stdio},
 };

 use anyhow::{anyhow, bail, Context as _, Result};
 use cargo_metadata::{Artifact, CompilerMessage, Message, Target};
 use clap::Parser;
-use xtask::AYA_BUILD_INTEGRATION_BPF;
+use xtask::{exec, AYA_BUILD_INTEGRATION_BPF};

-#[derive(Debug, Parser)]
-pub struct BuildOptions {
-    /// Arguments to pass to `cargo build`.
-    #[clap(long)]
-    pub cargo_arg: Vec<OsString>,
+#[derive(Parser)]
+enum Environment {
+    /// Runs the integration tests locally.
+    Local {
+        /// The command used to wrap your application.
+        #[clap(short, long, default_value = "sudo -E")]
+        runner: String,
+    },
+    /// Runs the integration tests in a VM.
+    VM {
+        /// The kernel images to use.
+        ///
+        /// You can download some images with:
+        ///
+        ///     wget --accept-regex '.*/linux-image-[0-9\.-]+-cloud-.*-unsigned*' \
+        ///         --recursive ftp://ftp.us.debian.org/debian/pool/main/l/linux/
+        ///
+        /// You can then extract them with:
+        ///
+        ///     find . -name '*.deb' -print0 \
+        ///         | xargs -0 -I {} sh -c "dpkg --fsys-tarfile {} \
+        ///         | tar --wildcards --extract '*vmlinuz*' --file -"
+        #[clap(required = true)]
+        kernel_image: Vec<PathBuf>,
+    },
 }

-#[derive(Debug, Parser)]
+#[derive(Parser)]
 pub struct Options {
-    #[command(flatten)]
-    pub build_options: BuildOptions,
-    /// The command used to wrap your application.
-    #[clap(short, long, default_value = "sudo -E")]
-    pub runner: String,
+    #[clap(subcommand)]
+    environment: Environment,
     /// Arguments to pass to your application.
-    #[clap(last = true)]
-    pub run_args: Vec<OsString>,
+    #[clap(global = true, last = true)]
+    run_args: Vec<OsString>,
 }

-/// Build the project
-pub fn build(opts: BuildOptions) -> Result<Vec<(String, PathBuf)>> {
-    let BuildOptions { cargo_arg } = opts;
+pub fn build<F>(target: Option<&str>, f: F) -> Result<Vec<(String, PathBuf)>>
+where
+    F: FnOnce(&mut Command) -> &mut Command,
+{
+    // Always use rust-lld and -Zbuild-std in case we're cross-compiling.
     let mut cmd = Command::new("cargo");
-    cmd.env(AYA_BUILD_INTEGRATION_BPF, "true")
-        .args([
-            "build",
-            "--tests",
-            "--message-format=json",
-            "--package=integration-test",
-        ])
-        .args(cargo_arg);
+    cmd.args(["build", "--message-format=json"]);
+    if let Some(target) = target {
+        let config = format!("target.{target}.linker = \"rust-lld\"");
+        cmd.args(["--target", target, "--config", &config]);
+    }
+    f(&mut cmd);

     let mut child = cmd
         .stdout(Stdio::piped())
@@ -83,35 +102,78 @@ pub fn build(opts: BuildOptions) -> Result<Vec<(String, PathBuf)>> {
     Ok(executables)
 }

-/// Build and run the project
+#[derive(Debug)]
+struct Errors(Vec<anyhow::Error>);
+
+impl std::fmt::Display for Errors {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let Self(errors) = self;
+        for (i, error) in errors.iter().enumerate() {
+            if i != 0 {
+                writeln!(f)?;
+            }
+            write!(f, "{:?}", error)?;
+        }
+        Ok(())
+    }
+}
+
+impl std::error::Error for Errors {}
+
+/// Build and run the project.
 pub fn run(opts: Options) -> Result<()> {
     let Options {
-        build_options,
-        runner,
+        environment,
         run_args,
     } = opts;

-    let binaries = build(build_options).context("error while building userspace application")?;
+    type Binary = (String, PathBuf);
+    fn binaries(target: Option<&str>) -> Result<Vec<(&str, Vec<Binary>)>> {
+        ["dev", "release"]
+            .into_iter()
+            .map(|profile| {
+                let binaries = build(target, |cmd| {
+                    cmd.env(AYA_BUILD_INTEGRATION_BPF, "true").args([
+                        "--package",
+                        "integration-test",
+                        "--tests",
+                        "--profile",
+                        profile,
+                    ])
+                })?;
+                anyhow::Ok((profile, binaries))
+            })
+            .collect()
+    }
+
+    // Use --test-threads=1 to prevent tests from interacting with shared
+    // kernel state due to the lack of inter-test isolation.
+    let default_args = [OsString::from("--test-threads=1")];
+    let run_args = default_args.iter().chain(run_args.iter());
+
+    match environment {
+        Environment::Local { runner } => {
             let mut args = runner.trim().split_terminator(' ');
             let runner = args.next().ok_or(anyhow!("no first argument"))?;
             let args = args.collect::<Vec<_>>();

+            let binaries = binaries(None)?;
             let mut failures = String::new();
+            for (profile, binaries) in binaries {
                 for (name, binary) in binaries {
                     let mut cmd = Command::new(runner);
-                    let cmd = cmd
-                        .args(args.iter())
-                        .arg(binary)
-                        .args(run_args.iter())
-                        .arg("--test-threads=1");
+                    let cmd = cmd.args(args.iter()).arg(binary).args(run_args.clone());

-                    println!("{name} running {cmd:?}");
+                    println!("{profile}:{name} running {cmd:?}");

                     let status = cmd
                         .status()
                         .with_context(|| format!("failed to run {cmd:?}"))?;
                     if status.code() != Some(0) {
-                        writeln!(&mut failures, "{name} failed: {status:?}").context("String write failed")?
+                        writeln!(&mut failures, "{profile}:{name} failed: {status:?}")
+                            .context("String write failed")?
+                    }
                 }
             }
             if failures.is_empty() {
@@ -119,4 +181,326 @@ pub fn run(opts: Options) -> Result<()> {
             } else {
                 Err(anyhow!("failures:\n{}", failures))
             }
}
Environment::VM { kernel_image } => {
// The user has asked us to run the tests on a VM. This is involved; strap in.
//
// We need tools to build the initramfs; we use gen_init_cpio from the Linux repository,
// taking care to cache it.
//
// Then we iterate the kernel images, using the `file` program to guess the target
// architecture. We then build the init program and our test binaries for that
// architecture, and use gen_init_cpio to build an initramfs containing the test
// binaries. We're almost ready to run the VM.
//
// We consult our OS, our architecture, and the target architecture to determine if
// hardware acceleration is available, and then start QEMU with the provided kernel
// image and the initramfs we built.
//
// We consume the output of QEMU, looking for the output of our init program. This is
// the only way to distinguish success from failure. We batch up the errors across all
// VM images and report to the user. The end.
let cache_dir = Path::new("test/.tmp");
create_dir_all(cache_dir).context("failed to create cache dir")?;
let gen_init_cpio = cache_dir.join("gen_init_cpio");
if !gen_init_cpio
.try_exists()
.context("failed to check existence of gen_init_cpio")?
{
let mut curl = Command::new("curl");
curl.args([
"-sfSL",
"https://raw.githubusercontent.com/torvalds/linux/master/usr/gen_init_cpio.c",
]);
let mut curl_child = curl
.stdout(Stdio::piped())
.spawn()
.with_context(|| format!("failed to spawn {curl:?}"))?;
let Child { stdout, .. } = &mut curl_child;
let curl_stdout = stdout.take().unwrap();
let mut clang = Command::new("clang");
let clang = exec(
clang
.args(["-g", "-O2", "-x", "c", "-", "-o"])
.arg(&gen_init_cpio)
.stdin(curl_stdout),
);
let output = curl_child
.wait_with_output()
.with_context(|| format!("failed to wait for {curl:?}"))?;
let Output { status, .. } = &output;
if status.code() != Some(0) {
bail!("{curl:?} failed: {output:?}")
}
// Check the result of clang *after* checking curl; in case the download failed,
// only curl's output will be useful.
clang?;
}
let mut errors = Vec::new();
for kernel_image in kernel_image {
// Guess the guest architecture.
let mut cmd = Command::new("file");
let output = cmd
.arg("--brief")
.arg(&kernel_image)
.output()
.with_context(|| format!("failed to run {cmd:?}"))?;
let Output { status, .. } = &output;
if status.code() != Some(0) {
bail!("{cmd:?} failed: {output:?}")
}
let Output { stdout, .. } = output;
// Now parse the output of the file command, which looks something like
//
// - Linux kernel ARM64 boot executable Image, little-endian, 4K pages
//
// - Linux kernel x86 boot executable bzImage, version 6.1.0-10-cloud-amd64 [..]
let stdout = String::from_utf8(stdout)
.with_context(|| format!("invalid UTF-8 in {cmd:?} stdout"))?;
let (_, stdout) = stdout
.split_once("Linux kernel")
.ok_or_else(|| anyhow!("failed to parse {cmd:?} stdout: {stdout}"))?;
let (guest_arch, _) = stdout
.split_once("boot executable")
.ok_or_else(|| anyhow!("failed to parse {cmd:?} stdout: {stdout}"))?;
let guest_arch = guest_arch.trim();
let (guest_arch, machine, cpu) = match guest_arch {
"ARM64" => ("aarch64", Some("virt"), Some("cortex-a57")),
"x86" => ("x86_64", Some("q35"), Some("qemu64")),
guest_arch => (guest_arch, None, None),
};
let target = format!("{guest_arch}-unknown-linux-musl");
// Build our init program. The contract is that it will run anything it finds in /bin.
let init = build(Some(&target), |cmd| {
cmd.args(["--package", "init", "--profile", "release"])
})
.context("building init program failed")?;
let init = match &*init {
[(name, init)] => {
if name != "init" {
bail!("expected init program to be named init, found {name}")
}
init
}
init => bail!("expected exactly one init program, found {init:?}"),
};
let binaries = binaries(Some(&target))?;
let tmp_dir = tempfile::tempdir().context("tempdir failed")?;
let initrd_image = tmp_dir.path().join("qemu-initramfs.img");
let initrd_image_file = File::create(&initrd_image).with_context(|| {
format!("failed to create {} for writing", initrd_image.display())
})?;
let mut gen_init_cpio = Command::new(&gen_init_cpio);
let mut gen_init_cpio_child = gen_init_cpio
.arg("-")
.stdin(Stdio::piped())
.stdout(initrd_image_file)
.spawn()
.with_context(|| format!("failed to spawn {gen_init_cpio:?}"))?;
let Child { stdin, .. } = &mut gen_init_cpio_child;
let mut stdin = stdin.take().unwrap();
use std::os::unix::ffi::OsStrExt as _;
// Send input into gen_init_cpio which looks something like
//
// file /init path-to-init 0755 0 0
// dir /bin 0755 0 0
// file /bin/foo path-to-foo 0755 0 0
// file /bin/bar path-to-bar 0755 0 0
for bytes in [
"file /init ".as_bytes(),
init.as_os_str().as_bytes(),
" 0755 0 0\n".as_bytes(),
"dir /bin 0755 0 0\n".as_bytes(),
] {
stdin.write_all(bytes).expect("write");
}
for (profile, binaries) in binaries {
for (name, binary) in binaries {
let name = format!("{}-{}", profile, name);
let path = tmp_dir.path().join(&name);
copy(&binary, &path).with_context(|| {
format!("copy({}, {}) failed", binary.display(), path.display())
})?;
for bytes in [
"file /bin/".as_bytes(),
name.as_bytes(),
" ".as_bytes(),
path.as_os_str().as_bytes(),
" 0755 0 0\n".as_bytes(),
] {
stdin.write_all(bytes).expect("write");
}
}
}
// Must explicitly close to signal EOF.
drop(stdin);
let output = gen_init_cpio_child
.wait_with_output()
.with_context(|| format!("failed to wait for {gen_init_cpio:?}"))?;
let Output { status, .. } = &output;
if status.code() != Some(0) {
bail!("{gen_init_cpio:?} failed: {output:?}")
}
copy(&initrd_image, "/tmp/initrd.img").context("copy failed")?;
let mut qemu = Command::new(format!("qemu-system-{guest_arch}"));
if let Some(machine) = machine {
qemu.args(["-machine", machine]);
}
if guest_arch == ARCH {
match OS {
"linux" => match metadata("/dev/kvm") {
Ok(metadata) => {
use std::os::unix::fs::FileTypeExt as _;
if metadata.file_type().is_char_device() {
qemu.args(["-accel", "kvm"]);
}
}
Err(error) => {
if error.kind() != ErrorKind::NotFound {
Err(error).context("failed to check existence of /dev/kvm")?;
}
}
},
"macos" => {
qemu.args(["-accel", "hvf"]);
}
os => bail!("unsupported OS: {os}"),
}
} else if let Some(cpu) = cpu {
qemu.args(["-cpu", cpu]);
}
let console = OsString::from("ttyS0");
let kernel_args = std::iter::once(("console", &console))
.chain(run_args.clone().map(|run_arg| ("init.arg", run_arg)))
.enumerate()
.fold(OsString::new(), |mut acc, (i, (k, v))| {
if i != 0 {
acc.push(" ");
}
acc.push(k);
acc.push("=");
acc.push(v);
acc
});
qemu.args(["-no-reboot", "-nographic", "-m", "512M", "-smp", "2"])
.arg("-append")
.arg(kernel_args)
.arg("-kernel")
.arg(&kernel_image)
.arg("-initrd")
.arg(&initrd_image);
if guest_arch == "aarch64" {
match OS {
"linux" => {
let mut cmd = Command::new("locate");
let output = cmd
.arg("QEMU_EFI.fd")
.output()
.with_context(|| format!("failed to run {cmd:?}"))?;
let Output { status, .. } = &output;
if status.code() != Some(0) {
bail!("{qemu:?} failed: {output:?}")
}
let Output { stdout, .. } = output;
let bios = String::from_utf8(stdout)
.with_context(|| format!("failed to parse output of {cmd:?}"))?;
qemu.args(["-bios", bios.trim()]);
}
"macos" => {
let mut cmd = Command::new("brew");
let output = cmd
.args(["list", "qemu", "-1", "-v"])
.output()
.with_context(|| format!("failed to run {cmd:?}"))?;
let Output { status, .. } = &output;
if status.code() != Some(0) {
bail!("{qemu:?} failed: {output:?}")
}
let Output { stdout, .. } = output;
let output = String::from_utf8(stdout)
.with_context(|| format!("failed to parse output of {cmd:?}"))?;
const NAME: &str = "edk2-aarch64-code.fd";
let bios = output.lines().find(|line| line.contains(NAME)).ok_or_else(
|| anyhow!("failed to find {NAME} in output of {cmd:?}: {output}"),
)?;
qemu.args(["-bios", bios.trim()]);
}
os => bail!("unsupported OS: {os}"),
};
}
let mut qemu_child = qemu
.stdout(Stdio::piped())
.spawn()
.with_context(|| format!("failed to spawn {qemu:?}"))?;
let Child { stdout, .. } = &mut qemu_child;
let stdout = stdout.take().unwrap();
let stdout = BufReader::new(stdout);
let mut outcome = None;
for line in stdout.lines() {
let line =
line.with_context(|| format!("failed to read line from {qemu:?}"))?;
println!("{}", line);
// The init program will print "init: success" or "init: failure" to indicate
// the outcome of running the binaries it found in /bin.
if let Some(line) = line.strip_prefix("init: ") {
let previous = match line {
"success" => outcome.replace(Ok(())),
"failure" => outcome.replace(Err(())),
line => bail!("unexpected init output: {}", line),
};
if let Some(previous) = previous {
bail!("multiple exit status: previous={previous:?}, current={line}");
}
// Try to get QEMU to exit on kernel panic; otherwise it might hang indefinitely.
if line.contains("end Kernel panic") {
qemu_child.kill().with_context(|| format!("failed to kill {qemu:?}"))?;
}
}
}
let output = qemu_child
.wait_with_output()
.with_context(|| format!("failed to wait for {qemu:?}"))?;
let Output { status, .. } = &output;
if status.code() != Some(0) {
bail!("{qemu:?} failed: {output:?}")
}
let outcome = outcome.ok_or(anyhow!("init did not exit"))?;
match outcome {
Ok(()) => {}
Err(()) => {
errors.push(anyhow!("VM binaries failed on {}", kernel_image.display()))
}
}
}
if errors.is_empty() {
Ok(())
} else {
Err(Errors(errors).into())
}
}
}
}
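To make the contract between xtask and init concrete: extra test arguments are forwarded to the guest as `init.arg=` kernel parameters, which init reads back from `/proc/cmdline` and passes to every binary in `/bin`. An illustrative run on an x86_64 host with KVM (the kernel path, initramfs path, and the extra `--nocapture` flag are hypothetical) ends up launching QEMU roughly like this:

```
cargo xtask integration-test vm test/.tmp/boot/vmlinuz-6.1.0-10-cloud-amd64 -- --nocapture

# ...which assembles a QEMU command along the lines of:
qemu-system-x86_64 -machine q35 -accel kvm -no-reboot -nographic -m 512M -smp 2 \
  -append 'console=ttyS0 init.arg=--test-threads=1 init.arg=--nocapture' \
  -kernel test/.tmp/boot/vmlinuz-6.1.0-10-cloud-amd64 \
  -initrd /tmp/<tempdir>/qemu-initramfs.img
```

The runner then watches QEMU's stdout for the `init: success` or `init: failure` line to decide the outcome for that kernel image.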
