Merge pull request #1160 from dave-tucker/modprobe

reviewable/pr1223/r3
Dave Tucker 3 weeks ago committed by GitHub
commit 29f4f2b780
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

@ -0,0 +1,30 @@
#!/usr/bin/env python3
import os
import glob
import sys
from typing import List
def find_kernels(directory: str) -> List[str]:
    """Return the paths of all vmlinuz-* kernel images found under `directory`.

    The search is recursive, so images may live at any depth (e.g. in a
    `boot/` subdirectory extracted from a Debian kernel package).
    """
    pattern = os.path.join(directory, "**", "vmlinuz-*")
    return glob.glob(pattern, recursive=True)
def find_modules_directory(directory: str, kernel: str) -> str:
    """Locate the modules directory for the given kernel release.

    Searches recursively under `directory` for a path ending in
    `modules/<kernel>`. Exactly one match must exist.

    Raises:
        RuntimeError: if zero or more than one matching directory is found.
            The message names the kernel and search root to aid debugging.
    """
    pattern = os.path.join(directory, "**", "modules", kernel)
    matches = glob.glob(pattern, recursive=True)
    if len(matches) != 1:
        raise RuntimeError(
            f"Expected to find exactly one modules directory for {kernel!r} "
            f"under {directory!r}. Found {len(matches)}."
        )
    return matches[0]
def main() -> None:
    """Print space-separated `<kernel-image>:<modules-dir>` pairs.

    Scans `test/.tmp` for extracted kernel images, resolves the matching
    modules directory for each, and emits the pairs in the format expected
    by `cargo xtask integration-test vm`.
    """
    pairs = []
    for image in find_kernels('test/.tmp'):
        release = os.path.basename(image).replace('vmlinuz-', '')
        pairs.append(f"{image}:{find_modules_directory('test/.tmp', release)}")
    print(' '.join(pairs))


if __name__ == "__main__":
    main()

@ -228,7 +228,7 @@ jobs:
run: |
set -euxo pipefail
sudo apt update
sudo apt -y install lynx qemu-system-{arm,x86}
sudo apt -y install lynx qemu-system-{arm,x86} musl-tools
echo /usr/lib/llvm-15/bin >> $GITHUB_PATH
- name: Install prerequisites
@ -240,6 +240,10 @@ jobs:
# The tar shipped on macOS doesn't support --wildcards, so we need GNU tar.
#
# The clang shipped on macOS doesn't support BPF, so we need LLVM from brew.
#
# We need a musl C toolchain to compile our `test-distro` since some of
# our dependencies have build scripts that compile C code (e.g. xz2).
# This is provided by `brew install filosottile/musl-cross/musl-cross`.
run: |
set -euxo pipefail
brew update
@ -250,6 +254,8 @@ jobs:
echo $(brew --prefix curl)/bin >> $GITHUB_PATH
echo $(brew --prefix gnu-tar)/libexec/gnubin >> $GITHUB_PATH
echo $(brew --prefix llvm)/bin >> $GITHUB_PATH
brew install filosottile/musl-cross/musl-cross
ln -s "$(brew --prefix musl-cross)/bin/x86_64-linux-musl-gcc" /usr/local/bin/musl-gcc
- uses: dtolnay/rust-toolchain@nightly
with:
@ -302,21 +308,46 @@ jobs:
# TODO: enable tests on kernels before 6.0.
run: .github/scripts/download_kernel_images.sh test/.tmp/debian-kernels/amd64 amd64 6.1 6.10
- name: Cleanup stale kernels and modules
run: |
set -euxo pipefail
rm -rf test/.tmp/boot test/.tmp/lib
- name: Extract debian kernels
run: |
set -euxo pipefail
# The wildcard '**/boot/*' extracts kernel images and config.
# The wildcard '**/modules/*' extracts kernel modules.
# Modules are required since not all parts of the kernel we want to
# test are built-in.
find test/.tmp -name '*.deb' -print0 | xargs -t -0 -I {} \
sh -c "dpkg --fsys-tarfile {} | tar -C test/.tmp --wildcards --extract '*vmlinuz*' --file -"
sh -c "dpkg --fsys-tarfile {} | tar -C test/.tmp \
--wildcards --extract '**/boot/*' '**/modules/*' --file -"
- name: Run local integration tests
if: runner.os == 'Linux'
run: cargo xtask integration-test local
- name: Run virtualized integration tests
if: runner.os == 'Linux'
run: |
set -euxo pipefail
ARGS=$(./.github/scripts/find_kernels.py)
cargo xtask integration-test vm --cache-dir test/.tmp \
--github-api-token ${{ secrets.GITHUB_TOKEN }} \
${ARGS}
- name: Run virtualized integration tests
if: runner.os == 'macOS'
env:
# This sets the linker to the one installed by FiloSottile/musl-cross.
CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_LINKER: x86_64-linux-musl-gcc
run: |
set -euxo pipefail
find test/.tmp -name 'vmlinuz-*' -print0 | xargs -t -0 \
cargo xtask integration-test vm --cache-dir test/.tmp --github-api-token ${{ secrets.GITHUB_TOKEN }}
ARGS=$(./.github/scripts/find_kernels.py)
cargo xtask integration-test vm --cache-dir test/.tmp \
--github-api-token ${{ secrets.GITHUB_TOKEN }} \
${ARGS}
# Provides a single status check for the entire build workflow.
# This is used for merge automation, like Mergify, since GH actions

@ -1,7 +1,8 @@
{
"rust-analyzer.check.allTargets": true,
"rust-analyzer.check.command": "clippy",
"search.exclude": {
"/xtask/public-api/*.txt": true,
},
"rust-analyzer.check.allTargets": true,
"rust-analyzer.check.command": "clippy",
"search.exclude": {
"/xtask/public-api/*.txt": true
},
"yaml.format.singleQuote": true
}

@ -7,7 +7,7 @@ members = [
"aya-log-parser",
"aya-obj",
"aya-tool",
"init",
"test-distro",
"test/integration-common",
"test/integration-test",
"xtask",
@ -33,7 +33,7 @@ default-members = [
"aya-log-parser",
"aya-obj",
"aya-tool",
"init",
"test-distro",
"test/integration-common",
# test/integration-test is omitted; including it in this list causes `cargo test` to run its
# tests, and that doesn't work unless they've been built with `cargo xtask`.
@ -74,6 +74,7 @@ diff = { version = "0.1.13", default-features = false }
env_logger = { version = "0.11", default-features = false }
epoll = { version = "4.3.3", default-features = false }
futures = { version = "0.3.28", default-features = false }
glob = { version = "0.3.0", default-features = false }
hashbrown = { version = "0.15.0", default-features = false }
indoc = { version = "2.0", default-features = false }
libc = { version = "0.2.105", default-features = false }
@ -101,8 +102,10 @@ test-log = { version = "0.2.13", default-features = false }
testing_logger = { version = "0.1.1", default-features = false }
thiserror = { version = "2.0.3", default-features = false }
tokio = { version = "1.24.0", default-features = false }
walkdir = { version = "2", default-features = false }
which = { version = "7.0.0", default-features = false }
xdpilone = { version = "1.0.5", default-features = false }
xz2 = { version = "0.1.7", default-features = false }
[workspace.lints.rust]
unused-extern-crates = "warn"

@ -1,18 +0,0 @@
[package]
name = "init"
publish = false
version = "0.1.0"
authors.workspace = true
edition.workspace = true
homepage.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true
[lints]
workspace = true
[dependencies]
anyhow = { workspace = true, features = ["std"] }
nix = { workspace = true, features = ["fs", "mount", "reboot"] }

@ -0,0 +1,38 @@
[package]
name = "test-distro"
publish = false
version = "0.1.0"
authors.workspace = true
edition.workspace = true
homepage.workspace = true
license.workspace = true
repository.workspace = true
[[bin]]
name = "init"
path = "src/init.rs"
[[bin]]
name = "modprobe"
path = "src/modprobe.rs"
[[bin]]
name = "depmod"
path = "src/depmod.rs"
[dependencies]
anyhow = { workspace = true, features = ["std"] }
clap = { workspace = true, default-features = true, features = ["derive"] }
glob = { workspace = true }
nix = { workspace = true, features = [
"user",
"fs",
"mount",
"reboot",
"kmod",
"feature",
] }
object = { workspace = true, features = ["elf", "read_core", "std"] }
walkdir = { workspace = true }
xz2 = { workspace = true }

@ -0,0 +1,140 @@
//! depmod is used to build the modules.alias file to assist with loading
//! kernel modules.
//!
//! This implementation is incredibly naive and is only designed to work within
//! the constraints of the test environment. Not for production use.
use std::{
fs::File,
io::{BufWriter, Read, Write as _},
path::PathBuf,
};
use anyhow::{Context as _, anyhow};
use clap::Parser;
use object::{Object, ObjectSection, ObjectSymbol, Section};
use test_distro::resolve_modules_dir;
use walkdir::WalkDir;
use xz2::read::XzDecoder;
/// Command-line arguments for the naive `depmod` implementation.
#[derive(Parser)]
struct Args {
    /// Directory containing the kernel modules to index.
    /// When omitted, the modules directory of the running kernel is
    /// resolved via `resolve_modules_dir`.
    #[clap(long, short)]
    base_dir: Option<PathBuf>,
}
fn main() -> anyhow::Result<()> {
    let Args { base_dir } = Parser::parse();
    // Use the directory given on the command line, or fall back to the
    // running kernel's modules directory.
    let modules_dir = if let Some(base_dir) = base_dir {
        base_dir
    } else {
        resolve_modules_dir().context("failed to resolve modules dir")?
    };
    // (Re)create modules.alias from scratch; `truncate` discards any stale
    // contents left over from a previous run.
    let modules_alias = modules_dir.join("modules.alias");
    let f = std::fs::OpenOptions::new()
        .create(true)
        .write(true)
        .truncate(true)
        .open(&modules_alias)
        .with_context(|| format!("failed to open: {}", modules_alias.display()))?;
    let mut output = BufWriter::new(&f);
    // Walk the entire modules tree; every regular file is inspected to see
    // whether it is a kernel module (`.ko`, optionally `.xz`-compressed).
    for entry in WalkDir::new(modules_dir) {
        let entry = entry.context("failed to read entry in walkdir")?;
        if entry.file_type().is_file() {
            let path = entry.path();
            let module_name = path
                .file_name()
                .ok_or_else(|| anyhow!("{} does not have a file name", path.display()))?
                .to_str()
                .ok_or_else(|| anyhow!("{} is not valid utf-8", path.display()))?;
            // Strip an optional `.xz` suffix, remembering whether the module
            // needs decompression before parsing.
            let (module_name, compressed) =
                if let Some(module_name) = module_name.strip_suffix(".xz") {
                    (module_name, true)
                } else {
                    (module_name, false)
                };
            let module_name = if let Some(module_name) = module_name.strip_suffix(".ko") {
                module_name
            } else {
                // Not a kernel module
                continue;
            };
            let mut f =
                File::open(path).with_context(|| format!("failed to open: {}", path.display()))?;
            let stat = f
                .metadata()
                .with_context(|| format!("failed to get metadata for {}", path.display()))?;
            if compressed {
                let mut decoder = XzDecoder::new(f);
                // We don't know the size of the decompressed data, so we assume it's
                // no more than twice the size of the compressed data.
                let mut decompressed = Vec::with_capacity(stat.len() as usize * 2);
                decoder.read_to_end(&mut decompressed)?;
                read_aliases_from_module(&decompressed, module_name, &mut output)
            } else {
                let mut buf = Vec::with_capacity(stat.len() as usize);
                f.read_to_end(&mut buf)
                    .with_context(|| format!("failed to read: {}", path.display()))?;
                read_aliases_from_module(&buf, module_name, &mut output)
            }
            .with_context(|| format!("failed to read aliases from module {}", path.display()))?;
        }
    }
    Ok(())
}
fn read_aliases_from_module(
contents: &[u8],
module_name: &str,
output: &mut BufWriter<&File>,
) -> Result<(), anyhow::Error> {
let obj = object::read::File::parse(contents).context("failed to parse")?;
let section = (|| -> anyhow::Result<Option<Section<'_, '_, &[u8]>>> {
for s in obj.sections() {
let name = s
.name_bytes()
.with_context(|| format!("failed to get name of section idx {}", s.index()))?;
if name == b".modinfo" {
return Ok(Some(s));
}
}
Ok(None)
})()?;
let section = section.context("failed to find .modinfo section")?;
let section_idx = section.index();
let data = section
.data()
.context("failed to get modinfo section data")?;
for s in obj.symbols() {
if s.section_index() != Some(section_idx) {
continue;
}
let name = s
.name()
.with_context(|| format!("failed to get name of symbol idx {}", s.index()))?;
if name.contains("alias") {
let start = s.address() as usize;
let end = start + s.size() as usize;
let sym_data = &data[start..end];
let cstr = std::ffi::CStr::from_bytes_with_nul(sym_data)
.with_context(|| format!("failed to convert {:?} to cstr", sym_data))?;
let sym_str = cstr
.to_str()
.with_context(|| format!("failed to convert {:?} to str", cstr))?;
let alias = sym_str
.strip_prefix("alias=")
.with_context(|| format!("failed to strip prefix 'alias=' from {}", sym_str))?;
writeln!(output, "alias {} {}", alias, module_name).expect("write");
}
}
Ok(())
}

@ -57,6 +57,14 @@ fn run() -> anyhow::Result<()> {
data: None,
target_mode: Some(RXRXRX),
},
Mount {
source: "dev",
target: "/dev",
fstype: "devtmpfs",
flags: nix::mount::MsFlags::empty(),
data: None,
target_mode: None,
},
Mount {
source: "sysfs",
target: "/sys",

@ -0,0 +1,30 @@
use std::path::PathBuf;
use anyhow::Context as _;
use nix::sys::utsname::uname;
/// Resolves the directory holding kernel modules for the running kernel.
///
/// Kernel modules are in `/lib/modules`.
/// They may be in the root of this directory,
/// or in a subdirectory named after the kernel release (from `uname -r`).
///
/// # Errors
///
/// Fails if neither location exists, cannot be stat'ed, or the resolved
/// per-release path is not a directory.
pub fn resolve_modules_dir() -> anyhow::Result<PathBuf> {
    let modules_dir = PathBuf::from("/lib/modules");
    let stat = modules_dir
        .metadata()
        .with_context(|| format!("stat(): {}", modules_dir.display()))?;
    // NOTE(review): this returns early whenever `/lib/modules` exists as a
    // directory, so the per-release fallback below is only reached when
    // `/lib/modules` exists but is *not* a directory. Confirm this matches
    // the intended layouts (test-distro initramfs puts modules directly in
    // /lib/modules; on a regular host they live in /lib/modules/<release>).
    if stat.is_dir() {
        return Ok(modules_dir);
    }
    let utsname = uname().context("uname()")?;
    let release = utsname.release();
    let modules_dir = modules_dir.join(release);
    let stat = modules_dir
        .metadata()
        .with_context(|| format!("stat(): {}", modules_dir.display()))?;
    anyhow::ensure!(
        stat.is_dir(),
        "{} is not a directory",
        modules_dir.display()
    );
    Ok(modules_dir)
}

@ -0,0 +1,140 @@
//! modprobe is used to load kernel modules into the kernel.
//!
//! This implementation is incredibly naive and is only designed to work within
//! the constraints of the test environment. Not for production use.
use std::{
fs::File,
io::{BufRead as _, Read as _},
path::Path,
};
use anyhow::{Context as _, anyhow, bail};
use clap::Parser;
use glob::glob;
use nix::kmod::init_module;
use test_distro::resolve_modules_dir;
/// Prints a formatted line to stdout unless `$quiet` is true.
/// Lets the `--quiet` flag suppress all diagnostic output.
macro_rules! output {
    ($quiet:expr, $($arg:tt)*) => {
        if !$quiet {
            println!($($arg)*);
        }
    };
}
/// Command-line arguments for the naive `modprobe` implementation.
#[derive(Parser)]
struct Args {
    /// Suppress all output and don't return an error code.
    #[clap(short, long, default_value = "false")]
    quiet: bool,

    /// The name of the module to load.
    /// This can be either an alias like `net-sched-sch-ingress` or a module
    /// name like `sch_ingress`.
    name: String,
}
/// Entry point: parse arguments and attempt the module load.
///
/// Honors the `--quiet` contract: when quiet is set, failures are swallowed
/// and the process always exits successfully.
fn main() -> anyhow::Result<()> {
    let Args { quiet, name } = Parser::parse();
    let result = try_main(quiet, name);
    if quiet {
        // Quiet mode never reports an error code, by design.
        return Ok(());
    }
    result
}
/// Resolves `name` (an alias or module name) to a `.ko` file under the
/// modules directory, decompresses it if necessary, and loads it into the
/// kernel via `init_module(2)`.
///
/// # Errors
///
/// Fails if the alias cannot be resolved, the module file cannot be found or
/// read, the payload is not an ELF file, or the kernel rejects the load
/// (including `EEXIST` when the module is already loaded).
fn try_main(quiet: bool, name: String) -> anyhow::Result<()> {
    let modules_dir = resolve_modules_dir()?;
    output!(quiet, "resolving alias for module: {}", name);
    let module = resolve_alias(quiet, &modules_dir, &name)?;
    // Modules may be compressed (`.ko.xz`), hence the `.ko*` glob.
    let pattern = format!(
        "{}/kernel/**/{}.ko*",
        modules_dir
            .to_str()
            .ok_or_else(|| anyhow!("failed to convert {} to string", modules_dir.display()))?,
        module
    );
    let module_path = glob(&pattern)
        .with_context(|| format!("failed to glob: {}", pattern))?
        .next()
        .ok_or_else(|| anyhow!("module not found: {}", module))?
        .context("glob error")?;
    output!(quiet, "loading module: {}", module_path.display());
    let mut f =
        File::open(&module_path).with_context(|| format!("open(): {}", module_path.display()))?;
    let stat = f
        .metadata()
        .with_context(|| format!("stat(): {}", module_path.display()))?;
    let extension = module_path
        .as_path()
        .extension()
        .ok_or_else(|| anyhow!("module has no extension: {}", module_path.display()))?;
    let contents = if extension == "xz" {
        output!(quiet, "decompressing module");
        // The decompressed size is unknown; assume no more than twice the
        // compressed size to avoid repeated reallocations.
        let mut decompressed = Vec::with_capacity(stat.len() as usize * 2);
        xz2::read::XzDecoder::new(f).read_to_end(&mut decompressed)?;
        decompressed
    } else {
        let mut contents: Vec<u8> = Vec::with_capacity(stat.len() as usize);
        f.read_to_end(&mut contents)?;
        contents
    };
    // Sanity check the ELF magic before handing the payload to the kernel.
    if !contents.starts_with(&[0x7f, 0x45, 0x4c, 0x46]) {
        // Fixed grammar in the error message (was "an valid").
        bail!("module is not a valid ELF file");
    }
    match init_module(&contents, c"") {
        Ok(()) => {
            output!(quiet, "module loaded successfully");
            Ok(())
        }
        Err(e) => {
            if e == nix::errno::Errno::EEXIST {
                Err(anyhow!("module already loaded"))
            } else {
                Err(anyhow!("failed to load module: {}", e))
            }
        }
    }
}
/// Looks up `name` in the `modules.alias` file under `module_dir` and
/// returns the real module name.
///
/// Alias lines have the form `alias <alias> <module>`; any other line is
/// ignored. Fails if the file cannot be opened, a line is malformed, or no
/// matching alias exists.
fn resolve_alias(quiet: bool, module_dir: &Path, name: &str) -> anyhow::Result<String> {
    let modules_alias = module_dir.join("modules.alias");
    output!(
        quiet,
        "opening modules.alias file: {}",
        modules_alias.display()
    );
    let alias_file = File::open(&modules_alias)
        .with_context(|| format!("open(): {}", modules_alias.display()))?;
    let reader = std::io::BufReader::new(alias_file);
    for line in reader.lines() {
        let line = line?;
        // Only `alias ...` lines are interesting; skip everything else.
        if !line.starts_with("alias ") {
            continue;
        }
        let mut fields = line.split_whitespace();
        if fields.next() != Some("alias") {
            bail!("alias line incorrect prefix: {}", line);
        }
        let alias = fields
            .next()
            .with_context(|| format!("alias line missing alias: {}", line))?;
        let module = fields
            .next()
            .with_context(|| format!("alias line missing module: {}", line))?;
        if fields.next().is_some() {
            bail!("alias line has too many parts: {}", line);
        }
        if alias == name {
            return Ok(module.to_string());
        }
    }
    bail!("alias not found: {}", name)
}

@ -32,7 +32,6 @@ pub const TEST: &[u8] = include_bytes_aligned!(concat!(env!("OUT_DIR"), "/test")
pub const TWO_PROGS: &[u8] = include_bytes_aligned!(concat!(env!("OUT_DIR"), "/two_progs"));
pub const XDP_SEC: &[u8] = include_bytes_aligned!(concat!(env!("OUT_DIR"), "/xdp_sec"));
pub const UPROBE_COOKIE: &[u8] = include_bytes_aligned!(concat!(env!("OUT_DIR"), "/uprobe_cookie"));
#[cfg(test)]
mod tests;
#[cfg(test)]

@ -1,12 +1,25 @@
use aya::{
Ebpf, EbpfLoader,
programs::{Extension, TracePoint, Xdp, XdpFlags},
programs::{Extension, TracePoint, Xdp, XdpFlags, tc},
util::KernelVersion,
};
use test_log::test;
use crate::utils::NetNsGuard;
#[test]
fn modprobe() {
    // This very simple looking test is actually quite complex.
    // The call to tc::qdisc_add_clsact() causes the Linux kernel to call into
    // `__request_module()`, which via the usermodehelper calls out to
    // `/sbin/modprobe` to load the required kernel module.
    // In order for this test to pass, all of that machinery must work
    // correctly within the test environment.
    let _netns = NetNsGuard::new();
    tc::qdisc_add_clsact("lo").unwrap();
}
#[test]
fn xdp() {
let kernel_version = KernelVersion::current().unwrap();

@ -31,4 +31,5 @@ rustup-toolchain = { workspace = true }
syn = { workspace = true }
tempfile = { workspace = true }
tokio = { workspace = true, features = ["rt"] }
walkdir = { workspace = true }
which = { workspace = true }

@ -3,7 +3,8 @@ use std::{
fmt::Write as _,
fs::{OpenOptions, copy, create_dir_all},
io::{BufRead as _, BufReader, Write as _},
path::PathBuf,
ops::Deref as _,
path::{Path, PathBuf},
process::{Child, ChildStdin, Command, Output, Stdio},
sync::{Arc, Mutex},
thread,
@ -13,6 +14,7 @@ use anyhow::{Context as _, Result, anyhow, bail};
use base64::engine::Engine as _;
use cargo_metadata::{Artifact, CompilerMessage, Message, Target};
use clap::Parser;
use walkdir::WalkDir;
use xtask::{AYA_BUILD_INTEGRATION_BPF, Errors};
#[derive(Parser)]
@ -35,23 +37,48 @@ enum Environment {
#[clap(long)]
github_api_token: Option<String>,
/// The kernel images to use.
/// The kernel image and modules to use.
///
/// Format: </path/to/image/vmlinuz>:</path/to/lib/modules>
///
/// You can download some images with:
///
/// wget --accept-regex '.*/linux-image-[0-9\.-]+-cloud-.*-unsigned*' \
/// --recursive ftp://ftp.us.debian.org/debian/pool/main/l/linux/
/// --recursive http://ftp.us.debian.org/debian/pool/main/l/linux/
///
/// You can then extract them with:
/// You can then extract the images and kernel modules with:
///
/// find . -name '*.deb' -print0 \
/// | xargs -0 -I {} sh -c "dpkg --fsys-tarfile {} \
/// | tar --wildcards --extract '*vmlinuz*' --file -"
#[clap(required = true)]
kernel_image: Vec<PathBuf>,
/// | tar --wildcards --extract '**/boot/*' '**/modules/*' --file -"
///
/// `**/boot/*` is used to extract the kernel image and config.
///
/// `**/modules/*` is used to extract the kernel modules.
///
/// Modules are required since not all parts of the kernel we want to
/// test are built-in.
#[clap(required = true, value_parser=parse_image_and_modules)]
image_and_modules: Vec<(PathBuf, PathBuf)>,
},
}
/// Parses a `<kernel-image>:<modules-directory>` argument pair.
///
/// # Errors
///
/// Returns `InvalidInput` when the `:` separator is missing, when either
/// component is empty, or when more than one `:` is present. Rejecting empty
/// components here gives the user an argument-parse error instead of a
/// confusing failure later in the VM setup.
pub(crate) fn parse_image_and_modules(s: &str) -> Result<(PathBuf, PathBuf), std::io::Error> {
    let (image, modules) = s
        .split_once(':')
        .ok_or(std::io::ErrorKind::InvalidInput)?;
    if image.is_empty() || modules.is_empty() || modules.contains(':') {
        return Err(std::io::ErrorKind::InvalidInput.into());
    }
    Ok((PathBuf::from(image), PathBuf::from(modules)))
}
#[derive(Parser)]
pub struct Options {
#[clap(subcommand)]
@ -69,8 +96,7 @@ where
let mut cmd = Command::new("cargo");
cmd.args(["build", "--message-format=json"]);
if let Some(target) = target {
let config = format!("target.{target}.linker = \"rust-lld\"");
cmd.args(["--target", target, "--config", &config]);
cmd.args(["--target", target]);
}
f(&mut cmd);
@ -181,7 +207,7 @@ pub fn run(opts: Options) -> Result<()> {
Environment::VM {
cache_dir,
github_api_token,
kernel_image,
image_and_modules,
} => {
// The user has asked us to run the tests on a VM. This is involved; strap in.
//
@ -200,6 +226,7 @@ pub fn run(opts: Options) -> Result<()> {
// We consume the output of QEMU, looking for the output of our init program. This is
// the only way to distinguish success from failure. We batch up the errors across all
// VM images and report to the user. The end.
create_dir_all(&cache_dir).context("failed to create cache dir")?;
let gen_init_cpio = cache_dir.join("gen_init_cpio");
if !gen_init_cpio
@ -260,7 +287,7 @@ pub fn run(opts: Options) -> Result<()> {
}
let mut errors = Vec::new();
for kernel_image in kernel_image {
for (kernel_image, modules_dir) in image_and_modules {
// Guess the guest architecture.
let mut cmd = Command::new("file");
let output = cmd
@ -298,21 +325,10 @@ pub fn run(opts: Options) -> Result<()> {
let target = format!("{guest_arch}-unknown-linux-musl");
// Build our init program. The contract is that it will run anything it finds in /bin.
let init = build(Some(&target), |cmd| {
cmd.args(["--package", "init", "--profile", "release"])
let test_distro: Vec<(String, PathBuf)> = build(Some(&target), |cmd| {
cmd.args(["--package", "test-distro", "--profile", "release"])
})
.context("building init program failed")?;
let init = match &*init {
[(name, init)] => {
if name != "init" {
bail!("expected init program to be named init, found {name}")
}
init
}
init => bail!("expected exactly one init program, found {init:?}"),
};
.context("building test-distro package failed")?;
let binaries = binaries(Some(&target))?;
@ -335,24 +351,92 @@ pub fn run(opts: Options) -> Result<()> {
.spawn()
.with_context(|| format!("failed to spawn {gen_init_cpio:?}"))?;
let Child { stdin, .. } = &mut gen_init_cpio_child;
let mut stdin = stdin.take().unwrap();
let stdin = Arc::new(stdin.take().unwrap());
use std::os::unix::ffi::OsStrExt as _;
// Send input into gen_init_cpio which looks something like
// Send input into gen_init_cpio for directories
//
// file /init path-to-init 0755 0 0
// dir /bin 0755 0 0
// file /bin/foo path-to-foo 0755 0 0
// file /bin/bar path-to-bar 0755 0 0
for bytes in [
"file /init ".as_bytes(),
init.as_os_str().as_bytes(),
" 0755 0 0\n".as_bytes(),
"dir /bin 0755 0 0\n".as_bytes(),
] {
stdin.write_all(bytes).expect("write");
// dir /bin 755 0 0
let write_dir = |out_path: &Path| {
for bytes in [
"dir ".as_bytes(),
out_path.as_os_str().as_bytes(),
" ".as_bytes(),
"755 0 0\n".as_bytes(),
] {
stdin.deref().write_all(bytes).expect("write");
}
};
// Send input into gen_init_cpio for files
//
// file /init path-to-init 755 0 0
let write_file = |out_path: &Path, in_path: &Path, mode: &str| {
for bytes in [
"file ".as_bytes(),
out_path.as_os_str().as_bytes(),
" ".as_bytes(),
in_path.as_os_str().as_bytes(),
" ".as_bytes(),
mode.as_bytes(),
"\n".as_bytes(),
] {
stdin.deref().write_all(bytes).expect("write");
}
};
write_dir(Path::new("/bin"));
write_dir(Path::new("/sbin"));
write_dir(Path::new("/lib"));
write_dir(Path::new("/lib/modules"));
test_distro.iter().for_each(|(name, path)| {
if name == "init" {
write_file(Path::new("/init"), path, "755 0 0");
} else {
write_file(&Path::new("/sbin").join(name), path, "755 0 0");
}
});
// At this point we need to make a slight detour!
// Preparing the `modules.alias` file inside the VM as part of
// `/init` is slow. It's faster to prepare it here.
Command::new("cargo")
.args([
"run",
"--package",
"test-distro",
"--bin",
"depmod",
"--release",
"--",
"-b",
])
.arg(&modules_dir)
.status()
.context("failed to run depmod")?;
// Now our modules.alias file is built, we can recursively
// walk the modules directory and add all the files to the
// initramfs.
for entry in WalkDir::new(&modules_dir) {
let entry = entry.context("read_dir failed")?;
let path = entry.path();
let metadata = entry.metadata().context("metadata failed")?;
let out_path = Path::new("/lib/modules").join(
path.strip_prefix(&modules_dir).with_context(|| {
format!(
"strip prefix {} failed for {}",
path.display(),
modules_dir.display()
)
})?,
);
if metadata.file_type().is_dir() {
write_dir(&out_path);
} else if metadata.file_type().is_file() {
write_file(&out_path, path, "644 0 0");
}
}
for (profile, binaries) in binaries {
@ -362,17 +446,11 @@ pub fn run(opts: Options) -> Result<()> {
copy(&binary, &path).with_context(|| {
format!("copy({}, {}) failed", binary.display(), path.display())
})?;
for bytes in [
"file /bin/".as_bytes(),
name.as_bytes(),
" ".as_bytes(),
path.as_os_str().as_bytes(),
" 0755 0 0\n".as_bytes(),
] {
stdin.write_all(bytes).expect("write");
}
let out_path = Path::new("/bin").join(&name);
write_file(&out_path, &path, "755 0 0");
}
}
// Must explicitly close to signal EOF.
drop(stdin);

Loading…
Cancel
Save