Merge branch 'aya-rs:main' into main

reviewable/pr1389/r7
blong 1 month ago committed by GitHub
commit 1dfea36dcc
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

@ -9,3 +9,15 @@ linker = "arm-linux-gnueabihf-gcc"
[target.aarch64-unknown-linux-musl]
linker = "aarch64-linux-musl-gcc"
[target.x86_64-unknown-linux-musl]
linker = "x86_64-linux-musl-gcc"
[target.powerpc64le-unknown-linux-gnu]
linker = "powerpc64le-linux-gnu-gcc"
[target.s390x-unknown-linux-gnu]
linker = "s390x-linux-gnu-gcc"
[target.mips-unknown-linux-gnu]
linker = "mips-linux-gnu-gcc"

@ -1,5 +1,5 @@
# Please see the documentation for all configuration options:
# https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
# https://docs.github.com/en/code-security/dependabot/working-with-dependabot/dependabot-options-reference
version: 2
updates:

@ -0,0 +1,51 @@
#!/usr/bin/env bash
set -euo pipefail

# Download the newest Debian "cloud" kernel image .deb for each requested
# kernel version/architecture into <output directory>, skipping unchanged
# downloads via HTTP ETags, then prune previously downloaded files that are
# no longer wanted.
#
# Usage: download_kernel_images.sh <output directory> <architecture> <version1> [<version2> ...]

# Check for required arguments.
if [ "$#" -lt 3 ]; then
  echo "Usage: $0 <output directory> <architecture> <version1> [<version2> ...]"
  exit 1
fi

OUTPUT_DIR=$1
ARCHITECTURE=$2
shift 2
VERSIONS=("$@")

# Scrape the Debian kernel package pool; lynx prints one absolute URL per line.
URLS=$(lynx -dump -listonly -nonumbers https://mirrors.wikimedia.org/debian/pool/main/l/linux/)
readonly URLS

# Find the latest revision of each kernel version.
FILES=()
for VERSION in "${VERSIONS[@]}"; do
  # Escape the dots in VERSION so they match literally; the pattern accepts an
  # optional Debian revision and backport (+bpo/+debN) suffix.
  REGEX="linux-image-${VERSION//./\\.}\\.[0-9]+(-[0-9]+)?(\+bpo|\+deb[0-9]+)?-cloud-${ARCHITECTURE}-unsigned_.*\\.deb"
  # `sort -V | tail -n1` selects the highest (newest) matching revision.
  match=$(printf '%s\n' "$URLS" | grep -E "$REGEX" | sort -V | tail -n1) || {
    # Nothing matched: dump the listing and the pattern to stderr for debugging.
    printf '%s\nVERSION=%s\nREGEX=%s\n' "$URLS" "$VERSION" "$REGEX" >&2
    exit 1
  }
  FILES+=("$match")
done

# Note: `--etag-{compare,save}` are not idempotent until curl 8.9.0 which included
# https://github.com/curl/curl/commit/85efbb92b8e6679705e122cee45ce76c56414a3e. At the time of
# writing our CI uses Ubuntu 22.04 which has curl 7.81.0 and the latest available is Ubuntu 24.04
# which has curl 8.5.0. Since neither has a new enough curl, we don't bother to update, but we
# should do so when Ubuntu 24.10 or later is available.
mkdir -p "$OUTPUT_DIR"
KEEP=()
for FILE in "${FILES[@]}"; do
  name=$(basename "$FILE")
  etag_name="$name.etag"
  # Track both the .deb and its etag sidecar so the cleanup pass below spares them.
  KEEP+=("$name" "$etag_name")
  etag="$OUTPUT_DIR/$etag_name"
  # Re-download only when the server-side ETag changed since the last run.
  curl -sfSL --output-dir "$OUTPUT_DIR" --remote-name-all --etag-compare "$etag" --etag-save "$etag" "$FILE"
done

# Remove any files that were previously downloaded that are no longer needed.
# Build a `! -name A ! -name B ...` exclusion list covering everything we kept.
FIND_ARGS=()
for FILE in "${KEEP[@]}"; do
  FIND_ARGS+=("!" "-name" "$FILE")
done
find "$OUTPUT_DIR" -type f "${FIND_ARGS[@]}" -exec rm {} +

@ -2,12 +2,8 @@ name: aya-ci
on:
push:
branches:
- main
pull_request:
branches:
- main
schedule:
- cron: 00 4 * * *
@ -17,15 +13,17 @@ env:
jobs:
lint:
runs-on: ubuntu-22.04
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@master
- uses: dtolnay/rust-toolchain@nightly
with:
toolchain: nightly
components: rustfmt, clippy, miri, rust-src
components: clippy,miri,rustfmt,rust-src
# Installed *after* nightly so it is the default.
- uses: dtolnay/rust-toolchain@stable
- uses: Swatinem/rust-cache@v2
@ -33,28 +31,42 @@ jobs:
with:
tool: cargo-hack,taplo-cli
- name: Check C formatting
run: git ls-files -- '*.c' '*.h' | xargs clang-format --dry-run --Werror
- run: git ls-files -- '*.c' '*.h' | xargs clang-format --dry-run --Werror
- name: Check Markdown
uses: DavidAnson/markdownlint-cli2-action@v16
- uses: DavidAnson/markdownlint-cli2-action@v20
- name: Check TOML formatting
run: taplo fmt --check
- run: taplo fmt --check
- name: Check formatting
run: cargo fmt --all -- --check
- run: cargo +nightly fmt --all -- --check
- name: Run clippy
run: cargo hack clippy --all-targets --feature-powerset --workspace -- --deny warnings
- run: ./clippy.sh
- name: Check public API
run: cargo xtask public-api
# On the `aya-rs/aya` repository, regenerate the public API on a schedule.
#
# On all other events and repositories assert the public API is up to date.
- run: cargo xtask public-api
if: ${{ !(github.event_name == 'schedule' && github.repository == 'aya-rs/aya') }}
- run: cargo xtask public-api --bless
if: ${{ (github.event_name == 'schedule' && github.repository == 'aya-rs/aya') }}
- uses: peter-evans/create-pull-request@v7
if: ${{ (github.event_name == 'schedule' && github.repository == 'aya-rs/aya') }}
with:
# GitHub actions aren't allowed to trigger other actions to prevent
# abuse; the canonical workaround is to use a sufficiently authorized
# token.
#
# See https://github.com/peter-evans/create-pull-request/issues/48.
token: ${{ secrets.CRABBY_GITHUB_TOKEN }}
branch: create-pull-request/public-api
commit-message: 'public-api: regenerate'
title: 'public-api: regenerate'
body: |
**Automated changes**
- name: Run miri
run: |
set -euxo pipefail
cargo hack miri test --all-targets --feature-powerset \
cargo +nightly hack miri test --all-targets --feature-powerset \
--exclude aya-ebpf \
--exclude aya-ebpf-bindings \
--exclude aya-log-ebpf \
@ -67,23 +79,26 @@ jobs:
fail-fast: false
matrix:
arch:
- x86_64-unknown-linux-gnu
- aarch64-unknown-linux-gnu
- armv7-unknown-linux-gnueabi
- loongarch64-unknown-linux-gnu
- powerpc64le-unknown-linux-gnu
- riscv64gc-unknown-linux-gnu
runs-on: ubuntu-22.04
- s390x-unknown-linux-gnu
- x86_64-unknown-linux-gnu
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@master
- uses: dtolnay/rust-toolchain@stable
with:
toolchain: stable
targets: ${{ matrix.arch }}
- uses: Swatinem/rust-cache@v2
- uses: taiki-e/install-action@cargo-hack
# This is magic, it sets `$CARGO_BUILD_TARGET`.
- uses: taiki-e/setup-cross-toolchain-action@v1
with:
target: ${{ matrix.arch }}
@ -96,6 +111,7 @@ jobs:
--exclude aya-ebpf-bindings \
--exclude aya-log-ebpf \
--exclude integration-ebpf \
--exclude xtask \
--workspace
- name: Test
@ -103,186 +119,241 @@ jobs:
RUST_BACKTRACE: full
run: |
set -euxo pipefail
cargo hack test --all-targets --feature-powerset \
cargo hack test --all-targets \
--exclude aya-ebpf \
--exclude aya-ebpf-bindings \
--exclude aya-log-ebpf \
--exclude integration-ebpf \
--exclude integration-test \
--workspace
--feature-powerset
- name: Doctests
env:
RUST_BACKTRACE: full
run: |
set -euxo pipefail
cargo hack test --doc --feature-powerset \
cargo hack test --doc \
--exclude aya-ebpf \
--exclude aya-ebpf-bindings \
--exclude aya-log-ebpf \
--exclude init \
--exclude integration-ebpf \
--exclude integration-test \
--workspace
--feature-powerset
build-test-aya-ebpf:
strategy:
fail-fast: false
matrix:
arch:
- x86_64
- aarch64
- arm
- riscv64
target:
- bpfel-unknown-none
- bpfeb-unknown-none
runs-on: ubuntu-22.04
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@master
- uses: dtolnay/rust-toolchain@nightly
with:
toolchain: nightly
components: rust-src
# Installed *after* nightly so it is the default.
- uses: dtolnay/rust-toolchain@stable
- uses: Swatinem/rust-cache@v2
- name: bpf-linker
run: cargo install bpf-linker --git https://github.com/aya-rs/bpf-linker.git
- run: cargo install --git https://github.com/aya-rs/bpf-linker.git bpf-linker --features llvm-21
- uses: taiki-e/install-action@cargo-hack
- name: Build
env:
CARGO_CFG_BPF_TARGET_ARCH: ${{ matrix.arch }}
run: |
set -euxo pipefail
cargo hack build --package aya-ebpf --package aya-log-ebpf \
--feature-powerset \
--target ${{ matrix.target }} \
-Z build-std=core
- name: Test
- name: Build & test for all BPF architectures
env:
CARGO_CFG_BPF_TARGET_ARCH: ${{ matrix.arch }}
RUST_BACKTRACE: full
run: |
set -euxo pipefail
cargo hack test --doc \
--package aya-ebpf \
--package aya-log-ebpf \
--feature-powerset
set -euo pipefail
failures=()
# NB: this hand-rolled shell script is used instead of a matrix
# because the time spent doing useful work per target is about equal
# to the overhead of setting up the job - so this saves a bunch of
# machine time.
for arch in aarch64 arm loongarch64 mips powerpc64 riscv64 s390x x86_64; do
echo "::group::arch=$arch"
export RUSTFLAGS="--cfg bpf_target_arch=\"$arch\""
for target in bpfeb-unknown-none bpfel-unknown-none; do
echo "::group::target=$target"
if ! (
cargo +nightly hack build \
--release \
--target "$target" \
-Z build-std=core \
--package aya-ebpf \
--package aya-ebpf-bindings \
--package aya-log-ebpf \
--package integration-ebpf \
--feature-powerset
); then
failures+=("build: $arch/$target")
fi
echo "::endgroup::"
done
if ! (
RUSTDOCFLAGS=$RUSTFLAGS cargo +nightly hack test --doc \
--package aya-ebpf \
--package aya-ebpf-bindings \
--package aya-log-ebpf \
--package integration-ebpf \
--feature-powerset
); then
failures+=("doctests: $arch")
fi
echo "::endgroup::"
done
if ((${#failures[@]})); then
echo "::error::Some builds/tests failed:"
printf ' %s\n' "${failures[@]}"
exit 1
fi
run-integration-test:
strategy:
fail-fast: false
matrix:
runner:
- macos-12
- ubuntu-22.04
runs-on: ${{ matrix.runner }}
include:
# TODO(https://github.com/actions/runner-images/issues/13277): Reenable when fixed.
# - target: x86_64-apple-darwin
# macos-15 is arm64[0] which doesn't support nested
# virtualization[1].
#
# [0] https://github.com/actions/runner-images#available-images
#
# [1] https://docs.github.com/en/actions/reference/runners/github-hosted-runners#limitations-for-arm64-macos-runners
#
# os: macos-15-intel
# We don't use ubuntu-latest because we care about the apt packages available.
- target: x86_64-unknown-linux-gnu
os: ubuntu-24.04
- target: aarch64-unknown-linux-gnu
os: ubuntu-24.04-arm
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
with:
submodules: recursive
- uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly
components: rust-src
targets: aarch64-unknown-linux-musl,x86_64-unknown-linux-musl
- uses: Swatinem/rust-cache@v2
- name: Install prerequisites
if: runner.os == 'Linux'
# ubuntu-22.04 comes with clang 14[0] which doesn't include support for signed and 64bit
# enum values which was added in clang 15[1].
#
# gcc-multilib provides at least <asm/types.h> which is referenced by libbpf.
#
# llvm provides llvm-objcopy which is used to build the BTF relocation tests.
#
# [0] https://github.com/actions/runner-images/blob/ubuntu22/20230724.1/images/linux/Ubuntu2204-Readme.md
#
# [1] https://github.com/llvm/llvm-project/commit/dc1c43d
run: |
set -euxo pipefail
wget -qO- https://apt.llvm.org/llvm-snapshot.gpg.key | sudo tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc
echo deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy main | sudo tee /etc/apt/sources.list.d/llvm.list
sudo apt update
sudo apt -y install clang gcc-multilib llvm locate qemu-system-{arm,x86}
- name: bpf-linker
if: runner.os == 'Linux'
run: cargo install bpf-linker --git https://github.com/aya-rs/bpf-linker.git
sudo apt -y install \
liblzma-dev \
lynx \
musl-tools \
qemu-system-{arm,x86}
- name: Install prerequisites
if: runner.os == 'macOS'
# The xargs shipped on macOS always exits 0 with -P0, so we need GNU findutils.
# The curl shipped on macOS doesn't contain
# https://github.com/curl/curl/commit/85efbb92b8e6679705e122cee45ce76c56414a3e which is
# needed for proper handling of `--etag-{compare,save}`.
#
# The tar shipped on macOS doesn't support --wildcards, so we need GNU tar.
#
# The clang shipped on macOS doesn't support BPF, so we need LLVM from brew.
#
# We also need LLVM for bpf-linker, see comment below.
# We need a musl C toolchain to compile our `test-distro` since some of
# our dependencies have build scripts that compile C code (i.e xz2).
# This is provided by `brew install filosottile/musl-cross/musl-cross`.
run: |
set -euxo pipefail
brew update
# Dependencies are tracked in `Brewfile`.
brew bundle
echo $(brew --prefix curl)/bin >> $GITHUB_PATH
echo $(brew --prefix llvm)/bin >> $GITHUB_PATH
# https://github.com/actions/setup-python/issues/577
find /usr/local/bin -type l -exec sh -c 'readlink -f "$1" \
| grep -q ^/Library/Frameworks/Python.framework/Versions/' _ {} \; -exec rm -v {} \;
HOMEBREW_NO_INSTALLED_DEPENDENTS_CHECK=1 \
brew install dpkg findutils gnu-tar llvm pkg-config qemu
echo /usr/local/opt/findutils/libexec/gnubin >> $GITHUB_PATH
echo /usr/local/opt/gnu-tar/libexec/gnubin >> $GITHUB_PATH
echo /usr/local/opt/llvm/bin >> $GITHUB_PATH
# https://github.com/Homebrew/homebrew-core/issues/140244
codesign --verify $(which qemu-system-x86_64) || brew reinstall qemu --build-from-source
- name: bpf-linker
if: runner.os == 'macOS'
# NB: rustc doesn't ship libLLVM.so on macOS, so disable proxying (default feature). We also
# --force so that bpf-linker gets always relinked against the latest LLVM installed by brew.
run: cargo install --force bpf-linker --git https://github.com/aya-rs/bpf-linker.git --no-default-features
- name: Download debian kernels
if: runner.arch == 'ARM64'
- uses: dtolnay/rust-toolchain@nightly
with:
components: rust-src
# Installed *after* nightly so it is the default.
- uses: dtolnay/rust-toolchain@stable
with:
targets: aarch64-unknown-linux-musl,x86_64-unknown-linux-musl
- uses: Swatinem/rust-cache@v2
- name: Install libLLVM
# Download libLLVM from Rust CI to ensure that the libLLVM version
# matches exactly with the version used by the current Rust nightly. A
# mismatch between libLLVM (used by bpf-linker) and Rust's LLVM version
# can lead to linking issues.
run: |
set -euxo pipefail
mkdir -p test/.tmp/debian-kernels/arm64
# NB: a 4.19 kernel image for arm64 was not available.
# TODO: enable tests on kernels before 6.0.
# linux-image-5.10.0-23-cloud-arm64-unsigned_5.10.179-3_arm64.deb \
printf '%s\0' \
linux-image-6.1.0-15-cloud-arm64-unsigned_6.1.66-1_arm64.deb \
| xargs -0 -t -P0 -I {} wget -nd -nv -P test/.tmp/debian-kernels/arm64 ftp://ftp.us.debian.org/debian/pool/main/l/linux/{}
# Get the partial SHA from Rust nightly.
rustc_sha=$(rustc +nightly --version | grep -oE '[a-f0-9]{7,40}')
# Get the full SHA from GitHub.
rustc_sha=$(curl -sfSL https://api.github.com/repos/rust-lang/rust/commits/$rustc_sha \
--header 'authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' \
--header 'content-type: application/json' \
| jq -r '.sha')
mkdir -p /tmp/rustc-llvm
curl -sfSL https://ci-artifacts.rust-lang.org/rustc-builds/$rustc_sha/rust-dev-nightly-${{ matrix.target }}.tar.xz | \
tar -xJ --strip-components 2 -C /tmp/rustc-llvm
echo /tmp/rustc-llvm/bin >> $GITHUB_PATH
# NB: rustc doesn't ship libLLVM.so on macOS, so disable proxying (default feature). We also
# --force so that bpf-linker gets always relinked against the latest LLVM downloaded above.
#
# Do this on all system (not just macOS) to avoid relying on rustc-provided libLLVM.so.
- run: cargo install --git https://github.com/aya-rs/bpf-linker.git bpf-linker --no-default-features --features llvm-21 --force
- uses: actions/cache@v4
with:
path: test/.tmp
key: ${{ runner.arch }}-${{ runner.os }}-test-cache
- name: Download debian kernels
if: runner.arch == 'ARM64'
run: .github/scripts/download_kernel_images.sh test/.tmp/debian-kernels/arm64 arm64 5.10 6.1 6.12
- name: Download debian kernels
if: runner.arch == 'X64'
run: .github/scripts/download_kernel_images.sh test/.tmp/debian-kernels/amd64 amd64 5.10 6.1 6.12
- name: Cleanup stale kernels and modules
run: |
set -euxo pipefail
mkdir -p test/.tmp/debian-kernels/amd64
# TODO: enable tests on kernels before 6.0.
# linux-image-4.19.0-21-cloud-amd64-unsigned_4.19.249-2_amd64.deb \
# linux-image-5.10.0-23-cloud-amd64-unsigned_5.10.179-3_amd64.deb \
printf '%s\0' \
linux-image-6.1.0-15-cloud-amd64-unsigned_6.1.66-1_amd64.deb \
| xargs -0 -t -P0 -I {} wget -nd -nv -P test/.tmp/debian-kernels/amd64 ftp://ftp.us.debian.org/debian/pool/main/l/linux/{}
- name: Extract debian kernels
run: |
set -euxo pipefail
find test/.tmp -name '*.deb' -print0 | xargs -t -0 -I {} \
sh -c "dpkg --fsys-tarfile {} | tar -C test/.tmp --wildcards --extract '*vmlinuz*' --file -"
rm -rf test/.tmp/boot test/.tmp/lib
- name: Run local integration tests
if: runner.os == 'Linux'
run: cargo xtask integration-test local
- name: Run virtualized integration tests
run: find test/.tmp -name 'vmlinuz-*' | xargs -t cargo xtask integration-test vm
if: runner.os == 'Linux'
run: |
set -euxo pipefail
# https://github.blog/changelog/2023-02-23-hardware-accelerated-android-virtualization-on-actions-windows-and-linux-larger-hosted-runners/
echo 'KERNEL=="kvm", GROUP="kvm", MODE="0666", OPTIONS+="static_node=kvm"' | sudo tee /etc/udev/rules.d/99-kvm4all.rules
sudo udevadm control --reload-rules
sudo udevadm trigger --name-match=kvm || true # kvm is not available on arm64.
find test/.tmp -name '*.deb' -print0 | sort -Vz | xargs -t -0 \
cargo xtask integration-test vm --cache-dir test/.tmp \
--github-api-token ${{ secrets.GITHUB_TOKEN }}
- name: Run virtualized integration tests
if: runner.os == 'macOS'
env:
# This sets the linker to the one installed by FiloSottile/musl-cross.
CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_LINKER: x86_64-linux-musl-gcc
run: |
set -euxo pipefail
find test/.tmp -name '*.deb' -print0 | sort -Vz | xargs -t -0 \
cargo xtask integration-test vm --cache-dir test/.tmp \
--github-api-token ${{ secrets.GITHUB_TOKEN }}
# Provides a single status check for the entire build workflow.
# This is used for merge automation, like Mergify, since GH actions
@ -296,5 +367,4 @@ jobs:
- run-integration-test
runs-on: ubuntu-latest
steps:
- name: Build Complete
run: echo "Build Complete"
- run: echo 'Build Complete'

@ -1,62 +1,61 @@
name: codegen
on: workflow_dispatch
on:
push:
branches-ignore:
- 'create-pull-request/**'
schedule:
- cron: 00 4 * * *
workflow_dispatch:
jobs:
codegen:
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
with:
submodules: recursive
- name: update libbpf
working-directory: xtask/libbpf
run: |
set -e
git fetch origin
git checkout origin/HEAD
echo "LIBBPF_SHA=$(git rev-parse HEAD)" >> $GITHUB_ENV
- uses: dtolnay/rust-toolchain@master
- uses: dtolnay/rust-toolchain@nightly
with:
toolchain: nightly
components: rustfmt, clippy
- uses: Swatinem/rust-cache@v2
- name: Install headers
run: |
set -euxo pipefail
sudo apt -y update
sudo apt -y install libc6-dev libc6-dev-{arm64,armel,riscv64}-cross
sudo apt -y install libelf-dev libc6-dev libc6-dev-{arm64,armel,loong64,riscv64,ppc64el,s390x,mips}-cross
- name: Run codegen
run: |
cargo xtask codegen
- run: cargo xtask codegen
# aya-ebpf-bindings aren't emitted directly from bindgen and so aren't formatted.
- run: cargo fmt --all
- run: cargo xtask public-api --bless
- name: Check for changes
run: |
git diff --quiet || echo "COMMIT_CHANGES=1" >> $GITHUB_ENV
- id: libbpf
working-directory: xtask/libbpf
run: echo "sha=$(git rev-parse HEAD)" >> "$GITHUB_OUTPUT"
- name: Commit Changes
id: commit
if: env.COMMIT_CHANGES == 1
uses: devops-infra/action-commit-push@master
with:
github_token: "${{ secrets.CRABBY_GITHUB_TOKEN }}"
commit_prefix: "[codegen] Update libbpf to ${{ env.LIBBPF_SHA }}"
commit_message: "Update libbpf to ${{ env.LIBBPF_SHA }}"
target_branch: codegen
force: true
- name: Create pull request
if: steps.commit.outputs.files_changed != ''
uses: devops-infra/action-pull-request@master
- uses: peter-evans/create-pull-request@v7
if: ${{ github.repository == 'aya-rs/aya' }}
with:
github_token: ${{ secrets.CRABBY_GITHUB_TOKEN }}
body: "**Automated pull request**<br><br>Update libbpf to ${{ env.LIBBPF_SHA }}"
title: Update libbpf to ${{ env.LIBBPF_SHA }}
source_branch: codegen
target_branch: main
get_diff: true
# GitHub actions aren't allowed to trigger other actions to prevent
# abuse; the canonical workaround is to use a sufficiently authorized
# token.
#
# See https://github.com/peter-evans/create-pull-request/issues/48.
token: ${{ secrets.CRABBY_GITHUB_TOKEN }}
branch: create-pull-request/codegen
commit-message: |
aya-obj, aya-ebpf-bindings: regenerate
libbpf commit: ${{ steps.libbpf.outputs.sha }}
title: 'aya-obj, aya-ebpf-bindings: regenerate'
body: |
**Automated changes**
libbpf commit: ${{ steps.libbpf.outputs.sha }}

@ -1,11 +1,4 @@
pull_request_rules:
- name: automatic merge for Dependabot pull request that pass CI
conditions:
- author=dependabot[bot]
actions:
comment:
message: "@dependabot merge"
# REVIEW MANAGEMENT
- name: ask alessandrod to review public API changes

@ -2,3 +2,4 @@
[rule.formatting]
indent_string = " "
reorder_keys = true

@ -1,7 +1,9 @@
{
"rust-analyzer.check.allTargets": true,
"rust-analyzer.check.command": "clippy",
"search.exclude": {
"/xtask/public-api/*.txt": true,
},
"rust-analyzer.check.allTargets": true,
"rust-analyzer.check.command": "clippy",
"rust-analyzer.cargo.target": "x86_64-unknown-linux-musl",
"search.exclude": {
"/xtask/public-api/*.txt": true
},
"yaml.format.singleQuote": true
}

@ -0,0 +1,31 @@
# AGENTS NOTES
- Repository: aya (Rust library and tooling for working with eBPF programs).
- Development tooling:
- Do not regenerate public API fixtures; the user handles that.
- Many crates only build on Linux; on macOS lint and type check using
```sh
./clippy.sh --target x86_64-unknown-linux-musl
```
- Coding guidelines:
- Use github or bootlin permalinks when referencing kernel sources.
- Integration testing:
- Prepare:
```sh
.github/scripts/download_kernel_images.sh \
test/.tmp/debian-kernels/<arch> <arch> [VERSIONS]...
```
- Run:
<!-- markdownlint-disable line-length -->
```sh
find test/.tmp -name '*.deb' -print0 | xargs -0 -t sh -c \
'cargo xtask integration-test vm --cache-dir test/.tmp "$@" -- <test-filter> [ARGS]...' _
```
<!-- markdownlint-restore -->

@ -0,0 +1,10 @@
# Keep this congruent with `.github/workflows/ci.yml`.
brew "curl"
brew "llvm"
brew "lynx"
brew "pkg-config"
brew "qemu"
tap "filosottile/musl-cross"
brew "filosottile/musl-cross/musl-cross"

@ -115,7 +115,7 @@ the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available [here][covenant-2-0].
[version 2.0][covenant-2-0].
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
@ -124,5 +124,5 @@ enforcement ladder](https://github.com/mozilla/diversity).
[covenant-2-0]: https://www.contributor-covenant.org/version/2/0/code_of_conduct
For answers to common questions about this code of conduct, see the
[FAQ](https://www.contributor-covenant.org/faq). Translations are available
[here](https://www.contributor-covenant.org/translations).
[FAQ](https://www.contributor-covenant.org/faq).
[Translations](https://www.contributor-covenant.org/translations) are available.

@ -1,12 +1,15 @@
[workspace]
members = [
"aya",
"aya-build",
"aya-log",
"aya-log-common",
"aya-log-parser",
"aya-obj",
"aya-tool",
"init",
"ebpf-panic",
"test-distro",
"test/integration-common",
"test/integration-test",
"xtask",
@ -25,12 +28,15 @@ resolver = "2"
default-members = [
"aya",
"aya-build",
"aya-log",
"aya-log-common",
"aya-log-parser",
"aya-obj",
"aya-tool",
"init",
"ebpf-panic",
"test-distro",
"test/integration-common",
# test/integration-test is omitted; including it in this list causes `cargo test` to run its
# tests, and that doesn't work unless they've been built with `cargo xtask`.
"xtask",
@ -39,72 +45,119 @@ default-members = [
"aya-log-ebpf-macros",
# ebpf crates are omitted; they must be built with:
#
# RUSTFLAGS="--cfg bpf_target_arch={aarch64,arm,loongarch64,mips,powerpc64,riscv64,s390x,x86_64}"
# ...
# --target bpfe{b,l}-unknown-none
# CARGO_CFG_BPF_TARGET_ARCH={x86_64,aarch64,arm,riscv64}
]
[workspace.package]
authors = ["Aya Contributors"]
edition = "2024"
homepage = "https://aya-rs.dev"
license = "MIT OR Apache-2.0"
repository = "https://github.com/aya-rs/aya"
homepage = "https://aya-rs.dev"
edition = "2021"
rust-version = "1.87.0"
# NOTE(vadorovsky): Neither cargo-udeps nor cargo-machete are able to detect
# unused crates defined in this section. It would be nice to teach either of
# them to do that, but in the meantime we need to be careful.
[workspace.dependencies]
anyhow = { version = "1", default-features = false }
ar = { version = "0.9", default-features = false }
assert_matches = { version = "1.5.0", default-features = false }
async-io = { version = "2.0", default-features = false }
bindgen = { version = "0.69", default-features = false }
base64 = { version = "0.22.1", default-features = false }
bindgen = { version = "0.72", default-features = false }
bitflags = { version = "2.2.1", default-features = false }
bytes = { version = "1", default-features = false }
cargo_metadata = { version = "0.18.0", default-features = false }
cargo_metadata = { version = "0.23.0", default-features = false }
clap = { version = "4", default-features = false }
const-assert = { version = "1.0.1", default-features = false }
core-error = { version = "0.0.0", default-features = false }
dialoguer = { version = "0.11", default-features = false }
dialoguer = { version = "0.12", default-features = false }
diff = { version = "0.1.13", default-features = false }
env_logger = { version = "0.11", default-features = false }
epoll = { version = "4.3.3", default-features = false }
futures = { version = "0.3.28", default-features = false }
hashbrown = { version = "0.14.3", default-features = false }
glob = { version = "0.3.0", default-features = false }
hashbrown = { version = "0.16.0", default-features = false }
indoc = { version = "2.0", default-features = false }
integration-ebpf = { path = "test/integration-ebpf", default-features = false }
lazy_static = { version = "1", default-features = false }
libc = { version = "0.2.105", default-features = false }
log = { version = "0.4", default-features = false }
netns-rs = { version = "0.1", default-features = false }
nix = { version = "0.29.0", default-features = false }
network-types = { version = "0.1.0", default-features = false }
nix = { version = "0.30.1", default-features = false }
num_enum = { version = "0.7", default-features = false }
object = { version = "0.36", default-features = false }
proc-macro-error = { version = "1.0", default-features = false }
object = { version = "0.37", default-features = false }
once_cell = { version = "1.20.1", default-features = false }
proc-macro2 = { version = "1", default-features = false }
public-api = { version = "0.35.0", default-features = false }
proc-macro2-diagnostics = { version = "0.10.1", default-features = false }
procfs = { version = "0.18.0", default-features = false }
public-api = { version = "0.50.0", default-features = false }
quote = { version = "1", default-features = false }
rand = { version = "0.8", default-features = false }
rbpf = { version = "0.2.0", default-features = false }
rand = { version = "0.9", default-features = false }
rbpf = { version = "0.3.0", default-features = false }
rustdoc-json = { version = "0.9.0", default-features = false }
rustup-toolchain = { version = "0.1.5", default-features = false }
rustversion = { version = "1.0.0", default-features = false }
scopeguard = { version = "1.2.0", default-features = false }
syn = { version = "2", default-features = false }
tar = { version = "0.4.44", default-features = false }
tempfile = { version = "3", default-features = false }
test-case = { version = "3.1.0", default-features = false }
test-log = { version = "0.2.13", default-features = false }
testing_logger = { version = "0.1.1", default-features = false }
thiserror = { version = "1", default-features = false }
thiserror = { version = "2.0.3", default-features = false }
tokio = { version = "1.24.0", default-features = false }
which = { version = "6.0.0", default-features = false }
xdpilone = { version = "1.0", default-features = false }
xtask = { path = "xtask", default-features = false }
[profile.dev]
panic = "abort"
walkdir = { version = "2", default-features = false }
which = { version = "8.0.0", default-features = false }
xdpilone = { version = "1.0.5", default-features = false }
xz2 = { version = "0.1.7", default-features = false }
[profile.release]
panic = "abort"
[workspace.lints.clippy]
all = "warn"
as_ptr_cast_mut = "warn"
as_underscore = "warn"
cast_lossless = "warn"
#cast_possible_truncation = "warn"
#cast_possible_wrap = "warn"
cast_precision_loss = "warn"
#cast_sign_loss = "warn"
char_lit_as_u8 = "warn"
fn_to_numeric_cast = "warn"
fn_to_numeric_cast_with_truncation = "warn"
mut_mut = "warn"
needless_bitwise_bool = "warn"
needless_lifetimes = "warn"
no_mangle_with_rust_abi = "warn"
ptr_as_ptr = "warn"
ptr_cast_constness = "warn"
ref_as_ptr = "warn"
unnecessary_cast = "warn"
unused_trait_names = "warn"
use_self = "warn"
[profile.release.package.integration-ebpf]
debug = 2
codegen-units = 1
[workspace.lints.rust]
absolute_paths_not_starting_with_crate = "warn"
deprecated_in_future = "warn"
elided_lifetimes_in_paths = "warn"
explicit_outlives_requirements = "warn"
ffi_unwind_calls = "warn"
keyword_idents = "warn"
#let_underscore_drop = "warn"
macro_use_extern_crate = "warn"
meta_variable_misuse = "warn"
missing_abi = "warn"
#missing_copy_implementations = "warn"
non_ascii_idents = "warn"
noop_method_call = "warn"
single_use_lifetimes = "warn"
trivial_numeric_casts = "warn"
unreachable_pub = "warn"
unsafe_op_in_unsafe_fn = "warn"
unstable_features = "warn"
unused_crate_dependencies = "warn"
unused_extern_crates = "warn"
unused_import_braces = "warn"
unused_lifetimes = "warn"
unused_macro_rules = "warn"
#unused_qualifications = "warn" # https://github.com/rust-lang/rust/commit/9ccc7b7 added size_of to the prelude, but we need to continue to qualify it so that we build on older compilers.
#unused_results = "warn"

@ -4,6 +4,7 @@
![License][license-badge]
[![Build status][build-badge]][build-url]
[![Book][book-badge]][book-url]
[![Gurubase][gurubase-badge]][gurubase-url]
[crates-badge]: https://img.shields.io/crates/v/aya.svg?style=for-the-badge&logo=rust
[crates-url]: https://crates.io/crates/aya
@ -12,6 +13,8 @@
[build-url]: https://github.com/aya-rs/aya/actions/workflows/ci.yml
[book-badge]: https://img.shields.io/badge/read%20the-book-9cf.svg?style=for-the-badge&logo=mdbook
[book-url]: https://aya-rs.dev/book
[gurubase-badge]: https://img.shields.io/badge/Gurubase-Ask%20Aya%20Guru-006BFF?style=for-the-badge
[gurubase-url]: https://gurubase.io/g/aya
## API Documentation
@ -75,7 +78,7 @@ use a `BPF_PROG_TYPE_CGROUP_SKB` program with aya:
```rust
use std::fs::File;
use aya::Ebpf;
use aya::programs::{CgroupSkb, CgroupSkbAttachType};
use aya::programs::{CgroupSkb, CgroupSkbAttachType, CgroupAttachMode};
// load the BPF code
let mut ebpf = Ebpf::load_file("ebpf.o")?;
@ -89,7 +92,7 @@ ingress.load()?;
// attach the program to the root cgroup. `ingress_filter` will be called for all
// incoming packets.
let cgroup = File::open("/sys/fs/cgroup/unified")?;
ingress.attach(cgroup, CgroupSkbAttachType::Ingress)?;
ingress.attach(cgroup, CgroupSkbAttachType::Ingress, CgroupAttachMode::AllowOverride)?;
```
## Contributing

@ -0,0 +1,18 @@
[package]
description = "Build-time support for aya projects"
name = "aya-build"
version = "0.1.2"
authors.workspace = true
edition.workspace = true
homepage.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true
[lints]
workspace = true
[dependencies]
anyhow = { workspace = true, default-features = true }
cargo_metadata = { workspace = true }

@ -0,0 +1,274 @@
use std::{
borrow::Cow,
env,
ffi::OsString,
fs,
io::{BufRead as _, BufReader},
path::PathBuf,
process::{Child, Command, Stdio},
};
use anyhow::{Context as _, Result, anyhow};
use cargo_metadata::{Artifact, CompilerMessage, Message, Target};
/// A workspace package whose eBPF binary targets should be built by
/// `build_ebpf`.
#[derive(Default)]
pub struct Package<'a> {
    /// Cargo package name, forwarded as `cargo build --package <name>`.
    pub name: &'a str,
    /// Root directory of the package; emitted via `cargo:rerun-if-changed` so
    /// changes to the package's binaries trigger a rebuild.
    pub root_dir: &'a str,
    /// When true, pass `--no-default-features` to the build.
    pub no_default_features: bool,
    /// Features to enable, joined with `,` and passed via `--features`.
    pub features: &'a [&'a str],
}
/// Normalizes a Cargo target-arch string for use as a BPF target arch.
///
/// RISC-V targets report sub-variant names such as `riscv64gc`; any arch
/// beginning with `riscv64` is collapsed to plain `riscv64`. All other
/// values pass through unchanged.
fn target_arch_fixup(target_arch: Cow<'_, str>) -> Cow<'_, str> {
    match target_arch {
        arch if arch.starts_with("riscv64") => Cow::Borrowed("riscv64"),
        arch => arch,
    }
}
/// Build binary artifacts produced by `packages`.
///
/// Each package is compiled with `rustup run <toolchain> cargo build` for the
/// BPF target matching the consuming crate's endianness, and every produced
/// binary is copied into `OUT_DIR` under its cargo target name.
///
/// This would be better expressed as one or more [artifact-dependencies][bindeps] but issues such
/// as:
///
/// * <https://github.com/rust-lang/cargo/issues/12374>
/// * <https://github.com/rust-lang/cargo/issues/12375>
/// * <https://github.com/rust-lang/cargo/issues/12385>
///
/// prevent their use for the time being.
///
/// # Errors
///
/// Returns an error if `OUT_DIR` or `CARGO_CFG_TARGET_ENDIAN` is not set, if the
/// endianness is unrecognized, or if spawning, waiting on, or copying the output
/// of any `cargo build` invocation fails.
///
/// # Panics
///
/// Panics if `CARGO_CFG_TARGET_ARCH` is unset or not valid UTF-8, or if a child
/// process pipe cannot be read.
///
/// [bindeps]: https://doc.rust-lang.org/nightly/cargo/reference/unstable.html?highlight=feature#artifact-dependencies
pub fn build_ebpf<'a>(
    packages: impl IntoIterator<Item = Package<'a>>,
    toolchain: Toolchain<'a>,
) -> Result<()> {
    let out_dir = env::var_os("OUT_DIR").ok_or(anyhow!("OUT_DIR not set"))?;
    let out_dir = PathBuf::from(out_dir);
    // Pick the BPF target whose byte order matches the target the consuming
    // crate is being compiled for.
    let endian =
        env::var_os("CARGO_CFG_TARGET_ENDIAN").ok_or(anyhow!("CARGO_CFG_TARGET_ENDIAN not set"))?;
    let target = if endian == "big" {
        "bpfeb"
    } else if endian == "little" {
        "bpfel"
    } else {
        return Err(anyhow!("unsupported endian={endian:?}"));
    };
    const TARGET_ARCH: &str = "CARGO_CFG_TARGET_ARCH";
    let bpf_target_arch =
        env::var_os(TARGET_ARCH).unwrap_or_else(|| panic!("{TARGET_ARCH} not set"));
    let bpf_target_arch = bpf_target_arch
        .into_string()
        .unwrap_or_else(|err| panic!("OsString::into_string({TARGET_ARCH}): {err:?}"));
    let bpf_target_arch = target_arch_fixup(bpf_target_arch.into());
    let target = format!("{target}-unknown-none");
    for Package {
        name,
        root_dir,
        no_default_features,
        features,
    } in packages
    {
        // We have a build-dependency on `name`, so cargo will automatically rebuild us if `name`'s
        // *library* target or any of its dependencies change. Since we depend on `name`'s *binary*
        // targets, that only gets us half of the way. This stanza ensures cargo will rebuild us on
        // changes to the binaries too, which gets us the rest of the way.
        println!("cargo:rerun-if-changed={root_dir}");
        let mut cmd = Command::new("rustup");
        // `-Z build-std=core` is an unstable flag, which is why the build runs
        // under `rustup run <toolchain>` (nightly by default — see [`Toolchain`]).
        cmd.args([
            "run",
            toolchain.as_str(),
            "cargo",
            "build",
            "--package",
            name,
            "-Z",
            "build-std=core",
            "--bins",
            "--message-format=json",
            "--release",
            "--target",
            &target,
        ]);
        if no_default_features {
            cmd.arg("--no-default-features");
        }
        cmd.args(["--features", &features.join(",")]);
        {
            // CARGO_ENCODED_RUSTFLAGS separates individual flags with 0x1f
            // (ASCII unit separator), allowing flags to contain spaces.
            const SEPARATOR: &str = "\x1f";
            let mut rustflags = OsString::new();
            for s in [
                "--cfg=bpf_target_arch=\"",
                &bpf_target_arch,
                "\"",
                SEPARATOR,
                "-Cdebuginfo=2",
                SEPARATOR,
                "-Clink-arg=--btf",
            ] {
                rustflags.push(s);
            }
            cmd.env("CARGO_ENCODED_RUSTFLAGS", rustflags);
        }
        // Workaround to make sure that the correct toolchain is used: drop any
        // compiler overrides inherited from the outer cargo invocation.
        for key in ["RUSTC", "RUSTC_WORKSPACE_WRAPPER"] {
            cmd.env_remove(key);
        }
        // Workaround for https://github.com/rust-lang/cargo/issues/6412 where cargo flocks itself.
        let target_dir = out_dir.join(name);
        cmd.arg("--target-dir").arg(&target_dir);
        let mut child = cmd
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()
            .with_context(|| format!("failed to spawn {cmd:?}"))?;
        let Child { stdout, stderr, .. } = &mut child;
        // Trampoline child stderr to cargo warnings on a dedicated thread, so
        // it is drained concurrently with stdout (which is parsed below).
        let stderr = stderr.take().expect("stderr");
        let stderr = BufReader::new(stderr);
        let stderr = std::thread::spawn(move || {
            for line in stderr.lines() {
                let line = line.expect("read line");
                println!("cargo:warning={line}");
            }
        });
        // Parse cargo's JSON message stream to discover the produced binaries;
        // surface compiler diagnostics and stray text as cargo warnings.
        let stdout = stdout.take().expect("stdout");
        let stdout = BufReader::new(stdout);
        let mut executables = Vec::new();
        for message in Message::parse_stream(stdout) {
            #[expect(clippy::collapsible_match)]
            match message.expect("valid JSON") {
                Message::CompilerArtifact(Artifact {
                    executable,
                    target: Target { name, .. },
                    ..
                }) => {
                    if let Some(executable) = executable {
                        executables.push((name, executable.into_std_path_buf()));
                    }
                }
                Message::CompilerMessage(CompilerMessage { message, .. }) => {
                    for line in message.rendered.unwrap_or_default().split('\n') {
                        println!("cargo:warning={line}");
                    }
                }
                Message::TextLine(line) => {
                    println!("cargo:warning={line}");
                }
                _ => {}
            }
        }
        let status = child
            .wait()
            .with_context(|| format!("failed to wait for {cmd:?}"))?;
        if !status.success() {
            return Err(anyhow!("{cmd:?} failed: {status:?}"));
        }
        // `resume_unwind` re-raises any panic from the logging thread and never
        // returns, so the `Err` payload is uninhabited; the empty match proves
        // that branch unreachable at compile time.
        match stderr.join().map_err(std::panic::resume_unwind) {
            Ok(()) => {}
            Err(err) => match err {},
        }
        // Copy each produced binary into OUT_DIR, named after its cargo target.
        for (name, binary) in executables {
            let dst = out_dir.join(name);
            let _: u64 = fs::copy(&binary, &dst)
                .with_context(|| format!("failed to copy {binary:?} to {dst:?}"))?;
        }
    }
    Ok(())
}
/// The toolchain used to compile eBPF programs.
#[derive(Default)]
pub enum Toolchain<'a> {
    /// The latest nightly toolchain, i.e. `nightly`.
    #[default]
    Nightly,
    /// A pinned toolchain, e.g. `nightly-2021-01-01`.
    ///
    /// The specifier is handed to `rustup run` and therefore must _not_ carry a
    /// leading `+`.
    Custom(&'a str),
}

impl<'a> Toolchain<'a> {
    /// Returns the toolchain specifier to pass to `rustup run`.
    fn as_str(&self) -> &'a str {
        match *self {
            Self::Nightly => "nightly",
            Self::Custom(toolchain) => toolchain,
        }
    }
}
/// Emit cfg flags that describe the desired BPF target architecture.
pub fn emit_bpf_target_arch_cfg() {
// The presence of this environment variable indicates that `--cfg
// bpf_target_arch="..."` was passed to the compiler, so we don't need to
// emit it again. Note that we cannot *set* this environment variable - it
// is set by cargo.
const BPF_TARGET_ARCH: &str = "CARGO_CFG_BPF_TARGET_ARCH";
println!("cargo:rerun-if-env-changed={BPF_TARGET_ARCH}");
// Users may directly set this environment variable in situations where
// using RUSTFLAGS to set `--cfg bpf_target_arch="..."` is not possible or
// not ergonomic. In contrast to RUSTFLAGS this mechanism reuses the target
// cache for all values, producing many more invalidations.
const AYA_BPF_TARGET_ARCH: &str = "AYA_BPF_TARGET_ARCH";
println!("cargo:rerun-if-env-changed={AYA_BPF_TARGET_ARCH}");
const HOST: &str = "HOST";
println!("cargo:rerun-if-env-changed={HOST}");
if std::env::var_os(BPF_TARGET_ARCH).is_none() {
let host = std::env::var_os(HOST).unwrap_or_else(|| panic!("{HOST} not set"));
let host = host
.into_string()
.unwrap_or_else(|err| panic!("OsString::into_string({HOST}): {err:?}"));
let host = host.as_str();
let bpf_target_arch = if let Some(bpf_target_arch) = std::env::var_os(AYA_BPF_TARGET_ARCH) {
bpf_target_arch
.into_string()
.unwrap_or_else(|err| {
panic!("OsString::into_string({AYA_BPF_TARGET_ARCH}): {err:?}")
})
.into()
} else {
target_arch_fixup(
host.split_once('-')
.map_or(host, |(arch, _rest)| arch)
.into(),
)
};
println!("cargo:rustc-cfg=bpf_target_arch=\"{bpf_target_arch}\"");
}
print!("cargo::rustc-check-cfg=cfg(bpf_target_arch, values(");
for value in [
"aarch64",
"arm",
"loongarch64",
"mips",
"powerpc64",
"riscv64",
"s390x",
"x86_64",
] {
print!("\"{value}\",");
}
println!("))");
}

@ -5,6 +5,43 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## v0.1.1 (2024-10-09)
### Chore
- <csr-id-a6f4739b5b138e718632758cad266ee3cb7b1b65/> aya-ebpf-macros: uncomment aya-ebpf dev-dep
This wasn't meant to be committed, cargo-smart-release. Commenting is
needed to fix the cyclic dep aya-ebpf-macros -> aya-ebpf ->
aya-ebpf-macros. See
https://github.com/rust-lang/cargo/issues/4242#issuecomment-413203081
### Other
- <csr-id-b84ede10b9c4813f221fade16b60d5ced4ecdc58/> separate probe to probe ctx & retprobe to retprobe ctx
Added logic in expand function in both kprobe.rs and uprobe.rs for valid
macros. Now, kprobe & uprobe proc macros only accept ProbeContext, and
kretprobe & uretprobe only accept RetProbeContext.
### Commit Statistics
<csr-read-only-do-not-edit/>
- 2 commits contributed to the release.
- 185 days passed between releases.
- 2 commits were understood as [conventional](https://www.conventionalcommits.org).
- 0 issues like '(#ID)' were seen in commit messages
### Commit Details
<csr-read-only-do-not-edit/>
<details><summary>view details</summary>
* **Uncategorized**
- Separate probe to probe ctx & retprobe to retprobe ctx ([`b84ede1`](https://github.com/aya-rs/aya/commit/b84ede10b9c4813f221fade16b60d5ced4ecdc58))
- Aya-ebpf-macros: uncomment aya-ebpf dev-dep ([`a6f4739`](https://github.com/aya-rs/aya/commit/a6f4739b5b138e718632758cad266ee3cb7b1b65))
</details>
## v0.1.0 (2024-04-06)
<csr-id-ea8073793e44c593e983e69eaa43a4f72799bfc5/>
@ -22,7 +59,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
<csr-read-only-do-not-edit/>
- 8 commits contributed to the release over the course of 31 calendar days.
- 9 commits contributed to the release.
- 2 commits were understood as [conventional](https://www.conventionalcommits.org).
- 0 issues like '(#ID)' were seen in commit messages
@ -33,6 +70,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
<details><summary>view details</summary>
* **Uncategorized**
- Release aya-ebpf-macros v0.1.0 ([`9d24bbe`](https://github.com/aya-rs/aya/commit/9d24bbe316ddf5caca7413198d6f79a0064def88))
- Release aya-ebpf-macros v0.1.0 ([`90f68db`](https://github.com/aya-rs/aya/commit/90f68dbd074e4cd74540d98fb9f17b6c2de3d054))
- Release aya-ebpf-macros v0.1.0, aya-ebpf v0.1.0 ([`eb3947b`](https://github.com/aya-rs/aya/commit/eb3947bf14e8e7ab0f70e12306e38fb8056edf57))
- Release aya-ebpf-bindings v0.1.0, aya-ebpf-macros v0.1.0, aya-ebpf v0.1.0 ([`a34c5e4`](https://github.com/aya-rs/aya/commit/a34c5e43b85dd176b9b18f1cc9c9d80d52f10a1f))

@ -1,21 +1,26 @@
[package]
name = "aya-ebpf-macros"
version = "0.1.0"
description = "Proc macros used by aya-ebpf"
name = "aya-ebpf-macros"
version = "0.1.1"
authors.workspace = true
edition.workspace = true
homepage.workspace = true
license.workspace = true
repository.workspace = true
homepage.workspace = true
edition.workspace = true
rust-version.workspace = true
[lints]
workspace = true
[lib]
proc-macro = true
[dependencies]
proc-macro2 = { workspace = true }
proc-macro-error = { workspace = true }
proc-macro2-diagnostics = { workspace = true }
quote = { workspace = true }
syn = { workspace = true, default-features = true, features = ["full"] }
[dev-dependencies]
aya-ebpf = { path = "../ebpf/aya-ebpf", version = "^0.1.0", default-features = false }
aya-ebpf = { path = "../ebpf/aya-ebpf", default-features = false }

@ -1,7 +1,7 @@
use syn::{
Error, Ident, LitStr, Result, Token,
parse::{Parse, ParseStream},
punctuated::{Pair, Punctuated},
Error, Ident, LitStr, Result, Token,
};
pub(crate) struct NameValue {
@ -19,7 +19,7 @@ pub(crate) struct Args {
}
impl Parse for Args {
fn parse(input: ParseStream) -> Result<Args> {
fn parse(input: ParseStream<'_>) -> Result<Self> {
let args = Punctuated::<Arg, Token![,]>::parse_terminated_with(input, |input| {
let ident = input.parse::<Ident>()?;
let lookahead = input.lookahead1();
@ -42,7 +42,7 @@ impl Parse for Args {
})
.collect();
Ok(Args { args })
Ok(Self { args })
}
}

@ -0,0 +1,75 @@
use std::borrow::Cow;
use proc_macro2::TokenStream;
use quote::quote;
use syn::{ItemStatic, Result};
use crate::args::name_arg;
/// Implementation of the BTF map attribute macro: re-emits the annotated
/// `static` item placed in the `.maps` ELF section under a chosen export name.
pub(crate) struct BtfMap {
    // The annotated `static` item, re-emitted unchanged.
    item: ItemStatic,
    // Export (symbol) name for the map; defaults to the static's identifier.
    name: String,
}

impl BtfMap {
    /// Parses the attribute arguments and the annotated `static` item.
    ///
    /// An explicit `name = "..."` argument overrides the default export name
    /// taken from the static's identifier.
    pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Self> {
        let item: ItemStatic = syn::parse2(item)?;
        let mut args = syn::parse2(attrs)?;
        let name = name_arg(&mut args).unwrap_or_else(|| item.ident.to_string());
        Ok(Self { item, name })
    }

    /// Expands to the original static annotated with the `.maps` link section
    /// and the chosen export name.
    pub(crate) fn expand(&self) -> TokenStream {
        let section_name: Cow<'_, _> = ".maps".into();
        let name = &self.name;
        let item = &self.item;
        quote! {
            #[unsafe(link_section = #section_name)]
            #[unsafe(export_name = #name)]
            #item
        }
    }
}
#[cfg(test)]
mod tests {
    use syn::parse_quote;

    use super::*;

    // An explicit `name = "..."` attribute argument overrides the default
    // export name derived from the static's identifier.
    #[test]
    fn test_map_with_name() {
        let map = BtfMap::parse(
            parse_quote!(name = "foo"),
            parse_quote!(
                static BAR: HashMap<&'static str, u32> = HashMap::new();
            ),
        )
        .unwrap();
        let expanded = map.expand();
        let expected = quote!(
            #[unsafe(link_section = ".maps")]
            #[unsafe(export_name = "foo")]
            static BAR: HashMap<&'static str, u32> = HashMap::new();
        );
        assert_eq!(expected.to_string(), expanded.to_string());
    }

    // Without a `name` argument, the static's own identifier becomes the
    // export name.
    #[test]
    fn test_map_no_name() {
        let map = BtfMap::parse(
            parse_quote!(),
            parse_quote!(
                static BAR: HashMap<&'static str, u32> = HashMap::new();
            ),
        )
        .unwrap();
        let expanded = map.expand();
        let expected = quote!(
            #[unsafe(link_section = ".maps")]
            #[unsafe(export_name = "BAR")]
            static BAR: HashMap<&'static str, u32> = HashMap::new();
        );
        assert_eq!(expected.to_string(), expanded.to_string());
    }
}

@ -4,7 +4,7 @@ use proc_macro2::TokenStream;
use quote::quote;
use syn::{ItemFn, Result};
use crate::args::{err_on_unknown_args, pop_string_arg, Args};
use crate::args::{Args, err_on_unknown_args, pop_string_arg};
pub(crate) struct BtfTracePoint {
item: ItemFn,
@ -18,28 +18,33 @@ impl BtfTracePoint {
let function = pop_string_arg(&mut args, "function");
err_on_unknown_args(&args)?;
Ok(BtfTracePoint { item, function })
Ok(Self { item, function })
}
pub(crate) fn expand(&self) -> Result<TokenStream> {
let section_name: Cow<'_, _> = if let Some(function) = &self.function {
format!("tp_btf/{}", function).into()
pub(crate) fn expand(&self) -> TokenStream {
let Self { item, function } = self;
let ItemFn {
attrs: _,
vis,
sig,
block: _,
} = item;
let section_name: Cow<'_, _> = if let Some(function) = function {
format!("tp_btf/{function}").into()
} else {
"tp_btf".into()
};
let fn_vis = &self.item.vis;
let fn_name = self.item.sig.ident.clone();
let item = &self.item;
Ok(quote! {
#[no_mangle]
#[link_section = #section_name]
#fn_vis fn #fn_name(ctx: *mut ::core::ffi::c_void) -> i32 {
let fn_name = &sig.ident;
quote! {
#[unsafe(no_mangle)]
#[unsafe(link_section = #section_name)]
#vis fn #fn_name(ctx: *mut ::core::ffi::c_void) -> i32 {
let _ = #fn_name(::aya_ebpf::programs::BtfTracePointContext::new(ctx));
return 0;
#item
}
})
}
}
}
@ -60,10 +65,10 @@ mod tests {
),
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote!(
#[no_mangle]
#[link_section = "tp_btf"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "tp_btf")]
fn foo(ctx: *mut ::core::ffi::c_void) -> i32 {
let _ = foo(::aya_ebpf::programs::BtfTracePointContext::new(ctx));
return 0;
@ -87,10 +92,10 @@ mod tests {
),
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote!(
#[no_mangle]
#[link_section = "tp_btf/some_func"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "tp_btf/some_func")]
fn foo(ctx: *mut ::core::ffi::c_void) -> i32 {
let _ = foo(::aya_ebpf::programs::BtfTracePointContext::new(ctx));
return 0;

@ -1,34 +1,39 @@
use proc_macro2::TokenStream;
use proc_macro_error::abort;
use proc_macro2_diagnostics::{Diagnostic, SpanDiagnosticExt as _};
use quote::quote;
use syn::{ItemFn, Result};
use syn::{ItemFn, spanned::Spanned as _};
pub(crate) struct CgroupDevice {
item: ItemFn,
}
impl CgroupDevice {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Self> {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Self, Diagnostic> {
if !attrs.is_empty() {
abort!(attrs, "unexpected attribute")
return Err(attrs.span().error("unexpected attribute"));
}
let item = syn::parse2(item)?;
Ok(CgroupDevice { item })
Ok(Self { item })
}
pub(crate) fn expand(&self) -> Result<TokenStream> {
let fn_vis = &self.item.vis;
let fn_name = self.item.sig.ident.clone();
let item = &self.item;
Ok(quote! {
#[no_mangle]
#[link_section = "cgroup/dev"]
#fn_vis fn #fn_name(ctx: *mut ::aya_ebpf::bindings::bpf_cgroup_dev_ctx) -> i32 {
pub(crate) fn expand(&self) -> TokenStream {
let Self { item } = self;
let ItemFn {
attrs: _,
vis,
sig,
block: _,
} = item;
let fn_name = &sig.ident;
quote! {
#[unsafe(no_mangle)]
#[unsafe(link_section = "cgroup/dev")]
#vis fn #fn_name(ctx: *mut ::aya_ebpf::bindings::bpf_cgroup_dev_ctx) -> i32 {
return #fn_name(::aya_ebpf::programs::DeviceContext::new(ctx));
#item
}
})
}
}
}
@ -49,10 +54,10 @@ mod tests {
),
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "cgroup/dev"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "cgroup/dev")]
fn foo(ctx: *mut ::aya_ebpf::bindings::bpf_cgroup_dev_ctx) -> i32 {
return foo(::aya_ebpf::programs::DeviceContext::new(ctx));

@ -1,48 +1,52 @@
use std::borrow::Cow;
use proc_macro2::TokenStream;
use proc_macro_error::abort;
use proc_macro2_diagnostics::{Diagnostic, SpanDiagnosticExt as _};
use quote::quote;
use syn::{Ident, ItemFn, Result};
use syn::{Ident, ItemFn};
pub(crate) struct CgroupSkb {
item: ItemFn,
attach_type: Option<String>,
attach_type: Option<Ident>,
}
impl CgroupSkb {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<CgroupSkb> {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Self, Diagnostic> {
let item: ItemFn = syn::parse2(item)?;
let mut attach_type = None;
if !attrs.is_empty() {
let attach_type = if attrs.is_empty() {
None
} else {
let ident: Ident = syn::parse2(attrs)?;
match ident.to_string().as_str() {
"ingress" | "egress" => (),
_ => abort!(ident, "invalid attach type"),
if ident != "ingress" && ident != "egress" {
return Err(ident.span().error("invalid attach type"));
}
attach_type = Some(ident.to_string());
}
Ok(CgroupSkb { item, attach_type })
Some(ident)
};
Ok(Self { item, attach_type })
}
pub(crate) fn expand(&self) -> Result<TokenStream> {
let section_name: Cow<'_, _> = if self.attach_type.is_some() {
format!("cgroup_skb/{}", self.attach_type.as_ref().unwrap()).into()
} else {
"cgroup/skb".into()
pub(crate) fn expand(&self) -> TokenStream {
let Self { item, attach_type } = self;
let ItemFn {
attrs: _,
vis,
sig,
block: _,
} = item;
let section_name: Cow<'_, _> = match attach_type {
Some(attach_type) => format!("cgroup_skb/{attach_type}").into(),
None => "cgroup/skb".into(),
};
let fn_vis = &self.item.vis;
let fn_name = self.item.sig.ident.clone();
let item = &self.item;
Ok(quote! {
#[no_mangle]
#[link_section = #section_name]
#fn_vis fn #fn_name(ctx: *mut ::aya_ebpf::bindings::__sk_buff) -> i32 {
let fn_name = &sig.ident;
quote! {
#[unsafe(no_mangle)]
#[unsafe(link_section = #section_name)]
#vis fn #fn_name(ctx: *mut ::aya_ebpf::bindings::__sk_buff) -> i32 {
return #fn_name(::aya_ebpf::programs::SkBuffContext::new(ctx));
#item
}
})
}
}
}
@ -63,10 +67,10 @@ mod tests {
),
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "cgroup/skb"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "cgroup/skb")]
fn foo(ctx: *mut ::aya_ebpf::bindings::__sk_buff) -> i32 {
return foo(::aya_ebpf::programs::SkBuffContext::new(ctx));
@ -89,10 +93,10 @@ mod tests {
),
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "cgroup_skb/egress"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "cgroup_skb/egress")]
fn foo(ctx: *mut ::aya_ebpf::bindings::__sk_buff) -> i32 {
return foo(::aya_ebpf::programs::SkBuffContext::new(ctx));
@ -115,10 +119,10 @@ mod tests {
),
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "cgroup_skb/ingress"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "cgroup_skb/ingress")]
fn foo(ctx: *mut ::aya_ebpf::bindings::__sk_buff) -> i32 {
return foo(::aya_ebpf::programs::SkBuffContext::new(ctx));
@ -141,10 +145,10 @@ mod tests {
),
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "cgroup_skb/egress"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "cgroup_skb/egress")]
fn foo(ctx: *mut ::aya_ebpf::bindings::__sk_buff) -> i32 {
return foo(::aya_ebpf::programs::SkBuffContext::new(ctx));
@ -167,10 +171,10 @@ mod tests {
),
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "cgroup_skb/egress"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "cgroup_skb/egress")]
pub fn foo(ctx: *mut ::aya_ebpf::bindings::__sk_buff) -> i32 {
return foo(::aya_ebpf::programs::SkBuffContext::new(ctx));
@ -193,10 +197,10 @@ mod tests {
),
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "cgroup_skb/egress"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "cgroup_skb/egress")]
pub(crate) fn foo(ctx: *mut ::aya_ebpf::bindings::__sk_buff) -> i32 {
return foo(::aya_ebpf::programs::SkBuffContext::new(ctx));

@ -1,46 +1,51 @@
use std::borrow::Cow;
use proc_macro2::TokenStream;
use proc_macro_error::abort;
use proc_macro2_diagnostics::{Diagnostic, SpanDiagnosticExt as _};
use quote::quote;
use syn::{Ident, ItemFn, Result};
use syn::{Ident, ItemFn, spanned::Spanned as _};
pub(crate) struct CgroupSock {
item: ItemFn,
attach_type: String,
attach_type: Ident,
}
impl CgroupSock {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<CgroupSock> {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Self, Diagnostic> {
if attrs.is_empty() {
abort!(attrs, "missing attach type")
return Err(attrs.span().error("missing attach type"));
}
let item: ItemFn = syn::parse2(item)?;
let attach_type: Ident = syn::parse2(attrs)?;
match attach_type.to_string().as_str() {
"post_bind4" | "post_bind6" | "sock_create" | "sock_release" => (),
_ => abort!(attach_type, "invalid attach type"),
if attach_type != "post_bind4"
&& attach_type != "post_bind6"
&& attach_type != "sock_create"
&& attach_type != "sock_release"
{
return Err(attach_type.span().error("invalid attach type"));
}
Ok(CgroupSock {
item,
attach_type: attach_type.to_string(),
})
Ok(Self { item, attach_type })
}
pub(crate) fn expand(&self) -> Result<TokenStream> {
let section_name: Cow<'_, _> = format!("cgroup/{}", self.attach_type).into();
let fn_vis = &self.item.vis;
let fn_name = self.item.sig.ident.clone();
let item = &self.item;
Ok(quote! {
#[no_mangle]
#[link_section = #section_name]
#fn_vis fn #fn_name(ctx: *mut ::aya_ebpf::bindings::bpf_sock) -> i32 {
pub(crate) fn expand(&self) -> TokenStream {
let Self { item, attach_type } = self;
let ItemFn {
attrs: _,
vis,
sig,
block: _,
} = item;
let section_name: Cow<'_, _> = format!("cgroup/{attach_type}").into();
let fn_name = &sig.ident;
quote! {
#[unsafe(no_mangle)]
#[unsafe(link_section = #section_name)]
#vis fn #fn_name(ctx: *mut ::aya_ebpf::bindings::bpf_sock) -> i32 {
return #fn_name(::aya_ebpf::programs::SockContext::new(ctx));
#item
}
})
}
}
}
@ -61,10 +66,10 @@ mod tests {
),
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "cgroup/post_bind4"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "cgroup/post_bind4")]
fn foo(ctx: *mut ::aya_ebpf::bindings::bpf_sock) -> i32 {
return foo(::aya_ebpf::programs::SockContext::new(ctx));
@ -87,10 +92,10 @@ mod tests {
),
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "cgroup/post_bind6"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "cgroup/post_bind6")]
fn foo(ctx: *mut ::aya_ebpf::bindings::bpf_sock) -> i32 {
return foo(::aya_ebpf::programs::SockContext::new(ctx));
@ -112,10 +117,10 @@ mod tests {
),
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "cgroup/sock_create"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "cgroup/sock_create")]
fn foo(ctx: *mut ::aya_ebpf::bindings::bpf_sock) -> i32 {
return foo(::aya_ebpf::programs::SockContext::new(ctx));
@ -137,10 +142,10 @@ mod tests {
),
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "cgroup/sock_release"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "cgroup/sock_release")]
fn foo(ctx: *mut ::aya_ebpf::bindings::bpf_sock) -> i32 {
return foo(::aya_ebpf::programs::SockContext::new(ctx));

@ -1,48 +1,59 @@
use std::borrow::Cow;
use proc_macro2::TokenStream;
use proc_macro_error::abort;
use proc_macro2_diagnostics::{Diagnostic, SpanDiagnosticExt as _};
use quote::quote;
use syn::{Ident, ItemFn, Result};
use syn::{Ident, ItemFn, spanned::Spanned as _};
pub(crate) struct CgroupSockAddr {
item: ItemFn,
attach_type: String,
attach_type: Ident,
}
impl CgroupSockAddr {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Self> {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Self, Diagnostic> {
if attrs.is_empty() {
abort!(attrs, "missing attach type")
return Err(attrs.span().error("missing attach type"));
}
let item = syn::parse2(item)?;
let attach_type: Ident = syn::parse2(attrs)?;
match attach_type.to_string().as_str() {
"connect4" | "connect6" | "bind4" | "bind6" | "getpeername4" | "getpeername6"
| "getsockname4" | "getsockname6" | "sendmsg4" | "sendmsg6" | "recvmsg4"
| "recvmsg6" => (),
_ => abort!(attach_type, "invalid attach type"),
if attach_type != "connect4"
&& attach_type != "connect6"
&& attach_type != "bind4"
&& attach_type != "bind6"
&& attach_type != "getpeername4"
&& attach_type != "getpeername6"
&& attach_type != "getsockname4"
&& attach_type != "getsockname6"
&& attach_type != "sendmsg4"
&& attach_type != "sendmsg6"
&& attach_type != "recvmsg4"
&& attach_type != "recvmsg6"
{
return Err(attach_type.span().error("invalid attach type"));
}
Ok(CgroupSockAddr {
item,
attach_type: attach_type.to_string(),
})
Ok(Self { item, attach_type })
}
pub(crate) fn expand(&self) -> Result<TokenStream> {
let section_name: Cow<'_, _> = format!("cgroup/{}", self.attach_type).into();
let fn_vis = &self.item.vis;
let fn_name = self.item.sig.ident.clone();
let item = &self.item;
Ok(quote! {
#[no_mangle]
#[link_section = #section_name]
#fn_vis fn #fn_name(ctx: *mut ::aya_ebpf::bindings::bpf_sock_addr) -> i32 {
pub(crate) fn expand(&self) -> TokenStream {
let Self { item, attach_type } = self;
let ItemFn {
attrs: _,
vis,
sig,
block: _,
} = item;
let section_name: Cow<'_, _> = format!("cgroup/{attach_type}").into();
let fn_name = &sig.ident;
quote! {
#[unsafe(no_mangle)]
#[unsafe(link_section = #section_name)]
#vis fn #fn_name(ctx: *mut ::aya_ebpf::bindings::bpf_sock_addr) -> i32 {
return #fn_name(::aya_ebpf::programs::SockAddrContext::new(ctx));
#item
}
})
}
}
}
@ -63,10 +74,10 @@ mod tests {
),
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "cgroup/connect4"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "cgroup/connect4")]
fn foo(ctx: *mut ::aya_ebpf::bindings::bpf_sock_addr) -> i32 {
return foo(::aya_ebpf::programs::SockAddrContext::new(ctx));
@ -89,10 +100,10 @@ mod tests {
),
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "cgroup/connect6"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "cgroup/connect6")]
fn foo(ctx: *mut ::aya_ebpf::bindings::bpf_sock_addr) -> i32 {
return foo(::aya_ebpf::programs::SockAddrContext::new(ctx));
@ -115,10 +126,10 @@ mod tests {
),
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "cgroup/bind4"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "cgroup/bind4")]
fn foo(ctx: *mut ::aya_ebpf::bindings::bpf_sock_addr) -> i32 {
return foo(::aya_ebpf::programs::SockAddrContext::new(ctx));
@ -141,10 +152,10 @@ mod tests {
),
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "cgroup/bind6"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "cgroup/bind6")]
fn foo(ctx: *mut ::aya_ebpf::bindings::bpf_sock_addr) -> i32 {
return foo(::aya_ebpf::programs::SockAddrContext::new(ctx));
@ -167,10 +178,10 @@ mod tests {
),
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "cgroup/getpeername4"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "cgroup/getpeername4")]
fn foo(ctx: *mut ::aya_ebpf::bindings::bpf_sock_addr) -> i32 {
return foo(::aya_ebpf::programs::SockAddrContext::new(ctx));
@ -193,10 +204,10 @@ mod tests {
),
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "cgroup/getpeername6"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "cgroup/getpeername6")]
fn foo(ctx: *mut ::aya_ebpf::bindings::bpf_sock_addr) -> i32 {
return foo(::aya_ebpf::programs::SockAddrContext::new(ctx));
@ -219,10 +230,10 @@ mod tests {
),
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "cgroup/getsockname4"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "cgroup/getsockname4")]
fn foo(ctx: *mut ::aya_ebpf::bindings::bpf_sock_addr) -> i32 {
return foo(::aya_ebpf::programs::SockAddrContext::new(ctx));
@ -245,10 +256,10 @@ mod tests {
),
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "cgroup/getsockname6"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "cgroup/getsockname6")]
fn foo(ctx: *mut ::aya_ebpf::bindings::bpf_sock_addr) -> i32 {
return foo(::aya_ebpf::programs::SockAddrContext::new(ctx));
@ -271,10 +282,10 @@ mod tests {
),
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "cgroup/sendmsg4"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "cgroup/sendmsg4")]
fn foo(ctx: *mut ::aya_ebpf::bindings::bpf_sock_addr) -> i32 {
return foo(::aya_ebpf::programs::SockAddrContext::new(ctx));
@ -297,10 +308,10 @@ mod tests {
),
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "cgroup/sendmsg6"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "cgroup/sendmsg6")]
fn foo(ctx: *mut ::aya_ebpf::bindings::bpf_sock_addr) -> i32 {
return foo(::aya_ebpf::programs::SockAddrContext::new(ctx));
@ -323,10 +334,10 @@ mod tests {
),
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "cgroup/recvmsg4"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "cgroup/recvmsg4")]
fn foo(ctx: *mut ::aya_ebpf::bindings::bpf_sock_addr) -> i32 {
return foo(::aya_ebpf::programs::SockAddrContext::new(ctx));
@ -349,10 +360,10 @@ mod tests {
),
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "cgroup/recvmsg6"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "cgroup/recvmsg6")]
fn foo(ctx: *mut ::aya_ebpf::bindings::bpf_sock_addr) -> i32 {
return foo(::aya_ebpf::programs::SockAddrContext::new(ctx));

@ -1,46 +1,47 @@
use std::borrow::Cow;
use proc_macro2::TokenStream;
use proc_macro_error::abort;
use proc_macro2_diagnostics::{Diagnostic, SpanDiagnosticExt as _};
use quote::quote;
use syn::{Ident, ItemFn, Result};
use syn::{Ident, ItemFn, spanned::Spanned as _};
pub(crate) struct CgroupSockopt {
item: ItemFn,
attach_type: String,
attach_type: Ident,
}
impl CgroupSockopt {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<CgroupSockopt> {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Self, Diagnostic> {
if attrs.is_empty() {
abort!(attrs, "expected attach type");
return Err(attrs.span().error("missing attach type"));
}
let item = syn::parse2(item)?;
let attach_type: Ident = syn::parse2(attrs)?;
match attach_type.to_string().as_str() {
"getsockopt" | "setsockopt" => (),
_ => abort!(attach_type, "invalid attach type"),
if attach_type != "getsockopt" && attach_type != "setsockopt" {
return Err(attach_type.span().error("invalid attach type"));
}
Ok(CgroupSockopt {
item,
attach_type: attach_type.to_string(),
})
Ok(Self { item, attach_type })
}
pub(crate) fn expand(&self) -> Result<TokenStream> {
let section_name: Cow<'_, _> = format!("cgroup/{}", self.attach_type).into();
let fn_vis = &self.item.vis;
let fn_name = self.item.sig.ident.clone();
let item = &self.item;
Ok(quote! {
#[no_mangle]
#[link_section = #section_name]
#fn_vis fn #fn_name(ctx: *mut ::aya_ebpf::bindings::bpf_sockopt) -> i32 {
pub(crate) fn expand(&self) -> TokenStream {
let Self { item, attach_type } = self;
let ItemFn {
attrs: _,
vis,
sig,
block: _,
} = item;
let section_name: Cow<'_, _> = format!("cgroup/{attach_type}").into();
let fn_name = &sig.ident;
quote! {
#[unsafe(no_mangle)]
#[unsafe(link_section = #section_name)]
#vis fn #fn_name(ctx: *mut ::aya_ebpf::bindings::bpf_sockopt) -> i32 {
return #fn_name(::aya_ebpf::programs::SockoptContext::new(ctx));
#item
}
})
}
}
}
@ -61,10 +62,10 @@ mod tests {
),
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote!(
#[no_mangle]
#[link_section = "cgroup/getsockopt"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "cgroup/getsockopt")]
fn foo(ctx: *mut ::aya_ebpf::bindings::bpf_sockopt) -> i32 {
return foo(::aya_ebpf::programs::SockoptContext::new(ctx));
@ -87,10 +88,10 @@ mod tests {
),
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote!(
#[no_mangle]
#[link_section = "cgroup/setsockopt"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "cgroup/setsockopt")]
fn foo(ctx: *mut ::aya_ebpf::bindings::bpf_sockopt) -> i32 {
return foo(::aya_ebpf::programs::SockoptContext::new(ctx));

@ -1,34 +1,39 @@
use proc_macro2::TokenStream;
use proc_macro_error::abort;
use proc_macro2_diagnostics::{Diagnostic, SpanDiagnosticExt as _};
use quote::quote;
use syn::{ItemFn, Result};
use syn::{ItemFn, spanned::Spanned as _};
pub(crate) struct CgroupSysctl {
item: ItemFn,
}
impl CgroupSysctl {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Self> {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Self, Diagnostic> {
if !attrs.is_empty() {
abort!(attrs, "unexpected attribute")
return Err(attrs.span().error("unexpected attribute"));
}
let item = syn::parse2(item)?;
Ok(CgroupSysctl { item })
Ok(Self { item })
}
pub(crate) fn expand(&self) -> Result<TokenStream> {
let fn_vis = &self.item.vis;
let fn_name = self.item.sig.ident.clone();
let item = &self.item;
Ok(quote! {
#[no_mangle]
#[link_section = "cgroup/sysctl"]
#fn_vis fn #fn_name(ctx: *mut ::aya_ebpf::bindings::bpf_sysctl) -> i32 {
pub(crate) fn expand(&self) -> TokenStream {
let Self { item } = self;
let ItemFn {
attrs: _,
vis,
sig,
block: _,
} = item;
let fn_name = &sig.ident;
quote! {
#[unsafe(no_mangle)]
#[unsafe(link_section = "cgroup/sysctl")]
#vis fn #fn_name(ctx: *mut ::aya_ebpf::bindings::bpf_sysctl) -> i32 {
return #fn_name(::aya_ebpf::programs::SysctlContext::new(ctx));
#item
}
})
}
}
}
@ -49,10 +54,10 @@ mod tests {
),
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "cgroup/sysctl"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "cgroup/sysctl")]
fn foo(ctx: *mut ::aya_ebpf::bindings::bpf_sysctl) -> i32 {
return foo(::aya_ebpf::programs::SysctlContext::new(ctx));

@ -13,39 +13,48 @@ pub(crate) struct FEntry {
}
impl FEntry {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<FEntry> {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Self> {
let item = syn::parse2(item)?;
let mut args = syn::parse2(attrs)?;
let function = pop_string_arg(&mut args, "function");
let sleepable = pop_bool_arg(&mut args, "sleepable");
err_on_unknown_args(&args)?;
Ok(FEntry {
Ok(Self {
item,
function,
sleepable,
})
}
pub(crate) fn expand(&self) -> Result<TokenStream> {
let section_prefix = if self.sleepable { "fentry.s" } else { "fentry" };
let section_name: Cow<'_, _> = if let Some(function) = &self.function {
format!("{}/{}", section_prefix, function).into()
pub(crate) fn expand(&self) -> TokenStream {
let Self {
item,
function,
sleepable,
} = self;
let ItemFn {
attrs: _,
vis,
sig,
block: _,
} = item;
let section_prefix = if *sleepable { "fentry.s" } else { "fentry" };
let section_name: Cow<'_, _> = if let Some(function) = function {
format!("{section_prefix}/{function}").into()
} else {
section_prefix.into()
};
let fn_vis = &self.item.vis;
let fn_name = self.item.sig.ident.clone();
let item = &self.item;
Ok(quote! {
#[no_mangle]
#[link_section = #section_name]
#fn_vis fn #fn_name(ctx: *mut ::core::ffi::c_void) -> i32 {
let fn_name = &sig.ident;
quote! {
#[unsafe(no_mangle)]
#[unsafe(link_section = #section_name)]
#vis fn #fn_name(ctx: *mut ::core::ffi::c_void) -> i32 {
let _ = #fn_name(::aya_ebpf::programs::FEntryContext::new(ctx));
return 0;
#item
}
})
}
}
}
@ -66,10 +75,10 @@ mod tests {
},
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "fentry"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "fentry")]
fn sys_clone(ctx: *mut ::core::ffi::c_void) -> i32 {
let _ = sys_clone(::aya_ebpf::programs::FEntryContext::new(ctx));
return 0;
@ -95,10 +104,10 @@ mod tests {
},
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "fentry/sys_clone"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "fentry/sys_clone")]
fn sys_clone(ctx: *mut ::core::ffi::c_void) -> i32 {
let _ = sys_clone(::aya_ebpf::programs::FEntryContext::new(ctx));
return 0;
@ -124,10 +133,10 @@ mod tests {
},
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "fentry.s"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "fentry.s")]
fn sys_clone(ctx: *mut ::core::ffi::c_void) -> i32 {
let _ = sys_clone(::aya_ebpf::programs::FEntryContext::new(ctx));
return 0;

@ -13,39 +13,48 @@ pub(crate) struct FExit {
}
impl FExit {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<FExit> {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Self> {
let item = syn::parse2(item)?;
let mut args = syn::parse2(attrs)?;
let function = pop_string_arg(&mut args, "function");
let sleepable = pop_bool_arg(&mut args, "sleepable");
err_on_unknown_args(&args)?;
Ok(FExit {
Ok(Self {
item,
function,
sleepable,
})
}
pub(crate) fn expand(&self) -> Result<TokenStream> {
let section_prefix = if self.sleepable { "fexit.s" } else { "fexit" };
let section_name: Cow<'_, _> = if let Some(function) = &self.function {
format!("{}/{}", section_prefix, function).into()
pub(crate) fn expand(&self) -> TokenStream {
let Self {
item,
function,
sleepable,
} = self;
let ItemFn {
attrs: _,
vis,
sig,
block: _,
} = item;
let section_prefix = if *sleepable { "fexit.s" } else { "fexit" };
let section_name: Cow<'_, _> = if let Some(function) = function {
format!("{section_prefix}/{function}").into()
} else {
section_prefix.into()
};
let fn_vis = &self.item.vis;
let fn_name = self.item.sig.ident.clone();
let item = &self.item;
Ok(quote! {
#[no_mangle]
#[link_section = #section_name]
#fn_vis fn #fn_name(ctx: *mut ::core::ffi::c_void) -> i32 {
let fn_name = &sig.ident;
quote! {
#[unsafe(no_mangle)]
#[unsafe(link_section = #section_name)]
#vis fn #fn_name(ctx: *mut ::core::ffi::c_void) -> i32 {
let _ = #fn_name(::aya_ebpf::programs::FExitContext::new(ctx));
return 0;
#item
}
})
}
}
}
@ -66,10 +75,10 @@ mod tests {
},
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "fexit"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "fexit")]
fn sys_clone(ctx: *mut ::core::ffi::c_void) -> i32 {
let _ = sys_clone(::aya_ebpf::programs::FExitContext::new(ctx));
return 0;
@ -95,10 +104,10 @@ mod tests {
},
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "fexit/sys_clone"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "fexit/sys_clone")]
fn sys_clone(ctx: *mut ::core::ffi::c_void) -> i32 {
let _ = sys_clone(::aya_ebpf::programs::FExitContext::new(ctx));
return 0;
@ -124,10 +133,10 @@ mod tests {
},
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "fexit.s/sys_clone"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "fexit.s/sys_clone")]
fn sys_clone(ctx: *mut ::core::ffi::c_void) -> i32 {
let _ = sys_clone(::aya_ebpf::programs::FExitContext::new(ctx));
return 0;

@ -0,0 +1,71 @@
use proc_macro2::TokenStream;
use proc_macro2_diagnostics::{Diagnostic, SpanDiagnosticExt as _};
use quote::quote;
use syn::{ItemFn, spanned::Spanned as _};
/// Backing model for the `#[flow_dissector]` attribute macro: holds the
/// user-written function so `expand` can wrap it in the eBPF entry point.
pub(crate) struct FlowDissector {
    // The user's function item, re-emitted verbatim inside the generated wrapper.
    item: ItemFn,
}
impl FlowDissector {
    /// Parses the macro input. `#[flow_dissector]` takes no arguments, so any
    /// attribute tokens produce a "unexpected attribute" diagnostic; the
    /// annotated item must parse as a function (`ItemFn`).
    pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Self, Diagnostic> {
        if !attrs.is_empty() {
            return Err(attrs.span().error("unexpected attribute"));
        }
        let item = syn::parse2(item)?;
        Ok(Self { item })
    }

    /// Generates the exported eBPF entry point.
    ///
    /// The wrapper keeps the user's visibility and function name, is placed in
    /// the `flow_dissector` ELF section with mangling disabled, and takes the
    /// raw `*mut __sk_buff` context expected by the kernel. The user's original
    /// function (`#item`) is nested *inside* the wrapper body, after the
    /// `return`, so the outer `#fn_name` call resolves to it without polluting
    /// the surrounding scope.
    pub(crate) fn expand(&self) -> TokenStream {
        let Self { item } = self;
        // Only `vis` and `sig` are used for the wrapper; the attrs and body are
        // carried along untouched via `#item`.
        let ItemFn {
            attrs: _,
            vis,
            sig,
            block: _,
        } = item;
        let fn_name = &sig.ident;
        quote! {
            #[unsafe(no_mangle)]
            #[unsafe(link_section = "flow_dissector")]
            #vis fn #fn_name(ctx: *mut ::aya_ebpf::bindings::__sk_buff) -> u32 {
                return #fn_name(::aya_ebpf::programs::FlowDissectorContext::new(ctx));
                #item
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use syn::parse_quote;

    use super::*;

    /// Golden test: expansion must match token-for-token, including the
    /// user's original function nested inside the generated wrapper.
    #[test]
    fn test_flow_dissector() {
        let prog = FlowDissector::parse(
            parse_quote! {},
            parse_quote! {
                fn prog(ctx: &mut ::aya_ebpf::programs::FlowDissectorContext) -> u32 {
                    0
                }
            },
        )
        .unwrap();
        let expanded = prog.expand();
        let expected = quote! {
            #[unsafe(no_mangle)]
            #[unsafe(link_section = "flow_dissector")]
            fn prog(ctx: *mut ::aya_ebpf::bindings::__sk_buff) -> u32 {
                return prog(::aya_ebpf::programs::FlowDissectorContext::new(ctx));
                fn prog(ctx: &mut ::aya_ebpf::programs::FlowDissectorContext) -> u32 {
                    0
                }
            }
        };
        // Compare as strings: proc_macro2 token streams have no PartialEq.
        assert_eq!(expected.to_string(), expanded.to_string());
    }
}

@ -1,12 +1,12 @@
use std::borrow::Cow;
use proc_macro2::TokenStream;
use proc_macro2_diagnostics::{Diagnostic, SpanDiagnosticExt as _};
use quote::quote;
use syn::{ItemFn, Result};
use syn::{ItemFn, spanned::Spanned as _};
use crate::args::{err_on_unknown_args, pop_string_arg};
#[allow(clippy::enum_variant_names)]
#[derive(Debug, Copy, Clone)]
pub(crate) enum KProbeKind {
KProbe,
@ -31,14 +31,23 @@ pub(crate) struct KProbe {
}
impl KProbe {
pub(crate) fn parse(kind: KProbeKind, attrs: TokenStream, item: TokenStream) -> Result<KProbe> {
pub(crate) fn parse(
kind: KProbeKind,
attrs: TokenStream,
item: TokenStream,
) -> Result<Self, Diagnostic> {
let item = syn::parse2(item)?;
let span = attrs.span();
let mut args = syn::parse2(attrs)?;
let function = pop_string_arg(&mut args, "function");
let offset = pop_string_arg(&mut args, "offset").map(|v| v.parse::<u64>().unwrap());
let offset = pop_string_arg(&mut args, "offset")
.as_deref()
.map(str::parse)
.transpose()
.map_err(|err| span.error(format!("failed to parse `offset` argument: {err}")))?;
err_on_unknown_args(&args)?;
Ok(KProbe {
Ok(Self {
kind,
item,
function,
@ -46,39 +55,42 @@ impl KProbe {
})
}
pub(crate) fn expand(&self) -> Result<TokenStream> {
let section_name: Cow<'_, _> = if self.function.is_some() && self.offset.is_some() {
format!(
"{}/{}+{}",
self.kind,
self.function.as_ref().unwrap(),
self.offset.unwrap()
)
.into()
} else if self.function.is_some() {
format!("{}/{}", self.kind, self.function.as_ref().unwrap()).into()
} else {
format!("{}", self.kind).into()
pub(crate) fn expand(&self) -> TokenStream {
let Self {
kind,
function,
offset,
item,
} = self;
let ItemFn {
attrs: _,
vis,
sig,
block: _,
} = item;
let section_name: Cow<'_, _> = match function {
None => self.kind.to_string().into(),
Some(function) => match offset {
None => format!("{kind}/{function}").into(),
Some(offset) => format!("{kind}/{function}+{offset}").into(),
},
};
let probe_type = if section_name.as_ref().starts_with("kprobe") {
quote! { ProbeContext }
} else {
quote! { RetProbeContext }
};
let fn_vis = &self.item.vis;
let fn_name = self.item.sig.ident.clone();
let item = &self.item;
Ok(quote! {
#[no_mangle]
#[link_section = #section_name]
#fn_vis fn #fn_name(ctx: *mut ::core::ffi::c_void) -> u32 {
let fn_name = &sig.ident;
quote! {
#[unsafe(no_mangle)]
#[unsafe(link_section = #section_name)]
#vis fn #fn_name(ctx: *mut ::core::ffi::c_void) -> u32 {
let _ = #fn_name(::aya_ebpf::programs::#probe_type::new(ctx));
return 0;
#item
}
})
}
}
}
@ -101,10 +113,10 @@ mod tests {
)
.unwrap();
assert_eq!(
kprobe.expand().unwrap().to_string(),
kprobe.expand().to_string(),
quote! {
#[no_mangle]
#[link_section = "kprobe"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "kprobe")]
fn foo(ctx: *mut ::core::ffi::c_void) -> u32 {
let _ = foo(::aya_ebpf::programs::ProbeContext::new(ctx));
return 0;
@ -133,10 +145,10 @@ mod tests {
)
.unwrap();
assert_eq!(
kprobe.expand().unwrap().to_string(),
kprobe.expand().to_string(),
quote! {
#[no_mangle]
#[link_section = "kprobe/fib_lookup"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "kprobe/fib_lookup")]
fn foo(ctx: *mut ::core::ffi::c_void) -> u32 {
let _ = foo(::aya_ebpf::programs::ProbeContext::new(ctx));
return 0;
@ -166,10 +178,10 @@ mod tests {
)
.unwrap();
assert_eq!(
kprobe.expand().unwrap().to_string(),
kprobe.expand().to_string(),
quote! {
#[no_mangle]
#[link_section = "kprobe/fib_lookup+10"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "kprobe/fib_lookup+10")]
fn foo(ctx: *mut ::core::ffi::c_void) -> u32 {
let _ = foo(::aya_ebpf::programs::ProbeContext::new(ctx));
return 0;
@ -196,10 +208,10 @@ mod tests {
)
.unwrap();
assert_eq!(
kprobe.expand().unwrap().to_string(),
kprobe.expand().to_string(),
quote! {
#[no_mangle]
#[link_section = "kretprobe"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "kretprobe")]
fn foo(ctx: *mut ::core::ffi::c_void) -> u32 {
let _ = foo(::aya_ebpf::programs::RetProbeContext::new(ctx));
return 0;

@ -1,4 +1,7 @@
#![cfg_attr(test, expect(unused_crate_dependencies, reason = "used in doctests"))]
pub(crate) mod args;
mod btf_map;
mod btf_tracepoint;
mod cgroup_device;
mod cgroup_skb;
@ -8,8 +11,10 @@ mod cgroup_sockopt;
mod cgroup_sysctl;
mod fentry;
mod fexit;
mod flow_dissector;
mod kprobe;
mod lsm;
mod lsm_cgroup;
mod map;
mod perf_event;
mod raw_tracepoint;
@ -23,6 +28,7 @@ mod tracepoint;
mod uprobe;
mod xdp;
use btf_map::BtfMap;
use btf_tracepoint::BtfTracePoint;
use cgroup_device::CgroupDevice;
use cgroup_skb::CgroupSkb;
@ -32,12 +38,13 @@ use cgroup_sockopt::CgroupSockopt;
use cgroup_sysctl::CgroupSysctl;
use fentry::FEntry;
use fexit::FExit;
use flow_dissector::FlowDissector;
use kprobe::{KProbe, KProbeKind};
use lsm::Lsm;
use lsm_cgroup::LsmCgroup;
use map::Map;
use perf_event::PerfEvent;
use proc_macro::TokenStream;
use proc_macro_error::{abort, proc_macro_error};
use raw_tracepoint::RawTracePoint;
use sk_lookup::SkLookup;
use sk_msg::SkMsg;
@ -48,83 +55,78 @@ use tc::SchedClassifier;
use tracepoint::TracePoint;
use uprobe::{UProbe, UProbeKind};
use xdp::Xdp;
#[proc_macro_error]
#[proc_macro_attribute]
pub fn btf_map(attrs: TokenStream, item: TokenStream) -> TokenStream {
    // Parse the annotated static; a parse failure is surfaced to the user as a
    // compile error instead of panicking the macro.
    let expanded = BtfMap::parse(attrs.into(), item.into())
        .map_or_else(|err| err.into_compile_error(), |prog| prog.expand());
    expanded.into()
}
#[proc_macro_attribute]
pub fn map(attrs: TokenStream, item: TokenStream) -> TokenStream {
match Map::parse(attrs.into(), item.into()) {
Ok(prog) => prog
.expand()
.unwrap_or_else(|err| abort!(err.span(), "{}", err))
.into(),
Err(err) => abort!(err.span(), "{}", err),
Ok(prog) => prog.expand(),
Err(err) => err.into_compile_error(),
}
.into()
}
#[proc_macro_error]
#[proc_macro_attribute]
pub fn kprobe(attrs: TokenStream, item: TokenStream) -> TokenStream {
match KProbe::parse(KProbeKind::KProbe, attrs.into(), item.into()) {
Ok(prog) => prog
.expand()
.unwrap_or_else(|err| abort!(err.span(), "{}", err))
.into(),
Err(err) => abort!(err.span(), "{}", err),
Ok(prog) => prog.expand(),
Err(err) => err.emit_as_expr_tokens(),
}
.into()
}
#[proc_macro_error]
#[proc_macro_attribute]
pub fn kretprobe(attrs: TokenStream, item: TokenStream) -> TokenStream {
match KProbe::parse(KProbeKind::KRetProbe, attrs.into(), item.into()) {
Ok(prog) => prog
.expand()
.unwrap_or_else(|err| abort!(err.span(), "{}", err))
.into(),
Err(err) => abort!(err.span(), "{}", err),
Ok(prog) => prog.expand(),
Err(err) => err.emit_as_expr_tokens(),
}
.into()
}
#[proc_macro_error]
#[proc_macro_attribute]
pub fn uprobe(attrs: TokenStream, item: TokenStream) -> TokenStream {
match UProbe::parse(UProbeKind::UProbe, attrs.into(), item.into()) {
Ok(prog) => prog
.expand()
.unwrap_or_else(|err| abort!(err.span(), "{}", err))
.into(),
Err(err) => abort!(err.span(), "{}", err),
Ok(prog) => match prog.expand() {
Ok(tokens) => tokens,
Err(err) => err.emit_as_expr_tokens(),
},
Err(err) => err.emit_as_expr_tokens(),
}
.into()
}
#[proc_macro_error]
#[proc_macro_attribute]
pub fn uretprobe(attrs: TokenStream, item: TokenStream) -> TokenStream {
match UProbe::parse(UProbeKind::URetProbe, attrs.into(), item.into()) {
Ok(prog) => prog
.expand()
.unwrap_or_else(|err| abort!(err.span(), "{}", err))
.into(),
Err(err) => abort!(err.span(), "{}", err),
Ok(prog) => match prog.expand() {
Ok(tokens) => tokens,
Err(err) => err.emit_as_expr_tokens(),
},
Err(err) => err.emit_as_expr_tokens(),
}
.into()
}
#[proc_macro_error]
#[proc_macro_attribute]
pub fn sock_ops(attrs: TokenStream, item: TokenStream) -> TokenStream {
match SockOps::parse(attrs.into(), item.into()) {
Ok(prog) => prog
.expand()
.unwrap_or_else(|err| abort!(err.span(), "{}", err))
.into(),
Err(err) => abort!(err.span(), "{}", err),
Ok(prog) => prog.expand(),
Err(err) => err.emit_as_expr_tokens(),
}
.into()
}
#[proc_macro_error]
#[proc_macro_attribute]
pub fn sk_msg(attrs: TokenStream, item: TokenStream) -> TokenStream {
match SkMsg::parse(attrs.into(), item.into()) {
Ok(prog) => prog
.expand()
.unwrap_or_else(|err| abort!(err.span(), "{}", err))
.into(),
Err(err) => abort!(err.span(), "{}", err),
Ok(prog) => prog.expand(),
Err(err) => err.emit_as_expr_tokens(),
}
.into()
}
/// Marks a function as an eBPF XDP program that can be attached to a network interface.
@ -149,60 +151,46 @@ pub fn sk_msg(attrs: TokenStream, item: TokenStream) -> TokenStream {
/// XDP_PASS
/// }
/// ```
#[proc_macro_error]
#[proc_macro_attribute]
pub fn xdp(attrs: TokenStream, item: TokenStream) -> TokenStream {
match Xdp::parse(attrs.into(), item.into()) {
Ok(prog) => prog
.expand()
.unwrap_or_else(|err| abort!(err.span(), "{}", err))
.into(),
Err(err) => abort!(err.span(), "{}", err),
Ok(prog) => prog.expand(),
Err(err) => err.emit_as_expr_tokens(),
}
.into()
}
#[proc_macro_error]
#[proc_macro_attribute]
pub fn classifier(attrs: TokenStream, item: TokenStream) -> TokenStream {
match SchedClassifier::parse(attrs.into(), item.into()) {
Ok(prog) => prog
.expand()
.unwrap_or_else(|err| abort!(err.span(), "{}", err))
.into(),
Err(err) => abort!(err.span(), "{}", err),
Ok(prog) => prog.expand(),
Err(err) => err.emit_as_expr_tokens(),
}
.into()
}
#[proc_macro_attribute]
pub fn cgroup_sysctl(attrs: TokenStream, item: TokenStream) -> TokenStream {
match CgroupSysctl::parse(attrs.into(), item.into()) {
Ok(prog) => prog
.expand()
.unwrap_or_else(|err| abort!(err.span(), "{}", err))
.into(),
Err(err) => abort!(err.span(), "{}", err),
Ok(prog) => prog.expand(),
Err(err) => err.emit_as_expr_tokens(),
}
.into()
}
#[proc_macro_error]
#[proc_macro_attribute]
pub fn cgroup_sockopt(attrs: TokenStream, item: TokenStream) -> TokenStream {
match CgroupSockopt::parse(attrs.into(), item.into()) {
Ok(prog) => prog
.expand()
.unwrap_or_else(|err| abort!(err.span(), "{}", err))
.into(),
Err(err) => abort!(err.span(), "{}", err),
Ok(prog) => prog.expand(),
Err(err) => err.emit_as_expr_tokens(),
}
.into()
}
#[proc_macro_error]
#[proc_macro_attribute]
pub fn cgroup_skb(attrs: TokenStream, item: TokenStream) -> TokenStream {
match CgroupSkb::parse(attrs.into(), item.into()) {
Ok(prog) => prog
.expand()
.unwrap_or_else(|err| abort!(err.span(), "{}", err))
.into(),
Err(err) => abort!(err.span(), "{}", err),
Ok(prog) => prog.expand(),
Err(err) => err.emit_as_expr_tokens(),
}
.into()
}
/// Marks a function as a [`CgroupSockAddr`] eBPF program.
@ -228,62 +216,47 @@ pub fn cgroup_skb(attrs: TokenStream, item: TokenStream) -> TokenStream {
/// pub fn connect4(ctx: SockAddrContext) -> i32 {
/// match try_connect4(ctx) {
/// Ok(ret) => ret,
/// Err(ret) => match ret.try_into() {
/// Ok(rt) => rt,
/// Err(_) => 1,
/// },
/// Err(ret) => ret,
/// }
/// }
///
/// fn try_connect4(ctx: SockAddrContext) -> Result<i32, i64> {
/// fn try_connect4(ctx: SockAddrContext) -> Result<i32, i32> {
/// Ok(0)
/// }
/// ```
#[proc_macro_error]
#[proc_macro_attribute]
pub fn cgroup_sock_addr(attrs: TokenStream, item: TokenStream) -> TokenStream {
match CgroupSockAddr::parse(attrs.into(), item.into()) {
Ok(prog) => prog
.expand()
.unwrap_or_else(|err| abort!(err.span(), "{}", err))
.into(),
Err(err) => abort!(err.span(), "{}", err),
Ok(prog) => prog.expand(),
Err(err) => err.emit_as_expr_tokens(),
}
.into()
}
#[proc_macro_error]
#[proc_macro_attribute]
pub fn cgroup_sock(attrs: TokenStream, item: TokenStream) -> TokenStream {
match CgroupSock::parse(attrs.into(), item.into()) {
Ok(prog) => prog
.expand()
.unwrap_or_else(|err| abort!(err.span(), "{}", err))
.into(),
Err(err) => abort!(err.span(), "{}", err),
Ok(prog) => prog.expand(),
Err(err) => err.emit_as_expr_tokens(),
}
.into()
}
#[proc_macro_error]
#[proc_macro_attribute]
pub fn tracepoint(attrs: TokenStream, item: TokenStream) -> TokenStream {
match TracePoint::parse(attrs.into(), item.into()) {
Ok(prog) => prog
.expand()
.unwrap_or_else(|err| abort!(err.span(), "{}", err))
.into(),
Err(err) => abort!(err.span(), "{}", err),
Ok(prog) => prog.expand(),
Err(err) => err.emit_as_expr_tokens(),
}
.into()
}
#[proc_macro_error]
#[proc_macro_attribute]
pub fn perf_event(attrs: TokenStream, item: TokenStream) -> TokenStream {
match PerfEvent::parse(attrs.into(), item.into()) {
Ok(prog) => prog
.expand()
.unwrap_or_else(|err| abort!(err.span(), "{}", err))
.into(),
Err(err) => abort!(err.span(), "{}", err),
Ok(prog) => prog.expand(),
Err(err) => err.emit_as_expr_tokens(),
}
.into()
}
/// Marks a function as a raw tracepoint eBPF program that can be attached at a
@ -314,16 +287,13 @@ pub fn perf_event(attrs: TokenStream, item: TokenStream) -> TokenStream {
/// Ok(0)
/// }
/// ```
#[proc_macro_error]
#[proc_macro_attribute]
pub fn raw_tracepoint(attrs: TokenStream, item: TokenStream) -> TokenStream {
match RawTracePoint::parse(attrs.into(), item.into()) {
Ok(prog) => prog
.expand()
.unwrap_or_else(|err| abort!(err.span(), "{}", err))
.into(),
Err(err) => abort!(err.span(), "{}", err),
Ok(prog) => prog.expand(),
Err(err) => err.into_compile_error(),
}
.into()
}
/// Marks a function as an LSM program that can be attached to Linux LSM hooks.
@ -338,7 +308,7 @@ pub fn raw_tracepoint(attrs: TokenStream, item: TokenStream) -> TokenStream {
///
/// LSM probes require a kernel compiled with `CONFIG_BPF_LSM=y` and `CONFIG_DEBUG_INFO_BTF=y`.
/// In order for the probes to fire, you also need the BPF LSM to be enabled through your
/// kernel's boot paramters (like `lsm=lockdown,yama,bpf`).
/// kernel's boot parameters (like `lsm=lockdown,yama,bpf`).
///
/// # Minimum kernel version
///
@ -361,16 +331,56 @@ pub fn raw_tracepoint(attrs: TokenStream, item: TokenStream) -> TokenStream {
/// Ok(0)
/// }
/// ```
#[proc_macro_error]
#[proc_macro_attribute]
pub fn lsm(attrs: TokenStream, item: TokenStream) -> TokenStream {
match Lsm::parse(attrs.into(), item.into()) {
Ok(prog) => prog
.expand()
.unwrap_or_else(|err| abort!(err.span(), "{}", err))
.into(),
Err(err) => abort!(err.span(), "{}", err),
Ok(prog) => prog.expand(),
Err(err) => err.into_compile_error(),
}
.into()
}
/// Marks a function as an LSM program that can be attached to cgroups.
/// This program will only trigger for workloads in the attached cgroups.
/// Used to implement security policy and audit logging.
///
/// The hook name is the first and only argument to the macro.
///
/// LSM probes can be attached to the kernel's security hooks to implement mandatory
/// access control policy and security auditing.
///
/// LSM probes require a kernel compiled with `CONFIG_BPF_LSM=y` and `CONFIG_DEBUG_INFO_BTF=y`.
/// In order for the probes to fire, you also need the BPF LSM to be enabled through your
/// kernel's boot parameters (like `lsm=lockdown,yama,bpf`).
///
/// # Minimum kernel version
///
/// The minimum kernel version required to use this feature is 6.0.
///
/// # Examples
///
/// ```no_run
/// use aya_ebpf::{macros::lsm_cgroup, programs::LsmContext};
///
/// #[lsm_cgroup(hook = "file_open")]
/// pub fn file_open(ctx: LsmContext) -> i32 {
/// match unsafe { try_file_open(ctx) } {
/// Ok(ret) => ret,
/// Err(ret) => ret,
/// }
/// }
///
/// unsafe fn try_file_open(_ctx: LsmContext) -> Result<i32, i32> {
/// Err(0)
/// }
/// ```
#[proc_macro_attribute]
pub fn lsm_cgroup(attrs: TokenStream, item: TokenStream) -> TokenStream {
    // Delegate to the LsmCgroup model; parse errors become compile errors at
    // the macro call site.
    let expanded = LsmCgroup::parse(attrs.into(), item.into())
        .map_or_else(|err| err.into_compile_error(), |prog| prog.expand());
    expanded.into()
}
/// Marks a function as a [BTF-enabled raw tracepoint][1] eBPF program that can be attached at
@ -403,16 +413,13 @@ pub fn lsm(attrs: TokenStream, item: TokenStream) -> TokenStream {
/// ```
///
/// [1]: https://github.com/torvalds/linux/commit/9e15db66136a14cde3f35691f1d839d950118826
#[proc_macro_error]
#[proc_macro_attribute]
pub fn btf_tracepoint(attrs: TokenStream, item: TokenStream) -> TokenStream {
match BtfTracePoint::parse(attrs.into(), item.into()) {
Ok(prog) => prog
.expand()
.unwrap_or_else(|err| abort!(err.span(), "{}", err))
.into(),
Err(err) => abort!(err.span(), "{}", err),
Ok(prog) => prog.expand(),
Err(err) => err.into_compile_error(),
}
.into()
}
/// Marks a function as a SK_SKB Stream Parser eBPF program that can be attached
@ -430,7 +437,7 @@ pub fn btf_tracepoint(attrs: TokenStream, item: TokenStream) -> TokenStream {
///
///#[stream_parser]
///fn stream_parser(ctx: SkBuffContext) -> u32 {
/// match { try_stream_parser(ctx) } {
/// match try_stream_parser(ctx) {
/// Ok(ret) => ret,
/// Err(ret) => ret,
/// }
@ -440,7 +447,6 @@ pub fn btf_tracepoint(attrs: TokenStream, item: TokenStream) -> TokenStream {
/// Ok(ctx.len())
///}
/// ```
#[proc_macro_error]
#[proc_macro_attribute]
pub fn stream_parser(attrs: TokenStream, item: TokenStream) -> TokenStream {
sk_skb(SkSkbKind::StreamParser, attrs, item)
@ -461,7 +467,7 @@ pub fn stream_parser(attrs: TokenStream, item: TokenStream) -> TokenStream {
///
///#[stream_verdict]
///fn stream_verdict(ctx: SkBuffContext) -> u32 {
/// match { try_stream_verdict(ctx) } {
/// match try_stream_verdict(ctx) {
/// Ok(ret) => ret,
/// Err(ret) => ret,
/// }
@ -471,7 +477,6 @@ pub fn stream_parser(attrs: TokenStream, item: TokenStream) -> TokenStream {
/// Ok(sk_action::SK_PASS)
///}
/// ```
#[proc_macro_error]
#[proc_macro_attribute]
pub fn stream_verdict(attrs: TokenStream, item: TokenStream) -> TokenStream {
sk_skb(SkSkbKind::StreamVerdict, attrs, item)
@ -479,12 +484,10 @@ pub fn stream_verdict(attrs: TokenStream, item: TokenStream) -> TokenStream {
fn sk_skb(kind: SkSkbKind, attrs: TokenStream, item: TokenStream) -> TokenStream {
match SkSkb::parse(kind, attrs.into(), item.into()) {
Ok(prog) => prog
.expand()
.unwrap_or_else(|err| abort!(err.span(), "{}", err))
.into(),
Err(err) => abort!(err.span(), "{}", err),
Ok(prog) => prog.expand(),
Err(err) => err.emit_as_expr_tokens(),
}
.into()
}
/// Marks a function as a eBPF Socket Filter program that can be attached to
@ -501,19 +504,16 @@ fn sk_skb(kind: SkSkbKind, attrs: TokenStream, item: TokenStream) -> TokenStream
///
/// #[socket_filter]
/// pub fn accept_all(_ctx: SkBuffContext) -> i64 {
/// return 0
/// 0
/// }
/// ```
#[proc_macro_error]
#[proc_macro_attribute]
pub fn socket_filter(attrs: TokenStream, item: TokenStream) -> TokenStream {
match SocketFilter::parse(attrs.into(), item.into()) {
Ok(prog) => prog
.expand()
.unwrap_or_else(|err| abort!(err.span(), "{}", err))
.into(),
Err(err) => abort!(err.span(), "{}", err),
Ok(prog) => prog.expand(),
Err(err) => err.emit_as_expr_tokens(),
}
.into()
}
/// Marks a function as a fentry eBPF program that can be attached to almost
@ -528,9 +528,10 @@ pub fn socket_filter(attrs: TokenStream, item: TokenStream) -> TokenStream {
/// # Examples
///
/// ```no_run
/// # #![allow(non_camel_case_types)]
/// use aya_ebpf::{macros::fentry, programs::FEntryContext};
/// # #[expect(non_camel_case_types)]
/// # type filename = u32;
/// # #[expect(non_camel_case_types)]
/// # type path = u32;
///
/// #[fentry(function = "filename_lookup")]
@ -548,16 +549,13 @@ pub fn socket_filter(attrs: TokenStream, item: TokenStream) -> TokenStream {
/// Ok(0)
/// }
/// ```
#[proc_macro_error]
#[proc_macro_attribute]
pub fn fentry(attrs: TokenStream, item: TokenStream) -> TokenStream {
match FEntry::parse(attrs.into(), item.into()) {
Ok(prog) => prog
.expand()
.unwrap_or_else(|err| abort!(err.span(), "{}", err))
.into(),
Err(err) => abort!(err.span(), "{}", err),
Ok(prog) => prog.expand(),
Err(err) => err.into_compile_error(),
}
.into()
}
/// Marks a function as a fexit eBPF program that can be attached to almost
@ -573,9 +571,10 @@ pub fn fentry(attrs: TokenStream, item: TokenStream) -> TokenStream {
/// # Examples
///
/// ```no_run
/// # #![allow(non_camel_case_types)]
/// use aya_ebpf::{macros::fexit, programs::FExitContext};
/// # #[expect(non_camel_case_types)]
/// # type filename = u32;
/// # #[expect(non_camel_case_types)]
/// # type path = u32;
///
/// #[fexit(function = "filename_lookup")]
@ -593,16 +592,54 @@ pub fn fentry(attrs: TokenStream, item: TokenStream) -> TokenStream {
/// Ok(0)
/// }
/// ```
#[proc_macro_error]
#[proc_macro_attribute]
pub fn fexit(attrs: TokenStream, item: TokenStream) -> TokenStream {
match FExit::parse(attrs.into(), item.into()) {
Ok(prog) => prog
.expand()
.unwrap_or_else(|err| abort!(err.span(), "{}", err))
.into(),
Err(err) => abort!(err.span(), "{}", err),
Ok(prog) => prog.expand(),
Err(err) => err.into_compile_error(),
}
.into()
}
/// Marks a function as an eBPF Flow Dissector program.
///
/// Flow dissector is a program type that parses metadata out of the packets.
///
/// BPF flow dissectors can be attached per network namespace. These programs
/// are given a packet and expected to populate the fields of
/// `FlowDissectorContext::flow_keys`. The return code of the BPF program is
/// either [`BPF_OK`] to indicate successful dissection, [`BPF_DROP`] to
/// indicate parsing error, or [`BPF_FLOW_DISSECTOR_CONTINUE`] to indicate that
/// no custom dissection was performed, and fallback to standard dissector is
/// requested.
///
/// # Minimum kernel version
///
/// The minimum kernel version required to use this feature is 4.20.
///
/// # Examples
///
/// ```no_run
/// use aya_ebpf::{bindings::bpf_ret_code, macros::flow_dissector, programs::FlowDissectorContext};
///
/// #[flow_dissector]
/// pub fn dissect(_ctx: FlowDissectorContext) -> u32 {
/// // TODO: do something useful here.
/// bpf_ret_code::BPF_FLOW_DISSECTOR_CONTINUE
/// }
/// ```
///
/// [`FlowDissectorContext::flow_keys`]: ../aya_ebpf/programs/flow_dissector/struct.FlowDissectorContext.html#method.flow_keys
/// [`BPF_OK`]: ../aya_ebpf/bindings/bpf_ret_code/constant.BPF_OK.html
/// [`BPF_DROP`]: ../aya_ebpf/bindings/bpf_ret_code/constant.BPF_DROP.html
/// [`BPF_FLOW_DISSECTOR_CONTINUE`]: ../aya_ebpf/bindings/bpf_ret_code/constant.BPF_FLOW_DISSECTOR_CONTINUE.html
#[proc_macro_attribute]
pub fn flow_dissector(attrs: TokenStream, item: TokenStream) -> TokenStream {
    // Parse the annotated function; diagnostics are emitted as expression
    // tokens so the error appears at the attribute site.
    let expanded = FlowDissector::parse(attrs.into(), item.into())
        .map_or_else(|err| err.emit_as_expr_tokens(), |prog| prog.expand());
    expanded.into()
}
/// Marks a function as an eBPF Socket Lookup program that can be attached to
@ -620,19 +657,16 @@ pub fn fexit(attrs: TokenStream, item: TokenStream) -> TokenStream {
/// #[sk_lookup]
/// pub fn accept_all(_ctx: SkLookupContext) -> u32 {
/// // use sk_assign to redirect
/// return 0
/// 0
/// }
/// ```
#[proc_macro_error]
#[proc_macro_attribute]
pub fn sk_lookup(attrs: TokenStream, item: TokenStream) -> TokenStream {
match SkLookup::parse(attrs.into(), item.into()) {
Ok(prog) => prog
.expand()
.unwrap_or_else(|err| abort!(err.span(), "{}", err))
.into(),
Err(err) => abort!(err.span(), "{}", err),
Ok(prog) => prog.expand(),
Err(err) => err.emit_as_expr_tokens(),
}
.into()
}
/// Marks a function as a cgroup device eBPF program that can be attached to a
@ -653,17 +687,14 @@ pub fn sk_lookup(attrs: TokenStream, item: TokenStream) -> TokenStream {
/// #[cgroup_device]
/// pub fn cgroup_dev(ctx: DeviceContext) -> i32 {
/// // Reject all device access
/// return 0;
/// 0
/// }
/// ```
#[proc_macro_error]
#[proc_macro_attribute]
pub fn cgroup_device(attrs: TokenStream, item: TokenStream) -> TokenStream {
match CgroupDevice::parse(attrs.into(), item.into()) {
Ok(prog) => prog
.expand()
.unwrap_or_else(|err| abort!(err.span(), "{}", err))
.into(),
Err(err) => abort!(err.span(), "{}", err),
Ok(prog) => prog.expand(),
Err(err) => err.emit_as_expr_tokens(),
}
.into()
}

@ -13,41 +13,50 @@ pub(crate) struct Lsm {
}
impl Lsm {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Lsm> {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Self> {
let item = syn::parse2(item)?;
let mut args = syn::parse2(attrs)?;
let hook = pop_string_arg(&mut args, "hook");
let sleepable = pop_bool_arg(&mut args, "sleepable");
err_on_unknown_args(&args)?;
Ok(Lsm {
Ok(Self {
item,
hook,
sleepable,
})
}
pub(crate) fn expand(&self) -> Result<TokenStream> {
let section_prefix = if self.sleepable { "lsm.s" } else { "lsm" };
let section_name: Cow<'_, _> = if let Some(hook) = &self.hook {
format!("{}/{}", section_prefix, hook).into()
pub(crate) fn expand(&self) -> TokenStream {
let Self {
item,
hook,
sleepable,
} = self;
let ItemFn {
attrs: _,
vis,
sig,
block: _,
} = item;
let section_prefix = if *sleepable { "lsm.s" } else { "lsm" };
let section_name: Cow<'_, _> = if let Some(hook) = hook {
format!("{section_prefix}/{hook}").into()
} else {
section_prefix.into()
};
let fn_vis = &self.item.vis;
let fn_name = self.item.sig.ident.clone();
let item = &self.item;
let fn_name = &sig.ident;
// LSM probes need to return an integer corresponding to the correct
// policy decision. Therefore we do not simply default to a return value
// of 0 as in other program types.
Ok(quote! {
#[no_mangle]
#[link_section = #section_name]
#fn_vis fn #fn_name(ctx: *mut ::core::ffi::c_void) -> i32 {
quote! {
#[unsafe(no_mangle)]
#[unsafe(link_section = #section_name)]
#vis fn #fn_name(ctx: *mut ::core::ffi::c_void) -> i32 {
return #fn_name(::aya_ebpf::programs::LsmContext::new(ctx));
#item
}
})
}
}
}
@ -71,10 +80,10 @@ mod tests {
},
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "lsm.s/bprm_committed_creds"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "lsm.s/bprm_committed_creds")]
fn bprm_committed_creds(ctx: *mut ::core::ffi::c_void) -> i32 {
return bprm_committed_creds(::aya_ebpf::programs::LsmContext::new(ctx));
@ -99,10 +108,10 @@ mod tests {
},
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "lsm/bprm_committed_creds"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "lsm/bprm_committed_creds")]
fn bprm_committed_creds(ctx: *mut ::core::ffi::c_void) -> i32 {
return bprm_committed_creds(::aya_ebpf::programs::LsmContext::new(ctx));

@ -0,0 +1,87 @@
use std::borrow::Cow;
use proc_macro2::TokenStream;
use quote::quote;
use syn::{ItemFn, Result};
use crate::args::{err_on_unknown_args, pop_string_arg};
/// Intermediate representation of an `#[lsm_cgroup]` eBPF program: the
/// user-written function together with the optional LSM hook name parsed
/// from the macro arguments.
pub(crate) struct LsmCgroup {
    /// The annotated function that the generated wrapper forwards to.
    item: ItemFn,
    /// Optional `hook = "..."` argument; when present it is appended to the
    /// `lsm_cgroup` link-section name as `lsm_cgroup/<hook>`.
    hook: Option<String>,
}
impl LsmCgroup {
    /// Parses the macro arguments and the annotated function.
    ///
    /// Accepts an optional `hook = "..."` argument naming the LSM hook;
    /// any other argument is rejected by `err_on_unknown_args`.
    pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Self> {
        let item = syn::parse2(item)?;
        let mut arguments = syn::parse2(attrs)?;
        let hook = pop_string_arg(&mut arguments, "hook");
        err_on_unknown_args(&arguments)?;
        Ok(Self { item, hook })
    }

    /// Emits the wrapper function placed in the `lsm_cgroup` (or
    /// `lsm_cgroup/<hook>`) link section, forwarding the raw context
    /// pointer to the user function.
    pub(crate) fn expand(&self) -> TokenStream {
        let Self { item, hook } = self;
        let ItemFn {
            attrs: _,
            vis,
            sig,
            block: _,
        } = item;
        let section_prefix = "lsm_cgroup";
        // Append the hook name to the section when one was given.
        let section_name: Cow<'_, _> = match hook {
            Some(name) => format!("{section_prefix}/{name}").into(),
            None => section_prefix.into(),
        };
        let fn_name = &sig.ident;
        // LSM probes need to return an integer corresponding to the correct
        // policy decision. Therefore we do not simply default to a return value
        // of 0 as in other program types.
        quote! {
            #[unsafe(no_mangle)]
            #[unsafe(link_section = #section_name)]
            #vis fn #fn_name(ctx: *mut ::core::ffi::c_void) -> i32 {
                return #fn_name(::aya_ebpf::programs::LsmContext::new(ctx));

                #item
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use syn::parse_quote;

    use super::*;

    /// Expanding `#[lsm_cgroup(hook = "...")]` must emit a wrapper in the
    /// `lsm_cgroup/<hook>` link section that forwards to the original
    /// function, which is nested inside the wrapper.
    #[test]
    fn test_lsm_cgroup() {
        let parsed = LsmCgroup::parse(
            parse_quote! {
                hook = "bprm_committed_creds",
            },
            parse_quote! {
                fn bprm_committed_creds(ctx: &mut ::aya_ebpf::programs::LsmContext) -> i32 {
                    0
                }
            },
        )
        .unwrap();
        let actual = parsed.expand();
        let expected = quote! {
            #[unsafe(no_mangle)]
            #[unsafe(link_section = "lsm_cgroup/bprm_committed_creds")]
            fn bprm_committed_creds(ctx: *mut ::core::ffi::c_void) -> i32 {
                return bprm_committed_creds(::aya_ebpf::programs::LsmContext::new(ctx));

                fn bprm_committed_creds(ctx: &mut ::aya_ebpf::programs::LsmContext) -> i32 {
                    0
                }
            }
        };
        assert_eq!(expected.to_string(), actual.to_string());
    }
}

@ -11,22 +11,22 @@ pub(crate) struct Map {
}
impl Map {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Map> {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Self> {
let item: ItemStatic = syn::parse2(item)?;
let mut args = syn::parse2(attrs)?;
let name = name_arg(&mut args).unwrap_or_else(|| item.ident.to_string());
Ok(Map { item, name })
Ok(Self { item, name })
}
pub(crate) fn expand(&self) -> Result<TokenStream> {
let section_name: Cow<'_, _> = "maps".to_string().into();
pub(crate) fn expand(&self) -> TokenStream {
let section_name: Cow<'_, _> = "maps".into();
let name = &self.name;
let item = &self.item;
Ok(quote! {
#[link_section = #section_name]
#[export_name = #name]
quote! {
#[unsafe(link_section = #section_name)]
#[unsafe(export_name = #name)]
#item
})
}
}
}
@ -45,10 +45,10 @@ mod tests {
),
)
.unwrap();
let expanded = map.expand().unwrap();
let expanded = map.expand();
let expected = quote!(
#[link_section = "maps"]
#[export_name = "foo"]
#[unsafe(link_section = "maps")]
#[unsafe(export_name = "foo")]
static BAR: HashMap<&'static str, u32> = HashMap::new();
);
assert_eq!(expected.to_string(), expanded.to_string());
@ -63,10 +63,10 @@ mod tests {
),
)
.unwrap();
let expanded = map.expand().unwrap();
let expanded = map.expand();
let expected = quote!(
#[link_section = "maps"]
#[export_name = "BAR"]
#[unsafe(link_section = "maps")]
#[unsafe(export_name = "BAR")]
static BAR: HashMap<&'static str, u32> = HashMap::new();
);
assert_eq!(expected.to_string(), expanded.to_string());

@ -1,35 +1,40 @@
use proc_macro2::TokenStream;
use proc_macro_error::abort;
use proc_macro2_diagnostics::{Diagnostic, SpanDiagnosticExt as _};
use quote::quote;
use syn::{ItemFn, Result};
use syn::{ItemFn, spanned::Spanned as _};
pub(crate) struct PerfEvent {
item: ItemFn,
}
impl PerfEvent {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Self> {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Self, Diagnostic> {
if !attrs.is_empty() {
abort!(attrs, "unexpected attribute")
return Err(attrs.span().error("unexpected attribute"));
}
let item = syn::parse2(item)?;
Ok(PerfEvent { item })
Ok(Self { item })
}
pub(crate) fn expand(&self) -> Result<TokenStream> {
let fn_vis = &self.item.vis;
let fn_name = self.item.sig.ident.clone();
let item = &self.item;
Ok(quote! {
#[no_mangle]
#[link_section = "perf_event"]
#fn_vis fn #fn_name(ctx: *mut ::core::ffi::c_void) -> u32 {
pub(crate) fn expand(&self) -> TokenStream {
let Self { item } = self;
let ItemFn {
attrs: _,
vis,
sig,
block: _,
} = item;
let fn_name = &sig.ident;
quote! {
#[unsafe(no_mangle)]
#[unsafe(link_section = "perf_event")]
#vis fn #fn_name(ctx: *mut ::core::ffi::c_void) -> u32 {
let _ = #fn_name(::aya_ebpf::programs::PerfEventContext::new(ctx));
return 0;
#item
}
})
}
}
}
@ -50,10 +55,10 @@ mod tests {
),
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "perf_event"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "perf_event")]
fn foo(ctx: *mut ::core::ffi::c_void) -> u32 {
let _ = foo(::aya_ebpf::programs::PerfEventContext::new(ctx));
return 0;

@ -12,33 +12,38 @@ pub(crate) struct RawTracePoint {
}
impl RawTracePoint {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<RawTracePoint> {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Self> {
let item = syn::parse2(item)?;
let mut args = syn::parse2(attrs)?;
let tracepoint = pop_string_arg(&mut args, "tracepoint");
err_on_unknown_args(&args)?;
Ok(RawTracePoint { item, tracepoint })
Ok(Self { item, tracepoint })
}
pub(crate) fn expand(&self) -> Result<TokenStream> {
let section_name: Cow<'_, _> = if let Some(tracepoint) = &self.tracepoint {
format!("raw_tp/{}", tracepoint).into()
pub(crate) fn expand(&self) -> TokenStream {
let Self { item, tracepoint } = self;
let ItemFn {
attrs: _,
vis,
sig,
block: _,
} = item;
let section_name: Cow<'_, _> = if let Some(tracepoint) = tracepoint {
format!("raw_tp/{tracepoint}").into()
} else {
"raw_tp".into()
};
let fn_vis = &self.item.vis;
let fn_name = self.item.sig.ident.clone();
let item = &self.item;
Ok(quote! {
#[no_mangle]
#[link_section = #section_name]
#fn_vis fn #fn_name(ctx: *mut ::core::ffi::c_void) -> u32 {
let fn_name = &sig.ident;
quote! {
#[unsafe(no_mangle)]
#[unsafe(link_section = #section_name)]
#vis fn #fn_name(ctx: *mut ::core::ffi::c_void) -> u32 {
let _ = #fn_name(::aya_ebpf::programs::RawTracePointContext::new(ctx));
return 0;
#item
}
})
}
}
}
@ -59,10 +64,10 @@ mod tests {
},
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "raw_tp/sys_enter"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "raw_tp/sys_enter")]
fn prog(ctx: *mut ::core::ffi::c_void) -> u32 {
let _ = prog(::aya_ebpf::programs::RawTracePointContext::new(ctx));
return 0;

@ -1,34 +1,39 @@
use proc_macro2::TokenStream;
use proc_macro_error::abort;
use proc_macro2_diagnostics::{Diagnostic, SpanDiagnosticExt as _};
use quote::quote;
use syn::{ItemFn, Result};
use syn::{ItemFn, spanned::Spanned as _};
pub(crate) struct SkLookup {
item: ItemFn,
}
impl SkLookup {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Self> {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Self, Diagnostic> {
if !attrs.is_empty() {
abort!(attrs, "unexpected attribute")
return Err(attrs.span().error("unexpected attribute"));
}
let item = syn::parse2(item)?;
Ok(SkLookup { item })
Ok(Self { item })
}
pub(crate) fn expand(&self) -> Result<TokenStream> {
let fn_name = self.item.sig.ident.clone();
let fn_vis = &self.item.vis;
let item = &self.item;
Ok(quote! {
#[no_mangle]
#[link_section = "sk_lookup"]
#fn_vis fn #fn_name(ctx: *mut ::aya_ebpf::bindings::bpf_sk_lookup) -> u32 {
pub(crate) fn expand(&self) -> TokenStream {
let Self { item } = self;
let ItemFn {
attrs: _,
vis,
sig,
block: _,
} = item;
let fn_name = &sig.ident;
quote! {
#[unsafe(no_mangle)]
#[unsafe(link_section = "sk_lookup")]
#vis fn #fn_name(ctx: *mut ::aya_ebpf::bindings::bpf_sk_lookup) -> u32 {
return #fn_name(::aya_ebpf::programs::SkLookupContext::new(ctx));
#item
}
})
}
}
}
@ -49,10 +54,10 @@ mod tests {
},
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "sk_lookup"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "sk_lookup")]
fn prog(ctx: *mut ::aya_ebpf::bindings::bpf_sk_lookup) -> u32 {
return prog(::aya_ebpf::programs::SkLookupContext::new(ctx));

@ -1,34 +1,39 @@
use proc_macro2::TokenStream;
use proc_macro_error::abort;
use proc_macro2_diagnostics::{Diagnostic, SpanDiagnosticExt as _};
use quote::quote;
use syn::{ItemFn, Result};
use syn::{ItemFn, spanned::Spanned as _};
pub(crate) struct SkMsg {
item: ItemFn,
}
impl SkMsg {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Self> {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Self, Diagnostic> {
if !attrs.is_empty() {
abort!(attrs, "unexpected attribute")
return Err(attrs.span().error("unexpected attribute"));
}
let item = syn::parse2(item)?;
Ok(SkMsg { item })
Ok(Self { item })
}
pub(crate) fn expand(&self) -> Result<TokenStream> {
let fn_vis = &self.item.vis;
let fn_name = self.item.sig.ident.clone();
let item = &self.item;
Ok(quote! {
#[no_mangle]
#[link_section = "sk_msg"]
#fn_vis fn #fn_name(ctx: *mut ::aya_ebpf::bindings::sk_msg_md) -> u32 {
pub(crate) fn expand(&self) -> TokenStream {
let Self { item } = self;
let ItemFn {
attrs: _,
vis,
sig,
block: _,
} = item;
let fn_name = &sig.ident;
quote! {
#[unsafe(no_mangle)]
#[unsafe(link_section = "sk_msg")]
#vis fn #fn_name(ctx: *mut ::aya_ebpf::bindings::sk_msg_md) -> u32 {
return #fn_name(::aya_ebpf::programs::SkMsgContext::new(ctx));
#item
}
})
}
}
}
@ -49,10 +54,10 @@ mod tests {
},
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "sk_msg"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "sk_msg")]
fn prog(ctx: *mut ::aya_ebpf::bindings:: sk_msg_md) -> u32 {
return prog(::aya_ebpf::programs::SkMsgContext::new(ctx));

@ -1,11 +1,10 @@
use std::borrow::Cow;
use proc_macro2::TokenStream;
use proc_macro_error::abort;
use proc_macro2_diagnostics::{Diagnostic, SpanDiagnosticExt as _};
use quote::quote;
use syn::{ItemFn, Result};
use syn::{ItemFn, spanned::Spanned as _};
#[allow(clippy::enum_variant_names)]
#[derive(Debug, Copy, Clone)]
pub(crate) enum SkSkbKind {
StreamVerdict,
@ -28,29 +27,37 @@ pub(crate) struct SkSkb {
}
impl SkSkb {
pub(crate) fn parse(kind: SkSkbKind, attrs: TokenStream, item: TokenStream) -> Result<SkSkb> {
pub(crate) fn parse(
kind: SkSkbKind,
attrs: TokenStream,
item: TokenStream,
) -> Result<Self, Diagnostic> {
if !attrs.is_empty() {
abort!(attrs, "unexpected attribute");
return Err(attrs.span().error("unexpected attribute"));
}
let item = syn::parse2(item)?;
Ok(SkSkb { item, kind })
Ok(Self { item, kind })
}
pub(crate) fn expand(&self) -> Result<TokenStream> {
let kind = &self.kind;
pub(crate) fn expand(&self) -> TokenStream {
let Self { kind, item } = self;
let ItemFn {
attrs: _,
vis,
sig,
block: _,
} = item;
let section_name: Cow<'_, _> = format!("sk_skb/{kind}").into();
let fn_name = self.item.sig.ident.clone();
let fn_vis = &self.item.vis;
let item = &self.item;
Ok(quote! {
#[no_mangle]
#[link_section = #section_name]
#fn_vis fn #fn_name(ctx: *mut ::aya_ebpf::bindings::__sk_buff) -> u32 {
let fn_name = &sig.ident;
quote! {
#[unsafe(no_mangle)]
#[unsafe(link_section = #section_name)]
#vis fn #fn_name(ctx: *mut ::aya_ebpf::bindings::__sk_buff) -> u32 {
return #fn_name(::aya_ebpf::programs::SkBuffContext::new(ctx));
#item
}
})
}
}
}
@ -72,10 +79,10 @@ mod tests {
},
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "sk_skb/stream_parser"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "sk_skb/stream_parser")]
fn prog(ctx: *mut ::aya_ebpf::bindings::__sk_buff) -> u32 {
return prog(::aya_ebpf::programs::SkBuffContext::new(ctx));
@ -99,10 +106,10 @@ mod tests {
},
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "sk_skb/stream_verdict"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "sk_skb/stream_verdict")]
fn prog(ctx: *mut ::aya_ebpf::bindings::__sk_buff) -> u32 {
return prog(::aya_ebpf::programs::SkBuffContext::new(ctx));

@ -1,34 +1,39 @@
use proc_macro2::TokenStream;
use proc_macro_error::abort;
use proc_macro2_diagnostics::{Diagnostic, SpanDiagnosticExt as _};
use quote::quote;
use syn::{ItemFn, Result};
use syn::{ItemFn, spanned::Spanned as _};
pub(crate) struct SockOps {
item: ItemFn,
}
impl SockOps {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Self> {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Self, Diagnostic> {
if !attrs.is_empty() {
abort!(attrs, "unexpected attribute")
return Err(attrs.span().error("unexpected attribute"));
}
let item = syn::parse2(item)?;
Ok(SockOps { item })
Ok(Self { item })
}
pub(crate) fn expand(&self) -> Result<TokenStream> {
let fn_vis = &self.item.vis;
let fn_name = self.item.sig.ident.clone();
let item = &self.item;
Ok(quote! {
#[no_mangle]
#[link_section = "sockops"]
#fn_vis fn #fn_name(ctx: *mut ::aya_ebpf::bindings::bpf_sock_ops) -> u32 {
pub(crate) fn expand(&self) -> TokenStream {
let Self { item } = self;
let ItemFn {
attrs: _,
vis,
sig,
block: _,
} = item;
let fn_name = &sig.ident;
quote! {
#[unsafe(no_mangle)]
#[unsafe(link_section = "sockops")]
#vis fn #fn_name(ctx: *mut ::aya_ebpf::bindings::bpf_sock_ops) -> u32 {
return #fn_name(::aya_ebpf::programs::SockOpsContext::new(ctx));
#item
}
})
}
}
}
@ -49,10 +54,10 @@ mod tests {
},
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "sockops"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "sockops")]
fn prog(ctx: *mut ::aya_ebpf::bindings::bpf_sock_ops) -> u32 {
return prog(::aya_ebpf::programs::SockOpsContext::new(ctx));

@ -1,34 +1,39 @@
use proc_macro2::TokenStream;
use proc_macro_error::abort;
use proc_macro2_diagnostics::{Diagnostic, SpanDiagnosticExt as _};
use quote::quote;
use syn::{ItemFn, Result};
use syn::{ItemFn, spanned::Spanned as _};
pub(crate) struct SocketFilter {
item: ItemFn,
}
impl SocketFilter {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Self> {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Self, Diagnostic> {
if !attrs.is_empty() {
abort!(attrs, "unexpected attribute")
return Err(attrs.span().error("unexpected attribute"));
}
let item = syn::parse2(item)?;
Ok(SocketFilter { item })
Ok(Self { item })
}
pub(crate) fn expand(&self) -> Result<TokenStream> {
let fn_name = self.item.sig.ident.clone();
let fn_vis = &self.item.vis;
let item = &self.item;
Ok(quote! {
#[no_mangle]
#[link_section = "socket"]
#fn_vis fn #fn_name(ctx: *mut ::aya_ebpf::bindings::__sk_buff) -> i64 {
pub(crate) fn expand(&self) -> TokenStream {
let Self { item } = self;
let ItemFn {
attrs: _,
vis,
sig,
block: _,
} = item;
let fn_name = &sig.ident;
quote! {
#[unsafe(no_mangle)]
#[unsafe(link_section = "socket")]
#vis fn #fn_name(ctx: *mut ::aya_ebpf::bindings::__sk_buff) -> i64 {
return #fn_name(::aya_ebpf::programs::SkBuffContext::new(ctx));
#item
}
})
}
}
}
@ -49,10 +54,10 @@ mod tests {
},
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "socket"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "socket")]
fn prog(ctx: *mut ::aya_ebpf::bindings::__sk_buff) -> i64 {
return prog(::aya_ebpf::programs::SkBuffContext::new(ctx));

@ -1,34 +1,39 @@
use proc_macro2::TokenStream;
use proc_macro_error::abort;
use proc_macro2_diagnostics::{Diagnostic, SpanDiagnosticExt as _};
use quote::quote;
use syn::{ItemFn, Result};
use syn::{ItemFn, spanned::Spanned as _};
pub(crate) struct SchedClassifier {
item: ItemFn,
}
impl SchedClassifier {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Self> {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Self, Diagnostic> {
if !attrs.is_empty() {
abort!(attrs, "unexpected attribute")
return Err(attrs.span().error("unexpected attribute"));
}
let item = syn::parse2(item)?;
Ok(SchedClassifier { item })
Ok(Self { item })
}
pub(crate) fn expand(&self) -> Result<TokenStream> {
let fn_vis = &self.item.vis;
let fn_name = self.item.sig.ident.clone();
let item = &self.item;
Ok(quote! {
#[no_mangle]
#[link_section = "classifier"]
#fn_vis fn #fn_name(ctx: *mut ::aya_ebpf::bindings::__sk_buff) -> i32 {
pub(crate) fn expand(&self) -> TokenStream {
let Self { item } = self;
let ItemFn {
attrs: _,
vis,
sig,
block: _,
} = item;
let fn_name = &sig.ident;
quote! {
#[unsafe(no_mangle)]
#[unsafe(link_section = "classifier")]
#vis fn #fn_name(ctx: *mut ::aya_ebpf::bindings::__sk_buff) -> i32 {
return #fn_name(::aya_ebpf::programs::TcContext::new(ctx));
#item
}
})
}
}
}
@ -49,10 +54,10 @@ mod tests {
},
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "classifier"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "classifier")]
fn prog(ctx: *mut ::aya_ebpf::bindings::__sk_buff) -> i32 {
return prog(::aya_ebpf::programs::TcContext::new(ctx));

@ -1,52 +1,64 @@
use std::borrow::Cow;
use proc_macro2::TokenStream;
use proc_macro_error::abort;
use proc_macro2_diagnostics::{Diagnostic, SpanDiagnosticExt as _};
use quote::quote;
use syn::{ItemFn, Result};
use syn::{ItemFn, spanned::Spanned as _};
use crate::args::{err_on_unknown_args, pop_string_arg};
pub(crate) struct TracePoint {
item: ItemFn,
category: Option<String>,
name: Option<String>,
name_and_category: Option<(String, String)>,
}
impl TracePoint {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<TracePoint> {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Self, Diagnostic> {
let item = syn::parse2(item)?;
let span = attrs.span();
let mut args = syn::parse2(attrs)?;
let name = pop_string_arg(&mut args, "name");
let category = pop_string_arg(&mut args, "category");
err_on_unknown_args(&args)?;
Ok(TracePoint {
item,
category,
name,
})
match (name, category) {
(None, None) => Ok(Self {
item,
name_and_category: None,
}),
(Some(name), Some(category)) => Ok(Self {
item,
name_and_category: Some((name, category)),
}),
_ => Err(span.error("expected `name` and `category` arguments")),
}
}
pub(crate) fn expand(&self) -> Result<TokenStream> {
let section_name: Cow<'_, _> = match (&self.category, &self.name) {
(Some(category), Some(name)) => format!("tracepoint/{}/{}", category, name).into(),
(Some(_), None) => abort!(self.item, "expected `name` and `category` arguments"),
(None, Some(_)) => abort!(self.item, "expected `name` and `category` arguments"),
_ => "tracepoint".into(),
pub(crate) fn expand(&self) -> TokenStream {
let Self {
item,
name_and_category,
} = self;
let section_name: Cow<'_, _> = match name_and_category {
Some((name, category)) => format!("tracepoint/{category}/{name}").into(),
None => "tracepoint".into(),
};
let fn_vis = &self.item.vis;
let fn_name = self.item.sig.ident.clone();
let item = &self.item;
Ok(quote! {
#[no_mangle]
#[link_section = #section_name]
#fn_vis fn #fn_name(ctx: *mut ::core::ffi::c_void) -> u32 {
let ItemFn {
attrs: _,
vis,
sig,
block: _,
} = item;
let fn_name = &sig.ident;
quote! {
#[unsafe(no_mangle)]
#[unsafe(link_section = #section_name)]
#vis fn #fn_name(ctx: *mut ::core::ffi::c_void) -> u32 {
let _ = #fn_name(::aya_ebpf::programs::TracePointContext::new(ctx));
return 0;
#item
}
})
}
}
}
@ -67,10 +79,10 @@ mod tests {
},
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "tracepoint/syscalls/sys_enter_bind"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "tracepoint/syscalls/sys_enter_bind")]
fn prog(ctx: *mut ::core::ffi::c_void) -> u32 {
let _ = prog(::aya_ebpf::programs::TracePointContext::new(ctx));
return 0;

@ -1,13 +1,12 @@
use std::borrow::Cow;
use proc_macro2::TokenStream;
use proc_macro_error::abort;
use proc_macro2_diagnostics::{Diagnostic, SpanDiagnosticExt as _};
use quote::quote;
use syn::{ItemFn, Result};
use syn::{ItemFn, spanned::Spanned as _};
use crate::args::{err_on_unknown_args, pop_bool_arg, pop_string_arg};
#[allow(clippy::enum_variant_names)]
#[derive(Debug, Copy, Clone)]
pub(crate) enum UProbeKind {
UProbe,
@ -34,15 +33,24 @@ pub(crate) struct UProbe {
}
impl UProbe {
pub(crate) fn parse(kind: UProbeKind, attrs: TokenStream, item: TokenStream) -> Result<UProbe> {
pub(crate) fn parse(
kind: UProbeKind,
attrs: TokenStream,
item: TokenStream,
) -> Result<Self, Diagnostic> {
let item = syn::parse2(item)?;
let span = attrs.span();
let mut args = syn::parse2(attrs)?;
let path = pop_string_arg(&mut args, "path");
let function = pop_string_arg(&mut args, "function");
let offset = pop_string_arg(&mut args, "offset").map(|v| v.parse::<u64>().unwrap());
let offset = pop_string_arg(&mut args, "offset")
.as_deref()
.map(str::parse)
.transpose()
.map_err(|err| span.error(format!("failed to parse `offset` argument: {err}")))?;
let sleepable = pop_bool_arg(&mut args, "sleepable");
err_on_unknown_args(&args)?;
Ok(UProbe {
Ok(Self {
kind,
item,
path,
@ -52,39 +60,38 @@ impl UProbe {
})
}
pub(crate) fn expand(&self) -> Result<TokenStream> {
let prefix = if self.sleepable {
format!("{}.s", self.kind)
} else {
format!("{}", self.kind)
};
let section_name: Cow<'_, _> = if self.path.is_some() && self.offset.is_some() {
if self.function.is_none() {
abort!(self.item.sig.ident, "expected `function` attribute");
}
let mut path = self.path.as_ref().unwrap().clone();
if path.starts_with('/') {
path.remove(0);
}
format!(
"{}/{}:{}+{}",
prefix,
path,
self.function.as_ref().unwrap(),
self.offset.unwrap()
)
.into()
} else if self.path.is_some() {
if self.function.is_none() {
abort!(self.item.sig.ident, "expected `function` attribute");
}
let mut path = self.path.as_ref().unwrap().clone();
if path.starts_with('/') {
path.remove(0);
pub(crate) fn expand(&self) -> Result<TokenStream, Diagnostic> {
let Self {
kind,
path,
function,
offset,
item,
sleepable,
} = self;
let ItemFn {
attrs: _,
vis,
sig,
block: _,
} = item;
let mut prefix = kind.to_string();
if *sleepable {
prefix.push_str(".s");
}
let section_name: Cow<'_, _> = match path {
None => prefix.into(),
Some(path) => {
let path = path.strip_prefix("/").unwrap_or(path);
// TODO: check this in parse instead.
let function = function
.as_deref()
.ok_or(item.sig.span().error("expected `function` attribute"))?;
match offset {
None => format!("{prefix}/{path}:{function}").into(),
Some(offset) => format!("{prefix}/{path}:{function}+{offset}").into(),
}
}
format!("{}/{}:{}", prefix, path, self.function.as_ref().unwrap()).into()
} else {
prefix.to_string().into()
};
let probe_type = if section_name.as_ref().starts_with("uprobe") {
@ -92,13 +99,11 @@ impl UProbe {
} else {
quote! { RetProbeContext }
};
let fn_vis = &self.item.vis;
let fn_name = self.item.sig.ident.clone();
let item = &self.item;
let fn_name = &sig.ident;
Ok(quote! {
#[no_mangle]
#[link_section = #section_name]
#fn_vis fn #fn_name(ctx: *mut ::core::ffi::c_void) -> u32 {
#[unsafe(no_mangle)]
#[unsafe(link_section = #section_name)]
#vis fn #fn_name(ctx: *mut ::core::ffi::c_void) -> u32 {
let _ = #fn_name(::aya_ebpf::programs::#probe_type::new(ctx));
return 0;
@ -129,8 +134,8 @@ mod tests {
assert_eq!(
uprobe.expand().unwrap().to_string(),
quote! {
#[no_mangle]
#[link_section = "uprobe"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "uprobe")]
fn foo(ctx: *mut ::core::ffi::c_void) -> u32 {
let _ = foo(::aya_ebpf::programs::ProbeContext::new(ctx));
return 0;
@ -159,8 +164,8 @@ mod tests {
assert_eq!(
uprobe.expand().unwrap().to_string(),
quote! {
#[no_mangle]
#[link_section = "uprobe.s"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "uprobe.s")]
fn foo(ctx: *mut ::core::ffi::c_void) -> u32 {
let _ = foo(::aya_ebpf::programs::ProbeContext::new(ctx));
return 0;
@ -192,8 +197,8 @@ mod tests {
assert_eq!(
uprobe.expand().unwrap().to_string(),
quote! {
#[no_mangle]
#[link_section = "uprobe/self/proc/exe:trigger_uprobe"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "uprobe/self/proc/exe:trigger_uprobe")]
fn foo(ctx: *mut ::core::ffi::c_void) -> u32 {
let _ = foo(::aya_ebpf::programs::ProbeContext::new(ctx));
return 0;
@ -224,8 +229,8 @@ mod tests {
assert_eq!(
uprobe.expand().unwrap().to_string(),
quote! {
#[no_mangle]
#[link_section = "uprobe/self/proc/exe:foo+123"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "uprobe/self/proc/exe:foo+123")]
fn foo(ctx: *mut ::core::ffi::c_void) -> u32 {
let _ = foo(::aya_ebpf::programs::ProbeContext::new(ctx));
return 0;
@ -254,8 +259,8 @@ mod tests {
assert_eq!(
uprobe.expand().unwrap().to_string(),
quote! {
#[no_mangle]
#[link_section = "uretprobe"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "uretprobe")]
fn foo(ctx: *mut ::core::ffi::c_void) -> u32 {
let _ = foo(::aya_ebpf::programs::RetProbeContext::new(ctx));
return 0;

@ -1,8 +1,9 @@
use proc_macro2::TokenStream;
use proc_macro2_diagnostics::{Diagnostic, SpanDiagnosticExt as _};
use quote::quote;
use syn::{Error, ItemFn, Result};
use syn::{ItemFn, spanned::Spanned as _};
use crate::args::{err_on_unknown_args, pop_bool_arg, pop_string_arg, Args};
use crate::args::{Args, err_on_unknown_args, pop_bool_arg, pop_string_arg};
pub(crate) struct Xdp {
item: ItemFn,
@ -17,8 +18,9 @@ pub(crate) enum XdpMap {
}
impl Xdp {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Xdp> {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Self, Diagnostic> {
let item = syn::parse2(item)?;
let span = attrs.span();
let mut args: Args = syn::parse2(attrs)?;
let frags = pop_bool_arg(&mut args, "frags");
@ -26,39 +28,42 @@ impl Xdp {
Some("cpumap") => Some(XdpMap::CpuMap),
Some("devmap") => Some(XdpMap::DevMap),
Some(name) => {
return Err(Error::new_spanned(
"map",
format!("Invalid value. Expected 'cpumap' or 'devmap', found '{name}'"),
))
return Err(span.error(format!(
"Invalid value. Expected 'cpumap' or 'devmap', found '{name}'"
)));
}
None => None,
};
err_on_unknown_args(&args)?;
Ok(Xdp { item, frags, map })
Ok(Self { item, frags, map })
}
pub(crate) fn expand(&self) -> Result<TokenStream> {
let mut section_name = vec![if self.frags { "xdp.frags" } else { "xdp" }];
match self.map {
pub(crate) fn expand(&self) -> TokenStream {
let Self { item, frags, map } = self;
let ItemFn {
attrs: _,
vis,
sig,
block: _,
} = item;
let mut section_name = vec![if *frags { "xdp.frags" } else { "xdp" }];
match map {
Some(XdpMap::CpuMap) => section_name.push("cpumap"),
Some(XdpMap::DevMap) => section_name.push("devmap"),
None => (),
};
let section_name = section_name.join("/");
let fn_vis = &self.item.vis;
let fn_name = self.item.sig.ident.clone();
let item = &self.item;
Ok(quote! {
#[no_mangle]
#[link_section = #section_name]
#fn_vis fn #fn_name(ctx: *mut ::aya_ebpf::bindings::xdp_md) -> u32 {
let fn_name = &sig.ident;
quote! {
#[unsafe(no_mangle)]
#[unsafe(link_section = #section_name)]
#vis fn #fn_name(ctx: *mut ::aya_ebpf::bindings::xdp_md) -> u32 {
return #fn_name(::aya_ebpf::programs::XdpContext::new(ctx));
#item
}
})
}
}
}
@ -79,10 +84,10 @@ mod tests {
},
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "xdp"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "xdp")]
fn prog(ctx: *mut ::aya_ebpf::bindings::xdp_md) -> u32 {
return prog(::aya_ebpf::programs::XdpContext::new(ctx));
@ -105,10 +110,10 @@ mod tests {
},
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "xdp.frags"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "xdp.frags")]
fn prog(ctx: *mut ::aya_ebpf::bindings::xdp_md) -> u32 {
return prog(::aya_ebpf::programs::XdpContext::new(ctx));
@ -131,10 +136,10 @@ mod tests {
},
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "xdp/cpumap"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "xdp/cpumap")]
fn prog(ctx: *mut ::aya_ebpf::bindings::xdp_md) -> u32 {
return prog(::aya_ebpf::programs::XdpContext::new(ctx));
@ -157,10 +162,10 @@ mod tests {
},
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "xdp/devmap"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "xdp/devmap")]
fn prog(ctx: *mut ::aya_ebpf::bindings::xdp_md) -> u32 {
return prog(::aya_ebpf::programs::XdpContext::new(ctx));
@ -197,10 +202,10 @@ mod tests {
},
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "xdp.frags/cpumap"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "xdp.frags/cpumap")]
fn prog(ctx: *mut ::aya_ebpf::bindings::xdp_md) -> u32 {
return prog(::aya_ebpf::programs::XdpContext::new(ctx));
@ -223,10 +228,10 @@ mod tests {
},
)
.unwrap();
let expanded = prog.expand().unwrap();
let expanded = prog.expand();
let expected = quote! {
#[no_mangle]
#[link_section = "xdp.frags/devmap"]
#[unsafe(no_mangle)]
#[unsafe(link_section = "xdp.frags/devmap")]
fn prog(ctx: *mut ::aya_ebpf::bindings::xdp_md) -> u32 {
return prog(::aya_ebpf::programs::XdpContext::new(ctx));

@ -5,6 +5,47 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## 0.1.15 (2024-10-09)
<csr-id-a75fc2f7691dad21822c2eff35281abd3c4b5d23/>
### Other
- <csr-id-a75fc2f7691dad21822c2eff35281abd3c4b5d23/> Allow logging `core::net::Ipv4Addr` and `core::net::Ipv6Addr`
IP address types are available in `core`, so they can be used also in
eBPF programs. This change adds support of these types in aya-log.
* Add implementation of `WriteTuBuf` to these types.
* Support these types in `Ipv4Formatter` and `Ipv6Formatter`.
* Support them with `DisplayHint::Ip`.
* Add support for formatting `[u8; 4]`, to be able to handle
`Ipv4Addr::octets`.
### Chore
- <csr-id-c3f0c7dc3fb285da091454426eeda0723389f0f1/> Prepare for aya-log-ebpf release
### Commit Statistics
<csr-read-only-do-not-edit/>
- 3 commits contributed to the release.
- 223 days passed between releases.
- 2 commits were understood as [conventional](https://www.conventionalcommits.org).
- 0 issues like '(#ID)' were seen in commit messages
### Commit Details
<csr-read-only-do-not-edit/>
<details><summary>view details</summary>
* **Uncategorized**
- Prepare for aya-log-ebpf release ([`c3f0c7d`](https://github.com/aya-rs/aya/commit/c3f0c7dc3fb285da091454426eeda0723389f0f1))
- Allow logging `core::net::Ipv4Addr` and `core::net::Ipv6Addr` ([`a75fc2f`](https://github.com/aya-rs/aya/commit/a75fc2f7691dad21822c2eff35281abd3c4b5d23))
- Appease clippy ([`09442c2`](https://github.com/aya-rs/aya/commit/09442c2cbe9513365dfc1df8d4f7cf6f808a67ed))
</details>
## v0.1.14 (2024-02-28)
<csr-id-b3e7ef741c5b8d09fc7dc8302576f8174be75ff4/>
@ -190,7 +231,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
<csr-read-only-do-not-edit/>
- 37 commits contributed to the release over the course of 469 calendar days.
- 38 commits contributed to the release.
- 469 days passed between releases.
- 18 commits were understood as [conventional](https://www.conventionalcommits.org).
- 0 issues like '(#ID)' were seen in commit messages
@ -202,6 +243,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
<details><summary>view details</summary>
* **Uncategorized**
- Release aya-log-common v0.1.14, aya-log v0.2.0 ([`b6a84b6`](https://github.com/aya-rs/aya/commit/b6a84b658ae00f23d0f1721c30d11f2e57f99eab))
- Add CHANGELOG ([`4f0f095`](https://github.com/aya-rs/aya/commit/4f0f0957758362296c2d0a4749d354edd8dc181e))
- Release aya-log-common v0.1.14, aya-log v0.2.0 ([`c22a696`](https://github.com/aya-rs/aya/commit/c22a6963d44befb5591d4b21c09767c43935cb54))
- Merge pull request #882 from dave-tucker/metadata ([`0fadd69`](https://github.com/aya-rs/aya/commit/0fadd695377b8a3f0d9a3af3bc8140f0f1bed8d2))
@ -276,7 +318,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
<csr-read-only-do-not-edit/>
- 12 commits contributed to the release over the course of 110 calendar days.
- 12 commits contributed to the release.
- 4 commits were understood as [conventional](https://www.conventionalcommits.org).
- 0 issues like '(#ID)' were seen in commit messages

@ -1,14 +1,19 @@
[package]
name = "aya-log-common"
version = "0.1.14"
description = "A logging library for eBPF programs."
keywords = ["bpf", "ebpf", "log", "logging"]
documentation = "https://docs.rs/aya-log"
keywords = ["bpf", "ebpf", "log", "logging"]
name = "aya-log-common"
version = "0.1.15"
authors.workspace = true
edition.workspace = true
homepage.workspace = true
license.workspace = true
repository.workspace = true
homepage.workspace = true
edition.workspace = true
rust-version.workspace = true
[lints]
workspace = true
[dependencies]
num_enum = { workspace = true }

@ -1,13 +1,16 @@
#![cfg_attr(
target_arch = "bpf",
expect(unused_crate_dependencies, reason = "compiler_builtins")
)]
#![no_std]
use core::num::{NonZeroUsize, TryFromIntError};
use core::{
net::{IpAddr, Ipv4Addr, Ipv6Addr},
num::TryFromIntError,
};
use num_enum::IntoPrimitive;
pub const LOG_BUF_CAPACITY: usize = 8192;
pub const LOG_FIELDS: usize = 6;
pub type LogValueLength = u16;
#[repr(u8)]
@ -52,7 +55,8 @@ impl_formatter_for_types!(
f32, f64,
char,
str,
&str
&str,
IpAddr, Ipv4Addr, Ipv6Addr
}
);
@ -65,6 +69,8 @@ impl_formatter_for_types!(
}
);
impl<const N: usize> LowerHexFormatter for &[u8; N] {}
pub trait UpperHexFormatter {}
impl_formatter_for_types!(
UpperHexFormatter: {
@ -74,8 +80,14 @@ impl_formatter_for_types!(
}
);
impl<const N: usize> UpperHexFormatter for &[u8; N] {}
pub trait IpFormatter {}
impl IpFormatter for IpAddr {}
impl IpFormatter for Ipv4Addr {}
impl IpFormatter for Ipv6Addr {}
impl IpFormatter for u32 {}
impl IpFormatter for [u8; 4] {}
impl IpFormatter for [u8; 16] {}
impl IpFormatter for [u16; 8] {}
@ -85,10 +97,14 @@ impl LowerMacFormatter for [u8; 6] {}
pub trait UpperMacFormatter {}
impl UpperMacFormatter for [u8; 6] {}
pub trait PointerFormatter {}
impl<T> PointerFormatter for *const T {}
impl<T> PointerFormatter for *mut T {}
#[repr(u8)]
#[derive(Copy, Clone, Debug, IntoPrimitive)]
pub enum RecordField {
Target = 1,
pub enum RecordFieldKind {
Target,
Level,
Module,
File,
@ -100,7 +116,7 @@ pub enum RecordField {
/// programs to userspace.
#[repr(u8)]
#[derive(Copy, Clone, Debug, IntoPrimitive)]
pub enum Argument {
pub enum ArgumentKind {
DisplayHint,
I8,
@ -118,6 +134,11 @@ pub enum Argument {
F32,
F64,
Ipv4Addr,
Ipv6Addr,
/// `[u8; 4]` array which represents an IPv4 address.
ArrU8Len4,
/// `[u8; 6]` array which represents a MAC address.
ArrU8Len6,
/// `[u8; 16]` array which represents an IPv6 address.
@ -127,6 +148,8 @@ pub enum Argument {
Bytes,
Str,
Pointer,
}
/// All display hints
@ -145,162 +168,225 @@ pub enum DisplayHint {
LowerMac,
/// `:MAC`
UpperMac,
/// `:p`
Pointer,
}
// Must be inlined, else the BPF backend emits:
//
// llvm: <unknown>:0:0: in function _ZN14aya_log_common5write17hc9ed05433e23a663E { i64, i64 } (i8, ptr, i64, ptr, i64): only integer returns supported
#[inline(always)]
pub(crate) fn write(tag: u8, value: &[u8], buf: &mut [u8]) -> Option<NonZeroUsize> {
let wire_len: LogValueLength = match value.len().try_into() {
Ok(wire_len) => Some(wire_len),
Err(TryFromIntError { .. }) => None,
}?;
let mut size = 0;
macro_rules! copy_from_slice {
($value:expr) => {{
let buf = buf.get_mut(size..)?;
let buf = buf.get_mut(..$value.len())?;
buf.copy_from_slice($value);
size += $value.len();
}};
}
copy_from_slice!(&[tag]);
copy_from_slice!(&wire_len.to_ne_bytes());
copy_from_slice!(value);
NonZeroUsize::new(size)
mod sealed {
pub trait Sealed {}
}
pub trait WriteToBuf {
fn write(self, buf: &mut [u8]) -> Option<NonZeroUsize>;
pub trait Argument: sealed::Sealed {
fn as_argument(&self) -> (ArgumentKind, impl AsRef<[u8]>);
}
macro_rules! impl_write_to_buf {
($type:ident, $arg_type:expr) => {
impl WriteToBuf for $type {
// This need not be inlined because the return value is Option<N> where N is
// mem::size_of<$type>, which is a compile-time constant.
#[inline(never)]
fn write(self, buf: &mut [u8]) -> Option<NonZeroUsize> {
write($arg_type.into(), &self.to_ne_bytes(), buf)
macro_rules! impl_argument {
($self:ident, $arg_type:expr) => {
impl sealed::Sealed for $self {}
impl Argument for $self {
fn as_argument(&self) -> (ArgumentKind, impl AsRef<[u8]>) {
($arg_type, self.to_ne_bytes())
}
}
};
}
impl_write_to_buf!(i8, Argument::I8);
impl_write_to_buf!(i16, Argument::I16);
impl_write_to_buf!(i32, Argument::I32);
impl_write_to_buf!(i64, Argument::I64);
impl_write_to_buf!(isize, Argument::Isize);
impl_write_to_buf!(u8, Argument::U8);
impl_write_to_buf!(u16, Argument::U16);
impl_write_to_buf!(u32, Argument::U32);
impl_write_to_buf!(u64, Argument::U64);
impl_write_to_buf!(usize, Argument::Usize);
impl_write_to_buf!(f32, Argument::F32);
impl_write_to_buf!(f64, Argument::F64);
impl WriteToBuf for [u8; 16] {
// This need not be inlined because the return value is Option<N> where N is 16, which is a
// compile-time constant.
#[inline(never)]
fn write(self, buf: &mut [u8]) -> Option<NonZeroUsize> {
write(Argument::ArrU8Len16.into(), &self, buf)
impl_argument!(i8, ArgumentKind::I8);
impl_argument!(i16, ArgumentKind::I16);
impl_argument!(i32, ArgumentKind::I32);
impl_argument!(i64, ArgumentKind::I64);
impl_argument!(isize, ArgumentKind::Isize);
impl_argument!(u8, ArgumentKind::U8);
impl_argument!(u16, ArgumentKind::U16);
impl_argument!(u32, ArgumentKind::U32);
impl_argument!(u64, ArgumentKind::U64);
impl_argument!(usize, ArgumentKind::Usize);
impl_argument!(f32, ArgumentKind::F32);
impl_argument!(f64, ArgumentKind::F64);
enum Either<L, R> {
Left(L),
Right(R),
}
impl<L, R> AsRef<[u8]> for Either<L, R>
where
L: AsRef<[u8]>,
R: AsRef<[u8]>,
{
fn as_ref(&self) -> &[u8] {
match self {
Self::Left(l) => l.as_ref(),
Self::Right(r) => r.as_ref(),
}
}
}
impl sealed::Sealed for IpAddr {}
impl Argument for IpAddr {
fn as_argument(&self) -> (ArgumentKind, impl AsRef<[u8]>) {
match self {
Self::V4(ipv4_addr) => {
let (kind, value) = ipv4_addr.as_argument();
(kind, Either::Left(value))
}
Self::V6(ipv6_addr) => {
let (kind, value) = ipv6_addr.as_argument();
(kind, Either::Right(value))
}
}
}
}
impl WriteToBuf for [u16; 8] {
// This need not be inlined because the return value is Option<N> where N is 16, which is a
// compile-time constant.
#[inline(never)]
fn write(self, buf: &mut [u8]) -> Option<NonZeroUsize> {
let bytes = unsafe { core::mem::transmute::<[u16; 8], [u8; 16]>(self) };
write(Argument::ArrU16Len8.into(), &bytes, buf)
impl sealed::Sealed for Ipv4Addr {}
impl Argument for Ipv4Addr {
fn as_argument(&self) -> (ArgumentKind, impl AsRef<[u8]>) {
(ArgumentKind::Ipv4Addr, self.octets())
}
}
impl WriteToBuf for [u8; 6] {
// This need not be inlined because the return value is Option<N> where N is 6, which is a
// compile-time constant.
#[inline(never)]
fn write(self, buf: &mut [u8]) -> Option<NonZeroUsize> {
write(Argument::ArrU8Len6.into(), &self, buf)
impl sealed::Sealed for Ipv6Addr {}
impl Argument for Ipv6Addr {
fn as_argument(&self) -> (ArgumentKind, impl AsRef<[u8]>) {
(ArgumentKind::Ipv6Addr, self.octets())
}
}
impl WriteToBuf for &[u8] {
// Must be inlined, else the BPF backend emits:
//
// llvm: <unknown>:0:0: in function _ZN63_$LT$$RF$$u5b$u8$u5d$$u20$as$u20$aya_log_common..WriteToBuf$GT$5write17h08f30a45f7b9f09dE { i64, i64 } (ptr, i64, ptr, i64): only integer returns supported
#[inline(always)]
fn write(self, buf: &mut [u8]) -> Option<NonZeroUsize> {
write(Argument::Bytes.into(), self, buf)
impl<const N: usize> sealed::Sealed for [u8; N] {}
impl<const N: usize> Argument for [u8; N] {
fn as_argument(&self) -> (ArgumentKind, impl AsRef<[u8]>) {
let kind = match N {
4 => ArgumentKind::ArrU8Len4,
6 => ArgumentKind::ArrU8Len6,
16 => ArgumentKind::ArrU8Len16,
_ => ArgumentKind::Bytes,
};
(kind, *self)
}
}
impl WriteToBuf for &str {
// Must be inlined, else the BPF backend emits:
//
// llvm: <unknown>:0:0: in function _ZN54_$LT$$RF$str$u20$as$u20$aya_log_common..WriteToBuf$GT$5write17h7e2d1ccaa758e2b5E { i64, i64 } (ptr, i64, ptr, i64): only integer returns supported
#[inline(always)]
fn write(self, buf: &mut [u8]) -> Option<NonZeroUsize> {
write(Argument::Str.into(), self.as_bytes(), buf)
impl sealed::Sealed for &[u8] {}
impl Argument for &[u8] {
fn as_argument(&self) -> (ArgumentKind, impl AsRef<[u8]>) {
(ArgumentKind::Bytes, *self)
}
}
impl WriteToBuf for DisplayHint {
// This need not be inlined because the return value is Option<N> where N is 1, which is a
// compile-time constant.
#[inline(never)]
fn write(self, buf: &mut [u8]) -> Option<NonZeroUsize> {
let v: u8 = self.into();
write(Argument::DisplayHint.into(), &v.to_ne_bytes(), buf)
impl sealed::Sealed for &str {}
impl Argument for &str {
fn as_argument(&self) -> (ArgumentKind, impl AsRef<[u8]>) {
(ArgumentKind::Str, self.as_bytes())
}
}
impl sealed::Sealed for DisplayHint {}
impl Argument for DisplayHint {
fn as_argument(&self) -> (ArgumentKind, impl AsRef<[u8]>) {
let v: u8 = (*self).into();
(ArgumentKind::DisplayHint, v.to_ne_bytes())
}
}
impl<T> sealed::Sealed for *const T {}
impl<T> Argument for *const T {
fn as_argument(&self) -> (ArgumentKind, impl AsRef<[u8]>) {
(ArgumentKind::Pointer, (*self as usize).to_ne_bytes())
}
}
impl<T> sealed::Sealed for *mut T {}
impl<T> Argument for *mut T {
fn as_argument(&self) -> (ArgumentKind, impl AsRef<[u8]>) {
(ArgumentKind::Pointer, (*self as usize).to_ne_bytes())
}
}
fn wire_len(value: &[u8]) -> Option<[u8; 2]> {
match LogValueLength::try_from(value.len()) {
Ok(wire_len) => Some(wire_len.to_ne_bytes()),
Err(TryFromIntError { .. }) => None,
}
}
#[doc(hidden)]
#[inline(always)] // This function takes too many arguments to not be inlined.
pub fn write_record_header(
buf: &mut [u8],
target: &str,
level: Level,
module: &str,
file: &str,
line: u32,
num_args: usize,
) -> Option<NonZeroUsize> {
let level: u8 = level.into();
let mut size = 0;
macro_rules! write {
($tag:expr, $value:expr) => {{
let buf = buf.get_mut(size..)?;
let len = write($tag.into(), $value, buf)?;
size += len.get();
}};
pub struct Field<T>([u8; 1], [u8; 2], T);
impl<T: AsRef<[u8]>> Field<T> {
pub fn new(kind: impl Into<u8>, value: T) -> Option<Self> {
let wire_len = wire_len(value.as_ref())?;
Some(Self([kind.into()], wire_len, value))
}
pub fn with_bytes(&self, op: &mut impl FnMut(&[u8]) -> Option<()>) -> Option<()> {
let Self(kind, wire_len, value) = self;
op(&kind[..])?;
op(&wire_len[..])?;
op(value.as_ref())?;
Some(())
}
write!(RecordField::Target, target.as_bytes());
write!(RecordField::Level, &level.to_ne_bytes());
write!(RecordField::Module, module.as_bytes());
write!(RecordField::File, file.as_bytes());
write!(RecordField::Line, &line.to_ne_bytes());
write!(RecordField::NumArgs, &num_args.to_ne_bytes());
NonZeroUsize::new(size)
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn log_value_length_sufficient() {
assert!(
LOG_BUF_CAPACITY <= LogValueLength::MAX.into(),
"{} > {}",
LOG_BUF_CAPACITY,
LogValueLength::MAX
);
#[doc(hidden)]
pub struct Header<'a> {
target: Field<&'a [u8]>,
level: Field<[u8; 1]>,
module: Field<&'a [u8]>,
file: Field<&'a [u8]>,
line: Field<[u8; 4]>,
num_args: Field<[u8; 4]>,
}
impl<'a> Header<'a> {
pub fn new(
target: &'a str,
level: Level,
module: &'a str,
file: &'a str,
line: u32,
num_args: u32,
) -> Option<Self> {
let target = target.as_bytes();
let level: u8 = level.into();
let level = level.to_ne_bytes();
let module = module.as_bytes();
let file = file.as_bytes();
let line = line.to_ne_bytes();
let num_args = num_args.to_ne_bytes();
let target = Field::new(RecordFieldKind::Target, target)?;
let level = Field::new(RecordFieldKind::Level, level)?;
let module = Field::new(RecordFieldKind::Module, module)?;
let file = Field::new(RecordFieldKind::File, file)?;
let line = Field::new(RecordFieldKind::Line, line)?;
let num_args = Field::new(RecordFieldKind::NumArgs, num_args)?;
Some(Self {
target,
level,
module,
file,
line,
num_args,
})
}
pub fn with_bytes(&self, op: &mut impl FnMut(&[u8]) -> Option<()>) -> Option<()> {
let Self {
target,
level,
module,
file,
line,
num_args,
} = self;
target.with_bytes(op)?;
level.with_bytes(op)?;
module.with_bytes(op)?;
file.with_bytes(op)?;
line.with_bytes(op)?;
num_args.with_bytes(op)?;
Some(())
}
}

@ -1,12 +1,17 @@
[package]
description = "Proc macros used by aya-log-ebpf"
name = "aya-log-ebpf-macros"
version = "0.1.0"
description = "Proc macros used by aya-log-ebpf"
authors.workspace = true
edition.workspace = true
homepage.workspace = true
license.workspace = true
repository.workspace = true
homepage.workspace = true
edition.workspace = true
rust-version.workspace = true
[lints]
workspace = true
[dependencies]
aya-log-common = { path = "../aya-log-common", version = "^0.1.14", default-features = false }

@ -1,11 +1,11 @@
use aya_log_common::DisplayHint;
use aya_log_parser::{parse, Fragment};
use aya_log_parser::{Fragment, Parameter, parse};
use proc_macro2::{Ident, Span, TokenStream};
use quote::quote;
use syn::{
Error, Expr, LitStr, Result, Token,
parse::{Parse, ParseStream},
punctuated::Punctuated,
Error, Expr, LitStr, Result, Token,
};
pub(crate) struct LogArgs {
@ -21,7 +21,7 @@ mod kw {
}
impl Parse for LogArgs {
fn parse(input: ParseStream) -> Result<Self> {
fn parse(input: ParseStream<'_>) -> Result<Self> {
let ctx: Expr = input.parse()?;
input.parse::<Token![,]>()?;
@ -68,23 +68,28 @@ impl Parse for LogArgs {
}
}
pub(crate) fn log(args: LogArgs, level: Option<TokenStream>) -> Result<TokenStream> {
let ctx = args.ctx;
let target = match args.target {
pub(crate) fn log(args: LogArgs, level_expr: Option<TokenStream>) -> Result<TokenStream> {
let LogArgs {
ctx,
target,
level,
format_string,
formatting_args,
} = args;
let target = match target {
Some(t) => quote! { #t },
None => quote! { module_path!() },
};
let lvl: TokenStream = if let Some(l) = level {
l
} else if let Some(l) = args.level {
quote! { #l }
} else {
return Err(Error::new(
args.format_string.span(),
"missing `level` argument: try passing an `aya_log_ebpf::Level` value",
));
let level_expr = match level_expr {
Some(level_expr) => level_expr,
None => {
let level_expr = level.ok_or(Error::new(
format_string.span(),
"missing `level` argument: try passing an `aya_log_ebpf::Level` value",
))?;
quote! { #level_expr }
}
};
let format_string = args.format_string;
let format_string_val = format_string.value();
let fragments = parse(&format_string_val).map_err(|e| {
@ -100,12 +105,12 @@ pub(crate) fn log(args: LogArgs, level: Option<TokenStream>) -> Result<TokenStre
for fragment in fragments {
match fragment {
Fragment::Literal(s) => values.push(quote!(#s)),
Fragment::Parameter(p) => {
let arg = match args.formatting_args {
Some(ref args) => args[arg_i].clone(),
Fragment::Parameter(Parameter { hint }) => {
let arg = match &formatting_args {
Some(args) => &args[arg_i],
None => return Err(Error::new(format_string.span(), "no arguments provided")),
};
let (hint, formatter) = match p.hint {
let (hint, formatter) = match hint {
DisplayHint::Default => {
(quote!(DisplayHint::Default), quote!(DefaultFormatter))
}
@ -122,6 +127,9 @@ pub(crate) fn log(args: LogArgs, level: Option<TokenStream>) -> Result<TokenStre
DisplayHint::UpperMac => {
(quote!(DisplayHint::UpperMac), quote!(UpperMacFormatter))
}
DisplayHint::Pointer => {
(quote!(DisplayHint::Pointer), quote!(PointerFormatter))
}
};
let hint = quote!(::aya_log_ebpf::macro_support::#hint);
let arg = quote!(
@ -139,34 +147,96 @@ pub(crate) fn log(args: LogArgs, level: Option<TokenStream>) -> Result<TokenStre
}
}
let idents: Vec<_> = (0..values.len())
.map(|arg_i| quote::format_ident!("__arg{arg_i}"))
.collect();
let num_args = values.len();
let values_iter = values.iter();
let size = Ident::new("size", Span::mixed_site());
let len = Ident::new("len", Span::mixed_site());
let slice = Ident::new("slice", Span::mixed_site());
let record = Ident::new("record", Span::mixed_site());
let num_args = u32::try_from(num_args).map_err(|core::num::TryFromIntError { .. }| {
Error::new(
Span::call_site(),
format!("too many arguments: {num_args} overflows u32"),
)
})?;
let level = Ident::new("level", Span::mixed_site());
let header = Ident::new("__header", Span::call_site());
let tmp = Ident::new("__tmp", Span::call_site());
let kind = Ident::new("__kind", Span::call_site());
let value = Ident::new("__value", Span::call_site());
let size = Ident::new("__size", Span::call_site());
let capacity = Ident::new("__capacity", Span::call_site());
let pos = Ident::new("__pos", Span::call_site());
let op = Ident::new("__op", Span::call_site());
let buf = Ident::new("__buf", Span::call_site());
Ok(quote! {
match unsafe { &mut ::aya_log_ebpf::AYA_LOG_BUF }.get_ptr_mut(0).and_then(|ptr| unsafe { ptr.as_mut() }) {
None => {},
Some(::aya_log_ebpf::LogBuf { buf }) => {
{
let #level = #level_expr;
if ::aya_log_ebpf::macro_support::level_enabled(#level) {
// Silence unused variable warning; we may need ctx in the future.
let _ = #ctx;
let _: Option<()> = (|| {
let #size = ::aya_log_ebpf::write_record_header(
buf,
#target,
#lvl,
module_path!(),
file!(),
line!(),
#num_args,
)?;
let mut #size = #size.get();
use ::aya_log_ebpf::macro_support::{Header, Field, Argument, AYA_LOGS};
let #header = Header::new(
#target,
#level,
module_path!(),
file!(),
line!(),
#num_args,
)?;
#(
let #slice = buf.get_mut(#size..)?;
let #len = ::aya_log_ebpf::WriteToBuf::write(#values_iter, #slice)?;
#size += #len.get();
let #tmp = #values;
let (#kind, #value) = #tmp.as_argument();
let #idents = Field::new(#kind, #value)?;
)*
let #record = buf.get(..#size)?;
unsafe { &mut ::aya_log_ebpf::AYA_LOGS }.output(#ctx, #record, 0);
let mut #size = size_of::<::aya_log_ebpf::macro_support::LogValueLength>(); // For the size field itself.
let mut #op = |slice: &[u8]| {
#size += slice.len();
Some(())
};
#header.with_bytes(&mut #op)?;
#(
#idents.with_bytes(&mut #op)?;
)*
let #size = match ::aya_log_ebpf::macro_support::LogValueLength::try_from(#size) {
Ok(#size) => #size,
Err(core::num::TryFromIntError { .. }) => return None,
};
let #size = core::hint::black_box(#size);
let mut #capacity = 64;
while #capacity < #size {
#capacity <<= 1;
if #capacity > 8192 {
// The size is too large to log.
return None;
}
}
let mut #buf = core::hint::black_box(AYA_LOGS.reserve_bytes(#capacity.into(), 0)?);
match (|| {
let mut #pos = 0;
let mut #op = |slice: &[u8]| {
let #buf = #buf.get_mut(#pos..)?;
let #buf = #buf.get_mut(..slice.len())?;
#buf.copy_from_slice(slice);
#pos += slice.len();
Some(())
};
#op(#size.to_ne_bytes().as_ref())?;
#header.with_bytes(&mut #op)?;
#(
#idents.with_bytes(&mut #op)?;
)*
Some(())
})() {
Some(()) => #buf.submit(0),
None => #buf.discard(0),
}
Some(())
})();
}

@ -1,15 +1,23 @@
[package]
description = "A parser for the aya log format strings"
name = "aya-log-parser"
version = "0.1.13"
description = "A parser for the aya log format strings"
authors.workspace = true
edition.workspace = true
homepage.workspace = true
license.workspace = true
repository.workspace = true
homepage.workspace = true
edition.workspace = true
rust-version.workspace = true
[lints]
workspace = true
[dependencies]
aya-log-common = { path = "../aya-log-common", version = "^0.1.14", default-features = false }
[dev-dependencies]
assert_matches = { workspace = true }
[lib]
path = "src/lib.rs"

@ -52,39 +52,28 @@ fn push_literal(frag: &mut Vec<Fragment>, unescaped_literal: &str) -> Result<(),
Ok(())
}
/// Parses the display hint (e.g. the `ipv4` in `{:ipv4}`).
fn parse_display_hint(s: &str) -> Result<DisplayHint, String> {
Ok(match s {
"p" | "x" => DisplayHint::LowerHex,
"X" => DisplayHint::UpperHex,
"i" => DisplayHint::Ip,
"mac" => DisplayHint::LowerMac,
"MAC" => DisplayHint::UpperMac,
_ => return Err(format!("unknown display hint: {s:?}")),
})
}
/// Parse `Param` from the given `&str` which can specify an optional format
/// like `:x` or `:ipv4` (without curly braces, which are parsed by the `parse`
/// function).
fn parse_param(mut input: &str) -> Result<Parameter, String> {
const HINT_PREFIX: &str = ":";
// Then, optional hint
let mut hint = DisplayHint::Default;
if input.starts_with(HINT_PREFIX) {
// skip the prefix
input = &input[HINT_PREFIX.len()..];
if input.is_empty() {
return Err("malformed format string (missing display hint after ':')".into());
fn parse_param(input: &str) -> Result<Parameter, String> {
let hint = match input.strip_prefix(":") {
Some(input) => match input {
"" => return Err("malformed format string (missing display hint after ':')".into()),
"x" => DisplayHint::LowerHex,
"X" => DisplayHint::UpperHex,
"i" => DisplayHint::Ip,
"mac" => DisplayHint::LowerMac,
"MAC" => DisplayHint::UpperMac,
"p" => DisplayHint::Pointer,
input => return Err(format!("unknown display hint: {input:?}")),
},
None => {
if !input.is_empty() {
return Err(format!("unexpected content {input:?} in format string"));
}
DisplayHint::Default
}
hint = parse_display_hint(input)?;
} else if !input.is_empty() {
return Err(format!("unexpected content {input:?} in format string"));
}
};
Ok(Parameter { hint })
}
@ -140,6 +129,8 @@ pub fn parse(format_string: &str) -> Result<Vec<Fragment>, String> {
#[cfg(test)]
mod test {
use assert_matches::assert_matches;
use super::*;
#[test]
@ -165,13 +156,13 @@ mod test {
}),
Fragment::Literal(" lmao {} {something} ".into()),
Fragment::Parameter(Parameter {
hint: DisplayHint::LowerHex
hint: DisplayHint::Pointer
}),
])
);
assert!(parse("foo {:}").is_err());
assert!(parse("foo { bar").is_err());
assert!(parse("foo } bar").is_err());
assert!(parse("foo { bar }").is_err());
assert_matches!(parse("foo {:}"), Err(_));
assert_matches!(parse("foo { bar"), Err(_));
assert_matches!(parse("foo } bar"), Err(_));
assert_matches!(parse("foo { bar }"), Err(_));
}
}

File diff suppressed because one or more lines are too long

@ -1,27 +1,31 @@
[package]
name = "aya-log"
version = "0.2.0"
description = "A logging library for eBPF programs."
documentation = "https://docs.rs/aya-log"
keywords = ["bpf", "ebpf", "log", "logging"]
name = "aya-log"
readme = "README.md"
documentation = "https://docs.rs/aya-log"
version = "0.2.1"
authors.workspace = true
edition.workspace = true
homepage.workspace = true
license.workspace = true
repository.workspace = true
homepage.workspace = true
edition.workspace = true
rust-version.workspace = true
[lints]
workspace = true
[dependencies]
aya = { path = "../aya", version = "^0.12.0", features = ["async_tokio"] }
aya-log-common = { path = "../aya-log-common", version = "^0.1.14", default-features = false }
bytes = { workspace = true }
aya = { path = "../aya", version = "^0.13.1", default-features = false }
aya-log-common = { path = "../aya-log-common", version = "^0.1.15", default-features = false }
log = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true, features = ["rt"] }
[dev-dependencies]
env_logger = { workspace = true }
testing_logger = { workspace = true }
tokio = { workspace = true, features = ["net", "rt"] }
[lib]
path = "src/lib.rs"

@ -38,7 +38,15 @@ use aya_log::EbpfLogger;
env_logger::init();
// Will log using the default logger, which is TermLogger in this case
EbpfLogger::init(&mut bpf).unwrap();
let logger = EbpfLogger::init(&mut bpf).unwrap();
let mut logger = tokio::io::unix::AsyncFd::with_interest(logger, tokio::io::Interest::READABLE).unwrap();
tokio::task::spawn(async move {
loop {
let mut guard = logger.readable_mut().await.unwrap();
guard.get_inner_mut().flush();
guard.clear_ready();
}
});
```
### eBPF code
@ -61,3 +69,33 @@ fn try_xdp_firewall(ctx: XdpContext) -> Result<u32, ()> {
[aya]: https://github.com/aya-rs/aya
[log]: https://docs.rs/log
[env_logger]: https://docs.rs/env_logger
## Disabling log levels at load-time
eBPF instruction budgets are tight. Even if a log statement never executes at
runtime, the verifier must still evaluate its instructions unless it can prove
they're unreachable. `aya-log` now exposes a global `AYA_LOG_LEVEL` inside the
eBPF object allowing you to selectively enable levels before the program is
loaded.
By default all bits are set (all logging enabled). To disable all logging:
```rust
let mut bpf = aya::EbpfLoader::new()
.override_global(aya_log::LEVEL, &0, false /* must_exist */)
.load_file("prog.bpf.o")?;
# Ok::<(), aya::EbpfError>(())
```
Enable only Error and Warn:
```rust
let level = aya_log::Level::Warn as u8;
let mut bpf = EbpfLoader::new()
.override_global(aya_log::LEVEL, &level, false /* must_exist */)
.load_file("prog.bpf.o")?;
```
Because the level is placed in global read-only data, the verifier sees the
disabled branch as unreachable and prunes the logging instructions, reducing
overall instruction count and avoiding potential instruction limit issues.

File diff suppressed because it is too large Load Diff

@ -5,6 +5,219 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## 0.2.1 (2024-11-01)
### New Features
- <csr-id-8c79b71bd5699a686f33360520aa95c1a2895fa5/> Rename Bpf to Ebpf
And BpfLoader to EbpfLoader.
This also adds type aliases to preserve the use of the old names, making
updating to a new Aya release less of a burden. These aliases are marked
as deprecated since we'll likely remove them in a later release.
### Bug Fixes
- <csr-id-ca0c32d1076af81349a52235a4b6fb3937a697b3/> Fill bss maps with zeros
The loader should fill bss maps with zeros according to the size of the
ELF section.
Failure to do so yields weird verifier messages as follows:
```
cannot access ptr member ops with moff 0 in struct bpf_map with off 0 size 4
```
Reference to this in the cilium/ebpf code is here [1].
I could not find a reference in libbpf.
### Other
- <csr-id-366c599c2083baf72c40c816da2c530dec7fd612/> cgroup_iter_order NFPROTO* nf_inet_hooks
Adds the following to codegen:
- `bpf_cgroup_iter_order`: used in `bpf_link_info.iter.group.order`
- `NFPROTO_*`: used in `bpf_link_info.netfilter.pf`
- `nf_inet_hooks`: used in `bpf_link_info.netfilter.hooknum`
Include `linux/netfilter.h` in `linux_wrapper.h` for `NFPROTO_*` and
`nf_inet_hooks` to generate.
- <csr-id-fbb09304a2de0d8baf7ea20c9727fcd2e4fb7f41/> revamp MapInfo be more friendly with older kernels
Adds detection for whether a field is available in `MapInfo`:
- For `map_type()`, we return the new enum `MapType` instead of the integer
representation.
- For fields that can't be zero, we return `Option<NonZero*>` type.
- For `name_as_str()`, it now uses the feature probe `bpf_name()` to
detect if field is available.
Although the feature probe checks for program name, it can also be
used for map name since they were both introduced in the same commit.
- <csr-id-88f5ac31142f1657b41b1ee0f217dcd9125b210a/> revamp ProgramInfo be more friendly with older kernels
Purpose of this commit is to add detections for whether a field is
available in `ProgramInfo`.
- For `program_type()`, we return the new enum `ProgramType` instead of
the integer representation.
- For fields that we know cannot be zero, we return `Option<NonZero*>`
type.
- For `name_as_str()`, it now also uses the feature probe `bpf_name()`
to detect if field is available or not.
- Two additional feature probes are added for the fields:
- `prog_info_map_ids()` probe -> `map_ids()` field
- `prog_info_gpl_compatible()` probe -> `gpl_compatible()` field
With the `prog_info_map_ids()` probe, the previous implementation that
I had for `bpf_prog_get_info_by_fd()` is shortened to use the probe
instead of having to make 2 potential syscalls.
The `test_loaded_at()` test is also moved into info tests since it is
better related to the info tests.
- <csr-id-1634fa7188e40ed75da53517f1fdb7396c348c34/> add conversion u32 to enum type for prog, link, & attach type
Add conversion from u32 to program type, link type, and attach type.
Additionally, remove duplicate match statement for u32 conversion to
`BPF_MAP_TYPE_BLOOM_FILTER` & `BPF_MAP_TYPE_CGRP_STORAGE`.
New error `InvalidTypeBinding<T>` is created to represent when a
parsed/received value binding to a type is invalid.
This is used in the new conversions added here, and also replaces
`InvalidMapTypeError` in `TryFrom` for `bpf_map_type`.
- <csr-id-b513af12e8baa5c5097eaf0afdae61a830c3f877/> add archs powerpc64 and s390x to aya
bpfman, a project using aya, has a requirement to support powerpc64 and
s390x architectures. Adding these two architectures to aya.
- <csr-id-b06ff402780b80862933791831c578e4c339fc96/> Generate new bindings
### Test
- <csr-id-4dc4b5ccd48bd86e2cc59ad7386514c1531450af/> adjust test to not use byte arrays
Where possible, replace the hardcoded byte arrays in the tests with the
structs they represent, then convert the structs to byte arrays.
- <csr-id-eef7346fb2231f8741410381198015cceeebfac9/> adjust test byte arrays for big endian
Adding support for s390x (big endian architecture) and found that some
of the unit tests have structures and files implemented as byte arrays.
They are all coded as little endian and need a big endian version to
work properly.
### New Features (BREAKING)
- <csr-id-fd48c55466a23953ce7a4912306e1acf059b498b/> Rename BpfRelocationError -> EbpfRelocationError
- <csr-id-cf3e2ca677c81224368fb2838ebc5b10ee98419a/> Rename BpfSectionKind to EbpfSectionKind
### Commit Statistics
<csr-read-only-do-not-edit/>
- 25 commits contributed to the release over the course of 241 calendar days.
- 247 days passed between releases.
- 12 commits were understood as [conventional](https://www.conventionalcommits.org).
- 0 issues like '(#ID)' were seen in commit messages
### Commit Details
<csr-read-only-do-not-edit/>
<details><summary>view details</summary>
* **Uncategorized**
- Merge pull request #1073 from dave-tucker/reloc-bug ([`b2ac9fe`](https://github.com/aya-rs/aya/commit/b2ac9fe85db6c25d0b8155a75a2df96a80a19811))
- Fill bss maps with zeros ([`ca0c32d`](https://github.com/aya-rs/aya/commit/ca0c32d1076af81349a52235a4b6fb3937a697b3))
- Merge pull request #1055 from aya-rs/codegen ([`59b3873`](https://github.com/aya-rs/aya/commit/59b3873a92d1eb49ca1008cb193e962fa95b3e97))
- [codegen] Update libbpf to 80b16457cb23db4d633b17ba0305f29daa2eb307 ([`f8ad84c`](https://github.com/aya-rs/aya/commit/f8ad84c3d322d414f27375044ba694a169abfa76))
- Cgroup_iter_order NFPROTO* nf_inet_hooks ([`366c599`](https://github.com/aya-rs/aya/commit/366c599c2083baf72c40c816da2c530dec7fd612))
- Release aya-obj v0.2.0, aya v0.13.0, safety bump aya v0.13.0 ([`c169b72`](https://github.com/aya-rs/aya/commit/c169b727e6b8f8c2dda57f54b8c77f8b551025c6))
- Appease clippy ([`aa240ba`](https://github.com/aya-rs/aya/commit/aa240baadf99d3fea0477a9b3966789b0f4ffe57))
- Merge pull request #1007 from tyrone-wu/aya/info-api ([`15eb935`](https://github.com/aya-rs/aya/commit/15eb935bce6d41fb67189c48ce582b074544e0ed))
- Revamp MapInfo be more friendly with older kernels ([`fbb0930`](https://github.com/aya-rs/aya/commit/fbb09304a2de0d8baf7ea20c9727fcd2e4fb7f41))
- Revamp ProgramInfo be more friendly with older kernels ([`88f5ac3`](https://github.com/aya-rs/aya/commit/88f5ac31142f1657b41b1ee0f217dcd9125b210a))
- Add conversion u32 to enum type for prog, link, & attach type ([`1634fa7`](https://github.com/aya-rs/aya/commit/1634fa7188e40ed75da53517f1fdb7396c348c34))
- Merge pull request #974 from Billy99/billy99-arch-ppc64-s390x ([`ab5e688`](https://github.com/aya-rs/aya/commit/ab5e688fd49fcfb402ad47d51cb445437fbd8cb7))
- Adjust test to not use byte arrays ([`4dc4b5c`](https://github.com/aya-rs/aya/commit/4dc4b5ccd48bd86e2cc59ad7386514c1531450af))
- Add archs powerpc64 and s390x to aya ([`b513af1`](https://github.com/aya-rs/aya/commit/b513af12e8baa5c5097eaf0afdae61a830c3f877))
- Adjust test byte arrays for big endian ([`eef7346`](https://github.com/aya-rs/aya/commit/eef7346fb2231f8741410381198015cceeebfac9))
- Merge pull request #989 from aya-rs/codegen ([`8015e10`](https://github.com/aya-rs/aya/commit/8015e100796c550804ccf8fea691c63ec1ac36b8))
- [codegen] Update libbpf to 686f600bca59e107af4040d0838ca2b02c14ff50 ([`8d7446e`](https://github.com/aya-rs/aya/commit/8d7446e01132fe1751605b87a6b4a0165273de15))
- Merge pull request #978 from aya-rs/codegen ([`06aa5c8`](https://github.com/aya-rs/aya/commit/06aa5c8ed344bd0d85096a0fd033ff0bd90a2f88))
- [codegen] Update libbpf to c1a6c770c46c6e78ad6755bf596c23a4e6f6b216 ([`8b50a6a`](https://github.com/aya-rs/aya/commit/8b50a6a5738b5a57121205490d26805c74cb63de))
- Document miri skip reasons ([`35962a4`](https://github.com/aya-rs/aya/commit/35962a4794484aa3b37dadc98a70a659fd107b75))
- Generate new bindings ([`b06ff40`](https://github.com/aya-rs/aya/commit/b06ff402780b80862933791831c578e4c339fc96))
- Merge pull request #528 from dave-tucker/rename-all-the-things ([`63d8d4d`](https://github.com/aya-rs/aya/commit/63d8d4d34bdbbee149047dc0a5e9c2b191f3b32d))
- Rename Bpf to Ebpf ([`8c79b71`](https://github.com/aya-rs/aya/commit/8c79b71bd5699a686f33360520aa95c1a2895fa5))
- Rename BpfRelocationError -> EbpfRelocationError ([`fd48c55`](https://github.com/aya-rs/aya/commit/fd48c55466a23953ce7a4912306e1acf059b498b))
- Rename BpfSectionKind to EbpfSectionKind ([`cf3e2ca`](https://github.com/aya-rs/aya/commit/cf3e2ca677c81224368fb2838ebc5b10ee98419a))
</details>
## 0.2.0 (2024-10-09)
<csr-id-fbb09304a2de0d8baf7ea20c9727fcd2e4fb7f41/>
<csr-id-88f5ac31142f1657b41b1ee0f217dcd9125b210a/>
<csr-id-1634fa7188e40ed75da53517f1fdb7396c348c34/>
<csr-id-b513af12e8baa5c5097eaf0afdae61a830c3f877/>
<csr-id-b06ff402780b80862933791831c578e4c339fc96/>
<csr-id-4dc4b5ccd48bd86e2cc59ad7386514c1531450af/>
<csr-id-eef7346fb2231f8741410381198015cceeebfac9/>
### New Features
- <csr-id-8c79b71bd5699a686f33360520aa95c1a2895fa5/> Rename Bpf to Ebpf
And BpfLoader to EbpfLoader.
This also adds type aliases to preserve the use of the old names, making
updating to a new Aya release less of a burden. These aliases are marked
as deprecated since we'll likely remove them in a later release.
### Other
- <csr-id-fbb09304a2de0d8baf7ea20c9727fcd2e4fb7f41/> revamp MapInfo be more friendly with older kernels
Adds detection for whether a field is available in `MapInfo`:
- For `map_type()`, we return the new enum `MapType` instead of the integer
representation.
- For fields that can't be zero, we return `Option<NonZero*>` type.
- For `name_as_str()`, it now uses the feature probe `bpf_name()` to
detect if field is available.
Although the feature probe checks for program name, it can also be
used for map name since they were both introduced in the same commit.
- <csr-id-88f5ac31142f1657b41b1ee0f217dcd9125b210a/> revamp ProgramInfo be more friendly with older kernels
Purpose of this commit is to add detections for whether a field is
available in `ProgramInfo`.
- For `program_type()`, we return the new enum `ProgramType` instead of
the integer representation.
- For fields that we know cannot be zero, we return `Option<NonZero*>`
type.
- For `name_as_str()`, it now also uses the feature probe `bpf_name()`
to detect if field is available or not.
- Two additional feature probes are added for the fields:
- `prog_info_map_ids()` probe -> `map_ids()` field
- `prog_info_gpl_compatible()` probe -> `gpl_compatible()` field
With the `prog_info_map_ids()` probe, the previous implementation that
I had for `bpf_prog_get_info_by_fd()` is shortened to use the probe
instead of having to make 2 potential syscalls.
The `test_loaded_at()` test is also moved into info tests since it is
better related to the info tests.
- <csr-id-1634fa7188e40ed75da53517f1fdb7396c348c34/> add conversion u32 to enum type for prog, link, & attach type
Add conversion from u32 to program type, link type, and attach type.
Additionally, remove duplicate match statement for u32 conversion to
`BPF_MAP_TYPE_BLOOM_FILTER` & `BPF_MAP_TYPE_CGRP_STORAGE`.
New error `InvalidTypeBinding<T>` is created to represent when a
parsed/received value binding to a type is invalid.
This is used in the new conversions added here, and also replaces
`InvalidMapTypeError` in `TryFrom` for `bpf_map_type`.
- <csr-id-b513af12e8baa5c5097eaf0afdae61a830c3f877/> add archs powerpc64 and s390x to aya
bpfman, a project using aya, has a requirement to support powerpc64 and
s390x architectures. Adding these two architectures to aya.
- <csr-id-b06ff402780b80862933791831c578e4c339fc96/> Generate new bindings
### Test
- <csr-id-4dc4b5ccd48bd86e2cc59ad7386514c1531450af/> adjust test to not use byte arrays
Where possible, replace the hardcoded byte arrays in the tests with the
structs they represent, then convert the structs to byte arrays.
- <csr-id-eef7346fb2231f8741410381198015cceeebfac9/> adjust test byte arrays for big endian
Adding support for s390x (big endian architecture) and found that some
of the unit tests have structures and files implemented as byte arrays.
They are all coded as little endian and need a big endian version to
work properly.
### New Features (BREAKING)
- <csr-id-fd48c55466a23953ce7a4912306e1acf059b498b/> Rename BpfRelocationError -> EbpfRelocationError
- <csr-id-cf3e2ca677c81224368fb2838ebc5b10ee98419a/> Rename BpfSectionKind to EbpfSectionKind
## 0.1.0 (2024-02-28)
<csr-id-b3e7ef741c5b8d09fc7dc8302576f8174be75ff4/>
@ -417,7 +630,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
<csr-read-only-do-not-edit/>
- 145 commits contributed to the release over the course of 422 calendar days.
- 146 commits contributed to the release.
- 63 commits were understood as [conventional](https://www.conventionalcommits.org).
- 1 unique issue was worked on: [#608](https://github.com/aya-rs/aya/issues/608)
@ -430,6 +643,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
* **[#608](https://github.com/aya-rs/aya/issues/608)**
- Fix load errors for empty (but existent) BTF/BTF.ext sections ([`5894c4c`](https://github.com/aya-rs/aya/commit/5894c4ce82948c7e5fe766f41b690d036fcca907))
* **Uncategorized**
- Release aya-obj v0.1.0, aya v0.12.0, safety bump aya-log v0.2.0 ([`0e99fa0`](https://github.com/aya-rs/aya/commit/0e99fa0f340b2fb2e0da3b330aa6555322a77eec))
- Merge pull request #891 from dave-tucker/changelog ([`431ce23`](https://github.com/aya-rs/aya/commit/431ce23f27ef5c36a6b38c73b38f23b1cf007900))
- Add CHANGELOG ([`72e8aab`](https://github.com/aya-rs/aya/commit/72e8aab6c8be8663c5b6ff6b606a51debf512f7d))
- Appease new nightly clippy lints ([`3369169`](https://github.com/aya-rs/aya/commit/3369169aaca6510a47318fc29bbdb801b60b1c21))

@ -1,20 +1,24 @@
[package]
name = "aya-obj"
version = "0.1.0"
description = "An eBPF object file parsing library with BTF and relocation support."
documentation = "https://docs.rs/aya-obj"
keywords = ["bpf", "btf", "ebpf", "elf", "object"]
name = "aya-obj"
readme = "README.md"
documentation = "https://docs.rs/aya-obj"
version = "0.2.1"
authors.workspace = true
edition.workspace = true
homepage.workspace = true
license.workspace = true
repository.workspace = true
homepage.workspace = true
edition.workspace = true
rust-version.workspace = true
[lints]
workspace = true
[dependencies]
bytes = { workspace = true }
core-error = { workspace = true, default-features = true }
hashbrown = { workspace = true, default-features = true }
hashbrown = { workspace = true, features = ["default-hasher", "equivalent"] }
log = { workspace = true }
object = { workspace = true, features = ["elf", "read_core"] }
thiserror = { workspace = true }
@ -24,4 +28,4 @@ assert_matches = { workspace = true }
rbpf = { workspace = true }
[features]
std = []
std = ["thiserror/std"]

@ -1,13 +1,9 @@
#include <asm-generic/socket.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/if_link.h>
#include <linux/netfilter.h>
#include <linux/perf_event.h>
#include <linux/pkt_cls.h>
#include <linux/pkt_sched.h>
#include <linux/rtnetlink.h>
/* workaround the fact that bindgen can't parse the IOC macros */
int AYA_PERF_EVENT_IOC_ENABLE = PERF_EVENT_IOC_ENABLE;
int AYA_PERF_EVENT_IOC_DISABLE = PERF_EVENT_IOC_DISABLE;
int AYA_PERF_EVENT_IOC_SET_BPF = PERF_EVENT_IOC_SET_BPF;
#include <sys/socket.h>

@ -5,27 +5,30 @@ use alloc::{
vec,
vec::Vec,
};
use core::{ffi::CStr, mem, ptr};
use core::{
cell::OnceCell,
ffi::{CStr, FromBytesUntilNulError},
mem, ptr,
};
use bytes::BufMut;
use bytes::BufMut as _;
use log::debug;
use object::{Endianness, SectionIndex};
#[cfg(not(feature = "std"))]
use crate::std;
use crate::{
Object,
btf::{
Array, BtfEnum, BtfEnum64, BtfKind, BtfMember, BtfType, Const, DataSec, DataSecEntry, Enum,
Enum64, Enum64Fallback, Enum64VariantFallback, FuncInfo, FuncLinkage, Int, IntEncoding,
LineInfo, Struct, Typedef, Union, Var, VarLinkage,
info::{FuncSecInfo, LineSecInfo},
relocation::Relocation,
Array, BtfEnum, BtfKind, BtfMember, BtfType, Const, Enum, FuncInfo, FuncLinkage, Int,
IntEncoding, LineInfo, Struct, Typedef, Union, VarLinkage,
},
generated::{btf_ext_header, btf_header},
util::{bytes_of, HashMap},
Object,
util::{HashMap, bytes_of},
};
pub(crate) const MAX_RESOLVE_DEPTH: u8 = 32;
pub(crate) const MAX_RESOLVE_DEPTH: usize = 32;
pub(crate) const MAX_SPEC_LEN: usize = 64;
/// The error type returned when `BTF` operations fail.
@ -98,21 +101,21 @@ pub enum BtfError {
},
/// unknown BTF type id
#[error("Unknown BTF type id `{type_id}`")]
#[error("unknown BTF type id `{type_id}`")]
UnknownBtfType {
/// type id
type_id: u32,
},
/// unexpected btf type id
#[error("Unexpected BTF type id `{type_id}`")]
#[error("unexpected BTF type id `{type_id}`")]
UnexpectedBtfType {
/// type id
type_id: u32,
},
/// unknown BTF type
#[error("Unknown BTF type `{type_name}`")]
#[error("unknown BTF type `{type_name}`")]
UnknownBtfTypeName {
/// type name
type_name: String,
@ -127,7 +130,7 @@ pub enum BtfError {
#[cfg(feature = "std")]
/// Loading the btf failed
#[error("the BPF_BTF_LOAD syscall failed. Verifier output: {verifier_log}")]
#[error("the BPF_BTF_LOAD syscall returned {io_error}. Verifier output: {verifier_log}")]
LoadError {
/// The [`std::io::Error`] returned by the `BPF_BTF_LOAD` syscall.
#[source]
@ -157,15 +160,19 @@ pub enum BtfError {
/// unable to get symbol name
#[error("Unable to get symbol name")]
InvalidSymbolName,
/// BTF map wrapper's layout is unexpected
#[error("BTF map wrapper's layout is unexpected: {0:?}")]
UnexpectedBtfMapWrapperLayout(Struct),
}
/// Available BTF features
#[derive(Default, Debug)]
#[allow(missing_docs)]
pub struct BtfFeatures {
btf_func: bool,
btf_func_global: bool,
btf_datasec: bool,
btf_datasec_zero: bool,
btf_float: bool,
btf_decl_tag: bool,
btf_type_tag: bool,
@ -174,19 +181,22 @@ pub struct BtfFeatures {
impl BtfFeatures {
#[doc(hidden)]
#[expect(clippy::too_many_arguments, reason = "this interface is terrible")]
pub fn new(
btf_func: bool,
btf_func_global: bool,
btf_datasec: bool,
btf_datasec_zero: bool,
btf_float: bool,
btf_decl_tag: bool,
btf_type_tag: bool,
btf_enum64: bool,
) -> Self {
BtfFeatures {
Self {
btf_func,
btf_func_global,
btf_datasec,
btf_datasec_zero,
btf_float,
btf_decl_tag,
btf_type_tag,
@ -209,6 +219,11 @@ impl BtfFeatures {
self.btf_datasec
}
/// Returns true if zero-length DATASec entries are accepted.
pub fn btf_datasec_zero(&self) -> bool {
self.btf_datasec_zero
}
/// Returns true if the BTF_FLOAT is supported.
pub fn btf_float(&self) -> bool {
self.btf_float
@ -251,10 +266,19 @@ pub struct Btf {
_endianness: Endianness,
}
fn add_type(header: &mut btf_header, types: &mut BtfTypes, btf_type: BtfType) -> u32 {
let size = btf_type.type_info_size() as u32;
let type_id = types.len();
types.push(btf_type);
header.type_len += size;
header.str_off += size;
type_id as u32
}
impl Btf {
/// Creates a new empty instance with its header initialized
pub fn new() -> Btf {
Btf {
pub fn new() -> Self {
Self {
header: btf_header {
magic: 0xeb9f,
version: 0x01,
@ -282,7 +306,7 @@ impl Btf {
/// Adds a string to BTF metadata, returning an offset
pub fn add_string(&mut self, name: &str) -> u32 {
let str = name.bytes().chain(std::iter::once(0));
let str = name.bytes().chain(core::iter::once(0));
let name_offset = self.strings.len();
self.strings.extend(str);
self.header.str_len = self.strings.len() as u32;
@ -291,18 +315,13 @@ impl Btf {
/// Adds a type to BTF metadata, returning a type id
pub fn add_type(&mut self, btf_type: BtfType) -> u32 {
let size = btf_type.type_info_size() as u32;
let type_id = self.types.len();
self.types.push(btf_type);
self.header.type_len += size;
self.header.str_off += size;
type_id as u32
add_type(&mut self.header, &mut self.types, btf_type)
}
/// Loads BTF metadata from `/sys/kernel/btf/vmlinux`.
#[cfg(feature = "std")]
pub fn from_sys_fs() -> Result<Btf, BtfError> {
Btf::parse_file("/sys/kernel/btf/vmlinux", Endianness::default())
pub fn from_sys_fs() -> Result<Self, BtfError> {
Self::parse_file("/sys/kernel/btf/vmlinux", Endianness::default())
}
/// Loads BTF metadata from the given `path`.
@ -310,10 +329,10 @@ impl Btf {
pub fn parse_file<P: AsRef<std::path::Path>>(
path: P,
endianness: Endianness,
) -> Result<Btf, BtfError> {
use std::{borrow::ToOwned, fs};
) -> Result<Self, BtfError> {
use std::{borrow::ToOwned as _, fs};
let path = path.as_ref();
Btf::parse(
Self::parse(
&fs::read(path).map_err(|error| BtfError::FileError {
path: path.to_owned(),
error,
@ -323,7 +342,7 @@ impl Btf {
}
/// Parses BTF from binary data of the given endianness
pub fn parse(data: &[u8], endianness: Endianness) -> Result<Btf, BtfError> {
pub fn parse(data: &[u8], endianness: Endianness) -> Result<Self, BtfError> {
if data.len() < mem::size_of::<btf_header>() {
return Err(BtfError::InvalidHeader);
}
@ -338,9 +357,9 @@ impl Btf {
}
let strings = data[str_off..str_off + str_len].to_vec();
let types = Btf::read_type_info(&header, data, endianness)?;
let types = Self::read_type_info(&header, data, endianness)?;
Ok(Btf {
Ok(Self {
header,
strings,
types,
@ -389,13 +408,9 @@ impl Btf {
}
let offset = offset as usize;
let nul = self.strings[offset..]
.iter()
.position(|c| *c == 0u8)
.ok_or(BtfError::InvalidStringOffset { offset })?;
let s = CStr::from_bytes_with_nul(&self.strings[offset..=offset + nul])
.map_err(|_| BtfError::InvalidStringOffset { offset })?;
let s = CStr::from_bytes_until_nul(&self.strings[offset..])
.map_err(|FromBytesUntilNulError { .. }| BtfError::InvalidStringOffset { offset })?;
Ok(s.to_string_lossy())
}
@ -436,7 +451,7 @@ impl Btf {
pub(crate) fn type_size(&self, root_type_id: u32) -> Result<usize, BtfError> {
let mut type_id = root_type_id;
let mut n_elems = 1;
for _ in 0..MAX_RESOLVE_DEPTH {
for () in core::iter::repeat_n((), MAX_RESOLVE_DEPTH) {
let ty = self.types.type_by_id(type_id)?;
let size = match ty {
BtfType::Array(Array { array, .. }) => {
@ -467,7 +482,6 @@ impl Btf {
pub fn to_bytes(&self) -> Vec<u8> {
// Safety: btf_header is POD
let mut buf = unsafe { bytes_of::<btf_header>(&self.header).to_vec() };
// Skip the first type since it's always BtfType::Unknown for type_by_id to work
buf.extend(self.types.to_bytes());
buf.put(self.strings.as_slice());
buf
@ -487,19 +501,8 @@ impl Btf {
symbol_offsets: &HashMap<String, u64>,
features: &BtfFeatures,
) -> Result<(), BtfError> {
// ENUM64 placeholder type needs to be added before we take ownership of
// self.types to ensure that the offsets in the BtfHeader are correct.
let placeholder_name = self.add_string("enum64_placeholder");
let enum64_placeholder_id = (!features.btf_enum64
&& self.types().any(|t| t.kind() == BtfKind::Enum64))
.then(|| {
self.add_type(BtfType::Int(Int::new(
placeholder_name,
1,
IntEncoding::None,
0,
)))
});
let enum64_placeholder_id = OnceCell::new();
let filler_var_id = OnceCell::new();
let mut types = mem::take(&mut self.types);
for i in 0..types.types.len() {
let t = &mut types.types[i];
@ -519,7 +522,7 @@ impl Btf {
}
// Sanitize DATASEC if they are not supported.
BtfType::DataSec(d) if !features.btf_datasec => {
debug!("{}: not supported. replacing with STRUCT", kind);
debug!("{kind}: not supported. replacing with STRUCT");
// STRUCT aren't allowed to have "." in their name, fixup this if needed.
let mut name_offset = d.name_offset;
@ -532,7 +535,7 @@ impl Btf {
name_offset = self.add_string(&fixed_name);
}
let entries = std::mem::take(&mut d.entries);
let entries = core::mem::take(&mut d.entries);
let members = entries
.iter()
@ -568,7 +571,7 @@ impl Btf {
// There are some cases when the compiler does indeed populate the size.
if d.size > 0 {
debug!("{} {}: size fixup not required", kind, name);
debug!("{kind} {name}: size fixup not required");
} else {
// We need to get the size of the section from the ELF file.
// Fortunately, we cached these when parsing it initially
@ -579,7 +582,7 @@ impl Btf {
return Err(BtfError::UnknownSectionSize { section_name: name });
}
};
debug!("{} {}: fixup size to {}", kind, name, size);
debug!("{kind} {name}: fixup size to {size}");
d.size = *size as u32;
// The Vec<btf_var_secinfo> contains BTF_KIND_VAR sections
@ -587,17 +590,57 @@ impl Btf {
// we need to get the offset from the ELF file.
// This was also cached during initial parsing and
// we can query by name in symbol_offsets.
let old_size = d.type_info_size();
let mut entries = mem::take(&mut d.entries);
let mut fixed_section = d.clone();
let mut section_size = d.size;
let name_offset = d.name_offset;
// Kernels before 5.12 reject zero-length DATASEC. See
// https://github.com/torvalds/linux/commit/13ca51d5eb358edcb673afccb48c3440b9fda21b.
if entries.is_empty() && !features.btf_datasec_zero {
let filler_var_id = *filler_var_id.get_or_init(|| {
let filler_type_name = self.add_string("__aya_datasec_filler_type");
let filler_type_id = add_type(
&mut self.header,
&mut types,
BtfType::Int(Int::new(
filler_type_name,
1,
IntEncoding::None,
0,
)),
);
let filler_var_name = self.add_string("__aya_datasec_filler");
add_type(
&mut self.header,
&mut types,
BtfType::Var(Var::new(
filler_var_name,
filler_type_id,
VarLinkage::Static,
)),
)
});
let filler_len = section_size.max(1);
debug!(
"{kind} {name}: injecting filler entry for zero-length DATASEC (len={filler_len})"
);
entries.push(DataSecEntry {
btf_type: filler_var_id,
offset: 0,
size: filler_len,
});
if section_size == 0 {
section_size = filler_len;
}
}
for e in entries.iter_mut() {
if let BtfType::Var(var) = types.type_by_id(e.btf_type)? {
let var_name = self.string_at(var.name_offset)?;
if var.linkage == VarLinkage::Static {
debug!(
"{} {}: VAR {}: fixup not required",
kind, name, var_name
);
debug!("{kind} {name}: VAR {var_name}: fixup not required");
continue;
}
@ -610,17 +653,20 @@ impl Btf {
}
};
e.offset = *offset as u32;
debug!(
"{} {}: VAR {}: fixup offset {}",
kind, name, var_name, offset
);
debug!("{kind} {name}: VAR {var_name}: fixup offset {offset}");
} else {
return Err(BtfError::InvalidDatasec);
}
}
fixed_section.entries = entries;
let fixed_section = DataSec::new(name_offset, entries, section_size);
let new_size = fixed_section.type_info_size();
if new_size != old_size {
self.header.type_len =
self.header.type_len - old_size as u32 + new_size as u32;
self.header.str_off = self.header.type_len;
}
// Must reborrow here because we borrow `types` immutably above.
// Must reborrow here because we borrow `types` above.
let t = &mut types.types[i];
*t = BtfType::DataSec(fixed_section);
}
@ -635,7 +681,7 @@ impl Btf {
}
// Sanitize FUNC_PROTO.
BtfType::FuncProto(ty) if !features.btf_func => {
debug!("{}: not supported. replacing with ENUM", kind);
debug!("{kind}: not supported. replacing with ENUM");
let members: Vec<BtfEnum> = ty
.params
.iter()
@ -652,7 +698,7 @@ impl Btf {
let name = self.string_at(ty.name_offset)?;
// Sanitize FUNC.
if !features.btf_func {
debug!("{}: not supported. replacing with TYPEDEF", kind);
debug!("{kind}: not supported. replacing with TYPEDEF");
*t = BtfType::Typedef(Typedef::new(ty.name_offset, ty.btf_type));
} else if !features.btf_func_global
|| name == "memset"
@ -668,8 +714,7 @@ impl Btf {
if ty.linkage() == FuncLinkage::Global {
if !features.btf_func_global {
debug!(
"{}: BTF_FUNC_GLOBAL not supported. replacing with BTF_FUNC_STATIC",
kind
"{kind}: BTF_FUNC_GLOBAL not supported. replacing with BTF_FUNC_STATIC",
);
} else {
debug!("changing FUNC {name} linkage to BTF_FUNC_STATIC");
@ -680,39 +725,85 @@ impl Btf {
}
// Sanitize FLOAT.
BtfType::Float(ty) if !features.btf_float => {
debug!("{}: not supported. replacing with STRUCT", kind);
debug!("{kind}: not supported. replacing with STRUCT");
*t = BtfType::Struct(Struct::new(0, vec![], ty.size));
}
// Sanitize DECL_TAG.
BtfType::DeclTag(ty) if !features.btf_decl_tag => {
debug!("{}: not supported. replacing with INT", kind);
debug!("{kind}: not supported. replacing with INT");
*t = BtfType::Int(Int::new(ty.name_offset, 1, IntEncoding::None, 0));
}
// Sanitize TYPE_TAG.
BtfType::TypeTag(ty) if !features.btf_type_tag => {
debug!("{}: not supported. replacing with CONST", kind);
debug!("{kind}: not supported. replacing with CONST");
*t = BtfType::Const(Const::new(ty.btf_type));
}
// Sanitize Signed ENUMs.
BtfType::Enum(ty) if !features.btf_enum64 && ty.is_signed() => {
debug!("{}: signed ENUMs not supported. Marking as unsigned", kind);
debug!("{kind}: signed ENUMs not supported. Marking as unsigned");
ty.set_signed(false);
}
// Sanitize ENUM64.
BtfType::Enum64(ty) if !features.btf_enum64 => {
debug!("{}: not supported. replacing with UNION", kind);
let placeholder_id =
enum64_placeholder_id.expect("enum64_placeholder_id must be set");
let members: Vec<BtfMember> = ty
.variants
.iter()
.map(|v| BtfMember {
name_offset: v.name_offset,
btf_type: placeholder_id,
offset: 0,
})
.collect();
*t = BtfType::Union(Union::new(ty.name_offset, members.len() as u32, members));
BtfType::Enum64(ty) => {
// Kernels before 6.0 do not support ENUM64. See
// https://github.com/torvalds/linux/commit/6089fb325cf737eeb2c4d236c94697112ca860da.
if !features.btf_enum64 {
debug!("{kind}: not supported. replacing with UNION");
// `ty` is borrowed from `types` and we use that borrow
// below, so we must not borrow it again in the
// get_or_init closure.
let is_signed = ty.is_signed();
let Enum64 {
name_offset,
size,
variants,
..
} = ty;
let (name_offset, size, variants) =
(*name_offset, *size, mem::take(variants));
let fallback = Enum64Fallback {
signed: is_signed,
variants: variants
.iter()
.copied()
.map(
|BtfEnum64 {
name_offset,
value_high,
value_low,
}| Enum64VariantFallback {
name_offset,
value: (u64::from(value_high) << 32) | u64::from(value_low),
},
)
.collect(),
};
// The rewritten UNION still needs a concrete member type. Share a single
// synthetic INT placeholder between every downgraded ENUM64.
let placeholder_id = enum64_placeholder_id.get_or_init(|| {
let placeholder_name = self.add_string("enum64_placeholder");
add_type(
&mut self.header,
&mut types,
BtfType::Int(Int::new(placeholder_name, 1, IntEncoding::None, 0)),
)
});
let members: Vec<BtfMember> = variants
.iter()
.map(|v| BtfMember {
name_offset: v.name_offset,
btf_type: *placeholder_id,
offset: 0,
})
.collect();
// Must reborrow here because we borrow `types` above.
let t = &mut types.types[i];
*t = BtfType::Union(Union::new(name_offset, size, members, Some(fallback)));
}
}
// The type does not need fixing up or sanitization.
_ => {}
@ -737,7 +828,7 @@ impl Object {
&mut self,
features: &BtfFeatures,
) -> Result<Option<&Btf>, BtfError> {
if let Some(ref mut obj_btf) = &mut self.btf {
if let Some(obj_btf) = &mut self.btf {
if obj_btf.is_empty() {
return Ok(None);
}
@ -755,8 +846,8 @@ impl Object {
}
unsafe fn read_btf_header(data: &[u8]) -> btf_header {
// safety: btf_header is POD so read_unaligned is safe
ptr::read_unaligned(data.as_ptr() as *const btf_header)
// Safety: Btf_header is POD so read_unaligned is safe
unsafe { ptr::read_unaligned(data.as_ptr().cast()) }
}
/// Data in the `.BTF.ext` section
@ -774,11 +865,7 @@ pub struct BtfExt {
}
impl BtfExt {
pub(crate) fn parse(
data: &[u8],
endianness: Endianness,
btf: &Btf,
) -> Result<BtfExt, BtfError> {
pub(crate) fn parse(data: &[u8], endianness: Endianness, btf: &Btf) -> Result<Self, BtfError> {
#[repr(C)]
#[derive(Debug, Copy, Clone)]
struct MinimalHeader {
@ -788,7 +875,7 @@ impl BtfExt {
pub hdr_len: u32,
}
if data.len() < std::mem::size_of::<MinimalHeader>() {
if data.len() < core::mem::size_of::<MinimalHeader>() {
return Err(BtfError::InvalidHeader);
}
@ -796,7 +883,7 @@ impl BtfExt {
// first find the actual size of the header by converting into the minimal valid header
// Safety: MinimalHeader is POD so read_unaligned is safe
let minimal_header = unsafe {
ptr::read_unaligned::<MinimalHeader>(data.as_ptr() as *const MinimalHeader)
ptr::read_unaligned::<MinimalHeader>(data.as_ptr().cast::<MinimalHeader>())
};
let len_to_read = minimal_header.hdr_len as usize;
@ -809,18 +896,18 @@ impl BtfExt {
// forwards compatibility: if newer headers are bigger
// than the pre-generated btf_ext_header we should only
// read up to btf_ext_header
let len_to_read = len_to_read.min(std::mem::size_of::<btf_ext_header>());
let len_to_read = len_to_read.min(core::mem::size_of::<btf_ext_header>());
// now create our full-fledge header; but start with it
// zeroed out so unavailable fields stay as zero on older
// BTF.ext sections
let mut header = std::mem::MaybeUninit::<btf_ext_header>::zeroed();
let mut header = core::mem::MaybeUninit::<btf_ext_header>::zeroed();
// Safety: we have checked that len_to_read is less than
// size_of::<btf_ext_header> and less than
// data.len(). Additionally, we know that the header has
// been initialized so it's safe to call for assume_init.
unsafe {
std::ptr::copy(data.as_ptr(), header.as_mut_ptr() as *mut u8, len_to_read);
core::ptr::copy(data.as_ptr(), header.as_mut_ptr().cast::<u8>(), len_to_read);
header.assume_init()
}
};
@ -859,7 +946,7 @@ impl BtfExt {
})
};
let mut ext = BtfExt {
let mut ext = Self {
header,
relocations: Vec::new(),
func_info: FuncInfo::new(),
@ -954,8 +1041,8 @@ impl BtfExt {
self.info_data(self.header.line_info_off, self.header.line_info_len)
}
pub(crate) fn relocations(&self) -> impl Iterator<Item = &(u32, Vec<Relocation>)> {
self.relocations.iter()
pub(crate) fn relocations(&self) -> &[(u32, Vec<Relocation>)] {
self.relocations.as_slice()
}
pub(crate) fn func_info_rec_size(&self) -> usize {
@ -1056,7 +1143,7 @@ impl BtfTypes {
pub(crate) fn resolve_type(&self, root_type_id: u32) -> Result<u32, BtfError> {
let mut type_id = root_type_id;
for _ in 0..MAX_RESOLVE_DEPTH {
for () in core::iter::repeat_n((), MAX_RESOLVE_DEPTH) {
let ty = self.type_by_id(type_id)?;
use BtfType::*;
@ -1110,11 +1197,18 @@ mod tests {
#[test]
fn test_parse_header() {
let data: &[u8] = &[
0x9f, 0xeb, 0x01, 0x00, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x54,
0x2a, 0x00, 0x64, 0x54, 0x2a, 0x00, 0x10, 0x64, 0x1c, 0x00,
];
let header = unsafe { read_btf_header(data) };
let header = btf_header {
magic: 0xeb9f,
version: 0x01,
flags: 0x00,
hdr_len: 0x18,
type_off: 0x00,
type_len: 0x2a5464,
str_off: 0x2a5464,
str_len: 0x1c6410,
};
let data = unsafe { bytes_of::<btf_header>(&header).to_vec() };
let header = unsafe { read_btf_header(&data) };
assert_eq!(header.magic, 0xeb9f);
assert_eq!(header.version, 0x01);
assert_eq!(header.flags, 0x00);
@ -1129,74 +1223,175 @@ mod tests {
fn test_parse_btf() {
// this generated BTF data is from an XDP program that simply returns XDP_PASS
// compiled using clang
let data: &[u8] = &[
0x9f, 0xeb, 0x01, 0x00, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x01,
0x00, 0x00, 0x0c, 0x01, 0x00, 0x00, 0xe1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x02, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x06, 0x00,
0x00, 0x04, 0x18, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x20, 0x00,
0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
0x20, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x30, 0x00,
0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x3f, 0x00, 0x00, 0x00,
0x03, 0x00, 0x00, 0x00, 0xa0, 0x00, 0x00, 0x00, 0x4e, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x08, 0x04, 0x00, 0x00, 0x00, 0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0x04, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00,
0x00, 0x0d, 0x06, 0x00, 0x00, 0x00, 0x61, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
0x65, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x04, 0x00, 0x00, 0x00, 0x20, 0x00,
0x00, 0x01, 0x69, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x0c, 0x05, 0x00, 0x00, 0x00,
0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x08, 0x00,
0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00,
0x08, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xbc, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x04, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
0xd0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x09, 0x00, 0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0xd9, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x00,
0x0b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x78,
0x64, 0x70, 0x5f, 0x6d, 0x64, 0x00, 0x64, 0x61, 0x74, 0x61, 0x00, 0x64, 0x61, 0x74,
0x61, 0x5f, 0x65, 0x6e, 0x64, 0x00, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6d, 0x65, 0x74,
0x61, 0x00, 0x69, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x66, 0x69, 0x6e,
0x64, 0x65, 0x78, 0x00, 0x72, 0x78, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x69,
0x6e, 0x64, 0x65, 0x78, 0x00, 0x65, 0x67, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x66,
0x69, 0x6e, 0x64, 0x65, 0x78, 0x00, 0x5f, 0x5f, 0x75, 0x33, 0x32, 0x00, 0x75, 0x6e,
0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x74, 0x00, 0x63, 0x74, 0x78,
0x00, 0x69, 0x6e, 0x74, 0x00, 0x78, 0x64, 0x70, 0x5f, 0x70, 0x61, 0x73, 0x73, 0x00,
0x78, 0x64, 0x70, 0x2f, 0x70, 0x61, 0x73, 0x73, 0x00, 0x2f, 0x68, 0x6f, 0x6d, 0x65,
0x2f, 0x64, 0x61, 0x76, 0x65, 0x2f, 0x64, 0x65, 0x76, 0x2f, 0x62, 0x70, 0x66, 0x64,
0x2f, 0x62, 0x70, 0x66, 0x2f, 0x78, 0x64, 0x70, 0x5f, 0x70, 0x61, 0x73, 0x73, 0x2e,
0x62, 0x70, 0x66, 0x2e, 0x63, 0x00, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75,
0x72, 0x6e, 0x20, 0x58, 0x44, 0x50, 0x5f, 0x50, 0x41, 0x53, 0x53, 0x3b, 0x00, 0x63,
0x68, 0x61, 0x72, 0x00, 0x5f, 0x5f, 0x41, 0x52, 0x52, 0x41, 0x59, 0x5f, 0x53, 0x49,
0x5a, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x5f, 0x00, 0x5f, 0x6c, 0x69, 0x63,
0x65, 0x6e, 0x73, 0x65, 0x00, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x00,
];
let data: &[u8] = if cfg!(target_endian = "little") {
&[
0x9f, 0xeb, 0x01, 0x00, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x01,
0x00, 0x00, 0x0c, 0x01, 0x00, 0x00, 0xe1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x02, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x06, 0x00,
0x00, 0x04, 0x18, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x20, 0x00,
0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
0x20, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x30, 0x00,
0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x3f, 0x00, 0x00, 0x00,
0x03, 0x00, 0x00, 0x00, 0xa0, 0x00, 0x00, 0x00, 0x4e, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x08, 0x04, 0x00, 0x00, 0x00, 0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0x04, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00,
0x00, 0x0d, 0x06, 0x00, 0x00, 0x00, 0x61, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
0x65, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x04, 0x00, 0x00, 0x00, 0x20, 0x00,
0x00, 0x01, 0x69, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x0c, 0x05, 0x00, 0x00, 0x00,
0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x08, 0x00,
0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00,
0x08, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xbc, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x04, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
0xd0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x09, 0x00, 0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0xd9, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x00,
0x0b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x78,
0x64, 0x70, 0x5f, 0x6d, 0x64, 0x00, 0x64, 0x61, 0x74, 0x61, 0x00, 0x64, 0x61, 0x74,
0x61, 0x5f, 0x65, 0x6e, 0x64, 0x00, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6d, 0x65, 0x74,
0x61, 0x00, 0x69, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x66, 0x69, 0x6e,
0x64, 0x65, 0x78, 0x00, 0x72, 0x78, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x69,
0x6e, 0x64, 0x65, 0x78, 0x00, 0x65, 0x67, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x66,
0x69, 0x6e, 0x64, 0x65, 0x78, 0x00, 0x5f, 0x5f, 0x75, 0x33, 0x32, 0x00, 0x75, 0x6e,
0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x74, 0x00, 0x63, 0x74, 0x78,
0x00, 0x69, 0x6e, 0x74, 0x00, 0x78, 0x64, 0x70, 0x5f, 0x70, 0x61, 0x73, 0x73, 0x00,
0x78, 0x64, 0x70, 0x2f, 0x70, 0x61, 0x73, 0x73, 0x00, 0x2f, 0x68, 0x6f, 0x6d, 0x65,
0x2f, 0x64, 0x61, 0x76, 0x65, 0x2f, 0x64, 0x65, 0x76, 0x2f, 0x62, 0x70, 0x66, 0x64,
0x2f, 0x62, 0x70, 0x66, 0x2f, 0x78, 0x64, 0x70, 0x5f, 0x70, 0x61, 0x73, 0x73, 0x2e,
0x62, 0x70, 0x66, 0x2e, 0x63, 0x00, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75,
0x72, 0x6e, 0x20, 0x58, 0x44, 0x50, 0x5f, 0x50, 0x41, 0x53, 0x53, 0x3b, 0x00, 0x63,
0x68, 0x61, 0x72, 0x00, 0x5f, 0x5f, 0x41, 0x52, 0x52, 0x41, 0x59, 0x5f, 0x53, 0x49,
0x5a, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x5f, 0x00, 0x5f, 0x6c, 0x69, 0x63,
0x65, 0x6e, 0x73, 0x65, 0x00, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x00,
]
} else {
&[
0xeb, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x0c, 0x00, 0x00, 0x01, 0x0c, 0x00, 0x00, 0x00, 0xe1, 0x00, 0x00, 0x00, 0x00,
0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x04, 0x00,
0x00, 0x06, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x03,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00,
0x00, 0x20, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x40,
0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x60, 0x00, 0x00,
0x00, 0x30, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x3f,
0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0xa0, 0x00, 0x00, 0x00, 0x4e, 0x08, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x54, 0x01, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x00,
0x00, 0x01, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x61, 0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x65, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x01, 0x00,
0x00, 0x20, 0x00, 0x00, 0x00, 0x69, 0x0c, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x05,
0x00, 0x00, 0x00, 0xb7, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00,
0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00,
0x00, 0xbc, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x20,
0x00, 0x00, 0x00, 0xd0, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00,
0x00, 0x01, 0x00, 0x00, 0x00, 0xd9, 0x0f, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x78,
0x64, 0x70, 0x5f, 0x6d, 0x64, 0x00, 0x64, 0x61, 0x74, 0x61, 0x00, 0x64, 0x61, 0x74,
0x61, 0x5f, 0x65, 0x6e, 0x64, 0x00, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6d, 0x65, 0x74,
0x61, 0x00, 0x69, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x66, 0x69, 0x6e,
0x64, 0x65, 0x78, 0x00, 0x72, 0x78, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x69,
0x6e, 0x64, 0x65, 0x78, 0x00, 0x65, 0x67, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x66,
0x69, 0x6e, 0x64, 0x65, 0x78, 0x00, 0x5f, 0x5f, 0x75, 0x33, 0x32, 0x00, 0x75, 0x6e,
0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x74, 0x00, 0x63, 0x74, 0x78,
0x00, 0x69, 0x6e, 0x74, 0x00, 0x78, 0x64, 0x70, 0x5f, 0x70, 0x61, 0x73, 0x73, 0x00,
0x78, 0x64, 0x70, 0x2f, 0x70, 0x61, 0x73, 0x73, 0x00, 0x2f, 0x68, 0x6f, 0x6d, 0x65,
0x2f, 0x64, 0x61, 0x76, 0x65, 0x2f, 0x64, 0x65, 0x76, 0x2f, 0x62, 0x70, 0x66, 0x64,
0x2f, 0x62, 0x70, 0x66, 0x2f, 0x78, 0x64, 0x70, 0x5f, 0x70, 0x61, 0x73, 0x73, 0x2e,
0x62, 0x70, 0x66, 0x2e, 0x63, 0x00, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75,
0x72, 0x6e, 0x20, 0x58, 0x44, 0x50, 0x5f, 0x50, 0x41, 0x53, 0x53, 0x3b, 0x00, 0x63,
0x68, 0x61, 0x72, 0x00, 0x5f, 0x5f, 0x41, 0x52, 0x52, 0x41, 0x59, 0x5f, 0x53, 0x49,
0x5a, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x5f, 0x00, 0x5f, 0x6c, 0x69, 0x63,
0x65, 0x6e, 0x73, 0x65, 0x00, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x00,
]
};
assert_eq!(data.len(), 517);
let btf = Btf::parse(data, Endianness::default()).unwrap_or_else(|e| panic!("{}", e));
let btf = Btf::parse(data, Endianness::default()).unwrap();
let data2 = btf.to_bytes();
assert_eq!(data2.len(), 517);
assert_eq!(data, data2);
let ext_data: &[u8] = &[
0x9f, 0xeb, 0x01, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x00,
0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x72, 0x00, 0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,
0x72, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, 0x00,
0x00, 0x00, 0xa2, 0x00, 0x00, 0x00, 0x05, 0x2c, 0x00, 0x00,
];
const FUNC_LEN: u32 = 0x14;
const LINE_INFO_LEN: u32 = 0x1c;
const CORE_RELO_LEN: u32 = 0;
const DATA_LEN: u32 = (FUNC_LEN + LINE_INFO_LEN + CORE_RELO_LEN) / 4;
struct TestStruct {
_header: btf_ext_header,
_data: [u32; DATA_LEN as usize],
}
let test_data = TestStruct {
_header: btf_ext_header {
magic: 0xeb9f,
version: 1,
flags: 0,
hdr_len: 0x20,
func_info_off: 0,
func_info_len: FUNC_LEN,
line_info_off: FUNC_LEN,
line_info_len: LINE_INFO_LEN,
core_relo_off: FUNC_LEN + LINE_INFO_LEN,
core_relo_len: CORE_RELO_LEN,
},
_data: [
0x00000008u32,
0x00000072u32,
0x00000001u32,
0x00000000u32,
0x00000007u32,
0x00000010u32,
0x00000072u32,
0x00000001u32,
0x00000000u32,
0x0000007bu32,
0x000000a2u32,
0x00002c05u32,
],
};
let ext_data = unsafe { bytes_of::<TestStruct>(&test_data).to_vec() };
assert_eq!(ext_data.len(), 80);
let _: BtfExt = BtfExt::parse(ext_data, Endianness::default(), &btf)
.unwrap_or_else(|e| panic!("{}", e));
let _: BtfExt = BtfExt::parse(&ext_data, Endianness::default(), &btf).unwrap();
}
#[test]
fn parsing_older_ext_data() {
let btf_data = [
159, 235, 1, 0, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
];
let btf_ext_data = [
159, 235, 1, 0, 24, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 8, 0, 0,
0, 16, 0, 0, 0,
];
const TYPE_LEN: u32 = 0;
const STR_LEN: u32 = 1;
struct BtfTestStruct {
_header: btf_header,
_data: [u8; (TYPE_LEN + STR_LEN) as usize],
}
let btf_test_data = BtfTestStruct {
_header: btf_header {
magic: 0xeb9f,
version: 0x01,
flags: 0x00,
hdr_len: 24,
type_off: 0,
type_len: TYPE_LEN,
str_off: TYPE_LEN,
str_len: TYPE_LEN + STR_LEN,
},
_data: [0x00u8],
};
let btf_data = unsafe { bytes_of::<BtfTestStruct>(&btf_test_data).to_vec() };
const FUNC_INFO_LEN: u32 = 4;
const LINE_INFO_LEN: u32 = 4;
const CORE_RELO_LEN: u32 = 16;
let ext_header = btf_ext_header {
magic: 0xeb9f,
version: 1,
flags: 0,
hdr_len: 24,
func_info_off: 0,
func_info_len: FUNC_INFO_LEN,
line_info_off: FUNC_INFO_LEN,
line_info_len: LINE_INFO_LEN,
core_relo_off: FUNC_INFO_LEN + LINE_INFO_LEN,
core_relo_len: CORE_RELO_LEN,
};
let btf_ext_data = unsafe { bytes_of::<btf_ext_header>(&ext_header).to_vec() };
let btf = Btf::parse(&btf_data, Endianness::default()).unwrap();
let btf_ext = BtfExt::parse(&btf_ext_data, Endianness::default(), &btf).unwrap();
assert_eq!(btf_ext.func_info_rec_size(), 8);
@ -1217,7 +1412,7 @@ mod tests {
let btf_bytes = btf.to_bytes();
let raw_btf = btf_bytes.as_slice();
let btf = Btf::parse(raw_btf, Endianness::default()).unwrap_or_else(|e| panic!("{}", e));
let btf = Btf::parse(raw_btf, Endianness::default()).unwrap();
assert_eq!(btf.string_at(1).unwrap(), "int");
assert_eq!(btf.string_at(5).unwrap(), "widget");
}
@ -1704,6 +1899,10 @@ mod tests {
#[test]
#[cfg(feature = "std")]
#[cfg_attr(miri, ignore = "`open` not available when isolation is enabled")]
#[cfg_attr(
target_endian = "big",
ignore = "Not possible to emulate \"/sys/kernel/btf/vmlinux\" as big endian"
)]
fn test_read_btf_from_sys_fs() {
let btf = Btf::parse_file("/sys/kernel/btf/vmlinux", Endianness::default()).unwrap();
let task_struct_id = btf

@ -1,12 +1,12 @@
use alloc::{string::String, vec, vec::Vec};
use bytes::BufMut;
use bytes::BufMut as _;
use object::Endianness;
use crate::{
generated::{bpf_func_info, bpf_line_info},
relocation::INS_SIZE,
util::{bytes_of, HashMap},
util::{HashMap, bytes_of},
};
/* The func_info subsection layout:
@ -41,7 +41,7 @@ impl FuncSecInfo {
rec_size: usize,
func_info_data: &[u8],
endianness: Endianness,
) -> FuncSecInfo {
) -> Self {
let func_info = func_info_data
.chunks(rec_size)
.map(|data| {
@ -65,7 +65,7 @@ impl FuncSecInfo {
})
.collect();
FuncSecInfo {
Self {
_sec_name_offset: sec_name_offset,
num_info,
func_info,
@ -83,6 +83,7 @@ impl FuncSecInfo {
}
/// Returns the number of [bpf_func_info] entries.
#[expect(clippy::len_without_is_empty)]
pub fn len(&self) -> usize {
self.func_info.len()
}
@ -100,8 +101,8 @@ pub struct FuncInfo {
}
impl FuncInfo {
pub(crate) fn new() -> FuncInfo {
FuncInfo {
pub(crate) fn new() -> Self {
Self {
data: HashMap::new(),
}
}
@ -137,7 +138,7 @@ impl LineSecInfo {
rec_size: usize,
func_info_data: &[u8],
endianness: Endianness,
) -> LineSecInfo {
) -> Self {
let line_info = func_info_data
.chunks(rec_size)
.map(|data| {
@ -170,7 +171,7 @@ impl LineSecInfo {
})
.collect();
LineSecInfo {
Self {
_sec_name_offset: sec_name_offset,
num_info,
line_info,
@ -188,6 +189,7 @@ impl LineSecInfo {
}
/// Returns the number of entries.
#[expect(clippy::len_without_is_empty)]
pub fn len(&self) -> usize {
self.line_info.len()
}
@ -199,8 +201,8 @@ pub(crate) struct LineInfo {
}
impl LineInfo {
pub(crate) fn new() -> LineInfo {
LineInfo {
pub(crate) fn new() -> Self {
Self {
data: HashMap::new(),
}
}

@ -1,6 +1,6 @@
//! BTF loading, parsing and relocation.
#[allow(clippy::module_inception)]
#[expect(clippy::module_inception)]
mod btf;
mod info;
mod relocation;

@ -2,7 +2,7 @@ use alloc::{
borrow::{Cow, ToOwned as _},
collections::BTreeMap,
format,
string::{String, ToString},
string::{String, ToString as _},
vec,
vec::Vec,
};
@ -10,19 +10,17 @@ use core::{mem, ops::Bound::Included, ptr};
use object::SectionIndex;
#[cfg(not(feature = "std"))]
use crate::std;
use crate::{
Function, Object,
btf::{
fields_are_compatible, types_are_compatible, Array, Btf, BtfError, BtfMember, BtfType,
IntEncoding, Struct, Union, MAX_SPEC_LEN,
Array, Btf, BtfError, BtfKind, BtfMember, BtfType, IntEncoding, MAX_SPEC_LEN, Struct,
Union, fields_are_compatible, types_are_compatible,
},
generated::{
bpf_core_relo, bpf_core_relo_kind::*, bpf_insn, BPF_ALU, BPF_ALU64, BPF_B, BPF_CALL,
BPF_DW, BPF_H, BPF_JMP, BPF_K, BPF_LD, BPF_LDX, BPF_ST, BPF_STX, BPF_W, BTF_INT_SIGNED,
BPF_ALU, BPF_ALU64, BPF_B, BPF_CALL, BPF_DW, BPF_H, BPF_JMP, BPF_K, BPF_LD, BPF_LDX,
BPF_ST, BPF_STX, BPF_W, BTF_INT_SIGNED, bpf_core_relo, bpf_core_relo_kind::*, bpf_insn,
},
util::HashMap,
Function, Object,
};
/// The error type returned by [`Object::relocate_btf`].
@ -60,7 +58,9 @@ enum RelocationError {
},
/// Invalid instruction index referenced by relocation
#[error("invalid instruction index #{index} referenced by relocation #{relocation_number}, the program contains {num_instructions} instructions")]
#[error(
"invalid instruction index #{index} referenced by relocation #{relocation_number}, the program contains {num_instructions} instructions"
)]
InvalidInstructionIndex {
/// The invalid instruction index
index: usize,
@ -71,7 +71,9 @@ enum RelocationError {
},
/// Multiple candidate target types found with different memory layouts
#[error("error relocating {type_name}, multiple candidate target types found with different memory layouts: {candidates:?}")]
#[error(
"error relocating {type_name}, multiple candidate target types found with different memory layouts: {candidates:?}"
)]
ConflictingCandidates {
/// The type name
type_name: String,
@ -129,7 +131,9 @@ enum RelocationError {
error: Cow<'static, str>,
},
#[error("applying relocation `{kind:?}` missing target BTF info for type `{type_id}` at instruction #{ins_index}")]
#[error(
"applying relocation `{kind:?}` missing target BTF info for type `{type_id}` at instruction #{ins_index}"
)]
MissingTargetDefinition {
kind: RelocationKind,
type_id: u32,
@ -196,15 +200,14 @@ pub(crate) struct Relocation {
}
impl Relocation {
#[allow(unused_unsafe)]
pub(crate) unsafe fn parse(data: &[u8], number: usize) -> Result<Relocation, BtfError> {
pub(crate) unsafe fn parse(data: &[u8], number: usize) -> Result<Self, BtfError> {
if mem::size_of::<bpf_core_relo>() > data.len() {
return Err(BtfError::InvalidRelocationInfo);
}
let rel = unsafe { ptr::read_unaligned::<bpf_core_relo>(data.as_ptr() as *const _) };
let rel = unsafe { ptr::read_unaligned::<bpf_core_relo>(data.as_ptr().cast()) };
Ok(Relocation {
Ok(Self {
kind: rel.kind.try_into()?,
ins_offset: rel.insn_off as usize,
type_id: rel.type_id,
@ -222,7 +225,7 @@ impl Object {
_ => return Ok(()),
};
let mut candidates_cache = HashMap::<u32, Vec<Candidate>>::new();
let mut candidates_cache = HashMap::<u32, Vec<Candidate<'_>>>::new();
for (sec_name_off, relos) in btf_ext.relocations() {
let section_name =
local_btf
@ -248,12 +251,12 @@ impl Object {
target_btf,
&mut candidates_cache,
) {
Ok(_) => {}
Ok(()) => {}
Err(error) => {
return Err(BtfRelocationError {
section: section_name.to_string(),
error,
})
});
}
}
}
@ -406,10 +409,26 @@ fn find_candidates<'target>(
) -> Result<Vec<Candidate<'target>>, BtfError> {
let mut candidates = Vec::new();
let local_name = flavorless_name(local_name);
for (type_id, ty) in target_btf.types().enumerate() {
if local_ty.kind() != ty.kind() {
continue;
}
let local_kind = local_ty.kind();
// When we downgrade an ENUM64 to a UNION we still want to match the enum
// definition recorded in the target BTF. If the sanitized type has a
// fallback, allow ENUM64 candidates through the kind check.
//
// Note that we do not sanitize the target BTF so the kinds will not
// naturally match!
let allow_enum_match = matches!(
local_ty,
BtfType::Union(Union {
enum64_fallback: Some(_),
..
})
);
for (type_id, ty) in target_btf.types().enumerate().filter(|(_, ty)| {
let candidate_kind = ty.kind();
candidate_kind == local_kind || (allow_enum_match && candidate_kind == BtfKind::Enum64)
}) {
let name = &*target_btf.type_name(ty)?;
if local_name != flavorless_name(name) {
continue;
@ -427,8 +446,8 @@ fn find_candidates<'target>(
}
fn match_candidate<'target>(
local_spec: &AccessSpec,
candidate: &'target Candidate,
local_spec: &AccessSpec<'_>,
candidate: &'target Candidate<'_>,
) -> Result<Option<AccessSpec<'target>>, RelocationError> {
let mut target_spec = AccessSpec {
btf: candidate.btf,
@ -457,18 +476,12 @@ fn match_candidate<'target>(
}
RelocationKind::EnumVariantExists | RelocationKind::EnumVariantValue => {
let target_id = candidate.btf.resolve_type(candidate.type_id)?;
let target_ty = candidate.btf.type_by_id(target_id)?;
// the first accessor is guaranteed to have a name by construction
let local_variant_name = local_spec.accessors[0].name.as_ref().unwrap();
fn match_enum<'a>(
iterator: impl Iterator<Item = (usize, u32)>,
candidate: &Candidate,
local_variant_name: &str,
target_id: u32,
mut target_spec: AccessSpec<'a>,
) -> Result<Option<AccessSpec<'a>>, RelocationError> {
for (index, name_offset) in iterator {
let match_enum = |iterator: &mut dyn Iterator<Item = u32>| {
// the first accessor is guaranteed to have a name by construction
let local_variant_name = local_spec.accessors[0].name.as_ref().unwrap();
for (index, name_offset) in iterator.enumerate() {
let target_variant_name = candidate.btf.string_at(name_offset)?;
if flavorless_name(local_variant_name) == flavorless_name(&target_variant_name)
{
@ -482,29 +495,23 @@ fn match_candidate<'target>(
}
}
Ok(None)
}
};
match target_ty {
BtfType::Enum(en) => match_enum(
en.variants
.iter()
.map(|member| member.name_offset)
.enumerate(),
candidate,
local_variant_name,
target_id,
target_spec,
),
BtfType::Enum64(en) => match_enum(
en.variants
.iter()
.map(|member| member.name_offset)
.enumerate(),
candidate,
local_variant_name,
target_id,
target_spec,
),
match candidate.btf.type_by_id(target_id)? {
BtfType::Enum(en) => {
match_enum(&mut en.variants.iter().map(|member| member.name_offset))
}
BtfType::Enum64(en) => {
match_enum(&mut en.variants.iter().map(|member| member.name_offset))
}
BtfType::Union(Union {
enum64_fallback: Some(fallback),
..
}) => {
// Local ENUM64 types become UNIONs during sanitisation; the fallback retains
// their original variant names so we can line them up with target enums.
match_enum(&mut fallback.variants.iter().map(|variant| variant.name_offset))
}
_ => Ok(None),
}
}
@ -669,7 +676,7 @@ impl<'a> AccessSpec<'a> {
root_type_id: u32,
spec: &str,
relocation: Relocation,
) -> Result<AccessSpec<'a>, RelocationError> {
) -> Result<Self, RelocationError> {
let parts = spec
.split(':')
.map(|s| s.parse::<usize>())
@ -700,60 +707,75 @@ impl<'a> AccessSpec<'a> {
bit_offset: 0,
}
}
RelocationKind::EnumVariantExists | RelocationKind::EnumVariantValue => match ty {
BtfType::Enum(_) | BtfType::Enum64(_) => {
if parts.len() != 1 {
return Err(RelocationError::InvalidAccessString {
access_str: spec.to_string(),
});
}
let index = parts[0];
RelocationKind::EnumVariantExists | RelocationKind::EnumVariantValue => {
let index = || match parts.as_slice() {
[index] => Ok(*index),
_ => Err(RelocationError::InvalidAccessString {
access_str: spec.to_string(),
}),
};
let (n_variants, name_offset) = match ty {
BtfType::Enum(en) => (
let (n_variants, name_offset, index) = match ty {
BtfType::Enum(en) => {
let index = index()?;
(
en.variants.len(),
en.variants.get(index).map(|v| v.name_offset),
),
BtfType::Enum64(en) => (
index,
)
}
BtfType::Enum64(en) => {
let index = index()?;
(
en.variants.len(),
en.variants.get(index).map(|v| v.name_offset),
),
_ => unreachable!(),
};
if name_offset.is_none() {
return Err(RelocationError::InvalidAccessIndex {
type_name: btf.err_type_name(ty),
spec: spec.to_string(),
index,
max_index: n_variants,
error: "tried to access nonexistant enum variant",
)
}
BtfType::Union(Union {
enum64_fallback: Some(fallback),
..
}) => {
let index = index()?;
(
fallback.variants.len(),
fallback.variants.get(index).map(|v| v.name_offset),
index,
)
}
_ => {
return Err(RelocationError::InvalidRelocationKindForType {
relocation_number: relocation.number,
relocation_kind: format!("{:?}", relocation.kind),
type_kind: format!("{:?}", ty.kind()),
error: "enum relocation on non-enum type",
});
}
let accessors = vec![Accessor {
type_id,
};
let name_offset =
name_offset.ok_or_else(|| RelocationError::InvalidAccessIndex {
type_name: btf.err_type_name(ty),
spec: spec.to_string(),
index,
name: Some(btf.string_at(name_offset.unwrap())?.to_string()),
}];
AccessSpec {
btf,
root_type_id,
relocation,
parts,
accessors,
bit_offset: 0,
}
}
_ => {
return Err(RelocationError::InvalidRelocationKindForType {
relocation_number: relocation.number,
relocation_kind: format!("{:?}", relocation.kind),
type_kind: format!("{:?}", ty.kind()),
error: "enum relocation on non-enum type",
})
max_index: n_variants,
error: "tried to access nonexistant enum variant",
})?;
let name = btf.string_at(name_offset)?;
let accessors = vec![Accessor {
type_id,
index,
name: Some(name.to_string()),
}];
AccessSpec {
btf,
root_type_id,
relocation,
parts,
accessors,
bit_offset: 0,
}
},
}
RelocationKind::FieldByteOffset
| RelocationKind::FieldByteSize
@ -874,7 +896,7 @@ struct ComputedRelocation {
target: Option<ComputedRelocationValue>,
}
#[derive(Debug)]
#[derive(Clone, Copy, Debug)]
struct ComputedRelocationValue {
value: u64,
size: u32,
@ -892,21 +914,21 @@ fn poison_insn(ins: &mut bpf_insn) {
impl ComputedRelocation {
fn new(
rel: &Relocation,
local_spec: &AccessSpec,
target_spec: Option<&AccessSpec>,
) -> Result<ComputedRelocation, RelocationError> {
local_spec: &AccessSpec<'_>,
target_spec: Option<&AccessSpec<'_>>,
) -> Result<Self, RelocationError> {
use RelocationKind::*;
let ret = match rel.kind {
FieldByteOffset | FieldByteSize | FieldExists | FieldSigned | FieldLShift64
| FieldRShift64 => ComputedRelocation {
| FieldRShift64 => Self {
local: Self::compute_field_relocation(rel, Some(local_spec))?,
target: Self::compute_field_relocation(rel, target_spec).ok(),
},
TypeIdLocal | TypeIdTarget | TypeExists | TypeSize => ComputedRelocation {
TypeIdLocal | TypeIdTarget | TypeExists | TypeSize => Self {
local: Self::compute_type_relocation(rel, local_spec, target_spec)?,
target: Self::compute_type_relocation(rel, local_spec, target_spec).ok(),
},
EnumVariantExists | EnumVariantValue => ComputedRelocation {
EnumVariantExists | EnumVariantValue => Self {
local: Self::compute_enum_relocation(rel, Some(local_spec))?,
target: Self::compute_enum_relocation(rel, target_spec).ok(),
},
@ -956,7 +978,7 @@ impl ComputedRelocation {
return Ok(());
};
let class = (ins.code & 0x07) as u32;
let class = u32::from(ins.code & 0x07);
let target_value = target.value;
@ -1006,7 +1028,7 @@ impl ComputedRelocation {
target.size,
)
.into(),
})
});
}
}
@ -1020,7 +1042,7 @@ impl ComputedRelocation {
relocation_number: rel.number,
index: ins_index,
error: format!("invalid target size {size}").into(),
})
});
}
} as u8;
ins.code = ins.code & 0xE0 | size | ins.code & 0x07;
@ -1043,7 +1065,7 @@ impl ComputedRelocation {
relocation_number: rel.number,
index: ins_index,
error: format!("invalid instruction class {class:x}").into(),
})
});
}
};
@ -1052,11 +1074,11 @@ impl ComputedRelocation {
fn compute_enum_relocation(
rel: &Relocation,
spec: Option<&AccessSpec>,
spec: Option<&AccessSpec<'_>>,
) -> Result<ComputedRelocationValue, RelocationError> {
use RelocationKind::*;
let value = match (rel.kind, spec) {
(EnumVariantExists, spec) => spec.is_some() as u64,
(EnumVariantExists, spec) => u64::from(spec.is_some()),
(EnumVariantValue, Some(spec)) => {
let accessor = &spec.accessors[0];
match spec.btf.type_by_id(accessor.type_id)? {
@ -1065,12 +1087,23 @@ impl ComputedRelocation {
if en.is_signed() {
value as i32 as u64
} else {
value as u64
u64::from(value)
}
}
BtfType::Enum64(en) => {
let variant = &en.variants[accessor.index];
(variant.value_high as u64) << 32 | variant.value_low as u64
(u64::from(variant.value_high) << 32) | u64::from(variant.value_low)
}
BtfType::Union(Union {
enum64_fallback: Some(fallback),
..
}) => {
let variant = &fallback.variants[accessor.index];
if fallback.signed {
(variant.value as i64) as u64
} else {
variant.value
}
}
// candidate selection ensures that rel_kind == local_kind == target_kind
_ => unreachable!(),
@ -1094,7 +1127,7 @@ impl ComputedRelocation {
fn compute_field_relocation(
rel: &Relocation,
spec: Option<&AccessSpec>,
spec: Option<&AccessSpec<'_>>,
) -> Result<ComputedRelocationValue, RelocationError> {
use RelocationKind::*;
@ -1102,7 +1135,7 @@ impl ComputedRelocation {
// this is the bpf_preserve_field_info(member_access, FIELD_EXISTENCE) case. If we
// managed to build a spec, it means the field exists.
return Ok(ComputedRelocationValue {
value: spec.is_some() as u64,
value: u64::from(spec.is_some()),
size: 0,
type_id: None,
});
@ -1191,34 +1224,32 @@ impl ComputedRelocation {
type_id: None,
};
#[allow(clippy::wildcard_in_or_patterns)]
match rel.kind {
FieldByteOffset => {
value.value = byte_off as u64;
value.value = u64::from(byte_off);
if !is_bitfield {
value.size = byte_size;
value.type_id = Some(member_type_id);
}
}
FieldByteSize => {
value.value = byte_size as u64;
value.value = u64::from(byte_size);
}
FieldSigned => match member_ty {
BtfType::Enum(en) => value.value = en.is_signed() as u64,
BtfType::Enum64(en) => value.value = en.is_signed() as u64,
BtfType::Enum(en) => value.value = u64::from(en.is_signed()),
BtfType::Enum64(en) => value.value = u64::from(en.is_signed()),
BtfType::Int(i) => value.value = i.encoding() as u64 & IntEncoding::Signed as u64,
_ => (),
},
#[cfg(target_endian = "little")]
FieldLShift64 => {
value.value = 64 - (bit_off + bit_size - byte_off * 8) as u64;
}
#[cfg(target_endian = "big")]
FieldLShift64 => {
value.value = (8 - byte_size) * 8 + (bit_off - byte_off * 8);
value.value = if cfg!(target_endian = "little") {
64 - u64::from(bit_off + bit_size - byte_off * 8)
} else {
u64::from((8 - byte_size) * 8 + (bit_off - byte_off * 8))
}
}
FieldRShift64 => {
value.value = 64 - bit_size as u64;
value.value = 64 - u64::from(bit_size);
}
kind @ (FieldExists | TypeIdLocal | TypeIdTarget | TypeExists | TypeSize
| EnumVariantExists | EnumVariantValue) => {
@ -1231,15 +1262,15 @@ impl ComputedRelocation {
fn compute_type_relocation(
rel: &Relocation,
local_spec: &AccessSpec,
target_spec: Option<&AccessSpec>,
local_spec: &AccessSpec<'_>,
target_spec: Option<&AccessSpec<'_>>,
) -> Result<ComputedRelocationValue, RelocationError> {
use RelocationKind::*;
let value = match (rel.kind, target_spec) {
(TypeIdLocal, _) => local_spec.root_type_id as u64,
(TypeIdTarget, Some(target_spec)) => target_spec.root_type_id as u64,
(TypeExists, target_spec) => target_spec.is_some() as u64,
(TypeIdLocal, _) => u64::from(local_spec.root_type_id),
(TypeIdTarget, Some(target_spec)) => u64::from(target_spec.root_type_id),
(TypeExists, target_spec) => u64::from(target_spec.is_some()),
(TypeSize, Some(target_spec)) => {
target_spec.btf.type_size(target_spec.root_type_id)? as u64
}

@ -1,6 +1,6 @@
#![allow(missing_docs)]
#![expect(missing_docs)]
use alloc::{string::ToString, vec, vec::Vec};
use alloc::{string::ToString as _, vec, vec::Vec};
use core::{fmt::Display, mem, ptr};
use object::Endianness;
@ -41,7 +41,7 @@ pub struct Fwd {
impl Fwd {
pub(crate) fn to_bytes(&self) -> Vec<u8> {
bytes_of::<Fwd>(self).to_vec()
bytes_of::<Self>(self).to_vec()
}
pub(crate) fn kind(&self) -> BtfKind {
@ -63,7 +63,7 @@ pub struct Const {
impl Const {
pub(crate) fn to_bytes(&self) -> Vec<u8> {
bytes_of::<Const>(self).to_vec()
bytes_of::<Self>(self).to_vec()
}
pub(crate) fn kind(&self) -> BtfKind {
@ -94,7 +94,7 @@ pub struct Volatile {
impl Volatile {
pub(crate) fn to_bytes(&self) -> Vec<u8> {
bytes_of::<Volatile>(self).to_vec()
bytes_of::<Self>(self).to_vec()
}
pub(crate) fn kind(&self) -> BtfKind {
@ -115,7 +115,7 @@ pub struct Restrict {
impl Restrict {
pub(crate) fn to_bytes(&self) -> Vec<u8> {
bytes_of::<Restrict>(self).to_vec()
bytes_of::<Self>(self).to_vec()
}
pub(crate) fn kind(&self) -> BtfKind {
@ -239,10 +239,10 @@ pub enum FuncLinkage {
impl From<u32> for FuncLinkage {
fn from(v: u32) -> Self {
match v {
0 => FuncLinkage::Static,
1 => FuncLinkage::Global,
2 => FuncLinkage::Extern,
_ => FuncLinkage::Unknown,
0 => Self::Static,
1 => Self::Global,
2 => Self::Extern,
_ => Self::Unknown,
}
}
}
@ -322,11 +322,11 @@ pub enum IntEncoding {
impl From<u32> for IntEncoding {
fn from(v: u32) -> Self {
match v {
0 => IntEncoding::None,
1 => IntEncoding::Signed,
2 => IntEncoding::Char,
4 => IntEncoding::Bool,
_ => IntEncoding::Unknown,
0 => Self::None,
1 => Self::Signed,
2 => Self::Char,
4 => Self::Bool,
_ => Self::Unknown,
}
}
}
@ -473,7 +473,7 @@ impl Enum {
}
#[repr(C)]
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Copy)]
pub struct BtfEnum64 {
pub(crate) name_offset: u32,
pub(crate) value_low: u32,
@ -549,7 +549,7 @@ impl Enum64 {
info |= 1 << 31
};
info |= (variants.len() as u32) & 0xFFFF;
Enum64 {
Self {
name_offset,
info,
// According to the documentation:
@ -651,6 +651,22 @@ impl Struct {
}
}
/// Snapshot of a single `ENUM64` variant so we can recover its 64-bit constant
/// after the type is rewritten into a UNION.
#[derive(Clone, Debug)]
pub(crate) struct Enum64VariantFallback {
pub(crate) name_offset: u32,
pub(crate) value: u64,
}
/// Aggregate of the metadata we need to faithfully reconstruct a downgraded
/// `ENUM64` during CO-RE relocation.
#[derive(Clone, Debug)]
pub(crate) struct Enum64Fallback {
pub(crate) signed: bool,
pub(crate) variants: Vec<Enum64VariantFallback>,
}
#[repr(C)]
#[derive(Clone, Debug)]
pub struct Union {
@ -658,16 +674,24 @@ pub struct Union {
info: u32,
pub(crate) size: u32,
pub(crate) members: Vec<BtfMember>,
pub(crate) enum64_fallback: Option<Enum64Fallback>,
}
impl Union {
pub(crate) fn new(name_offset: u32, size: u32, members: Vec<BtfMember>) -> Self {
let info = (BtfKind::Union as u32) << 24;
pub(crate) fn new(
name_offset: u32,
size: u32,
members: Vec<BtfMember>,
enum64_fallback: Option<Enum64Fallback>,
) -> Self {
let mut info = (BtfKind::Union as u32) << 24;
info |= (members.len() as u32) & 0xFFFF;
Self {
name_offset,
info,
size,
members,
enum64_fallback,
}
}
@ -677,6 +701,7 @@ impl Union {
info,
size,
members,
enum64_fallback: _,
} = self;
[
bytes_of::<u32>(name_offset),
@ -860,10 +885,10 @@ pub enum VarLinkage {
impl From<u32> for VarLinkage {
fn from(v: u32) -> Self {
match v {
0 => VarLinkage::Static,
1 => VarLinkage::Global,
2 => VarLinkage::Extern,
_ => VarLinkage::Unknown,
0 => Self::Static,
1 => Self::Global,
2 => Self::Extern,
_ => Self::Unknown,
}
}
}
@ -1087,26 +1112,26 @@ impl TryFrom<u32> for BtfKind {
impl Display for BtfKind {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
BtfKind::Unknown => write!(f, "[UNKNOWN]"),
BtfKind::Int => write!(f, "[INT]"),
BtfKind::Float => write!(f, "[FLOAT]"),
BtfKind::Ptr => write!(f, "[PTR]"),
BtfKind::Array => write!(f, "[ARRAY]"),
BtfKind::Struct => write!(f, "[STRUCT]"),
BtfKind::Union => write!(f, "[UNION]"),
BtfKind::Enum => write!(f, "[ENUM]"),
BtfKind::Fwd => write!(f, "[FWD]"),
BtfKind::Typedef => write!(f, "[TYPEDEF]"),
BtfKind::Volatile => write!(f, "[VOLATILE]"),
BtfKind::Const => write!(f, "[CONST]"),
BtfKind::Restrict => write!(f, "[RESTRICT]"),
BtfKind::Func => write!(f, "[FUNC]"),
BtfKind::FuncProto => write!(f, "[FUNC_PROTO]"),
BtfKind::Var => write!(f, "[VAR]"),
BtfKind::DataSec => write!(f, "[DATASEC]"),
BtfKind::DeclTag => write!(f, "[DECL_TAG]"),
BtfKind::TypeTag => write!(f, "[TYPE_TAG]"),
BtfKind::Enum64 => write!(f, "[ENUM64]"),
Self::Unknown => write!(f, "[UNKNOWN]"),
Self::Int => write!(f, "[INT]"),
Self::Float => write!(f, "[FLOAT]"),
Self::Ptr => write!(f, "[PTR]"),
Self::Array => write!(f, "[ARRAY]"),
Self::Struct => write!(f, "[STRUCT]"),
Self::Union => write!(f, "[UNION]"),
Self::Enum => write!(f, "[ENUM]"),
Self::Fwd => write!(f, "[FWD]"),
Self::Typedef => write!(f, "[TYPEDEF]"),
Self::Volatile => write!(f, "[VOLATILE]"),
Self::Const => write!(f, "[CONST]"),
Self::Restrict => write!(f, "[RESTRICT]"),
Self::Func => write!(f, "[FUNC]"),
Self::FuncProto => write!(f, "[FUNC_PROTO]"),
Self::Var => write!(f, "[VAR]"),
Self::DataSec => write!(f, "[DATASEC]"),
Self::DeclTag => write!(f, "[DECL_TAG]"),
Self::TypeTag => write!(f, "[TYPE_TAG]"),
Self::Enum64 => write!(f, "[ENUM64]"),
}
}
}
@ -1116,7 +1141,7 @@ unsafe fn read<T>(data: &[u8]) -> Result<T, BtfError> {
return Err(BtfError::InvalidTypeInfo);
}
Ok(ptr::read_unaligned::<T>(data.as_ptr() as *const T))
Ok(unsafe { ptr::read_unaligned(data.as_ptr().cast()) })
}
unsafe fn read_array<T>(data: &[u8], len: usize) -> Result<Vec<T>, BtfError> {
@ -1126,50 +1151,49 @@ unsafe fn read_array<T>(data: &[u8], len: usize) -> Result<Vec<T>, BtfError> {
let data = &data[0..mem::size_of::<T>() * len];
let r = data
.chunks(mem::size_of::<T>())
.map(|chunk| ptr::read_unaligned(chunk.as_ptr() as *const T))
.map(|chunk| unsafe { ptr::read_unaligned(chunk.as_ptr().cast()) })
.collect();
Ok(r)
}
impl BtfType {
#[allow(unused_unsafe)]
pub(crate) unsafe fn read(data: &[u8], endianness: Endianness) -> Result<BtfType, BtfError> {
pub(crate) unsafe fn read(data: &[u8], endianness: Endianness) -> Result<Self, BtfError> {
let ty = unsafe { read_array::<u32>(data, 3)? };
let data = &data[mem::size_of::<u32>() * 3..];
let vlen = type_vlen(ty[1]);
Ok(match type_kind(ty[1])? {
BtfKind::Unknown => BtfType::Unknown,
BtfKind::Fwd => BtfType::Fwd(Fwd {
BtfKind::Unknown => Self::Unknown,
BtfKind::Fwd => Self::Fwd(Fwd {
name_offset: ty[0],
info: ty[1],
_unused: 0,
}),
BtfKind::Const => BtfType::Const(Const {
BtfKind::Const => Self::Const(Const {
name_offset: ty[0],
info: ty[1],
btf_type: ty[2],
}),
BtfKind::Volatile => BtfType::Volatile(Volatile {
BtfKind::Volatile => Self::Volatile(Volatile {
name_offset: ty[0],
info: ty[1],
btf_type: ty[2],
}),
BtfKind::Restrict => BtfType::Restrict(Restrict {
BtfKind::Restrict => Self::Restrict(Restrict {
name_offset: ty[0],
_info: ty[1],
btf_type: ty[2],
}),
BtfKind::Ptr => BtfType::Ptr(Ptr {
BtfKind::Ptr => Self::Ptr(Ptr {
name_offset: ty[0],
info: ty[1],
btf_type: ty[2],
}),
BtfKind::Typedef => BtfType::Typedef(Typedef {
BtfKind::Typedef => Self::Typedef(Typedef {
name_offset: ty[0],
info: ty[1],
btf_type: ty[2],
}),
BtfKind::Func => BtfType::Func(Func {
BtfKind::Func => Self::Func(Func {
name_offset: ty[0],
info: ty[1],
btf_type: ty[2],
@ -1183,73 +1207,74 @@ impl BtfType {
} else {
u32::from_be_bytes
};
BtfType::Int(Int {
Self::Int(Int {
name_offset: ty[0],
info: ty[1],
size: ty[2],
data: read_u32(data[..mem::size_of::<u32>()].try_into().unwrap()),
})
}
BtfKind::Float => BtfType::Float(Float {
BtfKind::Float => Self::Float(Float {
name_offset: ty[0],
info: ty[1],
size: ty[2],
}),
BtfKind::Enum => BtfType::Enum(Enum {
BtfKind::Enum => Self::Enum(Enum {
name_offset: ty[0],
info: ty[1],
size: ty[2],
variants: unsafe { read_array::<BtfEnum>(data, vlen)? },
}),
BtfKind::Enum64 => BtfType::Enum64(Enum64 {
BtfKind::Enum64 => Self::Enum64(Enum64 {
name_offset: ty[0],
info: ty[1],
size: ty[2],
variants: unsafe { read_array::<BtfEnum64>(data, vlen)? },
}),
BtfKind::Array => BtfType::Array(Array {
BtfKind::Array => Self::Array(Array {
name_offset: ty[0],
info: ty[1],
_unused: 0,
array: unsafe { read(data)? },
}),
BtfKind::Struct => BtfType::Struct(Struct {
BtfKind::Struct => Self::Struct(Struct {
name_offset: ty[0],
info: ty[1],
size: ty[2],
members: unsafe { read_array::<BtfMember>(data, vlen)? },
}),
BtfKind::Union => BtfType::Union(Union {
BtfKind::Union => Self::Union(Union {
name_offset: ty[0],
info: ty[1],
size: ty[2],
members: unsafe { read_array::<BtfMember>(data, vlen)? },
enum64_fallback: None,
}),
BtfKind::FuncProto => BtfType::FuncProto(FuncProto {
BtfKind::FuncProto => Self::FuncProto(FuncProto {
name_offset: ty[0],
info: ty[1],
return_type: ty[2],
params: unsafe { read_array::<BtfParam>(data, vlen)? },
}),
BtfKind::Var => BtfType::Var(Var {
BtfKind::Var => Self::Var(Var {
name_offset: ty[0],
info: ty[1],
btf_type: ty[2],
linkage: unsafe { read(data)? },
}),
BtfKind::DataSec => BtfType::DataSec(DataSec {
BtfKind::DataSec => Self::DataSec(DataSec {
name_offset: ty[0],
info: ty[1],
size: ty[2],
entries: unsafe { read_array::<DataSecEntry>(data, vlen)? },
}),
BtfKind::DeclTag => BtfType::DeclTag(DeclTag {
BtfKind::DeclTag => Self::DeclTag(DeclTag {
name_offset: ty[0],
info: ty[1],
btf_type: ty[2],
component_index: unsafe { read(data)? },
}),
BtfKind::TypeTag => BtfType::TypeTag(TypeTag {
BtfKind::TypeTag => Self::TypeTag(TypeTag {
name_offset: ty[0],
info: ty[1],
btf_type: ty[2],
@ -1259,163 +1284,163 @@ impl BtfType {
pub(crate) fn to_bytes(&self) -> Vec<u8> {
match self {
BtfType::Unknown => vec![],
BtfType::Fwd(t) => t.to_bytes(),
BtfType::Const(t) => t.to_bytes(),
BtfType::Volatile(t) => t.to_bytes(),
BtfType::Restrict(t) => t.to_bytes(),
BtfType::Ptr(t) => t.to_bytes(),
BtfType::Typedef(t) => t.to_bytes(),
BtfType::Func(t) => t.to_bytes(),
BtfType::Int(t) => t.to_bytes(),
BtfType::Float(t) => t.to_bytes(),
BtfType::Enum(t) => t.to_bytes(),
BtfType::Enum64(t) => t.to_bytes(),
BtfType::Array(t) => t.to_bytes(),
BtfType::Struct(t) => t.to_bytes(),
BtfType::Union(t) => t.to_bytes(),
BtfType::FuncProto(t) => t.to_bytes(),
BtfType::Var(t) => t.to_bytes(),
BtfType::DataSec(t) => t.to_bytes(),
BtfType::DeclTag(t) => t.to_bytes(),
BtfType::TypeTag(t) => t.to_bytes(),
Self::Unknown => vec![],
Self::Fwd(t) => t.to_bytes(),
Self::Const(t) => t.to_bytes(),
Self::Volatile(t) => t.to_bytes(),
Self::Restrict(t) => t.to_bytes(),
Self::Ptr(t) => t.to_bytes(),
Self::Typedef(t) => t.to_bytes(),
Self::Func(t) => t.to_bytes(),
Self::Int(t) => t.to_bytes(),
Self::Float(t) => t.to_bytes(),
Self::Enum(t) => t.to_bytes(),
Self::Enum64(t) => t.to_bytes(),
Self::Array(t) => t.to_bytes(),
Self::Struct(t) => t.to_bytes(),
Self::Union(t) => t.to_bytes(),
Self::FuncProto(t) => t.to_bytes(),
Self::Var(t) => t.to_bytes(),
Self::DataSec(t) => t.to_bytes(),
Self::DeclTag(t) => t.to_bytes(),
Self::TypeTag(t) => t.to_bytes(),
}
}
pub(crate) fn size(&self) -> Option<u32> {
match self {
BtfType::Int(t) => Some(t.size),
BtfType::Float(t) => Some(t.size),
BtfType::Enum(t) => Some(t.size),
BtfType::Enum64(t) => Some(t.size),
BtfType::Struct(t) => Some(t.size),
BtfType::Union(t) => Some(t.size),
BtfType::DataSec(t) => Some(t.size),
BtfType::Ptr(_) => Some(mem::size_of::<&()>() as u32),
Self::Int(t) => Some(t.size),
Self::Float(t) => Some(t.size),
Self::Enum(t) => Some(t.size),
Self::Enum64(t) => Some(t.size),
Self::Struct(t) => Some(t.size),
Self::Union(t) => Some(t.size),
Self::DataSec(t) => Some(t.size),
Self::Ptr(_) => Some(mem::size_of::<&()>() as u32),
_ => None,
}
}
pub(crate) fn btf_type(&self) -> Option<u32> {
match self {
BtfType::Const(t) => Some(t.btf_type),
BtfType::Volatile(t) => Some(t.btf_type),
BtfType::Restrict(t) => Some(t.btf_type),
BtfType::Ptr(t) => Some(t.btf_type),
BtfType::Typedef(t) => Some(t.btf_type),
Self::Const(t) => Some(t.btf_type),
Self::Volatile(t) => Some(t.btf_type),
Self::Restrict(t) => Some(t.btf_type),
Self::Ptr(t) => Some(t.btf_type),
Self::Typedef(t) => Some(t.btf_type),
// FuncProto contains the return type here, and doesn't directly reference another type
BtfType::FuncProto(t) => Some(t.return_type),
BtfType::Var(t) => Some(t.btf_type),
BtfType::DeclTag(t) => Some(t.btf_type),
BtfType::TypeTag(t) => Some(t.btf_type),
Self::FuncProto(t) => Some(t.return_type),
Self::Var(t) => Some(t.btf_type),
Self::DeclTag(t) => Some(t.btf_type),
Self::TypeTag(t) => Some(t.btf_type),
_ => None,
}
}
pub(crate) fn type_info_size(&self) -> usize {
match self {
BtfType::Unknown => mem::size_of::<Fwd>(),
BtfType::Fwd(t) => t.type_info_size(),
BtfType::Const(t) => t.type_info_size(),
BtfType::Volatile(t) => t.type_info_size(),
BtfType::Restrict(t) => t.type_info_size(),
BtfType::Ptr(t) => t.type_info_size(),
BtfType::Typedef(t) => t.type_info_size(),
BtfType::Func(t) => t.type_info_size(),
BtfType::Int(t) => t.type_info_size(),
BtfType::Float(t) => t.type_info_size(),
BtfType::Enum(t) => t.type_info_size(),
BtfType::Enum64(t) => t.type_info_size(),
BtfType::Array(t) => t.type_info_size(),
BtfType::Struct(t) => t.type_info_size(),
BtfType::Union(t) => t.type_info_size(),
BtfType::FuncProto(t) => t.type_info_size(),
BtfType::Var(t) => t.type_info_size(),
BtfType::DataSec(t) => t.type_info_size(),
BtfType::DeclTag(t) => t.type_info_size(),
BtfType::TypeTag(t) => t.type_info_size(),
Self::Unknown => mem::size_of::<Fwd>(),
Self::Fwd(t) => t.type_info_size(),
Self::Const(t) => t.type_info_size(),
Self::Volatile(t) => t.type_info_size(),
Self::Restrict(t) => t.type_info_size(),
Self::Ptr(t) => t.type_info_size(),
Self::Typedef(t) => t.type_info_size(),
Self::Func(t) => t.type_info_size(),
Self::Int(t) => t.type_info_size(),
Self::Float(t) => t.type_info_size(),
Self::Enum(t) => t.type_info_size(),
Self::Enum64(t) => t.type_info_size(),
Self::Array(t) => t.type_info_size(),
Self::Struct(t) => t.type_info_size(),
Self::Union(t) => t.type_info_size(),
Self::FuncProto(t) => t.type_info_size(),
Self::Var(t) => t.type_info_size(),
Self::DataSec(t) => t.type_info_size(),
Self::DeclTag(t) => t.type_info_size(),
Self::TypeTag(t) => t.type_info_size(),
}
}
pub(crate) fn name_offset(&self) -> u32 {
match self {
BtfType::Unknown => 0,
BtfType::Fwd(t) => t.name_offset,
BtfType::Const(t) => t.name_offset,
BtfType::Volatile(t) => t.name_offset,
BtfType::Restrict(t) => t.name_offset,
BtfType::Ptr(t) => t.name_offset,
BtfType::Typedef(t) => t.name_offset,
BtfType::Func(t) => t.name_offset,
BtfType::Int(t) => t.name_offset,
BtfType::Float(t) => t.name_offset,
BtfType::Enum(t) => t.name_offset,
BtfType::Enum64(t) => t.name_offset,
BtfType::Array(t) => t.name_offset,
BtfType::Struct(t) => t.name_offset,
BtfType::Union(t) => t.name_offset,
BtfType::FuncProto(t) => t.name_offset,
BtfType::Var(t) => t.name_offset,
BtfType::DataSec(t) => t.name_offset,
BtfType::DeclTag(t) => t.name_offset,
BtfType::TypeTag(t) => t.name_offset,
Self::Unknown => 0,
Self::Fwd(t) => t.name_offset,
Self::Const(t) => t.name_offset,
Self::Volatile(t) => t.name_offset,
Self::Restrict(t) => t.name_offset,
Self::Ptr(t) => t.name_offset,
Self::Typedef(t) => t.name_offset,
Self::Func(t) => t.name_offset,
Self::Int(t) => t.name_offset,
Self::Float(t) => t.name_offset,
Self::Enum(t) => t.name_offset,
Self::Enum64(t) => t.name_offset,
Self::Array(t) => t.name_offset,
Self::Struct(t) => t.name_offset,
Self::Union(t) => t.name_offset,
Self::FuncProto(t) => t.name_offset,
Self::Var(t) => t.name_offset,
Self::DataSec(t) => t.name_offset,
Self::DeclTag(t) => t.name_offset,
Self::TypeTag(t) => t.name_offset,
}
}
pub(crate) fn kind(&self) -> BtfKind {
match self {
BtfType::Unknown => BtfKind::Unknown,
BtfType::Fwd(t) => t.kind(),
BtfType::Const(t) => t.kind(),
BtfType::Volatile(t) => t.kind(),
BtfType::Restrict(t) => t.kind(),
BtfType::Ptr(t) => t.kind(),
BtfType::Typedef(t) => t.kind(),
BtfType::Func(t) => t.kind(),
BtfType::Int(t) => t.kind(),
BtfType::Float(t) => t.kind(),
BtfType::Enum(t) => t.kind(),
BtfType::Enum64(t) => t.kind(),
BtfType::Array(t) => t.kind(),
BtfType::Struct(t) => t.kind(),
BtfType::Union(t) => t.kind(),
BtfType::FuncProto(t) => t.kind(),
BtfType::Var(t) => t.kind(),
BtfType::DataSec(t) => t.kind(),
BtfType::DeclTag(t) => t.kind(),
BtfType::TypeTag(t) => t.kind(),
Self::Unknown => BtfKind::Unknown,
Self::Fwd(t) => t.kind(),
Self::Const(t) => t.kind(),
Self::Volatile(t) => t.kind(),
Self::Restrict(t) => t.kind(),
Self::Ptr(t) => t.kind(),
Self::Typedef(t) => t.kind(),
Self::Func(t) => t.kind(),
Self::Int(t) => t.kind(),
Self::Float(t) => t.kind(),
Self::Enum(t) => t.kind(),
Self::Enum64(t) => t.kind(),
Self::Array(t) => t.kind(),
Self::Struct(t) => t.kind(),
Self::Union(t) => t.kind(),
Self::FuncProto(t) => t.kind(),
Self::Var(t) => t.kind(),
Self::DataSec(t) => t.kind(),
Self::DeclTag(t) => t.kind(),
Self::TypeTag(t) => t.kind(),
}
}
pub(crate) fn is_composite(&self) -> bool {
matches!(self, BtfType::Struct(_) | BtfType::Union(_))
matches!(self, Self::Struct(_) | Self::Union(_))
}
pub(crate) fn members(&self) -> Option<impl Iterator<Item = &BtfMember>> {
match self {
BtfType::Struct(t) => Some(t.members.iter()),
BtfType::Union(t) => Some(t.members.iter()),
Self::Struct(t) => Some(t.members.iter()),
Self::Union(t) => Some(t.members.iter()),
_ => None,
}
}
pub(crate) fn member_bit_field_size(&self, member: &BtfMember) -> Option<usize> {
match self {
BtfType::Struct(t) => Some(t.member_bit_field_size(member)),
BtfType::Union(t) => Some(t.member_bit_field_size(member)),
Self::Struct(t) => Some(t.member_bit_field_size(member)),
Self::Union(t) => Some(t.member_bit_field_size(member)),
_ => None,
}
}
pub(crate) fn member_bit_offset(&self, member: &BtfMember) -> Option<usize> {
match self {
BtfType::Struct(t) => Some(t.member_bit_offset(member)),
BtfType::Union(t) => Some(t.member_bit_offset(member)),
Self::Struct(t) => Some(t.member_bit_offset(member)),
Self::Union(t) => Some(t.member_bit_offset(member)),
_ => None,
}
}
pub(crate) fn is_compatible(&self, other: &BtfType) -> bool {
pub(crate) fn is_compatible(&self, other: &Self) -> bool {
if self.kind() == other.kind() {
return true;
}
@ -1450,7 +1475,7 @@ pub(crate) fn types_are_compatible(
return Ok(false);
}
for _ in 0..MAX_RESOLVE_DEPTH {
for () in core::iter::repeat_n((), MAX_RESOLVE_DEPTH) {
local_id = local_btf.resolve_type(local_id)?;
target_id = target_btf.resolve_type(target_id)?;
let local_ty = local_btf.type_by_id(local_id)?;
@ -1519,7 +1544,7 @@ pub(crate) fn fields_are_compatible(
target_btf: &Btf,
mut target_id: u32,
) -> Result<bool, BtfError> {
for _ in 0..MAX_RESOLVE_DEPTH {
for () in core::iter::repeat_n((), MAX_RESOLVE_DEPTH) {
local_id = local_btf.resolve_type(local_id)?;
target_id = target_btf.resolve_type(target_id)?;
let local_ty = local_btf.type_by_id(local_id)?;
@ -1566,8 +1591,12 @@ pub(crate) fn fields_are_compatible(
fn bytes_of<T>(val: &T) -> &[u8] {
// Safety: all btf types are POD
//
// TODO: This is a fragile assumption and we should stop doing this. We should also remove
// repr(C) from our types, it doesn't make sense to rely on this.
unsafe { crate::util::bytes_of(val) }
}
#[cfg(test)]
mod tests {
use assert_matches::assert_matches;
@ -1577,10 +1606,8 @@ mod tests {
#[test]
fn test_read_btf_type_int() {
let endianness = Endianness::default();
let data: &[u8] = &[
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x40, 0x00,
0x00, 0x00,
];
let bpf_type = BtfType::Int(Int::new(1, 8, IntEncoding::None, 0));
let data: &[u8] = &bpf_type.to_bytes();
assert_matches!(unsafe { BtfType::read(data, endianness) }.unwrap(), BtfType::Int(new @ Int {
name_offset,
info: _,
@ -1594,42 +1621,11 @@ mod tests {
});
}
#[test]
fn test_write_btf_long_unsigned_int() {
let data: &[u8] = &[
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00, 0x40, 0x00,
0x00, 0x00,
];
let int = Int::new(1, 8, IntEncoding::None, 0);
assert_eq!(int.to_bytes(), data);
}
#[test]
fn test_write_btf_uchar() {
let data: &[u8] = &[
0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x08, 0x00,
0x00, 0x00,
];
let int = Int::new(0x13, 1, IntEncoding::None, 0);
assert_eq!(int.to_bytes(), data);
}
#[test]
fn test_write_btf_signed_short_int() {
let data: &[u8] = &[
0x4a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x10, 0x00,
0x00, 0x01,
];
let int = Int::new(0x4a, 2, IntEncoding::Signed, 0);
assert_eq!(int.to_bytes(), data);
}
#[test]
fn test_read_btf_type_ptr() {
let endianness = Endianness::default();
let data: &[u8] = &[
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x06, 0x00, 0x00, 0x00,
];
let bpf_type = BtfType::Ptr(Ptr::new(0, 0x06));
let data: &[u8] = &bpf_type.to_bytes();
assert_matches!(unsafe { BtfType::read(data, endianness) }.unwrap(), BtfType::Ptr(got) => {
assert_eq!(got.to_bytes(), data);
});
@ -1638,10 +1634,8 @@ mod tests {
#[test]
fn test_read_btf_type_array() {
let endianness = Endianness::default();
let data: &[u8] = &[
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
];
let bpf_type = BtfType::Array(Array::new(0, 1, 0x12, 2));
let data: &[u8] = &bpf_type.to_bytes();
assert_matches!(unsafe { BtfType::read(data, endianness) }.unwrap(), BtfType::Array(got) => {
assert_eq!(got.to_bytes(), data);
});
@ -1650,10 +1644,13 @@ mod tests {
#[test]
fn test_read_btf_type_struct() {
let endianness = Endianness::default();
let data: &[u8] = &[
0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x04, 0x04, 0x00, 0x00, 0x00, 0x47, 0x02,
0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
];
let members = vec![BtfMember {
name_offset: 0x0247,
btf_type: 0x12,
offset: 0,
}];
let bpf_type = BtfType::Struct(Struct::new(0, members, 4));
let data: &[u8] = &bpf_type.to_bytes();
assert_matches!(unsafe { BtfType::read(data, endianness) }.unwrap(), BtfType::Struct(got) => {
assert_eq!(got.to_bytes(), data);
});
@ -1662,10 +1659,13 @@ mod tests {
#[test]
fn test_read_btf_type_union() {
let endianness = Endianness::default();
let data: &[u8] = &[
0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x05, 0x04, 0x00, 0x00, 0x00, 0x0d, 0x04,
0x00, 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
];
let members = vec![BtfMember {
name_offset: 0x040d,
btf_type: 0x68,
offset: 0,
}];
let bpf_type = BtfType::Union(Union::new(0, 4, members, None));
let data: &[u8] = &bpf_type.to_bytes();
assert_matches!(unsafe { BtfType::read(data, endianness) }.unwrap(), BtfType::Union(got) => {
assert_eq!(got.to_bytes(), data);
});
@ -1674,10 +1674,11 @@ mod tests {
#[test]
fn test_read_btf_type_enum() {
let endianness = Endianness::default();
let data: &[u8] = &[
0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x06, 0x04, 0x00, 0x00, 0x00, 0xc9, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcf, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
];
let enum1 = BtfEnum::new(0xc9, 0);
let enum2 = BtfEnum::new(0xcf, 1);
let variants = vec![enum1, enum2];
let bpf_type = BtfType::Enum(Enum::new(0, false, variants));
let data: &[u8] = &bpf_type.to_bytes();
assert_matches!(unsafe { BtfType::read(data, endianness) }.unwrap(), BtfType::Enum(got) => {
assert_eq!(got.to_bytes(), data);
});
@ -1686,9 +1687,13 @@ mod tests {
#[test]
fn test_read_btf_type_fwd() {
let endianness = Endianness::default();
let data: &[u8] = &[
0x0b, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x00,
];
let info = (BtfKind::Fwd as u32) << 24;
let bpf_type = BtfType::Fwd(Fwd {
name_offset: 0x550b,
info,
_unused: 0,
});
let data: &[u8] = &bpf_type.to_bytes();
assert_matches!(unsafe { BtfType::read(data, endianness) }.unwrap(), BtfType::Fwd(got) => {
assert_eq!(got.to_bytes(), data);
});
@ -1697,9 +1702,8 @@ mod tests {
#[test]
fn test_read_btf_type_typedef() {
let endianness = Endianness::default();
let data: &[u8] = &[
0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x0b, 0x00, 0x00, 0x00,
];
let bpf_type = BtfType::Typedef(Typedef::new(0x31, 0x0b));
let data: &[u8] = &bpf_type.to_bytes();
assert_matches!(unsafe { BtfType::read(data, endianness) }.unwrap(), BtfType::Typedef(got) => {
assert_eq!(got.to_bytes(), data);
});
@ -1708,9 +1712,13 @@ mod tests {
#[test]
fn test_read_btf_type_volatile() {
let endianness = Endianness::default();
let data: &[u8] = &[
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x24, 0x00, 0x00, 0x00,
];
let info = (BtfKind::Volatile as u32) << 24;
let bpf_type = BtfType::Volatile(Volatile {
name_offset: 0,
info,
btf_type: 0x24,
});
let data: &[u8] = &bpf_type.to_bytes();
assert_matches!(unsafe { BtfType::read(data, endianness) }.unwrap(), BtfType::Volatile(got) => {
assert_eq!(got.to_bytes(), data);
});
@ -1719,9 +1727,8 @@ mod tests {
#[test]
fn test_read_btf_type_const() {
let endianness = Endianness::default();
let data: &[u8] = &[
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x01, 0x00, 0x00, 0x00,
];
let bpf_type = BtfType::Const(Const::new(1));
let data: &[u8] = &bpf_type.to_bytes();
assert_matches!(unsafe { BtfType::read(data, endianness) }.unwrap(), BtfType::Const(got) => {
assert_eq!(got.to_bytes(), data);
});
@ -1730,9 +1737,13 @@ mod tests {
#[test]
fn test_read_btf_type_restrict() {
let endianness = Endianness::default();
let data: &[u8] = &[
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, 0x04, 0x00, 0x00, 0x00,
];
let info = (BtfKind::Restrict as u32) << 24;
let bpf_type = BtfType::Restrict(Restrict {
name_offset: 0,
_info: info,
btf_type: 4,
});
let data: &[u8] = &bpf_type.to_bytes();
assert_matches!(unsafe { BtfType::read(data, endianness) }.unwrap(), BtfType::Restrict(got) => {
assert_eq!(got.to_bytes(), data);
});
@ -1741,9 +1752,8 @@ mod tests {
#[test]
fn test_read_btf_type_func() {
let endianness = Endianness::default();
let data: &[u8] = &[
0x17, 0x8b, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x0c, 0xf0, 0xe4, 0x00, 0x00,
];
let bpf_type = BtfType::Func(Func::new(0x000f8b17, 0xe4f0, FuncLinkage::Global));
let data: &[u8] = &bpf_type.to_bytes();
assert_matches!(unsafe { BtfType::read(data, endianness) }.unwrap(), BtfType::Func(got) => {
assert_eq!(got.to_bytes(), data);
});
@ -1752,10 +1762,12 @@ mod tests {
#[test]
fn test_read_btf_type_func_proto() {
let endianness = Endianness::default();
let data: &[u8] = &[
0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x0d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x12, 0x00, 0x00, 0x00,
];
let params = vec![BtfParam {
name_offset: 0,
btf_type: 0x12,
}];
let bpf_type = BtfType::FuncProto(FuncProto::new(params, 0));
let data: &[u8] = &bpf_type.to_bytes();
assert_matches!(unsafe { BtfType::read(data, endianness) }.unwrap(), BtfType::FuncProto(got) => {
assert_eq!(got.to_bytes(), data);
});
@ -1765,10 +1777,8 @@ mod tests {
fn test_read_btf_type_func_var() {
let endianness = Endianness::default();
// NOTE: There was no data in /sys/kernell/btf/vmlinux for this type
let data: &[u8] = &[
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00,
];
let bpf_type = BtfType::Var(Var::new(0, 0xf0, VarLinkage::Static));
let data: &[u8] = &bpf_type.to_bytes();
assert_matches!(unsafe { BtfType::read(data, endianness) }.unwrap(), BtfType::Var(got) => {
assert_eq!(got.to_bytes(), data);
});
@ -1777,10 +1787,13 @@ mod tests {
#[test]
fn test_read_btf_type_func_datasec() {
let endianness = Endianness::default();
let data: &[u8] = &[
0xd9, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x0b, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
];
let entries = vec![DataSecEntry {
btf_type: 11,
offset: 0,
size: 4,
}];
let bpf_type = BtfType::DataSec(DataSec::new(0xd9, entries, 0));
let data: &[u8] = &bpf_type.to_bytes();
assert_matches!(unsafe { BtfType::read(data, endianness) }.unwrap(), BtfType::DataSec(DataSec {
name_offset: _,
info: _,
@ -1802,9 +1815,8 @@ mod tests {
#[test]
fn test_read_btf_type_float() {
let endianness = Endianness::default();
let data: &[u8] = &[
0x78, 0xfd, 0x02, 0x00, 0x00, 0x00, 0x00, 0x10, 0x08, 0x00, 0x00, 0x00,
];
let bpf_type = BtfType::Float(Float::new(0x02fd, 8));
let data: &[u8] = &bpf_type.to_bytes();
assert_matches!(unsafe { BtfType::read(data, endianness) }.unwrap(), BtfType::Float(got) => {
assert_eq!(got.to_bytes(), data);
});
@ -1854,17 +1866,11 @@ mod tests {
}
#[test]
pub fn test_read_btf_type_enum64() {
fn test_read_btf_type_enum64() {
let endianness = Endianness::default();
let data: &[u8] = &[
0x00, 0x00, 0x00, 0x00, // name offset
0x01, 0x00, 0x00, 0x13, // info: vlen, type_kind
0x08, 0x00, 0x00, 0x00, // size
0xd7, 0x06, 0x00, 0x00, // enum variant name offset
0xbb, 0xbb, 0xbb, 0xbb, // enum variant low
0xaa, 0xaa, 0xaa, 0xaa, // enum variant high
];
let variants = vec![BtfEnum64::new(0, 0xbbbbbbbbaaaaaaaau64)];
let bpf_type = BtfType::Enum64(Enum64::new(0, false, variants));
let data: &[u8] = &bpf_type.to_bytes();
assert_matches!(unsafe { BtfType::read(data, endianness) }.unwrap(), BtfType::Enum64(got) => {
assert_eq!(got.to_bytes(), data);
});

@ -1,4 +1,4 @@
/* automatically generated by rust-bindgen 0.69.4 */
/* automatically generated by rust-bindgen 0.72.1 */
pub type __u8 = ::core::ffi::c_uchar;
pub type __u16 = ::core::ffi::c_ushort;

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

@ -1,31 +1,41 @@
//! eBPF bindings generated by rust-bindgen
#![allow(
dead_code,
non_camel_case_types,
non_snake_case,
clippy::all,
missing_docs
)]
mod btf_internal_bindings;
// don't re-export __u8 __u16 etc which are already exported by the
// linux_bindings_* module
pub use btf_internal_bindings::{bpf_core_relo, bpf_core_relo_kind, btf_ext_header};
#[cfg(target_arch = "aarch64")]
mod linux_bindings_aarch64;
#[cfg(target_arch = "arm")]
mod linux_bindings_armv7;
#[cfg(target_arch = "loongarch64")]
mod linux_bindings_loongarch64;
#[cfg(target_arch = "mips")]
mod linux_bindings_mips;
#[cfg(target_arch = "powerpc64")]
mod linux_bindings_powerpc64;
#[cfg(target_arch = "riscv64")]
mod linux_bindings_riscv64;
#[cfg(target_arch = "s390x")]
mod linux_bindings_s390x;
#[cfg(target_arch = "x86_64")]
mod linux_bindings_x86_64;
// don't re-export __u8 __u16 etc which are already exported by the
// linux_bindings_* module
pub use btf_internal_bindings::{bpf_core_relo, bpf_core_relo_kind, btf_ext_header};
#[cfg(target_arch = "aarch64")]
pub use linux_bindings_aarch64::*;
#[cfg(target_arch = "arm")]
pub use linux_bindings_armv7::*;
#[cfg(target_arch = "loongarch64")]
pub use linux_bindings_loongarch64::*;
#[cfg(target_arch = "mips")]
pub use linux_bindings_mips::*;
#[cfg(target_arch = "powerpc64")]
pub use linux_bindings_powerpc64::*;
#[cfg(target_arch = "riscv64")]
pub use linux_bindings_riscv64::*;
#[cfg(target_arch = "s390x")]
pub use linux_bindings_s390x::*;
#[cfg(target_arch = "x86_64")]
pub use linux_bindings_x86_64::*;

@ -49,7 +49,7 @@
//! let instructions = &function.instructions;
//! let data = unsafe {
//! core::slice::from_raw_parts(
//! instructions.as_ptr() as *const u8,
//! instructions.as_ptr().cast(),
//! instructions.len() * core::mem::size_of::<bpf_insn>(),
//! )
//! };
@ -65,28 +65,32 @@
html_favicon_url = "https://aya-rs.dev/assets/images/crabby.svg"
)]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![deny(clippy::all, missing_docs)]
#![allow(clippy::missing_safety_doc, clippy::len_without_is_empty)]
#![deny(missing_docs)]
#![cfg_attr(
any(feature = "std", test),
expect(unused_crate_dependencies, reason = "used in doctests")
)]
extern crate alloc;
#[cfg(feature = "std")]
extern crate std;
#[cfg(not(feature = "std"))]
mod std {
pub mod error {
pub use core_error::Error;
}
pub use core::*;
pub mod os {
pub mod fd {
pub type RawFd = core::ffi::c_int;
}
}
}
pub mod btf;
#[expect(
clippy::all,
clippy::cast_lossless,
clippy::ptr_as_ptr,
clippy::ref_as_ptr,
clippy::use_self,
missing_docs,
non_camel_case_types,
non_snake_case,
trivial_numeric_casts,
unreachable_pub,
unsafe_op_in_unsafe_fn
)]
pub mod generated;
pub mod links;
pub mod maps;
pub mod obj;
pub mod programs;
@ -108,15 +112,15 @@ impl VerifierLog {
}
}
impl std::fmt::Debug for VerifierLog {
impl core::fmt::Debug for VerifierLog {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
let Self(log) = self;
f.write_str(log)
}
}
impl std::fmt::Display for VerifierLog {
impl core::fmt::Display for VerifierLog {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
<Self as std::fmt::Debug>::fmt(self, f)
<Self as core::fmt::Debug>::fmt(self, f)
}
}

@ -0,0 +1,100 @@
//! Link type bindings.
use crate::{
InvalidTypeBinding,
generated::{bpf_attach_type, bpf_link_type},
};
impl TryFrom<u32> for bpf_link_type {
type Error = InvalidTypeBinding<u32>;
fn try_from(link_type: u32) -> Result<Self, Self::Error> {
use bpf_link_type::*;
Ok(match link_type {
x if x == BPF_LINK_TYPE_UNSPEC as u32 => BPF_LINK_TYPE_UNSPEC,
x if x == BPF_LINK_TYPE_RAW_TRACEPOINT as u32 => BPF_LINK_TYPE_RAW_TRACEPOINT,
x if x == BPF_LINK_TYPE_TRACING as u32 => BPF_LINK_TYPE_TRACING,
x if x == BPF_LINK_TYPE_CGROUP as u32 => BPF_LINK_TYPE_CGROUP,
x if x == BPF_LINK_TYPE_ITER as u32 => BPF_LINK_TYPE_ITER,
x if x == BPF_LINK_TYPE_NETNS as u32 => BPF_LINK_TYPE_NETNS,
x if x == BPF_LINK_TYPE_XDP as u32 => BPF_LINK_TYPE_XDP,
x if x == BPF_LINK_TYPE_PERF_EVENT as u32 => BPF_LINK_TYPE_PERF_EVENT,
x if x == BPF_LINK_TYPE_KPROBE_MULTI as u32 => BPF_LINK_TYPE_KPROBE_MULTI,
x if x == BPF_LINK_TYPE_STRUCT_OPS as u32 => BPF_LINK_TYPE_STRUCT_OPS,
x if x == BPF_LINK_TYPE_NETFILTER as u32 => BPF_LINK_TYPE_NETFILTER,
x if x == BPF_LINK_TYPE_TCX as u32 => BPF_LINK_TYPE_TCX,
x if x == BPF_LINK_TYPE_UPROBE_MULTI as u32 => BPF_LINK_TYPE_UPROBE_MULTI,
x if x == BPF_LINK_TYPE_NETKIT as u32 => BPF_LINK_TYPE_NETKIT,
_ => return Err(InvalidTypeBinding { value: link_type }),
})
}
}
impl TryFrom<u32> for bpf_attach_type {
type Error = InvalidTypeBinding<u32>;
fn try_from(attach_type: u32) -> Result<Self, Self::Error> {
use bpf_attach_type::*;
Ok(match attach_type {
x if x == BPF_CGROUP_INET_INGRESS as u32 => BPF_CGROUP_INET_INGRESS,
x if x == BPF_CGROUP_INET_EGRESS as u32 => BPF_CGROUP_INET_EGRESS,
x if x == BPF_CGROUP_INET_SOCK_CREATE as u32 => BPF_CGROUP_INET_SOCK_CREATE,
x if x == BPF_CGROUP_SOCK_OPS as u32 => BPF_CGROUP_SOCK_OPS,
x if x == BPF_SK_SKB_STREAM_PARSER as u32 => BPF_SK_SKB_STREAM_PARSER,
x if x == BPF_SK_SKB_STREAM_VERDICT as u32 => BPF_SK_SKB_STREAM_VERDICT,
x if x == BPF_CGROUP_DEVICE as u32 => BPF_CGROUP_DEVICE,
x if x == BPF_SK_MSG_VERDICT as u32 => BPF_SK_MSG_VERDICT,
x if x == BPF_CGROUP_INET4_BIND as u32 => BPF_CGROUP_INET4_BIND,
x if x == BPF_CGROUP_INET6_BIND as u32 => BPF_CGROUP_INET6_BIND,
x if x == BPF_CGROUP_INET4_CONNECT as u32 => BPF_CGROUP_INET4_CONNECT,
x if x == BPF_CGROUP_INET6_CONNECT as u32 => BPF_CGROUP_INET6_CONNECT,
x if x == BPF_CGROUP_INET4_POST_BIND as u32 => BPF_CGROUP_INET4_POST_BIND,
x if x == BPF_CGROUP_INET6_POST_BIND as u32 => BPF_CGROUP_INET6_POST_BIND,
x if x == BPF_CGROUP_UDP4_SENDMSG as u32 => BPF_CGROUP_UDP4_SENDMSG,
x if x == BPF_CGROUP_UDP6_SENDMSG as u32 => BPF_CGROUP_UDP6_SENDMSG,
x if x == BPF_LIRC_MODE2 as u32 => BPF_LIRC_MODE2,
x if x == BPF_FLOW_DISSECTOR as u32 => BPF_FLOW_DISSECTOR,
x if x == BPF_CGROUP_SYSCTL as u32 => BPF_CGROUP_SYSCTL,
x if x == BPF_CGROUP_UDP4_RECVMSG as u32 => BPF_CGROUP_UDP4_RECVMSG,
x if x == BPF_CGROUP_UDP6_RECVMSG as u32 => BPF_CGROUP_UDP6_RECVMSG,
x if x == BPF_CGROUP_GETSOCKOPT as u32 => BPF_CGROUP_GETSOCKOPT,
x if x == BPF_CGROUP_SETSOCKOPT as u32 => BPF_CGROUP_SETSOCKOPT,
x if x == BPF_TRACE_RAW_TP as u32 => BPF_TRACE_RAW_TP,
x if x == BPF_TRACE_FENTRY as u32 => BPF_TRACE_FENTRY,
x if x == BPF_TRACE_FEXIT as u32 => BPF_TRACE_FEXIT,
x if x == BPF_MODIFY_RETURN as u32 => BPF_MODIFY_RETURN,
x if x == BPF_LSM_MAC as u32 => BPF_LSM_MAC,
x if x == BPF_TRACE_ITER as u32 => BPF_TRACE_ITER,
x if x == BPF_CGROUP_INET4_GETPEERNAME as u32 => BPF_CGROUP_INET4_GETPEERNAME,
x if x == BPF_CGROUP_INET6_GETPEERNAME as u32 => BPF_CGROUP_INET6_GETPEERNAME,
x if x == BPF_CGROUP_INET4_GETSOCKNAME as u32 => BPF_CGROUP_INET4_GETSOCKNAME,
x if x == BPF_CGROUP_INET6_GETSOCKNAME as u32 => BPF_CGROUP_INET6_GETSOCKNAME,
x if x == BPF_XDP_DEVMAP as u32 => BPF_XDP_DEVMAP,
x if x == BPF_CGROUP_INET_SOCK_RELEASE as u32 => BPF_CGROUP_INET_SOCK_RELEASE,
x if x == BPF_XDP_CPUMAP as u32 => BPF_XDP_CPUMAP,
x if x == BPF_SK_LOOKUP as u32 => BPF_SK_LOOKUP,
x if x == BPF_XDP as u32 => BPF_XDP,
x if x == BPF_SK_SKB_VERDICT as u32 => BPF_SK_SKB_VERDICT,
x if x == BPF_SK_REUSEPORT_SELECT as u32 => BPF_SK_REUSEPORT_SELECT,
x if x == BPF_SK_REUSEPORT_SELECT_OR_MIGRATE as u32 => {
BPF_SK_REUSEPORT_SELECT_OR_MIGRATE
}
x if x == BPF_PERF_EVENT as u32 => BPF_PERF_EVENT,
x if x == BPF_TRACE_KPROBE_MULTI as u32 => BPF_TRACE_KPROBE_MULTI,
x if x == BPF_LSM_CGROUP as u32 => BPF_LSM_CGROUP,
x if x == BPF_STRUCT_OPS as u32 => BPF_STRUCT_OPS,
x if x == BPF_NETFILTER as u32 => BPF_NETFILTER,
x if x == BPF_TCX_INGRESS as u32 => BPF_TCX_INGRESS,
x if x == BPF_TCX_EGRESS as u32 => BPF_TCX_EGRESS,
x if x == BPF_TRACE_UPROBE_MULTI as u32 => BPF_TRACE_UPROBE_MULTI,
x if x == BPF_CGROUP_UNIX_CONNECT as u32 => BPF_CGROUP_UNIX_CONNECT,
x if x == BPF_CGROUP_UNIX_SENDMSG as u32 => BPF_CGROUP_UNIX_SENDMSG,
x if x == BPF_CGROUP_UNIX_RECVMSG as u32 => BPF_CGROUP_UNIX_RECVMSG,
x if x == BPF_CGROUP_UNIX_GETPEERNAME as u32 => BPF_CGROUP_UNIX_GETPEERNAME,
x if x == BPF_CGROUP_UNIX_GETSOCKNAME as u32 => BPF_CGROUP_UNIX_GETSOCKNAME,
x if x == BPF_NETKIT_PRIMARY as u32 => BPF_NETKIT_PRIMARY,
x if x == BPF_NETKIT_PEER as u32 => BPF_NETKIT_PEER,
_ => return Err(InvalidTypeBinding { value: attach_type }),
})
}
}

@ -3,18 +3,10 @@
use alloc::vec::Vec;
use core::mem;
#[cfg(not(feature = "std"))]
use crate::std;
use crate::EbpfSectionKind;
/// Invalid map type encontered
pub struct InvalidMapTypeError {
/// The map type
pub map_type: u32,
}
use crate::{EbpfSectionKind, InvalidTypeBinding};
impl TryFrom<u32> for crate::generated::bpf_map_type {
type Error = InvalidMapTypeError;
type Error = InvalidTypeBinding<u32>;
fn try_from(map_type: u32) -> Result<Self, Self::Error> {
use crate::generated::bpf_map_type::*;
@ -31,7 +23,6 @@ impl TryFrom<u32> for crate::generated::bpf_map_type {
x if x == BPF_MAP_TYPE_LRU_HASH as u32 => BPF_MAP_TYPE_LRU_HASH,
x if x == BPF_MAP_TYPE_LRU_PERCPU_HASH as u32 => BPF_MAP_TYPE_LRU_PERCPU_HASH,
x if x == BPF_MAP_TYPE_LPM_TRIE as u32 => BPF_MAP_TYPE_LPM_TRIE,
x if x == BPF_MAP_TYPE_BLOOM_FILTER as u32 => BPF_MAP_TYPE_BLOOM_FILTER,
x if x == BPF_MAP_TYPE_ARRAY_OF_MAPS as u32 => BPF_MAP_TYPE_ARRAY_OF_MAPS,
x if x == BPF_MAP_TYPE_HASH_OF_MAPS as u32 => BPF_MAP_TYPE_HASH_OF_MAPS,
x if x == BPF_MAP_TYPE_DEVMAP as u32 => BPF_MAP_TYPE_DEVMAP,
@ -42,7 +33,6 @@ impl TryFrom<u32> for crate::generated::bpf_map_type {
x if x == BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED as u32 => {
BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED
}
x if x == BPF_MAP_TYPE_CGRP_STORAGE as u32 => BPF_MAP_TYPE_CGRP_STORAGE,
x if x == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY as u32 => BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
x if x == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED as u32 => {
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED
@ -58,7 +48,8 @@ impl TryFrom<u32> for crate::generated::bpf_map_type {
x if x == BPF_MAP_TYPE_BLOOM_FILTER as u32 => BPF_MAP_TYPE_BLOOM_FILTER,
x if x == BPF_MAP_TYPE_USER_RINGBUF as u32 => BPF_MAP_TYPE_USER_RINGBUF,
x if x == BPF_MAP_TYPE_CGRP_STORAGE as u32 => BPF_MAP_TYPE_CGRP_STORAGE,
_ => return Err(InvalidMapTypeError { map_type }),
x if x == BPF_MAP_TYPE_ARENA as u32 => BPF_MAP_TYPE_ARENA,
_ => return Err(InvalidTypeBinding { value: map_type }),
})
}
}
@ -108,15 +99,14 @@ impl TryFrom<u32> for PinningType {
fn try_from(value: u32) -> Result<Self, Self::Error> {
match value {
0 => Ok(PinningType::None),
1 => Ok(PinningType::ByName),
0 => Ok(Self::None),
1 => Ok(Self::ByName),
pinning_type => Err(PinningError::Unsupported { pinning_type }),
}
}
}
/// Map definition in legacy BPF map declaration style
#[allow(non_camel_case_types)]
#[repr(C)]
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
pub struct bpf_map_def {
@ -154,96 +144,96 @@ impl Map {
/// Returns the map type
pub fn map_type(&self) -> u32 {
match self {
Map::Legacy(m) => m.def.map_type,
Map::Btf(m) => m.def.map_type,
Self::Legacy(m) => m.def.map_type,
Self::Btf(m) => m.def.map_type,
}
}
/// Returns the key size in bytes
pub fn key_size(&self) -> u32 {
match self {
Map::Legacy(m) => m.def.key_size,
Map::Btf(m) => m.def.key_size,
Self::Legacy(m) => m.def.key_size,
Self::Btf(m) => m.def.key_size,
}
}
/// Returns the value size in bytes
pub fn value_size(&self) -> u32 {
match self {
Map::Legacy(m) => m.def.value_size,
Map::Btf(m) => m.def.value_size,
Self::Legacy(m) => m.def.value_size,
Self::Btf(m) => m.def.value_size,
}
}
/// Set the value size in bytes
pub fn set_value_size(&mut self, size: u32) {
match self {
Map::Legacy(m) => m.def.value_size = size,
Map::Btf(m) => m.def.value_size = size,
Self::Legacy(m) => m.def.value_size = size,
Self::Btf(m) => m.def.value_size = size,
}
}
/// Returns the max entry number
pub fn max_entries(&self) -> u32 {
match self {
Map::Legacy(m) => m.def.max_entries,
Map::Btf(m) => m.def.max_entries,
Self::Legacy(m) => m.def.max_entries,
Self::Btf(m) => m.def.max_entries,
}
}
/// Sets the max entry number
pub fn set_max_entries(&mut self, v: u32) {
match self {
Map::Legacy(m) => m.def.max_entries = v,
Map::Btf(m) => m.def.max_entries = v,
Self::Legacy(m) => m.def.max_entries = v,
Self::Btf(m) => m.def.max_entries = v,
}
}
/// Returns the map flags
pub fn map_flags(&self) -> u32 {
match self {
Map::Legacy(m) => m.def.map_flags,
Map::Btf(m) => m.def.map_flags,
Self::Legacy(m) => m.def.map_flags,
Self::Btf(m) => m.def.map_flags,
}
}
/// Returns the pinning type of the map
pub fn pinning(&self) -> PinningType {
match self {
Map::Legacy(m) => m.def.pinning,
Map::Btf(m) => m.def.pinning,
Self::Legacy(m) => m.def.pinning,
Self::Btf(m) => m.def.pinning,
}
}
/// Returns the map data
pub fn data(&self) -> &[u8] {
match self {
Map::Legacy(m) => &m.data,
Map::Btf(m) => &m.data,
Self::Legacy(m) => &m.data,
Self::Btf(m) => &m.data,
}
}
/// Returns the map data as mutable
pub fn data_mut(&mut self) -> &mut Vec<u8> {
match self {
Map::Legacy(m) => m.data.as_mut(),
Map::Btf(m) => m.data.as_mut(),
Self::Legacy(m) => m.data.as_mut(),
Self::Btf(m) => m.data.as_mut(),
}
}
/// Returns the section index
pub fn section_index(&self) -> usize {
match self {
Map::Legacy(m) => m.section_index,
Map::Btf(m) => m.section_index,
Self::Legacy(m) => m.section_index,
Self::Btf(m) => m.section_index,
}
}
/// Returns the section kind.
pub fn section_kind(&self) -> EbpfSectionKind {
match self {
Map::Legacy(m) => m.section_kind,
Map::Btf(_) => EbpfSectionKind::BtfMaps,
Self::Legacy(m) => m.section_kind,
Self::Btf(_) => EbpfSectionKind::BtfMaps,
}
}
@ -253,8 +243,8 @@ impl Map {
/// need symbols in order to be relocated.
pub fn symbol_index(&self) -> Option<usize> {
match self {
Map::Legacy(m) => m.symbol_index,
Map::Btf(m) => Some(m.symbol_index),
Self::Legacy(m) => m.symbol_index,
Self::Btf(m) => Some(m.symbol_index),
}
}
}

File diff suppressed because it is too large Load Diff

@ -16,12 +16,12 @@ pub enum CgroupSockAttachType {
}
impl From<CgroupSockAttachType> for bpf_attach_type {
fn from(s: CgroupSockAttachType) -> bpf_attach_type {
fn from(s: CgroupSockAttachType) -> Self {
match s {
CgroupSockAttachType::PostBind4 => bpf_attach_type::BPF_CGROUP_INET4_POST_BIND,
CgroupSockAttachType::PostBind6 => bpf_attach_type::BPF_CGROUP_INET6_POST_BIND,
CgroupSockAttachType::SockCreate => bpf_attach_type::BPF_CGROUP_INET_SOCK_CREATE,
CgroupSockAttachType::SockRelease => bpf_attach_type::BPF_CGROUP_INET_SOCK_RELEASE,
CgroupSockAttachType::PostBind4 => Self::BPF_CGROUP_INET4_POST_BIND,
CgroupSockAttachType::PostBind6 => Self::BPF_CGROUP_INET6_POST_BIND,
CgroupSockAttachType::SockCreate => Self::BPF_CGROUP_INET_SOCK_CREATE,
CgroupSockAttachType::SockRelease => Self::BPF_CGROUP_INET_SOCK_RELEASE,
}
}
}

@ -31,20 +31,20 @@ pub enum CgroupSockAddrAttachType {
}
impl From<CgroupSockAddrAttachType> for bpf_attach_type {
fn from(s: CgroupSockAddrAttachType) -> bpf_attach_type {
fn from(s: CgroupSockAddrAttachType) -> Self {
match s {
CgroupSockAddrAttachType::Bind4 => bpf_attach_type::BPF_CGROUP_INET4_BIND,
CgroupSockAddrAttachType::Bind6 => bpf_attach_type::BPF_CGROUP_INET6_BIND,
CgroupSockAddrAttachType::Connect4 => bpf_attach_type::BPF_CGROUP_INET4_CONNECT,
CgroupSockAddrAttachType::Connect6 => bpf_attach_type::BPF_CGROUP_INET6_CONNECT,
CgroupSockAddrAttachType::GetPeerName4 => bpf_attach_type::BPF_CGROUP_INET4_GETPEERNAME,
CgroupSockAddrAttachType::GetPeerName6 => bpf_attach_type::BPF_CGROUP_INET6_GETPEERNAME,
CgroupSockAddrAttachType::GetSockName4 => bpf_attach_type::BPF_CGROUP_INET4_GETSOCKNAME,
CgroupSockAddrAttachType::GetSockName6 => bpf_attach_type::BPF_CGROUP_INET6_GETSOCKNAME,
CgroupSockAddrAttachType::UDPSendMsg4 => bpf_attach_type::BPF_CGROUP_UDP4_SENDMSG,
CgroupSockAddrAttachType::UDPSendMsg6 => bpf_attach_type::BPF_CGROUP_UDP6_SENDMSG,
CgroupSockAddrAttachType::UDPRecvMsg4 => bpf_attach_type::BPF_CGROUP_UDP4_RECVMSG,
CgroupSockAddrAttachType::UDPRecvMsg6 => bpf_attach_type::BPF_CGROUP_UDP6_RECVMSG,
CgroupSockAddrAttachType::Bind4 => Self::BPF_CGROUP_INET4_BIND,
CgroupSockAddrAttachType::Bind6 => Self::BPF_CGROUP_INET6_BIND,
CgroupSockAddrAttachType::Connect4 => Self::BPF_CGROUP_INET4_CONNECT,
CgroupSockAddrAttachType::Connect6 => Self::BPF_CGROUP_INET6_CONNECT,
CgroupSockAddrAttachType::GetPeerName4 => Self::BPF_CGROUP_INET4_GETPEERNAME,
CgroupSockAddrAttachType::GetPeerName6 => Self::BPF_CGROUP_INET6_GETPEERNAME,
CgroupSockAddrAttachType::GetSockName4 => Self::BPF_CGROUP_INET4_GETSOCKNAME,
CgroupSockAddrAttachType::GetSockName6 => Self::BPF_CGROUP_INET6_GETSOCKNAME,
CgroupSockAddrAttachType::UDPSendMsg4 => Self::BPF_CGROUP_UDP4_SENDMSG,
CgroupSockAddrAttachType::UDPSendMsg6 => Self::BPF_CGROUP_UDP6_SENDMSG,
CgroupSockAddrAttachType::UDPRecvMsg4 => Self::BPF_CGROUP_UDP4_RECVMSG,
CgroupSockAddrAttachType::UDPRecvMsg6 => Self::BPF_CGROUP_UDP6_RECVMSG,
}
}
}

@ -11,10 +11,10 @@ pub enum CgroupSockoptAttachType {
}
impl From<CgroupSockoptAttachType> for bpf_attach_type {
fn from(s: CgroupSockoptAttachType) -> bpf_attach_type {
fn from(s: CgroupSockoptAttachType) -> Self {
match s {
CgroupSockoptAttachType::Get => bpf_attach_type::BPF_CGROUP_GETSOCKOPT,
CgroupSockoptAttachType::Set => bpf_attach_type::BPF_CGROUP_SETSOCKOPT,
CgroupSockoptAttachType::Get => Self::BPF_CGROUP_GETSOCKOPT,
CgroupSockoptAttachType::Set => Self::BPF_CGROUP_SETSOCKOPT,
}
}
}

@ -3,6 +3,7 @@
pub mod cgroup_sock;
pub mod cgroup_sock_addr;
pub mod cgroup_sockopt;
mod types;
pub mod xdp;
pub use cgroup_sock::CgroupSockAttachType;

@ -0,0 +1,51 @@
//! Program type bindings.
use crate::{
InvalidTypeBinding,
generated::bpf_prog_type::{self, *},
};
impl TryFrom<u32> for bpf_prog_type {
type Error = InvalidTypeBinding<u32>;
fn try_from(prog_type: u32) -> Result<Self, Self::Error> {
Ok(match prog_type {
x if x == BPF_PROG_TYPE_UNSPEC as u32 => BPF_PROG_TYPE_UNSPEC,
x if x == BPF_PROG_TYPE_SOCKET_FILTER as u32 => BPF_PROG_TYPE_SOCKET_FILTER,
x if x == BPF_PROG_TYPE_KPROBE as u32 => BPF_PROG_TYPE_KPROBE,
x if x == BPF_PROG_TYPE_SCHED_CLS as u32 => BPF_PROG_TYPE_SCHED_CLS,
x if x == BPF_PROG_TYPE_SCHED_ACT as u32 => BPF_PROG_TYPE_SCHED_ACT,
x if x == BPF_PROG_TYPE_TRACEPOINT as u32 => BPF_PROG_TYPE_TRACEPOINT,
x if x == BPF_PROG_TYPE_XDP as u32 => BPF_PROG_TYPE_XDP,
x if x == BPF_PROG_TYPE_PERF_EVENT as u32 => BPF_PROG_TYPE_PERF_EVENT,
x if x == BPF_PROG_TYPE_CGROUP_SKB as u32 => BPF_PROG_TYPE_CGROUP_SKB,
x if x == BPF_PROG_TYPE_CGROUP_SOCK as u32 => BPF_PROG_TYPE_CGROUP_SOCK,
x if x == BPF_PROG_TYPE_LWT_IN as u32 => BPF_PROG_TYPE_LWT_IN,
x if x == BPF_PROG_TYPE_LWT_OUT as u32 => BPF_PROG_TYPE_LWT_OUT,
x if x == BPF_PROG_TYPE_LWT_XMIT as u32 => BPF_PROG_TYPE_LWT_XMIT,
x if x == BPF_PROG_TYPE_SOCK_OPS as u32 => BPF_PROG_TYPE_SOCK_OPS,
x if x == BPF_PROG_TYPE_SK_SKB as u32 => BPF_PROG_TYPE_SK_SKB,
x if x == BPF_PROG_TYPE_CGROUP_DEVICE as u32 => BPF_PROG_TYPE_CGROUP_DEVICE,
x if x == BPF_PROG_TYPE_SK_MSG as u32 => BPF_PROG_TYPE_SK_MSG,
x if x == BPF_PROG_TYPE_RAW_TRACEPOINT as u32 => BPF_PROG_TYPE_RAW_TRACEPOINT,
x if x == BPF_PROG_TYPE_CGROUP_SOCK_ADDR as u32 => BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
x if x == BPF_PROG_TYPE_LWT_SEG6LOCAL as u32 => BPF_PROG_TYPE_LWT_SEG6LOCAL,
x if x == BPF_PROG_TYPE_LIRC_MODE2 as u32 => BPF_PROG_TYPE_LIRC_MODE2,
x if x == BPF_PROG_TYPE_SK_REUSEPORT as u32 => BPF_PROG_TYPE_SK_REUSEPORT,
x if x == BPF_PROG_TYPE_FLOW_DISSECTOR as u32 => BPF_PROG_TYPE_FLOW_DISSECTOR,
x if x == BPF_PROG_TYPE_CGROUP_SYSCTL as u32 => BPF_PROG_TYPE_CGROUP_SYSCTL,
x if x == BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE as u32 => {
BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE
}
x if x == BPF_PROG_TYPE_CGROUP_SOCKOPT as u32 => BPF_PROG_TYPE_CGROUP_SOCKOPT,
x if x == BPF_PROG_TYPE_TRACING as u32 => BPF_PROG_TYPE_TRACING,
x if x == BPF_PROG_TYPE_STRUCT_OPS as u32 => BPF_PROG_TYPE_STRUCT_OPS,
x if x == BPF_PROG_TYPE_EXT as u32 => BPF_PROG_TYPE_EXT,
x if x == BPF_PROG_TYPE_LSM as u32 => BPF_PROG_TYPE_LSM,
x if x == BPF_PROG_TYPE_SK_LOOKUP as u32 => BPF_PROG_TYPE_SK_LOOKUP,
x if x == BPF_PROG_TYPE_SYSCALL as u32 => BPF_PROG_TYPE_SYSCALL,
x if x == BPF_PROG_TYPE_NETFILTER as u32 => BPF_PROG_TYPE_NETFILTER,
_ => return Err(InvalidTypeBinding { value: prog_type }),
})
}
}

@ -16,9 +16,9 @@ pub enum XdpAttachType {
impl From<XdpAttachType> for bpf_attach_type {
fn from(value: XdpAttachType) -> Self {
match value {
XdpAttachType::Interface => bpf_attach_type::BPF_XDP,
XdpAttachType::CpuMap => bpf_attach_type::BPF_XDP_CPUMAP,
XdpAttachType::DevMap => bpf_attach_type::BPF_XDP_DEVMAP,
XdpAttachType::Interface => Self::BPF_XDP,
XdpAttachType::CpuMap => Self::BPF_XDP_CPUMAP,
XdpAttachType::DevMap => Self::BPF_XDP_DEVMAP,
}
}
}

@ -1,24 +1,27 @@
//! Program relocation handling.
use alloc::{borrow::ToOwned, collections::BTreeMap, string::String};
use alloc::{borrow::ToOwned as _, collections::BTreeMap, string::String};
use core::mem;
use log::debug;
use object::{SectionIndex, SymbolKind};
#[cfg(not(feature = "std"))]
use crate::std;
use crate::{
EbpfSectionKind,
generated::{
bpf_insn, BPF_CALL, BPF_JMP, BPF_K, BPF_PSEUDO_CALL, BPF_PSEUDO_FUNC, BPF_PSEUDO_MAP_FD,
BPF_PSEUDO_MAP_VALUE,
BPF_CALL, BPF_JMP, BPF_K, BPF_PSEUDO_CALL, BPF_PSEUDO_FUNC, BPF_PSEUDO_MAP_FD,
BPF_PSEUDO_MAP_VALUE, bpf_insn,
},
maps::Map,
obj::{Function, Object},
util::{HashMap, HashSet},
EbpfSectionKind,
};
#[cfg(feature = "std")]
type RawFd = std::os::fd::RawFd;
#[cfg(not(feature = "std"))]
type RawFd = core::ffi::c_int;
pub(crate) const INS_SIZE: usize = mem::size_of::<bpf_insn>();
/// The error type returned by [`Object::relocate_maps`] and [`Object::relocate_calls`]
@ -64,7 +67,9 @@ pub enum RelocationError {
},
/// Unknown function
#[error("program at section {section_index} and address {address:#x} was not found while relocating")]
#[error(
"program at section {section_index} and address {address:#x} was not found while relocating"
)]
UnknownProgram {
/// The function section index
section_index: usize,
@ -104,7 +109,7 @@ pub(crate) struct Symbol {
impl Object {
/// Relocates the map references
pub fn relocate_maps<'a, I: Iterator<Item = (&'a str, std::os::fd::RawFd, &'a Map)>>(
pub fn relocate_maps<'a, I: Iterator<Item = (&'a str, RawFd, &'a Map)>>(
&mut self,
maps: I,
text_sections: &HashSet<usize>,
@ -179,8 +184,8 @@ impl Object {
fn relocate_maps<'a, I: Iterator<Item = &'a Relocation>>(
fun: &mut Function,
relocations: I,
maps_by_section: &HashMap<usize, (&str, std::os::fd::RawFd, &Map)>,
maps_by_symbol: &HashMap<usize, (&str, std::os::fd::RawFd, &Map)>,
maps_by_section: &HashMap<usize, (&str, RawFd, &Map)>,
maps_by_symbol: &HashMap<usize, (&str, RawFd, &Map)>,
symbol_table: &HashMap<usize, Symbol>,
text_sections: &HashSet<usize>,
) -> Result<(), RelocationError> {
@ -197,7 +202,7 @@ fn relocate_maps<'a, I: Iterator<Item = &'a Relocation>>(
// make sure that the relocation offset is properly aligned
let ins_offset = rel_offset - section_offset;
if ins_offset % INS_SIZE != 0 {
if !ins_offset.is_multiple_of(INS_SIZE) {
return Err(RelocationError::InvalidRelocationOffset {
offset: rel.offset,
relocation_number: rel_n,
@ -234,7 +239,7 @@ fn relocate_maps<'a, I: Iterator<Item = &'a Relocation>>(
m
} else {
let Some(m) = maps_by_section.get(&section_index) else {
debug!("failed relocating map by section index {}", section_index);
debug!("failed relocating map by section index {section_index}");
return Err(RelocationError::SectionNotFound {
symbol_index: rel.symbol_index,
symbol_name: sym.name.clone(),
@ -284,8 +289,8 @@ impl<'a> FunctionLinker<'a> {
relocations: &'a HashMap<SectionIndex, HashMap<u64, Relocation>>,
symbol_table: &'a HashMap<usize, Symbol>,
text_sections: &'a HashSet<usize>,
) -> FunctionLinker<'a> {
FunctionLinker {
) -> Self {
Self {
functions,
linked_functions: HashMap::new(),
relocations,
@ -398,7 +403,7 @@ impl<'a> FunctionLinker<'a> {
fun.section_index.0,
(fun.section_offset as i64
+ ((ins_index - start_ins) as i64) * ins_size
+ (ins.imm + 1) as i64 * ins_size) as u64,
+ i64::from(ins.imm + 1) * ins_size) as u64,
)
};
@ -483,21 +488,21 @@ impl<'a> FunctionLinker<'a> {
}
fn insn_is_call(ins: &bpf_insn) -> bool {
let klass = (ins.code & 0x07) as u32;
let op = (ins.code & 0xF0) as u32;
let src = (ins.code & 0x08) as u32;
let klass = u32::from(ins.code & 0x07);
let op = u32::from(ins.code & 0xF0);
let src = u32::from(ins.code & 0x08);
klass == BPF_JMP
&& op == BPF_CALL
&& src == BPF_K
&& ins.src_reg() as u32 == BPF_PSEUDO_CALL
&& u32::from(ins.src_reg()) == BPF_PSEUDO_CALL
&& ins.dst_reg() == 0
&& ins.off == 0
}
#[cfg(test)]
mod test {
use alloc::{string::ToString, vec, vec::Vec};
use alloc::{string::ToString as _, vec, vec::Vec};
use super::*;
use crate::maps::{BtfMap, LegacyMap};
@ -515,7 +520,7 @@ mod test {
}
fn ins(bytes: &[u8]) -> bpf_insn {
unsafe { core::ptr::read_unaligned(bytes.as_ptr() as *const _) }
unsafe { core::ptr::read_unaligned(bytes.as_ptr().cast()) }
}
fn fake_legacy_map(symbol_index: usize) -> Map {

@ -1,4 +1,4 @@
use core::{mem, slice};
use core::{mem, ptr, slice};
#[cfg(feature = "std")]
pub(crate) use std::collections::HashMap;
#[cfg(feature = "std")]
@ -11,6 +11,5 @@ pub(crate) use hashbrown::HashSet;
/// bytes_of converts a <T> to a byte slice
pub(crate) unsafe fn bytes_of<T>(val: &T) -> &[u8] {
let size = mem::size_of::<T>();
slice::from_raw_parts(slice::from_ref(val).as_ptr().cast(), size)
unsafe { slice::from_raw_parts(ptr::from_ref(val).cast(), mem::size_of_val(val)) }
}

@ -1,17 +1,22 @@
[package]
description = "A tool for generating bindings for Linux Kernel types"
name = "aya-tool"
version = "0.1.0"
publish = false
description = "A tool for generating bindings for Linux Kernel types"
version = "0.1.0"
authors.workspace = true
edition.workspace = true
homepage.workspace = true
license.workspace = true
repository.workspace = true
homepage.workspace = true
edition.workspace = true
rust-version.workspace = true
[lints]
workspace = true
[dependencies]
anyhow = { workspace = true, default-features = true }
bindgen = { workspace = true, default-features = true }
clap = { workspace = true, default-features = true, features = ["derive"] }
anyhow = { workspace = true, default-features = true }
thiserror = { workspace = true }
tempfile = { workspace = true }
thiserror = { workspace = true }

@ -1,6 +1,8 @@
#![expect(unused_crate_dependencies, reason = "used in lib")]
use std::{path::PathBuf, process::exit};
use aya_tool::generate::{generate, InputFile};
use aya_tool::generate::{InputFile, generate};
use clap::Parser;
#[derive(Parser)]

@ -1,23 +1,23 @@
use bindgen::{Builder, EnumVariation};
pub fn user_builder() -> Builder {
fn common_builder() -> Builder {
bindgen::builder()
.use_core()
.layout_tests(false)
.generate_comments(false)
.prepend_enum_name(false)
.default_enum_style(EnumVariation::Rust {
non_exhaustive: false,
})
.clang_macro_fallback()
}
pub fn user_builder() -> Builder {
common_builder().default_enum_style(EnumVariation::Rust {
non_exhaustive: false,
})
}
pub fn bpf_builder() -> Builder {
bindgen::builder()
.use_core()
common_builder()
.ctypes_prefix("::aya_ebpf::cty")
.layout_tests(false)
.generate_comments(false)
.clang_arg("-Wno-unknown-attributes")
.default_enum_style(EnumVariation::ModuleConsts)
.prepend_enum_name(false)
}

@ -1,32 +1,27 @@
use std::{
fs::{self, File},
io::{self, Write},
io::{self, Write as _},
path::{Path, PathBuf},
process::Command,
process::{Command, Output},
str,
};
use tempfile::tempdir;
use thiserror::Error;
use crate::bindgen;
#[derive(Error, Debug)]
pub enum Error {
#[error("error executing bpftool")]
BpfTool(#[source] io::Error),
#[error("{stderr}\nbpftool failed with exit code {code}")]
BpfToolExit { code: i32, stderr: String },
#[error("bpftool failed: {0:?}")]
BpfToolExit(Output),
#[error("bindgen failed")]
Bindgen(#[source] io::Error),
#[error("{stderr}\nbindgen failed with exit code {code}")]
BindgenExit { code: i32, stderr: String },
#[error("rustfmt failed")]
Rustfmt(#[source] io::Error),
#[error("bindgen failed: {0:?}")]
BindgenExit(Output),
#[error("error reading header file")]
ReadHeaderFile(#[source] io::Error),
@ -47,7 +42,7 @@ pub fn generate<T: AsRef<str>>(
.map(|s| s.as_ref().into())
.collect::<Vec<_>>();
let mut bindgen = bindgen::bpf_builder();
let mut bindgen = crate::bindgen::bpf_builder();
let (additional_flags, ctypes_prefix) = extract_ctypes_prefix(&additional_flags);
if let Some(prefix) = ctypes_prefix {
@ -69,7 +64,7 @@ pub fn generate<T: AsRef<str>>(
let dir = tempdir().unwrap();
let file_path = dir.path().join(name);
let mut file = File::create(&file_path).unwrap();
let _ = file.write(c_header.as_bytes()).unwrap();
let () = file.write_all(c_header.as_bytes()).unwrap();
let flags = combine_flags(&bindgen.command_line_flags(), &additional_flags);
@ -79,14 +74,13 @@ pub fn generate<T: AsRef<str>>(
.output()
.map_err(Error::Bindgen)?;
if !output.status.success() {
return Err(Error::BindgenExit {
code: output.status.code().unwrap(),
stderr: str::from_utf8(&output.stderr).unwrap().to_owned(),
});
let Output { status, .. } = &output;
if !status.success() {
return Err(Error::BindgenExit(output));
}
let Output { stdout, .. } = output;
Ok(str::from_utf8(&output.stdout).unwrap().to_owned())
Ok(String::from_utf8(stdout).unwrap())
}
fn c_header_from_btf(path: &Path) -> Result<String, Error> {
@ -97,14 +91,13 @@ fn c_header_from_btf(path: &Path) -> Result<String, Error> {
.output()
.map_err(Error::BpfTool)?;
if !output.status.success() {
return Err(Error::BpfToolExit {
code: output.status.code().unwrap(),
stderr: str::from_utf8(&output.stderr).unwrap().to_owned(),
});
let Output { status, .. } = &output;
if !status.success() {
return Err(Error::BpfToolExit(output));
}
let Output { stdout, .. } = output;
Ok(str::from_utf8(&output.stdout).unwrap().to_owned())
Ok(String::from_utf8(stdout).unwrap())
}
fn extract_ctypes_prefix(s: &[String]) -> (Vec<String>, Option<String>) {
@ -179,32 +172,32 @@ mod test {
#[test]
fn test_combine_flags() {
assert_eq!(
combine_flags(&to_vec("a b"), &to_vec("c d"),).join(" "),
combine_flags(&to_vec("a b"), &to_vec("c d")).join(" "),
"a b c d",
);
assert_eq!(
combine_flags(&to_vec("a -- b"), &to_vec("a b"),).join(" "),
combine_flags(&to_vec("a -- b"), &to_vec("a b")).join(" "),
"a a b -- b",
);
assert_eq!(
combine_flags(&to_vec("a -- b"), &to_vec("c d"),).join(" "),
combine_flags(&to_vec("a -- b"), &to_vec("c d")).join(" "),
"a c d -- b",
);
assert_eq!(
combine_flags(&to_vec("a b"), &to_vec("c -- d"),).join(" "),
combine_flags(&to_vec("a b"), &to_vec("c -- d")).join(" "),
"a b c -- d",
);
assert_eq!(
combine_flags(&to_vec("a -- b"), &to_vec("c -- d"),).join(" "),
combine_flags(&to_vec("a -- b"), &to_vec("c -- d")).join(" "),
"a c -- b d",
);
assert_eq!(
combine_flags(&to_vec("a -- b"), &to_vec("-- c d"),).join(" "),
combine_flags(&to_vec("a -- b"), &to_vec("-- c d")).join(" "),
"a -- b c d",
);
}

@ -1,20 +1,4 @@
use std::{
fs::File,
io::{self, Write},
path::Path,
};
#![expect(unused_crate_dependencies, reason = "used in bin")]
pub mod bindgen;
pub mod generate;
pub mod rustfmt;
pub use generate::{generate, InputFile};
pub fn write_to_file<T: AsRef<Path>>(path: T, code: &str) -> Result<(), io::Error> {
let mut file = File::create(path)?;
file.write_all(code.as_bytes())
}
pub fn write_to_file_fmt<T: AsRef<Path>>(path: T, code: &str) -> Result<(), io::Error> {
write_to_file(path, &rustfmt::format(code)?)
}

@ -1,25 +0,0 @@
use std::{
io::{self, Write},
process::{Command, Stdio},
};
pub fn format(code: &str) -> Result<String, io::Error> {
let mut child = Command::new("rustfmt")
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn()?;
let stdin = child.stdin.as_mut().unwrap();
stdin.write_all(code.as_bytes())?;
let output = child.wait_with_output()?;
if !output.status.success() {
return Err(io::Error::new(
io::ErrorKind::Other,
format!(
"rustfmt failed with exit code: {}",
output.status.code().unwrap()
),
));
}
Ok(String::from_utf8(output.stdout).unwrap())
}

@ -8,6 +8,9 @@ history and changelog. We also tag PRs on github with a [breaking change] label.
## Summary
- [v0.14.0](#v0140)
- MSRV has been bumped to 1.85.0.
- [v0.12.0](#v0120)
- In `aya::Bpf::programs`, `name` uses the function name from the ELF file.
- Maps API has been reworked.
@ -17,6 +20,15 @@ history and changelog. We also tag PRs on github with a [breaking change] label.
- BTF types have moved to the `aya-obj` crate.
- `aya::PerfEvent::attach` and `detach` signatures have changed.
## v0.14.0
### MSRV has been bumped to 1.85.0
The minimum supported Rust version has been bumped to 1.85.0. This is due to
the move to edition 2024 which was first available in this version.
To migrate you will need to ensure that you are using rustc 1.85.0 or later.
## v0.12.0
### In `aya::Bpf::programs`, `name` uses the function name from the ELF file

@ -5,6 +5,647 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
### Breaking Changes
- Remove `AsyncPerfEventArray` and `AsyncPerfEventArrayBuffer` These types have been removed to
avoid maintaining support for multiple async runtimes. Use `PerfEventArrayBuffer`, which
implements `As{,Raw}Fd` for integration with async executors.
- Rename `EbpfLoader::map_pin_path` to `EbpfLoader::default_map_pin_directory`.
- Rename `EbpfLoader::set_global` to `EbpfLoader::override_global`.
- Rename `EbpfLoader::set_max_entries` to `EbpfLoader::map_max_entries`.
### Other
- Provide deprecated aliases to ease migration, these will be removed in a future release;
- `EbpfLoader::set_global` calls `EbpfLoader::override_global`, and
- `EbpfLoader::set_max_entries` calls `EbpfLoader::map_max_entries`.
## 0.13.1 (2024-11-01)
### Chore
- <csr-id-e575712c596d03b93f75d160e3d95241eb895d39/> Add comments in `*_wrong_map` tests
- <csr-id-70ac91dc1e6f209a701cd868db215763d65efa73/> Rename bpf -> ebpf
- <csr-id-481b73b6d8dd9a796d891bba137400c2a43a0afe/> Fix unused_qualifications lints
This was failing the docs build.
### Documentation
- <csr-id-f1773d5af43f5f29b100572e65a60d58f2ce7fac/> fix typo
- <csr-id-57a69fe9d28e858562a429bacd9a0a7700b96726/> Use `Ebpf` instead of `Bpf`
### New Features
- <csr-id-5478cac008471bdb80aa30733e4456b70ec1a5bd/> Implement TCX
This commit adds the initial support for TCX
bpf links. This is a new, multi-program, attachment
type that allows the caller to specify where
they would like to be attached relative to other
programs at the attachment point using the LinkOrder
type.
- <csr-id-110a76cb9a1b2ab5c5ad3b6c0828a4ae670e67a0/> Provide a deprecated `BpfError` alias
- <csr-id-8c79b71bd5699a686f33360520aa95c1a2895fa5/> Rename Bpf to Ebpf
And BpfLoader to EbpfLoader.
This also adds type aliases to preserve the use of the old names, making
updating to a new Aya release less of a burden. These aliases are marked
as deprecated since we'll likely remove them in a later release.
### Bug Fixes
- <csr-id-ca0c32d1076af81349a52235a4b6fb3937a697b3/> Fill bss maps with zeros
The loader should fill bss maps with zeros according to the size of the
ELF section.
Failure to do so yields weird verifier messages as follows:
```
cannot access ptr member ops with moff 0 in struct bpf_map with off 0 size 4
```
Reference to this in the cilium/ebpf code is here [1].
I could not find a reference in libbpf.
- <csr-id-3d57d358e40591acf23dfde740697fbfff026410/> Fix PerfEventArray resize logic
There was a logic bug in the previously merged patch where we
overwrote the correctly calculated max_entries size with the original.
To fix this and prevent regressions a unit test was added.
This highlighted that the original map definition needs to be
mutated in order for the max_entries change to be properly applied.
As such, this resize logic moved out of aya::sys into aya::maps
- <csr-id-25d986a26d9c88cd499a8b795054d583f01476b2/> Set PerfEventArray max_entries to nCPUs
Both libbpf and cilium/ebpf will set the max_entries of a
BPF_MAP_TYPE_PERF_EVENT_ARRAY to the number of online CPUs if
it was omitted at map definition time. This adds that same
logic to Aya.
- <csr-id-38d8e32baa5a4538de9daa6fae634aea6372573c/> fix panic when creating map on custom ubuntu kernel
- <csr-id-5e13283f59b0c3b4cb47de1e31d8d0960e80b4cc/> fix rustdocs-args ordering in taplo to -D warnings
This fixes the current rustdoc build error by correcting the ordering of
`rustdoc-args` to `-D warnings`. Additionally, this also removes the
`recorder_arrays` field (defaults to false) so that the order is not
modified, which is what caused the error in the first place.
### Other
- <csr-id-c44f8b0f5bddd820a4a98cff293126c0146b827a/> use FdLink in SockOps programs
- <csr-id-02d1db5fc043fb7af90c14d13de6419ec5b9bcb5/> remove unwrap and NonZero* in info
Addresses the feedback from #1007:
- remove panic from `unwrap` and `expect`
- Option<NonZero*> => Option<int> with `0` mapping to `None`
- <csr-id-fbb09304a2de0d8baf7ea20c9727fcd2e4fb7f41/> revamp MapInfo be more friendly with older kernels
Adds detection for whether a field is available in `MapInfo`:
- For `map_type()`, we return the new enum `MapType` instead of the integer
representation.
- For fields that can't be zero, we return `Option<NonZero*>` type.
- For `name_as_str()`, it now uses the feature probe `bpf_name()` to
detect if field is available.
Although the feature probe checks for program name, it can also be
used for map name since they were both introduced in the same commit.
- <csr-id-88f5ac31142f1657b41b1ee0f217dcd9125b210a/> revamp ProgramInfo be more friendly with older kernels
Purpose of this commit is to add detections for whether a field is
available in `ProgramInfo`.
- For `program_type()`, we return the new enum `ProgramType` instead of
the integer representation.
- For fields that we know cannot be zero, we return `Option<NonZero*>`
type.
- For `name_as_str()`, it now also uses the feature probe `bpf_name()`
to detect if field is available or not.
- Two additional feature probes are added for the fields:
- `prog_info_map_ids()` probe -> `map_ids()` field
- `prog_info_gpl_compatible()` probe -> `gpl_compatible()` field
With the `prog_info_map_ids()` probe, the previous implementation that
I had for `bpf_prog_get_info_by_fd()` is shortened to use the probe
instead of having to make 2 potential syscalls.
The `test_loaded_at()` test is also moved into info tests since it is
better related to the info tests.
- <csr-id-1634fa7188e40ed75da53517f1fdb7396c348c34/> add conversion u32 to enum type for prog, link, & attach type
Add conversion from u32 to program type, link type, and attach type.
Additionally, remove duplicate match statement for u32 conversion to
`BPF_MAP_TYPE_BLOOM_FILTER` & `BPF_MAP_TYPE_CGRP_STORAGE`.
New error `InvalidTypeBinding<T>` is created to represent when a
parsed/received value binding to a type is invalid.
This is used in the new conversions added here, and also replaces
`InvalidMapTypeError` in `TryFrom` for `bpf_map_type`.
- <csr-id-cb8e47880082ccfcd75b02209b686e15426e9b6a/> improve integration tests for info API
Improves the existing integration tests for `loaded_programs()` and
`loaded_maps()` in consideration for older kernels:
- Opt for `SocketFilter` program in tests since XDP requires v4.8 and
fragments requires v5.18.
- For assertion tests, first perform the assertion, if the assertion
fails, then it checks the host kernel version to see if it is above
the minimum version requirement. If not, then continue with test,
otherwise fail.
For assertions that are skipped, they're logged in stderr which can
be observed with `-- --nocapture`.
This also fixes the `bpf_prog_get_info_by_fd()` call for kernels below
v4.15. If calling syscall on kernels below v4.15, it can produce an
`E2BIG` error because `check_uarg_tail_zero()` expects the entire
struct to be all-zero bytes (which is caused by the map info).
Instead, we first attempt the syscall with the map info filled, if it
returns `E2BIG`, then perform syscall again with empty closure.
Also adds doc for which version a kernel feature was introduced for
better awareness.
The tests have been verified on kernel versions:
- 4.13.0
- 4.15.0
- 6.1.0
- <csr-id-cd1db86fd490b3c0f03229bd8999a2e67ccecfc4/> adjust bpf programs for big endian
In aya/src/sys/bpf.rs, there are several simple bpf programs written as
byte arrays. These need to be adjusted to account for big endian.
- <csr-id-a25f501ecebaceaacdd1212fac34f528b51ad0fd/> expose run_time_ns and run_cnt fields in ProgramInfo
Added functions to expose `run_time_ns` & `run_cnt` statistics from
ProgramInfo/bpf_prog_info.
- <csr-id-fa6af6a20439cccd8ab961f83dce545fb5884dd4/> add BPF_ENABLE_STATS syscall function
Add bpf syscall function for BPF_ENABLE_STATS to enable stats tracking
for benchmarking purposes.
Additionally, move `#[cfg(test)]` annotation around the `Drop` trait
instead. Having separate functions causes some complications when
needing ownership/moving of the inner value `OwnedFd` when `Drop` is
manually implemented.
- <csr-id-d413e2f285643cbeb665fd3c517e2c9d93d45825/> :programs::uprobe: fix bad variable name
The variable fn_name was very much *not* the fn_name, but rather the
object file path.
- <csr-id-462514ed4c4c06e9618d029a57708c7fa14ab748/> adjust symbol lookup tests for object crate alignment requirements
The object::File::parse API requires parameter to be aligned with 8 bytes.
Adjusted the Vec in the tests with miri to meet this requirement.
- <csr-id-e6e1bfeb58ac392637061640365b057182ee1b39/> add symbol lookup in associated debug files
This change enhances the logic for symbol lookup in uprobe or uretprobe.
If the symbol is not found in the original binary, the search continues
in the debug file associated through the debuglink section. Before
searching the symbol table, it compares the build IDs of the two files.
The symbol lookup will only be terminated if both build IDs exist and do
not match. This modification does not affect the existing symbol lookup
logic.
- <csr-id-b06ff402780b80862933791831c578e4c339fc96/> Generate new bindings
- <csr-id-a4e68ebdbf0e0b591509f36316d12d9689d23f89/> include license in crate workspace
This PR includes the licenses files in the crate workspace subdirectory.
Without this, they won't be showing on crates.io and would be giving out
errors on tooling such as rust2rpm.
- <csr-id-e38eac6352ccb5c2b44d621161a27898744ea397/> appease new nightly clippy lints
```
error: unnecessary qualification
--> aya/src/maps/ring_buf.rs:434:22
|
434 | ptr: ptr::NonNull::new(ptr).ok_or(
| ^^^^^^^^^^^^^^^^^
|
note: the lint level is defined here
--> aya/src/lib.rs:72:5
|
72 | unused_qualifications,
| ^^^^^^^^^^^^^^^^^^^^^
help: remove the unnecessary path segments
|
434 - ptr: ptr::NonNull::new(ptr).ok_or(
434 + ptr: NonNull::new(ptr).ok_or(
|
error: unnecessary qualification
--> aya/src/maps/mod.rs:225:21
|
225 | let mut limit = std::mem::MaybeUninit::<rlimit>::uninit();
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
help: remove the unnecessary path segments
|
225 - let mut limit = std::mem::MaybeUninit::<rlimit>::uninit();
225 + let mut limit = mem::MaybeUninit::<rlimit>::uninit();
|
error: unnecessary qualification
--> aya/src/programs/mod.rs:614:9
|
614 | crate::obj::Program {
| ^^^^^^^^^^^^^^^^^^^
|
help: remove the unnecessary path segments
|
614 - crate::obj::Program {
614 + obj::Program {
|
error: unnecessary qualification
--> aya/src/util.rs:373:14
|
373 | unsafe { std::slice::from_raw_parts(bpf_name.as_ptr() as
*const _, length) }
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
help: remove the unnecessary path segments
|
373 - unsafe { std::slice::from_raw_parts(bpf_name.as_ptr() as
*const _, length) }
373 + unsafe { slice::from_raw_parts(bpf_name.as_ptr() as *const _,
length) }
|
error: unnecessary qualification
--> aya/src/maps/mod.rs:1130:47
|
1130 | .copy_from_slice(unsafe {
std::mem::transmute(TEST_NAME) });
| ^^^^^^^^^^^^^^^^^^^
|
note: the lint level is defined here
--> aya/src/lib.rs:72:5
|
72 | unused_qualifications,
| ^^^^^^^^^^^^^^^^^^^^^
help: remove the unnecessary path segments
|
1130 - .copy_from_slice(unsafe {
std::mem::transmute(TEST_NAME) });
1130 + .copy_from_slice(unsafe {
mem::transmute(TEST_NAME) });
|
```
### Performance
- <csr-id-d05110fd86f9b317d47ffb7cf5c00e588635d4cd/> cache `nr_cpus` in a thread_local
### Test
- <csr-id-eef7346fb2231f8741410381198015cceeebfac9/> adjust test byte arrays for big endian
Adding support for s390x (big endian architecture) and found that some
of the unit tests have structures and files implemented as byte arrays.
They are all coded as little endian and need a big endian version to
work properly.
### New Features (BREAKING)
- <csr-id-fd48c55466a23953ce7a4912306e1acf059b498b/> Rename BpfRelocationError -> EbpfRelocationError
- <csr-id-cf3e2ca677c81224368fb2838ebc5b10ee98419a/> Rename BpfSectionKind to EbpfSectionKind
### Commit Statistics
<csr-read-only-do-not-edit/>
- 69 commits contributed to the release over the course of 241 calendar days.
- 247 days passed between releases.
- 32 commits were understood as [conventional](https://www.conventionalcommits.org).
- 0 issues like '(#ID)' were seen in commit messages
### Commit Details
<csr-read-only-do-not-edit/>
<details><summary>view details</summary>
* **Uncategorized**
- Release aya-obj v0.2.1 ([`c6a34ca`](https://github.com/aya-rs/aya/commit/c6a34cade195d682e1eece5b71e3ab48e48f3cda))
- Merge pull request #1073 from dave-tucker/reloc-bug ([`b2ac9fe`](https://github.com/aya-rs/aya/commit/b2ac9fe85db6c25d0b8155a75a2df96a80a19811))
- Fill bss maps with zeros ([`ca0c32d`](https://github.com/aya-rs/aya/commit/ca0c32d1076af81349a52235a4b6fb3937a697b3))
- Release aya-obj v0.2.0, aya v0.13.0, safety bump aya v0.13.0 ([`c169b72`](https://github.com/aya-rs/aya/commit/c169b727e6b8f8c2dda57f54b8c77f8b551025c6))
- Implement TCX ([`5478cac`](https://github.com/aya-rs/aya/commit/5478cac008471bdb80aa30733e4456b70ec1a5bd))
- Cache `nr_cpus` in a thread_local ([`d05110f`](https://github.com/aya-rs/aya/commit/d05110fd86f9b317d47ffb7cf5c00e588635d4cd))
- Clarify `Arc` usage ([`afd777b`](https://github.com/aya-rs/aya/commit/afd777b705312b7bafec2a116041a2318d3aa70f))
- Replace `Arc` with `&'static` ([`e992c28`](https://github.com/aya-rs/aya/commit/e992c280cbae7af7e484767a0b79314b14a4de84))
- Avoid intermediate allocations in parse_cpu_ranges ([`0e86757`](https://github.com/aya-rs/aya/commit/0e867572ff8e009bbcd1a63037b4ab5b80e35549))
- Reduce duplication in `{nr,possible}_cpus` ([`f3b2744`](https://github.com/aya-rs/aya/commit/f3b27440725a0eb2f1615c92cb0047e3b1548d66))
- Replace `lazy_static` with `std::sync::LazyLock` ([`2b299d4`](https://github.com/aya-rs/aya/commit/2b299d4fba1ddda70c2e8af324f999cb23683559))
- Appease clippy ([`0f16363`](https://github.com/aya-rs/aya/commit/0f163633e3d73c59f857880c967c27e9f52e8610))
- Merge pull request #1023 from l2dy/fdlink/sockops ([`2cd3576`](https://github.com/aya-rs/aya/commit/2cd35769dce05b46a4dd07381c990c6acd4cfe0d))
- Use FdLink in SockOps programs ([`c44f8b0`](https://github.com/aya-rs/aya/commit/c44f8b0f5bddd820a4a98cff293126c0146b827a))
- Remove unwrap and NonZero* in info ([`02d1db5`](https://github.com/aya-rs/aya/commit/02d1db5fc043fb7af90c14d13de6419ec5b9bcb5))
- Merge pull request #985 from reyzell/main ([`40f3032`](https://github.com/aya-rs/aya/commit/40f303205f7a800877fe3f9a4fb1893141741e13))
- Add the option to support multiple and overrideable programs per cgroup ([`f790685`](https://github.com/aya-rs/aya/commit/f790685d759cbd97cb09ad48d87cdece28fbe579))
- Merge pull request #1007 from tyrone-wu/aya/info-api ([`15eb935`](https://github.com/aya-rs/aya/commit/15eb935bce6d41fb67189c48ce582b074544e0ed))
- Revamp MapInfo be more friendly with older kernels ([`fbb0930`](https://github.com/aya-rs/aya/commit/fbb09304a2de0d8baf7ea20c9727fcd2e4fb7f41))
- Revamp ProgramInfo be more friendly with older kernels ([`88f5ac3`](https://github.com/aya-rs/aya/commit/88f5ac31142f1657b41b1ee0f217dcd9125b210a))
- Add conversion u32 to enum type for prog, link, & attach type ([`1634fa7`](https://github.com/aya-rs/aya/commit/1634fa7188e40ed75da53517f1fdb7396c348c34))
- Improve integration tests for info API ([`cb8e478`](https://github.com/aya-rs/aya/commit/cb8e47880082ccfcd75b02209b686e15426e9b6a))
- Merge pull request #959 from tyrone-wu/aya/program_info_stats ([`ab000ad`](https://github.com/aya-rs/aya/commit/ab000ad7c3b0715c3cdd9798bd08fc834b114f1a))
- Merge pull request #974 from Billy99/billy99-arch-ppc64-s390x ([`ab5e688`](https://github.com/aya-rs/aya/commit/ab5e688fd49fcfb402ad47d51cb445437fbd8cb7))
- Adjust bpf programs for big endian ([`cd1db86`](https://github.com/aya-rs/aya/commit/cd1db86fd490b3c0f03229bd8999a2e67ccecfc4))
- Adjust test byte arrays for big endian ([`eef7346`](https://github.com/aya-rs/aya/commit/eef7346fb2231f8741410381198015cceeebfac9))
- Simplify doctest ([`4362020`](https://github.com/aya-rs/aya/commit/43620206918facbf003d8b878ae28c5b07955167))
- Appease nightly clippy ([`bce3c4f`](https://github.com/aya-rs/aya/commit/bce3c4fb1d0cd6e8f9f64420c59e02a42c96b2c8))
- Expose run_time_ns and run_cnt fields in ProgramInfo ([`a25f501`](https://github.com/aya-rs/aya/commit/a25f501ecebaceaacdd1212fac34f528b51ad0fd))
- Add BPF_ENABLE_STATS syscall function ([`fa6af6a`](https://github.com/aya-rs/aya/commit/fa6af6a20439cccd8ab961f83dce545fb5884dd4))
- Fix PerfEventArray resize logic ([`3d57d35`](https://github.com/aya-rs/aya/commit/3d57d358e40591acf23dfde740697fbfff026410))
- Add comments in `*_wrong_map` tests ([`e575712`](https://github.com/aya-rs/aya/commit/e575712c596d03b93f75d160e3d95241eb895d39))
- Set PerfEventArray max_entries to nCPUs ([`25d986a`](https://github.com/aya-rs/aya/commit/25d986a26d9c88cd499a8b795054d583f01476b2))
- Use MockableFd everywhere ([`e12fcf4`](https://github.com/aya-rs/aya/commit/e12fcf46cb1e0856a8105ed43fda184fa4648713))
- Merge pull request #991 from l2dy/typo-1 ([`2cd9858`](https://github.com/aya-rs/aya/commit/2cd9858ea9381232acaffcb5a08bc74e90a8863e))
- Fix typo ([`f1773d5`](https://github.com/aya-rs/aya/commit/f1773d5af43f5f29b100572e65a60d58f2ce7fac))
- Merge pull request #983 from ajwerner/fix-variable-name ([`d5414bf`](https://github.com/aya-rs/aya/commit/d5414bf10c80ae8cef757f0cdf06bfdd38746daa))
- :programs::uprobe: fix bad variable name ([`d413e2f`](https://github.com/aya-rs/aya/commit/d413e2f285643cbeb665fd3c517e2c9d93d45825))
- Fix panic when creating map on custom ubuntu kernel ([`38d8e32`](https://github.com/aya-rs/aya/commit/38d8e32baa5a4538de9daa6fae634aea6372573c))
- Appease clippy ([`78acd74`](https://github.com/aya-rs/aya/commit/78acd74badb6aa2463f89fbdf713325dad75dc9e))
- Don't deny unused_qualifications ([`781914f`](https://github.com/aya-rs/aya/commit/781914f058ef805bd0780ff72a2a66c63255bc07))
- Fix rustdocs-args ordering in taplo to -D warnings ([`5e13283`](https://github.com/aya-rs/aya/commit/5e13283f59b0c3b4cb47de1e31d8d0960e80b4cc))
- Remove deny(pointer_structural_match) ([`4e843a3`](https://github.com/aya-rs/aya/commit/4e843a35237c2de49d17621dccb4a2a35bb4030c))
- Merge pull request #938 from swananan/enhance_urpobe_symbol_lookup ([`bde4b5f`](https://github.com/aya-rs/aya/commit/bde4b5f86b12a3e4ac2f99898edb1b564fe9dd7e))
- Fix clippy ([`c7898c5`](https://github.com/aya-rs/aya/commit/c7898c596f2f74f29570101d0f71f35b0ab4104b))
- Adjust symbol lookup tests for object crate alignment requirements ([`462514e`](https://github.com/aya-rs/aya/commit/462514ed4c4c06e9618d029a57708c7fa14ab748))
- Add symbol lookup in associated debug files ([`e6e1bfe`](https://github.com/aya-rs/aya/commit/e6e1bfeb58ac392637061640365b057182ee1b39))
- Merge pull request #928 from seanyoung/io-error ([`d0e9b95`](https://github.com/aya-rs/aya/commit/d0e9b95aa5edc6c056687caeb950e1ce44b18d66))
- S/MiriSafeFd/MockableFd/ ([`a11b61e`](https://github.com/aya-rs/aya/commit/a11b61ebfde8713c35b6f2a760e470d3586803a7))
- Remove miri ignores ([`cb6d3bd`](https://github.com/aya-rs/aya/commit/cb6d3bd75d162e4928fdf4daa7f515e1ad85ae85))
- Document miri skip reasons ([`35962a4`](https://github.com/aya-rs/aya/commit/35962a4794484aa3b37dadc98a70a659fd107b75))
- Avoid crashing under Miri ([`7a7d168`](https://github.com/aya-rs/aya/commit/7a7d16885a89af8c10a52e5aba0927784d42f551))
- Deduplicate test helpers ([`7e1666f`](https://github.com/aya-rs/aya/commit/7e1666fb83e5c2b270cb24becb84adebbe29be1a))
- Reduce duplication ([`58e154e`](https://github.com/aya-rs/aya/commit/58e154e1bc4846a6a2afcb8397aa599cfb7ea6fd))
- Expose io_error in SyscallError ([`a6c45f6`](https://github.com/aya-rs/aya/commit/a6c45f61c77c4bbec4409debb8447cd606f0db5d))
- Appease clippy ([`09442c2`](https://github.com/aya-rs/aya/commit/09442c2cbe9513365dfc1df8d4f7cf6f808a67ed))
- Generate new bindings ([`b06ff40`](https://github.com/aya-rs/aya/commit/b06ff402780b80862933791831c578e4c339fc96))
- Appease clippy ([`0a32dac`](https://github.com/aya-rs/aya/commit/0a32dacd2fd2f225f4a3709ac4ea2838a9937378))
- Merge pull request #528 from dave-tucker/rename-all-the-things ([`63d8d4d`](https://github.com/aya-rs/aya/commit/63d8d4d34bdbbee149047dc0a5e9c2b191f3b32d))
- Include license in crate workspace ([`a4e68eb`](https://github.com/aya-rs/aya/commit/a4e68ebdbf0e0b591509f36316d12d9689d23f89))
- Use `Ebpf` instead of `Bpf` ([`57a69fe`](https://github.com/aya-rs/aya/commit/57a69fe9d28e858562a429bacd9a0a7700b96726))
- Provide a deprecated `BpfError` alias ([`110a76c`](https://github.com/aya-rs/aya/commit/110a76cb9a1b2ab5c5ad3b6c0828a4ae670e67a0))
- Rename Bpf to Ebpf ([`8c79b71`](https://github.com/aya-rs/aya/commit/8c79b71bd5699a686f33360520aa95c1a2895fa5))
- Rename BpfRelocationError -> EbpfRelocationError ([`fd48c55`](https://github.com/aya-rs/aya/commit/fd48c55466a23953ce7a4912306e1acf059b498b))
- Rename BpfSectionKind to EbpfSectionKind ([`cf3e2ca`](https://github.com/aya-rs/aya/commit/cf3e2ca677c81224368fb2838ebc5b10ee98419a))
- Rename bpf -> ebpf ([`70ac91d`](https://github.com/aya-rs/aya/commit/70ac91dc1e6f209a701cd868db215763d65efa73))
- Fix unused_qualifications lints ([`481b73b`](https://github.com/aya-rs/aya/commit/481b73b6d8dd9a796d891bba137400c2a43a0afe))
- Add `CgroupDevice::query` ([`542306d`](https://github.com/aya-rs/aya/commit/542306d295e51ac1ec117ce453544f201875af3d))
- Appease new nightly clippy lints ([`e38eac6`](https://github.com/aya-rs/aya/commit/e38eac6352ccb5c2b44d621161a27898744ea397))
</details>
## 0.13.0 (2024-10-09)
<csr-id-e575712c596d03b93f75d160e3d95241eb895d39/>
<csr-id-70ac91dc1e6f209a701cd868db215763d65efa73/>
<csr-id-481b73b6d8dd9a796d891bba137400c2a43a0afe/>
<csr-id-c44f8b0f5bddd820a4a98cff293126c0146b827a/>
<csr-id-02d1db5fc043fb7af90c14d13de6419ec5b9bcb5/>
<csr-id-fbb09304a2de0d8baf7ea20c9727fcd2e4fb7f41/>
<csr-id-88f5ac31142f1657b41b1ee0f217dcd9125b210a/>
<csr-id-1634fa7188e40ed75da53517f1fdb7396c348c34/>
<csr-id-cb8e47880082ccfcd75b02209b686e15426e9b6a/>
<csr-id-cd1db86fd490b3c0f03229bd8999a2e67ccecfc4/>
<csr-id-a25f501ecebaceaacdd1212fac34f528b51ad0fd/>
<csr-id-fa6af6a20439cccd8ab961f83dce545fb5884dd4/>
<csr-id-d413e2f285643cbeb665fd3c517e2c9d93d45825/>
<csr-id-462514ed4c4c06e9618d029a57708c7fa14ab748/>
<csr-id-e6e1bfeb58ac392637061640365b057182ee1b39/>
<csr-id-b06ff402780b80862933791831c578e4c339fc96/>
<csr-id-a4e68ebdbf0e0b591509f36316d12d9689d23f89/>
<csr-id-e38eac6352ccb5c2b44d621161a27898744ea397/>
<csr-id-eef7346fb2231f8741410381198015cceeebfac9/>
### Chore
- <csr-id-e575712c596d03b93f75d160e3d95241eb895d39/> Add comments in `*_wrong_map` tests
- <csr-id-70ac91dc1e6f209a701cd868db215763d65efa73/> Rename bpf -> ebpf
- <csr-id-481b73b6d8dd9a796d891bba137400c2a43a0afe/> Fix unused_qualifications lints
This was failing the docs build.
### Documentation
- <csr-id-f1773d5af43f5f29b100572e65a60d58f2ce7fac/> fix typo
- <csr-id-57a69fe9d28e858562a429bacd9a0a7700b96726/> Use `Ebpf` instead of `Bpf`
### New Features
- <csr-id-5478cac008471bdb80aa30733e4456b70ec1a5bd/> Implement TCX
This commit adds the initial support for TCX
bpf links. This is a new, multi-program, attachment
type that allows the caller to specify where
they would like to be attached relative to other
programs at the attachment point using the LinkOrder
type.
- <csr-id-110a76cb9a1b2ab5c5ad3b6c0828a4ae670e67a0/> Provide a deprecated `BpfError` alias
- <csr-id-8c79b71bd5699a686f33360520aa95c1a2895fa5/> Rename Bpf to Ebpf
And BpfLoader to EbpfLoader.
This also adds type aliases to preserve the use of the old names, making
updating to a new Aya release less of a burden. These aliases are marked
as deprecated since we'll likely remove them in a later release.
### Bug Fixes
- <csr-id-3d57d358e40591acf23dfde740697fbfff026410/> Fix PerfEventArray resize logic
There was a logic bug in the previously merged patch where we
overwrote the correctly calculated max_entries size with the original.
To fix this and prevent regressions a unit test was added.
This highlighted that the original map definition needs to be
mutated in order for the max_entries change to be properly applied.
As such, this resize logic moved out of aya::sys into aya::maps
- <csr-id-25d986a26d9c88cd499a8b795054d583f01476b2/> Set PerfEventArray max_entries to nCPUs
Both libbpf and cilium/ebpf will set the max_entries of a
BPF_MAP_TYPE_PERF_EVENT_ARRAY to the number of online CPUs if
it was omitted at map definition time. This adds that same
logic to Aya.
- <csr-id-38d8e32baa5a4538de9daa6fae634aea6372573c/> fix panic when creating map on custom ubuntu kernel
- <csr-id-5e13283f59b0c3b4cb47de1e31d8d0960e80b4cc/> fix rustdocs-args ordering in taplo to -D warnings
This fixes the current rustdoc build error by correcting the ordering of
`rustdoc-args` to `-D warnings`. Additionally, this also removes the
`recorder_arrays` field (defaults to false) so that the order is not
modified, which is what caused the error in the first place.
### Other
- <csr-id-c44f8b0f5bddd820a4a98cff293126c0146b827a/> use FdLink in SockOps programs
- <csr-id-02d1db5fc043fb7af90c14d13de6419ec5b9bcb5/> remove unwrap and NonZero* in info
Addresses the feedback from #1007:
- remove panic from `unwrap` and `expect`
- Option<NonZero*> => Option<int> with `0` mapping to `None`
- <csr-id-fbb09304a2de0d8baf7ea20c9727fcd2e4fb7f41/> revamp MapInfo be more friendly with older kernels
Adds detection for whether a field is available in `MapInfo`:
- For `map_type()`, we return the new enum `MapType` instead of the integer
representation.
- For fields that can't be zero, we return `Option<NonZero*>` type.
- For `name_as_str()`, it now uses the feature probe `bpf_name()` to
detect if field is available.
Although the feature probe checks for program name, it can also be
used for map name since they were both introduced in the same commit.
- <csr-id-88f5ac31142f1657b41b1ee0f217dcd9125b210a/> revamp ProgramInfo be more friendly with older kernels
Purpose of this commit is to add detections for whether a field is
available in `ProgramInfo`.
- For `program_type()`, we return the new enum `ProgramType` instead of
the integer representation.
- For fields that we know cannot be zero, we return `Option<NonZero*>`
type.
- For `name_as_str()`, it now also uses the feature probe `bpf_name()`
to detect if field is available or not.
- Two additional feature probes are added for the fields:
- `prog_info_map_ids()` probe -> `map_ids()` field
- `prog_info_gpl_compatible()` probe -> `gpl_compatible()` field
With the `prog_info_map_ids()` probe, the previous implementation that
I had for `bpf_prog_get_info_by_fd()` is shortened to use the probe
instead of having to make 2 potential syscalls.
The `test_loaded_at()` test is also moved into info tests since it is
better related to the info tests.
- <csr-id-1634fa7188e40ed75da53517f1fdb7396c348c34/> add conversion u32 to enum type for prog, link, & attach type
Add conversion from u32 to program type, link type, and attach type.
Additionally, remove duplicate match statement for u32 conversion to
`BPF_MAP_TYPE_BLOOM_FILTER` & `BPF_MAP_TYPE_CGRP_STORAGE`.
New error `InvalidTypeBinding<T>` is created to represent when a
parsed/received value binding to a type is invalid.
This is used in the new conversions added here, and also replaces
`InvalidMapTypeError` in `TryFrom` for `bpf_map_type`.
- <csr-id-cb8e47880082ccfcd75b02209b686e15426e9b6a/> improve integration tests for info API
Improves the existing integration tests for `loaded_programs()` and
`loaded_maps()` in consideration for older kernels:
- Opt for `SocketFilter` program in tests since XDP requires v4.8 and
fragments requires v5.18.
- For assertion tests, first perform the assertion, if the assertion
fails, then it checks the host kernel version to see if it is above
the minimum version requirement. If not, then continue with test,
otherwise fail.
For assertions that are skipped, they're logged in stderr which can
be observed with `-- --nocapture`.
This also fixes the `bpf_prog_get_info_by_fd()` call for kernels below
v4.15. If calling syscall on kernels below v4.15, it can produce an
`E2BIG` error because `check_uarg_tail_zero()` expects the entire
struct to be all-zero bytes (which is caused by the map info).
Instead, we first attempt the syscall with the map info filled, if it
returns `E2BIG`, then perform syscall again with empty closure.
Also adds doc for which version a kernel feature was introduced for
better awareness.
The tests have been verified on kernel versions:
- 4.13.0
- 4.15.0
- 6.1.0
- <csr-id-cd1db86fd490b3c0f03229bd8999a2e67ccecfc4/> adjust bpf programs for big endian
In aya/src/sys/bpf.rs, there are several simple bpf programs written as
byte arrays. These need to be adjusted to account for big endian.
- <csr-id-a25f501ecebaceaacdd1212fac34f528b51ad0fd/> expose run_time_ns and run_cnt fields in ProgramInfo
Added functions to expose `run_time_ns` & `run_cnt` statistics from
ProgramInfo/bpf_prog_info.
- <csr-id-fa6af6a20439cccd8ab961f83dce545fb5884dd4/> add BPF_ENABLE_STATS syscall function
Add bpf syscall function for BPF_ENABLE_STATS to enable stats tracking
for benchmarking purposes.
Additionally, move `#[cfg(test)]` annotation around the `Drop` trait
instead. Having separate functions causes some complications when
needing ownership/moving of the inner value `OwnedFd` when `Drop` is
manually implemented.
- <csr-id-d413e2f285643cbeb665fd3c517e2c9d93d45825/> :programs::uprobe: fix bad variable name
The variable fn_name was very much *not* the fn_name, but rather the
object file path.
- <csr-id-462514ed4c4c06e9618d029a57708c7fa14ab748/> adjust symbol lookup tests for object crate alignment requirements
The object::File::parse API requires parameter to be aligned with 8 bytes.
Adjusted the Vec in the tests with miri to meet this requirement.
- <csr-id-e6e1bfeb58ac392637061640365b057182ee1b39/> add symbol lookup in associated debug files
This change enhances the logic for symbol lookup in uprobe or uretprobe.
If the symbol is not found in the original binary, the search continues
in the debug file associated through the debuglink section. Before
searching the symbol table, it compares the build IDs of the two files.
The symbol lookup will only be terminated if both build IDs exist and do
not match. This modification does not affect the existing symbol lookup
logic.
- <csr-id-b06ff402780b80862933791831c578e4c339fc96/> Generate new bindings
- <csr-id-a4e68ebdbf0e0b591509f36316d12d9689d23f89/> include license in crate workspace
This PR includes the licenses files in the crate workspace subdirectory.
Without this, they won't be showing on crates.io and would be giving out
errors on tooling such as rust2rpm.
- <csr-id-e38eac6352ccb5c2b44d621161a27898744ea397/> appease new nightly clippy lints
```
error: unnecessary qualification
--> aya/src/maps/ring_buf.rs:434:22
|
434 | ptr: ptr::NonNull::new(ptr).ok_or(
| ^^^^^^^^^^^^^^^^^
|
note: the lint level is defined here
--> aya/src/lib.rs:72:5
|
72 | unused_qualifications,
| ^^^^^^^^^^^^^^^^^^^^^
help: remove the unnecessary path segments
|
434 - ptr: ptr::NonNull::new(ptr).ok_or(
434 + ptr: NonNull::new(ptr).ok_or(
|
error: unnecessary qualification
--> aya/src/maps/mod.rs:225:21
|
225 | let mut limit = std::mem::MaybeUninit::<rlimit>::uninit();
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
help: remove the unnecessary path segments
|
225 - let mut limit = std::mem::MaybeUninit::<rlimit>::uninit();
225 + let mut limit = mem::MaybeUninit::<rlimit>::uninit();
|
error: unnecessary qualification
--> aya/src/programs/mod.rs:614:9
|
614 | crate::obj::Program {
| ^^^^^^^^^^^^^^^^^^^
|
help: remove the unnecessary path segments
|
614 - crate::obj::Program {
614 + obj::Program {
|
error: unnecessary qualification
--> aya/src/util.rs:373:14
|
373 | unsafe { std::slice::from_raw_parts(bpf_name.as_ptr() as
*const _, length) }
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
help: remove the unnecessary path segments
|
373 - unsafe { std::slice::from_raw_parts(bpf_name.as_ptr() as
*const _, length) }
373 + unsafe { slice::from_raw_parts(bpf_name.as_ptr() as *const _,
length) }
|
error: unnecessary qualification
--> aya/src/maps/mod.rs:1130:47
|
1130 | .copy_from_slice(unsafe {
std::mem::transmute(TEST_NAME) });
| ^^^^^^^^^^^^^^^^^^^
|
note: the lint level is defined here
--> aya/src/lib.rs:72:5
|
72 | unused_qualifications,
| ^^^^^^^^^^^^^^^^^^^^^
help: remove the unnecessary path segments
|
1130 - .copy_from_slice(unsafe {
std::mem::transmute(TEST_NAME) });
1130 + .copy_from_slice(unsafe {
mem::transmute(TEST_NAME) });
|
```
### Performance
- <csr-id-d05110fd86f9b317d47ffb7cf5c00e588635d4cd/> cache `nr_cpus` in a thread_local
### Test
- <csr-id-eef7346fb2231f8741410381198015cceeebfac9/> adjust test byte arrays for big endian
Adding support for s390x (big endian architecture) and found that some
of the unit tests have structures and files implemented as byte arrays.
They are all coded as little endian and need a big endian version to
work properly.
### New Features (BREAKING)
- <csr-id-fd48c55466a23953ce7a4912306e1acf059b498b/> Rename BpfRelocationError -> EbpfRelocationError
- <csr-id-cf3e2ca677c81224368fb2838ebc5b10ee98419a/> Rename BpfSectionKind to EbpfSectionKind
## 0.12.0 (2024-02-28)
<csr-id-b3e7ef741c5b8d09fc7dc8302576f8174be75ff4/>
@ -179,6 +820,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
<csr-id-572d047e37111b732be49ef3ad6fb16f70aa4063/>
<csr-id-6f3cce75cf11af27a9267dd88a688fc24e6b17b5/>
<csr-id-c74813f8c545fca288094f47b20096e58eb5f46a/>
<csr-id-13b1fc63ef2ae083ba03ce9de24cb4f31f989d21/>
### Chore
@ -828,7 +1470,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
<csr-read-only-do-not-edit/>
- 433 commits contributed to the release over the course of 631 calendar days.
- 434 commits contributed to the release.
- 631 days passed between releases.
- 182 commits were understood as [conventional](https://www.conventionalcommits.org).
- 0 issues like '(#ID)' were seen in commit messages
@ -840,6 +1482,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
<details><summary>view details</summary>
* **Uncategorized**
- Release aya-obj v0.1.0, aya v0.12.0, safety bump aya-log v0.2.0 ([`0e99fa0`](https://github.com/aya-rs/aya/commit/0e99fa0f340b2fb2e0da3b330aa6555322a77eec))
- Don't use path deps in workspace ([`13b1fc6`](https://github.com/aya-rs/aya/commit/13b1fc63ef2ae083ba03ce9de24cb4f31f989d21))
- Merge pull request #892 from dave-tucker/breaking-changes-v2 ([`daa5a47`](https://github.com/aya-rs/aya/commit/daa5a473105e0c99f5f171ba519d076a7157af6e))
- Merge pull request #891 from dave-tucker/changelog ([`431ce23`](https://github.com/aya-rs/aya/commit/431ce23f27ef5c36a6b38c73b38f23b1cf007900))
@ -1618,7 +2261,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
<csr-read-only-do-not-edit/>
- 105 commits contributed to the release over the course of 123 calendar days.
- 105 commits contributed to the release.
- 125 days passed between releases.
- 39 commits were understood as [conventional](https://www.conventionalcommits.org).
- 1 unique issue was worked on: [#111](https://github.com/aya-rs/aya/issues/111)
@ -1787,7 +2430,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
<csr-read-only-do-not-edit/>
- 22 commits contributed to the release over the course of 22 calendar days.
- 22 commits contributed to the release.
- 28 days passed between releases.
- 9 commits were understood as [conventional](https://www.conventionalcommits.org).
- 0 issues like '(#ID)' were seen in commit messages
@ -1847,7 +2490,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
<csr-read-only-do-not-edit/>
- 7 commits contributed to the release over the course of 17 calendar days.
- 7 commits contributed to the release.
- 24 days passed between releases.
- 5 commits were understood as [conventional](https://www.conventionalcommits.org).
- 0 issues like '(#ID)' were seen in commit messages
@ -1898,7 +2541,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
<csr-read-only-do-not-edit/>
- 13 commits contributed to the release over the course of 38 calendar days.
- 13 commits contributed to the release.
- 52 days passed between releases.
- 8 commits were understood as [conventional](https://www.conventionalcommits.org).
- 0 issues like '(#ID)' were seen in commit messages
@ -2000,7 +2643,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
<csr-read-only-do-not-edit/>
- 29 commits contributed to the release over the course of 43 calendar days.
- 29 commits contributed to the release.
- 43 days passed between releases.
- 24 commits were understood as [conventional](https://www.conventionalcommits.org).
- 3 unique issues were worked on: [#18](https://github.com/aya-rs/aya/issues/18), [#31](https://github.com/aya-rs/aya/issues/31), [#32](https://github.com/aya-rs/aya/issues/32)
@ -2032,7 +2675,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Don't error out parsing padded map sections ([`b657930`](https://github.com/aya-rs/aya/commit/b657930a3ee61f88ada0630afdac6b1c77459244))
- Added support for armv7-unknown-linux-gnueabi and armv7-unknown-linux-gnueabihf ([`8311abf`](https://github.com/aya-rs/aya/commit/8311abfdcbbe70da6abdd67b78b831d53998aad5))
- Tc: make qdisc_add_clsact return io::Error ([`9c8e78b`](https://github.com/aya-rs/aya/commit/9c8e78b7d4192b376ec2e532d9ddcf81c3c5182e))
- Aya, aya-ebpf-bindings: regenerate bindings ([`122a530`](https://github.com/aya-rs/aya/commit/122a5306e72c7560629bcef160e7f676b84eabd7))
- Aya, aya-bpf-bindings: regenerate bindings ([`122a530`](https://github.com/aya-rs/aya/commit/122a5306e72c7560629bcef160e7f676b84eabd7))
- Kprobe: remove pid argument ([`08c71df`](https://github.com/aya-rs/aya/commit/08c71dfeb19b2b4358d75baf5b95f8d4e6521935))
- Add missing load() in kprobe example ([`bb15e82`](https://github.com/aya-rs/aya/commit/bb15e82c1d8373700dda52f69d6c4bf6f5489a03))
- Support both bpf_map_def layout variants ([`d8d3117`](https://github.com/aya-rs/aya/commit/d8d311738c974f3b6fad22006ab2b827d0925ce8))
@ -2058,7 +2701,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
<csr-read-only-do-not-edit/>
- 5 commits contributed to the release over the course of 1 calendar day.
- 5 commits contributed to the release.
- 1 day passed between releases.
- 1 commit was understood as [conventional](https://www.conventionalcommits.org).
- 0 issues like '(#ID)' were seen in commit messages

@ -1,38 +1,36 @@
[package]
name = "aya"
version = "0.12.0"
description = "An eBPF library with a focus on developer experience and operability."
documentation = "https://docs.rs/aya"
keywords = ["bpf", "ebpf", "kernel", "linux"]
name = "aya"
readme = "README.md"
documentation = "https://docs.rs/aya"
rust-version = "1.66"
version = "0.13.1"
authors.workspace = true
edition.workspace = true
homepage.workspace = true
license.workspace = true
repository.workspace = true
homepage.workspace = true
edition.workspace = true
rust-version.workspace = true
[lints]
workspace = true
[dependencies]
assert_matches = { workspace = true }
async-io = { workspace = true, optional = true }
aya-obj = { path = "../aya-obj", version = "^0.1.0", features = ["std"] }
aya-obj = { path = "../aya-obj", version = "^0.2.1", features = ["std"] }
bitflags = { workspace = true }
bytes = { workspace = true }
lazy_static = { workspace = true }
hashbrown = { workspace = true }
libc = { workspace = true }
log = { workspace = true }
object = { workspace = true, features = ["elf", "read_core", "std", "write"] }
once_cell = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true, features = ["rt"], optional = true }
[dev-dependencies]
tempfile = { workspace = true }
[features]
default = []
async_tokio = ["tokio/net"]
async_std = ["dep:async-io"]
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs", "-D", "warnings"]

@ -2,57 +2,45 @@ use std::{
borrow::Cow,
collections::{HashMap, HashSet},
fs, io,
os::{
fd::{AsFd as _, AsRawFd as _, OwnedFd},
raw::c_int,
},
os::fd::{AsFd as _, AsRawFd as _},
path::{Path, PathBuf},
sync::Arc,
sync::{Arc, LazyLock},
};
use aya_obj::{
btf::{BtfFeatures, BtfRelocationError},
generated::{BPF_F_SLEEPABLE, BPF_F_XDP_HAS_FRAGS},
EbpfSectionKind, Features, Object, ParseError, ProgramSection,
btf::{Btf, BtfError, BtfFeatures, BtfRelocationError},
generated::{
BPF_F_SLEEPABLE, BPF_F_XDP_HAS_FRAGS,
bpf_map_type::{self, *},
},
relocation::EbpfRelocationError,
EbpfSectionKind, Features,
};
use log::{debug, warn};
use thiserror::Error;
use crate::{
generated::{
bpf_map_type, bpf_map_type::*, AYA_PERF_EVENT_IOC_DISABLE, AYA_PERF_EVENT_IOC_ENABLE,
AYA_PERF_EVENT_IOC_SET_BPF,
},
maps::{Map, MapData, MapError},
obj::{
btf::{Btf, BtfError},
Object, ParseError, ProgramSection,
},
programs::{
BtfTracePoint, CgroupDevice, CgroupSkb, CgroupSkbAttachType, CgroupSock, CgroupSockAddr,
CgroupSockopt, CgroupSysctl, Extension, FEntry, FExit, KProbe, LircMode2, Lsm, PerfEvent,
ProbeKind, Program, ProgramData, ProgramError, RawTracePoint, SchedClassifier, SkLookup,
SkMsg, SkSkb, SkSkbKind, SockOps, SocketFilter, TracePoint, UProbe, Xdp,
CgroupSockopt, CgroupSysctl, Extension, FEntry, FExit, FlowDissector, Iter, KProbe,
LircMode2, Lsm, LsmCgroup, PerfEvent, ProbeKind, Program, ProgramData, ProgramError,
RawTracePoint, SchedClassifier, SkLookup, SkMsg, SkSkb, SkSkbKind, SockOps, SocketFilter,
TracePoint, UProbe, Xdp,
},
sys::{
bpf_load_btf, is_bpf_cookie_supported, is_bpf_global_data_supported,
is_btf_datasec_supported, is_btf_decl_tag_supported, is_btf_enum64_supported,
is_btf_float_supported, is_btf_func_global_supported, is_btf_func_supported,
is_btf_supported, is_btf_type_tag_supported, is_perf_link_supported,
is_btf_datasec_supported, is_btf_datasec_zero_supported, is_btf_decl_tag_supported,
is_btf_enum64_supported, is_btf_float_supported, is_btf_func_global_supported,
is_btf_func_supported, is_btf_supported, is_btf_type_tag_supported, is_perf_link_supported,
is_probe_read_kernel_supported, is_prog_id_supported, is_prog_name_supported,
retry_with_verifier_logs,
},
util::{bytes_of, bytes_of_slice, page_size, possible_cpus, POSSIBLE_CPUS},
util::{bytes_of, bytes_of_slice, nr_cpus, page_size},
};
pub(crate) const BPF_OBJ_NAME_LEN: usize = 16;
pub(crate) const PERF_EVENT_IOC_ENABLE: c_int = AYA_PERF_EVENT_IOC_ENABLE;
pub(crate) const PERF_EVENT_IOC_DISABLE: c_int = AYA_PERF_EVENT_IOC_DISABLE;
pub(crate) const PERF_EVENT_IOC_SET_BPF: c_int = AYA_PERF_EVENT_IOC_SET_BPF;
/// Marker trait for types that can safely be converted to and from byte slices.
#[expect(clippy::missing_safety_doc)]
pub unsafe trait Pod: Copy + 'static {}
macro_rules! unsafe_impl_pod {
@ -68,11 +56,9 @@ unsafe_impl_pod!(i8, u8, i16, u16, i32, u32, i64, u64, u128, i128);
// It only makes sense that an array of POD types is itself POD
unsafe impl<T: Pod, const N: usize> Pod for [T; N] {}
pub use aya_obj::maps::{bpf_map_def, PinningType};
pub use aya_obj::maps::{PinningType, bpf_map_def};
lazy_static::lazy_static! {
pub(crate) static ref FEATURES: Features = detect_features();
}
pub(crate) static FEATURES: LazyLock<Features> = LazyLock::new(detect_features);
fn detect_features() -> Features {
let btf = if is_btf_supported() {
@ -80,6 +66,7 @@ fn detect_features() -> Features {
is_btf_func_supported(),
is_btf_func_global_supported(),
is_btf_datasec_supported(),
is_btf_datasec_zero_supported(),
is_btf_float_supported(),
is_btf_decl_tag_supported(),
is_btf_type_tag_supported(),
@ -98,7 +85,7 @@ fn detect_features() -> Features {
is_prog_id_supported(BPF_MAP_TYPE_DEVMAP),
btf,
);
debug!("BPF Feature Detection: {:#?}", f);
debug!("BPF Feature Detection: {f:#?}");
f
}
@ -123,7 +110,7 @@ pub fn features() -> &'static Features {
/// // load the BTF data from /sys/kernel/btf/vmlinux
/// .btf(Btf::from_sys_fs().ok().as_ref())
/// // load pinned maps from /sys/fs/bpf/my-program
/// .map_pin_path("/sys/fs/bpf/my-program")
/// .default_map_pin_directory("/sys/fs/bpf/my-program")
/// // finally load the code
/// .load_file("file.o")?;
/// # Ok::<(), aya::EbpfError>(())
@ -131,9 +118,15 @@ pub fn features() -> &'static Features {
#[derive(Debug)]
pub struct EbpfLoader<'a> {
btf: Option<Cow<'a, Btf>>,
map_pin_path: Option<PathBuf>,
default_map_pin_directory: Option<PathBuf>,
globals: HashMap<&'a str, (&'a [u8], bool)>,
// Max entries overrides the max_entries field of the map that matches the provided name
// before the map is created.
max_entries: HashMap<&'a str, u32>,
// Map pin path overrides the pin path of the map that matches the provided name before
// it is created.
map_pin_path_by_name: HashMap<&'a str, std::borrow::Cow<'a, Path>>,
extensions: HashSet<&'a str>,
verifier_log_level: VerifierLogLevel,
allow_unsupported_maps: bool,
@ -169,9 +162,10 @@ impl<'a> EbpfLoader<'a> {
pub fn new() -> Self {
Self {
btf: Btf::from_sys_fs().ok().map(Cow::Owned),
map_pin_path: None,
default_map_pin_directory: None,
globals: HashMap::new(),
max_entries: HashMap::new(),
map_pin_path_by_name: HashMap::new(),
extensions: HashSet::new(),
verifier_log_level: VerifierLogLevel::default(),
allow_unsupported_maps: false,
@ -229,40 +223,43 @@ impl<'a> EbpfLoader<'a> {
/// Pinned maps will be loaded from `path/MAP_NAME`.
/// The caller is responsible for ensuring the directory exists.
///
/// Note that if a path is provided for a specific map via [`EbpfLoader::map_pin_path`],
/// it will take precedence over this path.
///
/// # Example
///
/// ```no_run
/// use aya::EbpfLoader;
///
/// let bpf = EbpfLoader::new()
/// .map_pin_path("/sys/fs/bpf/my-program")
/// .default_map_pin_directory("/sys/fs/bpf/my-program")
/// .load_file("file.o")?;
/// # Ok::<(), aya::EbpfError>(())
/// ```
///
pub fn map_pin_path<P: AsRef<Path>>(&mut self, path: P) -> &mut Self {
self.map_pin_path = Some(path.as_ref().to_owned());
pub fn default_map_pin_directory<P: AsRef<Path>>(&mut self, path: P) -> &mut Self {
self.default_map_pin_directory = Some(path.as_ref().to_owned());
self
}
/// Sets the value of a global variable.
/// Override the value of a global variable.
///
/// If the `must_exist` argument is `true`, [`EbpfLoader::load`] will fail with [`ParseError::SymbolNotFound`] if the loaded object code does not contain the variable.
///
/// From Rust eBPF, a global variable can be defined as follows:
///
/// ```no_run
/// #[no_mangle]
/// #[unsafe(no_mangle)]
/// static VERSION: i32 = 0;
/// ```
///
/// Then it can be accessed using `core::ptr::read_volatile`:
///
/// ```no_run
/// # #[no_mangle]
/// # #[unsafe(no_mangle)]
/// # static VERSION: i32 = 0;
/// # unsafe fn try_test() {
/// let version = core::ptr::read_volatile(&VERSION);
/// # fn try_test() {
/// let version = unsafe { core::ptr::read_volatile(&VERSION) };
/// # }
/// ```
///
@ -278,13 +275,13 @@ impl<'a> EbpfLoader<'a> {
/// use aya::EbpfLoader;
///
/// let bpf = EbpfLoader::new()
/// .set_global("VERSION", &2, true)
/// .set_global("PIDS", &[1234u16, 5678], true)
/// .override_global("VERSION", &2, true)
/// .override_global("PIDS", &[1234u16, 5678], true)
/// .load_file("file.o")?;
/// # Ok::<(), aya::EbpfError>(())
/// ```
///
pub fn set_global<T: Into<GlobalData<'a>>>(
pub fn override_global<T: Into<GlobalData<'a>>>(
&mut self,
name: &'a str,
value: T,
@ -294,6 +291,17 @@ impl<'a> EbpfLoader<'a> {
self
}
/// Override the value of a global variable.
#[deprecated(since = "0.13.2", note = "please use `override_global` instead")]
pub fn set_global<T: Into<GlobalData<'a>>>(
&mut self,
name: &'a str,
value: T,
must_exist: bool,
) -> &mut Self {
self.override_global(name, value, must_exist)
}
/// Set the max_entries for specified map.
///
/// Overwrite the value of max_entries of the map that matches
@ -305,16 +313,49 @@ impl<'a> EbpfLoader<'a> {
/// use aya::EbpfLoader;
///
/// let bpf = EbpfLoader::new()
/// .set_max_entries("map", 64)
/// .map_max_entries("map", 64)
/// .load_file("file.o")?;
/// # Ok::<(), aya::EbpfError>(())
/// ```
///
pub fn set_max_entries(&mut self, name: &'a str, size: u32) -> &mut Self {
pub fn map_max_entries(&mut self, name: &'a str, size: u32) -> &mut Self {
self.max_entries.insert(name, size);
self
}
/// Set the max_entries for specified map.
#[deprecated(since = "0.13.2", note = "please use `map_max_entries` instead")]
pub fn set_max_entries(&mut self, name: &'a str, size: u32) -> &mut Self {
self.map_max_entries(name, size)
}
/// Set the pin path for the map that matches the provided name.
///
/// Note that this is an absolute path to the pinned map; it is not a prefix
/// to be combined with the map name, and it is not relative to the
/// configured base directory for pinned maps.
///
/// Each call to this function with the same name overwrites the path to the
/// pinned map; last one wins.
///
/// # Example
///
/// ```no_run
/// # use std::path::Path;
///
/// # let mut loader = aya::EbpfLoader::new();
/// # let pin_path = Path::new("/sys/fs/bpf/my-pinned-map");
/// let bpf = loader
/// .map_pin_path("map", pin_path)
/// .load_file("file.o")?;
/// # Ok::<(), aya::EbpfError>(())
/// ```
///
pub fn map_pin_path<P: Into<Cow<'a, Path>>>(&mut self, name: &'a str, path: P) -> &mut Self {
self.map_pin_path_by_name.insert(name, path.into());
self
}
/// Treat the provided program as an [`Extension`]
///
/// When attempting to load the program with the provided `name`
@ -375,6 +416,10 @@ impl<'a> EbpfLoader<'a> {
/// Loads eBPF bytecode from a buffer.
///
/// The buffer needs to be 4-bytes aligned. If you are bundling the bytecode statically
/// into your binary, it is recommended that you do so using
/// [`include_bytes_aligned`](crate::include_bytes_aligned).
///
/// # Examples
///
/// ```no_run
@ -388,12 +433,13 @@ impl<'a> EbpfLoader<'a> {
pub fn load(&mut self, data: &[u8]) -> Result<Ebpf, EbpfError> {
let Self {
btf,
map_pin_path,
default_map_pin_directory,
globals,
max_entries,
extensions,
verifier_log_level,
allow_unsupported_maps,
map_pin_path_by_name,
} = self;
let mut obj = Object::parse(data)?;
obj.patch_map_data(globals.clone())?;
@ -410,8 +456,10 @@ impl<'a> EbpfLoader<'a> {
| ProgramSection::FEntry { sleepable: _ }
| ProgramSection::FExit { sleepable: _ }
| ProgramSection::Lsm { sleepable: _ }
| ProgramSection::BtfTracePoint => {
return Err(EbpfError::BtfError(err))
| ProgramSection::LsmCgroup
| ProgramSection::BtfTracePoint
| ProgramSection::Iter { sleepable: _ } => {
return Err(EbpfError::BtfError(err));
}
ProgramSection::KRetProbe
| ProgramSection::KProbe
@ -438,12 +486,17 @@ impl<'a> EbpfLoader<'a> {
| ProgramSection::PerfEvent
| ProgramSection::RawTracePoint
| ProgramSection::SkLookup
| ProgramSection::FlowDissector
| ProgramSection::CgroupSock { attach_type: _ }
| ProgramSection::CgroupDevice => {}
}
}
warn!("Object BTF couldn't be loaded in the kernel: {err}");
if obj.has_btf_relocations() {
return Err(EbpfError::BtfError(err));
}
warn!("object BTF couldn't be loaded in the kernel: {err}");
None
}
@ -465,13 +518,11 @@ impl<'a> EbpfLoader<'a> {
{
continue;
}
let num_cpus = || -> Result<u32, EbpfError> {
Ok(possible_cpus()
.map_err(|error| EbpfError::FileError {
path: PathBuf::from(POSSIBLE_CPUS),
error,
})?
.len() as u32)
let num_cpus = || {
Ok(nr_cpus().map_err(|(path, error)| EbpfError::FileError {
path: PathBuf::from(path),
error,
})? as u32)
};
let map_type: bpf_map_type = obj.map_type().try_into().map_err(MapError::from)?;
if let Some(max_entries) = max_entries_override(
@ -493,16 +544,21 @@ impl<'a> EbpfLoader<'a> {
_ => (),
}
let btf_fd = btf_fd.as_deref().map(|fd| fd.as_fd());
let mut map = match obj.pinning() {
PinningType::None => MapData::create(obj, &name, btf_fd)?,
PinningType::ByName => {
// pin maps in /sys/fs/bpf by default to align with libbpf
// behavior https://github.com/libbpf/libbpf/blob/v1.2.2/src/libbpf.c#L2161.
let path = map_pin_path
.as_deref()
.unwrap_or_else(|| Path::new("/sys/fs/bpf"));
MapData::create_pinned_by_name(path, obj, &name, btf_fd)?
let mut map = if let Some(pin_path) = map_pin_path_by_name.get(name.as_str()) {
MapData::create_pinned_by_name(pin_path, obj, &name, btf_fd)?
} else {
match obj.pinning() {
PinningType::None => MapData::create(obj, &name, btf_fd)?,
PinningType::ByName => {
// pin maps in /sys/fs/bpf by default to align with libbpf
// behavior https://github.com/libbpf/libbpf/blob/v1.2.2/src/libbpf.c#L2161.
let path = default_map_pin_directory
.as_deref()
.unwrap_or_else(|| Path::new("/sys/fs/bpf"));
let path = path.join(&name);
MapData::create_pinned_by_name(path, obj, &name, btf_fd)?
}
}
};
map.finalize()?;
@ -529,15 +585,11 @@ impl<'a> EbpfLoader<'a> {
.map(|(name, prog_obj)| {
let function_obj = obj.functions.get(&prog_obj.function_key()).unwrap().clone();
let prog_name = if FEATURES.bpf_name() {
Some(name.clone())
} else {
None
};
let prog_name = FEATURES.bpf_name().then(|| name.clone().into());
let section = prog_obj.section.clone();
let obj = (prog_obj, function_obj);
let btf_fd = btf_fd.clone();
let btf_fd = btf_fd.as_ref().map(Arc::clone);
let program = if extensions.contains(name.as_str()) {
Program::Extension(Extension {
data: ProgramData::new(prog_name, obj, btf_fd, *verifier_log_level),
@ -623,15 +675,15 @@ impl<'a> EbpfLoader<'a> {
}
ProgramSection::CgroupSkb => Program::CgroupSkb(CgroupSkb {
data: ProgramData::new(prog_name, obj, btf_fd, *verifier_log_level),
expected_attach_type: None,
attach_type: None,
}),
ProgramSection::CgroupSkbIngress => Program::CgroupSkb(CgroupSkb {
data: ProgramData::new(prog_name, obj, btf_fd, *verifier_log_level),
expected_attach_type: Some(CgroupSkbAttachType::Ingress),
attach_type: Some(CgroupSkbAttachType::Ingress),
}),
ProgramSection::CgroupSkbEgress => Program::CgroupSkb(CgroupSkb {
data: ProgramData::new(prog_name, obj, btf_fd, *verifier_log_level),
expected_attach_type: Some(CgroupSkbAttachType::Egress),
attach_type: Some(CgroupSkbAttachType::Egress),
}),
ProgramSection::CgroupSockAddr { attach_type, .. } => {
Program::CgroupSockAddr(CgroupSockAddr {
@ -656,6 +708,9 @@ impl<'a> EbpfLoader<'a> {
}
Program::Lsm(Lsm { data })
}
ProgramSection::LsmCgroup => Program::LsmCgroup(LsmCgroup {
data: ProgramData::new(prog_name, obj, btf_fd, *verifier_log_level),
}),
ProgramSection::BtfTracePoint => Program::BtfTracePoint(BtfTracePoint {
data: ProgramData::new(prog_name, obj, btf_fd, *verifier_log_level),
}),
@ -675,6 +730,9 @@ impl<'a> EbpfLoader<'a> {
}
Program::FExit(FExit { data })
}
ProgramSection::FlowDissector => Program::FlowDissector(FlowDissector {
data: ProgramData::new(prog_name, obj, btf_fd, *verifier_log_level),
}),
ProgramSection::Extension => Program::Extension(Extension {
data: ProgramData::new(prog_name, obj, btf_fd, *verifier_log_level),
}),
@ -690,6 +748,14 @@ impl<'a> EbpfLoader<'a> {
ProgramSection::CgroupDevice => Program::CgroupDevice(CgroupDevice {
data: ProgramData::new(prog_name, obj, btf_fd, *verifier_log_level),
}),
ProgramSection::Iter { sleepable } => {
let mut data =
ProgramData::new(prog_name, obj, btf_fd, *verifier_log_level);
if *sleepable {
data.flags = BPF_F_SLEEPABLE;
}
Program::Iter(Iter { data })
}
}
};
(name, program)
@ -697,23 +763,17 @@ impl<'a> EbpfLoader<'a> {
.collect();
let maps = maps
.drain()
.map(parse_map)
.map(|data| parse_map(data, *allow_unsupported_maps))
.collect::<Result<HashMap<String, Map>, EbpfError>>()?;
if !*allow_unsupported_maps {
maps.iter().try_for_each(|(_, x)| match x {
Map::Unsupported(map) => Err(EbpfError::MapError(MapError::Unsupported {
map_type: map.obj().map_type(),
})),
_ => Ok(()),
})?;
};
Ok(Ebpf { maps, programs })
}
}
fn parse_map(data: (String, MapData)) -> Result<(String, Map), EbpfError> {
fn parse_map(
data: (String, MapData),
allow_unsupported_maps: bool,
) -> Result<(String, Map), EbpfError> {
let (name, map) = data;
let map_type = bpf_map_type::try_from(map.obj().map_type()).map_err(MapError::from)?;
let map = match map_type {
@ -737,9 +797,16 @@ fn parse_map(data: (String, MapData)) -> Result<(String, Map), EbpfError> {
BPF_MAP_TYPE_DEVMAP => Map::DevMap(map),
BPF_MAP_TYPE_DEVMAP_HASH => Map::DevMapHash(map),
BPF_MAP_TYPE_XSKMAP => Map::XskMap(map),
m => {
warn!("The map {name} is of type {:#?} which is currently unsupported in Aya, use `allow_unsupported_maps()` to load it anyways", m);
Map::Unsupported(map)
BPF_MAP_TYPE_SK_STORAGE => Map::SkStorage(map),
m_type => {
if allow_unsupported_maps {
Map::Unsupported(map)
} else {
return Err(EbpfError::MapError(MapError::Unsupported {
name,
map_type: m_type,
}));
}
}
};
@ -780,11 +847,7 @@ fn adjust_to_page_size(byte_size: u32, page_size: u32) -> u32 {
fn div_ceil(n: u32, rhs: u32) -> u32 {
let d = n / rhs;
let r = n % rhs;
if r > 0 && rhs > 0 {
d + 1
} else {
d
}
if r > 0 && rhs > 0 { d + 1 } else { d }
}
let pages_needed = div_ceil(byte_size, page_size);
page_size * pages_needed.next_power_of_two()
@ -792,7 +855,7 @@ fn adjust_to_page_size(byte_size: u32, page_size: u32) -> u32 {
#[cfg(test)]
mod tests {
use crate::generated::bpf_map_type::*;
use aya_obj::generated::bpf_map_type::*;
const PAGE_SIZE: u32 = 4096;
const NUM_CPUS: u32 = 4;
@ -891,6 +954,10 @@ impl Ebpf {
/// [maps](crate::maps) defined in it. If the kernel supports [BTF](Btf)
/// debug info, it is automatically loaded from `/sys/kernel/btf/vmlinux`.
///
/// The buffer needs to be 4-bytes aligned. If you are bundling the bytecode statically
/// into your binary, it is recommended that you do so using
/// [`include_bytes_aligned`](crate::include_bytes_aligned).
///
/// For more loading options, see [EbpfLoader].
///
/// # Examples
@ -988,6 +1055,35 @@ impl Ebpf {
self.maps.iter_mut().map(|(name, map)| (name.as_str(), map))
}
/// Attempts to get mutable references to `N` maps at once.
///
/// Returns an array of length `N` with the results of each query, in the same order
/// as the requested map names. For soundness, at most one mutable reference will be
/// returned to any map. `None` will be used if a map with the given name is missing.
///
/// This method performs a check to ensure that there are no duplicate map names,
/// which currently has a time-complexity of *O(n²)*. Be careful when passing a large
/// number of names.
///
/// # Panics
///
/// Panics if any names are duplicated.
///
/// # Examples
/// ```no_run
/// # let mut bpf = aya::Ebpf::load(&[])?;
/// match bpf.maps_disjoint_mut(["MAP1", "MAP2"]) {
/// [Some(m1), Some(m2)] => println!("Got MAP1 and MAP2"),
/// [Some(m1), None] => println!("Got only MAP1"),
/// [None, Some(m2)] => println!("Got only MAP2"),
/// [None, None] => println!("No maps"),
/// }
/// # Ok::<(), aya::EbpfError>(())
/// ```
pub fn maps_disjoint_mut<const N: usize>(&mut self, names: [&str; N]) -> [Option<&mut Map>; N] {
self.maps.get_disjoint_mut(names)
}
/// Returns a reference to the program with the given name.
///
/// You can use this to inspect a program and its properties. To load and attach a program, use
@ -1021,7 +1117,7 @@ impl Ebpf {
///
/// let program: &mut UProbe = bpf.program_mut("SSL_read").unwrap().try_into()?;
/// program.load()?;
/// program.attach(Some("SSL_read"), 0, "libssl", None)?;
/// program.attach("SSL_read", "libssl", None, None)?;
/// # Ok::<(), aya::EbpfError>(())
/// ```
pub fn program_mut(&mut self, name: &str) -> Option<&mut Program> {
@ -1123,11 +1219,14 @@ pub enum EbpfError {
#[deprecated(since = "0.13.0", note = "use `EbpfError` instead")]
pub type BpfError = EbpfError;
fn load_btf(raw_btf: Vec<u8>, verifier_log_level: VerifierLogLevel) -> Result<OwnedFd, BtfError> {
fn load_btf(
raw_btf: Vec<u8>,
verifier_log_level: VerifierLogLevel,
) -> Result<crate::MockableFd, BtfError> {
let (ret, verifier_log) = retry_with_verifier_logs(10, |logger| {
bpf_load_btf(raw_btf.as_slice(), logger, verifier_log_level)
});
ret.map_err(|(_, io_error)| BtfError::LoadError {
ret.map_err(|io_error| BtfError::LoadError {
io_error,
verifier_log,
})
@ -1136,7 +1235,7 @@ fn load_btf(raw_btf: Vec<u8>, verifier_log_level: VerifierLogLevel) -> Result<Ow
/// Global data that can be exported to eBPF programs before they are loaded.
///
/// Valid global data includes `Pod` types and slices of `Pod` types. See also
/// [EbpfLoader::set_global].
/// [EbpfLoader::override_global].
pub struct GlobalData<'a> {
bytes: &'a [u8],
}
@ -1153,7 +1252,7 @@ impl<'a, T: Pod> From<&'a T> for GlobalData<'a> {
fn from(v: &'a T) -> Self {
GlobalData {
// Safety: v is Pod
bytes: unsafe { bytes_of(v) },
bytes: bytes_of(v),
}
}
}

@ -37,60 +37,19 @@
html_favicon_url = "https://aya-rs.dev/assets/images/crabby.svg"
)]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![deny(
clippy::all,
clippy::use_self,
absolute_paths_not_starting_with_crate,
deprecated_in_future,
elided_lifetimes_in_paths,
explicit_outlives_requirements,
ffi_unwind_calls,
keyword_idents,
//let_underscore_drop,
macro_use_extern_crate,
meta_variable_misuse,
missing_abi,
//missing_copy_implementations,
missing_docs,
non_ascii_idents,
noop_method_call,
rust_2021_incompatible_closure_captures,
rust_2021_incompatible_or_patterns,
rust_2021_prefixes_incompatible_syntax,
rust_2021_prelude_collisions,
single_use_lifetimes,
trivial_numeric_casts,
unreachable_pub,
//unsafe_op_in_unsafe_fn,
unstable_features,
unused_crate_dependencies,
unused_extern_crates,
unused_import_braces,
unused_lifetimes,
unused_macro_rules,
//unused_qualifications, https://github.com/rust-lang/rust/commit/9ccc7b7 added size_of to the prelude, but we need to continue to qualify it so that we build on older compilers.
//unused_results,
)]
#![allow(clippy::missing_safety_doc, clippy::len_without_is_empty)]
#![cfg_attr(
all(feature = "async_tokio", feature = "async_std"),
allow(unused_crate_dependencies)
)]
#![deny(missing_docs)]
mod bpf;
use aya_obj::generated;
pub mod maps;
use aya_obj as obj;
pub mod pin;
pub mod programs;
pub use programs::loaded_programs;
mod sys;
pub mod sys;
pub mod util;
use std::os::fd::{AsFd, BorrowedFd, OwnedFd};
use std::os::fd::{AsFd, AsRawFd, BorrowedFd, FromRawFd, OwnedFd, RawFd};
pub use aya_obj::btf::{Btf, BtfError};
pub use bpf::*;
pub use obj::btf::{Btf, BtfError};
pub use object::Endianness;
#[doc(hidden)]
pub use sys::netlink_set_link_up;
@ -123,51 +82,78 @@ impl MockableFd {
#[cfg(test)]
fn from_fd(fd: OwnedFd) -> Self {
Self { fd: Some(fd) }
let fd = Some(fd);
Self { fd }
}
#[cfg(not(test))]
fn try_clone(&self) -> std::io::Result<Self> {
fn inner(&self) -> &OwnedFd {
let Self { fd } = self;
let fd = fd.try_clone()?;
Ok(Self { fd })
fd
}
#[cfg(test)]
fn try_clone(&self) -> std::io::Result<Self> {
fn inner(&self) -> &OwnedFd {
let Self { fd } = self;
let fd = fd.as_ref().map(OwnedFd::try_clone).transpose()?;
Ok(Self { fd })
fd.as_ref().unwrap()
}
}
impl AsFd for MockableFd {
#[cfg(not(test))]
fn as_fd(&self) -> BorrowedFd<'_> {
let Self { fd } = self;
fd.as_fd()
fn into_inner(self) -> OwnedFd {
self.fd
}
#[cfg(test)]
fn into_inner(mut self) -> OwnedFd {
self.fd.take().unwrap()
}
fn try_clone(&self) -> std::io::Result<Self> {
let fd = self.inner();
let fd = fd.try_clone()?;
Ok(Self::from_fd(fd))
}
}
impl<T> From<T> for MockableFd
where
OwnedFd: From<T>,
{
fn from(value: T) -> Self {
let fd = OwnedFd::from(value);
Self::from_fd(fd)
}
}
impl AsFd for MockableFd {
fn as_fd(&self) -> BorrowedFd<'_> {
let Self { fd } = self;
fd.as_ref().unwrap().as_fd()
self.inner().as_fd()
}
}
impl Drop for MockableFd {
#[cfg(not(test))]
fn drop(&mut self) {
// Intentional no-op.
impl AsRawFd for MockableFd {
fn as_raw_fd(&self) -> RawFd {
self.inner().as_raw_fd()
}
}
#[cfg(test)]
impl FromRawFd for MockableFd {
unsafe fn from_raw_fd(fd: RawFd) -> Self {
let fd = unsafe { OwnedFd::from_raw_fd(fd) };
Self::from_fd(fd)
}
}
#[cfg(test)]
impl Drop for MockableFd {
fn drop(&mut self) {
use std::os::fd::AsRawFd as _;
let Self { fd } = self;
if fd.as_ref().unwrap().as_raw_fd() >= Self::mock_signed_fd() {
let fd: OwnedFd = fd.take().unwrap();
let fd = fd.take().unwrap();
if fd.as_raw_fd() < Self::mock_signed_fd() {
std::mem::drop(fd)
} else {
std::mem::forget(fd)
}
}

@ -1,13 +1,11 @@
use std::{
borrow::{Borrow, BorrowMut},
marker::PhantomData,
os::fd::AsFd as _,
};
use crate::{
maps::{check_bounds, check_kv_size, IterableMap, MapData, MapError},
sys::{bpf_map_lookup_elem, bpf_map_update_elem, SyscallError},
Pod,
maps::{IterableMap, MapData, MapError, check_bounds, check_kv_size, hash_map},
};
/// A fixed-size array.
@ -49,6 +47,7 @@ impl<T: Borrow<MapData>, V: Pod> Array<T, V> {
/// Returns the number of elements in the array.
///
/// This corresponds to the value of `bpf_map_def::max_entries` on the eBPF side.
#[expect(clippy::len_without_is_empty)]
pub fn len(&self) -> u32 {
self.inner.borrow().obj.max_entries()
}
@ -62,14 +61,7 @@ impl<T: Borrow<MapData>, V: Pod> Array<T, V> {
pub fn get(&self, index: &u32, flags: u64) -> Result<V, MapError> {
let data = self.inner.borrow();
check_bounds(data, *index)?;
let fd = data.fd().as_fd();
let value =
bpf_map_lookup_elem(fd, index, flags).map_err(|(_, io_error)| SyscallError {
call: "bpf_map_lookup_elem",
io_error,
})?;
value.ok_or(MapError::KeyNotFound)
hash_map::get(data, index, flags)
}
/// An iterator over the elements of the array. The iterator item type is `Result<V,
@ -89,14 +81,7 @@ impl<T: BorrowMut<MapData>, V: Pod> Array<T, V> {
pub fn set(&mut self, index: u32, value: impl Borrow<V>, flags: u64) -> Result<(), MapError> {
let data = self.inner.borrow_mut();
check_bounds(data, index)?;
let fd = data.fd().as_fd();
bpf_map_update_elem(fd, Some(&index), value.borrow(), flags).map_err(|(_, io_error)| {
SyscallError {
call: "bpf_map_update_elem",
io_error,
}
})?;
Ok(())
hash_map::insert(data, &index, value.borrow(), flags)
}
}

@ -1,5 +1,6 @@
//! Array types.
#[allow(clippy::module_inception)]
#[expect(clippy::module_inception)]
mod array;
mod per_cpu_array;
mod program_array;

@ -5,9 +5,9 @@ use std::{
};
use crate::{
maps::{check_bounds, check_kv_size, IterableMap, MapData, MapError, PerCpuValues},
sys::{bpf_map_lookup_elem_per_cpu, bpf_map_update_elem_per_cpu, SyscallError},
Pod,
maps::{IterableMap, MapData, MapError, PerCpuValues, check_bounds, check_kv_size},
sys::{SyscallError, bpf_map_lookup_elem_per_cpu, bpf_map_update_elem_per_cpu},
};
/// A per-CPU fixed-size array.
@ -37,7 +37,7 @@ use crate::{
/// let mut array = PerCpuArray::try_from(bpf.map_mut("ARRAY").unwrap())?;
///
/// // set array[1] = 42 for all cpus
/// let nr_cpus = nr_cpus()?;
/// let nr_cpus = nr_cpus().map_err(|(_, error)| error)?;
/// array.set(1, PerCpuValues::try_from(vec![42u32; nr_cpus])?, 0)?;
///
/// // retrieve the values at index 1 for all cpus
@ -68,6 +68,7 @@ impl<T: Borrow<MapData>, V: Pod> PerCpuArray<T, V> {
/// Returns the number of elements in the array.
///
/// This corresponds to the value of `bpf_map_def::max_entries` on the eBPF side.
#[expect(clippy::len_without_is_empty)]
pub fn len(&self) -> u32 {
self.inner.borrow().obj.max_entries()
}
@ -83,12 +84,11 @@ impl<T: Borrow<MapData>, V: Pod> PerCpuArray<T, V> {
check_bounds(data, *index)?;
let fd = data.fd().as_fd();
let value = bpf_map_lookup_elem_per_cpu(fd, index, flags).map_err(|(_, io_error)| {
SyscallError {
let value =
bpf_map_lookup_elem_per_cpu(fd, index, flags).map_err(|io_error| SyscallError {
call: "bpf_map_lookup_elem",
io_error,
}
})?;
})?;
value.ok_or(MapError::KeyNotFound)
}
@ -111,7 +111,7 @@ impl<T: BorrowMut<MapData>, V: Pod> PerCpuArray<T, V> {
check_bounds(data, index)?;
let fd = data.fd().as_fd();
bpf_map_update_elem_per_cpu(fd, &index, &values, flags).map_err(|(_, io_error)| {
bpf_map_update_elem_per_cpu(fd, &index, &values, flags).map_err(|io_error| {
SyscallError {
call: "bpf_map_update_elem",
io_error,

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save