Merge branch 'main' into main

reviewable/pr728/r3
Mohammad Javad Pooladkhay 2 years ago committed by GitHub
commit dffb17b5b8

@ -9,6 +9,9 @@ on:
branches:
- main
schedule:
- cron: 00 4 * * *
env:
CARGO_TERM_COLOR: always
@ -17,7 +20,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@master
with:
@ -32,8 +35,12 @@ jobs:
- name: Check C formatting
run: git ls-files -- '*.c' '*.h' | xargs clang-format --dry-run --Werror
- name: Check Markdown
uses: DavidAnson/markdownlint-cli2-action@v9
- run: taplo fmt --check
- name: Check TOML formatting
run: taplo fmt --check
- name: Check formatting
run: cargo fmt --all -- --check
@ -66,7 +73,7 @@ jobs:
- riscv64gc-unknown-linux-gnu
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@master
with:
@ -119,7 +126,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@master
with:
@ -151,7 +158,7 @@ jobs:
- ubuntu-22.04
runs-on: ${{ matrix.runner }}
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
submodules: recursive
@ -198,12 +205,15 @@ jobs:
run: |
set -euxo pipefail
brew update
brew install dpkg findutils gnu-tar llvm pkg-config
# Workaround for https://github.com/Homebrew/homebrew-core/pull/139492.
brew reinstall qemu
# https://github.com/actions/setup-python/issues/577
find /usr/local/bin -type l -exec sh -c 'readlink -f "$1" \
| grep -q ^/Library/Frameworks/Python.framework/Versions/' _ {} \; -exec rm -v {} \;
brew install dpkg findutils gnu-tar llvm pkg-config qemu
echo /usr/local/opt/findutils/libexec/gnubin >> $GITHUB_PATH
echo /usr/local/opt/gnu-tar/libexec/gnubin >> $GITHUB_PATH
echo /usr/local/opt/llvm/bin >> $GITHUB_PATH
# https://github.com/Homebrew/homebrew-core/issues/140244
codesign --verify $(which qemu-system-x86_64) || brew reinstall qemu --build-from-source
- name: bpf-linker
if: runner.os == 'macOS'
@ -220,7 +230,6 @@ jobs:
# linux-image-5.10.0-23-cloud-arm64-unsigned_5.10.179-3_arm64.deb \
printf '%s\0' \
linux-image-6.1.0-10-cloud-arm64-unsigned_6.1.38-2_arm64.deb \
linux-image-6.4.0-2-cloud-arm64-unsigned_6.4.4-3_arm64.deb \
| xargs -0 -t -P0 -I {} wget -nd -nv -P test/.tmp/debian-kernels/arm64 ftp://ftp.us.debian.org/debian/pool/main/l/linux/{}
- name: Download debian kernels
@ -233,7 +242,6 @@ jobs:
# linux-image-5.10.0-23-cloud-amd64-unsigned_5.10.179-3_amd64.deb \
printf '%s\0' \
linux-image-6.1.0-10-cloud-amd64-unsigned_6.1.38-2_amd64.deb \
linux-image-6.4.0-2-cloud-amd64-unsigned_6.4.4-3_amd64.deb \
| xargs -0 -t -P0 -I {} wget -nd -nv -P test/.tmp/debian-kernels/amd64 ftp://ftp.us.debian.org/debian/pool/main/l/linux/{}
- name: Extract debian kernels
@ -242,7 +250,11 @@ jobs:
find test/.tmp -name '*.deb' -print0 | xargs -t -0 -I {} \
sh -c "dpkg --fsys-tarfile {} | tar -C test/.tmp --wildcards --extract '*vmlinuz*' --file -"
- name: Run integration tests
- name: Run local integration tests
if: runner.os == 'Linux'
run: cargo xtask integration-test local
- name: Run virtualized integration tests
run: find test/.tmp -name 'vmlinuz-*' | xargs -t cargo xtask integration-test vm
# Provides a single status check for the entire build workflow.

@ -7,7 +7,7 @@ jobs:
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
submodules: recursive

@ -9,7 +9,7 @@ jobs:
if: startsWith(github.ref, 'refs/tags/')
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
fetch-depth: 0

@ -0,0 +1,8 @@
config:
no-duplicate-heading: false
globs:
- "**/*.md"
ignores:
- "target/**/*"
- "xtask/libbpf/**/*"

@ -115,14 +115,14 @@ the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
version 2.0, available [here][covenant-2-0].
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
[covenant-2-0]: https://www.contributor-covenant.org/version/2/0/code_of_conduct
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
For answers to common questions about this code of conduct, see the
[FAQ](https://www.contributor-covenant.org/faq). Translations are available
[here](https://www.contributor-covenant.org/translations).

@ -17,12 +17,16 @@ version of aya you're using and which version of the linux kernel.
If you find an API that is not documented, unclear or missing examples, please
file an issue. If you make changes to the documentation, please read
https://doc.rust-lang.org/rustdoc/how-to-write-documentation.html and make sure
your changes conform to the format outlined here
https://doc.rust-lang.org/rustdoc/how-to-write-documentation.html#documenting-components.
[How To Write Documentation] and make sure your changes conform to the
format outlined in [Documenting Components].
If you want to make changes to the Aya Book, see the readme in the book repo
https://github.com/aya-rs/book.
[How To Write Documentation]: https://doc.rust-lang.org/rustdoc/how-to-write-documentation.html
[Documenting Components]: https://doc.rust-lang.org/rustdoc/how-to-write-documentation.html#documenting-components
If you want to make changes to the Aya Book, see the readme in the
[book repository].
[book repository]: https://github.com/aya-rs/book
## Fixing bugs and implementing new features
@ -31,7 +35,11 @@ helps us avoid duplicating work. If your work includes publicly visible changes,
make sure those are properly documented as explained in the section above.
### Running tests
Run the unit tests with `cargo test`. See [Aya Integration Tests](https://github.com/aya-rs/aya/blob/main/test/README.md) regarding running the integration tests.
Run the unit tests with `cargo test`. See [Aya Integration Tests] regarding
running the integration tests.
[Aya Integration Tests]: https://github.com/aya-rs/aya/blob/main/test/README.md
### Commits
@ -45,21 +53,19 @@ change, please squash those together before asking for a review.
A good commit message should describe what changed and why.
1. The first line should:
* contain a short description of the change (preferably 50 characters or less,
and no more than 72 characters)
* be entirely in lowercase with the exception of proper nouns, acronyms, and
the words that refer to code, like function/variable names
* be prefixed with the name of the sub crate being changed
Examples:
* aya: handle reordered functions
* aya-bpf: SkSkbContext: add ::l3_csum_replace
2. Keep the second line blank.
3. Wrap all other lines at 72 columns (except for long URLs).
4. If your patch fixes an open issue, you can add a reference to it at the end
- Contain a short description of the change (preferably 50 characters or less,
and no more than 72 characters)
- Be entirely in lowercase with the exception of proper nouns, acronyms, and
the words that refer to code, like function/variable names
- Be prefixed with the name of the sub crate being changed
Examples:
- `aya: handle reordered functions`
- `aya-bpf: SkSkbContext: add ::l3_csum_replace`
1. Keep the second line blank.
1. Wrap all other lines at 72 columns (except for long URLs).
1. If your patch fixes an open issue, you can add a reference to it at the end
of the log. Use the `Fixes: #` prefix and the issue number. For other
references use `Refs: #`. `Refs` may include multiple issues, separated by a
comma.

@ -49,7 +49,7 @@ default-members = [
[workspace.dependencies]
anyhow = { version = "1", default-features = false }
assert_matches = { version = "1.5.0", default-features = false }
async-io = { version = "1.3", default-features = false }
async-io = { version = "2.0", default-features = false }
aya = { path = "aya", version = "0.11.0", default-features = false }
aya-bpf = { path = "bpf/aya-bpf", default-features = false }
aya-log = { path = "aya-log", default-features = false }
@ -57,16 +57,18 @@ aya-log-common = { path = "aya-log-common", version = "0.1.13", default-features
aya-log-parser = { path = "aya-log-parser", default-features = false }
aya-obj = { path = "aya-obj", version = "0.1.0", default-features = false }
aya-tool = { path = "aya-tool", default-features = false }
bindgen = { version = "0.66", default-features = false }
bindgen = { version = "0.69", default-features = false }
bitflags = { version = "2.2.1", default-features = false }
bytes = { version = "1", default-features = false }
cargo_metadata = { version = "0.17.0", default-features = false }
cargo_metadata = { version = "0.18.0", default-features = false }
clap = { version = "4", default-features = false }
const-assert = { version = "1.0.1", default-features = false }
core-error = { version = "0.0.0", default-features = false }
dialoguer = { version = "0.10", default-features = false }
dialoguer = { version = "0.11", default-features = false }
diff = { version = "0.1.13", default-features = false }
env_logger = { version = "0.10", default-features = false }
futures = { version = "0.3.12", default-features = false }
epoll = { version = "4.3.3", default-features = false }
futures = { version = "0.3.28", default-features = false }
hashbrown = { version = "0.14", default-features = false }
indoc = { version = "2.0", default-features = false }
integration-ebpf = { path = "test/integration-ebpf", default-features = false }
@ -74,14 +76,14 @@ lazy_static = { version = "1", default-features = false }
libc = { version = "0.2.105", default-features = false }
log = { version = "0.4", default-features = false }
netns-rs = { version = "0.1", default-features = false }
nix = { version = "0.26.2", default-features = false }
nix = { version = "0.27.0", default-features = false }
num_enum = { version = "0.7", default-features = false }
object = { version = "0.32", default-features = false }
parking_lot = { version = "0.12.0", default-features = false }
proc-macro-error = { version = "1.0", default-features = false }
proc-macro2 = { version = "1", default-features = false }
public-api = { version = "0.31.2", default-features = false }
public-api = { version = "0.32.0", default-features = false }
quote = { version = "1", default-features = false }
rand = { version = "0.8", default-features = false }
rbpf = { version = "0.2.0", default-features = false }
rustdoc-json = { version = "0.8.6", default-features = false }
rustup-toolchain = { version = "0.1.5", default-features = false }
@ -89,10 +91,12 @@ rustversion = { version = "1.0.0", default-features = false }
syn = { version = "2", default-features = false }
tempfile = { version = "3", default-features = false }
test-case = { version = "3.1.0", default-features = false }
test-log = { version = "0.2.13", default-features = false }
testing_logger = { version = "0.1.1", default-features = false }
thiserror = { version = "1", default-features = false }
tokio = { version = "1.24.0", default-features = false }
which = { version = "4.4.0", default-features = false }
which = { version = "5.0.0", default-features = false }
xdpilone = { version = "1.0", default-features = false }
xtask = { path = "xtask", default-features = false }
[profile.dev]

@ -2,14 +2,14 @@
[![Crates.io][crates-badge]][crates-url]
![License][license-badge]
![Build status][build-badge]
[![Build status][build-badge]][build-url]
[![Book][book-badge]][book-url]
[crates-badge]: https://img.shields.io/crates/v/aya.svg?style=for-the-badge&logo=rust
[crates-url]: https://crates.io/crates/aya
[license-badge]: https://img.shields.io/badge/license-MIT%2FApache--2.0-blue?style=for-the-badge
[build-badge]: https://img.shields.io/github/actions/workflow/status/aya-rs/aya/build-aya.yml?branch=main&style=for-the-badge
[build-badge]: https://img.shields.io/github/actions/workflow/status/aya-rs/aya/ci.yml?style=for-the-badge
[build-url]: https://github.com/aya-rs/aya/actions/workflows/ci.yml
[book-badge]: https://img.shields.io/badge/read%20the-book-9cf.svg?style=for-the-badge&logo=mdbook
[book-url]: https://aya-rs.dev/book
@ -26,8 +26,8 @@
[![Discord][discord-badge]][chat-url] [![Awesome][awesome-badge]][awesome-aya]
Join [the conversation on Discord][chat-url] to discuss anything related to Aya, or discover
and contribute to a list of [Awesome Aya][awesome-aya] projects.
Join [the conversation on Discord][chat-url] to discuss anything related to Aya
or discover and contribute to a list of [Awesome Aya][awesome-aya] projects.
[discord-badge]: https://img.shields.io/badge/Discord-chat-5865F2?style=for-the-badge&logo=discord
[chat-url]: https://discord.gg/xHW2cb2N6G
@ -37,7 +37,7 @@ and contribute to a list of [Awesome Aya][awesome-aya] projects.
## Overview
eBPF is a technology that allows running user-supplied programs inside the Linux
kernel. For more info see https://ebpf.io/what-is-ebpf.
kernel. For more info see [What is eBPF](https://ebpf.io/what-is-ebpf).
Aya is an eBPF library built with a focus on operability and developer
experience. It does not rely on [libbpf] nor [bcc] - it's built from the ground
@ -69,9 +69,8 @@ Some of the major features provided include:
### Example
Aya supports a large chunk of the eBPF API. The following example shows how to use a
`BPF_PROG_TYPE_CGROUP_SKB` program with aya:
Aya supports a large chunk of the eBPF API. The following example shows how to
use a `BPF_PROG_TYPE_CGROUP_SKB` program with aya:
```rust
use std::fs::File;
@ -96,12 +95,15 @@ ingress.attach(cgroup, CgroupSkbAttachType::Ingress)?;
## Contributing
Please see the [contributing guide](https://github.com/aya-rs/aya/blob/main/CONTRIBUTING.md).
## License
Aya is distributed under the terms of either the [MIT license] or the [Apache License] (version
2.0), at your option.
Aya is distributed under the terms of either the [MIT license] or the
[Apache License] (version 2.0), at your option.
Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in this crate by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions.
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in this crate by you, as defined in the Apache-2.0 license, shall
be dual licensed as above, without any additional terms or conditions.
[MIT license]: https://github.com/aya-rs/aya/blob/main/LICENSE-MIT
[Apache license]: https://github.com/aya-rs/aya/blob/main/LICENSE-APACHE

@ -68,7 +68,7 @@ pub(crate) fn pop_bool_arg(args: &mut Args, name: &str) -> bool {
}
pub(crate) fn err_on_unknown_args(args: &Args) -> Result<()> {
if let Some(arg) = args.args.get(0) {
if let Some(arg) = args.args.first() {
let tokens = match arg {
Arg::String(name_val) => name_val.name.clone(),
Arg::Bool(ident) => ident.clone(),

@ -45,9 +45,10 @@ impl BtfTracePoint {
#[cfg(test)]
mod tests {
use super::*;
use syn::parse_quote;
use super::*;
#[test]
fn test_btf_tracepoint() {
let prog = BtfTracePoint::parse(

@ -34,9 +34,10 @@ impl CgroupDevice {
#[cfg(test)]
mod tests {
use super::*;
use syn::parse_quote;
use super::*;
#[test]
fn test_cgroup_device() {
let prog = CgroupDevice::parse(

@ -48,9 +48,10 @@ impl CgroupSkb {
#[cfg(test)]
mod tests {
use super::*;
use syn::parse_quote;
use super::*;
#[test]
fn cgroup_skb() {
let prog = CgroupSkb::parse(

@ -46,9 +46,10 @@ impl CgroupSock {
#[cfg(test)]
mod tests {
use super::*;
use syn::parse_quote;
use super::*;
#[test]
fn cgroup_sock_post_bind4() {
let prog = CgroupSock::parse(

@ -48,9 +48,10 @@ impl CgroupSockAddr {
#[cfg(test)]
mod tests {
use super::*;
use syn::parse_quote;
use super::*;
#[test]
fn cgroup_sock_addr_connect4() {
let prog = CgroupSockAddr::parse(

@ -46,9 +46,10 @@ impl CgroupSockopt {
#[cfg(test)]
mod tests {
use super::*;
use syn::parse_quote;
use super::*;
#[test]
fn cgroup_sockopt_getsockopt() {
let prog = CgroupSockopt::parse(

@ -34,9 +34,10 @@ impl CgroupSysctl {
#[cfg(test)]
mod tests {
use super::*;
use syn::parse_quote;
use super::*;
#[test]
fn test_cgroup_sysctl() {
let prog = CgroupSysctl::parse(

@ -51,9 +51,10 @@ impl FEntry {
#[cfg(test)]
mod tests {
use super::*;
use syn::parse_quote;
use super::*;
#[test]
fn test_fentry() {
let prog = FEntry::parse(

@ -51,9 +51,10 @@ impl FExit {
#[cfg(test)]
mod tests {
use super::*;
use syn::parse_quote;
use super::*;
#[test]
fn test_fexit() {
let prog = FExit::parse(

@ -1,7 +1,6 @@
use std::borrow::Cow;
use proc_macro2::TokenStream;
use quote::quote;
use syn::{ItemFn, Result};
@ -79,9 +78,10 @@ impl KProbe {
#[cfg(test)]
mod tests {
use super::*;
use syn::parse_quote;
use super::*;
#[test]
fn test_kprobe() {
let kprobe = KProbe::parse(

@ -44,10 +44,9 @@ use sk_msg::SkMsg;
use sk_skb::{SkSkb, SkSkbKind};
use sock_ops::SockOps;
use socket_filter::SocketFilter;
use uprobe::{UProbe, UProbeKind};
use tc::SchedClassifier;
use tracepoint::TracePoint;
use uprobe::{UProbe, UProbeKind};
use xdp::Xdp;
#[proc_macro_error]
#[proc_macro_attribute]
@ -128,6 +127,28 @@ pub fn sk_msg(attrs: TokenStream, item: TokenStream) -> TokenStream {
}
}
/// Marks a function as an eBPF XDP program that can be attached to a network interface.
///
/// On some NIC drivers, XDP probes are compatible with jumbo frames through the use of
/// multi-buffer packets. Programs can opt in to this support by passing the `frags` argument.
///
/// XDP programs can also be chained through the use of CPU maps and dev maps, but must opt in
/// with the `map = "cpumap"` or `map = "devmap"` arguments.
///
/// # Minimum kernel version
///
/// The minimum kernel version required to use this feature is 4.8.
///
/// # Examples
///
/// ```no_run
/// use aya_bpf::{bindings::xdp_action::XDP_PASS, macros::xdp, programs::XdpContext};
///
/// #[xdp(frags)]
/// pub fn xdp(ctx: XdpContext) -> u32 {
/// XDP_PASS
/// }
/// ```
#[proc_macro_error]
#[proc_macro_attribute]
pub fn xdp(attrs: TokenStream, item: TokenStream) -> TokenStream {
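The doc comment above only demonstrates `frags`; here is a second, hypothetical usage sketch for the `map` argument (the section name it produces matches the macro tests later in this diff):

```rust
use aya_bpf::{bindings::xdp_action::XDP_PASS, macros::xdp, programs::XdpContext};

// Hypothetical program meant to be referenced from a CPUMAP entry;
// the attribute places it in the "xdp/cpumap" ELF section.
#[xdp(map = "cpumap")]
pub fn redirect_target(ctx: XdpContext) -> u32 {
    XDP_PASS
}
```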

@ -53,9 +53,10 @@ impl Lsm {
#[cfg(test)]
mod tests {
use super::*;
use syn::parse_quote;
use super::*;
#[test]
fn test_lsm_sleepable() {
let prog = Lsm::parse(

@ -2,11 +2,9 @@ use std::borrow::Cow;
use proc_macro2::TokenStream;
use quote::quote;
use syn::Result;
use syn::{ItemStatic, Result};
use crate::args::name_arg;
use syn::ItemStatic;
pub(crate) struct Map {
item: ItemStatic,
name: String,
@ -34,9 +32,10 @@ impl Map {
#[cfg(test)]
mod tests {
use super::*;
use syn::parse_quote;
use super::*;
#[test]
fn test_map_with_name() {
let map = Map::parse(

@ -35,9 +35,10 @@ impl PerfEvent {
#[cfg(test)]
mod tests {
use super::*;
use syn::parse_quote;
use super::*;
#[test]
fn test_perf_event() {
let prog = PerfEvent::parse(

@ -1,7 +1,6 @@
use std::borrow::Cow;
use proc_macro2::TokenStream;
use quote::quote;
use syn::{ItemFn, Result};
@ -45,9 +44,10 @@ impl RawTracePoint {
#[cfg(test)]
mod tests {
use super::*;
use syn::parse_quote;
use super::*;
#[test]
fn test_raw_tracepoint() {
let prog = RawTracePoint::parse(

@ -34,9 +34,10 @@ impl SkLookup {
#[cfg(test)]
mod tests {
use super::*;
use syn::parse_quote;
use super::*;
#[test]
fn test_sk_lookup() {
let prog = SkLookup::parse(

@ -34,9 +34,10 @@ impl SkMsg {
#[cfg(test)]
mod tests {
use super::*;
use syn::parse_quote;
use super::*;
#[test]
fn test_sk_msg() {
let prog = SkMsg::parse(

@ -56,9 +56,10 @@ impl SkSkb {
#[cfg(test)]
mod tests {
use super::*;
use syn::parse_quote;
use super::*;
#[test]
fn test_stream_parser() {
let prog = SkSkb::parse(

@ -34,9 +34,10 @@ impl SockOps {
#[cfg(test)]
mod tests {
use super::*;
use syn::parse_quote;
use super::*;
#[test]
fn test_sock_ops() {
let prog = SockOps::parse(

@ -34,9 +34,10 @@ impl SocketFilter {
#[cfg(test)]
mod tests {
use super::*;
use syn::parse_quote;
use super::*;
#[test]
fn test_socket_filter() {
let prog = SocketFilter::parse(

@ -34,9 +34,10 @@ impl SchedClassifier {
#[cfg(test)]
mod tests {
use super::*;
use syn::parse_quote;
use super::*;
#[test]
fn test_sched_classifier() {
let prog = SchedClassifier::parse(

@ -52,9 +52,10 @@ impl TracePoint {
#[cfg(test)]
mod tests {
use super::*;
use syn::parse_quote;
use super::*;
#[test]
fn test_tracepoint() {
let prog = TracePoint::parse(

@ -104,9 +104,10 @@ impl UProbe {
#[cfg(test)]
mod tests {
use super::*;
use syn::parse_quote;
use super::*;
#[test]
fn uprobe() {
let uprobe = UProbe::parse(

@ -1,31 +1,52 @@
use std::borrow::Cow;
use proc_macro2::TokenStream;
use quote::quote;
use syn::{ItemFn, Result};
use syn::{Error, ItemFn, Result};
use crate::args::{err_on_unknown_args, pop_bool_arg, Args};
use crate::args::{err_on_unknown_args, pop_bool_arg, pop_string_arg, Args};
pub(crate) struct Xdp {
item: ItemFn,
frags: bool,
map: Option<XdpMap>,
}
#[derive(Clone, Copy)]
pub(crate) enum XdpMap {
CpuMap,
DevMap,
}
impl Xdp {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Xdp> {
let item = syn::parse2(item)?;
let mut args: Args = syn::parse2(attrs)?;
let frags = pop_bool_arg(&mut args, "frags");
let map = match pop_string_arg(&mut args, "map").as_deref() {
Some("cpumap") => Some(XdpMap::CpuMap),
Some("devmap") => Some(XdpMap::DevMap),
Some(name) => {
return Err(Error::new_spanned(
"map",
format!("Invalid value. Expected 'cpumap' or 'devmap', found '{name}'"),
))
}
None => None,
};
err_on_unknown_args(&args)?;
Ok(Xdp { item, frags })
Ok(Xdp { item, frags, map })
}
pub(crate) fn expand(&self) -> Result<TokenStream> {
let section_name: Cow<'_, _> = if self.frags {
"xdp.frags".into()
} else {
"xdp".into()
let mut section_name = vec![if self.frags { "xdp.frags" } else { "xdp" }];
match self.map {
Some(XdpMap::CpuMap) => section_name.push("cpumap"),
Some(XdpMap::DevMap) => section_name.push("devmap"),
None => (),
};
let section_name = section_name.join("/");
let fn_vis = &self.item.vis;
let fn_name = self.item.sig.ident.clone();
let item = &self.item;
@ -43,9 +64,10 @@ impl Xdp {
#[cfg(test)]
mod tests {
use super::*;
use syn::parse_quote;
use super::*;
#[test]
fn test_xdp() {
let prog = Xdp::parse(
@ -97,4 +119,122 @@ mod tests {
};
assert_eq!(expected.to_string(), expanded.to_string());
}
#[test]
fn test_xdp_cpumap() {
let prog = Xdp::parse(
parse_quote! { map = "cpumap" },
parse_quote! {
fn prog(ctx: &mut ::aya_bpf::programs::XdpContext) -> i32 {
0
}
},
)
.unwrap();
let expanded = prog.expand().unwrap();
let expected = quote! {
#[no_mangle]
#[link_section = "xdp/cpumap"]
fn prog(ctx: *mut ::aya_bpf::bindings::xdp_md) -> u32 {
return prog(::aya_bpf::programs::XdpContext::new(ctx));
fn prog(ctx: &mut ::aya_bpf::programs::XdpContext) -> i32 {
0
}
}
};
assert_eq!(expected.to_string(), expanded.to_string());
}
#[test]
fn test_xdp_devmap() {
let prog = Xdp::parse(
parse_quote! { map = "devmap" },
parse_quote! {
fn prog(ctx: &mut ::aya_bpf::programs::XdpContext) -> i32 {
0
}
},
)
.unwrap();
let expanded = prog.expand().unwrap();
let expected = quote! {
#[no_mangle]
#[link_section = "xdp/devmap"]
fn prog(ctx: *mut ::aya_bpf::bindings::xdp_md) -> u32 {
return prog(::aya_bpf::programs::XdpContext::new(ctx));
fn prog(ctx: &mut ::aya_bpf::programs::XdpContext) -> i32 {
0
}
}
};
assert_eq!(expected.to_string(), expanded.to_string());
}
#[test]
#[should_panic(expected = "Invalid value. Expected 'cpumap' or 'devmap', found 'badmap'")]
fn test_xdp_bad_map() {
Xdp::parse(
parse_quote! { map = "badmap" },
parse_quote! {
fn prog(ctx: &mut ::aya_bpf::programs::XdpContext) -> i32 {
0
}
},
)
.unwrap();
}
#[test]
fn test_xdp_frags_cpumap() {
let prog = Xdp::parse(
parse_quote! { frags, map = "cpumap" },
parse_quote! {
fn prog(ctx: &mut ::aya_bpf::programs::XdpContext) -> i32 {
0
}
},
)
.unwrap();
let expanded = prog.expand().unwrap();
let expected = quote! {
#[no_mangle]
#[link_section = "xdp.frags/cpumap"]
fn prog(ctx: *mut ::aya_bpf::bindings::xdp_md) -> u32 {
return prog(::aya_bpf::programs::XdpContext::new(ctx));
fn prog(ctx: &mut ::aya_bpf::programs::XdpContext) -> i32 {
0
}
}
};
assert_eq!(expected.to_string(), expanded.to_string());
}
#[test]
fn test_xdp_frags_devmap() {
let prog = Xdp::parse(
parse_quote! { frags, map = "devmap" },
parse_quote! {
fn prog(ctx: &mut ::aya_bpf::programs::XdpContext) -> i32 {
0
}
},
)
.unwrap();
let expanded = prog.expand().unwrap();
let expected = quote! {
#[no_mangle]
#[link_section = "xdp.frags/devmap"]
fn prog(ctx: *mut ::aya_bpf::bindings::xdp_md) -> u32 {
return prog(::aya_bpf::programs::XdpContext::new(ctx));
fn prog(ctx: &mut ::aya_bpf::programs::XdpContext) -> i32 {
0
}
}
};
assert_eq!(expected.to_string(), expanded.to_string());
}
}

@ -1,4 +1,6 @@
use proc_macro2::TokenStream;
use aya_log_common::DisplayHint;
use aya_log_parser::{parse, Fragment};
use proc_macro2::{Ident, Span, TokenStream};
use quote::quote;
use syn::{
parse::{Parse, ParseStream},
@ -6,9 +8,6 @@ use syn::{
Error, Expr, LitStr, Result, Token,
};
use aya_log_common::DisplayHint;
use aya_log_parser::{parse, Fragment};
pub(crate) struct LogArgs {
pub(crate) ctx: Expr,
pub(crate) target: Option<Expr>,
@ -142,13 +141,16 @@ pub(crate) fn log(args: LogArgs, level: Option<TokenStream>) -> Result<TokenStre
let num_args = values.len();
let values_iter = values.iter();
let size = Ident::new("size", Span::mixed_site());
let len = Ident::new("len", Span::mixed_site());
let slice = Ident::new("slice", Span::mixed_site());
let record = Ident::new("record", Span::mixed_site());
Ok(quote! {
match unsafe { &mut ::aya_log_ebpf::AYA_LOG_BUF }.get_ptr_mut(0).and_then(|ptr| unsafe { ptr.as_mut() }) {
None => {},
Some(::aya_log_ebpf::LogBuf { buf }) => {
let _: Option<()> = (|| {
let size = ::aya_log_ebpf::write_record_header(
let #size = ::aya_log_ebpf::write_record_header(
buf,
#target,
#lvl,
@ -157,14 +159,14 @@ pub(crate) fn log(args: LogArgs, level: Option<TokenStream>) -> Result<TokenStre
line!(),
#num_args,
)?;
let mut size = size.get();
let mut #size = #size.get();
#(
let slice = buf.get_mut(size..)?;
let len = ::aya_log_ebpf::WriteToBuf::write(#values_iter, slice)?;
size += len.get();
let #slice = buf.get_mut(#size..)?;
let #len = ::aya_log_ebpf::WriteToBuf::write(#values_iter, #slice)?;
#size += #len.get();
)*
let record = buf.get(..size)?;
unsafe { &mut ::aya_log_ebpf::AYA_LOGS }.output(#ctx, record, 0);
let #record = buf.get(..#size)?;
unsafe { &mut ::aya_log_ebpf::AYA_LOGS }.output(#ctx, #record, 0);
Some(())
})();
}
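A brief, hypothetical illustration of what the `Span::mixed_site()` identifiers above protect against: caller-side bindings that happen to share a name with the macro's internals. Only `info!`, `xdp`, and `XdpContext` are real aya APIs here; the program itself is made up.

```rust
use aya_bpf::{bindings::xdp_action::XDP_PASS, macros::xdp, programs::XdpContext};
use aya_log_ebpf::info;

#[xdp]
pub fn log_example(ctx: XdpContext) -> u32 {
    // A local named `size` (the same word the expansion uses internally) must not be
    // captured by the macro; the hygienic mixed_site identifiers guarantee that.
    let size: u32 = 1500;
    info!(&ctx, "payload size: {}", size);
    XDP_PASS
}
```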

@ -55,7 +55,7 @@ fn push_literal(frag: &mut Vec<Fragment>, unescaped_literal: &str) -> Result<(),
/// Parses the display hint (e.g. the `ipv4` in `{:ipv4}`).
fn parse_display_hint(s: &str) -> Result<DisplayHint, String> {
Ok(match s {
"x" => DisplayHint::LowerHex,
"p" | "x" => DisplayHint::LowerHex,
"X" => DisplayHint::UpperHex,
"i" => DisplayHint::Ip,
"mac" => DisplayHint::LowerMac,
@ -145,7 +145,7 @@ mod test {
#[test]
fn test_parse() {
assert_eq!(
parse("foo {} bar {:x} test {:X} ayy {:i} lmao {{}} {{something}}"),
parse("foo {} bar {:x} test {:X} ayy {:i} lmao {{}} {{something}} {:p}"),
Ok(vec![
Fragment::Literal("foo ".into()),
Fragment::Parameter(Parameter {
@ -163,7 +163,10 @@ mod test {
Fragment::Parameter(Parameter {
hint: DisplayHint::Ip
}),
Fragment::Literal(" lmao {} {something}".into()),
Fragment::Literal(" lmao {} {something} ".into()),
Fragment::Parameter(Parameter {
hint: DisplayHint::LowerHex
}),
])
);
assert!(parse("foo {:}").is_err());
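Since `p` now maps to the same `DisplayHint::LowerHex` as `x`, anything accepted by `{:x}` should also work with `{:p}`. A hedged, hypothetical ebpf-side usage (program and value are illustrative):

```rust
use aya_bpf::{bindings::xdp_action::XDP_PASS, macros::xdp, programs::XdpContext};
use aya_log_ebpf::info;

#[xdp]
pub fn hex_hint_example(ctx: XdpContext) -> u32 {
    // Illustrative u64; `{:p}` renders it in lower-case hex, exactly like `{:x}`.
    let addr: u64 = 0xffff_8880_1234_5678;
    info!(&ctx, "buffer at {:p}", addr);
    XDP_PASS
}
```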

@ -59,13 +59,6 @@ use std::{
const MAP_NAME: &str = "AYA_LOGS";
use aya_log_common::{
Argument, DisplayHint, Level, LogValueLength, RecordField, LOG_BUF_CAPACITY, LOG_FIELDS,
};
use bytes::BytesMut;
use log::{error, Log, Record};
use thiserror::Error;
use aya::{
maps::{
perf::{AsyncPerfEventArray, Events, PerfBufferError},
@ -74,6 +67,12 @@ use aya::{
util::online_cpus,
Bpf, Pod,
};
use aya_log_common::{
Argument, DisplayHint, Level, LogValueLength, RecordField, LOG_BUF_CAPACITY, LOG_FIELDS,
};
use bytes::BytesMut;
use log::{error, Log, Record};
use thiserror::Error;
#[derive(Copy, Clone)]
#[repr(transparent)]
@ -563,10 +562,11 @@ fn try_read<T: Pod>(mut buf: &[u8]) -> Result<(T, &[u8], &[u8]), ()> {
#[cfg(test)]
mod test {
use super::*;
use aya_log_common::{write_record_header, WriteToBuf};
use log::{logger, Level};
use super::*;
fn new_log(args: usize) -> Option<(usize, Vec<u8>)> {
let mut buf = vec![0; 8192];
let len = write_record_header(

@ -15,11 +15,8 @@ bytes = { workspace = true }
core-error = { workspace = true, default-features = true }
hashbrown = { workspace = true, default-features = true }
log = { workspace = true }
object = { workspace = true, default-features = false, features = [
"elf",
"read_core",
] }
thiserror = { workspace = true, default-features = false }
object = { workspace = true, features = ["elf", "read_core"] }
thiserror = { workspace = true }
[dev-dependencies]
assert_matches = { workspace = true }

@ -1,5 +1,3 @@
use core::{ffi::CStr, mem, ptr};
use alloc::{
borrow::{Cow, ToOwned as _},
format,
@ -7,11 +5,14 @@ use alloc::{
vec,
vec::Vec,
};
use bytes::BufMut;
use core::{ffi::CStr, mem, ptr};
use bytes::BufMut;
use log::debug;
use object::{Endianness, SectionIndex};
#[cfg(not(feature = "std"))]
use crate::std;
use crate::{
btf::{
info::{FuncSecInfo, LineSecInfo},
@ -24,9 +25,6 @@ use crate::{
Object,
};
#[cfg(not(feature = "std"))]
use crate::std;
pub(crate) const MAX_RESOLVE_DEPTH: u8 = 32;
pub(crate) const MAX_SPEC_LEN: usize = 64;
@ -1102,12 +1100,13 @@ pub(crate) struct SecInfo<'a> {
#[cfg(test)]
mod tests {
use assert_matches::assert_matches;
use super::*;
use crate::btf::{
BtfEnum64, BtfParam, DataSec, DataSecEntry, DeclTag, Enum64, Float, Func, FuncProto, Ptr,
TypeTag, Var,
};
use assert_matches::assert_matches;
#[test]
fn test_parse_header() {

@ -1,4 +1,5 @@
use alloc::{string::String, vec, vec::Vec};
use bytes::BufMut;
use object::Endianness;

@ -1,5 +1,3 @@
use core::{mem, ops::Bound::Included, ptr};
use alloc::{
borrow::{Cow, ToOwned as _},
collections::BTreeMap,
@ -8,8 +6,12 @@ use alloc::{
vec,
vec::Vec,
};
use core::{mem, ops::Bound::Included, ptr};
use object::SectionIndex;
#[cfg(not(feature = "std"))]
use crate::std;
use crate::{
btf::{
fields_are_compatible, types_are_compatible, Array, Btf, BtfError, BtfMember, BtfType,
@ -23,9 +25,6 @@ use crate::{
Function, Object,
};
#[cfg(not(feature = "std"))]
use crate::std;
/// The error type returned by [`Object::relocate_btf`].
#[derive(thiserror::Error, Debug)]
#[error("error relocating `{section}`")]

@ -1,8 +1,8 @@
#![allow(missing_docs)]
use alloc::{string::ToString, vec, vec::Vec};
use core::{fmt::Display, mem, ptr};
use alloc::{string::ToString, vec, vec::Vec};
use object::Endianness;
use crate::btf::{Btf, BtfError, MAX_RESOLVE_DEPTH};
@ -1570,9 +1570,10 @@ fn bytes_of<T>(val: &T) -> &[u8] {
}
#[cfg(test)]
mod tests {
use super::*;
use assert_matches::assert_matches;
use super::*;
#[test]
fn test_read_btf_type_int() {
let endianness = Endianness::default();

@ -21,15 +21,11 @@ mod linux_bindings_x86_64;
// don't re-export __u8 __u16 etc which are already exported by the
// linux_bindings_* module
pub use btf_internal_bindings::{bpf_core_relo, bpf_core_relo_kind, btf_ext_header};
#[cfg(target_arch = "x86_64")]
pub use linux_bindings_x86_64::*;
#[cfg(target_arch = "arm")]
pub use linux_bindings_armv7::*;
#[cfg(target_arch = "aarch64")]
pub use linux_bindings_aarch64::*;
#[cfg(target_arch = "arm")]
pub use linux_bindings_armv7::*;
#[cfg(target_arch = "riscv64")]
pub use linux_bindings_riscv64::*;
#[cfg(target_arch = "x86_64")]
pub use linux_bindings_x86_64::*;

@ -77,6 +77,12 @@ mod std {
pub use core_error::Error;
}
pub use core::*;
pub mod os {
pub mod fd {
pub type RawFd = core::ffi::c_int;
}
}
}
pub mod btf;

@ -1,12 +1,11 @@
//! Map struct and type bindings.
use core::mem;
use crate::BpfSectionKind;
use alloc::vec::Vec;
use core::mem;
#[cfg(not(feature = "std"))]
use crate::std;
use crate::BpfSectionKind;
/// Invalid map type encountered
pub struct InvalidMapTypeError {
@ -176,6 +175,14 @@ impl Map {
}
}
/// Set the value size in bytes
pub fn set_value_size(&mut self, size: u32) {
match self {
Map::Legacy(m) => m.def.value_size = size,
Map::Btf(m) => m.def.value_size = size,
}
}
/// Returns the max entry number
pub fn max_entries(&self) -> u32 {
match self {

@ -7,7 +7,8 @@ use alloc::{
string::{String, ToString},
vec::Vec,
};
use core::{ffi::CStr, mem, ptr, str::FromStr};
use core::{ffi::CStr, mem, ptr, slice::from_raw_parts_mut, str::FromStr};
use log::debug;
use object::{
read::{Object as ElfObject, ObjectSection, Section as ObjSection},
@ -15,26 +16,23 @@ use object::{
SymbolKind,
};
use crate::{
btf::BtfFeatures,
generated::{BPF_CALL, BPF_JMP, BPF_K},
maps::{BtfMap, LegacyMap, Map, MINIMUM_MAP_SIZE},
relocation::*,
util::HashMap,
};
#[cfg(not(feature = "std"))]
use crate::std;
use crate::{
btf::{Btf, BtfError, BtfExt, BtfType},
generated::{bpf_insn, bpf_map_info, bpf_map_type::BPF_MAP_TYPE_ARRAY, BPF_F_RDONLY_PROG},
maps::{bpf_map_def, BtfMapDef, PinningType},
programs::{CgroupSockAddrAttachType, CgroupSockAttachType, CgroupSockoptAttachType},
btf::{
Array, Btf, BtfError, BtfExt, BtfFeatures, BtfType, DataSecEntry, FuncSecInfo, LineSecInfo,
},
generated::{
bpf_insn, bpf_map_info, bpf_map_type::BPF_MAP_TYPE_ARRAY, BPF_CALL, BPF_F_RDONLY_PROG,
BPF_JMP, BPF_K,
},
maps::{bpf_map_def, BtfMap, BtfMapDef, LegacyMap, Map, PinningType, MINIMUM_MAP_SIZE},
programs::{
CgroupSockAddrAttachType, CgroupSockAttachType, CgroupSockoptAttachType, XdpAttachType,
},
relocation::*,
util::HashMap,
};
use core::slice::from_raw_parts_mut;
use crate::btf::{Array, DataSecEntry, FuncSecInfo, LineSecInfo};
const KERNEL_VERSION_ANY: u32 = 0xFFFF_FFFE;
@ -47,17 +45,22 @@ pub struct Features {
bpf_perf_link: bool,
bpf_global_data: bool,
bpf_cookie: bool,
cpumap_prog_id: bool,
devmap_prog_id: bool,
btf: Option<BtfFeatures>,
}
impl Features {
#[doc(hidden)]
#[allow(clippy::too_many_arguments)]
pub fn new(
bpf_name: bool,
bpf_probe_read_kernel: bool,
bpf_perf_link: bool,
bpf_global_data: bool,
bpf_cookie: bool,
cpumap_prog_id: bool,
devmap_prog_id: bool,
btf: Option<BtfFeatures>,
) -> Self {
Self {
@ -66,6 +69,8 @@ impl Features {
bpf_perf_link,
bpf_global_data,
bpf_cookie,
cpumap_prog_id,
devmap_prog_id,
btf,
}
}
@ -95,6 +100,16 @@ impl Features {
self.bpf_cookie
}
/// Returns whether XDP CPU Maps support chained program IDs.
pub fn cpumap_prog_id(&self) -> bool {
self.cpumap_prog_id
}
/// Returns whether XDP Device Maps support chained program IDs.
pub fn devmap_prog_id(&self) -> bool {
self.devmap_prog_id
}
/// If BTF is supported, returns which BTF features are supported.
pub fn btf(&self) -> Option<&BtfFeatures> {
self.btf.as_ref()
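As a hedged sketch of how these getters are consumed (mirroring the `BpfLoader` change later in this diff; `features` is assumed to be a `Features` value in scope), the loader-side decision boils down to:

```rust
// With chained program IDs supported, CPUMAP/DEVMAP values carry a program fd/id
// next to the queue size or ifindex, so entries are 8 bytes instead of the
// legacy 4-byte value.
let cpumap_value_size: u32 = if features.cpumap_prog_id() { 8 } else { 4 };
let devmap_value_size: u32 = if features.devmap_prog_id() { 8 } else { 4 };
```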
@ -204,8 +219,6 @@ pub struct Function {
/// - `struct_ops+`
/// - `fmod_ret+`, `fmod_ret.s+`
/// - `iter+`, `iter.s+`
/// - `xdp.frags/cpumap`, `xdp/cpumap`
/// - `xdp.frags/devmap`, `xdp/devmap`
#[derive(Debug, Clone)]
#[allow(missing_docs)]
pub enum ProgramSection {
@ -221,6 +234,7 @@ pub enum ProgramSection {
SocketFilter,
Xdp {
frags: bool,
attach_type: XdpAttachType,
},
SkMsg,
SkSkbStreamParser,
@ -266,10 +280,15 @@ impl FromStr for ProgramSection {
// parse the common case, eg "xdp/program_name" or
// "sk_skb/stream_verdict/program_name"
let (kind, name) = match section.rsplit_once('/') {
None => (section, section),
Some((kind, name)) => (kind, name),
let mut pieces = section.split('/');
let mut next = || {
pieces
.next()
.ok_or_else(|| ParseError::InvalidProgramSection {
section: section.to_owned(),
})
};
let kind = next()?;
Ok(match kind {
"kprobe" => KProbe,
@ -278,131 +297,119 @@ impl FromStr for ProgramSection {
"uprobe.s" => UProbe { sleepable: true },
"uretprobe" => URetProbe { sleepable: false },
"uretprobe.s" => URetProbe { sleepable: true },
"xdp" => Xdp { frags: false },
"xdp.frags" => Xdp { frags: true },
"xdp" | "xdp.frags" => Xdp {
frags: kind == "xdp.frags",
attach_type: match pieces.next() {
None => XdpAttachType::Interface,
Some("cpumap") => XdpAttachType::CpuMap,
Some("devmap") => XdpAttachType::DevMap,
Some(_) => {
return Err(ParseError::InvalidProgramSection {
section: section.to_owned(),
})
}
},
},
"tp_btf" => BtfTracePoint,
kind if kind.starts_with("tracepoint") || kind.starts_with("tp") => TracePoint,
"tracepoint" | "tp" => TracePoint,
"socket" => SocketFilter,
"sk_msg" => SkMsg,
"sk_skb" => match name {
"stream_parser" => SkSkbStreamParser,
"stream_verdict" => SkSkbStreamVerdict,
_ => {
return Err(ParseError::InvalidProgramSection {
section: section.to_owned(),
})
}
},
"sk_skb/stream_parser" => SkSkbStreamParser,
"sk_skb/stream_verdict" => SkSkbStreamVerdict,
"sockops" => SockOps,
"classifier" => SchedClassifier,
"cgroup_skb" => match name {
"ingress" => CgroupSkbIngress,
"egress" => CgroupSkbEgress,
_ => {
return Err(ParseError::InvalidProgramSection {
section: section.to_owned(),
})
}
},
"cgroup_skb/ingress" => CgroupSkbIngress,
"cgroup_skb/egress" => CgroupSkbEgress,
"cgroup/skb" => CgroupSkb,
"cgroup/sock" => CgroupSock {
attach_type: CgroupSockAttachType::default(),
},
"cgroup/sysctl" => CgroupSysctl,
"cgroup/dev" => CgroupDevice,
"cgroup/getsockopt" => CgroupSockopt {
attach_type: CgroupSockoptAttachType::Get,
},
"cgroup/setsockopt" => CgroupSockopt {
attach_type: CgroupSockoptAttachType::Set,
},
"cgroup" => match name {
"skb" => CgroupSkb,
"sysctl" => CgroupSysctl,
"dev" => CgroupDevice,
"getsockopt" | "setsockopt" => {
if let Ok(attach_type) = CgroupSockoptAttachType::try_from(name) {
CgroupSockopt { attach_type }
} else {
"sk_skb" => {
let name = next()?;
match name {
"stream_parser" => SkSkbStreamParser,
"stream_verdict" => SkSkbStreamVerdict,
_ => {
return Err(ParseError::InvalidProgramSection {
section: section.to_owned(),
});
})
}
}
"sock" => CgroupSock {
attach_type: CgroupSockAttachType::default(),
},
"post_bind4" | "post_bind6" | "sock_create" | "sock_release" => {
if let Ok(attach_type) = CgroupSockAttachType::try_from(name) {
CgroupSock { attach_type }
} else {
}
"sockops" => SockOps,
"classifier" => SchedClassifier,
"cgroup_skb" => {
let name = next()?;
match name {
"ingress" => CgroupSkbIngress,
"egress" => CgroupSkbEgress,
_ => {
return Err(ParseError::InvalidProgramSection {
section: section.to_owned(),
});
})
}
}
name => {
if let Ok(attach_type) = CgroupSockAddrAttachType::try_from(name) {
CgroupSockAddr { attach_type }
} else {
}
"cgroup" => {
let name = next()?;
match name {
"skb" => CgroupSkb,
"sysctl" => CgroupSysctl,
"dev" => CgroupDevice,
"getsockopt" => CgroupSockopt {
attach_type: CgroupSockoptAttachType::Get,
},
"setsockopt" => CgroupSockopt {
attach_type: CgroupSockoptAttachType::Set,
},
"sock" => CgroupSock {
attach_type: CgroupSockAttachType::default(),
},
"post_bind4" => CgroupSock {
attach_type: CgroupSockAttachType::PostBind4,
},
"post_bind6" => CgroupSock {
attach_type: CgroupSockAttachType::PostBind6,
},
"sock_create" => CgroupSock {
attach_type: CgroupSockAttachType::SockCreate,
},
"sock_release" => CgroupSock {
attach_type: CgroupSockAttachType::SockRelease,
},
"bind4" => CgroupSockAddr {
attach_type: CgroupSockAddrAttachType::Bind4,
},
"bind6" => CgroupSockAddr {
attach_type: CgroupSockAddrAttachType::Bind6,
},
"connect4" => CgroupSockAddr {
attach_type: CgroupSockAddrAttachType::Connect4,
},
"connect6" => CgroupSockAddr {
attach_type: CgroupSockAddrAttachType::Connect6,
},
"getpeername4" => CgroupSockAddr {
attach_type: CgroupSockAddrAttachType::GetPeerName4,
},
"getpeername6" => CgroupSockAddr {
attach_type: CgroupSockAddrAttachType::GetPeerName6,
},
"getsockname4" => CgroupSockAddr {
attach_type: CgroupSockAddrAttachType::GetSockName4,
},
"getsockname6" => CgroupSockAddr {
attach_type: CgroupSockAddrAttachType::GetSockName6,
},
"sendmsg4" => CgroupSockAddr {
attach_type: CgroupSockAddrAttachType::UDPSendMsg4,
},
"sendmsg6" => CgroupSockAddr {
attach_type: CgroupSockAddrAttachType::UDPSendMsg6,
},
"recvmsg4" => CgroupSockAddr {
attach_type: CgroupSockAddrAttachType::UDPRecvMsg4,
},
"recvmsg6" => CgroupSockAddr {
attach_type: CgroupSockAddrAttachType::UDPRecvMsg6,
},
_ => {
return Err(ParseError::InvalidProgramSection {
section: section.to_owned(),
});
}
}
},
"cgroup/post_bind4" => CgroupSock {
attach_type: CgroupSockAttachType::PostBind4,
},
"cgroup/post_bind6" => CgroupSock {
attach_type: CgroupSockAttachType::PostBind6,
},
"cgroup/sock_create" => CgroupSock {
attach_type: CgroupSockAttachType::SockCreate,
},
"cgroup/sock_release" => CgroupSock {
attach_type: CgroupSockAttachType::SockRelease,
},
"cgroup/bind4" => CgroupSockAddr {
attach_type: CgroupSockAddrAttachType::Bind4,
},
"cgroup/bind6" => CgroupSockAddr {
attach_type: CgroupSockAddrAttachType::Bind6,
},
"cgroup/connect4" => CgroupSockAddr {
attach_type: CgroupSockAddrAttachType::Connect4,
},
"cgroup/connect6" => CgroupSockAddr {
attach_type: CgroupSockAddrAttachType::Connect6,
},
"cgroup/getpeername4" => CgroupSockAddr {
attach_type: CgroupSockAddrAttachType::GetPeerName4,
},
"cgroup/getpeername6" => CgroupSockAddr {
attach_type: CgroupSockAddrAttachType::GetPeerName6,
},
"cgroup/getsockname4" => CgroupSockAddr {
attach_type: CgroupSockAddrAttachType::GetSockName4,
},
"cgroup/getsockname6" => CgroupSockAddr {
attach_type: CgroupSockAddrAttachType::GetSockName6,
},
"cgroup/sendmsg4" => CgroupSockAddr {
attach_type: CgroupSockAddrAttachType::UDPSendMsg4,
},
"cgroup/sendmsg6" => CgroupSockAddr {
attach_type: CgroupSockAddrAttachType::UDPSendMsg6,
},
"cgroup/recvmsg4" => CgroupSockAddr {
attach_type: CgroupSockAddrAttachType::UDPRecvMsg4,
},
"cgroup/recvmsg6" => CgroupSockAddr {
attach_type: CgroupSockAddrAttachType::UDPRecvMsg6,
},
}
"lirc_mode2" => LircMode2,
"perf_event" => PerfEvent,
"raw_tp" | "raw_tracepoint" => RawTracePoint,
@ -592,12 +599,12 @@ impl Object {
.get(symbol_index)
.expect("all symbols in symbols_by_section are also in symbol_table");
let Some(name) = symbol.name.as_ref() else {
continue;
// Here we get both ::Label (LBB*) and ::Text symbols, and we only want the latter.
let name = match (symbol.name.as_ref(), symbol.kind) {
(Some(name), SymbolKind::Text) if !name.is_empty() => name,
_ => continue,
};
if name.is_empty() {
continue;
}
let (p, f) =
self.parse_program(section, program_section.clone(), name.to_string(), symbol)?;
let key = p.function_key();
@ -1385,6 +1392,7 @@ fn get_func_and_line_info(
#[cfg(test)]
mod tests {
use alloc::vec;
use assert_matches::assert_matches;
use object::Endianness;
@ -1642,15 +1650,12 @@ mod tests {
let prog_foo = obj.programs.get("foo").unwrap();
assert_matches!(
prog_foo,
Program {
license,
kernel_version: None,
section: ProgramSection::KProbe { .. },
..
} if license.to_str().unwrap() == "GPL"
);
assert_matches!(prog_foo, Program {
license,
kernel_version: None,
section: ProgramSection::KProbe { .. },
..
} => assert_eq!(license.to_str().unwrap(), "GPL"));
assert_matches!(
obj.functions.get(&prog_foo.function_key()),
@ -1704,14 +1709,12 @@ mod tests {
let prog_bar = obj.programs.get("bar").unwrap();
let function_bar = obj.functions.get(&prog_bar.function_key()).unwrap();
assert_matches!(prog_foo,
Program {
license,
kernel_version: None,
section: ProgramSection::KProbe { .. },
..
} if license.to_string_lossy() == "GPL"
);
assert_matches!(prog_foo, Program {
license,
kernel_version: None,
section: ProgramSection::KProbe { .. },
..
} => assert_eq!(license.to_str().unwrap(), "GPL"));
assert_matches!(
function_foo,
Function {
@ -1724,14 +1727,12 @@ mod tests {
} if name == "foo" && instructions.len() == 1
);
assert_matches!(prog_bar,
Program {
license,
kernel_version: None,
section: ProgramSection::KProbe { .. },
..
} if license.to_string_lossy() == "GPL"
);
assert_matches!(prog_bar, Program {
license,
kernel_version: None,
section: ProgramSection::KProbe { .. },
..
} => assert_eq!(license.to_str().unwrap(), "GPL"));
assert_matches!(
function_bar,
Function {
@ -2037,7 +2038,7 @@ mod tests {
assert_matches!(
obj.parse_section(fake_section(
BpfSectionKind::Program,
"xdp/foo",
"xdp",
bytes_of(&fake_ins()),
None
)),
@ -2060,7 +2061,7 @@ mod tests {
assert_matches!(
obj.parse_section(fake_section(
BpfSectionKind::Program,
"xdp.frags/foo",
"xdp.frags",
bytes_of(&fake_ins()),
None
)),

@ -1,11 +1,6 @@
//! Cgroup socket programs.
use alloc::{borrow::ToOwned, string::String};
use crate::generated::bpf_attach_type;
#[cfg(not(feature = "std"))]
use crate::std;
/// Defines where to attach a `CgroupSock` program.
#[derive(Copy, Clone, Debug, Default)]
pub enum CgroupSockAttachType {
@ -30,19 +25,3 @@ impl From<CgroupSockAttachType> for bpf_attach_type {
}
}
}
#[derive(Debug, thiserror::Error)]
#[error("{0} is not a valid attach type for a CGROUP_SOCK program")]
pub(crate) struct InvalidAttachType(String);
impl CgroupSockAttachType {
pub(crate) fn try_from(value: &str) -> Result<CgroupSockAttachType, InvalidAttachType> {
match value {
"post_bind4" => Ok(CgroupSockAttachType::PostBind4),
"post_bind6" => Ok(CgroupSockAttachType::PostBind6),
"sock_create" => Ok(CgroupSockAttachType::SockCreate),
"sock_release" => Ok(CgroupSockAttachType::SockRelease),
_ => Err(InvalidAttachType(value.to_owned())),
}
}
}

@ -1,11 +1,6 @@
//! Cgroup socket address programs.
use alloc::{borrow::ToOwned, string::String};
use crate::generated::bpf_attach_type;
#[cfg(not(feature = "std"))]
use crate::std;
/// Defines where to attach a `CgroupSockAddr` program.
#[derive(Copy, Clone, Debug)]
pub enum CgroupSockAddrAttachType {
@ -53,27 +48,3 @@ impl From<CgroupSockAddrAttachType> for bpf_attach_type {
}
}
}
#[derive(Debug, thiserror::Error)]
#[error("{0} is not a valid attach type for a CGROUP_SOCK_ADDR program")]
pub(crate) struct InvalidAttachType(String);
impl CgroupSockAddrAttachType {
pub(crate) fn try_from(value: &str) -> Result<CgroupSockAddrAttachType, InvalidAttachType> {
match value {
"bind4" => Ok(CgroupSockAddrAttachType::Bind4),
"bind6" => Ok(CgroupSockAddrAttachType::Bind6),
"connect4" => Ok(CgroupSockAddrAttachType::Connect4),
"connect6" => Ok(CgroupSockAddrAttachType::Connect6),
"getpeername4" => Ok(CgroupSockAddrAttachType::GetPeerName4),
"getpeername6" => Ok(CgroupSockAddrAttachType::GetPeerName6),
"getsockname4" => Ok(CgroupSockAddrAttachType::GetSockName4),
"getsockname6" => Ok(CgroupSockAddrAttachType::GetSockName6),
"sendmsg4" => Ok(CgroupSockAddrAttachType::UDPSendMsg4),
"sendmsg6" => Ok(CgroupSockAddrAttachType::UDPSendMsg6),
"recvmsg4" => Ok(CgroupSockAddrAttachType::UDPRecvMsg4),
"recvmsg6" => Ok(CgroupSockAddrAttachType::UDPRecvMsg6),
_ => Err(InvalidAttachType(value.to_owned())),
}
}
}

@ -1,11 +1,6 @@
//! Cgroup socket option programs.
use alloc::{borrow::ToOwned, string::String};
use crate::generated::bpf_attach_type;
#[cfg(not(feature = "std"))]
use crate::std;
/// Defines where to attach a `CgroupSockopt` program.
#[derive(Copy, Clone, Debug)]
pub enum CgroupSockoptAttachType {
@ -23,17 +18,3 @@ impl From<CgroupSockoptAttachType> for bpf_attach_type {
}
}
}
#[derive(Debug, thiserror::Error)]
#[error("{0} is not a valid attach type for a CGROUP_SOCKOPT program")]
pub(crate) struct InvalidAttachType(String);
impl CgroupSockoptAttachType {
pub(crate) fn try_from(value: &str) -> Result<CgroupSockoptAttachType, InvalidAttachType> {
match value {
"getsockopt" => Ok(CgroupSockoptAttachType::Get),
"setsockopt" => Ok(CgroupSockoptAttachType::Set),
_ => Err(InvalidAttachType(value.to_owned())),
}
}
}

@ -3,7 +3,9 @@
pub mod cgroup_sock;
pub mod cgroup_sock_addr;
pub mod cgroup_sockopt;
pub mod xdp;
pub use cgroup_sock::CgroupSockAttachType;
pub use cgroup_sock_addr::CgroupSockAddrAttachType;
pub use cgroup_sockopt::CgroupSockoptAttachType;
pub use xdp::XdpAttachType;

@ -0,0 +1,24 @@
//! XDP programs.
use crate::generated::bpf_attach_type;
/// Defines where to attach an `XDP` program.
#[derive(Copy, Clone, Debug)]
pub enum XdpAttachType {
/// Attach to a network interface.
Interface,
/// Attach to a cpumap. Requires kernel 5.9 or later.
CpuMap,
/// Attach to a devmap. Requires kernel 5.8 or later.
DevMap,
}
impl From<XdpAttachType> for bpf_attach_type {
fn from(value: XdpAttachType) -> Self {
match value {
XdpAttachType::Interface => bpf_attach_type::BPF_XDP,
XdpAttachType::CpuMap => bpf_attach_type::BPF_XDP_CPUMAP,
XdpAttachType::DevMap => bpf_attach_type::BPF_XDP_DEVMAP,
}
}
}

@ -1,11 +1,13 @@
//! Program relocation handling.
use alloc::{borrow::ToOwned, collections::BTreeMap, string::String};
use core::mem;
use alloc::{borrow::ToOwned, collections::BTreeMap, string::String};
use log::debug;
use object::{SectionIndex, SymbolKind};
#[cfg(not(feature = "std"))]
use crate::std;
use crate::{
generated::{
bpf_insn, BPF_CALL, BPF_JMP, BPF_K, BPF_PSEUDO_CALL, BPF_PSEUDO_FUNC, BPF_PSEUDO_MAP_FD,
@ -17,9 +19,6 @@ use crate::{
BpfSectionKind,
};
#[cfg(not(feature = "std"))]
use crate::std;
pub(crate) const INS_SIZE: usize = mem::size_of::<bpf_insn>();
/// The error type returned by [`Object::relocate_maps`] and [`Object::relocate_calls`]
@ -105,7 +104,7 @@ pub(crate) struct Symbol {
impl Object {
/// Relocates the map references
pub fn relocate_maps<'a, I: Iterator<Item = (&'a str, i32, &'a Map)>>(
pub fn relocate_maps<'a, I: Iterator<Item = (&'a str, std::os::fd::RawFd, &'a Map)>>(
&mut self,
maps: I,
text_sections: &HashSet<usize>,
@ -178,8 +177,8 @@ impl Object {
fn relocate_maps<'a, I: Iterator<Item = &'a Relocation>>(
fun: &mut Function,
relocations: I,
maps_by_section: &HashMap<usize, (&str, i32, &Map)>,
maps_by_symbol: &HashMap<usize, (&str, i32, &Map)>,
maps_by_section: &HashMap<usize, (&str, std::os::fd::RawFd, &Map)>,
maps_by_symbol: &HashMap<usize, (&str, std::os::fd::RawFd, &Map)>,
symbol_table: &HashMap<usize, Symbol>,
text_sections: &HashSet<usize>,
) -> Result<(), RelocationError> {
@ -498,13 +497,12 @@ fn insn_is_call(ins: &bpf_insn) -> bool {
mod test {
use alloc::{string::ToString, vec, vec::Vec};
use super::*;
use crate::{
maps::{BtfMap, LegacyMap, Map},
BpfSectionKind,
};
use super::*;
fn fake_sym(index: usize, section_index: usize, address: u64, name: &str, size: u64) -> Symbol {
Symbol {
index,

@ -1,14 +1,13 @@
use core::{mem, slice};
#[cfg(not(feature = "std"))]
pub(crate) use hashbrown::HashMap;
#[cfg(feature = "std")]
pub(crate) use std::collections::HashMap;
#[cfg(feature = "std")]
pub(crate) use std::collections::HashSet;
#[cfg(not(feature = "std"))]
pub(crate) use hashbrown::HashMap;
#[cfg(not(feature = "std"))]
pub(crate) use hashbrown::HashSet;
#[cfg(feature = "std")]
pub(crate) use std::collections::HashSet;
/// bytes_of converts a <T> to a byte slice
pub(crate) unsafe fn bytes_of<T>(val: &T) -> &[u8] {

@ -1,7 +1,6 @@
use aya_tool::generate::{generate, InputFile};
use std::{path::PathBuf, process::exit};
use aya_tool::generate::{generate, InputFile};
use clap::Parser;
#[derive(Parser)]

@ -7,7 +7,6 @@ use std::{
};
use tempfile::tempdir;
use thiserror::Error;
use crate::bindgen;

@ -12,6 +12,7 @@ edition = "2021"
rust-version = "1.66"
[dependencies]
assert_matches = { workspace = true }
async-io = { workspace = true, optional = true }
aya-obj = { workspace = true, features = ["std"] }
bitflags = { workspace = true }
@ -19,24 +20,17 @@ bytes = { workspace = true }
lazy_static = { workspace = true }
libc = { workspace = true }
log = { workspace = true }
object = { workspace = true, default-features = false, features = [
"elf",
"read_core",
"std",
] }
parking_lot = { workspace = true }
object = { workspace = true, features = ["elf", "read_core", "std"] }
thiserror = { workspace = true }
tokio = { workspace = true, features = ["rt"], optional = true }
[dev-dependencies]
assert_matches = { workspace = true }
futures = { workspace = true }
tempfile = { workspace = true }
[features]
default = []
async_tokio = ["tokio/net"]
async_std = ["async-io"]
async_std = ["dep:async-io"]
[package.metadata.docs.rs]
all-features = true

@ -5,11 +5,10 @@
![Build status][build-badge]
[![Book][book-badge]][book-url]
[crates-badge]: https://img.shields.io/crates/v/aya.svg?style=for-the-badge&logo=rust
[crates-url]: https://crates.io/crates/aya
[license-badge]: https://img.shields.io/badge/license-MIT%2FApache--2.0-blue?style=for-the-badge
[build-badge]: https://img.shields.io/github/workflow/status/aya-rs/aya/build-aya?style=for-the-badge&logo=github
[build-badge]: https://img.shields.io/github/actions/workflow/status/aya-rs/aya/build-aya.yml?branch=main&style=for-the-badge
[book-badge]: https://img.shields.io/badge/read%20the-book-9cf.svg?style=for-the-badge&logo=mdbook
[book-url]: https://aya-rs.dev/book
@ -26,8 +25,8 @@
[![Discord][discord-badge]][chat-url] [![Awesome][awesome-badge]][awesome-aya]
Join [the conversation on Discord][chat-url] to discuss anything related to Aya, or discover
and contribute to a list of [Awesome Aya][awesome-aya] projects.
Join [the conversation on Discord][chat-url] to discuss anything related to Aya
or discover and contribute to a list of [Awesome Aya][awesome-aya] projects.
[discord-badge]: https://img.shields.io/badge/Discord-chat-5865F2?style=for-the-badge&logo=discord
[chat-url]: https://discord.gg/xHW2cb2N6G
@ -37,7 +36,7 @@ and contribute to a list of [Awesome Aya][awesome-aya] projects.
## Overview
eBPF is a technology that allows running user-supplied programs inside the Linux
kernel. For more info see https://ebpf.io/what-is-ebpf.
kernel. For more info see [What is eBPF](https://ebpf.io/what-is-ebpf).
Aya is an eBPF library built with a focus on operability and developer
experience. It does not rely on [libbpf] nor [bcc] - it's built from the ground
@ -69,9 +68,8 @@ Some of the major features provided include:
### Example
Aya supports a large chunk of the eBPF API. The following example shows how to use a
`BPF_PROG_TYPE_CGROUP_SKB` program with aya:
Aya supports a large chunk of the eBPF API. The following example shows how to
use a `BPF_PROG_TYPE_CGROUP_SKB` program with aya:
```rust
use std::fs::File;
@ -96,12 +94,15 @@ ingress.attach(cgroup, CgroupSkbAttachType::Ingress)?;
## Contributing
Please see the [contributing guide](https://github.com/aya-rs/aya/blob/main/CONTRIBUTING.md).
## License
Aya is distributed under the terms of either the [MIT license] or the [Apache License] (version
2.0), at your option.
Aya is distributed under the terms of either the [MIT license] or the
[Apache License] (version 2.0), at your option.
Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in this crate by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions.
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in this crate by you, as defined in the Apache-2.0 license, shall
be dual licensed as above, without any additional terms or conditions.
[MIT license]: https://github.com/aya-rs/aya/blob/main/LICENSE-MIT
[Apache license]: https://github.com/aya-rs/aya/blob/main/LICENSE-APACHE

@ -1,10 +1,9 @@
use std::{
borrow::Cow,
collections::{HashMap, HashSet},
ffi::CString,
fs, io,
os::{
fd::{AsFd as _, OwnedFd},
fd::{AsFd as _, AsRawFd as _, OwnedFd},
raw::c_int,
},
path::{Path, PathBuf},
@ -37,14 +36,14 @@ use crate::{
SkMsg, SkSkb, SkSkbKind, SockOps, SocketFilter, TracePoint, UProbe, Xdp,
},
sys::{
bpf_load_btf, bpf_map_freeze, bpf_map_update_elem_ptr, is_bpf_cookie_supported,
is_bpf_global_data_supported, is_btf_datasec_supported, is_btf_decl_tag_supported,
is_btf_enum64_supported, is_btf_float_supported, is_btf_func_global_supported,
is_btf_func_supported, is_btf_supported, is_btf_type_tag_supported, is_perf_link_supported,
is_probe_read_kernel_supported, is_prog_name_supported, retry_with_verifier_logs,
SyscallError,
bpf_load_btf, is_bpf_cookie_supported, is_bpf_global_data_supported,
is_btf_datasec_supported, is_btf_decl_tag_supported, is_btf_enum64_supported,
is_btf_float_supported, is_btf_func_global_supported, is_btf_func_supported,
is_btf_supported, is_btf_type_tag_supported, is_perf_link_supported,
is_probe_read_kernel_supported, is_prog_id_supported, is_prog_name_supported,
retry_with_verifier_logs,
},
util::{bytes_of, bytes_of_slice, possible_cpus, POSSIBLE_CPUS},
util::{bytes_of, bytes_of_slice, page_size, possible_cpus, POSSIBLE_CPUS},
};
pub(crate) const BPF_OBJ_NAME_LEN: usize = 16;
@ -71,7 +70,7 @@ unsafe impl<T: Pod, const N: usize> Pod for [T; N] {}
pub use aya_obj::maps::{bpf_map_def, PinningType};
lazy_static! {
lazy_static::lazy_static! {
pub(crate) static ref FEATURES: Features = detect_features();
}
@ -95,6 +94,8 @@ fn detect_features() -> Features {
is_perf_link_supported(),
is_bpf_global_data_supported(),
is_bpf_cookie_supported(),
is_prog_id_supported(BPF_MAP_TYPE_CPUMAP),
is_prog_id_supported(BPF_MAP_TYPE_DEVMAP),
btf,
);
debug!("BPF Feature Detection: {:#?}", f);
@ -138,7 +139,7 @@ pub struct BpfLoader<'a> {
allow_unsupported_maps: bool,
}
bitflags! {
bitflags::bitflags! {
/// Used to set the verifier log level flags in [BpfLoader](BpfLoader::verifier_log_level()).
#[derive(Clone, Copy, Debug)]
pub struct VerifierLogLevel: u32 {
@ -161,8 +162,8 @@ impl Default for VerifierLogLevel {
impl<'a> BpfLoader<'a> {
/// Creates a new loader instance.
pub fn new() -> BpfLoader<'a> {
BpfLoader {
pub fn new() -> Self {
Self {
btf: Btf::from_sys_fs().ok().map(Cow::Owned),
map_pin_path: None,
globals: HashMap::new(),
@ -397,7 +398,7 @@ impl<'a> BpfLoader<'a> {
if let Some(btf) = obj.fixup_and_sanitize_btf(features)? {
match load_btf(btf.to_bytes(), *verifier_log_level) {
Ok(btf_fd) => Some(Arc::new(btf_fd)),
// Only report an error here if the BTF is truely needed, otherwise proceed without.
// Only report an error here if the BTF is truly needed, otherwise proceed without.
Err(err) => {
for program in obj.programs.values() {
match program.section {
@ -414,7 +415,10 @@ impl<'a> BpfLoader<'a> {
| ProgramSection::URetProbe { sleepable: _ }
| ProgramSection::TracePoint
| ProgramSection::SocketFilter
| ProgramSection::Xdp { frags: _ }
| ProgramSection::Xdp {
frags: _,
attach_type: _,
}
| ProgramSection::SkMsg
| ProgramSection::SkSkbStreamParser
| ProgramSection::SkSkbStreamVerdict
@ -457,49 +461,47 @@ impl<'a> BpfLoader<'a> {
{
continue;
}
match max_entries.get(name.as_str()) {
Some(size) => obj.set_max_entries(*size),
None => {
if obj.map_type() == BPF_MAP_TYPE_PERF_EVENT_ARRAY as u32
&& obj.max_entries() == 0
{
obj.set_max_entries(
possible_cpus()
.map_err(|error| BpfError::FileError {
path: PathBuf::from(POSSIBLE_CPUS),
error,
})?
.len() as u32,
);
}
let num_cpus = || -> Result<u32, BpfError> {
Ok(possible_cpus()
.map_err(|error| BpfError::FileError {
path: PathBuf::from(POSSIBLE_CPUS),
error,
})?
.len() as u32)
};
let map_type: bpf_map_type = obj.map_type().try_into().map_err(MapError::from)?;
if let Some(max_entries) = max_entries_override(
map_type,
max_entries.get(name.as_str()).copied(),
|| obj.max_entries(),
num_cpus,
|| page_size() as u32,
)? {
obj.set_max_entries(max_entries)
}
match obj.map_type().try_into() {
Ok(BPF_MAP_TYPE_CPUMAP) => {
obj.set_value_size(if FEATURES.cpumap_prog_id() { 8 } else { 4 })
}
Ok(BPF_MAP_TYPE_DEVMAP | BPF_MAP_TYPE_DEVMAP_HASH) => {
obj.set_value_size(if FEATURES.devmap_prog_id() { 8 } else { 4 })
}
_ => (),
}
let btf_fd = btf_fd.as_deref().map(|fd| fd.as_fd());
let mut map = match obj.pinning() {
PinningType::None => MapData::create(obj, &name, btf_fd)?,
PinningType::ByName => {
let path = map_pin_path.as_ref().ok_or(BpfError::NoPinPath)?;
MapData::create_pinned(path, obj, &name, btf_fd)?
// pin maps in /sys/fs/bpf by default to align with libbpf
// behavior https://github.com/libbpf/libbpf/blob/v1.2.2/src/libbpf.c#L2161.
let path = map_pin_path
.as_deref()
.unwrap_or_else(|| Path::new("/sys/fs/bpf"));
MapData::create_pinned_by_name(path, obj, &name, btf_fd)?
}
};
let fd = map.fd;
if !map.obj.data().is_empty() && map.obj.section_kind() != BpfSectionKind::Bss {
bpf_map_update_elem_ptr(fd, &0 as *const _, map.obj.data_mut().as_mut_ptr(), 0)
.map_err(|(_, io_error)| SyscallError {
call: "bpf_map_update_elem",
io_error,
})
.map_err(MapError::from)?;
}
if map.obj.section_kind() == BpfSectionKind::Rodata {
bpf_map_freeze(fd)
.map_err(|(_, io_error)| SyscallError {
call: "bpf_map_freeze",
io_error,
})
.map_err(MapError::from)?;
}
map.finalize()?;
maps.insert(name, map);
}
@ -511,7 +513,7 @@ impl<'a> BpfLoader<'a> {
obj.relocate_maps(
maps.iter()
.map(|(s, data)| (s.as_str(), data.fd, &data.obj)),
.map(|(s, data)| (s.as_str(), data.fd().as_fd().as_raw_fd(), data.obj())),
&text_sections,
)?;
obj.relocate_calls(&text_sections)?;
@ -574,13 +576,18 @@ impl<'a> BpfLoader<'a> {
ProgramSection::SocketFilter => Program::SocketFilter(SocketFilter {
data: ProgramData::new(prog_name, obj, btf_fd, *verifier_log_level),
}),
ProgramSection::Xdp { frags, .. } => {
ProgramSection::Xdp {
frags, attach_type, ..
} => {
let mut data =
ProgramData::new(prog_name, obj, btf_fd, *verifier_log_level);
if *frags {
data.flags = BPF_F_XDP_HAS_FRAGS;
}
Program::Xdp(Xdp { data })
Program::Xdp(Xdp {
data,
attach_type: *attach_type,
})
}
ProgramSection::SkMsg => Program::SkMsg(SkMsg {
data: ProgramData::new(prog_name, obj, btf_fd, *verifier_log_level),
@ -608,10 +615,6 @@ impl<'a> BpfLoader<'a> {
ProgramSection::SchedClassifier => {
Program::SchedClassifier(SchedClassifier {
data: ProgramData::new(prog_name, obj, btf_fd, *verifier_log_level),
name: unsafe {
CString::from_vec_unchecked(Vec::from(name.clone()))
.into_boxed_c_str()
},
})
}
ProgramSection::CgroupSkb => Program::CgroupSkb(CgroupSkb {
@ -696,7 +699,7 @@ impl<'a> BpfLoader<'a> {
if !*allow_unsupported_maps {
maps.iter().try_for_each(|(_, x)| match x {
Map::Unsupported(map) => Err(BpfError::MapError(MapError::Unsupported {
map_type: map.obj.map_type(),
map_type: map.obj().map_type(),
})),
_ => Ok(()),
})?;
@ -707,12 +710,8 @@ impl<'a> BpfLoader<'a> {
}
fn parse_map(data: (String, MapData)) -> Result<(String, Map), BpfError> {
let name = data.0;
let map = data.1;
let map_type =
bpf_map_type::try_from(map.obj.map_type()).map_err(|e| MapError::InvalidMapType {
map_type: e.map_type,
})?;
let (name, map) = data;
let map_type = bpf_map_type::try_from(map.obj().map_type()).map_err(MapError::from)?;
let map = match map_type {
BPF_MAP_TYPE_ARRAY => Map::Array(map),
BPF_MAP_TYPE_PERCPU_ARRAY => Map::PerCpuArray(map),
@ -722,6 +721,7 @@ fn parse_map(data: (String, MapData)) -> Result<(String, Map), BpfError> {
BPF_MAP_TYPE_PERCPU_HASH => Map::PerCpuHashMap(map),
BPF_MAP_TYPE_LRU_PERCPU_HASH => Map::PerCpuLruHashMap(map),
BPF_MAP_TYPE_PERF_EVENT_ARRAY => Map::PerfEventArray(map),
BPF_MAP_TYPE_RINGBUF => Map::RingBuf(map),
BPF_MAP_TYPE_SOCKHASH => Map::SockHash(map),
BPF_MAP_TYPE_SOCKMAP => Map::SockMap(map),
BPF_MAP_TYPE_BLOOM_FILTER => Map::BloomFilter(map),
@ -729,6 +729,10 @@ fn parse_map(data: (String, MapData)) -> Result<(String, Map), BpfError> {
BPF_MAP_TYPE_STACK => Map::Stack(map),
BPF_MAP_TYPE_STACK_TRACE => Map::StackTraceMap(map),
BPF_MAP_TYPE_QUEUE => Map::Queue(map),
BPF_MAP_TYPE_CPUMAP => Map::CpuMap(map),
BPF_MAP_TYPE_DEVMAP => Map::DevMap(map),
BPF_MAP_TYPE_DEVMAP_HASH => Map::DevMapHash(map),
BPF_MAP_TYPE_XSKMAP => Map::XskMap(map),
m => {
warn!("The map {name} is of type {:#?} which is currently unsupported in Aya, use `allow_unsupported_maps()` to load it anyways", m);
Map::Unsupported(map)
@ -738,7 +742,106 @@ fn parse_map(data: (String, MapData)) -> Result<(String, Map), BpfError> {
Ok((name, map))
}
impl<'a> Default for BpfLoader<'a> {
/// Computes the value which should be used to override the max_entries value of the map
/// based on the user-provided override and the rules for that map type.
fn max_entries_override(
map_type: bpf_map_type,
user_override: Option<u32>,
current_value: impl Fn() -> u32,
num_cpus: impl Fn() -> Result<u32, BpfError>,
page_size: impl Fn() -> u32,
) -> Result<Option<u32>, BpfError> {
let max_entries = || user_override.unwrap_or_else(&current_value);
Ok(match map_type {
BPF_MAP_TYPE_PERF_EVENT_ARRAY if max_entries() == 0 => Some(num_cpus()?),
BPF_MAP_TYPE_RINGBUF => Some(adjust_to_page_size(max_entries(), page_size()))
.filter(|adjusted| *adjusted != max_entries())
.or(user_override),
_ => user_override,
})
}
// Adjusts the byte size of a RingBuf map to match a power-of-two multiple of the page size.
//
// This mirrors the logic used by libbpf.
// See https://github.com/libbpf/libbpf/blob/ec6f716eda43/src/libbpf.c#L2461-L2463
fn adjust_to_page_size(byte_size: u32, page_size: u32) -> u32 {
// If the byte_size is zero, return zero and let the verifier reject the map
// when it is loaded. This is the behavior of libbpf.
if byte_size == 0 {
return 0;
}
// TODO: Replace with primitive method when int_roundings (https://github.com/rust-lang/rust/issues/88581)
// is stabilized.
fn div_ceil(n: u32, rhs: u32) -> u32 {
let d = n / rhs;
let r = n % rhs;
if r > 0 && rhs > 0 {
d + 1
} else {
d
}
}
let pages_needed = div_ceil(byte_size, page_size);
page_size * pages_needed.next_power_of_two()
}
#[cfg(test)]
mod tests {
use crate::generated::bpf_map_type::*;
const PAGE_SIZE: u32 = 4096;
const NUM_CPUS: u32 = 4;
#[test]
fn test_adjust_to_page_size() {
use super::adjust_to_page_size;
[
(0, 0),
(4096, 1),
(4096, 4095),
(4096, 4096),
(8192, 4097),
(8192, 8192),
(16384, 8193),
]
.into_iter()
.for_each(|(exp, input)| assert_eq!(exp, adjust_to_page_size(input, PAGE_SIZE)))
}
#[test]
fn test_max_entries_override() {
use super::max_entries_override;
[
(BPF_MAP_TYPE_RINGBUF, Some(1), 1, Some(PAGE_SIZE)),
(BPF_MAP_TYPE_RINGBUF, None, 1, Some(PAGE_SIZE)),
(BPF_MAP_TYPE_RINGBUF, None, PAGE_SIZE, None),
(BPF_MAP_TYPE_PERF_EVENT_ARRAY, None, 1, None),
(BPF_MAP_TYPE_PERF_EVENT_ARRAY, Some(42), 1, Some(42)),
(BPF_MAP_TYPE_PERF_EVENT_ARRAY, Some(0), 1, Some(NUM_CPUS)),
(BPF_MAP_TYPE_PERF_EVENT_ARRAY, None, 0, Some(NUM_CPUS)),
(BPF_MAP_TYPE_PERF_EVENT_ARRAY, None, 42, None),
(BPF_MAP_TYPE_ARRAY, None, 1, None),
(BPF_MAP_TYPE_ARRAY, Some(2), 1, Some(2)),
]
.into_iter()
.for_each(|(map_type, user_override, current_value, exp)| {
assert_eq!(
exp,
max_entries_override(
map_type,
user_override,
|| { current_value },
|| Ok(NUM_CPUS),
|| PAGE_SIZE
)
.unwrap()
)
})
}
}
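A minimal sketch of how the loader behavior above is exercised from user code, assuming the builder methods `map_pin_path` and `set_max_entries` exist and mirror the `map_pin_path` and `max_entries` fields used in the loading loop; only `new`, `btf`, and `load_file` appear verbatim in this diff, and the object file path and map name are placeholders:

```rust
use aya::{Bpf, BpfError, BpfLoader, Btf};

// Load an object file with a custom pin path and a max_entries override.
// "prog.o" and "EVENTS" are placeholders for illustration only.
fn load_with_overrides() -> Result<Bpf, BpfError> {
    BpfLoader::new()
        // Reuse BTF from sysfs, as Bpf::load_file does.
        .btf(Btf::from_sys_fs().ok().as_ref())
        // Assumed builder: where PinningType::ByName maps get pinned instead
        // of the /sys/fs/bpf default used above.
        .map_pin_path("/sys/fs/bpf/my-app")
        // Assumed builder: per-map max_entries override; for a ring buffer the
        // value is rounded to a power-of-two multiple of the page size (see
        // max_entries_override above).
        .set_max_entries("EVENTS", 1 << 20)
        .load_file("prog.o")
}
```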
impl Default for BpfLoader<'_> {
fn default() -> Self {
BpfLoader::new()
}
@ -768,7 +871,7 @@ impl Bpf {
/// let bpf = Bpf::load_file("file.o")?;
/// # Ok::<(), aya::BpfError>(())
/// ```
pub fn load_file<P: AsRef<Path>>(path: P) -> Result<Bpf, BpfError> {
pub fn load_file<P: AsRef<Path>>(path: P) -> Result<Self, BpfError> {
BpfLoader::new()
.btf(Btf::from_sys_fs().ok().as_ref())
.load_file(path)
@ -793,7 +896,7 @@ impl Bpf {
/// let bpf = Bpf::load(&data)?;
/// # Ok::<(), aya::BpfError>(())
/// ```
pub fn load(data: &[u8]) -> Result<Bpf, BpfError> {
pub fn load(data: &[u8]) -> Result<Self, BpfError> {
BpfLoader::new()
.btf(Btf::from_sys_fs().ok().as_ref())
.load(data)
@ -854,6 +957,29 @@ impl Bpf {
self.maps.iter().map(|(name, map)| (name.as_str(), map))
}
/// A mutable iterator over all the maps.
///
/// # Examples
/// ```no_run
/// # use std::path::Path;
/// # #[derive(thiserror::Error, Debug)]
/// # enum Error {
/// # #[error(transparent)]
/// # Bpf(#[from] aya::BpfError),
/// # #[error(transparent)]
/// # Pin(#[from] aya::pin::PinError)
/// # }
/// # let mut bpf = aya::Bpf::load(&[])?;
/// # let pin_path = Path::new("/tmp/pin_path");
/// for (_, map) in bpf.maps_mut() {
/// map.pin(pin_path)?;
/// }
/// # Ok::<(), Error>(())
/// ```
pub fn maps_mut(&mut self) -> impl Iterator<Item = (&str, &mut Map)> {
self.maps.iter_mut().map(|(name, map)| (name.as_str(), map))
}
/// Returns a reference to the program with the given name.
///
/// You can use this to inspect a program and its properties. To load and attach a program, use
@ -949,10 +1075,6 @@ pub enum BpfError {
error: io::Error,
},
/// Pinning requested but no path provided
#[error("pinning requested but no path provided")]
NoPinPath,
/// Unexpected pinning type
#[error("unexpected pinning type {name}")]
UnexpectedPinningType {
@ -960,13 +1082,6 @@ pub enum BpfError {
name: u32,
},
/// Invalid path
#[error("invalid path `{error}`")]
InvalidPath {
/// The error message
error: String,
},
/// Error parsing BPF object
#[error("error parsing BPF object: {0}")]
ParseError(#[from] ParseError),

@ -37,13 +37,47 @@
html_favicon_url = "https://aya-rs.dev/assets/images/crabby.svg"
)]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![deny(clippy::all, missing_docs)]
#![deny(
clippy::all,
clippy::use_self,
absolute_paths_not_starting_with_crate,
deprecated_in_future,
elided_lifetimes_in_paths,
explicit_outlives_requirements,
ffi_unwind_calls,
keyword_idents,
//let_underscore_drop,
macro_use_extern_crate,
meta_variable_misuse,
missing_abi,
//missing_copy_implementations,
missing_docs,
non_ascii_idents,
noop_method_call,
pointer_structural_match,
rust_2021_incompatible_closure_captures,
rust_2021_incompatible_or_patterns,
rust_2021_prefixes_incompatible_syntax,
rust_2021_prelude_collisions,
single_use_lifetimes,
trivial_numeric_casts,
unreachable_pub,
//unsafe_op_in_unsafe_fn,
unstable_features,
unused_crate_dependencies,
unused_extern_crates,
unused_import_braces,
unused_lifetimes,
unused_macro_rules,
unused_qualifications,
//unused_results,
unused_tuple_struct_fields,
)]
#![allow(clippy::missing_safety_doc, clippy::len_without_is_empty)]
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate bitflags;
#![cfg_attr(
all(feature = "async_tokio", feature = "async_std"),
allow(unused_crate_dependencies)
)]
mod bpf;
use aya_obj::generated;

@ -1,6 +1,7 @@
use std::{
borrow::{Borrow, BorrowMut},
marker::PhantomData,
os::fd::AsFd as _,
};
use crate::{
@ -30,16 +31,16 @@ use crate::{
/// ```
#[doc(alias = "BPF_MAP_TYPE_ARRAY")]
pub struct Array<T, V: Pod> {
inner: T,
pub(crate) inner: T,
_v: PhantomData<V>,
}
impl<T: Borrow<MapData>, V: Pod> Array<T, V> {
pub(crate) fn new(map: T) -> Result<Array<T, V>, MapError> {
pub(crate) fn new(map: T) -> Result<Self, MapError> {
let data = map.borrow();
check_kv_size::<u32, V>(data)?;
Ok(Array {
Ok(Self {
inner: map,
_v: PhantomData,
})
@ -61,7 +62,7 @@ impl<T: Borrow<MapData>, V: Pod> Array<T, V> {
pub fn get(&self, index: &u32, flags: u64) -> Result<V, MapError> {
let data = self.inner.borrow();
check_bounds(data, *index)?;
let fd = data.fd;
let fd = data.fd().as_fd();
let value =
bpf_map_lookup_elem(fd, index, flags).map_err(|(_, io_error)| SyscallError {
@ -88,7 +89,7 @@ impl<T: BorrowMut<MapData>, V: Pod> Array<T, V> {
pub fn set(&mut self, index: u32, value: impl Borrow<V>, flags: u64) -> Result<(), MapError> {
let data = self.inner.borrow_mut();
check_bounds(data, index)?;
let fd = data.fd;
let fd = data.fd().as_fd();
bpf_map_update_elem(fd, Some(&index), value.borrow(), flags).map_err(|(_, io_error)| {
SyscallError {
call: "bpf_map_update_elem",

@ -1,6 +1,7 @@
use std::{
borrow::{Borrow, BorrowMut},
marker::PhantomData,
os::fd::AsFd as _,
};
use crate::{
@ -49,16 +50,16 @@ use crate::{
/// ```
#[doc(alias = "BPF_MAP_TYPE_PERCPU_ARRAY")]
pub struct PerCpuArray<T, V: Pod> {
inner: T,
pub(crate) inner: T,
_v: PhantomData<V>,
}
impl<T: Borrow<MapData>, V: Pod> PerCpuArray<T, V> {
pub(crate) fn new(map: T) -> Result<PerCpuArray<T, V>, MapError> {
pub(crate) fn new(map: T) -> Result<Self, MapError> {
let data = map.borrow();
check_kv_size::<u32, V>(data)?;
Ok(PerCpuArray {
Ok(Self {
inner: map,
_v: PhantomData,
})
@ -80,7 +81,7 @@ impl<T: Borrow<MapData>, V: Pod> PerCpuArray<T, V> {
pub fn get(&self, index: &u32, flags: u64) -> Result<PerCpuValues<V>, MapError> {
let data = self.inner.borrow();
check_bounds(data, *index)?;
let fd = data.fd;
let fd = data.fd().as_fd();
let value = bpf_map_lookup_elem_per_cpu(fd, index, flags).map_err(|(_, io_error)| {
SyscallError {
@ -108,7 +109,7 @@ impl<T: BorrowMut<MapData>, V: Pod> PerCpuArray<T, V> {
pub fn set(&mut self, index: u32, values: PerCpuValues<V>, flags: u64) -> Result<(), MapError> {
let data = self.inner.borrow_mut();
check_bounds(data, index)?;
let fd = data.fd;
let fd = data.fd().as_fd();
bpf_map_update_elem_per_cpu(fd, &index, &values, flags).map_err(|(_, io_error)| {
SyscallError {

@ -48,15 +48,15 @@ use crate::{
/// ```
#[doc(alias = "BPF_MAP_TYPE_PROG_ARRAY")]
pub struct ProgramArray<T> {
inner: T,
pub(crate) inner: T,
}
impl<T: Borrow<MapData>> ProgramArray<T> {
pub(crate) fn new(map: T) -> Result<ProgramArray<T>, MapError> {
pub(crate) fn new(map: T) -> Result<Self, MapError> {
let data = map.borrow();
check_kv_size::<u32, RawFd>(data)?;
Ok(ProgramArray { inner: map })
Ok(Self { inner: map })
}
/// An iterator over the indices of the array that point to a program. The iterator item type
@ -74,7 +74,7 @@ impl<T: BorrowMut<MapData>> ProgramArray<T> {
pub fn set(&mut self, index: u32, program: &ProgramFd, flags: u64) -> Result<(), MapError> {
let data = self.inner.borrow_mut();
check_bounds(data, index)?;
let fd = data.fd;
let fd = data.fd().as_fd();
let prog_fd = program.as_fd();
let prog_fd = prog_fd.as_raw_fd();
@ -94,7 +94,7 @@ impl<T: BorrowMut<MapData>> ProgramArray<T> {
pub fn clear_index(&mut self, index: &u32) -> Result<(), MapError> {
let data = self.inner.borrow_mut();
check_bounds(data, *index)?;
let fd = self.inner.borrow_mut().fd;
let fd = data.fd().as_fd();
bpf_map_delete_elem(fd, index)
.map(|_| ())

@ -2,6 +2,7 @@
use std::{
borrow::{Borrow, BorrowMut},
marker::PhantomData,
os::fd::AsFd as _,
};
use crate::{
@ -35,16 +36,16 @@ use crate::{
#[doc(alias = "BPF_MAP_TYPE_BLOOM_FILTER")]
#[derive(Debug)]
pub struct BloomFilter<T, V: Pod> {
inner: T,
pub(crate) inner: T,
_v: PhantomData<V>,
}
impl<T: Borrow<MapData>, V: Pod> BloomFilter<T, V> {
pub(crate) fn new(map: T) -> Result<BloomFilter<T, V>, MapError> {
pub(crate) fn new(map: T) -> Result<Self, MapError> {
let data = map.borrow();
check_v_size::<V>(data)?;
Ok(BloomFilter {
Ok(Self {
inner: map,
_v: PhantomData,
})
@ -52,7 +53,7 @@ impl<T: Borrow<MapData>, V: Pod> BloomFilter<T, V> {
/// Query the existence of the element.
pub fn contains(&self, mut value: &V, flags: u64) -> Result<(), MapError> {
let fd = self.inner.borrow().fd;
let fd = self.inner.borrow().fd().as_fd();
bpf_map_lookup_elem_ptr::<u32, _>(fd, None, &mut value, flags)
.map_err(|(_, io_error)| SyscallError {
@ -67,7 +68,7 @@ impl<T: Borrow<MapData>, V: Pod> BloomFilter<T, V> {
impl<T: BorrowMut<MapData>, V: Pod> BloomFilter<T, V> {
/// Inserts a value into the map.
pub fn insert(&mut self, value: impl Borrow<V>, flags: u64) -> Result<(), MapError> {
let fd = self.inner.borrow_mut().fd;
let fd = self.inner.borrow_mut().fd().as_fd();
bpf_map_push_elem(fd, value.borrow(), flags).map_err(|(_, io_error)| SyscallError {
call: "bpf_map_push_elem",
io_error,
@ -78,6 +79,11 @@ impl<T: BorrowMut<MapData>, V: Pod> BloomFilter<T, V> {
#[cfg(test)]
mod tests {
use std::{ffi::c_long, io};
use assert_matches::assert_matches;
use libc::{EFAULT, ENOENT};
use super::*;
use crate::{
bpf_map_def,
@ -89,9 +95,6 @@ mod tests {
obj::{self, maps::LegacyMap, BpfSectionKind},
sys::{override_syscall, SysResult, Syscall},
};
use assert_matches::assert_matches;
use libc::{EFAULT, ENOENT};
use std::{ffi::c_long, io};
fn new_obj_map() -> obj::Map {
obj::Map::Legacy(LegacyMap {

@ -1,6 +1,7 @@
use std::{
borrow::{Borrow, BorrowMut},
marker::PhantomData,
os::fd::AsFd as _,
};
use crate::{
@ -33,17 +34,17 @@ use crate::{
#[doc(alias = "BPF_MAP_TYPE_LRU_HASH")]
#[derive(Debug)]
pub struct HashMap<T, K, V> {
inner: T,
pub(crate) inner: T,
_k: PhantomData<K>,
_v: PhantomData<V>,
}
impl<T: Borrow<MapData>, K: Pod, V: Pod> HashMap<T, K, V> {
pub(crate) fn new(map: T) -> Result<HashMap<T, K, V>, MapError> {
pub(crate) fn new(map: T) -> Result<Self, MapError> {
let data = map.borrow();
check_kv_size::<K, V>(data)?;
Ok(HashMap {
Ok(Self {
inner: map,
_k: PhantomData,
_v: PhantomData,
@ -52,7 +53,7 @@ impl<T: Borrow<MapData>, K: Pod, V: Pod> HashMap<T, K, V> {
/// Returns a copy of the value associated with the key.
pub fn get(&self, key: &K, flags: u64) -> Result<V, MapError> {
let fd = self.inner.borrow().fd;
let fd = self.inner.borrow().fd().as_fd();
let value = bpf_map_lookup_elem(fd, key, flags).map_err(|(_, io_error)| SyscallError {
call: "bpf_map_lookup_elem",
io_error,
@ -96,7 +97,7 @@ impl<T: Borrow<MapData>, K: Pod, V: Pod> IterableMap<K, V> for HashMap<T, K, V>
}
fn get(&self, key: &K) -> Result<V, MapError> {
HashMap::get(self, key, 0)
Self::get(self, key, 0)
}
}
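A minimal usage sketch of the hash map handle above, assuming a write-side `insert` method mirroring the crate's `hash_map::insert` helper and the `get` signature shown earlier; the map name is hypothetical:

```rust
use aya::{
    maps::{HashMap, MapError},
    Bpf,
};

// Update and read back a per-PID counter. "PID_COUNTS" is a placeholder map
// name; the eBPF program would declare the actual map.
fn bump_and_read(bpf: &mut Bpf, pid: u32) -> Result<u64, MapError> {
    let mut counts: HashMap<_, u32, u64> =
        HashMap::try_from(bpf.map_mut("PID_COUNTS").unwrap())?;
    // Assumed write-side method mirroring hash_map::insert; flags are 0 as in
    // the other examples in this crate.
    counts.insert(pid, 0u64, 0)?;
    // Read side, as in `get` above: flags are passed straight to the syscall.
    counts.get(&pid, 0)
}
```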
@ -107,44 +108,22 @@ mod tests {
use assert_matches::assert_matches;
use libc::{EFAULT, ENOENT};
use super::{
super::test_utils::{self, new_map},
*,
};
use crate::{
bpf_map_def,
generated::{
bpf_attr, bpf_cmd,
bpf_map_type::{BPF_MAP_TYPE_HASH, BPF_MAP_TYPE_LRU_HASH},
},
maps::{Map, MapData},
obj::{self, maps::LegacyMap, BpfSectionKind},
maps::Map,
obj,
sys::{override_syscall, SysResult, Syscall},
};
use super::*;
fn new_obj_map() -> obj::Map {
obj::Map::Legacy(LegacyMap {
def: bpf_map_def {
map_type: BPF_MAP_TYPE_HASH as u32,
key_size: 4,
value_size: 4,
max_entries: 1024,
..Default::default()
},
section_index: 0,
section_kind: BpfSectionKind::Maps,
data: Vec::new(),
symbol_index: None,
})
}
fn new_map(obj: obj::Map) -> MapData {
override_syscall(|call| match call {
Syscall::Bpf {
cmd: bpf_cmd::BPF_MAP_CREATE,
..
} => Ok(1337),
call => panic!("unexpected syscall {:?}", call),
});
MapData::create(obj, "foo", None).unwrap()
test_utils::new_obj_map(BPF_MAP_TYPE_HASH)
}
fn sys_error(value: i32) -> SysResult<c_long> {
@ -213,21 +192,10 @@ mod tests {
#[test]
fn test_try_from_ok_lru() {
let map = new_map(obj::Map::Legacy(LegacyMap {
def: bpf_map_def {
map_type: BPF_MAP_TYPE_LRU_HASH as u32,
key_size: 4,
value_size: 4,
max_entries: 1024,
..Default::default()
},
section_index: 0,
section_kind: BpfSectionKind::Maps,
symbol_index: None,
data: Vec::new(),
}));
let map = Map::HashMap(map);
let map_data = || new_map(test_utils::new_obj_map(BPF_MAP_TYPE_LRU_HASH));
let map = Map::HashMap(map_data());
assert!(HashMap::<_, u32, u32>::try_from(&map).is_ok());
let map = Map::LruHashMap(map_data());
assert!(HashMap::<_, u32, u32>::try_from(&map).is_ok())
}

@ -1,4 +1,6 @@
//! Hash map types.
use std::os::fd::AsFd as _;
use crate::{
maps::MapError,
sys::{bpf_map_delete_elem, bpf_map_update_elem, SyscallError},
@ -20,7 +22,7 @@ pub(crate) fn insert<K: Pod, V: Pod>(
value: &V,
flags: u64,
) -> Result<(), MapError> {
let fd = map.fd;
let fd = map.fd().as_fd();
bpf_map_update_elem(fd, Some(key), value, flags).map_err(|(_, io_error)| SyscallError {
call: "bpf_map_update_elem",
io_error,
@ -30,7 +32,7 @@ pub(crate) fn insert<K: Pod, V: Pod>(
}
pub(crate) fn remove<K: Pod>(map: &MapData, key: &K) -> Result<(), MapError> {
let fd = map.fd;
let fd = map.fd().as_fd();
bpf_map_delete_elem(fd, key)
.map(|_| ())
.map_err(|(_, io_error)| {
@ -41,3 +43,41 @@ pub(crate) fn remove<K: Pod>(map: &MapData, key: &K) -> Result<(), MapError> {
.into()
})
}
#[cfg(test)]
mod test_utils {
use crate::{
bpf_map_def,
generated::{bpf_cmd, bpf_map_type},
maps::MapData,
obj::{self, maps::LegacyMap, BpfSectionKind},
sys::{override_syscall, Syscall},
};
pub(super) fn new_map(obj: obj::Map) -> MapData {
override_syscall(|call| match call {
Syscall::Bpf {
cmd: bpf_cmd::BPF_MAP_CREATE,
..
} => Ok(1337),
call => panic!("unexpected syscall {:?}", call),
});
MapData::create(obj, "foo", None).unwrap()
}
pub(super) fn new_obj_map(map_type: bpf_map_type) -> obj::Map {
obj::Map::Legacy(LegacyMap {
def: bpf_map_def {
map_type: map_type as u32,
key_size: 4,
value_size: 4,
max_entries: 1024,
..Default::default()
},
section_index: 0,
section_kind: BpfSectionKind::Maps,
data: Vec::new(),
symbol_index: None,
})
}
}

@ -2,6 +2,7 @@
use std::{
borrow::{Borrow, BorrowMut},
marker::PhantomData,
os::fd::AsFd as _,
};
use crate::{
@ -42,17 +43,17 @@ use crate::{
#[doc(alias = "BPF_MAP_TYPE_LRU_PERCPU_HASH")]
#[doc(alias = "BPF_MAP_TYPE_PERCPU_HASH")]
pub struct PerCpuHashMap<T, K: Pod, V: Pod> {
inner: T,
pub(crate) inner: T,
_k: PhantomData<K>,
_v: PhantomData<V>,
}
impl<T: Borrow<MapData>, K: Pod, V: Pod> PerCpuHashMap<T, K, V> {
pub(crate) fn new(map: T) -> Result<PerCpuHashMap<T, K, V>, MapError> {
pub(crate) fn new(map: T) -> Result<Self, MapError> {
let data = map.borrow();
check_kv_size::<K, V>(data)?;
Ok(PerCpuHashMap {
Ok(Self {
inner: map,
_k: PhantomData,
_v: PhantomData,
@ -61,7 +62,7 @@ impl<T: Borrow<MapData>, K: Pod, V: Pod> PerCpuHashMap<T, K, V> {
/// Returns a slice of values - one for each CPU - associated with the key.
pub fn get(&self, key: &K, flags: u64) -> Result<PerCpuValues<V>, MapError> {
let fd = self.inner.borrow().fd;
let fd = self.inner.borrow().fd().as_fd();
let values =
bpf_map_lookup_elem_per_cpu(fd, key, flags).map_err(|(_, io_error)| SyscallError {
call: "bpf_map_lookup_elem",
@ -118,7 +119,7 @@ impl<T: BorrowMut<MapData>, K: Pod, V: Pod> PerCpuHashMap<T, K, V> {
values: PerCpuValues<V>,
flags: u64,
) -> Result<(), MapError> {
let fd = self.inner.borrow_mut().fd;
let fd = self.inner.borrow_mut().fd().as_fd();
bpf_map_update_elem_per_cpu(fd, key.borrow(), &values, flags).map_err(
|(_, io_error)| SyscallError {
call: "bpf_map_update_elem",
@ -143,6 +144,32 @@ impl<T: Borrow<MapData>, K: Pod, V: Pod> IterableMap<K, PerCpuValues<V>>
}
fn get(&self, key: &K) -> Result<PerCpuValues<V>, MapError> {
PerCpuHashMap::get(self, key, 0)
Self::get(self, key, 0)
}
}
#[cfg(test)]
mod tests {
use super::{super::test_utils, *};
use crate::{
generated::bpf_map_type::{BPF_MAP_TYPE_LRU_PERCPU_HASH, BPF_MAP_TYPE_PERCPU_HASH},
maps::Map,
};
#[test]
fn test_try_from_ok() {
let map = Map::PerCpuHashMap(test_utils::new_map(test_utils::new_obj_map(
BPF_MAP_TYPE_PERCPU_HASH,
)));
assert!(PerCpuHashMap::<_, u32, u32>::try_from(&map).is_ok())
}
#[test]
fn test_try_from_ok_lru() {
let map_data =
|| test_utils::new_map(test_utils::new_obj_map(BPF_MAP_TYPE_LRU_PERCPU_HASH));
let map = Map::PerCpuHashMap(map_data());
assert!(PerCpuHashMap::<_, u32, u32>::try_from(&map).is_ok());
let map = Map::PerCpuLruHashMap(map_data());
assert!(PerCpuHashMap::<_, u32, u32>::try_from(&map).is_ok())
}
}
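A minimal read-side sketch of the per-CPU semantics above, assuming `PerCpuValues` dereferences to a slice with one entry per possible CPU and that `try_from` accepts a shared `&Map` as in the tests above; the map name is hypothetical:

```rust
use aya::{
    maps::{MapError, PerCpuHashMap},
    Bpf,
};

// Sum a per-CPU counter across all possible CPUs. "PACKET_COUNTS" is a
// placeholder map name declared by the eBPF program.
fn total_packets(bpf: &Bpf, key: u32) -> Result<u64, MapError> {
    let counts = PerCpuHashMap::<_, u32, u64>::try_from(bpf.map("PACKET_COUNTS").unwrap())?;
    // `get` returns one value per possible CPU; assumed to deref to a slice.
    let per_cpu = counts.get(&key, 0)?;
    Ok(per_cpu.iter().sum())
}
```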

@ -2,6 +2,7 @@
use std::{
borrow::{Borrow, BorrowMut},
marker::PhantomData,
os::fd::AsFd as _,
};
use crate::{
@ -47,7 +48,7 @@ use crate::{
#[doc(alias = "BPF_MAP_TYPE_LPM_TRIE")]
#[derive(Debug)]
pub struct LpmTrie<T, K, V> {
inner: T,
pub(crate) inner: T,
_k: PhantomData<K>,
_v: PhantomData<V>,
}
@ -113,11 +114,11 @@ impl<K: Pod> Clone for Key<K> {
unsafe impl<K: Pod> Pod for Key<K> {}
impl<T: Borrow<MapData>, K: Pod, V: Pod> LpmTrie<T, K, V> {
pub(crate) fn new(map: T) -> Result<LpmTrie<T, K, V>, MapError> {
pub(crate) fn new(map: T) -> Result<Self, MapError> {
let data = map.borrow();
check_kv_size::<Key<K>, V>(data)?;
Ok(LpmTrie {
Ok(Self {
inner: map,
_k: PhantomData,
_v: PhantomData,
@ -126,7 +127,7 @@ impl<T: Borrow<MapData>, K: Pod, V: Pod> LpmTrie<T, K, V> {
/// Returns a copy of the value associated with the longest prefix matching key in the LpmTrie.
pub fn get(&self, key: &Key<K>, flags: u64) -> Result<V, MapError> {
let fd = self.inner.borrow().fd;
let fd = self.inner.borrow().fd().as_fd();
let value = bpf_map_lookup_elem(fd, key, flags).map_err(|(_, io_error)| SyscallError {
call: "bpf_map_lookup_elem",
io_error,
@ -155,7 +156,7 @@ impl<T: BorrowMut<MapData>, K: Pod, V: Pod> LpmTrie<T, K, V> {
value: impl Borrow<V>,
flags: u64,
) -> Result<(), MapError> {
let fd = self.inner.borrow().fd;
let fd = self.inner.borrow().fd().as_fd();
bpf_map_update_elem(fd, Some(key), value.borrow(), flags).map_err(|(_, io_error)| {
SyscallError {
call: "bpf_map_update_elem",
@ -170,7 +171,7 @@ impl<T: BorrowMut<MapData>, K: Pod, V: Pod> LpmTrie<T, K, V> {
///
/// Both the prefix and data must match exactly - this method does not do a longest prefix match.
pub fn remove(&mut self, key: &Key<K>) -> Result<(), MapError> {
let fd = self.inner.borrow().fd;
let fd = self.inner.borrow().fd().as_fd();
bpf_map_delete_elem(fd, key)
.map(|_| ())
.map_err(|(_, io_error)| {
@ -195,6 +196,11 @@ impl<T: Borrow<MapData>, K: Pod, V: Pod> IterableMap<Key<K>, V> for LpmTrie<T, K
#[cfg(test)]
mod tests {
use std::{ffi::c_long, io, mem, net::Ipv4Addr};
use assert_matches::assert_matches;
use libc::{EFAULT, ENOENT};
use super::*;
use crate::{
bpf_map_def,
@ -206,9 +212,6 @@ mod tests {
obj::{self, maps::LegacyMap, BpfSectionKind},
sys::{override_syscall, SysResult, Syscall},
};
use assert_matches::assert_matches;
use libc::{EFAULT, ENOENT};
use std::{ffi::c_long, io, mem, net::Ipv4Addr};
fn new_obj_map() -> obj::Map {
obj::Map::Legacy(LegacyMap {

File diff suppressed because it is too large

@ -1,8 +1,4 @@
use bytes::BytesMut;
use std::{
borrow::{Borrow, BorrowMut},
os::fd::{AsRawFd as _, RawFd},
};
use std::borrow::{Borrow, BorrowMut};
// See https://doc.rust-lang.org/cargo/reference/features.html#mutually-exclusive-features.
//
@ -10,7 +6,7 @@ use std::{
// "async-async-std". Presently we arbitrarily choose tokio over async-std when both are requested.
#[cfg(all(not(feature = "async_tokio"), feature = "async_std"))]
use async_io::Async;
use bytes::BytesMut;
#[cfg(feature = "async_tokio")]
use tokio::io::unix::AsyncFd;
@ -52,7 +48,6 @@ use crate::maps::{
/// # let mut bpf = aya::Bpf::load(&[])?;
/// use aya::maps::perf::{AsyncPerfEventArray, PerfBufferError};
/// use aya::util::online_cpus;
/// use futures::future;
/// use bytes::BytesMut;
/// use tokio::task; // or async_std::task
///
@ -93,7 +88,7 @@ pub struct AsyncPerfEventArray<T> {
perf_map: PerfEventArray<T>,
}
impl<T: BorrowMut<MapData> + Borrow<MapData>> AsyncPerfEventArray<T> {
impl<T: BorrowMut<MapData>> AsyncPerfEventArray<T> {
/// Opens the perf buffer at the given index.
///
/// The returned buffer will receive all the events eBPF programs send at the given index.
@ -104,22 +99,17 @@ impl<T: BorrowMut<MapData> + Borrow<MapData>> AsyncPerfEventArray<T> {
) -> Result<AsyncPerfEventArrayBuffer<T>, PerfBufferError> {
let Self { perf_map } = self;
let buf = perf_map.open(index, page_count)?;
let fd = buf.as_raw_fd();
Ok(AsyncPerfEventArrayBuffer {
buf,
#[cfg(feature = "async_tokio")]
async_tokio_fd: AsyncFd::new(fd)?,
#[cfg(all(not(feature = "async_tokio"), feature = "async_std"))]
async_std_fd: Async::new(fd)?,
})
#[cfg(feature = "async_tokio")]
let buf = AsyncFd::new(buf)?;
#[cfg(all(not(feature = "async_tokio"), feature = "async_std"))]
let buf = Async::new(buf)?;
Ok(AsyncPerfEventArrayBuffer { buf })
}
}
impl<T: Borrow<MapData>> AsyncPerfEventArray<T> {
pub(crate) fn new(map: T) -> Result<AsyncPerfEventArray<T>, MapError> {
Ok(AsyncPerfEventArray {
pub(crate) fn new(map: T) -> Result<Self, MapError> {
Ok(Self {
perf_map: PerfEventArray::new(map)?,
})
}
@ -132,17 +122,18 @@ impl<T: Borrow<MapData>> AsyncPerfEventArray<T> {
///
/// See the [`AsyncPerfEventArray` documentation](AsyncPerfEventArray) for an overview of how to
/// use perf buffers.
pub struct AsyncPerfEventArrayBuffer<T> {
pub struct AsyncPerfEventArrayBuffer<T: BorrowMut<MapData>> {
#[cfg(not(any(feature = "async_tokio", feature = "async_std")))]
buf: PerfEventArrayBuffer<T>,
#[cfg(feature = "async_tokio")]
async_tokio_fd: AsyncFd<RawFd>,
buf: AsyncFd<PerfEventArrayBuffer<T>>,
#[cfg(all(not(feature = "async_tokio"), feature = "async_std"))]
async_std_fd: Async<RawFd>,
buf: Async<PerfEventArrayBuffer<T>>,
}
impl<T: BorrowMut<MapData> + Borrow<MapData>> AsyncPerfEventArrayBuffer<T> {
impl<T: BorrowMut<MapData>> AsyncPerfEventArrayBuffer<T> {
/// Reads events from the buffer.
///
/// This method reads events into the provided slice of buffers, filling
@ -156,21 +147,20 @@ impl<T: BorrowMut<MapData> + Borrow<MapData>> AsyncPerfEventArrayBuffer<T> {
&mut self,
buffers: &mut [BytesMut],
) -> Result<Events, PerfBufferError> {
let Self {
buf,
#[cfg(feature = "async_tokio")]
async_tokio_fd,
#[cfg(all(not(feature = "async_tokio"), feature = "async_std"))]
async_std_fd,
} = self;
let Self { buf } = self;
loop {
#[cfg(feature = "async_tokio")]
let mut guard = async_tokio_fd.readable_mut().await?;
let mut guard = buf.readable_mut().await?;
#[cfg(feature = "async_tokio")]
let buf = guard.get_inner_mut();
#[cfg(all(not(feature = "async_tokio"), feature = "async_std"))]
if !buf.readable() {
async_std_fd.readable().await?;
}
let buf = {
if !buf.get_ref().readable() {
buf.readable().await?;
}
unsafe { buf.get_mut() }
};
let events = buf.read_events(buffers)?;
const EMPTY: Events = Events { read: 0, lost: 0 };

@ -1,6 +1,7 @@
//! Ring buffer types used to receive events from eBPF programs using the linux `perf` API.
//! Ring buffer types used to receive events from eBPF programs using the linux
//! `perf` API.
//!
//! See the [`PerfEventArray`](crate::maps::PerfEventArray) and [`AsyncPerfEventArray`](crate::maps::perf::AsyncPerfEventArray).
//! See [`PerfEventArray`] and [`AsyncPerfEventArray`].
#[cfg(any(feature = "async_tokio", feature = "async_std"))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "async_tokio", feature = "async_std"))))]
mod async_perf_event_array;

@ -7,7 +7,7 @@ use std::{
};
use bytes::BytesMut;
use libc::{c_int, munmap, MAP_FAILED, MAP_SHARED, PROT_READ, PROT_WRITE};
use libc::{munmap, MAP_FAILED, MAP_SHARED, PROT_READ, PROT_WRITE};
use thiserror::Error;
use crate::{
@ -15,7 +15,7 @@ use crate::{
perf_event_header, perf_event_mmap_page,
perf_event_type::{PERF_RECORD_LOST, PERF_RECORD_SAMPLE},
},
sys::{perf_event_ioctl, perf_event_open_bpf, SysResult},
sys::{mmap, perf_event_ioctl, perf_event_open_bpf, SysResult},
PERF_EVENT_IOC_DISABLE, PERF_EVENT_IOC_ENABLE,
};
@ -96,7 +96,7 @@ impl PerfBuffer {
cpu_id: u32,
page_size: usize,
page_count: usize,
) -> Result<PerfBuffer, PerfBufferError> {
) -> Result<Self, PerfBufferError> {
if !page_count.is_power_of_two() {
return Err(PerfBufferError::InvalidPageCount { page_count });
}
@ -120,7 +120,7 @@ impl PerfBuffer {
});
}
let perf_buf = PerfBuffer {
let perf_buf = Self {
buf: AtomicPtr::new(buf as *mut perf_event_mmap_page),
fd,
size,
@ -282,49 +282,32 @@ impl Drop for PerfBuffer {
}
}
#[cfg_attr(test, allow(unused_variables))]
unsafe fn mmap(
addr: *mut c_void,
len: usize,
prot: c_int,
flags: c_int,
fd: BorrowedFd<'_>,
offset: libc::off_t,
) -> *mut c_void {
#[cfg(not(test))]
return libc::mmap(addr, len, prot, flags, fd.as_raw_fd(), offset);
#[cfg(test)]
use crate::sys::TEST_MMAP_RET;
#[cfg(test)]
TEST_MMAP_RET.with(|ret| *ret.borrow())
}
#[derive(Debug)]
#[repr(C)]
struct Sample {
header: perf_event_header,
pub size: u32,
size: u32,
}
#[repr(C)]
#[derive(Debug)]
struct LostSamples {
header: perf_event_header,
pub id: u64,
pub count: u64,
id: u64,
count: u64,
}
#[cfg(test)]
mod tests {
use std::{fmt::Debug, mem};
use assert_matches::assert_matches;
use super::*;
use crate::{
generated::perf_event_mmap_page,
sys::{override_syscall, Syscall, TEST_MMAP_RET},
};
use assert_matches::assert_matches;
use std::{fmt::Debug, mem};
const PAGE_SIZE: usize = 4096;
union MMappedBuf {

@ -4,7 +4,7 @@
use std::{
borrow::{Borrow, BorrowMut},
ops::Deref,
os::fd::{AsRawFd, RawFd},
os::fd::{AsFd, AsRawFd, BorrowedFd, RawFd},
sync::Arc,
};
@ -31,7 +31,7 @@ pub struct PerfEventArrayBuffer<T> {
buf: PerfBuffer,
}
impl<T: BorrowMut<MapData> + Borrow<MapData>> PerfEventArrayBuffer<T> {
impl<T: BorrowMut<MapData>> PerfEventArrayBuffer<T> {
/// Returns true if the buffer contains events that haven't been read.
pub fn readable(&self) -> bool {
self.buf.readable()
@ -55,7 +55,13 @@ impl<T: BorrowMut<MapData> + Borrow<MapData>> PerfEventArrayBuffer<T> {
}
}
impl<T: BorrowMut<MapData> + Borrow<MapData>> AsRawFd for PerfEventArrayBuffer<T> {
impl<T: BorrowMut<MapData>> AsFd for PerfEventArrayBuffer<T> {
fn as_fd(&self) -> BorrowedFd<'_> {
self.buf.as_fd()
}
}
impl<T: BorrowMut<MapData>> AsRawFd for PerfEventArrayBuffer<T> {
fn as_raw_fd(&self) -> RawFd {
self.buf.as_raw_fd()
}
@ -161,15 +167,15 @@ pub struct PerfEventArray<T> {
}
impl<T: Borrow<MapData>> PerfEventArray<T> {
pub(crate) fn new(map: T) -> Result<PerfEventArray<T>, MapError> {
Ok(PerfEventArray {
pub(crate) fn new(map: T) -> Result<Self, MapError> {
Ok(Self {
map: Arc::new(map),
page_size: page_size(),
})
}
}
impl<T: BorrowMut<MapData> + Borrow<MapData>> PerfEventArray<T> {
impl<T: BorrowMut<MapData>> PerfEventArray<T> {
/// Opens the perf buffer at the given index.
///
/// The returned buffer will receive all the events eBPF programs send at the given index.
@ -180,9 +186,8 @@ impl<T: BorrowMut<MapData> + Borrow<MapData>> PerfEventArray<T> {
) -> Result<PerfEventArrayBuffer<T>, PerfBufferError> {
// FIXME: keep track of open buffers
// this cannot fail as new() checks that the fd is open
let map_data: &MapData = self.map.deref().borrow();
let map_fd = map_data.fd;
let map_fd = map_data.fd().as_fd();
let buf = PerfBuffer::open(index, self.page_size, page_count.unwrap_or(2))?;
bpf_map_update_elem(map_fd, Some(&index), &buf.as_raw_fd(), 0)
.map_err(|(_, io_error)| io_error)?;

@ -2,6 +2,7 @@
use std::{
borrow::{Borrow, BorrowMut},
marker::PhantomData,
os::fd::AsFd as _,
};
use crate::{
@ -29,16 +30,16 @@ use crate::{
/// ```
#[doc(alias = "BPF_MAP_TYPE_QUEUE")]
pub struct Queue<T, V: Pod> {
inner: T,
pub(crate) inner: T,
_v: PhantomData<V>,
}
impl<T: Borrow<MapData>, V: Pod> Queue<T, V> {
pub(crate) fn new(map: T) -> Result<Queue<T, V>, MapError> {
pub(crate) fn new(map: T) -> Result<Self, MapError> {
let data = map.borrow();
check_kv_size::<(), V>(data)?;
Ok(Queue {
Ok(Self {
inner: map,
_v: PhantomData,
})
@ -60,7 +61,7 @@ impl<T: BorrowMut<MapData>, V: Pod> Queue<T, V> {
/// Returns [`MapError::ElementNotFound`] if the queue is empty, [`MapError::SyscallError`]
/// if `bpf_map_lookup_and_delete_elem` fails.
pub fn pop(&mut self, flags: u64) -> Result<V, MapError> {
let fd = self.inner.borrow().fd;
let fd = self.inner.borrow().fd().as_fd();
let value = bpf_map_lookup_and_delete_elem::<u32, _>(fd, None, flags).map_err(
|(_, io_error)| SyscallError {
@ -77,7 +78,7 @@ impl<T: BorrowMut<MapData>, V: Pod> Queue<T, V> {
///
/// [`MapError::SyscallError`] if `bpf_map_update_elem` fails.
pub fn push(&mut self, value: impl Borrow<V>, flags: u64) -> Result<(), MapError> {
let fd = self.inner.borrow().fd;
let fd = self.inner.borrow().fd().as_fd();
bpf_map_push_elem(fd, value.borrow(), flags).map_err(|(_, io_error)| SyscallError {
call: "bpf_map_push_elem",
io_error,

@ -0,0 +1,458 @@
//! A [ring buffer map][ringbuf] that may be used to receive events from eBPF programs.
//! As of Linux 5.8, this is the preferred way to transfer per-event data from eBPF
//! programs to userspace.
//!
//! [ringbuf]: https://www.kernel.org/doc/html/latest/bpf/ringbuf.html
use std::{
borrow::Borrow,
ffi::{c_int, c_void},
fmt::{self, Debug, Formatter},
io, mem,
ops::Deref,
os::fd::{AsFd as _, AsRawFd, BorrowedFd, RawFd},
ptr,
ptr::NonNull,
slice,
sync::atomic::{AtomicU32, AtomicUsize, Ordering},
};
use libc::{munmap, off_t, MAP_FAILED, MAP_SHARED, PROT_READ, PROT_WRITE};
use crate::{
generated::{BPF_RINGBUF_BUSY_BIT, BPF_RINGBUF_DISCARD_BIT, BPF_RINGBUF_HDR_SZ},
maps::{MapData, MapError},
sys::{mmap, SyscallError},
util::page_size,
};
/// A map that can be used to receive events from eBPF programs.
///
/// This is similar to [`crate::maps::PerfEventArray`], but different in a few ways:
/// * It's shared across all CPUs, which allows a strong ordering between events.
/// * Data notifications are delivered precisely instead of being sampled for every N events; the
/// eBPF program can also control notification delivery if sampling is desired for performance
/// reasons. By default, a notification will be sent if the consumer is caught up at the time of
/// committing. The eBPF program can use the `BPF_RB_NO_WAKEUP` or `BPF_RB_FORCE_WAKEUP` flags to
/// control this behavior.
/// * On the eBPF side, it supports the reverse-commit pattern where the event can be directly
/// written into the ring without copying from a temporary location.
/// * Dropped sample notifications go to the eBPF program as the return value of `reserve`/`output`,
/// and not the userspace reader. This might require extra code to handle, but allows for more
/// flexible schemes to handle dropped samples.
///
/// To receive events you need to:
/// * Construct [`RingBuf`] using [`RingBuf::try_from`].
/// * Call [`RingBuf::next`] to poll events from the [`RingBuf`].
///
/// To receive async notifications of data availability, you may construct an
/// [`tokio::io::unix::AsyncFd`] from the [`RingBuf`]'s file descriptor and poll it for readiness.
///
/// # Minimum kernel version
///
/// The minimum kernel version required to use this feature is 5.8.
///
/// # Examples
///
/// ```no_run
/// # struct PollFd<T>(T);
/// # fn poll_fd<T>(t: T) -> PollFd<T> { PollFd(t) }
/// # impl<T> PollFd<T> {
/// # fn readable(&mut self) -> Guard<'_, T> { Guard(self) }
/// # }
/// # struct Guard<'a, T>(&'a mut PollFd<T>);
/// # impl<T> Guard<'_, T> {
/// # fn inner_mut(&mut self) -> &mut T {
/// # let Guard(PollFd(t)) = self;
/// # t
/// # }
/// # fn clear_ready(&mut self) {}
/// # }
/// # let mut bpf = aya::Bpf::load(&[])?;
/// use aya::maps::RingBuf;
/// use std::convert::TryFrom;
///
/// let ring_buf = RingBuf::try_from(bpf.map_mut("ARRAY").unwrap())?;
/// let mut poll = poll_fd(ring_buf);
/// loop {
/// let mut guard = poll.readable();
/// let ring_buf = guard.inner_mut();
/// while let Some(item) = ring_buf.next() {
/// println!("Received: {:?}", item);
/// }
/// guard.clear_ready();
/// }
/// # Ok::<(), aya::BpfError>(())
/// ```
///
/// # Polling
///
/// In the example above, the implementations of poll_fd(), poll.readable(), guard.inner_mut(), and
/// guard.clear_ready() are not given. RingBuf implements the AsRawFd trait, so you can implement
/// polling using any crate that can poll file descriptors, like epoll, mio, etc. The example API
/// above is modeled on that of [`tokio::io::unix::AsyncFd`].
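A minimal async sketch of the pattern described above, assuming tokio's `AsyncFd` API (`new`, `readable_mut`, `get_inner_mut`, `clear_ready`); the map name is hypothetical, and `RingBuf` only needs its `AsRawFd` impl (shown below) for this to work:

```rust
use aya::maps::RingBuf;
use tokio::io::unix::AsyncFd;

// Drain ring buffer samples as they arrive, waking up via the tokio reactor.
async fn drain_events(bpf: &mut aya::Bpf) -> Result<(), Box<dyn std::error::Error>> {
    // "EVENTS" is a placeholder ring buffer map name declared by the eBPF program.
    let ring_buf = RingBuf::try_from(bpf.map_mut("EVENTS").unwrap())?;
    // AsyncFd registers the map's file descriptor with the reactor, using
    // RingBuf's AsRawFd implementation.
    let mut ring_buf = AsyncFd::new(ring_buf)?;
    loop {
        // Wait until the kernel signals that new samples were committed.
        let mut guard = ring_buf.readable_mut().await?;
        let inner = guard.get_inner_mut();
        while let Some(item) = inner.next() {
            println!("received {} bytes", item.len());
        }
        // Clear readiness so the next await blocks until new data arrives.
        guard.clear_ready();
    }
}
```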
#[doc(alias = "BPF_MAP_TYPE_RINGBUF")]
pub struct RingBuf<T> {
map: T,
consumer: ConsumerPos,
producer: ProducerData,
}
impl<T: Borrow<MapData>> RingBuf<T> {
pub(crate) fn new(map: T) -> Result<Self, MapError> {
let data: &MapData = map.borrow();
let page_size = page_size();
let map_fd = data.fd().as_fd();
let byte_size = data.obj.max_entries();
let consumer_metadata = ConsumerMetadata::new(map_fd, 0, page_size)?;
let consumer = ConsumerPos::new(consumer_metadata);
let producer = ProducerData::new(map_fd, page_size, page_size, byte_size)?;
Ok(Self {
map,
consumer,
producer,
})
}
}
impl<T> RingBuf<T> {
/// Try to take a new entry from the ringbuf.
///
/// Returns `Some(item)` if the ringbuf is not empty. Returns `None` if the ringbuf is empty, in
/// which case the caller may register for availability notifications through `epoll` or other
/// APIs. Only one RingBufItem may be outstanding at a time.
//
// This is not an implementation of `Iterator` because we need to be able to refer to the
// lifetime of the iterator in the returned `RingBufItem`. If the Iterator::Item leveraged GATs,
// one could imagine an implementation of `Iterator` that would work. GATs were stabilized in
// Rust 1.65, but there's not yet a lending-iterator trait that the community has standardized on.
#[allow(clippy::should_implement_trait)]
pub fn next(&mut self) -> Option<RingBufItem<'_>> {
let Self {
consumer, producer, ..
} = self;
producer.next(consumer)
}
}
/// Access to the RawFd can be used to construct an AsyncFd for use with epoll.
impl<T: Borrow<MapData>> AsRawFd for RingBuf<T> {
fn as_raw_fd(&self) -> RawFd {
let Self {
map,
consumer: _,
producer: _,
} = self;
map.borrow().fd().as_fd().as_raw_fd()
}
}
/// The current outstanding item read from the ringbuf.
pub struct RingBufItem<'a> {
data: &'a [u8],
consumer: &'a mut ConsumerPos,
}
impl Deref for RingBufItem<'_> {
type Target = [u8];
fn deref(&self) -> &Self::Target {
let Self { data, .. } = self;
data
}
}
impl Drop for RingBufItem<'_> {
fn drop(&mut self) {
let Self { consumer, data } = self;
consumer.consume(data.len())
}
}
impl Debug for RingBufItem<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
let Self {
data,
consumer:
ConsumerPos {
pos,
metadata: ConsumerMetadata { mmap: _ },
},
} = self;
// In general Relaxed here is sufficient; for debugging, it certainly is.
f.debug_struct("RingBufItem")
.field("pos", pos)
.field("len", &data.len())
.finish()
}
}
struct ConsumerMetadata {
mmap: MMap,
}
impl ConsumerMetadata {
fn new(fd: BorrowedFd<'_>, offset: usize, page_size: usize) -> Result<Self, MapError> {
let mmap = MMap::new(
fd,
page_size,
PROT_READ | PROT_WRITE,
MAP_SHARED,
offset.try_into().unwrap(),
)?;
Ok(Self { mmap })
}
}
impl AsRef<AtomicUsize> for ConsumerMetadata {
fn as_ref(&self) -> &AtomicUsize {
let Self {
mmap: MMap { ptr, .. },
} = self;
unsafe { ptr.cast::<AtomicUsize>().as_ref() }
}
}
struct ConsumerPos {
pos: usize,
metadata: ConsumerMetadata,
}
impl ConsumerPos {
fn new(metadata: ConsumerMetadata) -> Self {
// Load the initial value of the consumer position. SeqCst is used to be safe given we don't
// have any claims about memory synchronization performed by some previous writer.
let pos = metadata.as_ref().load(Ordering::SeqCst);
Self { pos, metadata }
}
fn consume(&mut self, len: usize) {
let Self { pos, metadata } = self;
// TODO: Use primitive method when https://github.com/rust-lang/rust/issues/88581 is stabilized.
fn next_multiple_of(n: usize, multiple: usize) -> usize {
match n % multiple {
0 => n,
rem => n + (multiple - rem),
}
}
*pos += next_multiple_of(usize::try_from(BPF_RINGBUF_HDR_SZ).unwrap() + len, 8);
// Write operation needs to be properly ordered with respect to the producer committing new
// data to the ringbuf. The producer uses xchg (SeqCst) to commit new data [1]. The producer
// reads the consumer offset after clearing the busy bit on a new entry [2]. By using SeqCst
// here we ensure that either a subsequent read by the consumer to consume messages will see
// an available message, or the producer in the kernel will see the updated consumer offset
// that is caught up.
//
// [1]: https://github.com/torvalds/linux/blob/2772d7df/kernel/bpf/ringbuf.c#L487-L488
// [2]: https://github.com/torvalds/linux/blob/2772d7df/kernel/bpf/ringbuf.c#L494
metadata.as_ref().store(*pos, Ordering::SeqCst);
}
}
struct ProducerData {
mmap: MMap,
// Offset in the mmap where the data starts.
data_offset: usize,
// A cache of the value of the producer position. It is used to avoid re-reading the producer
// position when we know there is more data to consume.
pos_cache: usize,
// A bitmask which truncates u32 values to the domain of valid offsets in the ringbuf.
mask: u32,
}
impl ProducerData {
fn new(
fd: BorrowedFd<'_>,
offset: usize,
page_size: usize,
byte_size: u32,
) -> Result<Self, MapError> {
// The producer pages have one page of metadata and then the data pages, all mapped
// read-only. Note that the length of the mapping includes the data pages twice as the
// kernel will map them twice consecutively to avoid special handling of entries that
// cross over the end of the ring buffer.
//
// The kernel diagram below shows the layout of the ring buffer. It references "meta pages",
// but we only map exactly one producer meta page read-only. The consumer meta page is mapped
// read-write elsewhere, and is taken into consideration via the offset parameter.
//
// From kernel/bpf/ringbuf.c[0]:
//
// Each data page is mapped twice to allow "virtual"
// continuous read of samples wrapping around the end of ring
// buffer area:
// ------------------------------------------------------
// | meta pages | real data pages | same data pages |
// ------------------------------------------------------
// | | 1 2 3 4 5 6 7 8 9 | 1 2 3 4 5 6 7 8 9 |
// ------------------------------------------------------
// | | TA DA | TA DA |
// ------------------------------------------------------
// ^^^^^^^
// |
// Here, no need to worry about special handling of wrapped-around
// data due to double-mapped data pages. This works both in kernel and
// when mmap()'ed in user-space, simplifying both kernel and
// user-space implementations significantly.
//
// [0]: https://github.com/torvalds/linux/blob/3f01e9fe/kernel/bpf/ringbuf.c#L108-L124
let len = page_size + 2 * usize::try_from(byte_size).unwrap();
let mmap = MMap::new(fd, len, PROT_READ, MAP_SHARED, offset.try_into().unwrap())?;
// byte_size is required to be a power of two multiple of page_size (which implicitly is a
// power of 2), so subtracting one will create a bitmask for values less than byte_size.
debug_assert!(byte_size.is_power_of_two());
let mask = byte_size - 1;
Ok(Self {
mmap,
data_offset: page_size,
pos_cache: 0,
mask,
})
}
fn next<'a>(&'a mut self, consumer: &'a mut ConsumerPos) -> Option<RingBufItem<'a>> {
let Self {
ref mmap,
data_offset,
pos_cache,
mask,
} = self;
let pos = unsafe { mmap.ptr.cast().as_ref() };
let mmap_data = mmap.as_ref();
let data_pages = mmap_data.get(*data_offset..).unwrap_or_else(|| {
panic!(
"offset {} out of bounds, data len {}",
data_offset,
mmap_data.len()
)
});
while data_available(pos, pos_cache, consumer) {
match read_item(data_pages, *mask, consumer) {
Item::Busy => return None,
Item::Discard { len } => consumer.consume(len),
Item::Data(data) => return Some(RingBufItem { data, consumer }),
}
}
return None;
enum Item<'a> {
Busy,
Discard { len: usize },
Data(&'a [u8]),
}
fn data_available(
producer: &AtomicUsize,
cache: &mut usize,
consumer: &ConsumerPos,
) -> bool {
let ConsumerPos { pos: consumer, .. } = consumer;
if consumer == cache {
// This value is written using Release by the kernel [1], and should be read with
// Acquire to ensure that the prior writes to the entry header are visible.
//
// [1]: https://github.com/torvalds/linux/blob/eb26cbb1/kernel/bpf/ringbuf.c#L447-L448
*cache = producer.load(Ordering::Acquire);
}
// Note that we don't compare the order of the values because the producer position may
// overflow u32 and wrap around to 0. Instead we just compare equality and assume that
// the consumer position is always logically less than the producer position.
//
// Note also that the kernel, at the time of writing [1], doesn't seem to handle this
// overflow correctly at all, and it's not clear that one can produce events after the
// producer position has wrapped around.
//
// [1]: https://github.com/torvalds/linux/blob/4b810bf0/kernel/bpf/ringbuf.c#L434-L440
consumer != cache
}
fn read_item<'data>(data: &'data [u8], mask: u32, pos: &ConsumerPos) -> Item<'data> {
let ConsumerPos { pos, .. } = pos;
let offset = pos & usize::try_from(mask).unwrap();
let must_get_data = |offset, len| {
data.get(offset..offset + len).unwrap_or_else(|| {
panic!("{:?} not in {:?}", offset..offset + len, 0..data.len())
})
};
let header_ptr =
must_get_data(offset, mem::size_of::<AtomicU32>()).as_ptr() as *const AtomicU32;
// Pair the kernel's SeqCst write (implies Release) [1] with an Acquire load. This
// ensures data written by the producer will be visible.
//
// [1]: https://github.com/torvalds/linux/blob/eb26cbb1/kernel/bpf/ringbuf.c#L488
let header = unsafe { &*header_ptr }.load(Ordering::Acquire);
if header & BPF_RINGBUF_BUSY_BIT != 0 {
Item::Busy
} else {
let len = usize::try_from(header & mask).unwrap();
if header & BPF_RINGBUF_DISCARD_BIT != 0 {
Item::Discard { len }
} else {
let data_offset = offset + usize::try_from(BPF_RINGBUF_HDR_SZ).unwrap();
let data = must_get_data(data_offset, len);
Item::Data(data)
}
}
}
}
}
// MMap corresponds to a memory-mapped region.
//
// The data is unmapped in Drop.
struct MMap {
ptr: NonNull<c_void>,
len: usize,
}
impl MMap {
fn new(
fd: BorrowedFd<'_>,
len: usize,
prot: c_int,
flags: c_int,
offset: off_t,
) -> Result<Self, MapError> {
match unsafe { mmap(ptr::null_mut(), len, prot, flags, fd, offset) } {
MAP_FAILED => Err(MapError::SyscallError(SyscallError {
call: "mmap",
io_error: io::Error::last_os_error(),
})),
ptr => Ok(Self {
ptr: ptr::NonNull::new(ptr).ok_or(
// This should never happen, but to be paranoid, and so we never need to talk
// about a null pointer, we check it anyway.
MapError::SyscallError(SyscallError {
call: "mmap",
io_error: io::Error::new(
io::ErrorKind::Other,
"mmap returned null pointer",
),
}),
)?,
len,
}),
}
}
}
impl AsRef<[u8]> for MMap {
fn as_ref(&self) -> &[u8] {
let Self { ptr, len } = self;
unsafe { slice::from_raw_parts(ptr.as_ptr().cast(), *len) }
}
}
impl Drop for MMap {
fn drop(&mut self) {
let Self { ptr, len } = *self;
unsafe { munmap(ptr.as_ptr(), len) };
}
}

@ -2,17 +2,32 @@
mod sock_hash;
mod sock_map;
use std::{
io,
os::fd::{AsFd, BorrowedFd},
};
pub use sock_hash::SockHash;
pub use sock_map::SockMap;
use std::os::fd::{AsRawFd, RawFd};
/// A socket map file descriptor.
#[derive(Copy, Clone)]
pub struct SockMapFd(RawFd);
#[repr(transparent)]
pub struct SockMapFd(super::MapFd);
impl SockMapFd {
/// Creates a new instance that shares the same underlying file description as [`self`].
pub fn try_clone(&self) -> io::Result<Self> {
let Self(inner) = self;
let super::MapFd(inner) = inner;
let inner = inner.try_clone()?;
let inner = super::MapFd(inner);
Ok(Self(inner))
}
}
impl AsRawFd for SockMapFd {
fn as_raw_fd(&self) -> RawFd {
self.0
impl AsFd for SockMapFd {
fn as_fd(&self) -> BorrowedFd<'_> {
let Self(fd) = self;
fd.as_fd()
}
}

@ -1,12 +1,13 @@
use std::{
borrow::{Borrow, BorrowMut},
marker::PhantomData,
os::fd::{AsRawFd, RawFd},
os::fd::{AsFd as _, AsRawFd, RawFd},
};
use crate::{
maps::{
check_kv_size, hash_map, sock::SockMapFd, IterableMap, MapData, MapError, MapIter, MapKeys,
check_kv_size, hash_map, sock::SockMapFd, IterableMap, MapData, MapError, MapFd, MapIter,
MapKeys,
},
sys::{bpf_map_lookup_elem, SyscallError},
Pod,
@ -47,11 +48,11 @@ use crate::{
/// use aya::programs::SkMsg;
///
/// let mut intercept_egress = SockHash::<_, u32>::try_from(bpf.map("INTERCEPT_EGRESS").unwrap())?;
/// let map_fd = intercept_egress.fd()?;
/// let map_fd = intercept_egress.fd().try_clone()?;
///
/// let prog: &mut SkMsg = bpf.program_mut("intercept_egress_packet").unwrap().try_into()?;
/// prog.load()?;
/// prog.attach(map_fd)?;
/// prog.attach(&map_fd)?;
///
/// let mut client = TcpStream::connect("127.0.0.1:1234")?;
/// let mut intercept_egress = SockHash::try_from(bpf.map_mut("INTERCEPT_EGRESS").unwrap())?;
@ -64,16 +65,16 @@ use crate::{
/// ```
#[doc(alias = "BPF_MAP_TYPE_SOCKHASH")]
pub struct SockHash<T, K> {
inner: T,
pub(crate) inner: T,
_k: PhantomData<K>,
}
impl<T: Borrow<MapData>, K: Pod> SockHash<T, K> {
pub(crate) fn new(map: T) -> Result<SockHash<T, K>, MapError> {
pub(crate) fn new(map: T) -> Result<Self, MapError> {
let data = map.borrow();
check_kv_size::<K, u32>(data)?;
Ok(SockHash {
Ok(Self {
inner: map,
_k: PhantomData,
})
@ -81,7 +82,7 @@ impl<T: Borrow<MapData>, K: Pod> SockHash<T, K> {
/// Returns the fd of the socket stored at the given key.
pub fn get(&self, key: &K, flags: u64) -> Result<RawFd, MapError> {
let fd = self.inner.borrow().fd;
let fd = self.inner.borrow().fd().as_fd();
let value = bpf_map_lookup_elem(fd, key, flags).map_err(|(_, io_error)| SyscallError {
call: "bpf_map_lookup_elem",
io_error,
@ -105,8 +106,11 @@ impl<T: Borrow<MapData>, K: Pod> SockHash<T, K> {
///
/// The returned file descriptor can be used to attach programs that work with
/// socket maps, like [`SkMsg`](crate::programs::SkMsg) and [`SkSkb`](crate::programs::SkSkb).
pub fn fd(&self) -> Result<SockMapFd, MapError> {
Ok(SockMapFd(self.inner.borrow().fd))
pub fn fd(&self) -> &SockMapFd {
let fd: &MapFd = self.inner.borrow().fd();
// TODO(https://github.com/rust-lang/rfcs/issues/3066): avoid this unsafe.
// SAFETY: `SockMapFd` is #[repr(transparent)] over `MapFd`.
unsafe { std::mem::transmute(fd) }
}
}
@ -138,6 +142,6 @@ impl<T: Borrow<MapData>, K: Pod> IterableMap<K, RawFd> for SockHash<T, K> {
}
fn get(&self, key: &K) -> Result<RawFd, MapError> {
SockHash::get(self, key, 0)
Self::get(self, key, 0)
}
}

@ -2,11 +2,11 @@
use std::{
borrow::{Borrow, BorrowMut},
os::fd::{AsRawFd, RawFd},
os::fd::{AsFd as _, AsRawFd, RawFd},
};
use crate::{
maps::{check_bounds, check_kv_size, sock::SockMapFd, MapData, MapError, MapKeys},
maps::{check_bounds, check_kv_size, sock::SockMapFd, MapData, MapError, MapFd, MapKeys},
sys::{bpf_map_delete_elem, bpf_map_update_elem, SyscallError},
};
@ -26,18 +26,29 @@ use crate::{
/// # Examples
///
/// ```no_run
/// # #[derive(Debug, thiserror::Error)]
/// # enum Error {
/// # #[error(transparent)]
/// # IO(#[from] std::io::Error),
/// # #[error(transparent)]
/// # Map(#[from] aya::maps::MapError),
/// # #[error(transparent)]
/// # Program(#[from] aya::programs::ProgramError),
/// # #[error(transparent)]
/// # Bpf(#[from] aya::BpfError)
/// # }
/// # let mut bpf = aya::Bpf::load(&[])?;
/// use aya::maps::SockMap;
/// use aya::programs::SkSkb;
///
/// let intercept_ingress = SockMap::try_from(bpf.map("INTERCEPT_INGRESS").unwrap())?;
/// let map_fd = intercept_ingress.fd()?;
/// let map_fd = intercept_ingress.fd().try_clone()?;
///
/// let prog: &mut SkSkb = bpf.program_mut("intercept_ingress_packet").unwrap().try_into()?;
/// prog.load()?;
/// prog.attach(map_fd)?;
/// prog.attach(&map_fd)?;
///
/// # Ok::<(), aya::BpfError>(())
/// # Ok::<(), Error>(())
/// ```
#[doc(alias = "BPF_MAP_TYPE_SOCKMAP")]
pub struct SockMap<T> {
@ -45,11 +56,11 @@ pub struct SockMap<T> {
}
impl<T: Borrow<MapData>> SockMap<T> {
pub(crate) fn new(map: T) -> Result<SockMap<T>, MapError> {
pub(crate) fn new(map: T) -> Result<Self, MapError> {
let data = map.borrow();
check_kv_size::<u32, RawFd>(data)?;
Ok(SockMap { inner: map })
Ok(Self { inner: map })
}
/// An iterator over the indices of the array that point to a program. The iterator item type
@ -62,8 +73,11 @@ impl<T: Borrow<MapData>> SockMap<T> {
///
/// The returned file descriptor can be used to attach programs that work with
/// socket maps, like [`SkMsg`](crate::programs::SkMsg) and [`SkSkb`](crate::programs::SkSkb).
pub fn fd(&self) -> Result<SockMapFd, MapError> {
Ok(SockMapFd(self.inner.borrow().fd))
pub fn fd(&self) -> &SockMapFd {
let fd: &MapFd = self.inner.borrow().fd();
// TODO(https://github.com/rust-lang/rfcs/issues/3066): avoid this unsafe.
// SAFETY: `SockMapFd` is #[repr(transparent)] over `MapFd`.
        unsafe { std::mem::transmute(fd) }
}
}
@ -71,7 +85,7 @@ impl<T: BorrowMut<MapData>> SockMap<T> {
/// Stores a socket into the map.
pub fn set<I: AsRawFd>(&mut self, index: u32, socket: &I, flags: u64) -> Result<(), MapError> {
let data = self.inner.borrow_mut();
let fd = data.fd;
let fd = data.fd().as_fd();
check_bounds(data, index)?;
bpf_map_update_elem(fd, Some(&index), &socket.as_raw_fd(), flags).map_err(
|(_, io_error)| SyscallError {
@ -85,7 +99,7 @@ impl<T: BorrowMut<MapData>> SockMap<T> {
/// Removes the socket stored at `index` from the map.
pub fn clear_index(&mut self, index: &u32) -> Result<(), MapError> {
let data = self.inner.borrow_mut();
let fd = data.fd;
let fd = data.fd().as_fd();
check_bounds(data, *index)?;
bpf_map_delete_elem(fd, index)
.map(|_| ())

@ -2,6 +2,7 @@
use std::{
borrow::{Borrow, BorrowMut},
marker::PhantomData,
os::fd::AsFd as _,
};
use crate::{
@ -29,16 +30,16 @@ use crate::{
/// ```
#[doc(alias = "BPF_MAP_TYPE_STACK")]
pub struct Stack<T, V: Pod> {
inner: T,
pub(crate) inner: T,
_v: PhantomData<V>,
}
impl<T: Borrow<MapData>, V: Pod> Stack<T, V> {
pub(crate) fn new(map: T) -> Result<Stack<T, V>, MapError> {
pub(crate) fn new(map: T) -> Result<Self, MapError> {
let data = map.borrow();
check_kv_size::<(), V>(data)?;
Ok(Stack {
Ok(Self {
inner: map,
_v: PhantomData,
})
@ -60,7 +61,7 @@ impl<T: BorrowMut<MapData>, V: Pod> Stack<T, V> {
/// Returns [`MapError::ElementNotFound`] if the stack is empty, [`MapError::SyscallError`]
/// if `bpf_map_lookup_and_delete_elem` fails.
pub fn pop(&mut self, flags: u64) -> Result<V, MapError> {
let fd = self.inner.borrow().fd;
let fd = self.inner.borrow().fd().as_fd();
let value = bpf_map_lookup_and_delete_elem::<u32, _>(fd, None, flags).map_err(
|(_, io_error)| SyscallError {
@ -77,7 +78,7 @@ impl<T: BorrowMut<MapData>, V: Pod> Stack<T, V> {
///
/// [`MapError::SyscallError`] if `bpf_map_update_elem` fails.
pub fn push(&mut self, value: impl Borrow<V>, flags: u64) -> Result<(), MapError> {
let fd = self.inner.borrow().fd;
let fd = self.inner.borrow().fd().as_fd();
bpf_map_update_elem(fd, None::<&u32>, value.borrow(), flags).map_err(|(_, io_error)| {
SyscallError {
call: "bpf_map_update_elem",

@ -1,7 +1,7 @@
//! A hash map of kernel or user space stack traces.
//!
//! See [`StackTraceMap`] for documentation and examples.
use std::{borrow::Borrow, fs, io, mem, path::Path, str::FromStr};
use std::{borrow::Borrow, fs, io, mem, os::fd::AsFd as _, path::Path, str::FromStr};
use crate::{
maps::{IterableMap, MapData, MapError, MapIter, MapKeys},
@ -67,12 +67,12 @@ use crate::{
#[derive(Debug)]
#[doc(alias = "BPF_MAP_TYPE_STACK_TRACE")]
pub struct StackTraceMap<T> {
inner: T,
pub(crate) inner: T,
max_stack_depth: usize,
}
impl<T: Borrow<MapData>> StackTraceMap<T> {
pub(crate) fn new(map: T) -> Result<StackTraceMap<T>, MapError> {
pub(crate) fn new(map: T) -> Result<Self, MapError> {
let data = map.borrow();
let expected = mem::size_of::<u32>();
let size = data.obj.key_size() as usize;
@ -90,7 +90,7 @@ impl<T: Borrow<MapData>> StackTraceMap<T> {
return Err(MapError::InvalidValueSize { size, expected });
}
Ok(StackTraceMap {
Ok(Self {
inner: map,
max_stack_depth,
})
@ -103,7 +103,7 @@ impl<T: Borrow<MapData>> StackTraceMap<T> {
/// Returns [`MapError::KeyNotFound`] if there is no stack trace with the
/// given `stack_id`, or [`MapError::SyscallError`] if `bpf_map_lookup_elem` fails.
pub fn get(&self, stack_id: &u32, flags: u64) -> Result<StackTrace, MapError> {
let fd = self.inner.borrow().fd;
let fd = self.inner.borrow().fd().as_fd();
let mut frames = vec![0; self.max_stack_depth];
bpf_map_lookup_elem_ptr(fd, Some(stack_id), frames.as_mut_ptr(), flags)

@ -0,0 +1,194 @@
//! An array of available CPUs.
use std::{
borrow::{Borrow, BorrowMut},
num::NonZeroU32,
os::fd::{AsFd, AsRawFd},
};
use aya_obj::generated::bpf_cpumap_val;
use super::XdpMapError;
use crate::{
maps::{check_bounds, check_kv_size, IterableMap, MapData, MapError},
programs::ProgramFd,
sys::{bpf_map_lookup_elem, bpf_map_update_elem, SyscallError},
Pod, FEATURES,
};
/// An array of available CPUs.
///
/// XDP programs can use this map to redirect packets to a target
/// CPU for processing.
///
/// # Minimum kernel version
///
/// The minimum kernel version required to use this feature is 4.15.
///
/// # Examples
/// ```no_run
/// # let elf_bytes = &[];
/// use aya::maps::xdp::CpuMap;
///
/// let ncpus = aya::util::nr_cpus().unwrap() as u32;
/// let mut bpf = aya::BpfLoader::new()
/// .set_max_entries("CPUS", ncpus)
/// .load(elf_bytes)
/// .unwrap();
/// let mut cpumap = CpuMap::try_from(bpf.map_mut("CPUS").unwrap())?;
/// let flags = 0;
/// let queue_size = 2048;
/// for i in 0..ncpus {
/// cpumap.set(i, queue_size, None, flags);
/// }
///
/// # Ok::<(), aya::BpfError>(())
/// ```
///
/// # See also
///
/// Kernel documentation: <https://docs.kernel.org/next/bpf/map_cpumap.html>
#[doc(alias = "BPF_MAP_TYPE_CPUMAP")]
pub struct CpuMap<T> {
pub(crate) inner: T,
}
impl<T: Borrow<MapData>> CpuMap<T> {
pub(crate) fn new(map: T) -> Result<Self, MapError> {
let data = map.borrow();
if FEATURES.cpumap_prog_id() {
check_kv_size::<u32, bpf_cpumap_val>(data)?;
} else {
check_kv_size::<u32, u32>(data)?;
}
Ok(Self { inner: map })
}
/// Returns the number of elements in the array.
///
/// This corresponds to the value of `bpf_map_def::max_entries` on the eBPF side.
pub fn len(&self) -> u32 {
self.inner.borrow().obj.max_entries()
}
/// Returns the queue size and optional program for a given CPU index.
///
/// # Errors
///
/// Returns [`MapError::OutOfBounds`] if `cpu_index` is out of bounds,
/// [`MapError::SyscallError`] if `bpf_map_lookup_elem` fails.
pub fn get(&self, cpu_index: u32, flags: u64) -> Result<CpuMapValue, MapError> {
let data = self.inner.borrow();
check_bounds(data, cpu_index)?;
let fd = data.fd().as_fd();
let value = if FEATURES.cpumap_prog_id() {
bpf_map_lookup_elem::<_, bpf_cpumap_val>(fd, &cpu_index, flags).map(|value| {
value.map(|value| CpuMapValue {
queue_size: value.qsize,
// SAFETY: map writes use fd, map reads use id.
// https://github.com/torvalds/linux/blob/2dde18cd1d8fac735875f2e4987f11817cc0bc2c/include/uapi/linux/bpf.h#L6241
prog_id: NonZeroU32::new(unsafe { value.bpf_prog.id }),
})
})
} else {
bpf_map_lookup_elem::<_, u32>(fd, &cpu_index, flags).map(|value| {
value.map(|qsize| CpuMapValue {
queue_size: qsize,
prog_id: None,
})
})
};
value
.map_err(|(_, io_error)| SyscallError {
call: "bpf_map_lookup_elem",
io_error,
})?
.ok_or(MapError::KeyNotFound)
}
/// An iterator over the elements of the map.
pub fn iter(&self) -> impl Iterator<Item = Result<CpuMapValue, MapError>> + '_ {
(0..self.len()).map(move |i| self.get(i, 0))
}
}
impl<T: BorrowMut<MapData>> CpuMap<T> {
/// Sets the queue size at the given CPU index, and optionally a chained program.
///
/// When sending the packet to the CPU at the given index, the kernel will queue up to
/// `queue_size` packets before dropping them.
///
/// Starting from Linux kernel 5.9, another XDP program can be passed in that will be run on the
    /// target CPU, instead of the CPU that receives the packets. This allows performing minimal
    /// computations on the CPUs that directly handle packets from a NIC's RX queues, and deferring
    /// possibly heavier ones to other, less busy CPUs.
///
/// The chained program must be loaded with the `BPF_XDP_CPUMAP` attach type. When using
/// `aya-ebpf`, that means XDP programs that specify the `map = "cpumap"` argument. See the
/// kernel-space `aya_ebpf::xdp` for more information.
///
/// # Errors
///
/// Returns [`MapError::OutOfBounds`] if `index` is out of bounds, [`MapError::SyscallError`]
/// if `bpf_map_update_elem` fails, [`XdpMapError::ChainedProgramNotSupported`] if the kernel
/// does not support chained programs and one is provided.
pub fn set(
&mut self,
cpu_index: u32,
queue_size: u32,
program: Option<&ProgramFd>,
flags: u64,
) -> Result<(), XdpMapError> {
let data = self.inner.borrow_mut();
check_bounds(data, cpu_index)?;
let fd = data.fd().as_fd();
let res = if FEATURES.cpumap_prog_id() {
let mut value = unsafe { std::mem::zeroed::<bpf_cpumap_val>() };
value.qsize = queue_size;
// Default is valid as the kernel will only consider fd > 0:
// https://github.com/torvalds/linux/blob/2dde18cd1d8fac735875f2e4987f11817cc0bc2c/kernel/bpf/cpumap.c#L466
value.bpf_prog.fd = program
.map(|prog| prog.as_fd().as_raw_fd())
.unwrap_or_default();
bpf_map_update_elem(fd, Some(&cpu_index), &value, flags)
} else {
if program.is_some() {
return Err(XdpMapError::ChainedProgramNotSupported);
}
bpf_map_update_elem(fd, Some(&cpu_index), &queue_size, flags)
};
res.map_err(|(_, io_error)| {
MapError::from(SyscallError {
call: "bpf_map_update_elem",
io_error,
})
})?;
Ok(())
}
}
impl<T: Borrow<MapData>> IterableMap<u32, CpuMapValue> for CpuMap<T> {
fn map(&self) -> &MapData {
self.inner.borrow()
}
fn get(&self, key: &u32) -> Result<CpuMapValue, MapError> {
self.get(*key, 0)
}
}
unsafe impl Pod for bpf_cpumap_val {}
#[derive(Clone, Copy, Debug)]
/// The value of a CPU map.
pub struct CpuMapValue {
    /// Size of the queue for the CPU.
pub queue_size: u32,
/// Chained XDP program ID.
pub prog_id: Option<NonZeroU32>,
}
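
A hedged sketch of the chained-program form of `CpuMap::set` described above. It assumes the loaded object contains a cpumap named `CPUS` sized to the number of CPUs and an XDP program `redirect_cpu` built for the `BPF_XDP_CPUMAP` attach type (kernel 5.9+); those names are illustrative, not part of this change:

```rust
// Hypothetical usage: queue up to 2048 packets per CPU and run `redirect_cpu`
// on the target CPU for every redirected packet.
use aya::maps::xdp::CpuMap;

fn chain_per_cpu(bpf: &mut aya::Bpf) -> Result<(), Box<dyn std::error::Error>> {
    let ncpus = aya::util::nr_cpus()? as u32;
    // Clone the program fd so the shared borrow of `bpf` ends before
    // `map_mut` takes a mutable one.
    let prog_fd = bpf.program("redirect_cpu").unwrap().fd()?.try_clone()?;
    let mut cpus = CpuMap::try_from(bpf.map_mut("CPUS").unwrap())?;
    for cpu in 0..ncpus {
        cpus.set(cpu, 2048, Some(&prog_fd), 0)?;
    }
    Ok(())
}
```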

@ -0,0 +1,186 @@
//! An array of network devices.
use std::{
borrow::{Borrow, BorrowMut},
num::NonZeroU32,
os::fd::{AsFd, AsRawFd},
};
use aya_obj::generated::bpf_devmap_val;
use super::XdpMapError;
use crate::{
maps::{check_bounds, check_kv_size, IterableMap, MapData, MapError},
programs::ProgramFd,
sys::{bpf_map_lookup_elem, bpf_map_update_elem, SyscallError},
Pod, FEATURES,
};
/// An array of network devices.
///
/// XDP programs can use this map to redirect to other network
/// devices.
///
/// # Minimum kernel version
///
/// The minimum kernel version required to use this feature is 4.14.
///
/// # Examples
/// ```no_run
/// # let mut bpf = aya::Bpf::load(&[])?;
/// use aya::maps::xdp::DevMap;
///
/// let mut devmap = DevMap::try_from(bpf.map_mut("IFACES").unwrap())?;
/// // Lookups at index 2 will redirect packets to interface with index 3 (e.g. eth1)
/// devmap.set(2, 3, None, 0);
///
/// # Ok::<(), aya::BpfError>(())
/// ```
///
/// # See also
///
/// Kernel documentation: <https://docs.kernel.org/next/bpf/map_devmap.html>
#[doc(alias = "BPF_MAP_TYPE_DEVMAP")]
pub struct DevMap<T> {
pub(crate) inner: T,
}
impl<T: Borrow<MapData>> DevMap<T> {
pub(crate) fn new(map: T) -> Result<Self, MapError> {
let data = map.borrow();
if FEATURES.devmap_prog_id() {
check_kv_size::<u32, bpf_devmap_val>(data)?;
} else {
check_kv_size::<u32, u32>(data)?;
}
Ok(Self { inner: map })
}
/// Returns the number of elements in the array.
///
/// This corresponds to the value of `bpf_map_def::max_entries` on the eBPF side.
pub fn len(&self) -> u32 {
self.inner.borrow().obj.max_entries()
}
/// Returns the target interface index and optional program at a given index.
///
/// # Errors
///
/// Returns [`MapError::OutOfBounds`] if `index` is out of bounds, [`MapError::SyscallError`]
/// if `bpf_map_lookup_elem` fails.
pub fn get(&self, index: u32, flags: u64) -> Result<DevMapValue, MapError> {
let data = self.inner.borrow();
check_bounds(data, index)?;
let fd = data.fd().as_fd();
let value = if FEATURES.devmap_prog_id() {
bpf_map_lookup_elem::<_, bpf_devmap_val>(fd, &index, flags).map(|value| {
value.map(|value| DevMapValue {
if_index: value.ifindex,
// SAFETY: map writes use fd, map reads use id.
// https://github.com/torvalds/linux/blob/2dde18cd1d8fac735875f2e4987f11817cc0bc2c/include/uapi/linux/bpf.h#L6228
prog_id: NonZeroU32::new(unsafe { value.bpf_prog.id }),
})
})
} else {
bpf_map_lookup_elem::<_, u32>(fd, &index, flags).map(|value| {
value.map(|ifindex| DevMapValue {
if_index: ifindex,
prog_id: None,
})
})
};
value
.map_err(|(_, io_error)| SyscallError {
call: "bpf_map_lookup_elem",
io_error,
})?
.ok_or(MapError::KeyNotFound)
}
/// An iterator over the elements of the array.
pub fn iter(&self) -> impl Iterator<Item = Result<DevMapValue, MapError>> + '_ {
(0..self.len()).map(move |i| self.get(i, 0))
}
}
impl<T: BorrowMut<MapData>> DevMap<T> {
    /// Sets the target interface index at the given index, and optionally a chained program.
///
/// When redirecting using `index`, packets will be transmitted by the interface with
/// `target_if_index`.
///
/// Starting from Linux kernel 5.8, another XDP program can be passed in that will be run before
    /// actual transmission. It can be used to modify the packet before transmission with NIC-specific
    /// data (MAC address update, checksum computations, etc.) or for other purposes.
///
/// The chained program must be loaded with the `BPF_XDP_DEVMAP` attach type. When using
/// `aya-ebpf`, that means XDP programs that specify the `map = "devmap"` argument. See the
/// kernel-space `aya_ebpf::xdp` for more information.
///
/// # Errors
///
/// Returns [`MapError::OutOfBounds`] if `index` is out of bounds, [`MapError::SyscallError`]
    /// if `bpf_map_update_elem` fails, [`XdpMapError::ChainedProgramNotSupported`] if the kernel
    /// does not support chained programs and one is provided.
pub fn set(
&mut self,
index: u32,
target_if_index: u32,
program: Option<&ProgramFd>,
flags: u64,
) -> Result<(), XdpMapError> {
let data = self.inner.borrow_mut();
check_bounds(data, index)?;
let fd = data.fd().as_fd();
let res = if FEATURES.devmap_prog_id() {
let mut value = unsafe { std::mem::zeroed::<bpf_devmap_val>() };
value.ifindex = target_if_index;
// Default is valid as the kernel will only consider fd > 0:
// https://github.com/torvalds/linux/blob/2dde18cd1d8fac735875f2e4987f11817cc0bc2c/kernel/bpf/devmap.c#L866
// https://github.com/torvalds/linux/blob/2dde18cd1d8fac735875f2e4987f11817cc0bc2c/kernel/bpf/devmap.c#L918
value.bpf_prog.fd = program
.map(|prog| prog.as_fd().as_raw_fd())
.unwrap_or_default();
bpf_map_update_elem(fd, Some(&index), &value, flags)
} else {
if program.is_some() {
return Err(XdpMapError::ChainedProgramNotSupported);
}
bpf_map_update_elem(fd, Some(&index), &target_if_index, flags)
};
res.map_err(|(_, io_error)| {
MapError::from(SyscallError {
call: "bpf_map_update_elem",
io_error,
})
})?;
Ok(())
}
}
impl<T: Borrow<MapData>> IterableMap<u32, DevMapValue> for DevMap<T> {
fn map(&self) -> &MapData {
self.inner.borrow()
}
fn get(&self, key: &u32) -> Result<DevMapValue, MapError> {
self.get(*key, 0)
}
}
unsafe impl Pod for bpf_devmap_val {}
#[derive(Clone, Copy, Debug)]
/// The value of a device map.
pub struct DevMapValue {
/// Target interface index to redirect to.
pub if_index: u32,
/// Chained XDP program ID.
pub prog_id: Option<NonZeroU32>,
}
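
For the lookup side described above, a hedged sketch that walks a `DevMap` and prints each redirect target; the map name `IFACES` is an assumption:

```rust
// Hypothetical usage: list every devmap entry, including any chained program.
use aya::maps::xdp::DevMap;

fn dump_redirects(bpf: &aya::Bpf) -> Result<(), Box<dyn std::error::Error>> {
    let devmap = DevMap::try_from(bpf.map("IFACES").unwrap())?;
    for (i, entry) in devmap.iter().enumerate() {
        match entry {
            Ok(v) => println!("{i}: ifindex={} prog_id={:?}", v.if_index, v.prog_id),
            // Unpopulated slots may report KeyNotFound; skip them.
            Err(aya::maps::MapError::KeyNotFound) => continue,
            Err(e) => return Err(e.into()),
        }
    }
    Ok(())
}
```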

@ -0,0 +1,167 @@
//! A hashmap of network devices.
use std::{
borrow::{Borrow, BorrowMut},
num::NonZeroU32,
os::fd::{AsFd, AsRawFd},
};
use aya_obj::generated::bpf_devmap_val;
use super::{dev_map::DevMapValue, XdpMapError};
use crate::{
maps::{check_kv_size, hash_map, IterableMap, MapData, MapError, MapIter, MapKeys},
programs::ProgramFd,
sys::{bpf_map_lookup_elem, SyscallError},
FEATURES,
};
/// A hashmap of network devices.
///
/// XDP programs can use this map to redirect to other network
/// devices.
///
/// # Minimum kernel version
///
/// The minimum kernel version required to use this feature is 5.4.
///
/// # Examples
/// ```no_run
/// # let mut bpf = aya::Bpf::load(&[])?;
/// use aya::maps::xdp::DevMapHash;
///
/// let mut devmap = DevMapHash::try_from(bpf.map_mut("IFACES").unwrap())?;
/// // Lookups with key 2 will redirect packets to interface with index 3 (e.g. eth1)
/// devmap.insert(2, 3, None, 0);
///
/// # Ok::<(), aya::BpfError>(())
/// ```
///
/// # See also
///
/// Kernel documentation: <https://docs.kernel.org/next/bpf/map_devmap.html>
#[doc(alias = "BPF_MAP_TYPE_DEVMAP_HASH")]
pub struct DevMapHash<T> {
pub(crate) inner: T,
}
impl<T: Borrow<MapData>> DevMapHash<T> {
pub(crate) fn new(map: T) -> Result<Self, MapError> {
let data = map.borrow();
if FEATURES.devmap_prog_id() {
check_kv_size::<u32, bpf_devmap_val>(data)?;
} else {
check_kv_size::<u32, u32>(data)?;
}
Ok(Self { inner: map })
}
/// Returns the target interface index and optional program for a given key.
///
/// # Errors
///
/// Returns [`MapError::SyscallError`] if `bpf_map_lookup_elem` fails.
pub fn get(&self, key: u32, flags: u64) -> Result<DevMapValue, MapError> {
let fd = self.inner.borrow().fd().as_fd();
let value = if FEATURES.devmap_prog_id() {
bpf_map_lookup_elem::<_, bpf_devmap_val>(fd, &key, flags).map(|value| {
value.map(|value| DevMapValue {
if_index: value.ifindex,
// SAFETY: map writes use fd, map reads use id.
// https://github.com/torvalds/linux/blob/2dde18cd1d8fac735875f2e4987f11817cc0bc2c/include/uapi/linux/bpf.h#L6228
prog_id: NonZeroU32::new(unsafe { value.bpf_prog.id }),
})
})
} else {
bpf_map_lookup_elem::<_, u32>(fd, &key, flags).map(|value| {
value.map(|ifindex| DevMapValue {
if_index: ifindex,
prog_id: None,
})
})
};
value
.map_err(|(_, io_error)| SyscallError {
call: "bpf_map_lookup_elem",
io_error,
})?
.ok_or(MapError::KeyNotFound)
}
/// An iterator over the elements of the devmap in arbitrary order.
pub fn iter(&self) -> MapIter<'_, u32, DevMapValue, Self> {
MapIter::new(self)
}
/// An iterator visiting all keys in arbitrary order.
pub fn keys(&self) -> MapKeys<'_, u32> {
MapKeys::new(self.inner.borrow())
}
}
impl<T: BorrowMut<MapData>> DevMapHash<T> {
/// Inserts an ifindex and optionally a chained program in the map.
///
/// When redirecting using `key`, packets will be transmitted by the interface with `ifindex`.
///
/// Starting from Linux kernel 5.8, another XDP program can be passed in that will be run before
    /// actual transmission. It can be used to modify the packet before transmission with NIC-specific
    /// data (MAC address update, checksum computations, etc.) or for other purposes.
///
/// The chained program must be loaded with the `BPF_XDP_DEVMAP` attach type. When using
/// `aya-ebpf`, that means XDP programs that specify the `map = "devmap"` argument. See the
/// kernel-space `aya_ebpf::xdp` for more information.
///
/// # Errors
///
/// Returns [`MapError::SyscallError`] if `bpf_map_update_elem` fails,
    /// [`XdpMapError::ChainedProgramNotSupported`] if the kernel does not support chained programs
    /// and one is provided.
pub fn insert(
&mut self,
key: u32,
target_if_index: u32,
program: Option<&ProgramFd>,
flags: u64,
) -> Result<(), XdpMapError> {
if FEATURES.devmap_prog_id() {
let mut value = unsafe { std::mem::zeroed::<bpf_devmap_val>() };
value.ifindex = target_if_index;
// Default is valid as the kernel will only consider fd > 0:
// https://github.com/torvalds/linux/blob/2dde18cd1d8fac735875f2e4987f11817cc0bc2c/kernel/bpf/devmap.c#L866
// https://github.com/torvalds/linux/blob/2dde18cd1d8fac735875f2e4987f11817cc0bc2c/kernel/bpf/devmap.c#L918
value.bpf_prog.fd = program
.map(|prog| prog.as_fd().as_raw_fd())
.unwrap_or_default();
hash_map::insert(self.inner.borrow_mut(), &key, &value, flags)?;
} else {
if program.is_some() {
return Err(XdpMapError::ChainedProgramNotSupported);
}
hash_map::insert(self.inner.borrow_mut(), &key, &target_if_index, flags)?;
}
Ok(())
}
/// Removes a value from the map.
///
/// # Errors
///
/// Returns [`MapError::SyscallError`] if `bpf_map_delete_elem` fails.
pub fn remove(&mut self, key: u32) -> Result<(), MapError> {
hash_map::remove(self.inner.borrow_mut(), &key)
}
}
impl<T: Borrow<MapData>> IterableMap<u32, DevMapValue> for DevMapHash<T> {
fn map(&self) -> &MapData {
self.inner.borrow()
}
fn get(&self, key: &u32) -> Result<DevMapValue, MapError> {
self.get(*key, 0)
}
}
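
A hedged sketch of the `insert`/`remove` pair on `DevMapHash`; the map name `IFACES` and the indices are assumptions:

```rust
// Hypothetical usage: redirect lookups for key 10 to ifindex 3, then undo it.
use aya::maps::xdp::DevMapHash;

fn update_redirect(bpf: &mut aya::Bpf) -> Result<(), Box<dyn std::error::Error>> {
    let mut devmap = DevMapHash::try_from(bpf.map_mut("IFACES").unwrap())?;
    // No chained program here; on kernels >= 5.8 pass Some(&prog_fd) to run
    // an extra XDP program before transmission.
    devmap.insert(10, 3, None, 0)?;
    devmap.remove(10)?;
    Ok(())
}
```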

@ -0,0 +1,25 @@
//! XDP maps.
mod cpu_map;
mod dev_map;
mod dev_map_hash;
mod xsk_map;
pub use cpu_map::CpuMap;
pub use dev_map::DevMap;
pub use dev_map_hash::DevMapHash;
use thiserror::Error;
pub use xsk_map::XskMap;
use super::MapError;
#[derive(Error, Debug)]
/// Errors occurring when working with XDP maps.
pub enum XdpMapError {
/// Chained programs are not supported.
#[error("chained programs are not supported by the current kernel")]
ChainedProgramNotSupported,
/// Map operation failed.
#[error(transparent)]
MapError(#[from] MapError),
}

@ -0,0 +1,81 @@
//! An array of AF_XDP sockets.
use std::{
borrow::{Borrow, BorrowMut},
os::fd::{AsFd, AsRawFd, RawFd},
};
use crate::{
maps::{check_bounds, check_kv_size, MapData, MapError},
sys::{bpf_map_update_elem, SyscallError},
};
/// An array of AF_XDP sockets.
///
/// XDP programs can use this map to redirect packets to a target
/// AF_XDP socket using the `XDP_REDIRECT` action.
///
/// # Minimum kernel version
///
/// The minimum kernel version required to use this feature is 4.18.
///
/// # Examples
/// ```no_run
/// # let mut bpf = aya::Bpf::load(&[])?;
/// # let socket_fd = 1;
/// use aya::maps::XskMap;
///
/// let mut xskmap = XskMap::try_from(bpf.map_mut("SOCKETS").unwrap())?;
/// // socket_fd is the RawFd of an AF_XDP socket
/// xskmap.set(0, socket_fd, 0);
/// # Ok::<(), aya::BpfError>(())
/// ```
///
/// # See also
///
/// Kernel documentation: <https://docs.kernel.org/next/bpf/map_xskmap.html>
#[doc(alias = "BPF_MAP_TYPE_XSKMAP")]
pub struct XskMap<T> {
pub(crate) inner: T,
}
impl<T: Borrow<MapData>> XskMap<T> {
pub(crate) fn new(map: T) -> Result<Self, MapError> {
let data = map.borrow();
check_kv_size::<u32, RawFd>(data)?;
Ok(Self { inner: map })
}
/// Returns the number of elements in the array.
///
/// This corresponds to the value of `bpf_map_def::max_entries` on the eBPF side.
pub fn len(&self) -> u32 {
self.inner.borrow().obj.max_entries()
}
}
impl<T: BorrowMut<MapData>> XskMap<T> {
/// Sets the `AF_XDP` socket at a given index.
///
    /// When redirecting a packet, the `AF_XDP` socket at `index` will receive the packet. Note
    /// that it will do so only if the socket is bound to the same queue the packet was received
/// on.
///
/// # Errors
///
/// Returns [`MapError::OutOfBounds`] if `index` is out of bounds, [`MapError::SyscallError`]
/// if `bpf_map_update_elem` fails.
pub fn set(&mut self, index: u32, socket_fd: impl AsRawFd, flags: u64) -> Result<(), MapError> {
let data = self.inner.borrow_mut();
check_bounds(data, index)?;
let fd = data.fd().as_fd();
bpf_map_update_elem(fd, Some(&index), &socket_fd.as_raw_fd(), flags).map_err(
|(_, io_error)| SyscallError {
call: "bpf_map_update_elem",
io_error,
},
)?;
Ok(())
}
}
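
Since delivery only happens when the socket is bound to the queue the packet arrived on, a common pattern is one `AF_XDP` socket per RX queue, stored at the matching index. A hedged sketch; the map name `SOCKETS` and how the sockets are created are assumptions:

```rust
// Hypothetical usage: sockets[i] is an AF_XDP socket bound to RX queue i.
use std::os::fd::AsRawFd;

use aya::maps::XskMap;

fn populate<S: AsRawFd>(
    bpf: &mut aya::Bpf,
    sockets: &[S],
) -> Result<(), Box<dyn std::error::Error>> {
    let mut xsk = XskMap::try_from(bpf.map_mut("SOCKETS").unwrap())?;
    for (queue_id, sock) in sockets.iter().enumerate() {
        // The XDP program is expected to redirect packets received on queue
        // `queue_id` to the entry at the same index.
        xsk.set(queue_id as u32, sock.as_raw_fd(), 0)?;
    }
    Ok(())
}
```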

@ -1,17 +1,12 @@
//! Pinning BPF objects to the BPF filesystem.
use crate::sys::SyscallError;
use thiserror::Error;
use crate::sys::SyscallError;
/// An error occurred while working with a pinned BPF object.
#[derive(Error, Debug)]
pub enum PinError {
/// The object has already been pinned.
#[error("the BPF object `{name}` has already been pinned")]
AlreadyPinned {
/// Object name.
name: String,
},
/// The object FD is not known by Aya.
#[error("the BPF object `{name}`'s FD is not known")]
NoFd {
@ -19,10 +14,14 @@ pub enum PinError {
name: String,
},
/// The path for the BPF object is not valid.
#[error("invalid pin path `{error}`")]
#[error("invalid pin path `{}`", path.display())]
InvalidPinPath {
/// The error message.
error: String,
/// The path.
path: std::path::PathBuf,
#[source]
/// The source error.
error: std::ffi::NulError,
},
    /// An error occurred while making a syscall.
#[error(transparent)]

@ -1,14 +1,14 @@
//! Cgroup device programs.
use crate::util::KernelVersion;
use std::os::fd::{AsFd as _, AsRawFd};
use std::os::fd::AsFd;
use crate::{
generated::{bpf_attach_type::BPF_CGROUP_DEVICE, bpf_prog_type::BPF_PROG_TYPE_CGROUP_DEVICE},
programs::{
define_link_wrapper, load_program, FdLink, Link, ProgAttachLink, ProgramData, ProgramError,
},
sys::{bpf_link_create, bpf_prog_attach, SyscallError},
sys::{bpf_link_create, LinkTarget, SyscallError},
util::KernelVersion,
};
/// A program used to watch or prevent device interaction from a cgroup.
@ -60,35 +60,35 @@ impl CgroupDevice {
/// Attaches the program to the given cgroup.
///
/// The returned value can be used to detach, see [CgroupDevice::detach]
pub fn attach<T: AsRawFd>(&mut self, cgroup: T) -> Result<CgroupDeviceLinkId, ProgramError> {
pub fn attach<T: AsFd>(&mut self, cgroup: T) -> Result<CgroupDeviceLinkId, ProgramError> {
let prog_fd = self.fd()?;
let prog_fd = prog_fd.as_fd();
let prog_fd = prog_fd.as_raw_fd();
let cgroup_fd = cgroup.as_raw_fd();
let cgroup_fd = cgroup.as_fd();
if KernelVersion::current().unwrap() >= KernelVersion::new(5, 7, 0) {
let link_fd = bpf_link_create(prog_fd, cgroup_fd, BPF_CGROUP_DEVICE, None, 0).map_err(
|(_, io_error)| SyscallError {
call: "bpf_link_create",
io_error,
},
)?;
let link_fd = bpf_link_create(
prog_fd,
LinkTarget::Fd(cgroup_fd),
BPF_CGROUP_DEVICE,
None,
0,
)
.map_err(|(_, io_error)| SyscallError {
call: "bpf_link_create",
io_error,
})?;
self.data
.links
.insert(CgroupDeviceLink::new(CgroupDeviceLinkInner::Fd(
FdLink::new(link_fd),
)))
} else {
bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_DEVICE).map_err(|(_, io_error)| {
SyscallError {
call: "bpf_prog_attach",
io_error,
}
})?;
let link = ProgAttachLink::attach(prog_fd, cgroup_fd, BPF_CGROUP_DEVICE)?;
self.data
.links
.insert(CgroupDeviceLink::new(CgroupDeviceLinkInner::ProgAttach(
ProgAttachLink::new(prog_fd, cgroup_fd, BPF_CGROUP_DEVICE),
link,
)))
}
}
@ -129,15 +129,15 @@ impl Link for CgroupDeviceLinkInner {
fn id(&self) -> Self::Id {
match self {
CgroupDeviceLinkInner::Fd(fd) => CgroupDeviceLinkIdInner::Fd(fd.id()),
CgroupDeviceLinkInner::ProgAttach(p) => CgroupDeviceLinkIdInner::ProgAttach(p.id()),
Self::Fd(fd) => CgroupDeviceLinkIdInner::Fd(fd.id()),
Self::ProgAttach(p) => CgroupDeviceLinkIdInner::ProgAttach(p.id()),
}
}
fn detach(self) -> Result<(), ProgramError> {
match self {
CgroupDeviceLinkInner::Fd(fd) => fd.detach(),
CgroupDeviceLinkInner::ProgAttach(p) => p.detach(),
Self::Fd(fd) => fd.detach(),
Self::ProgAttach(p) => p.detach(),
}
}
}
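
With the switch from `AsRawFd` to `AsFd`, callers pass something that proves ownership of the descriptor, such as an open `File` for the cgroup directory. A hedged sketch (the cgroup path and the program name `devices` are assumptions); the other cgroup program types changed below follow the same shape:

```rust
// Hypothetical usage of the new AsFd-based attach.
use std::fs::File;

use aya::programs::CgroupDevice;

fn attach_devices(bpf: &mut aya::Bpf) -> Result<(), Box<dyn std::error::Error>> {
    // Opening the directory read-only is enough to obtain an fd to attach to.
    let cgroup = File::open("/sys/fs/cgroup")?;
    let prog: &mut CgroupDevice = bpf.program_mut("devices").unwrap().try_into()?;
    prog.load()?;
    // &File implements AsFd, so the file can be passed by reference.
    prog.attach(&cgroup)?;
    Ok(())
}
```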

@ -1,11 +1,6 @@
//! Cgroup skb programs.
use crate::util::KernelVersion;
use std::{
hash::Hash,
os::fd::{AsFd as _, AsRawFd},
path::Path,
};
use std::{hash::Hash, os::fd::AsFd, path::Path};
use crate::{
generated::{
@ -15,7 +10,8 @@ use crate::{
programs::{
define_link_wrapper, load_program, FdLink, Link, ProgAttachLink, ProgramData, ProgramError,
},
sys::{bpf_link_create, bpf_prog_attach, SyscallError},
sys::{bpf_link_create, LinkTarget, SyscallError},
util::KernelVersion,
VerifierLogLevel,
};
@ -87,45 +83,36 @@ impl CgroupSkb {
/// Attaches the program to the given cgroup.
///
/// The returned value can be used to detach, see [CgroupSkb::detach].
pub fn attach<T: AsRawFd>(
pub fn attach<T: AsFd>(
&mut self,
cgroup: T,
attach_type: CgroupSkbAttachType,
) -> Result<CgroupSkbLinkId, ProgramError> {
let prog_fd = self.fd()?;
let prog_fd = prog_fd.as_fd();
let prog_fd = prog_fd.as_raw_fd();
let cgroup_fd = cgroup.as_raw_fd();
let cgroup_fd = cgroup.as_fd();
let attach_type = match attach_type {
CgroupSkbAttachType::Ingress => BPF_CGROUP_INET_INGRESS,
CgroupSkbAttachType::Egress => BPF_CGROUP_INET_EGRESS,
};
if KernelVersion::current().unwrap() >= KernelVersion::new(5, 7, 0) {
let link_fd = bpf_link_create(prog_fd, cgroup_fd, attach_type, None, 0).map_err(
|(_, io_error)| SyscallError {
let link_fd = bpf_link_create(prog_fd, LinkTarget::Fd(cgroup_fd), attach_type, None, 0)
.map_err(|(_, io_error)| SyscallError {
call: "bpf_link_create",
io_error,
},
)?;
})?;
self.data
.links
.insert(CgroupSkbLink::new(CgroupSkbLinkInner::Fd(FdLink::new(
link_fd,
))))
} else {
bpf_prog_attach(prog_fd, cgroup_fd, attach_type).map_err(|(_, io_error)| {
SyscallError {
call: "bpf_prog_attach",
io_error,
}
})?;
let link = ProgAttachLink::attach(prog_fd, cgroup_fd, attach_type)?;
self.data
.links
.insert(CgroupSkbLink::new(CgroupSkbLinkInner::ProgAttach(
ProgAttachLink::new(prog_fd, cgroup_fd, attach_type),
)))
.insert(CgroupSkbLink::new(CgroupSkbLinkInner::ProgAttach(link)))
}
}
@ -179,15 +166,15 @@ impl Link for CgroupSkbLinkInner {
fn id(&self) -> Self::Id {
match self {
CgroupSkbLinkInner::Fd(fd) => CgroupSkbLinkIdInner::Fd(fd.id()),
CgroupSkbLinkInner::ProgAttach(p) => CgroupSkbLinkIdInner::ProgAttach(p.id()),
Self::Fd(fd) => CgroupSkbLinkIdInner::Fd(fd.id()),
Self::ProgAttach(p) => CgroupSkbLinkIdInner::ProgAttach(p.id()),
}
}
fn detach(self) -> Result<(), ProgramError> {
match self {
CgroupSkbLinkInner::Fd(fd) => fd.detach(),
CgroupSkbLinkInner::ProgAttach(p) => p.detach(),
Self::Fd(fd) => fd.detach(),
Self::ProgAttach(p) => p.detach(),
}
}
}

@ -1,20 +1,16 @@
//! Cgroup socket programs.
pub use aya_obj::programs::CgroupSockAttachType;
use std::{hash::Hash, os::fd::AsFd, path::Path};
use crate::util::KernelVersion;
use std::{
hash::Hash,
os::fd::{AsFd as _, AsRawFd},
path::Path,
};
pub use aya_obj::programs::CgroupSockAttachType;
use crate::{
generated::bpf_prog_type::BPF_PROG_TYPE_CGROUP_SOCK,
programs::{
define_link_wrapper, load_program, FdLink, Link, ProgAttachLink, ProgramData, ProgramError,
},
sys::{bpf_link_create, bpf_prog_attach, SyscallError},
sys::{bpf_link_create, LinkTarget, SyscallError},
util::KernelVersion,
VerifierLogLevel,
};
@ -70,37 +66,28 @@ impl CgroupSock {
/// Attaches the program to the given cgroup.
///
/// The returned value can be used to detach, see [CgroupSock::detach].
pub fn attach<T: AsRawFd>(&mut self, cgroup: T) -> Result<CgroupSockLinkId, ProgramError> {
pub fn attach<T: AsFd>(&mut self, cgroup: T) -> Result<CgroupSockLinkId, ProgramError> {
let prog_fd = self.fd()?;
let prog_fd = prog_fd.as_fd();
let prog_fd = prog_fd.as_raw_fd();
let cgroup_fd = cgroup.as_raw_fd();
let cgroup_fd = cgroup.as_fd();
let attach_type = self.data.expected_attach_type.unwrap();
if KernelVersion::current().unwrap() >= KernelVersion::new(5, 7, 0) {
let link_fd = bpf_link_create(prog_fd, cgroup_fd, attach_type, None, 0).map_err(
|(_, io_error)| SyscallError {
let link_fd = bpf_link_create(prog_fd, LinkTarget::Fd(cgroup_fd), attach_type, None, 0)
.map_err(|(_, io_error)| SyscallError {
call: "bpf_link_create",
io_error,
},
)?;
})?;
self.data
.links
.insert(CgroupSockLink::new(CgroupSockLinkInner::Fd(FdLink::new(
link_fd,
))))
} else {
bpf_prog_attach(prog_fd, cgroup_fd, attach_type).map_err(|(_, io_error)| {
SyscallError {
call: "bpf_prog_attach",
io_error,
}
})?;
let link = ProgAttachLink::attach(prog_fd, cgroup_fd, attach_type)?;
self.data
.links
.insert(CgroupSockLink::new(CgroupSockLinkInner::ProgAttach(
ProgAttachLink::new(prog_fd, cgroup_fd, attach_type),
)))
.insert(CgroupSockLink::new(CgroupSockLinkInner::ProgAttach(link)))
}
}
@ -151,15 +138,15 @@ impl Link for CgroupSockLinkInner {
fn id(&self) -> Self::Id {
match self {
CgroupSockLinkInner::Fd(fd) => CgroupSockLinkIdInner::Fd(fd.id()),
CgroupSockLinkInner::ProgAttach(p) => CgroupSockLinkIdInner::ProgAttach(p.id()),
Self::Fd(fd) => CgroupSockLinkIdInner::Fd(fd.id()),
Self::ProgAttach(p) => CgroupSockLinkIdInner::ProgAttach(p.id()),
}
}
fn detach(self) -> Result<(), ProgramError> {
match self {
CgroupSockLinkInner::Fd(fd) => fd.detach(),
CgroupSockLinkInner::ProgAttach(p) => p.detach(),
Self::Fd(fd) => fd.detach(),
Self::ProgAttach(p) => p.detach(),
}
}
}

@ -1,20 +1,16 @@
//! Cgroup socket address programs.
pub use aya_obj::programs::CgroupSockAddrAttachType;
use std::{hash::Hash, os::fd::AsFd, path::Path};
use crate::util::KernelVersion;
use std::{
hash::Hash,
os::fd::{AsFd as _, AsRawFd},
path::Path,
};
pub use aya_obj::programs::CgroupSockAddrAttachType;
use crate::{
generated::bpf_prog_type::BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
programs::{
define_link_wrapper, load_program, FdLink, Link, ProgAttachLink, ProgramData, ProgramError,
},
sys::{bpf_link_create, bpf_prog_attach, SyscallError},
sys::{bpf_link_create, LinkTarget, SyscallError},
util::KernelVersion,
VerifierLogLevel,
};
@ -71,38 +67,27 @@ impl CgroupSockAddr {
/// Attaches the program to the given cgroup.
///
/// The returned value can be used to detach, see [CgroupSockAddr::detach].
pub fn attach<T: AsRawFd>(&mut self, cgroup: T) -> Result<CgroupSockAddrLinkId, ProgramError> {
pub fn attach<T: AsFd>(&mut self, cgroup: T) -> Result<CgroupSockAddrLinkId, ProgramError> {
let prog_fd = self.fd()?;
let prog_fd = prog_fd.as_fd();
let prog_fd = prog_fd.as_raw_fd();
let cgroup_fd = cgroup.as_raw_fd();
let cgroup_fd = cgroup.as_fd();
let attach_type = self.data.expected_attach_type.unwrap();
if KernelVersion::current().unwrap() >= KernelVersion::new(5, 7, 0) {
let link_fd = bpf_link_create(prog_fd, cgroup_fd, attach_type, None, 0).map_err(
|(_, io_error)| SyscallError {
let link_fd = bpf_link_create(prog_fd, LinkTarget::Fd(cgroup_fd), attach_type, None, 0)
.map_err(|(_, io_error)| SyscallError {
call: "bpf_link_create",
io_error,
},
)?;
})?;
self.data
.links
.insert(CgroupSockAddrLink::new(CgroupSockAddrLinkInner::Fd(
FdLink::new(link_fd),
)))
} else {
bpf_prog_attach(prog_fd, cgroup_fd, attach_type).map_err(|(_, io_error)| {
SyscallError {
call: "bpf_prog_attach",
io_error,
}
})?;
let link = ProgAttachLink::attach(prog_fd, cgroup_fd, attach_type)?;
self.data.links.insert(CgroupSockAddrLink::new(
CgroupSockAddrLinkInner::ProgAttach(ProgAttachLink::new(
prog_fd,
cgroup_fd,
attach_type,
)),
CgroupSockAddrLinkInner::ProgAttach(link),
))
}
}
@ -157,15 +142,15 @@ impl Link for CgroupSockAddrLinkInner {
fn id(&self) -> Self::Id {
match self {
CgroupSockAddrLinkInner::Fd(fd) => CgroupSockAddrLinkIdInner::Fd(fd.id()),
CgroupSockAddrLinkInner::ProgAttach(p) => CgroupSockAddrLinkIdInner::ProgAttach(p.id()),
Self::Fd(fd) => CgroupSockAddrLinkIdInner::Fd(fd.id()),
Self::ProgAttach(p) => CgroupSockAddrLinkIdInner::ProgAttach(p.id()),
}
}
fn detach(self) -> Result<(), ProgramError> {
match self {
CgroupSockAddrLinkInner::Fd(fd) => fd.detach(),
CgroupSockAddrLinkInner::ProgAttach(p) => p.detach(),
Self::Fd(fd) => fd.detach(),
Self::ProgAttach(p) => p.detach(),
}
}
}

@ -1,20 +1,16 @@
//! Cgroup socket option programs.
pub use aya_obj::programs::CgroupSockoptAttachType;
use std::{hash::Hash, os::fd::AsFd, path::Path};
use crate::util::KernelVersion;
use std::{
hash::Hash,
os::fd::{AsFd as _, AsRawFd},
path::Path,
};
pub use aya_obj::programs::CgroupSockoptAttachType;
use crate::{
generated::bpf_prog_type::BPF_PROG_TYPE_CGROUP_SOCKOPT,
programs::{
define_link_wrapper, load_program, FdLink, Link, ProgAttachLink, ProgramData, ProgramError,
},
sys::{bpf_link_create, bpf_prog_attach, SyscallError},
sys::{bpf_link_create, LinkTarget, SyscallError},
util::KernelVersion,
VerifierLogLevel,
};
@ -68,36 +64,29 @@ impl CgroupSockopt {
/// Attaches the program to the given cgroup.
///
/// The returned value can be used to detach, see [CgroupSockopt::detach].
pub fn attach<T: AsRawFd>(&mut self, cgroup: T) -> Result<CgroupSockoptLinkId, ProgramError> {
pub fn attach<T: AsFd>(&mut self, cgroup: T) -> Result<CgroupSockoptLinkId, ProgramError> {
let prog_fd = self.fd()?;
let prog_fd = prog_fd.as_fd();
let prog_fd = prog_fd.as_raw_fd();
let cgroup_fd = cgroup.as_raw_fd();
let cgroup_fd = cgroup.as_fd();
let attach_type = self.data.expected_attach_type.unwrap();
if KernelVersion::current().unwrap() >= KernelVersion::new(5, 7, 0) {
let link_fd = bpf_link_create(prog_fd, cgroup_fd, attach_type, None, 0).map_err(
|(_, io_error)| SyscallError {
let link_fd = bpf_link_create(prog_fd, LinkTarget::Fd(cgroup_fd), attach_type, None, 0)
.map_err(|(_, io_error)| SyscallError {
call: "bpf_link_create",
io_error,
},
)?;
})?;
self.data
.links
.insert(CgroupSockoptLink::new(CgroupSockoptLinkInner::Fd(
FdLink::new(link_fd),
)))
} else {
bpf_prog_attach(prog_fd, cgroup_fd, attach_type).map_err(|(_, io_error)| {
SyscallError {
call: "bpf_prog_attach",
io_error,
}
})?;
let link = ProgAttachLink::attach(prog_fd, cgroup_fd, attach_type)?;
self.data
.links
.insert(CgroupSockoptLink::new(CgroupSockoptLinkInner::ProgAttach(
ProgAttachLink::new(prog_fd, cgroup_fd, attach_type),
link,
)))
}
}
@ -152,15 +141,15 @@ impl Link for CgroupSockoptLinkInner {
fn id(&self) -> Self::Id {
match self {
CgroupSockoptLinkInner::Fd(fd) => CgroupSockoptLinkIdInner::Fd(fd.id()),
CgroupSockoptLinkInner::ProgAttach(p) => CgroupSockoptLinkIdInner::ProgAttach(p.id()),
Self::Fd(fd) => CgroupSockoptLinkIdInner::Fd(fd.id()),
Self::ProgAttach(p) => CgroupSockoptLinkIdInner::ProgAttach(p.id()),
}
}
fn detach(self) -> Result<(), ProgramError> {
match self {
CgroupSockoptLinkInner::Fd(fd) => fd.detach(),
CgroupSockoptLinkInner::ProgAttach(p) => p.detach(),
Self::Fd(fd) => fd.detach(),
Self::ProgAttach(p) => p.detach(),
}
}
}

@ -1,17 +1,14 @@
//! Cgroup sysctl programs.
use crate::util::KernelVersion;
use std::{
hash::Hash,
os::fd::{AsFd as _, AsRawFd},
};
use std::{hash::Hash, os::fd::AsFd};
use crate::{
generated::{bpf_attach_type::BPF_CGROUP_SYSCTL, bpf_prog_type::BPF_PROG_TYPE_CGROUP_SYSCTL},
programs::{
define_link_wrapper, load_program, FdLink, Link, ProgAttachLink, ProgramData, ProgramError,
},
sys::{bpf_link_create, bpf_prog_attach, SyscallError},
sys::{bpf_link_create, LinkTarget, SyscallError},
util::KernelVersion,
};
/// A program used to watch for sysctl changes.
@ -62,36 +59,35 @@ impl CgroupSysctl {
/// Attaches the program to the given cgroup.
///
/// The returned value can be used to detach, see [CgroupSysctl::detach].
pub fn attach<T: AsRawFd>(&mut self, cgroup: T) -> Result<CgroupSysctlLinkId, ProgramError> {
pub fn attach<T: AsFd>(&mut self, cgroup: T) -> Result<CgroupSysctlLinkId, ProgramError> {
let prog_fd = self.fd()?;
let prog_fd = prog_fd.as_fd();
let prog_fd = prog_fd.as_raw_fd();
let cgroup_fd = cgroup.as_raw_fd();
let cgroup_fd = cgroup.as_fd();
if KernelVersion::current().unwrap() >= KernelVersion::new(5, 7, 0) {
let link_fd = bpf_link_create(prog_fd, cgroup_fd, BPF_CGROUP_SYSCTL, None, 0).map_err(
|(_, io_error)| SyscallError {
call: "bpf_link_create",
io_error,
},
)?;
let link_fd = bpf_link_create(
prog_fd,
LinkTarget::Fd(cgroup_fd),
BPF_CGROUP_SYSCTL,
None,
0,
)
.map_err(|(_, io_error)| SyscallError {
call: "bpf_link_create",
io_error,
})?;
self.data
.links
.insert(CgroupSysctlLink::new(CgroupSysctlLinkInner::Fd(
FdLink::new(link_fd),
)))
} else {
bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_SYSCTL).map_err(|(_, io_error)| {
SyscallError {
call: "bpf_prog_attach",
io_error,
}
})?;
let link = ProgAttachLink::attach(prog_fd, cgroup_fd, BPF_CGROUP_SYSCTL)?;
self.data
.links
.insert(CgroupSysctlLink::new(CgroupSysctlLinkInner::ProgAttach(
ProgAttachLink::new(prog_fd, cgroup_fd, BPF_CGROUP_SYSCTL),
link,
)))
}
}
@ -132,15 +128,15 @@ impl Link for CgroupSysctlLinkInner {
fn id(&self) -> Self::Id {
match self {
CgroupSysctlLinkInner::Fd(fd) => CgroupSysctlLinkIdInner::Fd(fd.id()),
CgroupSysctlLinkInner::ProgAttach(p) => CgroupSysctlLinkIdInner::ProgAttach(p.id()),
Self::Fd(fd) => CgroupSysctlLinkIdInner::Fd(fd.id()),
Self::ProgAttach(p) => CgroupSysctlLinkIdInner::ProgAttach(p.id()),
}
}
fn detach(self) -> Result<(), ProgramError> {
match self {
CgroupSysctlLinkInner::Fd(fd) => fd.detach(),
CgroupSysctlLinkInner::ProgAttach(p) => p.detach(),
Self::Fd(fd) => fd.detach(),
Self::ProgAttach(p) => p.detach(),
}
}
}

@ -1,8 +1,9 @@
//! Extension programs.
use std::os::fd::{AsFd as _, AsRawFd as _, BorrowedFd, OwnedFd};
use thiserror::Error;
use std::os::fd::{AsFd as _, BorrowedFd, OwnedFd};
use object::Endianness;
use thiserror::Error;
use crate::{
generated::{bpf_attach_type::BPF_CGROUP_INET_INGRESS, bpf_prog_type::BPF_PROG_TYPE_EXT},
@ -10,7 +11,7 @@ use crate::{
programs::{
define_link_wrapper, load_program, FdLink, FdLinkId, ProgramData, ProgramError, ProgramFd,
},
sys::{self, bpf_link_create, SyscallError},
sys::{self, bpf_link_create, LinkTarget, SyscallError},
Btf,
};
@ -88,21 +89,25 @@ impl Extension {
pub fn attach(&mut self) -> Result<ExtensionLinkId, ProgramError> {
let prog_fd = self.fd()?;
let prog_fd = prog_fd.as_fd();
let prog_fd = prog_fd.as_raw_fd();
let target_fd = self
.data
.attach_prog_fd
.as_ref()
.ok_or(ProgramError::NotLoaded)?;
let target_fd = target_fd.as_fd();
let target_fd = target_fd.as_raw_fd();
let btf_id = self.data.attach_btf_id.ok_or(ProgramError::NotLoaded)?;
        // the attach type must be set to 0, which corresponds to bpf_attach_type::BPF_CGROUP_INET_INGRESS
let link_fd = bpf_link_create(prog_fd, target_fd, BPF_CGROUP_INET_INGRESS, Some(btf_id), 0)
.map_err(|(_, io_error)| SyscallError {
call: "bpf_link_create",
io_error,
})?;
let link_fd = bpf_link_create(
prog_fd,
LinkTarget::Fd(target_fd),
BPF_CGROUP_INET_INGRESS,
Some(btf_id),
0,
)
.map_err(|(_, io_error)| SyscallError {
call: "bpf_link_create",
io_error,
})?;
self.data
.links
.insert(ExtensionLink::new(FdLink::new(link_fd)))
@ -128,11 +133,10 @@ impl Extension {
let (_, btf_id) = get_btf_info(target_fd, func_name)?;
let prog_fd = self.fd()?;
let prog_fd = prog_fd.as_fd();
let prog_fd = prog_fd.as_raw_fd();
        // the attach type must be set to 0, which corresponds to bpf_attach_type::BPF_CGROUP_INET_INGRESS
let link_fd = bpf_link_create(
prog_fd,
target_fd.as_raw_fd(),
LinkTarget::Fd(target_fd),
BPF_CGROUP_INET_INGRESS,
Some(btf_id),
0,

@ -1,5 +1,11 @@
//! Kernel space probes.
use std::{io, os::fd::AsFd as _, path::Path};
use std::{
ffi::OsStr,
io,
os::fd::AsFd as _,
path::{Path, PathBuf},
};
use thiserror::Error;
use crate::{
@ -67,8 +73,12 @@ impl KProbe {
/// target function.
///
/// The returned value can be used to detach from the given function, see [KProbe::detach].
pub fn attach(&mut self, fn_name: &str, offset: u64) -> Result<KProbeLinkId, ProgramError> {
attach(&mut self.data, self.kind, fn_name, offset, None)
pub fn attach<T: AsRef<OsStr>>(
&mut self,
fn_name: T,
offset: u64,
) -> Result<KProbeLinkId, ProgramError> {
attach(&mut self.data, self.kind, fn_name.as_ref(), offset, None)
}
/// Detaches the program.
@ -114,7 +124,7 @@ pub enum KProbeError {
#[error("`{filename}`")]
FileError {
/// The file name
filename: String,
filename: PathBuf,
/// The [`io::Error`] returned from the file operation
#[source]
io_error: io::Error,
@ -139,7 +149,7 @@ impl TryFrom<FdLink> for KProbeLink {
fn try_from(fd_link: FdLink) -> Result<Self, Self::Error> {
let info = bpf_link_get_info_by_fd(fd_link.fd.as_fd())?;
if info.type_ == (bpf_link_type::BPF_LINK_TYPE_KPROBE_MULTI as u32) {
return Ok(KProbeLink::new(PerfLinkInner::FdLink(fd_link)));
return Ok(Self::new(PerfLinkInner::FdLink(fd_link)));
}
Err(LinkError::InvalidLink)
}
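
Because `attach` now takes any `AsRef<OsStr>`, plain string literals keep working and `OsString` values (for example read from the environment) are accepted too. A hedged sketch; the program name `count_wakeups` and the probed symbol are assumptions:

```rust
// Hypothetical usage of the OsStr-based attach signature.
use aya::programs::KProbe;

fn attach_probe(bpf: &mut aya::Bpf) -> Result<(), Box<dyn std::error::Error>> {
    let prog: &mut KProbe = bpf.program_mut("count_wakeups").unwrap().try_into()?;
    prog.load()?;
    // &str coerces via AsRef<OsStr>; offset 0 probes the function entry.
    prog.attach("try_to_wake_up", 0)?;
    Ok(())
}
```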

@ -1,20 +1,19 @@
//! Program links.
use libc::{close, dup};
use thiserror::Error;
use std::{
collections::{hash_map::Entry, HashMap},
ffi::CString,
io,
os::fd::{AsRawFd as _, OwnedFd, RawFd},
os::fd::{AsFd as _, AsRawFd as _, BorrowedFd, OwnedFd, RawFd},
path::{Path, PathBuf},
};
use thiserror::Error;
use crate::{
generated::bpf_attach_type,
pin::PinError,
programs::ProgramError,
sys::{bpf_get_object, bpf_pin_object, bpf_prog_detach, SyscallError},
programs::{ProgramError, ProgramFd},
sys::{bpf_get_object, bpf_pin_object, bpf_prog_attach, bpf_prog_detach, SyscallError},
};
/// A Link.
@ -35,8 +34,8 @@ pub(crate) struct LinkMap<T: Link> {
}
impl<T: Link> LinkMap<T> {
pub(crate) fn new() -> LinkMap<T> {
LinkMap {
pub(crate) fn new() -> Self {
Self {
links: HashMap::new(),
}
}
@ -112,8 +111,8 @@ pub struct FdLink {
}
impl FdLink {
pub(crate) fn new(fd: OwnedFd) -> FdLink {
FdLink { fd }
pub(crate) fn new(fd: OwnedFd) -> Self {
Self { fd }
}
/// Pins the link to a BPF file system.
@ -146,19 +145,20 @@ impl FdLink {
/// # Ok::<(), Error>(())
/// ```
pub fn pin<P: AsRef<Path>>(self, path: P) -> Result<PinnedLink, PinError> {
let path_string =
CString::new(path.as_ref().to_string_lossy().into_owned()).map_err(|e| {
PinError::InvalidPinPath {
error: e.to_string(),
}
})?;
bpf_pin_object(self.fd.as_raw_fd(), &path_string).map_err(|(_, io_error)| {
SyscallError {
call: "BPF_OBJ_PIN",
io_error,
use std::os::unix::ffi::OsStrExt as _;
let path = path.as_ref();
let path_string = CString::new(path.as_os_str().as_bytes()).map_err(|error| {
PinError::InvalidPinPath {
path: path.into(),
error,
}
})?;
Ok(PinnedLink::new(PathBuf::from(path.as_ref()), self))
bpf_pin_object(self.fd.as_fd(), &path_string).map_err(|(_, io_error)| SyscallError {
call: "BPF_OBJ_PIN",
io_error,
})?;
Ok(PinnedLink::new(path.into(), self))
}
}
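
The conversion above goes through `OsStrExt::as_bytes` instead of `to_string_lossy`, so non-UTF-8 paths survive and the only failure mode is an interior NUL byte. The same idea in isolation:

```rust
// Unix-only sketch: convert a Path to a CString without lossy UTF-8 handling.
use std::{ffi::CString, os::unix::ffi::OsStrExt as _, path::Path};

fn path_to_cstring(path: &Path) -> Result<CString, std::ffi::NulError> {
    // Fails only if the path contains an interior NUL byte.
    CString::new(path.as_os_str().as_bytes())
}
```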
@ -198,22 +198,22 @@ pub struct PinnedLink {
impl PinnedLink {
fn new(path: PathBuf, link: FdLink) -> Self {
PinnedLink { inner: link, path }
Self { inner: link, path }
}
/// Creates a [`crate::programs::links::PinnedLink`] from a valid path on bpffs.
pub fn from_pin<P: AsRef<Path>>(path: P) -> Result<Self, LinkError> {
let path_string = CString::new(path.as_ref().to_string_lossy().to_string()).unwrap();
use std::os::unix::ffi::OsStrExt as _;
// TODO: avoid this unwrap by adding a new error variant.
let path_string = CString::new(path.as_ref().as_os_str().as_bytes()).unwrap();
let fd = bpf_get_object(&path_string).map_err(|(_, io_error)| {
LinkError::SyscallError(SyscallError {
call: "BPF_OBJ_GET",
io_error,
})
})?;
Ok(PinnedLink::new(
path.as_ref().to_path_buf(),
FdLink::new(fd),
))
Ok(Self::new(path.as_ref().to_path_buf(), FdLink::new(fd)))
}
/// Removes the pinned link from the filesystem and returns an [`FdLink`].
@ -230,22 +230,31 @@ pub struct ProgAttachLinkId(RawFd, RawFd, bpf_attach_type);
/// The Link type used by programs that are attached with `bpf_prog_attach`.
#[derive(Debug)]
pub struct ProgAttachLink {
prog_fd: RawFd,
target_fd: RawFd,
prog_fd: ProgramFd,
target_fd: OwnedFd,
attach_type: bpf_attach_type,
}
impl ProgAttachLink {
pub(crate) fn new(
prog_fd: RawFd,
target_fd: RawFd,
pub(crate) fn attach(
prog_fd: BorrowedFd<'_>,
target_fd: BorrowedFd<'_>,
attach_type: bpf_attach_type,
) -> ProgAttachLink {
ProgAttachLink {
) -> Result<Self, ProgramError> {
// The link is going to own this new file descriptor so we are
// going to need a duplicate whose lifetime we manage. Let's
// duplicate it prior to attaching it so the new file
// descriptor is closed at drop in case it fails to attach.
let prog_fd = prog_fd.try_clone_to_owned()?;
let target_fd = target_fd.try_clone_to_owned()?;
bpf_prog_attach(prog_fd.as_fd(), target_fd.as_fd(), attach_type)?;
let prog_fd = ProgramFd(prog_fd);
Ok(Self {
prog_fd,
target_fd: unsafe { dup(target_fd) },
target_fd,
attach_type,
}
})
}
}
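
The comment in `ProgAttachLink::attach` describes a clone-before-attach pattern; a small generic illustration of why the duplicate is taken first (the function here is illustrative):

```rust
// Sketch: duplicate a borrowed fd into an OwnedFd before a fallible step, so
// the duplicate is closed automatically on drop if that step returns early.
use std::os::fd::{AsFd, OwnedFd};

fn attach_owning<F: AsFd>(fd: F) -> std::io::Result<OwnedFd> {
    let owned: OwnedFd = fd.as_fd().try_clone_to_owned()?;
    // ... the fallible attach would go here; an early `?` return drops
    // `owned` and closes the duplicate, never the caller's original fd ...
    Ok(owned)
}
```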
@ -253,13 +262,20 @@ impl Link for ProgAttachLink {
type Id = ProgAttachLinkId;
fn id(&self) -> Self::Id {
ProgAttachLinkId(self.prog_fd, self.target_fd, self.attach_type)
ProgAttachLinkId(
self.prog_fd.as_fd().as_raw_fd(),
self.target_fd.as_raw_fd(),
self.attach_type,
)
}
fn detach(self) -> Result<(), ProgramError> {
let _ = bpf_prog_detach(self.prog_fd, self.target_fd, self.attach_type);
unsafe { close(self.target_fd) };
Ok(())
bpf_prog_detach(
self.prog_fd.as_fd(),
self.target_fd.as_fd(),
self.attach_type,
)
.map_err(Into::into)
}
}
@ -300,7 +316,7 @@ macro_rules! define_link_wrapper {
}
}
impl crate::programs::Link for $wrapper {
impl $crate::programs::Link for $wrapper {
type Id = $wrapper_id;
fn id(&self) -> Self::Id {
@ -341,13 +357,13 @@ pub enum LinkError {
#[cfg(test)]
mod tests {
use assert_matches::assert_matches;
use std::{cell::RefCell, fs::File, rc::Rc};
use tempfile::tempdir;
use crate::{programs::ProgramError, sys::override_syscall};
use assert_matches::assert_matches;
use tempfile::tempdir;
use super::{FdLink, Link, LinkMap};
use crate::{programs::ProgramError, sys::override_syscall};
#[derive(Debug, Hash, Eq, PartialEq)]
struct TestLinkId(u8, u8);
@ -359,8 +375,8 @@ mod tests {
}
impl TestLink {
fn new(a: u8, b: u8) -> TestLink {
TestLink {
fn new(a: u8, b: u8) -> Self {
Self {
id: (a, b),
detached: Rc::new(RefCell::new(0)),
}

@ -1,14 +1,12 @@
//! Lirc programs.
use std::os::fd::{AsFd as _, AsRawFd, BorrowedFd, IntoRawFd as _, RawFd};
use std::os::fd::{AsFd, AsRawFd as _, OwnedFd, RawFd};
use crate::{
generated::{bpf_attach_type::BPF_LIRC_MODE2, bpf_prog_type::BPF_PROG_TYPE_LIRC_MODE2},
programs::{load_program, query, Link, ProgramData, ProgramError, ProgramInfo},
sys::{bpf_prog_attach, bpf_prog_detach, bpf_prog_get_fd_by_id, SyscallError},
programs::{load_program, query, Link, ProgramData, ProgramError, ProgramFd, ProgramInfo},
sys::{bpf_prog_attach, bpf_prog_detach, bpf_prog_get_fd_by_id},
};
use libc::{close, dup};
/// A program used to decode IR into key events for a lirc device.
///
/// [`LircMode2`] programs can be used to inspect infrared pulses, spaces,
@ -60,18 +58,17 @@ impl LircMode2 {
/// Attaches the program to the given lirc device.
///
/// The returned value can be used to detach, see [LircMode2::detach].
pub fn attach<T: AsRawFd>(&mut self, lircdev: T) -> Result<LircLinkId, ProgramError> {
pub fn attach<T: AsFd>(&mut self, lircdev: T) -> Result<LircLinkId, ProgramError> {
let prog_fd = self.fd()?;
let prog_fd = prog_fd.as_fd();
let prog_fd = prog_fd.as_raw_fd();
let lircdev_fd = lircdev.as_raw_fd();
bpf_prog_attach(prog_fd, lircdev_fd, BPF_LIRC_MODE2).map_err(|(_, io_error)| {
SyscallError {
call: "bpf_prog_attach",
io_error,
}
})?;
// The link is going to own this new file descriptor so we are
// going to need a duplicate whose lifetime we manage. Let's
// duplicate it prior to attaching it so the new file
// descriptor is closed at drop in case it fails to attach.
let prog_fd = prog_fd.try_clone()?;
let lircdev_fd = lircdev.as_fd().try_clone_to_owned()?;
bpf_prog_attach(prog_fd.as_fd(), lircdev_fd.as_fd(), BPF_LIRC_MODE2)?;
self.data.links.insert(LircLink::new(prog_fd, lircdev_fd))
}
@ -92,20 +89,19 @@ impl LircMode2 {
}
/// Queries the lirc device for attached programs.
pub fn query<T: AsRawFd>(target_fd: T) -> Result<Vec<LircLink>, ProgramError> {
let prog_ids = query(target_fd.as_raw_fd(), BPF_LIRC_MODE2, 0, &mut None)?;
let mut prog_fds = Vec::with_capacity(prog_ids.len());
pub fn query<T: AsFd>(target_fd: T) -> Result<Vec<LircLink>, ProgramError> {
let target_fd = target_fd.as_fd();
let prog_ids = query(target_fd, BPF_LIRC_MODE2, 0, &mut None)?;
for id in prog_ids {
let fd = bpf_prog_get_fd_by_id(id)?;
prog_fds.push(fd);
}
Ok(prog_fds
prog_ids
.into_iter()
.map(|prog_fd| LircLink::new(prog_fd.into_raw_fd(), target_fd.as_raw_fd()))
.collect())
.map(|prog_id| {
let prog_fd = bpf_prog_get_fd_by_id(prog_id)?;
let target_fd = target_fd.try_clone_to_owned()?;
let prog_fd = ProgramFd(prog_fd);
Ok(LircLink::new(prog_fd, target_fd))
})
.collect()
}
}
@ -116,23 +112,22 @@ pub struct LircLinkId(RawFd, RawFd);
#[derive(Debug)]
/// An LircMode2 Link
pub struct LircLink {
prog_fd: RawFd,
target_fd: RawFd,
prog_fd: ProgramFd,
target_fd: OwnedFd,
}
impl LircLink {
pub(crate) fn new(prog_fd: RawFd, target_fd: RawFd) -> LircLink {
LircLink {
prog_fd,
target_fd: unsafe { dup(target_fd) },
}
pub(crate) fn new(prog_fd: ProgramFd, target_fd: OwnedFd) -> Self {
Self { prog_fd, target_fd }
}
    /// Gets the [`ProgramInfo`] of this link.
pub fn info(&self) -> Result<ProgramInfo, ProgramError> {
// SAFETY: TODO(https://github.com/aya-rs/aya/issues/612): make this safe by not holding `RawFd`s.
let prog_fd = unsafe { BorrowedFd::borrow_raw(self.prog_fd) };
ProgramInfo::new_from_fd(prog_fd)
let Self {
prog_fd,
target_fd: _,
} = self;
ProgramInfo::new_from_fd(prog_fd.as_fd())
}
}
@ -140,12 +135,11 @@ impl Link for LircLink {
type Id = LircLinkId;
fn id(&self) -> Self::Id {
LircLinkId(self.prog_fd, self.target_fd)
LircLinkId(self.prog_fd.as_fd().as_raw_fd(), self.target_fd.as_raw_fd())
}
fn detach(self) -> Result<(), ProgramError> {
let _ = bpf_prog_detach(self.prog_fd, self.target_fd, BPF_LIRC_MODE2);
unsafe { close(self.target_fd) };
Ok(())
bpf_prog_detach(self.prog_fd.as_fd(), self.target_fd.as_fd(), BPF_LIRC_MODE2)
.map_err(Into::into)
}
}

@ -64,7 +64,6 @@ pub mod uprobe;
mod utils;
pub mod xdp;
use libc::ENOSPC;
use std::{
ffi::CString,
io,
@ -74,7 +73,6 @@ use std::{
sync::Arc,
time::{Duration, SystemTime},
};
use thiserror::Error;
pub use cgroup_device::CgroupDevice;
pub use cgroup_skb::{CgroupSkb, CgroupSkbAttachType};
@ -86,6 +84,7 @@ pub use extension::{Extension, ExtensionError};
pub use fentry::FEntry;
pub use fexit::FExit;
pub use kprobe::{KProbe, KProbeError};
use libc::ENOSPC;
pub use links::Link;
use links::*;
pub use lirc_mode2::LircMode2;
@ -100,6 +99,7 @@ pub use sk_skb::{SkSkb, SkSkbKind};
pub use sock_ops::SockOps;
pub use socket_filter::{SocketFilter, SocketFilterError};
pub use tc::{SchedClassifier, TcAttachType, TcError};
use thiserror::Error;
pub use tp_btf::BtfTracePoint;
pub use trace_point::{TracePoint, TracePointError};
pub use uprobe::{UProbe, UProbeError};
@ -108,7 +108,7 @@ pub use xdp::{Xdp, XdpError, XdpFlags};
use crate::{
generated::{bpf_attach_type, bpf_link_info, bpf_prog_info, bpf_prog_type},
maps::MapError,
obj::{self, btf::BtfError, Function, VerifierLog},
obj::{self, btf::BtfError, VerifierLog},
pin::PinError,
programs::utils::{boot_time, get_fdinfo},
sys::{
@ -117,7 +117,7 @@ use crate::{
bpf_prog_query, iter_link_ids, iter_prog_ids, retry_with_verifier_logs,
BpfLoadProgramAttrs, SyscallError,
},
util::KernelVersion,
util::{bytes_of_bpf_name, KernelVersion},
VerifierLogLevel,
};
@ -218,9 +218,8 @@ pub enum ProgramError {
pub struct ProgramFd(OwnedFd);
impl ProgramFd {
/// Creates a new `ProgramFd` instance that shares the same underlying file
/// description as the existing `ProgramFd` instance.
pub fn try_clone(&self) -> Result<Self, ProgramError> {
/// Creates a new instance that shares the same underlying file description as [`self`].
pub fn try_clone(&self) -> io::Result<Self> {
let Self(inner) = self;
let inner = inner.try_clone()?;
Ok(Self(inner))
@ -292,90 +291,90 @@ impl Program {
pub fn prog_type(&self) -> bpf_prog_type {
use crate::generated::bpf_prog_type::*;
match self {
Program::KProbe(_) => BPF_PROG_TYPE_KPROBE,
Program::UProbe(_) => BPF_PROG_TYPE_KPROBE,
Program::TracePoint(_) => BPF_PROG_TYPE_TRACEPOINT,
Program::SocketFilter(_) => BPF_PROG_TYPE_SOCKET_FILTER,
Program::Xdp(_) => BPF_PROG_TYPE_XDP,
Program::SkMsg(_) => BPF_PROG_TYPE_SK_MSG,
Program::SkSkb(_) => BPF_PROG_TYPE_SK_SKB,
Program::SockOps(_) => BPF_PROG_TYPE_SOCK_OPS,
Program::SchedClassifier(_) => BPF_PROG_TYPE_SCHED_CLS,
Program::CgroupSkb(_) => BPF_PROG_TYPE_CGROUP_SKB,
Program::CgroupSysctl(_) => BPF_PROG_TYPE_CGROUP_SYSCTL,
Program::CgroupSockopt(_) => BPF_PROG_TYPE_CGROUP_SOCKOPT,
Program::LircMode2(_) => BPF_PROG_TYPE_LIRC_MODE2,
Program::PerfEvent(_) => BPF_PROG_TYPE_PERF_EVENT,
Program::RawTracePoint(_) => BPF_PROG_TYPE_RAW_TRACEPOINT,
Program::Lsm(_) => BPF_PROG_TYPE_LSM,
Program::BtfTracePoint(_) => BPF_PROG_TYPE_TRACING,
Program::FEntry(_) => BPF_PROG_TYPE_TRACING,
Program::FExit(_) => BPF_PROG_TYPE_TRACING,
Program::Extension(_) => BPF_PROG_TYPE_EXT,
Program::CgroupSockAddr(_) => BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
Program::SkLookup(_) => BPF_PROG_TYPE_SK_LOOKUP,
Program::CgroupSock(_) => BPF_PROG_TYPE_CGROUP_SOCK,
Program::CgroupDevice(_) => BPF_PROG_TYPE_CGROUP_DEVICE,
Self::KProbe(_) => BPF_PROG_TYPE_KPROBE,
Self::UProbe(_) => BPF_PROG_TYPE_KPROBE,
Self::TracePoint(_) => BPF_PROG_TYPE_TRACEPOINT,
Self::SocketFilter(_) => BPF_PROG_TYPE_SOCKET_FILTER,
Self::Xdp(_) => BPF_PROG_TYPE_XDP,
Self::SkMsg(_) => BPF_PROG_TYPE_SK_MSG,
Self::SkSkb(_) => BPF_PROG_TYPE_SK_SKB,
Self::SockOps(_) => BPF_PROG_TYPE_SOCK_OPS,
Self::SchedClassifier(_) => BPF_PROG_TYPE_SCHED_CLS,
Self::CgroupSkb(_) => BPF_PROG_TYPE_CGROUP_SKB,
Self::CgroupSysctl(_) => BPF_PROG_TYPE_CGROUP_SYSCTL,
Self::CgroupSockopt(_) => BPF_PROG_TYPE_CGROUP_SOCKOPT,
Self::LircMode2(_) => BPF_PROG_TYPE_LIRC_MODE2,
Self::PerfEvent(_) => BPF_PROG_TYPE_PERF_EVENT,
Self::RawTracePoint(_) => BPF_PROG_TYPE_RAW_TRACEPOINT,
Self::Lsm(_) => BPF_PROG_TYPE_LSM,
Self::BtfTracePoint(_) => BPF_PROG_TYPE_TRACING,
Self::FEntry(_) => BPF_PROG_TYPE_TRACING,
Self::FExit(_) => BPF_PROG_TYPE_TRACING,
Self::Extension(_) => BPF_PROG_TYPE_EXT,
Self::CgroupSockAddr(_) => BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
Self::SkLookup(_) => BPF_PROG_TYPE_SK_LOOKUP,
Self::CgroupSock(_) => BPF_PROG_TYPE_CGROUP_SOCK,
Self::CgroupDevice(_) => BPF_PROG_TYPE_CGROUP_DEVICE,
}
}
/// Pin the program to the provided path
pub fn pin<P: AsRef<Path>>(&mut self, path: P) -> Result<(), PinError> {
match self {
Program::KProbe(p) => p.pin(path),
Program::UProbe(p) => p.pin(path),
Program::TracePoint(p) => p.pin(path),
Program::SocketFilter(p) => p.pin(path),
Program::Xdp(p) => p.pin(path),
Program::SkMsg(p) => p.pin(path),
Program::SkSkb(p) => p.pin(path),
Program::SockOps(p) => p.pin(path),
Program::SchedClassifier(p) => p.pin(path),
Program::CgroupSkb(p) => p.pin(path),
Program::CgroupSysctl(p) => p.pin(path),
Program::CgroupSockopt(p) => p.pin(path),
Program::LircMode2(p) => p.pin(path),
Program::PerfEvent(p) => p.pin(path),
Program::RawTracePoint(p) => p.pin(path),
Program::Lsm(p) => p.pin(path),
Program::BtfTracePoint(p) => p.pin(path),
Program::FEntry(p) => p.pin(path),
Program::FExit(p) => p.pin(path),
Program::Extension(p) => p.pin(path),
Program::CgroupSockAddr(p) => p.pin(path),
Program::SkLookup(p) => p.pin(path),
Program::CgroupSock(p) => p.pin(path),
Program::CgroupDevice(p) => p.pin(path),
Self::KProbe(p) => p.pin(path),
Self::UProbe(p) => p.pin(path),
Self::TracePoint(p) => p.pin(path),
Self::SocketFilter(p) => p.pin(path),
Self::Xdp(p) => p.pin(path),
Self::SkMsg(p) => p.pin(path),
Self::SkSkb(p) => p.pin(path),
Self::SockOps(p) => p.pin(path),
Self::SchedClassifier(p) => p.pin(path),
Self::CgroupSkb(p) => p.pin(path),
Self::CgroupSysctl(p) => p.pin(path),
Self::CgroupSockopt(p) => p.pin(path),
Self::LircMode2(p) => p.pin(path),
Self::PerfEvent(p) => p.pin(path),
Self::RawTracePoint(p) => p.pin(path),
Self::Lsm(p) => p.pin(path),
Self::BtfTracePoint(p) => p.pin(path),
Self::FEntry(p) => p.pin(path),
Self::FExit(p) => p.pin(path),
Self::Extension(p) => p.pin(path),
Self::CgroupSockAddr(p) => p.pin(path),
Self::SkLookup(p) => p.pin(path),
Self::CgroupSock(p) => p.pin(path),
Self::CgroupDevice(p) => p.pin(path),
}
}
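For reference, a minimal sketch of pinning through this API, assuming a bpffs mount at /sys/fs/bpf and a program named "my_prog" in the loaded object (both placeholders, not taken from this diff); error handling via Box<dyn Error> is illustrative only:

use aya::{programs::KProbe, Bpf};

fn pin_example(bpf: &mut Bpf) -> Result<(), Box<dyn std::error::Error>> {
    // Downcast the generic Program to its concrete type so it can be loaded.
    let prog: &mut KProbe = bpf.program_mut("my_prog").unwrap().try_into()?;
    prog.load()?;
    // Pin the loaded program so it survives after this process exits.
    prog.pin("/sys/fs/bpf/my_prog")?;
    Ok(())
}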
/// Unloads the program from the kernel.
pub fn unload(self) -> Result<(), ProgramError> {
match self {
Program::KProbe(mut p) => p.unload(),
Program::UProbe(mut p) => p.unload(),
Program::TracePoint(mut p) => p.unload(),
Program::SocketFilter(mut p) => p.unload(),
Program::Xdp(mut p) => p.unload(),
Program::SkMsg(mut p) => p.unload(),
Program::SkSkb(mut p) => p.unload(),
Program::SockOps(mut p) => p.unload(),
Program::SchedClassifier(mut p) => p.unload(),
Program::CgroupSkb(mut p) => p.unload(),
Program::CgroupSysctl(mut p) => p.unload(),
Program::CgroupSockopt(mut p) => p.unload(),
Program::LircMode2(mut p) => p.unload(),
Program::PerfEvent(mut p) => p.unload(),
Program::RawTracePoint(mut p) => p.unload(),
Program::Lsm(mut p) => p.unload(),
Program::BtfTracePoint(mut p) => p.unload(),
Program::FEntry(mut p) => p.unload(),
Program::FExit(mut p) => p.unload(),
Program::Extension(mut p) => p.unload(),
Program::CgroupSockAddr(mut p) => p.unload(),
Program::SkLookup(mut p) => p.unload(),
Program::CgroupSock(mut p) => p.unload(),
Program::CgroupDevice(mut p) => p.unload(),
Self::KProbe(mut p) => p.unload(),
Self::UProbe(mut p) => p.unload(),
Self::TracePoint(mut p) => p.unload(),
Self::SocketFilter(mut p) => p.unload(),
Self::Xdp(mut p) => p.unload(),
Self::SkMsg(mut p) => p.unload(),
Self::SkSkb(mut p) => p.unload(),
Self::SockOps(mut p) => p.unload(),
Self::SchedClassifier(mut p) => p.unload(),
Self::CgroupSkb(mut p) => p.unload(),
Self::CgroupSysctl(mut p) => p.unload(),
Self::CgroupSockopt(mut p) => p.unload(),
Self::LircMode2(mut p) => p.unload(),
Self::PerfEvent(mut p) => p.unload(),
Self::RawTracePoint(mut p) => p.unload(),
Self::Lsm(mut p) => p.unload(),
Self::BtfTracePoint(mut p) => p.unload(),
Self::FEntry(mut p) => p.unload(),
Self::FExit(mut p) => p.unload(),
Self::Extension(mut p) => p.unload(),
Self::CgroupSockAddr(mut p) => p.unload(),
Self::SkLookup(mut p) => p.unload(),
Self::CgroupSock(mut p) => p.unload(),
Self::CgroupDevice(mut p) => p.unload(),
}
}
@ -384,30 +383,63 @@ impl Program {
/// Can be used to add a program to a [`crate::maps::ProgramArray`] or attach an [`Extension`] program.
pub fn fd(&self) -> Result<&ProgramFd, ProgramError> {
match self {
Program::KProbe(p) => p.fd(),
Program::UProbe(p) => p.fd(),
Program::TracePoint(p) => p.fd(),
Program::SocketFilter(p) => p.fd(),
Program::Xdp(p) => p.fd(),
Program::SkMsg(p) => p.fd(),
Program::SkSkb(p) => p.fd(),
Program::SockOps(p) => p.fd(),
Program::SchedClassifier(p) => p.fd(),
Program::CgroupSkb(p) => p.fd(),
Program::CgroupSysctl(p) => p.fd(),
Program::CgroupSockopt(p) => p.fd(),
Program::LircMode2(p) => p.fd(),
Program::PerfEvent(p) => p.fd(),
Program::RawTracePoint(p) => p.fd(),
Program::Lsm(p) => p.fd(),
Program::BtfTracePoint(p) => p.fd(),
Program::FEntry(p) => p.fd(),
Program::FExit(p) => p.fd(),
Program::Extension(p) => p.fd(),
Program::CgroupSockAddr(p) => p.fd(),
Program::SkLookup(p) => p.fd(),
Program::CgroupSock(p) => p.fd(),
Program::CgroupDevice(p) => p.fd(),
Self::KProbe(p) => p.fd(),
Self::UProbe(p) => p.fd(),
Self::TracePoint(p) => p.fd(),
Self::SocketFilter(p) => p.fd(),
Self::Xdp(p) => p.fd(),
Self::SkMsg(p) => p.fd(),
Self::SkSkb(p) => p.fd(),
Self::SockOps(p) => p.fd(),
Self::SchedClassifier(p) => p.fd(),
Self::CgroupSkb(p) => p.fd(),
Self::CgroupSysctl(p) => p.fd(),
Self::CgroupSockopt(p) => p.fd(),
Self::LircMode2(p) => p.fd(),
Self::PerfEvent(p) => p.fd(),
Self::RawTracePoint(p) => p.fd(),
Self::Lsm(p) => p.fd(),
Self::BtfTracePoint(p) => p.fd(),
Self::FEntry(p) => p.fd(),
Self::FExit(p) => p.fd(),
Self::Extension(p) => p.fd(),
Self::CgroupSockAddr(p) => p.fd(),
Self::SkLookup(p) => p.fd(),
Self::CgroupSock(p) => p.fd(),
Self::CgroupDevice(p) => p.fd(),
}
}
/// Returns information about a loaded program with the [`ProgramInfo`] structure.
///
/// This information is populated at load time by the kernel and can be used
/// to get kernel details for a given [`Program`].
pub fn info(&self) -> Result<ProgramInfo, ProgramError> {
match self {
Self::KProbe(p) => p.info(),
Self::UProbe(p) => p.info(),
Self::TracePoint(p) => p.info(),
Self::SocketFilter(p) => p.info(),
Self::Xdp(p) => p.info(),
Self::SkMsg(p) => p.info(),
Self::SkSkb(p) => p.info(),
Self::SockOps(p) => p.info(),
Self::SchedClassifier(p) => p.info(),
Self::CgroupSkb(p) => p.info(),
Self::CgroupSysctl(p) => p.info(),
Self::CgroupSockopt(p) => p.info(),
Self::LircMode2(p) => p.info(),
Self::PerfEvent(p) => p.info(),
Self::RawTracePoint(p) => p.info(),
Self::Lsm(p) => p.info(),
Self::BtfTracePoint(p) => p.info(),
Self::FEntry(p) => p.info(),
Self::FExit(p) => p.info(),
Self::Extension(p) => p.info(),
Self::CgroupSockAddr(p) => p.info(),
Self::SkLookup(p) => p.info(),
Self::CgroupSock(p) => p.info(),
Self::CgroupDevice(p) => p.info(),
}
}
}
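The new info() accessor on the Program enum makes it possible to query kernel metadata without downcasting to a concrete program type. A minimal sketch, assuming a Bpf object whose programs have already been loaded; error handling via Box<dyn Error> is illustrative only:

use aya::Bpf;

fn print_program_info(bpf: &Bpf) -> Result<(), Box<dyn std::error::Error>> {
    for (name, program) in bpf.programs() {
        // Populated by the kernel at load time.
        let info = program.info()?;
        println!("{name}: id {}", info.id());
    }
    Ok(())
}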
@ -434,8 +466,8 @@ impl<T: Link> ProgramData<T> {
obj: (obj::Program, obj::Function),
btf_fd: Option<Arc<OwnedFd>>,
verifier_log_level: VerifierLogLevel,
) -> ProgramData<T> {
ProgramData {
) -> Self {
Self {
name,
obj: Some(obj),
fd: None,
@ -457,7 +489,7 @@ impl<T: Link> ProgramData<T> {
path: &Path,
info: bpf_prog_info,
verifier_log_level: VerifierLogLevel,
) -> Result<ProgramData<T>, ProgramError> {
) -> Result<Self, ProgramError> {
let attach_btf_id = if info.attach_btf_id > 0 {
Some(info.attach_btf_id)
} else {
@ -467,7 +499,7 @@ impl<T: Link> ProgramData<T> {
.then(|| bpf_btf_get_fd_by_id(info.attach_btf_obj_id))
.transpose()?;
Ok(ProgramData {
Ok(Self {
name,
obj: None,
fd: Some(ProgramFd(fd)),
@ -486,10 +518,11 @@ impl<T: Link> ProgramData<T> {
pub(crate) fn from_pinned_path<P: AsRef<Path>>(
path: P,
verifier_log_level: VerifierLogLevel,
) -> Result<ProgramData<T>, ProgramError> {
let path_string =
CString::new(path.as_ref().as_os_str().to_string_lossy().as_bytes()).unwrap();
) -> Result<Self, ProgramError> {
use std::os::unix::ffi::OsStrExt as _;
// TODO: avoid this unwrap by adding a new error variant.
let path_string = CString::new(path.as_ref().as_os_str().as_bytes()).unwrap();
let fd = bpf_get_object(&path_string).map_err(|(_, io_error)| SyscallError {
call: "bpf_obj_get",
io_error,
@ -497,7 +530,7 @@ impl<T: Link> ProgramData<T> {
let info = ProgramInfo::new_from_fd(fd.as_fd())?;
let name = info.name_as_str().map(|s| s.to_string());
ProgramData::from_bpf_prog_info(name, fd, path.as_ref(), info.0, verifier_log_level)
Self::from_bpf_prog_info(name, fd, path.as_ref(), info.0, verifier_log_level)
}
}
@ -520,6 +553,8 @@ fn unload_program<T: Link>(data: &mut ProgramData<T>) -> Result<(), ProgramError
}
fn pin_program<T: Link, P: AsRef<Path>>(data: &ProgramData<T>, path: P) -> Result<(), PinError> {
use std::os::unix::ffi::OsStrExt as _;
let fd = data.fd.as_ref().ok_or(PinError::NoFd {
name: data
.name
@ -527,12 +562,13 @@ fn pin_program<T: Link, P: AsRef<Path>>(data: &ProgramData<T>, path: P) -> Resul
.unwrap_or("<unknown program>")
.to_string(),
})?;
let path_string = CString::new(path.as_ref().to_string_lossy().into_owned()).map_err(|e| {
PinError::InvalidPinPath {
error: e.to_string(),
}
})?;
bpf_pin_object(fd.as_fd().as_raw_fd(), &path_string).map_err(|(_, io_error)| SyscallError {
let path = path.as_ref();
let path_string =
CString::new(path.as_os_str().as_bytes()).map_err(|error| PinError::InvalidPinPath {
path: path.into(),
error,
})?;
bpf_pin_object(fd.as_fd(), &path_string).map_err(|(_, io_error)| SyscallError {
call: "BPF_OBJ_PIN",
io_error,
})?;
@ -571,7 +607,7 @@ fn load_program<T: Link>(
kernel_version,
..
},
Function {
obj::Function {
instructions,
func_info,
line_info,
@ -581,14 +617,8 @@ fn load_program<T: Link>(
},
) = obj;
let target_kernel_version = kernel_version.unwrap_or_else(|| {
let KernelVersion {
major,
minor,
patch,
} = KernelVersion::current().unwrap();
(u32::from(major) << 16) + (u32::from(minor) << 8) + u32::from(patch)
});
let target_kernel_version =
kernel_version.unwrap_or_else(|| KernelVersion::current().unwrap().code());
let prog_name = if let Some(name) = name {
let mut name = name.clone();
@ -636,8 +666,8 @@ fn load_program<T: Link>(
}
}
pub(crate) fn query<T: AsRawFd>(
target_fd: T,
pub(crate) fn query(
target_fd: BorrowedFd<'_>,
attach_type: bpf_attach_type,
query_flags: u32,
attach_flags: &mut Option<u32>,
@ -649,7 +679,7 @@ pub(crate) fn query<T: AsRawFd>(
loop {
match bpf_prog_query(
target_fd.as_raw_fd(),
target_fd.as_fd().as_raw_fd(),
attach_type,
query_flags,
attach_flags.as_mut(),
@ -843,7 +873,6 @@ macro_rules! impl_from_pin {
impl_from_pin!(
TracePoint,
SocketFilter,
Xdp,
SkMsg,
CgroupSysctl,
LircMode2,
@ -919,12 +948,12 @@ impl_try_from_program!(
/// This information is populated at load time by the kernel and can be used
/// to correlate a given [`Program`] to its corresponding [`ProgramInfo`]
/// metadata.
macro_rules! impl_program_info {
macro_rules! impl_info {
($($struct_name:ident),+ $(,)?) => {
$(
impl $struct_name {
/// Returns information about this program using the [`ProgramInfo`] structure.
pub fn program_info(&self) -> Result<ProgramInfo, ProgramError> {
pub fn info(&self) -> Result<ProgramInfo, ProgramError> {
let ProgramFd(fd) = self.fd()?;
ProgramInfo::new_from_fd(fd.as_fd())
@ -934,7 +963,7 @@ macro_rules! impl_program_info {
}
}
impl_program_info!(
impl_info!(
KProbe,
UProbe,
TracePoint,
@ -973,17 +1002,7 @@ impl ProgramInfo {
/// The name of the program as it was provided when it was loaded. This is limited to 16 bytes.
pub fn name(&self) -> &[u8] {
let length = self
.0
.name
.iter()
.rposition(|ch| *ch != 0)
.map(|pos| pos + 1)
.unwrap_or(0);
// The name field is defined as [std::os::raw::c_char; 16]. c_char may be signed or
// unsigned depending on the platform; that's why we're using from_raw_parts here
unsafe { std::slice::from_raw_parts(self.0.name.as_ptr() as *const _, length) }
bytes_of_bpf_name(&self.0.name)
}
/// The name of the program as a &str. If the name was not valid unicode, None is returned.
@ -1072,15 +1091,18 @@ impl ProgramInfo {
}
/// Loads a program from a pinned path in bpffs.
pub fn from_pin<P: AsRef<Path>>(path: P) -> Result<ProgramInfo, ProgramError> {
let path_string = CString::new(path.as_ref().to_str().unwrap()).unwrap();
pub fn from_pin<P: AsRef<Path>>(path: P) -> Result<Self, ProgramError> {
use std::os::unix::ffi::OsStrExt as _;
// TODO: avoid this unwrap by adding a new error variant.
let path_string = CString::new(path.as_ref().as_os_str().as_bytes()).unwrap();
let fd = bpf_get_object(&path_string).map_err(|(_, io_error)| SyscallError {
call: "BPF_OBJ_GET",
io_error,
})?;
let info = bpf_prog_get_info_by_fd(fd.as_fd(), &mut [])?;
Ok(ProgramInfo(info))
Ok(Self(info))
}
}
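ProgramInfo::from_pin can likewise be used to inspect a program pinned by another process. A minimal sketch, assuming a program was previously pinned at the path below (a placeholder) and that the caller has permission to open it:

use aya::programs::ProgramInfo;

fn inspect_pinned() -> Result<(), Box<dyn std::error::Error>> {
    let info = ProgramInfo::from_pin("/sys/fs/bpf/my_prog")?;
    println!("name: {:?}, id: {}", info.name_as_str(), info.id());
    Ok(())
}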

@ -1,5 +1,5 @@
//! Perf attach links.
use std::os::fd::{AsFd as _, AsRawFd as _, OwnedFd, RawFd};
use std::os::fd::{AsFd as _, AsRawFd as _, BorrowedFd, OwnedFd, RawFd};
use crate::{
generated::bpf_attach_type::BPF_PERF_EVENT,
@ -7,7 +7,7 @@ use crate::{
probe::{detach_debug_fs, ProbeEvent},
FdLink, Link, ProgramError,
},
sys::{bpf_link_create, perf_event_ioctl, SysResult, SyscallError},
sys::{bpf_link_create, perf_event_ioctl, LinkTarget, SysResult, SyscallError},
FEATURES, PERF_EVENT_IOC_DISABLE, PERF_EVENT_IOC_ENABLE, PERF_EVENT_IOC_SET_BPF,
};
@ -28,15 +28,15 @@ impl Link for PerfLinkInner {
fn id(&self) -> Self::Id {
match self {
PerfLinkInner::FdLink(link) => PerfLinkIdInner::FdLinkId(link.id()),
PerfLinkInner::PerfLink(link) => PerfLinkIdInner::PerfLinkId(link.id()),
Self::FdLink(link) => PerfLinkIdInner::FdLinkId(link.id()),
Self::PerfLink(link) => PerfLinkIdInner::PerfLinkId(link.id()),
}
}
fn detach(self) -> Result<(), ProgramError> {
match self {
PerfLinkInner::FdLink(link) => link.detach(),
PerfLinkInner::PerfLink(link) => link.detach(),
Self::FdLink(link) => link.detach(),
Self::PerfLink(link) => link.detach(),
}
}
}
@ -70,14 +70,16 @@ impl Link for PerfLink {
}
}
pub(crate) fn perf_attach(prog_fd: RawFd, fd: OwnedFd) -> Result<PerfLinkInner, ProgramError> {
pub(crate) fn perf_attach(
prog_fd: BorrowedFd<'_>,
fd: OwnedFd,
) -> Result<PerfLinkInner, ProgramError> {
if FEATURES.bpf_perf_link() {
let link_fd = bpf_link_create(prog_fd, fd.as_raw_fd(), BPF_PERF_EVENT, None, 0).map_err(
|(_, io_error)| SyscallError {
let link_fd = bpf_link_create(prog_fd, LinkTarget::Fd(fd.as_fd()), BPF_PERF_EVENT, None, 0)
.map_err(|(_, io_error)| SyscallError {
call: "bpf_link_create",
io_error,
},
)?;
})?;
Ok(PerfLinkInner::FdLink(FdLink::new(link_fd)))
} else {
perf_attach_either(prog_fd, fd, None)
@ -85,7 +87,7 @@ pub(crate) fn perf_attach(prog_fd: RawFd, fd: OwnedFd) -> Result<PerfLinkInner,
}
pub(crate) fn perf_attach_debugfs(
prog_fd: RawFd,
prog_fd: BorrowedFd<'_>,
fd: OwnedFd,
event: ProbeEvent,
) -> Result<PerfLinkInner, ProgramError> {
@ -93,16 +95,16 @@ pub(crate) fn perf_attach_debugfs(
}
fn perf_attach_either(
prog_fd: RawFd,
prog_fd: BorrowedFd<'_>,
fd: OwnedFd,
event: Option<ProbeEvent>,
) -> Result<PerfLinkInner, ProgramError> {
perf_event_ioctl(fd.as_fd(), PERF_EVENT_IOC_SET_BPF, prog_fd).map_err(|(_, io_error)| {
SyscallError {
perf_event_ioctl(fd.as_fd(), PERF_EVENT_IOC_SET_BPF, prog_fd.as_raw_fd()).map_err(
|(_, io_error)| SyscallError {
call: "PERF_EVENT_IOC_SET_BPF",
io_error,
}
})?;
},
)?;
perf_event_ioctl(fd.as_fd(), PERF_EVENT_IOC_ENABLE, 0).map_err(|(_, io_error)| {
SyscallError {
call: "PERF_EVENT_IOC_ENABLE",

@ -1,11 +1,10 @@
//! Perf event programs.
use std::os::fd::{AsFd as _, AsRawFd as _};
use std::os::fd::AsFd as _;
pub use crate::generated::{
perf_hw_cache_id, perf_hw_cache_op_id, perf_hw_cache_op_result_id, perf_hw_id, perf_sw_ids,
};
use crate::{
generated::{
bpf_link_type,
@ -148,7 +147,6 @@ impl PerfEvent {
) -> Result<PerfEventLinkId, ProgramError> {
let prog_fd = self.fd()?;
let prog_fd = prog_fd.as_fd();
let prog_fd = prog_fd.as_raw_fd();
let (sample_period, sample_frequency) = match sample_policy {
SamplePolicy::Period(period) => (period, None),
SamplePolicy::Frequency(frequency) => (0, Some(frequency)),
@ -213,7 +211,7 @@ impl TryFrom<FdLink> for PerfEventLink {
fn try_from(fd_link: FdLink) -> Result<Self, Self::Error> {
let info = bpf_link_get_info_by_fd(fd_link.fd.as_fd())?;
if info.type_ == (bpf_link_type::BPF_LINK_TYPE_PERF_EVENT as u32) {
return Ok(PerfEventLink::new(PerfLinkInner::FdLink(fd_link)));
return Ok(Self::new(PerfLinkInner::FdLink(fd_link)));
}
Err(LinkError::InvalidLink)
}

@ -1,14 +1,16 @@
use crate::util::KernelVersion;
use libc::pid_t;
use std::{
ffi::{OsStr, OsString},
fmt::Write as _,
fs::{self, OpenOptions},
io::{self, Write},
os::fd::{AsFd as _, AsRawFd as _, OwnedFd},
path::Path,
os::fd::{AsFd as _, OwnedFd},
path::{Path, PathBuf},
process,
sync::atomic::{AtomicUsize, Ordering},
};
use libc::pid_t;
use crate::{
programs::{
kprobe::KProbeError, perf_attach, perf_attach::PerfLinkInner, perf_attach_debugfs,
@ -16,6 +18,7 @@ use crate::{
Link, ProgramData, ProgramError,
},
sys::{perf_event_open_probe, perf_event_open_trace_point, SyscallError},
util::KernelVersion,
};
static PROBE_NAME_INDEX: AtomicUsize = AtomicUsize::new(0);
@ -36,22 +39,76 @@ pub enum ProbeKind {
impl ProbeKind {
fn pmu(&self) -> &'static str {
match *self {
ProbeKind::KProbe | ProbeKind::KRetProbe => "kprobe",
ProbeKind::UProbe | ProbeKind::URetProbe => "uprobe",
Self::KProbe | Self::KRetProbe => "kprobe",
Self::UProbe | Self::URetProbe => "uprobe",
}
}
}
pub(crate) fn lines(bytes: &[u8]) -> impl Iterator<Item = &OsStr> {
use std::os::unix::ffi::OsStrExt as _;
bytes.as_ref().split(|b| b == &b'\n').map(|mut line| {
while let [stripped @ .., c] = line {
if c.is_ascii_whitespace() {
line = stripped;
continue;
}
break;
}
OsStr::from_bytes(line)
})
}
pub(crate) trait OsStringExt {
fn starts_with(&self, needle: &OsStr) -> bool;
fn ends_with(&self, needle: &OsStr) -> bool;
fn strip_prefix(&self, prefix: &OsStr) -> Option<&OsStr>;
fn strip_suffix(&self, suffix: &OsStr) -> Option<&OsStr>;
}
impl OsStringExt for OsStr {
fn starts_with(&self, needle: &OsStr) -> bool {
use std::os::unix::ffi::OsStrExt as _;
self.as_bytes().starts_with(needle.as_bytes())
}
fn ends_with(&self, needle: &OsStr) -> bool {
use std::os::unix::ffi::OsStrExt as _;
self.as_bytes().ends_with(needle.as_bytes())
}
fn strip_prefix(&self, prefix: &OsStr) -> Option<&OsStr> {
use std::os::unix::ffi::OsStrExt as _;
self.as_bytes()
.strip_prefix(prefix.as_bytes())
.map(Self::from_bytes)
}
fn strip_suffix(&self, suffix: &OsStr) -> Option<&OsStr> {
use std::os::unix::ffi::OsStrExt as _;
self.as_bytes()
.strip_suffix(suffix.as_bytes())
.map(Self::from_bytes)
}
}
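The lines helper and the OsStringExt trait above are crate-private; the following standalone re-implementation only illustrates the trailing-whitespace trimming that lines performs on raw tracefs output (the sample bytes are made up):

use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt as _;

fn trimmed_lines(bytes: &[u8]) -> impl Iterator<Item = &OsStr> {
    bytes.split(|b| *b == b'\n').map(|mut line| {
        // Strip trailing ASCII whitespace, byte by byte, without assuming UTF-8.
        while let [rest @ .., c] = line {
            if c.is_ascii_whitespace() {
                line = rest;
            } else {
                break;
            }
        }
        OsStr::from_bytes(line)
    })
}

fn main() {
    let events = b"p:kprobes/foo do_sys_open+0 \nr:kprobes/bar do_sys_open\n";
    for line in trimmed_lines(events) {
        println!("{line:?}");
    }
}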
#[derive(Debug)]
pub(crate) struct ProbeEvent {
kind: ProbeKind,
event_alias: String,
event_alias: OsString,
}
pub(crate) fn attach<T: Link + From<PerfLinkInner>>(
program_data: &mut ProgramData<T>,
kind: ProbeKind,
fn_name: &str,
// NB: the meaning of this argument is different for kprobe/kretprobe and uprobe/uretprobe; in
// the kprobe case it is the name of the function to attach to, in the uprobe case it is a path
// to the binary or library.
//
// TODO: consider encoding the type and the argument in the [`ProbeKind`] enum instead of a
// separate argument.
fn_name: &OsStr,
offset: u64,
pid: Option<pid_t>,
) -> Result<T::Id, ProgramError> {
@ -59,7 +116,6 @@ pub(crate) fn attach<T: Link + From<PerfLinkInner>>(
// Use debugfs to create probe
let prog_fd = program_data.fd()?;
let prog_fd = prog_fd.as_fd();
let prog_fd = prog_fd.as_raw_fd();
let link = if KernelVersion::current().unwrap() < KernelVersion::new(4, 17, 0) {
let (fd, event_alias) = create_as_trace_point(kind, fn_name, offset, pid)?;
perf_attach_debugfs(prog_fd, fd, ProbeEvent { kind, event_alias })
@ -90,7 +146,7 @@ pub(crate) fn detach_debug_fs(event: ProbeEvent) -> Result<(), ProgramError> {
fn create_as_probe(
kind: ProbeKind,
fn_name: &str,
fn_name: &OsStr,
offset: u64,
pid: Option<pid_t>,
) -> Result<OwnedFd, ProgramError> {
@ -126,10 +182,10 @@ fn create_as_probe(
fn create_as_trace_point(
kind: ProbeKind,
name: &str,
name: &OsStr,
offset: u64,
pid: Option<pid_t>,
) -> Result<(OwnedFd, String), ProgramError> {
) -> Result<(OwnedFd, OsString), ProgramError> {
use ProbeKind::*;
let tracefs = find_tracefs_path()?;
@ -142,7 +198,7 @@ fn create_as_trace_point(
};
let category = format!("{}s", kind.pmu());
let tpid = read_sys_fs_trace_point_id(tracefs, &category, &event_alias)?;
let tpid = read_sys_fs_trace_point_id(tracefs, &category, event_alias.as_ref())?;
let fd = perf_event_open_trace_point(tpid, pid).map_err(|(_code, io_error)| SyscallError {
call: "perf_event_open",
io_error,
@ -154,9 +210,11 @@ fn create_as_trace_point(
fn create_probe_event(
tracefs: &Path,
kind: ProbeKind,
fn_name: &str,
fn_name: &OsStr,
offset: u64,
) -> Result<String, (String, io::Error)> {
) -> Result<OsString, (PathBuf, io::Error)> {
use std::os::unix::ffi::OsStrExt as _;
use ProbeKind::*;
let events_file_name = tracefs.join(format!("{}_events", kind.pmu()));
@ -165,93 +223,127 @@ fn create_probe_event(
KRetProbe | URetProbe => 'r',
};
let fixed_fn_name = fn_name.replace(['.', '/', '-'], "_");
let event_alias = format!(
"aya_{}_{}_{}_{:#x}_{}",
let mut event_alias = OsString::new();
write!(
&mut event_alias,
"aya_{}_{}_",
process::id(),
probe_type_prefix,
fixed_fn_name,
)
.unwrap();
for b in fn_name.as_bytes() {
let b = match *b {
b'.' | b'/' | b'-' => b'_',
b => b,
};
event_alias.push(OsStr::from_bytes(&[b]));
}
write!(
&mut event_alias,
"_{:#x}_{}",
offset,
PROBE_NAME_INDEX.fetch_add(1, Ordering::AcqRel)
);
let offset_suffix = match kind {
KProbe => format!("+{offset}"),
UProbe | URetProbe => format!(":{offset:#x}"),
_ => String::new(),
)
.unwrap();
let mut probe = OsString::new();
write!(&mut probe, "{}:{}s/", probe_type_prefix, kind.pmu(),).unwrap();
probe.push(&event_alias);
probe.push(" ");
probe.push(fn_name);
match kind {
KProbe => write!(&mut probe, "+{offset}").unwrap(),
UProbe | URetProbe => write!(&mut probe, ":{offset:#x}").unwrap(),
_ => {}
};
let probe = format!(
"{}:{}s/{} {}{}\n",
probe_type_prefix,
kind.pmu(),
event_alias,
fn_name,
offset_suffix
);
probe.push("\n");
let mut events_file = OpenOptions::new()
OpenOptions::new()
.append(true)
.open(&events_file_name)
.map_err(|e| (events_file_name.display().to_string(), e))?;
events_file
.write_all(probe.as_bytes())
.map_err(|e| (events_file_name.display().to_string(), e))?;
.and_then(|mut events_file| events_file.write_all(probe.as_bytes()))
.map_err(|e| (events_file_name, e))?;
Ok(event_alias)
}
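To make the formatting above concrete, here is a small, self-contained sketch that reproduces the probe definition create_probe_event would append to <tracefs>/kprobe_events for a kprobe on do_sys_open at offset 0, assuming pid 1234 and name index 0 (both made up):

fn main() {
    let (pid, prefix, fn_name, offset, index) = (1234u32, 'p', "do_sys_open", 0u64, 0usize);
    // The event alias embeds the pid, the probe-type prefix, the sanitized
    // target name, the offset and a per-process counter.
    let event_alias = format!(
        "aya_{pid}_{prefix}_{}_{offset:#x}_{index}",
        fn_name.replace(['.', '/', '-'], "_")
    );
    let probe = format!("{prefix}:kprobes/{event_alias} {fn_name}+{offset}\n");
    assert_eq!(probe, "p:kprobes/aya_1234_p_do_sys_open_0x0_0 do_sys_open+0\n");
    // delete_probe_event later appends "-:<event_alias>\n" to the same file.
}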
fn delete_probe_event(tracefs: &Path, event: ProbeEvent) -> Result<(), (String, io::Error)> {
fn delete_probe_event(tracefs: &Path, event: ProbeEvent) -> Result<(), (PathBuf, io::Error)> {
use std::os::unix::ffi::OsStrExt as _;
let ProbeEvent { kind, event_alias } = event;
let events_file_name = tracefs.join(format!("{}_events", kind.pmu()));
let events = fs::read_to_string(&events_file_name)
.map_err(|e| (events_file_name.display().to_string(), e))?;
let found = events.lines().any(|line| line.contains(&event_alias));
if found {
let mut events_file = OpenOptions::new()
.append(true)
.open(&events_file_name)
.map_err(|e| (events_file_name.display().to_string(), e))?;
let rm = format!("-:{event_alias}\n");
events_file
.write_all(rm.as_bytes())
.map_err(|e| (events_file_name.display().to_string(), e))?;
}
Ok(())
fs::read(&events_file_name)
.and_then(|events| {
let found = lines(&events).any(|line| {
let mut line = line.as_bytes();
// See [`create_probe_event`] and the documentation:
//
// https://docs.kernel.org/trace/kprobetrace.html
//
// https://docs.kernel.org/trace/uprobetracer.html
loop {
match line.split_first() {
None => break false,
Some((b, rest)) => {
line = rest;
if *b == b'/' {
break line.starts_with(event_alias.as_bytes());
}
}
}
}
});
if found {
OpenOptions::new()
.append(true)
.open(&events_file_name)
.and_then(|mut events_file| {
let mut rm = OsString::new();
rm.push("-:");
rm.push(event_alias);
rm.push("\n");
events_file.write_all(rm.as_bytes())
})
} else {
Ok(())
}
})
.map_err(|e| (events_file_name, e))
}
fn read_sys_fs_perf_type(pmu: &str) -> Result<u32, (String, io::Error)> {
let file = format!("/sys/bus/event_source/devices/{pmu}/type");
let perf_ty = fs::read_to_string(&file).map_err(|e| (file.clone(), e))?;
let perf_ty = perf_ty
.trim()
.parse::<u32>()
.map_err(|e| (file, io::Error::new(io::ErrorKind::Other, e)))?;
Ok(perf_ty)
fn read_sys_fs_perf_type(pmu: &str) -> Result<u32, (PathBuf, io::Error)> {
let file = Path::new("/sys/bus/event_source/devices")
.join(pmu)
.join("type");
fs::read_to_string(&file)
.and_then(|perf_ty| {
perf_ty
.trim()
.parse::<u32>()
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
})
.map_err(|e| (file, e))
}
fn read_sys_fs_perf_ret_probe(pmu: &str) -> Result<u32, (String, io::Error)> {
let file = format!("/sys/bus/event_source/devices/{pmu}/format/retprobe");
let data = fs::read_to_string(&file).map_err(|e| (file.clone(), e))?;
let mut parts = data.trim().splitn(2, ':').skip(1);
let config = parts.next().ok_or_else(|| {
(
file.clone(),
io::Error::new(io::ErrorKind::Other, "invalid format"),
)
})?;
config
.parse::<u32>()
.map_err(|e| (file, io::Error::new(io::ErrorKind::Other, e)))
fn read_sys_fs_perf_ret_probe(pmu: &str) -> Result<u32, (PathBuf, io::Error)> {
let file = Path::new("/sys/bus/event_source/devices")
.join(pmu)
.join("format/retprobe");
fs::read_to_string(&file)
.and_then(|data| {
let mut parts = data.trim().splitn(2, ':').skip(1);
let config = parts
.next()
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "invalid format"))?;
config
.parse::<u32>()
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
})
.map_err(|e| (file, e))
}

@ -1,13 +1,12 @@
use std::os::fd::{AsFd as _, AsRawFd};
use std::os::fd::AsFd;
use super::links::FdLink;
use crate::{
generated::{bpf_attach_type::BPF_SK_LOOKUP, bpf_prog_type::BPF_PROG_TYPE_SK_LOOKUP},
programs::{define_link_wrapper, load_program, FdLinkId, ProgramData, ProgramError},
sys::{bpf_link_create, SyscallError},
sys::{bpf_link_create, LinkTarget, SyscallError},
};
use super::links::FdLink;
/// A program used to redirect incoming packets to a local socket.
///
/// [`SkLookup`] programs are attached to network namespaces to provide programmable
@ -60,18 +59,16 @@ impl SkLookup {
/// Attaches the program to the given network namespace.
///
/// The returned value can be used to detach, see [SkLookup::detach].
pub fn attach<T: AsRawFd>(&mut self, netns: T) -> Result<SkLookupLinkId, ProgramError> {
pub fn attach<T: AsFd>(&mut self, netns: T) -> Result<SkLookupLinkId, ProgramError> {
let prog_fd = self.fd()?;
let prog_fd = prog_fd.as_fd();
let prog_fd = prog_fd.as_raw_fd();
let netns_fd = netns.as_raw_fd();
let netns_fd = netns.as_fd();
let link_fd = bpf_link_create(prog_fd, netns_fd, BPF_SK_LOOKUP, None, 0).map_err(
|(_, io_error)| SyscallError {
let link_fd = bpf_link_create(prog_fd, LinkTarget::Fd(netns_fd), BPF_SK_LOOKUP, None, 0)
.map_err(|(_, io_error)| SyscallError {
call: "bpf_link_create",
io_error,
},
)?;
})?;
self.data
.links
.insert(SkLookupLink::new(FdLink::new(link_fd)))
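With attach now bounded by AsFd, callers pass a borrowed namespace fd instead of a RawFd. A minimal sketch, assuming a network namespace file at /var/run/netns/test and an sk_lookup program named "redirect" in the loaded object (both placeholders):

use std::fs::File;
use aya::{programs::SkLookup, Bpf};

fn attach_sk_lookup(bpf: &mut Bpf) -> Result<(), Box<dyn std::error::Error>> {
    let netns = File::open("/var/run/netns/test")?;
    let prog: &mut SkLookup = bpf.program_mut("redirect").unwrap().try_into()?;
    prog.load()?;
    // The fd is only borrowed for the duration of the attach call.
    prog.attach(&netns)?;
    Ok(())
}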

@ -1,6 +1,6 @@
//! Skmsg programs.
use std::os::fd::{AsFd as _, AsRawFd as _};
use std::os::fd::AsFd as _;
use crate::{
generated::{bpf_attach_type::BPF_SK_MSG_VERDICT, bpf_prog_type::BPF_PROG_TYPE_SK_MSG},
@ -9,7 +9,6 @@ use crate::{
define_link_wrapper, load_program, ProgAttachLink, ProgAttachLinkId, ProgramData,
ProgramError,
},
sys::{bpf_prog_attach, SyscallError},
};
/// A program used to intercept messages sent with `sendmsg()`/`sendfile()`.
@ -44,11 +43,11 @@ use crate::{
/// use aya::programs::SkMsg;
///
/// let intercept_egress: SockHash<_, u32> = bpf.map("INTERCEPT_EGRESS").unwrap().try_into()?;
/// let map_fd = intercept_egress.fd()?;
/// let map_fd = intercept_egress.fd().try_clone()?;
///
/// let prog: &mut SkMsg = bpf.program_mut("intercept_egress_packet").unwrap().try_into()?;
/// prog.load()?;
/// prog.attach(map_fd)?;
/// prog.attach(&map_fd)?;
///
/// let mut client = TcpStream::connect("127.0.0.1:1234")?;
/// let mut intercept_egress: SockHash<_, u32> = bpf.map_mut("INTERCEPT_EGRESS").unwrap().try_into()?;
@ -78,23 +77,12 @@ impl SkMsg {
/// Attaches the program to the given sockmap.
///
/// The returned value can be used to detach, see [SkMsg::detach].
pub fn attach(&mut self, map: SockMapFd) -> Result<SkMsgLinkId, ProgramError> {
pub fn attach(&mut self, map: &SockMapFd) -> Result<SkMsgLinkId, ProgramError> {
let prog_fd = self.fd()?;
let prog_fd = prog_fd.as_fd();
let prog_fd = prog_fd.as_raw_fd();
let map_fd = map.as_raw_fd();
let link = ProgAttachLink::attach(prog_fd, map.as_fd(), BPF_SK_MSG_VERDICT)?;
bpf_prog_attach(prog_fd, map_fd, BPF_SK_MSG_VERDICT).map_err(|(_, io_error)| {
SyscallError {
call: "bpf_prog_attach",
io_error,
}
})?;
self.data.links.insert(SkMsgLink::new(ProgAttachLink::new(
prog_fd,
map_fd,
BPF_SK_MSG_VERDICT,
)))
self.data.links.insert(SkMsgLink::new(link))
}
/// Detaches the program from a sockmap.
