aya+ebpf: Implement read+write methods for PerfEventArray

This allows reading _and_ writing a PerfEventArray, from userspace _and_ from the kernel.
reviewable/pr649/r1
Guillaume Raffin 2 years ago
parent cc29c8a74d
commit 6cba3c5120

@ -8,14 +8,16 @@ use std::{
sync::Arc, sync::Arc,
}; };
use aya_obj::generated::BPF_ANY;
use bytes::BytesMut; use bytes::BytesMut;
use crate::{ use crate::{
maps::{ maps::{
check_bounds,
perf::{Events, PerfBuffer, PerfBufferError}, perf::{Events, PerfBuffer, PerfBufferError},
MapData, MapError, MapData, MapError,
}, },
sys::bpf_map_update_elem, sys::{bpf_map_update_elem, SyscallError},
util::page_size, util::page_size,
}; };
@ -197,4 +199,27 @@ impl<T: BorrowMut<MapData>> PerfEventArray<T> {
_map: self.map.clone(), _map: self.map.clone(),
}) })
} }
/// Inserts a perf_event file descriptor at the given index.
///
/// ## Errors
///
/// Returns [`MapError::OutOfBounds`] if `index` is out of bounds, [`MapError::SyscallError`]
/// if `bpf_map_update_elem` fails.
pub fn set(&mut self, index: u32, value: i32) -> Result<(), MapError> {
    let map_data: &MapData = self.map.deref().borrow();
    check_bounds(map_data, index)?;
    // Only BPF_ANY or BPF_EXIST are accepted here, and for array maps they behave
    // identically: every slot of an array always exists.
    bpf_map_update_elem(
        map_data.fd().as_fd(),
        Some(&index),
        &value,
        BPF_ANY as u64,
    )
    .map_err(|(_, io_error)| SyscallError {
        call: "bpf_map_update_elem",
        io_error,
    })?;
    Ok(())
}
} }

@ -1,12 +1,25 @@
use core::{cell::UnsafeCell, marker::PhantomData, mem}; use core::{
cell::UnsafeCell,
marker::PhantomData,
mem::{self, MaybeUninit},
};
use aya_bpf_bindings::{bindings::bpf_perf_event_value, helpers::bpf_perf_event_read_value};
use crate::{ use crate::{
bindings::{bpf_map_def, bpf_map_type::BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_F_CURRENT_CPU}, bindings::{bpf_map_def, bpf_map_type::BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_F_CURRENT_CPU, BPF_F_INDEX_MASK},
helpers::bpf_perf_event_output, helpers::bpf_perf_event_output,
maps::PinningType, maps::PinningType,
BpfContext, BpfContext,
}; };
/// A map of type `BPF_MAP_TYPE_PERF_EVENT_ARRAY`.
///
/// # Minimum kernel version
///
/// The minimum kernel version required to read perf_event values using [`PerfEventArray`] is 4.15.
/// This concerns the functions [`read_current_cpu()`], [`read_at_index()`] and [`read()`].
///
#[repr(transparent)] #[repr(transparent)]
pub struct PerfEventArray<T> { pub struct PerfEventArray<T> {
def: UnsafeCell<bpf_map_def>, def: UnsafeCell<bpf_map_def>,
@ -50,20 +63,54 @@ impl<T> PerfEventArray<T> {
} }
} }
pub fn output<C: BpfContext>(&self, ctx: &C, data: &T, flags: u32) { pub fn output_current_cpu<C: BpfContext>(&self, ctx: &C, data: &T) -> Result<(), i64> {
self.output_at_index(ctx, BPF_F_CURRENT_CPU as u32, data, flags) self.output(ctx, data, BPF_F_CURRENT_CPU)
}
pub fn output_at_index<C: BpfContext>(&self, ctx: &C, data: &T, index: u32) -> Result<(), i64> {
self.output(ctx, data, (index as u64) & BPF_F_INDEX_MASK)
} }
pub fn output_at_index<C: BpfContext>(&self, ctx: &C, index: u32, data: &T, flags: u32) { fn output<C: BpfContext>(&self, ctx: &C, data: &T, flags: u64) -> Result<(), i64> {
let flags = u64::from(flags) << 32 | u64::from(index);
unsafe { unsafe {
bpf_perf_event_output( let ret = bpf_perf_event_output(
ctx.as_ptr(), ctx.as_ptr(),
self.def.get() as *mut _, self.def.get() as *mut _,
flags, flags,
data as *const _ as *mut _, data as *const _ as *mut _,
mem::size_of::<T>() as u64, mem::size_of::<T>() as u64,
); );
if ret == 0 {
Ok(())
} else {
Err(ret)
}
}
}
/// Reads the perf_event value for the CPU the program is currently running on.
pub fn read_current_cpu(&self) -> Result<bpf_perf_event_value, i64> {
    self.read(BPF_F_CURRENT_CPU)
}

/// Reads the perf_event value stored at the given map index.
pub fn read_at_index(&self, index: u32) -> Result<bpf_perf_event_value, i64> {
    self.read(u64::from(index) & BPF_F_INDEX_MASK)
}

/// Common implementation: reads a value via the `bpf_perf_event_read_value` helper.
///
/// According to the Linux manual, `bpf_perf_event_read_value` is preferred over
/// `bpf_perf_event_read`. Returns `Err` with the raw helper return code on failure.
fn read(&self, flags: u64) -> Result<bpf_perf_event_value, i64> {
    let mut value = MaybeUninit::<bpf_perf_event_value>::uninit();
    // SAFETY: `value` provides exactly `size_of::<bpf_perf_event_value>()`
    // writable bytes for the helper to fill.
    let ret = unsafe {
        bpf_perf_event_read_value(
            self.def.get() as *mut _,
            flags,
            value.as_mut_ptr(),
            mem::size_of::<bpf_perf_event_value>() as u32,
        )
    };
    match ret {
        // SAFETY: a zero return code means the helper fully initialized `value`.
        0 => Ok(unsafe { value.assume_init() }),
        err => Err(err),
    }
}
} }
} }

Loading…
Cancel
Save