@@ -1,12 +1,25 @@
-use core::{cell::UnsafeCell, marker::PhantomData, mem};
+use core::{
+    cell::UnsafeCell,
+    marker::PhantomData,
+    mem::{self, MaybeUninit},
+};
+
+use aya_bpf_bindings::{bindings::bpf_perf_event_value, helpers::bpf_perf_event_read_value};
 
 use crate::{
-    bindings::{bpf_map_def, bpf_map_type::BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_F_CURRENT_CPU},
+    bindings::{bpf_map_def, bpf_map_type::BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_F_CURRENT_CPU, BPF_F_INDEX_MASK},
     helpers::bpf_perf_event_output,
     maps::PinningType,
     BpfContext,
 };
 
+/// A map of type `BPF_MAP_TYPE_PERF_EVENT_ARRAY`.
+///
+/// # Minimum kernel version
+///
+/// The minimum kernel version required to read perf_event values using [PerfEventArray] is 4.15.
+/// This concerns the functions [`read_current_cpu()`], [`read_at_index()`] and [`read()`].
+///
 #[repr(transparent)]
 pub struct PerfEventArray<T> {
     def: UnsafeCell<bpf_map_def>,
@@ -50,20 +63,54 @@ impl<T> PerfEventArray<T> {
     }
 
-    pub fn output<C: BpfContext>(&self, ctx: &C, data: &T, flags: u32) {
-        self.output_at_index(ctx, BPF_F_CURRENT_CPU as u32, data, flags)
+    pub fn output_current_cpu<C: BpfContext>(&self, ctx: &C, data: &T) -> Result<(), i64> {
+        self.output(ctx, data, BPF_F_CURRENT_CPU)
     }
 
-    pub fn output_at_index<C: BpfContext>(&self, ctx: &C, index: u32, data: &T, flags: u32) {
-        let flags = u64::from(flags) << 32 | u64::from(index);
+    pub fn output_at_index<C: BpfContext>(&self, ctx: &C, data: &T, index: u32) -> Result<(), i64> {
+        self.output(ctx, data, (index as u64) & BPF_F_INDEX_MASK)
+    }
+
+    fn output<C: BpfContext>(&self, ctx: &C, data: &T, flags: u64) -> Result<(), i64> {
         unsafe {
-            bpf_perf_event_output(
+            let ret = bpf_perf_event_output(
                 ctx.as_ptr(),
                 self.def.get() as *mut _,
                 flags,
                 data as *const _ as *mut _,
                 mem::size_of::<T>() as u64,
             );
+            if ret == 0 {
+                Ok(())
+            } else {
+                Err(ret)
+            }
+        }
+    }
+
+    pub fn read_current_cpu(&self) -> Result<bpf_perf_event_value, i64> {
+        self.read(BPF_F_CURRENT_CPU)
+    }
+
+    pub fn read_at_index(&self, index: u32) -> Result<bpf_perf_event_value, i64> {
+        self.read(u64::from(index) & BPF_F_INDEX_MASK)
+    }
+
+    fn read(&self, flags: u64) -> Result<bpf_perf_event_value, i64> {
+        let mut buf = MaybeUninit::<bpf_perf_event_value>::uninit();
+        unsafe {
+            // According to the Linux manual, `bpf_perf_event_read_value` is preferred over `bpf_perf_event_read`.
+            let ret = bpf_perf_event_read_value(
+                self.def.get() as *mut _,
+                flags,
+                buf.as_mut_ptr(),
+                mem::size_of::<bpf_perf_event_value>() as u32,
+            );
+            if ret == 0 {
+                Ok(buf.assume_init())
+            } else {
+                Err(ret)
+            }
         }
     }
 }
 
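For context, here is a rough sketch of how a BPF program might drive the reworked output API. The `Event` payload type, the `EVENTS` map name and the kprobe entry point are hypothetical, and the `#[map]`/`#[kprobe]` macros plus the `with_max_entries` constructor are assumed to be available as in existing aya-bpf programs; the point is that `output_current_cpu` now returns `Result<(), i64>`, so a failed `bpf_perf_event_output` is visible to the caller instead of being silently dropped.

```rust
use aya_bpf::{
    macros::{kprobe, map},
    maps::PerfEventArray,
    programs::ProbeContext,
};

// Hypothetical payload emitted through the perf event array.
#[repr(C)]
pub struct Event {
    pub pid: u32,
    pub len: u32,
}

// Assumed map declaration, following the style used in existing aya-bpf programs.
#[map(name = "EVENTS")]
static mut EVENTS: PerfEventArray<Event> = PerfEventArray::with_max_entries(1024, 0);

#[kprobe]
pub fn observe(ctx: ProbeContext) -> u32 {
    let event = Event { pid: 0, len: 0 };
    // output_current_cpu now reports the helper's return code instead of discarding it.
    match unsafe { EVENTS.output_current_cpu(&ctx, &event) } {
        Ok(()) => 0,
        Err(_) => 1,
    }
}
```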
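And a similarly hedged sketch of the new read side. It assumes user space has already stored perf event FDs (for example hardware cycle counters) in the array slots, which is what `bpf_perf_event_read_value` reads from; per the new doc comment this needs kernel 4.15 or later. The map name, program type and sizing are illustrative only.

```rust
use aya_bpf::{
    macros::{kprobe, map},
    maps::PerfEventArray,
    programs::ProbeContext,
};

// Hypothetical counter map: user space is expected to have opened one perf event FD
// per CPU and stored it in the corresponding slot before this program runs.
#[map(name = "COUNTERS")]
static mut COUNTERS: PerfEventArray<u64> = PerfEventArray::with_max_entries(1024, 0);

#[kprobe]
pub fn sample(_ctx: ProbeContext) -> u32 {
    // Requires kernel 4.15+ as documented above; bpf_perf_event_value exposes the raw
    // counter together with the enabled and running times.
    match unsafe { COUNTERS.read_current_cpu() } {
        Ok(value) => (value.counter & 0xffff_ffff) as u32,
        Err(_) => 0,
    }
}
```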