perf_buffer: use MMap from ring_buf

pull/1161/merge
Tamir Duberstein 2 weeks ago
parent ff82c244f7
commit 888701425b

@ -60,7 +60,9 @@ use std::{
}; };
use aya_obj::{generated::bpf_map_type, parse_map_info, EbpfSectionKind, InvalidTypeBinding}; use aya_obj::{generated::bpf_map_type, parse_map_info, EbpfSectionKind, InvalidTypeBinding};
use libc::{getrlimit, rlim_t, rlimit, RLIMIT_MEMLOCK, RLIM_INFINITY}; use libc::{
c_int, c_void, getrlimit, off_t, rlim_t, rlimit, MAP_FAILED, RLIMIT_MEMLOCK, RLIM_INFINITY,
};
use log::warn; use log::warn;
use thiserror::Error; use thiserror::Error;
@ -68,7 +70,7 @@ use crate::{
pin::PinError, pin::PinError,
sys::{ sys::{
bpf_create_map, bpf_get_object, bpf_map_freeze, bpf_map_get_fd_by_id, bpf_map_get_next_key, bpf_create_map, bpf_get_object, bpf_map_freeze, bpf_map_get_fd_by_id, bpf_map_get_next_key,
bpf_map_update_elem_ptr, bpf_pin_object, SyscallError, bpf_map_update_elem_ptr, bpf_pin_object, mmap, munmap, SyscallError,
}, },
util::{nr_cpus, KernelVersion}, util::{nr_cpus, KernelVersion},
PinningType, Pod, PinningType, Pod,
@ -952,6 +954,62 @@ impl<T: Pod> Deref for PerCpuValues<T> {
} }
} }
// MMap corresponds to a memory-mapped region.
//
// The data is unmapped in Drop.
//
// Debug is derived only under cfg(test) so test types embedding MMap can
// derive Debug without imposing it on release builds.
#[cfg_attr(test, derive(Debug))]
struct MMap {
    // Start of the mapping; guaranteed non-null (checked in `MMap::new`).
    ptr: ptr::NonNull<c_void>,
    // Length of the mapping in bytes, as passed to mmap and later to munmap.
    len: usize,
}
// Needed because NonNull<T> is !Send and !Sync out of caution that the data
// might be aliased unsafely.
unsafe impl Send for MMap {}
unsafe impl Sync for MMap {}
impl MMap {
fn new(
fd: BorrowedFd<'_>,
len: usize,
prot: c_int,
flags: c_int,
offset: off_t,
) -> Result<Self, SyscallError> {
match unsafe { mmap(ptr::null_mut(), len, prot, flags, fd, offset) } {
MAP_FAILED => Err(SyscallError {
call: "mmap",
io_error: io::Error::last_os_error(),
}),
ptr => {
let ptr = ptr::NonNull::new(ptr).ok_or(
// This should never happen, but to be paranoid, and so we never need to talk
// about a null pointer, we check it anyway.
SyscallError {
call: "mmap",
io_error: io::Error::other("mmap returned null pointer"),
},
)?;
Ok(Self { ptr, len })
}
}
}
}
impl AsRef<[u8]> for MMap {
    // Expose the mapped region as an immutable byte slice.
    fn as_ref(&self) -> &[u8] {
        // SAFETY: `ptr` is non-null and points to a live mapping of exactly
        // `len` bytes, which remains valid until Drop unmaps it.
        unsafe { std::slice::from_raw_parts(self.ptr.as_ptr().cast(), self.len) }
    }
}
impl Drop for MMap {
    fn drop(&mut self) {
        // SAFETY: `ptr`/`len` describe a mapping created in `MMap::new` that
        // has not been unmapped yet; munmap's return value is deliberately
        // ignored since there is no way to report an error from Drop.
        unsafe { munmap(self.ptr.as_ptr(), self.len) };
    }
}
#[cfg(test)] #[cfg(test)]
mod test_utils { mod test_utils {
use aya_obj::{ use aya_obj::{

@ -1,9 +1,8 @@
use std::{ use std::{
ffi::c_void,
io, mem, io, mem,
os::fd::{AsFd, BorrowedFd}, os::fd::{AsFd, BorrowedFd},
ptr, slice, ptr, slice,
sync::atomic::{self, AtomicPtr, Ordering}, sync::atomic::{self, Ordering},
}; };
use aya_obj::generated::{ use aya_obj::generated::{
@ -12,10 +11,13 @@ use aya_obj::generated::{
PERF_EVENT_IOC_DISABLE, PERF_EVENT_IOC_ENABLE, PERF_EVENT_IOC_DISABLE, PERF_EVENT_IOC_ENABLE,
}; };
use bytes::BytesMut; use bytes::BytesMut;
use libc::{munmap, MAP_FAILED, MAP_SHARED, PROT_READ, PROT_WRITE}; use libc::{MAP_SHARED, PROT_READ, PROT_WRITE};
use thiserror::Error; use thiserror::Error;
use crate::sys::{mmap, perf_event_ioctl, perf_event_open_bpf, SysResult}; use crate::{
maps::MMap,
sys::{perf_event_ioctl, perf_event_open_bpf, SysResult, SyscallError},
};
/// Perf buffer error. /// Perf buffer error.
#[derive(Error, Debug)] #[derive(Error, Debug)]
@ -81,9 +83,9 @@ pub struct Events {
pub lost: usize, pub lost: usize,
} }
#[derive(Debug)] #[cfg_attr(test, derive(Debug))]
pub(crate) struct PerfBuffer { pub(crate) struct PerfBuffer {
buf: AtomicPtr<perf_event_mmap_page>, mmap: MMap,
size: usize, size: usize,
page_size: usize, page_size: usize,
fd: crate::MockableFd, fd: crate::MockableFd,
@ -102,24 +104,17 @@ impl PerfBuffer {
let fd = perf_event_open_bpf(cpu_id as i32) let fd = perf_event_open_bpf(cpu_id as i32)
.map_err(|(_, io_error)| PerfBufferError::OpenError { io_error })?; .map_err(|(_, io_error)| PerfBufferError::OpenError { io_error })?;
let size = page_size * page_count; let size = page_size * page_count;
let buf = unsafe { let mmap = MMap::new(
mmap( fd.as_fd(),
ptr::null_mut(), size + page_size,
size + page_size, PROT_READ | PROT_WRITE,
PROT_READ | PROT_WRITE, MAP_SHARED,
MAP_SHARED, 0,
fd.as_fd(), )
0, .map_err(|SyscallError { call: _, io_error }| PerfBufferError::MMapError { io_error })?;
)
};
if buf == MAP_FAILED {
return Err(PerfBufferError::MMapError {
io_error: io::Error::last_os_error(),
});
}
let perf_buf = Self { let perf_buf = Self {
buf: AtomicPtr::new(buf as *mut perf_event_mmap_page), mmap,
size, size,
page_size, page_size,
fd, fd,
@ -131,8 +126,12 @@ impl PerfBuffer {
Ok(perf_buf) Ok(perf_buf)
} }
fn buf(&self) -> ptr::NonNull<perf_event_mmap_page> {
self.mmap.ptr.cast()
}
pub(crate) fn readable(&self) -> bool { pub(crate) fn readable(&self) -> bool {
let header = self.buf.load(Ordering::SeqCst); let header = self.buf().as_ptr();
let head = unsafe { (*header).data_head } as usize; let head = unsafe { (*header).data_head } as usize;
let tail = unsafe { (*header).data_tail } as usize; let tail = unsafe { (*header).data_tail } as usize;
head != tail head != tail
@ -145,7 +144,7 @@ impl PerfBuffer {
if buffers.is_empty() { if buffers.is_empty() {
return Err(PerfBufferError::NoBuffers); return Err(PerfBufferError::NoBuffers);
} }
let header = self.buf.load(Ordering::SeqCst); let header = self.buf().as_ptr();
let base = header as usize + self.page_size; let base = header as usize + self.page_size;
let mut events = Events { read: 0, lost: 0 }; let mut events = Events { read: 0, lost: 0 };
@ -265,13 +264,7 @@ impl AsFd for PerfBuffer {
impl Drop for PerfBuffer { impl Drop for PerfBuffer {
fn drop(&mut self) { fn drop(&mut self) {
unsafe { let _: SysResult<_> = perf_event_ioctl(self.fd.as_fd(), PERF_EVENT_IOC_DISABLE, 0);
let _: SysResult<_> = perf_event_ioctl(self.fd.as_fd(), PERF_EVENT_IOC_DISABLE, 0);
munmap(
self.buf.load(Ordering::SeqCst) as *mut c_void,
self.size + self.page_size,
);
}
} }
} }

@ -6,23 +6,18 @@
use std::{ use std::{
borrow::Borrow, borrow::Borrow,
ffi::{c_int, c_void},
fmt::{self, Debug, Formatter}, fmt::{self, Debug, Formatter},
io, mem, mem,
ops::Deref, ops::Deref,
os::fd::{AsFd as _, AsRawFd, BorrowedFd, RawFd}, os::fd::{AsFd as _, AsRawFd, BorrowedFd, RawFd},
ptr,
ptr::NonNull,
slice,
sync::atomic::{AtomicU32, AtomicUsize, Ordering}, sync::atomic::{AtomicU32, AtomicUsize, Ordering},
}; };
use aya_obj::generated::{BPF_RINGBUF_BUSY_BIT, BPF_RINGBUF_DISCARD_BIT, BPF_RINGBUF_HDR_SZ}; use aya_obj::generated::{BPF_RINGBUF_BUSY_BIT, BPF_RINGBUF_DISCARD_BIT, BPF_RINGBUF_HDR_SZ};
use libc::{munmap, off_t, MAP_FAILED, MAP_SHARED, PROT_READ, PROT_WRITE}; use libc::{MAP_SHARED, PROT_READ, PROT_WRITE};
use crate::{ use crate::{
maps::{MapData, MapError}, maps::{MMap, MapData, MapError},
sys::{mmap, SyscallError},
util::page_size, util::page_size,
}; };
@ -403,58 +398,3 @@ impl ProducerData {
} }
} }
} }
// MMap corresponds to a memory-mapped region.
//
// The data is unmapped in Drop.
struct MMap {
    // Start of the mapping; guaranteed non-null (checked in `MMap::new`).
    ptr: NonNull<c_void>,
    // Length of the mapping in bytes, as passed to mmap and later to munmap.
    len: usize,
}
// Needed because NonNull<T> is !Send and !Sync out of caution that the data
// might be aliased unsafely.
unsafe impl Send for MMap {}
unsafe impl Sync for MMap {}
impl MMap {
fn new(
fd: BorrowedFd<'_>,
len: usize,
prot: c_int,
flags: c_int,
offset: off_t,
) -> Result<Self, MapError> {
match unsafe { mmap(ptr::null_mut(), len, prot, flags, fd, offset) } {
MAP_FAILED => Err(MapError::SyscallError(SyscallError {
call: "mmap",
io_error: io::Error::last_os_error(),
})),
ptr => Ok(Self {
ptr: NonNull::new(ptr).ok_or(
// This should never happen, but to be paranoid, and so we never need to talk
// about a null pointer, we check it anyway.
MapError::SyscallError(SyscallError {
call: "mmap",
io_error: io::Error::other("mmap returned null pointer"),
}),
)?,
len,
}),
}
}
}
impl AsRef<[u8]> for MMap {
    // Expose the mapped region as an immutable byte slice.
    fn as_ref(&self) -> &[u8] {
        // SAFETY: `ptr` is non-null and points to a live mapping of exactly
        // `len` bytes, which remains valid until Drop unmaps it.
        unsafe { slice::from_raw_parts(self.ptr.as_ptr().cast(), self.len) }
    }
}
impl Drop for MMap {
    fn drop(&mut self) {
        // SAFETY: `ptr`/`len` describe a mapping created in `MMap::new` that
        // has not been unmapped yet; munmap's return value is deliberately
        // ignored since there is no way to report an error from Drop.
        unsafe { munmap(self.ptr.as_ptr(), self.len) };
    }
}

@ -135,6 +135,15 @@ pub(crate) unsafe fn mmap(
TEST_MMAP_RET.with(|ret| *ret.borrow()) TEST_MMAP_RET.with(|ret| *ret.borrow())
} }
/// Thin wrapper around `libc::munmap`.
///
/// Under `cfg(test)` this is a no-op that returns success (0), so unit tests
/// can drop memory-mapped values (e.g. `MMap`) without having created real
/// kernel mappings; the parameters are then unused, hence the cfg'd `allow`.
///
/// # Safety
///
/// Outside of tests, `addr` and `len` must describe a region previously
/// returned by a successful `mmap` call, per munmap(2).
#[cfg_attr(test, allow(unused_variables))]
pub(crate) unsafe fn munmap(addr: *mut c_void, len: usize) -> c_int {
    #[cfg(not(test))]
    return libc::munmap(addr, len);
    #[cfg(test)]
    0
}
/// The type of eBPF statistic to enable. /// The type of eBPF statistic to enable.
#[non_exhaustive] #[non_exhaustive]
#[doc(alias = "bpf_stats_type")] #[doc(alias = "bpf_stats_type")]

@ -434,7 +434,7 @@ pub fn aya::maps::perf::AsyncPerfEventArray<T>::from(t: T) -> T
pub struct aya::maps::perf::AsyncPerfEventArrayBuffer<T: core::borrow::BorrowMut<aya::maps::MapData>> pub struct aya::maps::perf::AsyncPerfEventArrayBuffer<T: core::borrow::BorrowMut<aya::maps::MapData>>
impl<T: core::borrow::BorrowMut<aya::maps::MapData>> aya::maps::perf::AsyncPerfEventArrayBuffer<T> impl<T: core::borrow::BorrowMut<aya::maps::MapData>> aya::maps::perf::AsyncPerfEventArrayBuffer<T>
pub async fn aya::maps::perf::AsyncPerfEventArrayBuffer<T>::read_events(&mut self, buffers: &mut [bytes::bytes_mut::BytesMut]) -> core::result::Result<aya::maps::perf::Events, aya::maps::perf::PerfBufferError> pub async fn aya::maps::perf::AsyncPerfEventArrayBuffer<T>::read_events(&mut self, buffers: &mut [bytes::bytes_mut::BytesMut]) -> core::result::Result<aya::maps::perf::Events, aya::maps::perf::PerfBufferError>
impl<T> !core::marker::Freeze for aya::maps::perf::AsyncPerfEventArrayBuffer<T> impl<T> core::marker::Freeze for aya::maps::perf::AsyncPerfEventArrayBuffer<T>
impl<T> core::marker::Send for aya::maps::perf::AsyncPerfEventArrayBuffer<T> where T: core::marker::Sync + core::marker::Send impl<T> core::marker::Send for aya::maps::perf::AsyncPerfEventArrayBuffer<T> where T: core::marker::Sync + core::marker::Send
impl<T> core::marker::Sync for aya::maps::perf::AsyncPerfEventArrayBuffer<T> where T: core::marker::Sync + core::marker::Send impl<T> core::marker::Sync for aya::maps::perf::AsyncPerfEventArrayBuffer<T> where T: core::marker::Sync + core::marker::Send
impl<T> core::marker::Unpin for aya::maps::perf::AsyncPerfEventArrayBuffer<T> impl<T> core::marker::Unpin for aya::maps::perf::AsyncPerfEventArrayBuffer<T>
@ -533,7 +533,7 @@ impl<T: core::borrow::BorrowMut<aya::maps::MapData>> std::os::fd::owned::AsFd fo
pub fn aya::maps::perf::PerfEventArrayBuffer<T>::as_fd(&self) -> std::os::fd::owned::BorrowedFd<'_> pub fn aya::maps::perf::PerfEventArrayBuffer<T>::as_fd(&self) -> std::os::fd::owned::BorrowedFd<'_>
impl<T: core::borrow::BorrowMut<aya::maps::MapData>> std::os::fd::raw::AsRawFd for aya::maps::perf::PerfEventArrayBuffer<T> impl<T: core::borrow::BorrowMut<aya::maps::MapData>> std::os::fd::raw::AsRawFd for aya::maps::perf::PerfEventArrayBuffer<T>
pub fn aya::maps::perf::PerfEventArrayBuffer<T>::as_raw_fd(&self) -> std::os::fd::raw::RawFd pub fn aya::maps::perf::PerfEventArrayBuffer<T>::as_raw_fd(&self) -> std::os::fd::raw::RawFd
impl<T> !core::marker::Freeze for aya::maps::perf::PerfEventArrayBuffer<T> impl<T> core::marker::Freeze for aya::maps::perf::PerfEventArrayBuffer<T>
impl<T> core::marker::Send for aya::maps::perf::PerfEventArrayBuffer<T> where T: core::marker::Sync + core::marker::Send impl<T> core::marker::Send for aya::maps::perf::PerfEventArrayBuffer<T> where T: core::marker::Sync + core::marker::Send
impl<T> core::marker::Sync for aya::maps::perf::PerfEventArrayBuffer<T> where T: core::marker::Sync + core::marker::Send impl<T> core::marker::Sync for aya::maps::perf::PerfEventArrayBuffer<T> where T: core::marker::Sync + core::marker::Send
impl<T> core::marker::Unpin for aya::maps::perf::PerfEventArrayBuffer<T> impl<T> core::marker::Unpin for aya::maps::perf::PerfEventArrayBuffer<T>

Loading…
Cancel
Save