use core::{
    cell::UnsafeCell,
    mem,
    mem::MaybeUninit,
    ops::{Deref, DerefMut},
};

use crate::{
    bindings::{bpf_map_def, bpf_map_type::BPF_MAP_TYPE_RINGBUF},
    helpers::{
        bpf_ringbuf_discard, bpf_ringbuf_output, bpf_ringbuf_query, bpf_ringbuf_reserve,
        bpf_ringbuf_submit,
    },
    maps::PinningType,
};

#[cfg(unstable)]
mod const_assert {
    pub struct Assert<const COND: bool> {}

    pub trait IsTrue {}

    impl IsTrue for Assert<true> {}
}
#[cfg(unstable)]
use const_assert::{Assert, IsTrue};

#[repr(transparent)]
pub struct RingBuf {
    def: UnsafeCell<bpf_map_def>,
}

unsafe impl Sync for RingBuf {}

/// A ring buffer entry, returned from [`RingBuf::reserve`].
///
/// You must [`submit`] or [`discard`] this entry before it gets dropped.
///
/// [`submit`]: RingBufEntry::submit
/// [`discard`]: RingBufEntry::discard
#[must_use = "eBPF verifier requires ring buffer entries to be either submitted or discarded"]
pub struct RingBufEntry<T: 'static>(&'static mut MaybeUninit<T>);

impl<T> Deref for RingBufEntry<T> {
    type Target = MaybeUninit<T>;

    fn deref(&self) -> &Self::Target {
        self.0
    }
}

impl<T> DerefMut for RingBufEntry<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.0
    }
}

impl<T> RingBufEntry<T> {
    /// Discard this ring buffer entry. The entry will be skipped by the userspace reader.
    pub fn discard(self, flags: u64) {
        unsafe { bpf_ringbuf_discard(self.0.as_mut_ptr() as *mut _, flags) };
    }

    /// Commit this ring buffer entry. The entry will be made visible to the userspace reader.
    pub fn submit(self, flags: u64) {
        unsafe { bpf_ringbuf_submit(self.0.as_mut_ptr() as *mut _, flags) };
    }
}

impl RingBuf {
    /// Declare an eBPF ring buffer.
    ///
    /// The Linux kernel requires that `byte_size` be a power-of-2 multiple of the page size. The
    /// loading program may coerce the size when loading the map.
    pub const fn with_byte_size(byte_size: u32, flags: u32) -> Self {
        Self::new(byte_size, flags, PinningType::None)
    }

    /// Declare a pinned eBPF ring buffer.
    ///
    /// The Linux kernel requires that `byte_size` be a power-of-2 multiple of the page size. The
    /// loading program may coerce the size when loading the map.
    pub const fn pinned(byte_size: u32, flags: u32) -> Self {
        Self::new(byte_size, flags, PinningType::ByName)
    }

    const fn new(byte_size: u32, flags: u32, pinning_type: PinningType) -> Self {
        Self {
            def: UnsafeCell::new(bpf_map_def {
                type_: BPF_MAP_TYPE_RINGBUF,
                key_size: 0,
                value_size: 0,
                max_entries: byte_size,
                map_flags: flags,
                id: 0,
                pinning: pinning_type as u32,
            }),
        }
    }

    /// Reserve memory in the ring buffer that can fit `T`.
    ///
    /// Returns `None` if the ring buffer is full.
    #[cfg(unstable)]
    pub fn reserve<T: 'static>(&self, flags: u64) -> Option<RingBufEntry<T>>
    where
        Assert<{ 8 % mem::align_of::<T>() == 0 }>: IsTrue,
    {
        self.reserve_impl(flags)
    }

    /// Reserve memory in the ring buffer that can fit `T`.
    ///
    /// Returns `None` if the ring buffer is full.
    ///
    /// The kernel reserves memory at an 8-byte aligned boundary, so `mem::align_of::<T>()` must
    /// be less than or equal to 8. If you use this with a `T` that isn't properly aligned, this
    /// function will be compiled to a panic; depending on your panic_handler, this may make
    /// the eBPF program fail to load, or it may make it have undefined behavior.
    #[cfg(not(unstable))]
    pub fn reserve<T: 'static>(&self, flags: u64) -> Option<RingBufEntry<T>> {
        assert_eq!(8 % mem::align_of::<T>(), 0);
        self.reserve_impl(flags)
    }

    fn reserve_impl<T: 'static>(&self, flags: u64) -> Option<RingBufEntry<T>> {
        let ptr = unsafe {
            bpf_ringbuf_reserve(self.def.get() as *mut _, mem::size_of::<T>() as _, flags)
        } as *mut MaybeUninit<T>;
        unsafe { ptr.as_mut() }.map(|ptr| RingBufEntry(ptr))
    }
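    // Usage sketch (illustration only, not part of this module): assuming a hypothetical
    // `#[repr(C)] struct Event { pid: u32 }` and a static map declared with aya-ebpf's `#[map]`
    // attribute, a program would reserve an entry, write into it in place, and submit it,
    // avoiding an extra copy through the stack:
    //
    //     #[map]
    //     static EVENTS: RingBuf = RingBuf::with_byte_size(4096 * 64, 0);
    //
    //     fn emit(pid: u32) {
    //         // `reserve` returns `None` when the ring buffer has no free space.
    //         if let Some(mut entry) = EVENTS.reserve::<Event>(0) {
    //             entry.write(Event { pid });
    //             entry.submit(0);
    //         }
    //     }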
    /// Copy `data` to the ring buffer output.
    ///
    /// Consider using [`reserve`] and [`submit`] if `T` is statically sized and you want to save a
    /// copy from either a map buffer or the stack.
    ///
    /// Unlike [`reserve`], this function can handle dynamically sized types (which are hard to
    /// create in eBPF but still possible, e.g. by slicing an array).
    ///
    /// Note: `T` must be aligned to no more than 8 bytes; it's not possible to fulfill larger
    /// alignment requests. If you use this with a `T` that isn't properly aligned, this function
    /// will be compiled to a panic and silently make your eBPF program fail to load.
    /// See [here](https://github.com/torvalds/linux/blob/3f01e9fed/kernel/bpf/ringbuf.c#L418).
    ///
    /// [`reserve`]: RingBuf::reserve
    /// [`submit`]: RingBufEntry::submit
    pub fn output<T: ?Sized>(&self, data: &T, flags: u64) -> Result<(), i64> {
        assert_eq!(8 % mem::align_of_val(data), 0);
        let ret = unsafe {
            bpf_ringbuf_output(
                self.def.get() as *mut _,
                data as *const _ as *mut _,
                mem::size_of_val(data) as _,
                flags,
            )
        };
        if ret < 0 {
            Err(ret)
        } else {
            Ok(())
        }
    }

    /// Query various information about the ring buffer.
    ///
    /// Consult the `bpf_ringbuf_query` documentation for the list of allowed flags.
    pub fn query(&self, flags: u64) -> u64 {
        unsafe { bpf_ringbuf_query(self.def.get() as *mut _, flags) }
    }
}
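// `output` sketch (illustration only): unlike `reserve`/`submit`, `output` copies the value in a
// single call and also accepts dynamically sized data such as slices. The byte-slice signature
// below is an assumption for illustration; `[u8]` has alignment 1, so the 8-byte alignment check
// always passes:
//
//     fn emit_bytes(ring: &RingBuf, bytes: &[u8]) -> Result<(), i64> {
//         // Returns the negative error code from `bpf_ringbuf_output` on failure.
//         ring.output(bytes, 0)
//     }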