diff --git a/bpf/aya-bpf/src/maps/mod.rs b/bpf/aya-bpf/src/maps/mod.rs
index 049b4a65..2bdd11cf 100644
--- a/bpf/aya-bpf/src/maps/mod.rs
+++ b/bpf/aya-bpf/src/maps/mod.rs
@@ -1,9 +1,13 @@
 pub mod array;
 pub mod hash_map;
 pub mod perf_map;
+pub mod queue;
 pub mod sock_hash;
+pub mod sock_map;
 
 pub use array::Array;
 pub use hash_map::HashMap;
 pub use perf_map::PerfMap;
+pub use queue::Queue;
 pub use sock_hash::SockHash;
+pub use sock_map::SockMap;
diff --git a/bpf/aya-bpf/src/maps/queue.rs b/bpf/aya-bpf/src/maps/queue.rs
new file mode 100644
index 00000000..9ef32143
--- /dev/null
+++ b/bpf/aya-bpf/src/maps/queue.rs
@@ -0,0 +1,57 @@
+use core::{marker::PhantomData, mem};
+
+use aya_bpf_cty::c_void;
+
+use crate::{
+    bindings::{bpf_map_def, bpf_map_type::BPF_MAP_TYPE_QUEUE},
+    helpers::{bpf_map_pop_elem, bpf_map_push_elem},
+};
+
+#[repr(transparent)]
+pub struct Queue<T> {
+    def: bpf_map_def,
+    _t: PhantomData<T>,
+}
+
+impl<T> Queue<T> {
+    pub const fn with_max_entries(max_entries: u32, flags: u32) -> Queue<T> {
+        Queue {
+            def: bpf_map_def {
+                type_: BPF_MAP_TYPE_QUEUE,
+                key_size: 0,
+                value_size: mem::size_of::<T>() as u32,
+                max_entries,
+                map_flags: flags,
+                id: 0,
+                pinning: 0,
+            },
+            _t: PhantomData,
+        }
+    }
+
+    pub unsafe fn push(&mut self, value: &T, flags: u64) -> Result<(), i64> {
+        let ret = bpf_map_push_elem(
+            &mut self.def as *mut _ as *mut _,
+            value as *const _ as *const _,
+            flags,
+        );
+        if ret < 0 {
+            Err(ret)
+        } else {
+            Ok(())
+        }
+    }
+
+    pub unsafe fn pop(&mut self) -> Option<T> {
+        let mut value = mem::MaybeUninit::uninit();
+        let ret = bpf_map_pop_elem(
+            &mut self.def as *mut _ as *mut _,
+            &mut value as *mut _ as *mut _,
+        );
+        if ret < 0 {
+            None
+        } else {
+            Some(value.assume_init())
+        }
+    }
+}
diff --git a/bpf/aya-bpf/src/maps/sock_map.rs b/bpf/aya-bpf/src/maps/sock_map.rs
new file mode 100644
index 00000000..2d0d7376
--- /dev/null
+++ b/bpf/aya-bpf/src/maps/sock_map.rs
@@ -0,0 +1,60 @@
+use core::mem;
+
+use aya_bpf_cty::c_void;
+
+use crate::{
+    bindings::{bpf_map_def, bpf_map_type::BPF_MAP_TYPE_SOCKMAP, bpf_sock_ops},
+    helpers::{bpf_msg_redirect_map, bpf_sock_map_update},
+    programs::SkMsgContext,
+    BpfContext,
+};
+
+#[repr(transparent)]
+pub struct SockMap {
+    def: bpf_map_def,
+}
+
+impl SockMap {
+    pub const fn with_max_entries(max_entries: u32, flags: u32) -> SockMap {
+        SockMap {
+            def: bpf_map_def {
+                type_: BPF_MAP_TYPE_SOCKMAP,
+                key_size: mem::size_of::<u32>() as u32,
+                value_size: mem::size_of::<u32>() as u32,
+                max_entries,
+                map_flags: flags,
+                id: 0,
+                pinning: 0,
+            },
+        }
+    }
+
+    pub unsafe fn update(
+        &mut self,
+        mut index: u32,
+        sk_ops: *mut bpf_sock_ops,
+        flags: u64,
+    ) -> Result<(), i64> {
+        let ret = bpf_sock_map_update(
+            sk_ops,
+            &mut self.def as *mut _ as *mut _,
+            &mut index as *mut _ as *mut c_void,
+            flags,
+        );
+        if ret < 0 {
+            Err(ret)
+        } else {
+            Ok(())
+        }
+    }
+
+    pub unsafe fn redirect(&mut self, ctx: &SkMsgContext, index: u32, flags: u64) -> i64 {
+        let ret = bpf_msg_redirect_map(
+            ctx.as_ptr() as *mut _,
+            &mut self.def as *mut _ as *mut _,
+            index,
+            flags,
+        );
+        ret
+    }
+}
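
Not part of the patch: a minimal usage sketch of the two new map types, assuming
the #[map], #[kprobe] and #[sk_msg] attribute macros and the ProbeContext /
SkMsgContext program types exported by aya_bpf; the map names, sizes and the
redirect index are illustrative only.

use aya_bpf::{
    macros::{kprobe, map, sk_msg},
    maps::{Queue, SockMap},
    programs::{ProbeContext, SkMsgContext},
};

// A FIFO of u64 values shared with user space.
#[map]
static mut EVENTS: Queue<u64> = Queue::with_max_entries(1024, 0);

// A map of sockets, filled from a sock_ops program (SockMap::update) or from
// user space.
#[map]
static mut SOCKS: SockMap = SockMap::with_max_entries(64, 0);

#[kprobe]
fn observe(_ctx: ProbeContext) -> u32 {
    unsafe {
        // push() returns Err carrying the negative helper return value, e.g.
        // when the queue is full; this sketch just drops the value in that case.
        let _ = EVENTS.push(&42u64, 0);
        // pop() yields the oldest element, if any.
        if let Some(_v) = EVENTS.pop() {
            // ...
        }
    }
    0
}

#[sk_msg]
fn redirect_msg(ctx: SkMsgContext) -> u32 {
    // Redirect the message to the socket stored at index 0 of SOCKS.
    unsafe { SOCKS.redirect(&ctx, 0, 0) as u32 }
}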