bpf: add aya-bpf-bindings

Move the generated bindings to aya-bpf-bindings.
pull/1/head
Alessandro Decina 4 years ago
parent b5cb1f479f
commit 7815711196
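
The bindgen output (bindings, getters and helpers) now lives in its own crate, so aya-bpf and eBPF program crates can depend on it directly. As a minimal sketch, assuming only the crate and module paths introduced by this commit, a dependent crate could reach the re-exported helpers like this (read_len itself is illustrative, not part of the diff):

use core::mem;

use aya_bpf_bindings::bindings::{__sk_buff, __u32};
use aya_bpf_bindings::helpers;
use aya_bpf_cty::{c_long, c_void};

// Copy the `len` field out of a socket buffer pointer with the raw
// bpf_probe_read helper re-exported by the new crate.
unsafe fn read_len(skb: *const __sk_buff) -> Result<__u32, c_long> {
    let mut len: __u32 = 0;
    let ret = helpers::bpf_probe_read(
        &mut len as *mut __u32 as *mut c_void,
        mem::size_of::<__u32>() as u32,
        &(*skb).len as *const __u32 as *const c_void,
    );
    if ret < 0 {
        return Err(ret);
    }
    Ok(len)
}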

@@ -1,2 +1,2 @@
[workspace]
members = ["aya-bpf", "aya-bpf-macros"]
members = ["aya-bpf", "aya-bpf-macros", "aya-bpf-bindings"]

@@ -0,0 +1,9 @@
[package]
name = "aya-bpf-bindings"
version = "0.1.0"
authors = ["Alessandro Decina <alessandro.d@gmail.com>"]
edition = "2018"

[dependencies]
aya-bpf-cty = { path = "../aya-bpf-cty" }

@@ -0,0 +1,9 @@
use std::env;

fn main() {
    // Fall back to the host architecture when bpf_target_arch isn't already
    // set; cargo exposes an already-active cfg to build scripts as
    // CARGO_CFG_BPF_TARGET_ARCH.
    if env::var("CARGO_CFG_BPF_TARGET_ARCH").is_err() {
        // HOST is a target triple like "x86_64-unknown-linux-gnu"; keep only
        // the architecture component.
        let arch = env::var("HOST").unwrap();
        let arch = arch.splitn(2, "-").next().unwrap();
        println!("cargo:rustc-cfg=bpf_target_arch=\"{}\"", arch);
    }
}

@@ -0,0 +1,21 @@
pub mod bindings;
pub mod getters;
pub mod helpers;
use aya_bpf_cty::{c_long, c_void};
use core::mem::{self, MaybeUninit};
// Reads a `T` from the given address via the bpf_probe_read helper, returning
// the helper's negative error code on failure.
#[inline]
unsafe fn bpf_probe_read<T>(src: *const T) -> Result<T, c_long> {
let mut v: MaybeUninit<T> = MaybeUninit::uninit();
let ret = helpers::bpf_probe_read(
v.as_mut_ptr() as *mut c_void,
mem::size_of::<T>() as u32,
src as *const c_void,
);
if ret < 0 {
return Err(ret);
}
Ok(v.assume_init())
}
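
The bpf_probe_read<T> wrapper above reads a whole T into a MaybeUninit buffer and only assumes it initialized once the helper reports success; the generated getters then turn that Result into an Option. A sketch of the pattern as used from inside this module, with __sk_buff::mark picked purely as an example (the real getters live in the generated getters module):

// Hypothetical caller inside this crate, not part of the diff: read a single
// field through the typed wrapper and drop the error, as the getters do.
unsafe fn read_mark(skb: *const bindings::__sk_buff) -> Option<bindings::__u32> {
    bpf_probe_read(&(*skb).mark).ok()
}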

@@ -0,0 +1,32 @@
#![no_std]
#![allow(non_camel_case_types, non_upper_case_globals, non_snake_case)]
#[cfg(bpf_target_arch = "x86_64")]
mod x86_64;
#[cfg(bpf_target_arch = "aarch64")]
mod aarch64;
#[cfg(bpf_target_arch = "x86_64")]
pub use x86_64::*;
#[cfg(bpf_target_arch = "aarch64")]
pub use aarch64::*;
use aya_bpf_cty::{c_long, c_void};
use core::mem::{self, MaybeUninit};
// Reads a `T` from the given address via the bpf_probe_read helper, returning
// the helper's negative error code on failure.
#[inline]
unsafe fn bpf_probe_read<T>(src: *const T) -> Result<T, c_long> {
let mut v: MaybeUninit<T> = MaybeUninit::uninit();
let ret = helpers::bpf_probe_read(
v.as_mut_ptr() as *mut c_void,
mem::size_of::<T>() as u32,
src as *const c_void,
);
if ret < 0 {
return Err(ret);
}
Ok(v.assume_init())
}

@@ -0,0 +1,3 @@
pub mod bindings;
pub mod getters;
pub mod helpers;

@@ -7,3 +7,4 @@ edition = "2018"
[dependencies]
aya-bpf-cty = { path = "../aya-bpf-cty" }
aya-bpf-macros = { path = "../aya-bpf-macros" }
aya-bpf-bindings = { path = "../aya-bpf-bindings" }

@@ -1,932 +0,0 @@
#[repr(C)]
#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct __BindgenBitfieldUnit<Storage> {
storage: Storage,
}
impl<Storage> __BindgenBitfieldUnit<Storage> {
#[inline]
pub const fn new(storage: Storage) -> Self {
Self { storage }
}
}
impl<Storage> __BindgenBitfieldUnit<Storage>
where
Storage: AsRef<[u8]> + AsMut<[u8]>,
{
#[inline]
pub fn get_bit(&self, index: usize) -> bool {
debug_assert!(index / 8 < self.storage.as_ref().len());
let byte_index = index / 8;
let byte = self.storage.as_ref()[byte_index];
let bit_index = if cfg!(target_endian = "big") {
7 - (index % 8)
} else {
index % 8
};
let mask = 1 << bit_index;
byte & mask == mask
}
#[inline]
pub fn set_bit(&mut self, index: usize, val: bool) {
debug_assert!(index / 8 < self.storage.as_ref().len());
let byte_index = index / 8;
let byte = &mut self.storage.as_mut()[byte_index];
let bit_index = if cfg!(target_endian = "big") {
7 - (index % 8)
} else {
index % 8
};
let mask = 1 << bit_index;
if val {
*byte |= mask;
} else {
*byte &= !mask;
}
}
#[inline]
pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 {
debug_assert!(bit_width <= 64);
debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len());
let mut val = 0;
for i in 0..(bit_width as usize) {
if self.get_bit(i + bit_offset) {
let index = if cfg!(target_endian = "big") {
bit_width as usize - 1 - i
} else {
i
};
val |= 1 << index;
}
}
val
}
#[inline]
pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) {
debug_assert!(bit_width <= 64);
debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len());
for i in 0..(bit_width as usize) {
let mask = 1 << i;
let val_bit_is_set = val & mask == mask;
let index = if cfg!(target_endian = "big") {
bit_width as usize - 1 - i
} else {
i
};
self.set_bit(index + bit_offset, val_bit_is_set);
}
}
}
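
__BindgenBitfieldUnit is bindgen's backing storage for C bitfields: get_bit/set_bit address one bit inside the byte array, while get/set move a bit_width-wide value at an arbitrary bit offset, reversing the bit order on big-endian targets. A small illustration of the API above, not part of the diff:

fn bitfield_demo() {
    // Pack a 3-bit value at bit offset 4 of a two-byte unit and read it back.
    let mut unit = __BindgenBitfieldUnit::new([0u8; 2]);
    unit.set(4, 3, 0b101);
    assert_eq!(unit.get(4, 3), 0b101);
}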
pub const BPF_LD: u32 = 0;
pub const BPF_LDX: u32 = 1;
pub const BPF_ST: u32 = 2;
pub const BPF_STX: u32 = 3;
pub const BPF_ALU: u32 = 4;
pub const BPF_JMP: u32 = 5;
pub const BPF_RET: u32 = 6;
pub const BPF_MISC: u32 = 7;
pub const BPF_W: u32 = 0;
pub const BPF_H: u32 = 8;
pub const BPF_B: u32 = 16;
pub const BPF_IMM: u32 = 0;
pub const BPF_ABS: u32 = 32;
pub const BPF_IND: u32 = 64;
pub const BPF_MEM: u32 = 96;
pub const BPF_LEN: u32 = 128;
pub const BPF_MSH: u32 = 160;
pub const BPF_ADD: u32 = 0;
pub const BPF_SUB: u32 = 16;
pub const BPF_MUL: u32 = 32;
pub const BPF_DIV: u32 = 48;
pub const BPF_OR: u32 = 64;
pub const BPF_AND: u32 = 80;
pub const BPF_LSH: u32 = 96;
pub const BPF_RSH: u32 = 112;
pub const BPF_NEG: u32 = 128;
pub const BPF_MOD: u32 = 144;
pub const BPF_XOR: u32 = 160;
pub const BPF_JA: u32 = 0;
pub const BPF_JEQ: u32 = 16;
pub const BPF_JGT: u32 = 32;
pub const BPF_JGE: u32 = 48;
pub const BPF_JSET: u32 = 64;
pub const BPF_K: u32 = 0;
pub const BPF_X: u32 = 8;
pub const BPF_MAXINSNS: u32 = 4096;
pub const BPF_JMP32: u32 = 6;
pub const BPF_ALU64: u32 = 7;
pub const BPF_DW: u32 = 24;
pub const BPF_XADD: u32 = 192;
pub const BPF_MOV: u32 = 176;
pub const BPF_ARSH: u32 = 192;
pub const BPF_END: u32 = 208;
pub const BPF_TO_LE: u32 = 0;
pub const BPF_TO_BE: u32 = 8;
pub const BPF_FROM_LE: u32 = 0;
pub const BPF_FROM_BE: u32 = 8;
pub const BPF_JNE: u32 = 80;
pub const BPF_JLT: u32 = 160;
pub const BPF_JLE: u32 = 176;
pub const BPF_JSGT: u32 = 96;
pub const BPF_JSGE: u32 = 112;
pub const BPF_JSLT: u32 = 192;
pub const BPF_JSLE: u32 = 208;
pub const BPF_CALL: u32 = 128;
pub const BPF_EXIT: u32 = 144;
pub const BPF_F_ALLOW_OVERRIDE: u32 = 1;
pub const BPF_F_ALLOW_MULTI: u32 = 2;
pub const BPF_F_REPLACE: u32 = 4;
pub const BPF_F_STRICT_ALIGNMENT: u32 = 1;
pub const BPF_F_ANY_ALIGNMENT: u32 = 2;
pub const BPF_F_TEST_RND_HI32: u32 = 4;
pub const BPF_F_TEST_STATE_FREQ: u32 = 8;
pub const BPF_PSEUDO_MAP_FD: u32 = 1;
pub const BPF_PSEUDO_MAP_VALUE: u32 = 2;
pub const BPF_PSEUDO_CALL: u32 = 1;
pub const BPF_F_QUERY_EFFECTIVE: u32 = 1;
pub const BPF_BUILD_ID_SIZE: u32 = 20;
pub const BPF_OBJ_NAME_LEN: u32 = 16;
pub const BPF_TAG_SIZE: u32 = 8;
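
The constants from BPF_LD through BPF_EXIT are the kernel's instruction-encoding bit groups: an opcode is the OR of an instruction class, an operation or access size, and a source or mode flag. A one-line illustration using only the constants above (the instruction struct itself is not part of these bindings):

// 64-bit register-to-register add: class | op | source = 7 | 0 | 8 = 0x0f.
fn alu64_add_x_opcode() -> u32 {
    BPF_ALU64 | BPF_ADD | BPF_X
}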
pub type __u8 = ::aya_bpf_cty::c_uchar;
pub type __u16 = ::aya_bpf_cty::c_ushort;
pub type __s32 = ::aya_bpf_cty::c_int;
pub type __u32 = ::aya_bpf_cty::c_uint;
pub type __s64 = ::aya_bpf_cty::c_longlong;
pub type __u64 = ::aya_bpf_cty::c_ulonglong;
pub type __be16 = __u16;
pub type __be32 = __u32;
pub type __wsum = __u32;
pub const BPF_REG_0: ::aya_bpf_cty::c_uint = 0;
pub const BPF_REG_1: ::aya_bpf_cty::c_uint = 1;
pub const BPF_REG_2: ::aya_bpf_cty::c_uint = 2;
pub const BPF_REG_3: ::aya_bpf_cty::c_uint = 3;
pub const BPF_REG_4: ::aya_bpf_cty::c_uint = 4;
pub const BPF_REG_5: ::aya_bpf_cty::c_uint = 5;
pub const BPF_REG_6: ::aya_bpf_cty::c_uint = 6;
pub const BPF_REG_7: ::aya_bpf_cty::c_uint = 7;
pub const BPF_REG_8: ::aya_bpf_cty::c_uint = 8;
pub const BPF_REG_9: ::aya_bpf_cty::c_uint = 9;
pub const BPF_REG_10: ::aya_bpf_cty::c_uint = 10;
pub const __MAX_BPF_REG: ::aya_bpf_cty::c_uint = 11;
pub type _bindgen_ty_1 = ::aya_bpf_cty::c_uint;
pub const BPF_MAP_TYPE_UNSPEC: bpf_map_type = 0;
pub const BPF_MAP_TYPE_HASH: bpf_map_type = 1;
pub const BPF_MAP_TYPE_ARRAY: bpf_map_type = 2;
pub const BPF_MAP_TYPE_PROG_ARRAY: bpf_map_type = 3;
pub const BPF_MAP_TYPE_PERF_EVENT_ARRAY: bpf_map_type = 4;
pub const BPF_MAP_TYPE_PERCPU_HASH: bpf_map_type = 5;
pub const BPF_MAP_TYPE_PERCPU_ARRAY: bpf_map_type = 6;
pub const BPF_MAP_TYPE_STACK_TRACE: bpf_map_type = 7;
pub const BPF_MAP_TYPE_CGROUP_ARRAY: bpf_map_type = 8;
pub const BPF_MAP_TYPE_LRU_HASH: bpf_map_type = 9;
pub const BPF_MAP_TYPE_LRU_PERCPU_HASH: bpf_map_type = 10;
pub const BPF_MAP_TYPE_LPM_TRIE: bpf_map_type = 11;
pub const BPF_MAP_TYPE_ARRAY_OF_MAPS: bpf_map_type = 12;
pub const BPF_MAP_TYPE_HASH_OF_MAPS: bpf_map_type = 13;
pub const BPF_MAP_TYPE_DEVMAP: bpf_map_type = 14;
pub const BPF_MAP_TYPE_SOCKMAP: bpf_map_type = 15;
pub const BPF_MAP_TYPE_CPUMAP: bpf_map_type = 16;
pub const BPF_MAP_TYPE_XSKMAP: bpf_map_type = 17;
pub const BPF_MAP_TYPE_SOCKHASH: bpf_map_type = 18;
pub const BPF_MAP_TYPE_CGROUP_STORAGE: bpf_map_type = 19;
pub const BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: bpf_map_type = 20;
pub const BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: bpf_map_type = 21;
pub const BPF_MAP_TYPE_QUEUE: bpf_map_type = 22;
pub const BPF_MAP_TYPE_STACK: bpf_map_type = 23;
pub const BPF_MAP_TYPE_SK_STORAGE: bpf_map_type = 24;
pub const BPF_MAP_TYPE_DEVMAP_HASH: bpf_map_type = 25;
pub const BPF_MAP_TYPE_STRUCT_OPS: bpf_map_type = 26;
pub const BPF_MAP_TYPE_RINGBUF: bpf_map_type = 27;
pub type bpf_map_type = ::aya_bpf_cty::c_uint;
pub const BPF_ANY: ::aya_bpf_cty::c_uint = 0;
pub const BPF_NOEXIST: ::aya_bpf_cty::c_uint = 1;
pub const BPF_EXIST: ::aya_bpf_cty::c_uint = 2;
pub const BPF_F_LOCK: ::aya_bpf_cty::c_uint = 4;
pub type _bindgen_ty_2 = ::aya_bpf_cty::c_uint;
pub const BPF_F_NO_PREALLOC: ::aya_bpf_cty::c_uint = 1;
pub const BPF_F_NO_COMMON_LRU: ::aya_bpf_cty::c_uint = 2;
pub const BPF_F_NUMA_NODE: ::aya_bpf_cty::c_uint = 4;
pub const BPF_F_RDONLY: ::aya_bpf_cty::c_uint = 8;
pub const BPF_F_WRONLY: ::aya_bpf_cty::c_uint = 16;
pub const BPF_F_STACK_BUILD_ID: ::aya_bpf_cty::c_uint = 32;
pub const BPF_F_ZERO_SEED: ::aya_bpf_cty::c_uint = 64;
pub const BPF_F_RDONLY_PROG: ::aya_bpf_cty::c_uint = 128;
pub const BPF_F_WRONLY_PROG: ::aya_bpf_cty::c_uint = 256;
pub const BPF_F_CLONE: ::aya_bpf_cty::c_uint = 512;
pub const BPF_F_MMAPABLE: ::aya_bpf_cty::c_uint = 1024;
pub type _bindgen_ty_3 = ::aya_bpf_cty::c_uint;
pub const BPF_F_RECOMPUTE_CSUM: ::aya_bpf_cty::c_uint = 1;
pub const BPF_F_INVALIDATE_HASH: ::aya_bpf_cty::c_uint = 2;
pub type _bindgen_ty_4 = ::aya_bpf_cty::c_uint;
pub const BPF_F_HDR_FIELD_MASK: ::aya_bpf_cty::c_uint = 15;
pub type _bindgen_ty_5 = ::aya_bpf_cty::c_uint;
pub const BPF_F_PSEUDO_HDR: ::aya_bpf_cty::c_uint = 16;
pub const BPF_F_MARK_MANGLED_0: ::aya_bpf_cty::c_uint = 32;
pub const BPF_F_MARK_ENFORCE: ::aya_bpf_cty::c_uint = 64;
pub type _bindgen_ty_6 = ::aya_bpf_cty::c_uint;
pub const BPF_F_INGRESS: ::aya_bpf_cty::c_uint = 1;
pub type _bindgen_ty_7 = ::aya_bpf_cty::c_uint;
pub const BPF_F_TUNINFO_IPV6: ::aya_bpf_cty::c_uint = 1;
pub type _bindgen_ty_8 = ::aya_bpf_cty::c_uint;
pub const BPF_F_SKIP_FIELD_MASK: ::aya_bpf_cty::c_uint = 255;
pub const BPF_F_USER_STACK: ::aya_bpf_cty::c_uint = 256;
pub const BPF_F_FAST_STACK_CMP: ::aya_bpf_cty::c_uint = 512;
pub const BPF_F_REUSE_STACKID: ::aya_bpf_cty::c_uint = 1024;
pub const BPF_F_USER_BUILD_ID: ::aya_bpf_cty::c_uint = 2048;
pub type _bindgen_ty_9 = ::aya_bpf_cty::c_uint;
pub const BPF_F_ZERO_CSUM_TX: ::aya_bpf_cty::c_uint = 2;
pub const BPF_F_DONT_FRAGMENT: ::aya_bpf_cty::c_uint = 4;
pub const BPF_F_SEQ_NUMBER: ::aya_bpf_cty::c_uint = 8;
pub type _bindgen_ty_10 = ::aya_bpf_cty::c_uint;
pub const BPF_F_INDEX_MASK: ::aya_bpf_cty::c_ulong = 4294967295;
pub const BPF_F_CURRENT_CPU: ::aya_bpf_cty::c_ulong = 4294967295;
pub const BPF_F_CTXLEN_MASK: ::aya_bpf_cty::c_ulong = 4503595332403200;
pub type _bindgen_ty_11 = ::aya_bpf_cty::c_ulong;
pub const BPF_F_CURRENT_NETNS: ::aya_bpf_cty::c_int = -1;
pub type _bindgen_ty_12 = ::aya_bpf_cty::c_int;
pub const BPF_CSUM_LEVEL_QUERY: ::aya_bpf_cty::c_uint = 0;
pub const BPF_CSUM_LEVEL_INC: ::aya_bpf_cty::c_uint = 1;
pub const BPF_CSUM_LEVEL_DEC: ::aya_bpf_cty::c_uint = 2;
pub const BPF_CSUM_LEVEL_RESET: ::aya_bpf_cty::c_uint = 3;
pub type _bindgen_ty_13 = ::aya_bpf_cty::c_uint;
pub const BPF_F_ADJ_ROOM_FIXED_GSO: ::aya_bpf_cty::c_uint = 1;
pub const BPF_F_ADJ_ROOM_ENCAP_L3_IPV4: ::aya_bpf_cty::c_uint = 2;
pub const BPF_F_ADJ_ROOM_ENCAP_L3_IPV6: ::aya_bpf_cty::c_uint = 4;
pub const BPF_F_ADJ_ROOM_ENCAP_L4_GRE: ::aya_bpf_cty::c_uint = 8;
pub const BPF_F_ADJ_ROOM_ENCAP_L4_UDP: ::aya_bpf_cty::c_uint = 16;
pub const BPF_F_ADJ_ROOM_NO_CSUM_RESET: ::aya_bpf_cty::c_uint = 32;
pub type _bindgen_ty_14 = ::aya_bpf_cty::c_uint;
pub const BPF_ADJ_ROOM_ENCAP_L2_MASK: ::aya_bpf_cty::c_uint = 255;
pub const BPF_ADJ_ROOM_ENCAP_L2_SHIFT: ::aya_bpf_cty::c_uint = 56;
pub type _bindgen_ty_15 = ::aya_bpf_cty::c_uint;
pub const BPF_F_SYSCTL_BASE_NAME: ::aya_bpf_cty::c_uint = 1;
pub type _bindgen_ty_16 = ::aya_bpf_cty::c_uint;
pub const BPF_SK_STORAGE_GET_F_CREATE: ::aya_bpf_cty::c_uint = 1;
pub type _bindgen_ty_17 = ::aya_bpf_cty::c_uint;
pub const BPF_F_GET_BRANCH_RECORDS_SIZE: ::aya_bpf_cty::c_uint = 1;
pub type _bindgen_ty_18 = ::aya_bpf_cty::c_uint;
pub const BPF_RB_NO_WAKEUP: ::aya_bpf_cty::c_uint = 1;
pub const BPF_RB_FORCE_WAKEUP: ::aya_bpf_cty::c_uint = 2;
pub type _bindgen_ty_19 = ::aya_bpf_cty::c_uint;
pub const BPF_RB_AVAIL_DATA: ::aya_bpf_cty::c_uint = 0;
pub const BPF_RB_RING_SIZE: ::aya_bpf_cty::c_uint = 1;
pub const BPF_RB_CONS_POS: ::aya_bpf_cty::c_uint = 2;
pub const BPF_RB_PROD_POS: ::aya_bpf_cty::c_uint = 3;
pub type _bindgen_ty_20 = ::aya_bpf_cty::c_uint;
pub const BPF_RINGBUF_BUSY_BIT: ::aya_bpf_cty::c_uint = 2147483648;
pub const BPF_RINGBUF_DISCARD_BIT: ::aya_bpf_cty::c_uint = 1073741824;
pub const BPF_RINGBUF_HDR_SZ: ::aya_bpf_cty::c_uint = 8;
pub type _bindgen_ty_21 = ::aya_bpf_cty::c_uint;
#[repr(C)]
#[derive(Copy, Clone)]
pub struct __sk_buff {
pub len: __u32,
pub pkt_type: __u32,
pub mark: __u32,
pub queue_mapping: __u32,
pub protocol: __u32,
pub vlan_present: __u32,
pub vlan_tci: __u32,
pub vlan_proto: __u32,
pub priority: __u32,
pub ingress_ifindex: __u32,
pub ifindex: __u32,
pub tc_index: __u32,
pub cb: [__u32; 5usize],
pub hash: __u32,
pub tc_classid: __u32,
pub data: __u32,
pub data_end: __u32,
pub napi_id: __u32,
pub family: __u32,
pub remote_ip4: __u32,
pub local_ip4: __u32,
pub remote_ip6: [__u32; 4usize],
pub local_ip6: [__u32; 4usize],
pub remote_port: __u32,
pub local_port: __u32,
pub data_meta: __u32,
pub __bindgen_anon_1: __sk_buff__bindgen_ty_1,
pub tstamp: __u64,
pub wire_len: __u32,
pub gso_segs: __u32,
pub __bindgen_anon_2: __sk_buff__bindgen_ty_2,
pub gso_size: __u32,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub union __sk_buff__bindgen_ty_1 {
pub flow_keys: *mut bpf_flow_keys,
pub _bitfield_align_1: [u8; 0],
pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>,
_bindgen_union_align: u64,
}
impl __sk_buff__bindgen_ty_1 {
#[inline]
pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> {
let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default();
__bindgen_bitfield_unit
}
}
#[repr(C)]
#[derive(Copy, Clone)]
pub union __sk_buff__bindgen_ty_2 {
pub sk: *mut bpf_sock,
pub _bitfield_align_1: [u8; 0],
pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>,
_bindgen_union_align: u64,
}
impl __sk_buff__bindgen_ty_2 {
#[inline]
pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> {
let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default();
__bindgen_bitfield_unit
}
}
#[repr(C)]
#[derive(Copy, Clone)]
pub struct bpf_tunnel_key {
pub tunnel_id: __u32,
pub __bindgen_anon_1: bpf_tunnel_key__bindgen_ty_1,
pub tunnel_tos: __u8,
pub tunnel_ttl: __u8,
pub tunnel_ext: __u16,
pub tunnel_label: __u32,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub union bpf_tunnel_key__bindgen_ty_1 {
pub remote_ipv4: __u32,
pub remote_ipv6: [__u32; 4usize],
_bindgen_union_align: [u32; 4usize],
}
#[repr(C)]
#[derive(Copy, Clone)]
pub struct bpf_xfrm_state {
pub reqid: __u32,
pub spi: __u32,
pub family: __u16,
pub ext: __u16,
pub __bindgen_anon_1: bpf_xfrm_state__bindgen_ty_1,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub union bpf_xfrm_state__bindgen_ty_1 {
pub remote_ipv4: __u32,
pub remote_ipv6: [__u32; 4usize],
_bindgen_union_align: [u32; 4usize],
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct bpf_sock {
pub bound_dev_if: __u32,
pub family: __u32,
pub type_: __u32,
pub protocol: __u32,
pub mark: __u32,
pub priority: __u32,
pub src_ip4: __u32,
pub src_ip6: [__u32; 4usize],
pub src_port: __u32,
pub dst_port: __u32,
pub dst_ip4: __u32,
pub dst_ip6: [__u32; 4usize],
pub state: __u32,
pub rx_queue_mapping: __s32,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct bpf_tcp_sock {
pub snd_cwnd: __u32,
pub srtt_us: __u32,
pub rtt_min: __u32,
pub snd_ssthresh: __u32,
pub rcv_nxt: __u32,
pub snd_nxt: __u32,
pub snd_una: __u32,
pub mss_cache: __u32,
pub ecn_flags: __u32,
pub rate_delivered: __u32,
pub rate_interval_us: __u32,
pub packets_out: __u32,
pub retrans_out: __u32,
pub total_retrans: __u32,
pub segs_in: __u32,
pub data_segs_in: __u32,
pub segs_out: __u32,
pub data_segs_out: __u32,
pub lost_out: __u32,
pub sacked_out: __u32,
pub bytes_received: __u64,
pub bytes_acked: __u64,
pub dsack_dups: __u32,
pub delivered: __u32,
pub delivered_ce: __u32,
pub icsk_retransmits: __u32,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub struct bpf_sock_tuple {
pub __bindgen_anon_1: bpf_sock_tuple__bindgen_ty_1,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub union bpf_sock_tuple__bindgen_ty_1 {
pub ipv4: bpf_sock_tuple__bindgen_ty_1__bindgen_ty_1,
pub ipv6: bpf_sock_tuple__bindgen_ty_1__bindgen_ty_2,
_bindgen_union_align: [u32; 9usize],
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct bpf_sock_tuple__bindgen_ty_1__bindgen_ty_1 {
pub saddr: __be32,
pub daddr: __be32,
pub sport: __be16,
pub dport: __be16,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct bpf_sock_tuple__bindgen_ty_1__bindgen_ty_2 {
pub saddr: [__be32; 4usize],
pub daddr: [__be32; 4usize],
pub sport: __be16,
pub dport: __be16,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct xdp_md {
pub data: __u32,
pub data_end: __u32,
pub data_meta: __u32,
pub ingress_ifindex: __u32,
pub rx_queue_index: __u32,
pub egress_ifindex: __u32,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub struct sk_msg_md {
pub __bindgen_anon_1: sk_msg_md__bindgen_ty_1,
pub __bindgen_anon_2: sk_msg_md__bindgen_ty_2,
pub family: __u32,
pub remote_ip4: __u32,
pub local_ip4: __u32,
pub remote_ip6: [__u32; 4usize],
pub local_ip6: [__u32; 4usize],
pub remote_port: __u32,
pub local_port: __u32,
pub size: __u32,
pub __bindgen_anon_3: sk_msg_md__bindgen_ty_3,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub union sk_msg_md__bindgen_ty_1 {
pub data: *mut ::aya_bpf_cty::c_void,
pub _bitfield_align_1: [u8; 0],
pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>,
_bindgen_union_align: u64,
}
impl sk_msg_md__bindgen_ty_1 {
#[inline]
pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> {
let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default();
__bindgen_bitfield_unit
}
}
#[repr(C)]
#[derive(Copy, Clone)]
pub union sk_msg_md__bindgen_ty_2 {
pub data_end: *mut ::aya_bpf_cty::c_void,
pub _bitfield_align_1: [u8; 0],
pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>,
_bindgen_union_align: u64,
}
impl sk_msg_md__bindgen_ty_2 {
#[inline]
pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> {
let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default();
__bindgen_bitfield_unit
}
}
#[repr(C)]
#[derive(Copy, Clone)]
pub union sk_msg_md__bindgen_ty_3 {
pub sk: *mut bpf_sock,
pub _bitfield_align_1: [u8; 0],
pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>,
_bindgen_union_align: u64,
}
impl sk_msg_md__bindgen_ty_3 {
#[inline]
pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> {
let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default();
__bindgen_bitfield_unit
}
}
#[repr(C)]
#[derive(Copy, Clone)]
pub struct sk_reuseport_md {
pub __bindgen_anon_1: sk_reuseport_md__bindgen_ty_1,
pub __bindgen_anon_2: sk_reuseport_md__bindgen_ty_2,
pub len: __u32,
pub eth_protocol: __u32,
pub ip_protocol: __u32,
pub bind_inany: __u32,
pub hash: __u32,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub union sk_reuseport_md__bindgen_ty_1 {
pub data: *mut ::aya_bpf_cty::c_void,
pub _bitfield_align_1: [u8; 0],
pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>,
_bindgen_union_align: u64,
}
impl sk_reuseport_md__bindgen_ty_1 {
#[inline]
pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> {
let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default();
__bindgen_bitfield_unit
}
}
#[repr(C)]
#[derive(Copy, Clone)]
pub union sk_reuseport_md__bindgen_ty_2 {
pub data_end: *mut ::aya_bpf_cty::c_void,
pub _bitfield_align_1: [u8; 0],
pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>,
_bindgen_union_align: u64,
}
impl sk_reuseport_md__bindgen_ty_2 {
#[inline]
pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> {
let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default();
__bindgen_bitfield_unit
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct bpf_map_info {
pub type_: __u32,
pub id: __u32,
pub key_size: __u32,
pub value_size: __u32,
pub max_entries: __u32,
pub map_flags: __u32,
pub name: [::aya_bpf_cty::c_char; 16usize],
pub ifindex: __u32,
pub btf_vmlinux_value_type_id: __u32,
pub netns_dev: __u64,
pub netns_ino: __u64,
pub btf_id: __u32,
pub btf_key_type_id: __u32,
pub btf_value_type_id: __u32,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub struct bpf_sock_addr {
pub user_family: __u32,
pub user_ip4: __u32,
pub user_ip6: [__u32; 4usize],
pub user_port: __u32,
pub family: __u32,
pub type_: __u32,
pub protocol: __u32,
pub msg_src_ip4: __u32,
pub msg_src_ip6: [__u32; 4usize],
pub __bindgen_anon_1: bpf_sock_addr__bindgen_ty_1,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub union bpf_sock_addr__bindgen_ty_1 {
pub sk: *mut bpf_sock,
pub _bitfield_align_1: [u8; 0],
pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>,
_bindgen_union_align: u64,
}
impl bpf_sock_addr__bindgen_ty_1 {
#[inline]
pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> {
let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default();
__bindgen_bitfield_unit
}
}
#[repr(C)]
#[derive(Copy, Clone)]
pub struct bpf_sock_ops {
pub op: __u32,
pub __bindgen_anon_1: bpf_sock_ops__bindgen_ty_1,
pub family: __u32,
pub remote_ip4: __u32,
pub local_ip4: __u32,
pub remote_ip6: [__u32; 4usize],
pub local_ip6: [__u32; 4usize],
pub remote_port: __u32,
pub local_port: __u32,
pub is_fullsock: __u32,
pub snd_cwnd: __u32,
pub srtt_us: __u32,
pub bpf_sock_ops_cb_flags: __u32,
pub state: __u32,
pub rtt_min: __u32,
pub snd_ssthresh: __u32,
pub rcv_nxt: __u32,
pub snd_nxt: __u32,
pub snd_una: __u32,
pub mss_cache: __u32,
pub ecn_flags: __u32,
pub rate_delivered: __u32,
pub rate_interval_us: __u32,
pub packets_out: __u32,
pub retrans_out: __u32,
pub total_retrans: __u32,
pub segs_in: __u32,
pub data_segs_in: __u32,
pub segs_out: __u32,
pub data_segs_out: __u32,
pub lost_out: __u32,
pub sacked_out: __u32,
pub sk_txhash: __u32,
pub bytes_received: __u64,
pub bytes_acked: __u64,
pub __bindgen_anon_2: bpf_sock_ops__bindgen_ty_2,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub union bpf_sock_ops__bindgen_ty_1 {
pub args: [__u32; 4usize],
pub reply: __u32,
pub replylong: [__u32; 4usize],
_bindgen_union_align: [u32; 4usize],
}
#[repr(C)]
#[derive(Copy, Clone)]
pub union bpf_sock_ops__bindgen_ty_2 {
pub sk: *mut bpf_sock,
pub _bitfield_align_1: [u8; 0],
pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>,
_bindgen_union_align: u64,
}
impl bpf_sock_ops__bindgen_ty_2 {
#[inline]
pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> {
let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default();
__bindgen_bitfield_unit
}
}
pub const BPF_SOCK_OPS_RTO_CB_FLAG: ::aya_bpf_cty::c_uint = 1;
pub const BPF_SOCK_OPS_RETRANS_CB_FLAG: ::aya_bpf_cty::c_uint = 2;
pub const BPF_SOCK_OPS_STATE_CB_FLAG: ::aya_bpf_cty::c_uint = 4;
pub const BPF_SOCK_OPS_RTT_CB_FLAG: ::aya_bpf_cty::c_uint = 8;
pub const BPF_SOCK_OPS_ALL_CB_FLAGS: ::aya_bpf_cty::c_uint = 15;
pub type _bindgen_ty_22 = ::aya_bpf_cty::c_uint;
pub const BPF_SOCK_OPS_VOID: ::aya_bpf_cty::c_uint = 0;
pub const BPF_SOCK_OPS_TIMEOUT_INIT: ::aya_bpf_cty::c_uint = 1;
pub const BPF_SOCK_OPS_RWND_INIT: ::aya_bpf_cty::c_uint = 2;
pub const BPF_SOCK_OPS_TCP_CONNECT_CB: ::aya_bpf_cty::c_uint = 3;
pub const BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB: ::aya_bpf_cty::c_uint = 4;
pub const BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB: ::aya_bpf_cty::c_uint = 5;
pub const BPF_SOCK_OPS_NEEDS_ECN: ::aya_bpf_cty::c_uint = 6;
pub const BPF_SOCK_OPS_BASE_RTT: ::aya_bpf_cty::c_uint = 7;
pub const BPF_SOCK_OPS_RTO_CB: ::aya_bpf_cty::c_uint = 8;
pub const BPF_SOCK_OPS_RETRANS_CB: ::aya_bpf_cty::c_uint = 9;
pub const BPF_SOCK_OPS_STATE_CB: ::aya_bpf_cty::c_uint = 10;
pub const BPF_SOCK_OPS_TCP_LISTEN_CB: ::aya_bpf_cty::c_uint = 11;
pub const BPF_SOCK_OPS_RTT_CB: ::aya_bpf_cty::c_uint = 12;
pub type _bindgen_ty_23 = ::aya_bpf_cty::c_uint;
pub const BPF_TCP_ESTABLISHED: ::aya_bpf_cty::c_uint = 1;
pub const BPF_TCP_SYN_SENT: ::aya_bpf_cty::c_uint = 2;
pub const BPF_TCP_SYN_RECV: ::aya_bpf_cty::c_uint = 3;
pub const BPF_TCP_FIN_WAIT1: ::aya_bpf_cty::c_uint = 4;
pub const BPF_TCP_FIN_WAIT2: ::aya_bpf_cty::c_uint = 5;
pub const BPF_TCP_TIME_WAIT: ::aya_bpf_cty::c_uint = 6;
pub const BPF_TCP_CLOSE: ::aya_bpf_cty::c_uint = 7;
pub const BPF_TCP_CLOSE_WAIT: ::aya_bpf_cty::c_uint = 8;
pub const BPF_TCP_LAST_ACK: ::aya_bpf_cty::c_uint = 9;
pub const BPF_TCP_LISTEN: ::aya_bpf_cty::c_uint = 10;
pub const BPF_TCP_CLOSING: ::aya_bpf_cty::c_uint = 11;
pub const BPF_TCP_NEW_SYN_RECV: ::aya_bpf_cty::c_uint = 12;
pub const BPF_TCP_MAX_STATES: ::aya_bpf_cty::c_uint = 13;
pub type _bindgen_ty_24 = ::aya_bpf_cty::c_uint;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct bpf_perf_event_value {
pub counter: __u64,
pub enabled: __u64,
pub running: __u64,
}
pub const BPF_DEVCG_ACC_MKNOD: ::aya_bpf_cty::c_uint = 1;
pub const BPF_DEVCG_ACC_READ: ::aya_bpf_cty::c_uint = 2;
pub const BPF_DEVCG_ACC_WRITE: ::aya_bpf_cty::c_uint = 4;
pub type _bindgen_ty_26 = ::aya_bpf_cty::c_uint;
pub const BPF_DEVCG_DEV_BLOCK: ::aya_bpf_cty::c_uint = 1;
pub const BPF_DEVCG_DEV_CHAR: ::aya_bpf_cty::c_uint = 2;
pub type _bindgen_ty_27 = ::aya_bpf_cty::c_uint;
pub const BPF_FIB_LOOKUP_DIRECT: ::aya_bpf_cty::c_uint = 1;
pub const BPF_FIB_LOOKUP_OUTPUT: ::aya_bpf_cty::c_uint = 2;
pub type _bindgen_ty_28 = ::aya_bpf_cty::c_uint;
pub const BPF_FIB_LKUP_RET_SUCCESS: ::aya_bpf_cty::c_uint = 0;
pub const BPF_FIB_LKUP_RET_BLACKHOLE: ::aya_bpf_cty::c_uint = 1;
pub const BPF_FIB_LKUP_RET_UNREACHABLE: ::aya_bpf_cty::c_uint = 2;
pub const BPF_FIB_LKUP_RET_PROHIBIT: ::aya_bpf_cty::c_uint = 3;
pub const BPF_FIB_LKUP_RET_NOT_FWDED: ::aya_bpf_cty::c_uint = 4;
pub const BPF_FIB_LKUP_RET_FWD_DISABLED: ::aya_bpf_cty::c_uint = 5;
pub const BPF_FIB_LKUP_RET_UNSUPP_LWT: ::aya_bpf_cty::c_uint = 6;
pub const BPF_FIB_LKUP_RET_NO_NEIGH: ::aya_bpf_cty::c_uint = 7;
pub const BPF_FIB_LKUP_RET_FRAG_NEEDED: ::aya_bpf_cty::c_uint = 8;
pub type _bindgen_ty_29 = ::aya_bpf_cty::c_uint;
#[repr(C)]
#[derive(Copy, Clone)]
pub struct bpf_fib_lookup {
pub family: __u8,
pub l4_protocol: __u8,
pub sport: __be16,
pub dport: __be16,
pub tot_len: __u16,
pub ifindex: __u32,
pub __bindgen_anon_1: bpf_fib_lookup__bindgen_ty_1,
pub __bindgen_anon_2: bpf_fib_lookup__bindgen_ty_2,
pub __bindgen_anon_3: bpf_fib_lookup__bindgen_ty_3,
pub h_vlan_proto: __be16,
pub h_vlan_TCI: __be16,
pub smac: [__u8; 6usize],
pub dmac: [__u8; 6usize],
}
#[repr(C)]
#[derive(Copy, Clone)]
pub union bpf_fib_lookup__bindgen_ty_1 {
pub tos: __u8,
pub flowinfo: __be32,
pub rt_metric: __u32,
_bindgen_union_align: u32,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub union bpf_fib_lookup__bindgen_ty_2 {
pub ipv4_src: __be32,
pub ipv6_src: [__u32; 4usize],
_bindgen_union_align: [u32; 4usize],
}
#[repr(C)]
#[derive(Copy, Clone)]
pub union bpf_fib_lookup__bindgen_ty_3 {
pub ipv4_dst: __be32,
pub ipv6_dst: [__u32; 4usize],
_bindgen_union_align: [u32; 4usize],
}
pub const BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG: ::aya_bpf_cty::c_uint = 1;
pub const BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL: ::aya_bpf_cty::c_uint = 2;
pub const BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP: ::aya_bpf_cty::c_uint = 4;
pub type _bindgen_ty_30 = ::aya_bpf_cty::c_uint;
#[repr(C)]
#[derive(Copy, Clone)]
pub struct bpf_flow_keys {
pub nhoff: __u16,
pub thoff: __u16,
pub addr_proto: __u16,
pub is_frag: __u8,
pub is_first_frag: __u8,
pub is_encap: __u8,
pub ip_proto: __u8,
pub n_proto: __be16,
pub sport: __be16,
pub dport: __be16,
pub __bindgen_anon_1: bpf_flow_keys__bindgen_ty_1,
pub flags: __u32,
pub flow_label: __be32,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub union bpf_flow_keys__bindgen_ty_1 {
pub __bindgen_anon_1: bpf_flow_keys__bindgen_ty_1__bindgen_ty_1,
pub __bindgen_anon_2: bpf_flow_keys__bindgen_ty_1__bindgen_ty_2,
_bindgen_union_align: [u32; 8usize],
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct bpf_flow_keys__bindgen_ty_1__bindgen_ty_1 {
pub ipv4_src: __be32,
pub ipv4_dst: __be32,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct bpf_flow_keys__bindgen_ty_1__bindgen_ty_2 {
pub ipv6_src: [__u32; 4usize],
pub ipv6_dst: [__u32; 4usize],
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct bpf_spin_lock {
pub val: __u32,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct bpf_sysctl {
pub write: __u32,
pub file_pos: __u32,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct bpf_pidns_info {
pub pid: __u32,
pub tgid: __u32,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct bpf_perf_event_data {
_unused: [u8; 0],
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct bpf_redir_neigh {
_unused: [u8; 0],
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct linux_binprm {
_unused: [u8; 0],
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct pt_regs {
_unused: [u8; 0],
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct sockaddr {
_unused: [u8; 0],
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct tcphdr {
_unused: [u8; 0],
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct seq_file {
_unused: [u8; 0],
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct tcp6_sock {
_unused: [u8; 0],
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct tcp_sock {
_unused: [u8; 0],
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct tcp_timewait_sock {
_unused: [u8; 0],
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct tcp_request_sock {
_unused: [u8; 0],
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct udp6_sock {
_unused: [u8; 0],
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct task_struct {
_unused: [u8; 0],
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct path {
_unused: [u8; 0],
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct btf_ptr {
_unused: [u8; 0],
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct inode {
_unused: [u8; 0],
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct socket {
_unused: [u8; 0],
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct file {
_unused: [u8; 0],
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct bpf_map_def {
pub type_: ::aya_bpf_cty::c_uint,
pub key_size: ::aya_bpf_cty::c_uint,
pub value_size: ::aya_bpf_cty::c_uint,
pub max_entries: ::aya_bpf_cty::c_uint,
pub map_flags: ::aya_bpf_cty::c_uint,
}
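
bpf_map_def is the legacy map-definition layout that loaders read out of an ELF section of the object file. A sketch of a static map declared with it, assuming the conventional "maps" section name used by libbpf-style loaders (not part of this commit; aya may place maps in differently named sections):

#[link_section = "maps"]
static EVENTS: bpf_map_def = bpf_map_def {
    type_: BPF_MAP_TYPE_PERF_EVENT_ARRAY,
    key_size: 4,
    value_size: 4,
    max_entries: 0,
    map_flags: 0,
};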

@@ -1,985 +0,0 @@
use crate::bpf::generated::bindings::*;
impl<Storage> __BindgenBitfieldUnit<Storage> {}
impl __sk_buff {
pub fn len(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.len) }.ok()
}
pub fn pkt_type(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.pkt_type) }.ok()
}
pub fn mark(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.mark) }.ok()
}
pub fn queue_mapping(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.queue_mapping) }.ok()
}
pub fn protocol(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.protocol) }.ok()
}
pub fn vlan_present(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.vlan_present) }.ok()
}
pub fn vlan_tci(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.vlan_tci) }.ok()
}
pub fn vlan_proto(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.vlan_proto) }.ok()
}
pub fn priority(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.priority) }.ok()
}
pub fn ingress_ifindex(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.ingress_ifindex) }.ok()
}
pub fn ifindex(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.ifindex) }.ok()
}
pub fn tc_index(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.tc_index) }.ok()
}
pub fn cb(&self) -> Option<[__u32; 5usize]> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.cb) }.ok()
}
pub fn hash(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.hash) }.ok()
}
pub fn tc_classid(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.tc_classid) }.ok()
}
pub fn data(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.data) }.ok()
}
pub fn data_end(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.data_end) }.ok()
}
pub fn napi_id(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.napi_id) }.ok()
}
pub fn family(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.family) }.ok()
}
pub fn remote_ip4(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.remote_ip4) }.ok()
}
pub fn local_ip4(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.local_ip4) }.ok()
}
pub fn remote_ip6(&self) -> Option<[__u32; 4usize]> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.remote_ip6) }.ok()
}
pub fn local_ip6(&self) -> Option<[__u32; 4usize]> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.local_ip6) }.ok()
}
pub fn remote_port(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.remote_port) }.ok()
}
pub fn local_port(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.local_port) }.ok()
}
pub fn data_meta(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.data_meta) }.ok()
}
pub fn flow_keys(&self) -> Option<*mut bpf_flow_keys> {
let v = unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.flow_keys) }
.ok()?;
if v.is_null() {
None
} else {
Some(v)
}
}
pub fn tstamp(&self) -> Option<__u64> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.tstamp) }.ok()
}
pub fn wire_len(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.wire_len) }.ok()
}
pub fn gso_segs(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.gso_segs) }.ok()
}
pub fn sk(&self) -> Option<*mut bpf_sock> {
let v = unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_2.sk) }.ok()?;
if v.is_null() {
None
} else {
Some(v)
}
}
pub fn gso_size(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.gso_size) }.ok()
}
}
impl __sk_buff__bindgen_ty_1 {
pub fn flow_keys(&self) -> Option<*mut bpf_flow_keys> {
let v = unsafe { crate::bpf::helpers::bpf_probe_read(&self.flow_keys) }.ok()?;
if v.is_null() {
None
} else {
Some(v)
}
}
}
impl __sk_buff__bindgen_ty_2 {
pub fn sk(&self) -> Option<*mut bpf_sock> {
let v = unsafe { crate::bpf::helpers::bpf_probe_read(&self.sk) }.ok()?;
if v.is_null() {
None
} else {
Some(v)
}
}
}
impl bpf_tunnel_key {
pub fn tunnel_id(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.tunnel_id) }.ok()
}
pub fn remote_ipv4(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.remote_ipv4) }.ok()
}
pub fn remote_ipv6(&self) -> Option<[__u32; 4usize]> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.remote_ipv6) }.ok()
}
pub fn tunnel_tos(&self) -> Option<__u8> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.tunnel_tos) }.ok()
}
pub fn tunnel_ttl(&self) -> Option<__u8> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.tunnel_ttl) }.ok()
}
pub fn tunnel_ext(&self) -> Option<__u16> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.tunnel_ext) }.ok()
}
pub fn tunnel_label(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.tunnel_label) }.ok()
}
}
impl bpf_tunnel_key__bindgen_ty_1 {
pub fn remote_ipv4(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.remote_ipv4) }.ok()
}
pub fn remote_ipv6(&self) -> Option<[__u32; 4usize]> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.remote_ipv6) }.ok()
}
}
impl bpf_xfrm_state {
pub fn reqid(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.reqid) }.ok()
}
pub fn spi(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.spi) }.ok()
}
pub fn family(&self) -> Option<__u16> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.family) }.ok()
}
pub fn ext(&self) -> Option<__u16> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.ext) }.ok()
}
pub fn remote_ipv4(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.remote_ipv4) }.ok()
}
pub fn remote_ipv6(&self) -> Option<[__u32; 4usize]> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.remote_ipv6) }.ok()
}
}
impl bpf_xfrm_state__bindgen_ty_1 {
pub fn remote_ipv4(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.remote_ipv4) }.ok()
}
pub fn remote_ipv6(&self) -> Option<[__u32; 4usize]> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.remote_ipv6) }.ok()
}
}
impl bpf_sock {
pub fn bound_dev_if(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.bound_dev_if) }.ok()
}
pub fn family(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.family) }.ok()
}
pub fn type_(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.type_) }.ok()
}
pub fn protocol(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.protocol) }.ok()
}
pub fn mark(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.mark) }.ok()
}
pub fn priority(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.priority) }.ok()
}
pub fn src_ip4(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.src_ip4) }.ok()
}
pub fn src_ip6(&self) -> Option<[__u32; 4usize]> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.src_ip6) }.ok()
}
pub fn src_port(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.src_port) }.ok()
}
pub fn dst_port(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.dst_port) }.ok()
}
pub fn dst_ip4(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.dst_ip4) }.ok()
}
pub fn dst_ip6(&self) -> Option<[__u32; 4usize]> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.dst_ip6) }.ok()
}
pub fn state(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.state) }.ok()
}
pub fn rx_queue_mapping(&self) -> Option<__s32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.rx_queue_mapping) }.ok()
}
}
impl bpf_tcp_sock {
pub fn snd_cwnd(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.snd_cwnd) }.ok()
}
pub fn srtt_us(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.srtt_us) }.ok()
}
pub fn rtt_min(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.rtt_min) }.ok()
}
pub fn snd_ssthresh(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.snd_ssthresh) }.ok()
}
pub fn rcv_nxt(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.rcv_nxt) }.ok()
}
pub fn snd_nxt(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.snd_nxt) }.ok()
}
pub fn snd_una(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.snd_una) }.ok()
}
pub fn mss_cache(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.mss_cache) }.ok()
}
pub fn ecn_flags(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.ecn_flags) }.ok()
}
pub fn rate_delivered(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.rate_delivered) }.ok()
}
pub fn rate_interval_us(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.rate_interval_us) }.ok()
}
pub fn packets_out(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.packets_out) }.ok()
}
pub fn retrans_out(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.retrans_out) }.ok()
}
pub fn total_retrans(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.total_retrans) }.ok()
}
pub fn segs_in(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.segs_in) }.ok()
}
pub fn data_segs_in(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.data_segs_in) }.ok()
}
pub fn segs_out(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.segs_out) }.ok()
}
pub fn data_segs_out(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.data_segs_out) }.ok()
}
pub fn lost_out(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.lost_out) }.ok()
}
pub fn sacked_out(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.sacked_out) }.ok()
}
pub fn bytes_received(&self) -> Option<__u64> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.bytes_received) }.ok()
}
pub fn bytes_acked(&self) -> Option<__u64> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.bytes_acked) }.ok()
}
pub fn dsack_dups(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.dsack_dups) }.ok()
}
pub fn delivered(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.delivered) }.ok()
}
pub fn delivered_ce(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.delivered_ce) }.ok()
}
pub fn icsk_retransmits(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.icsk_retransmits) }.ok()
}
}
impl bpf_sock_tuple {
pub fn ipv4(&self) -> Option<bpf_sock_tuple__bindgen_ty_1__bindgen_ty_1> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.ipv4) }.ok()
}
pub fn ipv6(&self) -> Option<bpf_sock_tuple__bindgen_ty_1__bindgen_ty_2> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.ipv6) }.ok()
}
}
impl bpf_sock_tuple__bindgen_ty_1 {
pub fn ipv4(&self) -> Option<bpf_sock_tuple__bindgen_ty_1__bindgen_ty_1> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.ipv4) }.ok()
}
pub fn ipv6(&self) -> Option<bpf_sock_tuple__bindgen_ty_1__bindgen_ty_2> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.ipv6) }.ok()
}
}
impl bpf_sock_tuple__bindgen_ty_1__bindgen_ty_1 {
pub fn saddr(&self) -> Option<__be32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.saddr) }.ok()
}
pub fn daddr(&self) -> Option<__be32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.daddr) }.ok()
}
pub fn sport(&self) -> Option<__be16> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.sport) }.ok()
}
pub fn dport(&self) -> Option<__be16> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.dport) }.ok()
}
}
impl bpf_sock_tuple__bindgen_ty_1__bindgen_ty_2 {
pub fn saddr(&self) -> Option<[__be32; 4usize]> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.saddr) }.ok()
}
pub fn daddr(&self) -> Option<[__be32; 4usize]> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.daddr) }.ok()
}
pub fn sport(&self) -> Option<__be16> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.sport) }.ok()
}
pub fn dport(&self) -> Option<__be16> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.dport) }.ok()
}
}
impl xdp_md {
pub fn data(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.data) }.ok()
}
pub fn data_end(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.data_end) }.ok()
}
pub fn data_meta(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.data_meta) }.ok()
}
pub fn ingress_ifindex(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.ingress_ifindex) }.ok()
}
pub fn rx_queue_index(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.rx_queue_index) }.ok()
}
pub fn egress_ifindex(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.egress_ifindex) }.ok()
}
}
impl sk_msg_md {
pub fn data(&self) -> Option<*mut ::aya_bpf_cty::c_void> {
let v = unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.data) }.ok()?;
if v.is_null() {
None
} else {
Some(v)
}
}
pub fn data_end(&self) -> Option<*mut ::aya_bpf_cty::c_void> {
let v =
unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_2.data_end) }.ok()?;
if v.is_null() {
None
} else {
Some(v)
}
}
pub fn family(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.family) }.ok()
}
pub fn remote_ip4(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.remote_ip4) }.ok()
}
pub fn local_ip4(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.local_ip4) }.ok()
}
pub fn remote_ip6(&self) -> Option<[__u32; 4usize]> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.remote_ip6) }.ok()
}
pub fn local_ip6(&self) -> Option<[__u32; 4usize]> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.local_ip6) }.ok()
}
pub fn remote_port(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.remote_port) }.ok()
}
pub fn local_port(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.local_port) }.ok()
}
pub fn size(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.size) }.ok()
}
pub fn sk(&self) -> Option<*mut bpf_sock> {
let v = unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_3.sk) }.ok()?;
if v.is_null() {
None
} else {
Some(v)
}
}
}
impl sk_msg_md__bindgen_ty_1 {
pub fn data(&self) -> Option<*mut ::aya_bpf_cty::c_void> {
let v = unsafe { crate::bpf::helpers::bpf_probe_read(&self.data) }.ok()?;
if v.is_null() {
None
} else {
Some(v)
}
}
}
impl sk_msg_md__bindgen_ty_2 {
pub fn data_end(&self) -> Option<*mut ::aya_bpf_cty::c_void> {
let v = unsafe { crate::bpf::helpers::bpf_probe_read(&self.data_end) }.ok()?;
if v.is_null() {
None
} else {
Some(v)
}
}
}
impl sk_msg_md__bindgen_ty_3 {
pub fn sk(&self) -> Option<*mut bpf_sock> {
let v = unsafe { crate::bpf::helpers::bpf_probe_read(&self.sk) }.ok()?;
if v.is_null() {
None
} else {
Some(v)
}
}
}
impl sk_reuseport_md {
pub fn data(&self) -> Option<*mut ::aya_bpf_cty::c_void> {
let v = unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.data) }.ok()?;
if v.is_null() {
None
} else {
Some(v)
}
}
pub fn data_end(&self) -> Option<*mut ::aya_bpf_cty::c_void> {
let v =
unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_2.data_end) }.ok()?;
if v.is_null() {
None
} else {
Some(v)
}
}
pub fn len(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.len) }.ok()
}
pub fn eth_protocol(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.eth_protocol) }.ok()
}
pub fn ip_protocol(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.ip_protocol) }.ok()
}
pub fn bind_inany(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.bind_inany) }.ok()
}
pub fn hash(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.hash) }.ok()
}
}
impl sk_reuseport_md__bindgen_ty_1 {
pub fn data(&self) -> Option<*mut ::aya_bpf_cty::c_void> {
let v = unsafe { crate::bpf::helpers::bpf_probe_read(&self.data) }.ok()?;
if v.is_null() {
None
} else {
Some(v)
}
}
}
impl sk_reuseport_md__bindgen_ty_2 {
pub fn data_end(&self) -> Option<*mut ::aya_bpf_cty::c_void> {
let v = unsafe { crate::bpf::helpers::bpf_probe_read(&self.data_end) }.ok()?;
if v.is_null() {
None
} else {
Some(v)
}
}
}
impl bpf_map_info {
pub fn type_(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.type_) }.ok()
}
pub fn id(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.id) }.ok()
}
pub fn key_size(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.key_size) }.ok()
}
pub fn value_size(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.value_size) }.ok()
}
pub fn max_entries(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.max_entries) }.ok()
}
pub fn map_flags(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.map_flags) }.ok()
}
pub fn name(&self) -> Option<[::aya_bpf_cty::c_char; 16usize]> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.name) }.ok()
}
pub fn ifindex(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.ifindex) }.ok()
}
pub fn btf_vmlinux_value_type_id(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.btf_vmlinux_value_type_id) }.ok()
}
pub fn netns_dev(&self) -> Option<__u64> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.netns_dev) }.ok()
}
pub fn netns_ino(&self) -> Option<__u64> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.netns_ino) }.ok()
}
pub fn btf_id(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.btf_id) }.ok()
}
pub fn btf_key_type_id(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.btf_key_type_id) }.ok()
}
pub fn btf_value_type_id(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.btf_value_type_id) }.ok()
}
}
impl bpf_sock_addr {
pub fn user_family(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.user_family) }.ok()
}
pub fn user_ip4(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.user_ip4) }.ok()
}
pub fn user_ip6(&self) -> Option<[__u32; 4usize]> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.user_ip6) }.ok()
}
pub fn user_port(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.user_port) }.ok()
}
pub fn family(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.family) }.ok()
}
pub fn type_(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.type_) }.ok()
}
pub fn protocol(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.protocol) }.ok()
}
pub fn msg_src_ip4(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.msg_src_ip4) }.ok()
}
pub fn msg_src_ip6(&self) -> Option<[__u32; 4usize]> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.msg_src_ip6) }.ok()
}
pub fn sk(&self) -> Option<*mut bpf_sock> {
let v = unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.sk) }.ok()?;
if v.is_null() {
None
} else {
Some(v)
}
}
}
impl bpf_sock_addr__bindgen_ty_1 {
pub fn sk(&self) -> Option<*mut bpf_sock> {
let v = unsafe { crate::bpf::helpers::bpf_probe_read(&self.sk) }.ok()?;
if v.is_null() {
None
} else {
Some(v)
}
}
}
impl bpf_sock_ops {
pub fn op(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.op) }.ok()
}
pub fn args(&self) -> Option<[__u32; 4usize]> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.args) }.ok()
}
pub fn reply(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.reply) }.ok()
}
pub fn replylong(&self) -> Option<[__u32; 4usize]> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.replylong) }.ok()
}
pub fn family(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.family) }.ok()
}
pub fn remote_ip4(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.remote_ip4) }.ok()
}
pub fn local_ip4(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.local_ip4) }.ok()
}
pub fn remote_ip6(&self) -> Option<[__u32; 4usize]> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.remote_ip6) }.ok()
}
pub fn local_ip6(&self) -> Option<[__u32; 4usize]> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.local_ip6) }.ok()
}
pub fn remote_port(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.remote_port) }.ok()
}
pub fn local_port(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.local_port) }.ok()
}
pub fn is_fullsock(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.is_fullsock) }.ok()
}
pub fn snd_cwnd(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.snd_cwnd) }.ok()
}
pub fn srtt_us(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.srtt_us) }.ok()
}
pub fn bpf_sock_ops_cb_flags(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.bpf_sock_ops_cb_flags) }.ok()
}
pub fn state(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.state) }.ok()
}
pub fn rtt_min(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.rtt_min) }.ok()
}
pub fn snd_ssthresh(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.snd_ssthresh) }.ok()
}
pub fn rcv_nxt(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.rcv_nxt) }.ok()
}
pub fn snd_nxt(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.snd_nxt) }.ok()
}
pub fn snd_una(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.snd_una) }.ok()
}
pub fn mss_cache(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.mss_cache) }.ok()
}
pub fn ecn_flags(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.ecn_flags) }.ok()
}
pub fn rate_delivered(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.rate_delivered) }.ok()
}
pub fn rate_interval_us(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.rate_interval_us) }.ok()
}
pub fn packets_out(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.packets_out) }.ok()
}
pub fn retrans_out(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.retrans_out) }.ok()
}
pub fn total_retrans(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.total_retrans) }.ok()
}
pub fn segs_in(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.segs_in) }.ok()
}
pub fn data_segs_in(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.data_segs_in) }.ok()
}
pub fn segs_out(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.segs_out) }.ok()
}
pub fn data_segs_out(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.data_segs_out) }.ok()
}
pub fn lost_out(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.lost_out) }.ok()
}
pub fn sacked_out(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.sacked_out) }.ok()
}
pub fn sk_txhash(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.sk_txhash) }.ok()
}
pub fn bytes_received(&self) -> Option<__u64> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.bytes_received) }.ok()
}
pub fn bytes_acked(&self) -> Option<__u64> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.bytes_acked) }.ok()
}
pub fn sk(&self) -> Option<*mut bpf_sock> {
let v = unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_2.sk) }.ok()?;
if v.is_null() {
None
} else {
Some(v)
}
}
}
impl bpf_sock_ops__bindgen_ty_1 {
pub fn args(&self) -> Option<[__u32; 4usize]> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.args) }.ok()
}
pub fn reply(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.reply) }.ok()
}
pub fn replylong(&self) -> Option<[__u32; 4usize]> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.replylong) }.ok()
}
}
impl bpf_sock_ops__bindgen_ty_2 {
pub fn sk(&self) -> Option<*mut bpf_sock> {
let v = unsafe { crate::bpf::helpers::bpf_probe_read(&self.sk) }.ok()?;
if v.is_null() {
None
} else {
Some(v)
}
}
}
impl bpf_perf_event_value {
pub fn counter(&self) -> Option<__u64> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.counter) }.ok()
}
pub fn enabled(&self) -> Option<__u64> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.enabled) }.ok()
}
pub fn running(&self) -> Option<__u64> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.running) }.ok()
}
}
impl bpf_fib_lookup {
pub fn family(&self) -> Option<__u8> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.family) }.ok()
}
pub fn l4_protocol(&self) -> Option<__u8> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.l4_protocol) }.ok()
}
pub fn sport(&self) -> Option<__be16> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.sport) }.ok()
}
pub fn dport(&self) -> Option<__be16> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.dport) }.ok()
}
pub fn tot_len(&self) -> Option<__u16> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.tot_len) }.ok()
}
pub fn ifindex(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.ifindex) }.ok()
}
pub fn tos(&self) -> Option<__u8> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.tos) }.ok()
}
pub fn flowinfo(&self) -> Option<__be32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.flowinfo) }.ok()
}
pub fn rt_metric(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.rt_metric) }.ok()
}
pub fn ipv4_src(&self) -> Option<__be32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_2.ipv4_src) }.ok()
}
pub fn ipv6_src(&self) -> Option<[__u32; 4usize]> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_2.ipv6_src) }.ok()
}
pub fn ipv4_dst(&self) -> Option<__be32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_3.ipv4_dst) }.ok()
}
pub fn ipv6_dst(&self) -> Option<[__u32; 4usize]> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_3.ipv6_dst) }.ok()
}
pub fn h_vlan_proto(&self) -> Option<__be16> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.h_vlan_proto) }.ok()
}
pub fn h_vlan_TCI(&self) -> Option<__be16> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.h_vlan_TCI) }.ok()
}
pub fn smac(&self) -> Option<[__u8; 6usize]> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.smac) }.ok()
}
pub fn dmac(&self) -> Option<[__u8; 6usize]> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.dmac) }.ok()
}
}
impl bpf_fib_lookup__bindgen_ty_1 {
pub fn tos(&self) -> Option<__u8> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.tos) }.ok()
}
pub fn flowinfo(&self) -> Option<__be32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.flowinfo) }.ok()
}
pub fn rt_metric(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.rt_metric) }.ok()
}
}
impl bpf_fib_lookup__bindgen_ty_2 {
pub fn ipv4_src(&self) -> Option<__be32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.ipv4_src) }.ok()
}
pub fn ipv6_src(&self) -> Option<[__u32; 4usize]> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.ipv6_src) }.ok()
}
}
impl bpf_fib_lookup__bindgen_ty_3 {
pub fn ipv4_dst(&self) -> Option<__be32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.ipv4_dst) }.ok()
}
pub fn ipv6_dst(&self) -> Option<[__u32; 4usize]> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.ipv6_dst) }.ok()
}
}
impl bpf_flow_keys {
pub fn nhoff(&self) -> Option<__u16> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.nhoff) }.ok()
}
pub fn thoff(&self) -> Option<__u16> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.thoff) }.ok()
}
pub fn addr_proto(&self) -> Option<__u16> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.addr_proto) }.ok()
}
pub fn is_frag(&self) -> Option<__u8> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.is_frag) }.ok()
}
pub fn is_first_frag(&self) -> Option<__u8> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.is_first_frag) }.ok()
}
pub fn is_encap(&self) -> Option<__u8> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.is_encap) }.ok()
}
pub fn ip_proto(&self) -> Option<__u8> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.ip_proto) }.ok()
}
pub fn n_proto(&self) -> Option<__be16> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.n_proto) }.ok()
}
pub fn sport(&self) -> Option<__be16> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.sport) }.ok()
}
pub fn dport(&self) -> Option<__be16> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.dport) }.ok()
}
pub fn ipv4_src(&self) -> Option<__be32> {
unsafe {
crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.__bindgen_anon_1.ipv4_src)
}
.ok()
}
pub fn ipv4_dst(&self) -> Option<__be32> {
unsafe {
crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.__bindgen_anon_1.ipv4_dst)
}
.ok()
}
pub fn ipv6_src(&self) -> Option<[__u32; 4usize]> {
unsafe {
crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.__bindgen_anon_2.ipv6_src)
}
.ok()
}
pub fn ipv6_dst(&self) -> Option<[__u32; 4usize]> {
unsafe {
crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.__bindgen_anon_2.ipv6_dst)
}
.ok()
}
pub fn flags(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.flags) }.ok()
}
pub fn flow_label(&self) -> Option<__be32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.flow_label) }.ok()
}
}
impl bpf_flow_keys__bindgen_ty_1 {
pub fn ipv4_src(&self) -> Option<__be32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.ipv4_src) }.ok()
}
pub fn ipv4_dst(&self) -> Option<__be32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_1.ipv4_dst) }.ok()
}
pub fn ipv6_src(&self) -> Option<[__u32; 4usize]> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_2.ipv6_src) }.ok()
}
pub fn ipv6_dst(&self) -> Option<[__u32; 4usize]> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.__bindgen_anon_2.ipv6_dst) }.ok()
}
}
impl bpf_flow_keys__bindgen_ty_1__bindgen_ty_1 {
pub fn ipv4_src(&self) -> Option<__be32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.ipv4_src) }.ok()
}
pub fn ipv4_dst(&self) -> Option<__be32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.ipv4_dst) }.ok()
}
}
impl bpf_flow_keys__bindgen_ty_1__bindgen_ty_2 {
pub fn ipv6_src(&self) -> Option<[__u32; 4usize]> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.ipv6_src) }.ok()
}
pub fn ipv6_dst(&self) -> Option<[__u32; 4usize]> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.ipv6_dst) }.ok()
}
}
impl bpf_spin_lock {
pub fn val(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.val) }.ok()
}
}
impl bpf_sysctl {
pub fn write(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.write) }.ok()
}
pub fn file_pos(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.file_pos) }.ok()
}
}
impl bpf_pidns_info {
pub fn pid(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.pid) }.ok()
}
pub fn tgid(&self) -> Option<__u32> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.tgid) }.ok()
}
}
impl bpf_perf_event_data {}
impl bpf_redir_neigh {}
impl linux_binprm {}
impl pt_regs {}
impl sockaddr {}
impl tcphdr {}
impl seq_file {}
impl tcp6_sock {}
impl tcp_sock {}
impl tcp_timewait_sock {}
impl tcp_request_sock {}
impl udp6_sock {}
impl task_struct {}
impl path {}
impl btf_ptr {}
impl inode {}
impl socket {}
impl file {}
impl bpf_map_def {
pub fn type_(&self) -> Option<::aya_bpf_cty::c_uint> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.type_) }.ok()
}
pub fn key_size(&self) -> Option<::aya_bpf_cty::c_uint> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.key_size) }.ok()
}
pub fn value_size(&self) -> Option<::aya_bpf_cty::c_uint> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.value_size) }.ok()
}
pub fn max_entries(&self) -> Option<::aya_bpf_cty::c_uint> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.max_entries) }.ok()
}
pub fn map_flags(&self) -> Option<::aya_bpf_cty::c_uint> {
unsafe { crate::bpf::helpers::bpf_probe_read(&self.map_flags) }.ok()
}
}
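
The generated getters above all follow the same pattern: each field access goes through bpf_probe_read and is surfaced as an Option, with pointer fields additionally mapped to None when the read yields a null pointer. A minimal usage sketch (not part of this commit; the function and its caller are hypothetical, only the getter names and types come from the generated code):

unsafe fn fib_summary(params: *const bpf_fib_lookup) -> Option<(__u8, __u32)> {
    // Assumes `params` is a valid pointer handed to the program by the kernel.
    let params = &*params;
    let family = params.family()?;   // None if the underlying bpf_probe_read fails
    let ifindex = params.ifindex()?;
    Some((family, ifindex))
}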

File diff suppressed because it is too large

@ -1,5 +0,0 @@
#![allow(dead_code, non_camel_case_types, non_snake_case)]
pub(crate) mod bindings;
pub(crate) mod getters;
pub(crate) mod helpers;

@ -1,4 +0,0 @@
mod generated;
pub mod helpers;
pub use generated::bindings::*;

@ -1,11 +1,10 @@
use core::mem::{self, MaybeUninit};
use crate::{
bpf::generated::helpers as gen,
cty::{c_char, c_long, c_void},
};
use aya_bpf_bindings::helpers as gen;
pub use gen::*;
use crate::cty::{c_char, c_long, c_void};
#[inline]
pub unsafe fn bpf_probe_read<T>(src: *const T) -> Result<T, c_long> {
let mut v: MaybeUninit<T> = MaybeUninit::uninit();

@ -1,14 +1,16 @@
#![no_std]
pub mod bpf;
pub use aya_bpf_bindings::bindings;
pub mod helpers;
pub mod maps;
pub mod programs;
pub use aya_bpf_cty as cty;
use bpf::helpers::{bpf_get_current_comm, bpf_get_current_pid_tgid};
use core::ffi::c_void;
use cty::c_char;
use helpers::{bpf_get_current_comm, bpf_get_current_pid_tgid};
pub use aya_bpf_macros as macros;
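
With the re-exports above, program crates keep importing everything through aya-bpf while the definitions now live in the new crates. A sketch of the intended downstream imports (assumed usage, not shown in this commit):

use aya_bpf::bindings::pt_regs;        // re-exported from aya-bpf-bindings
use aya_bpf::helpers::bpf_probe_read;  // wrappers over the raw generated helpers
use aya_bpf::cty::c_long;              // re-exported aya-bpf-cty
use aya_bpf::macros;                   // re-exported aya-bpf-macros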

@ -1,10 +1,8 @@
use core::{marker::PhantomData, mem};
use crate::{
bpf::{
bpf_map_def, helpers::bpf_perf_event_output, BPF_F_CURRENT_CPU,
BPF_MAP_TYPE_PERF_EVENT_ARRAY,
},
bindings::{bpf_map_def, BPF_F_CURRENT_CPU, BPF_MAP_TYPE_PERF_EVENT_ARRAY},
helpers::bpf_perf_event_output,
BpfContext,
};

@ -1,6 +1,6 @@
use core::ffi::c_void;
use crate::{bpf::pt_regs, BpfContext};
use crate::{bindings::pt_regs, BpfContext};
pub struct ProbeContext {
regs: *mut pt_regs,

@ -17,22 +17,26 @@ use syn::{
use crate::codegen::{
bindings::{self, bindgen},
getters::{generate_getters_for_items, Getter},
Architecture,
};
#[derive(StructOpt)]
pub struct CodegenOptions {
#[structopt(long)]
arch: Architecture,
#[structopt(long)]
libbpf_dir: PathBuf,
}
pub fn codegen(opts: CodegenOptions) -> Result<(), anyhow::Error> {
let dir = PathBuf::from("bpf/aya-bpf");
let generated = dir.join("src/bpf/generated");
let dir = PathBuf::from("bpf/aya-bpf-bindings");
let generated = dir.join("src").join(opts.arch.to_string());
let types = ["bpf_map_.*"];
let vars = ["BPF_.*", "bpf_.*"];
let mut cmd = bindgen(&types, &vars);
cmd.arg(&*dir.join("include/aya_bpf_bindings.h").to_string_lossy());
cmd.arg(&*dir.join("include/bindings.h").to_string_lossy());
cmd.arg("--");
cmd.arg("-I").arg(opts.libbpf_dir.join("src"));
@ -60,13 +64,13 @@ pub fn codegen(opts: CodegenOptions) -> Result<(), anyhow::Error> {
bindings::write(
&tx.helpers.join(""),
"use crate::bpf::generated::bindings::*;",
"use super::bindings::*;",
&generated.join("helpers.rs"),
)?;
bindings::write(
&generate_getters_for_items(&tree.items, gen_probe_read_getter).to_string(),
"use crate::bpf::generated::bindings::*;",
"use super::bindings::*;",
&generated.join("getters.rs"),
)?;
@ -81,7 +85,7 @@ fn gen_probe_read_getter(getter: &Getter<'_>) -> TokenStream {
Type::Ptr(_) => {
quote! {
pub fn #ident(&self) -> Option<#ty> {
let v = unsafe { crate::bpf::helpers::bpf_probe_read(&#(#prefix).*.#ident) }.ok()?;
let v = unsafe { crate::bpf_probe_read(&#(#prefix).*.#ident) }.ok()?;
if v.is_null() {
None
} else {
@ -93,7 +97,7 @@ fn gen_probe_read_getter(getter: &Getter<'_>) -> TokenStream {
_ => {
quote! {
pub fn #ident(&self) -> Option<#ty> {
unsafe { crate::bpf::helpers::bpf_probe_read(&#(#prefix).*.#ident) }.ok()
unsafe { crate::bpf_probe_read(&#(#prefix).*.#ident) }.ok()
}
}
}
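
For reference, a sketch of what the pointer branch above now expands to for a field like `sk: *mut bpf_sock` (field name illustrative; `crate::bpf_probe_read` is the private wrapper defined at the aya-bpf-bindings crate root):

pub fn sk(&self) -> Option<*mut bpf_sock> {
    let v = unsafe { crate::bpf_probe_read(&self.sk) }.ok()?;
    if v.is_null() {
        None
    } else {
        Some(v)
    }
}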

@ -1,9 +1,36 @@
mod aya_bpf;
mod aya_bpf_bindings;
mod bindings;
pub mod getters;
use structopt::StructOpt;
#[derive(Debug, Copy, Clone)]
pub enum Architecture {
X86_64,
AArch64,
}
impl std::str::FromStr for Architecture {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(match s {
"x86_64" => Architecture::X86_64,
"aarch64" => Architecture::AArch64,
_ => return Err("invalid architecture".to_owned()),
})
}
}
impl std::fmt::Display for Architecture {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(match self {
Architecture::X86_64 => "x86_64",
Architecture::AArch64 => "aarch64",
})
}
}
#[derive(StructOpt)]
pub struct Options {
#[structopt(subcommand)]
@ -12,13 +39,13 @@ pub struct Options {
#[derive(StructOpt)]
enum Command {
#[structopt(name = "aya-bpf")]
AyaBpf(aya_bpf::CodegenOptions),
#[structopt(name = "aya-bpf-bindings")]
AyaBpfBindings(aya_bpf_bindings::CodegenOptions),
}
pub fn codegen(opts: Options) -> Result<(), anyhow::Error> {
use Command::*;
match opts.command {
AyaBpf(opts) => aya_bpf::codegen(opts),
AyaBpfBindings(opts) => aya_bpf_bindings::codegen(opts),
}
}
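
Taken together, Architecture drives both the CLI and the output layout: its FromStr impl parses the --arch flag and its Display impl selects the per-arch module directory the aya-bpf-bindings codegen writes into. A small sketch (the paths are the ones constructed above; the snippet itself is illustrative):

let arch: Architecture = "x86_64".parse().unwrap();      // FromStr impl above
let out = std::path::PathBuf::from("bpf/aya-bpf-bindings")
    .join("src")
    .join(arch.to_string());                             // Display impl above
// codegen then writes helpers.rs and getters.rs under `out`.
assert_eq!(out, std::path::Path::new("bpf/aya-bpf-bindings/src/x86_64"));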
