diff --git a/.cargo/config b/.cargo/config index 35049cbc..757fd081 100644 --- a/.cargo/config +++ b/.cargo/config @@ -1,2 +1,8 @@ [alias] xtask = "run --package xtask --" + +[target.armv7-unknown-linux-gnueabi] +linker = "arm-linux-gnueabi-gcc" + +[target.armv7-unknown-linux-gnueabihf] +linker = "arm-linux-gnueabihf-gcc" \ No newline at end of file diff --git a/aya/src/generated/linux_bindings_aarch64.rs b/aya/src/generated/linux_bindings_aarch64.rs index f7135f80..3509714c 100644 --- a/aya/src/generated/linux_bindings_aarch64.rs +++ b/aya/src/generated/linux_bindings_aarch64.rs @@ -802,7 +802,11 @@ pub enum perf_event_sample_format { PERF_SAMPLE_TRANSACTION = 131072, PERF_SAMPLE_REGS_INTR = 262144, PERF_SAMPLE_PHYS_ADDR = 524288, - PERF_SAMPLE_MAX = 1048576, + PERF_SAMPLE_AUX = 1048576, + PERF_SAMPLE_CGROUP = 2097152, + PERF_SAMPLE_DATA_PAGE_SIZE = 4194304, + PERF_SAMPLE_CODE_PAGE_SIZE = 8388608, + PERF_SAMPLE_MAX = 16777216, __PERF_SAMPLE_CALLCHAIN_EARLY = 9223372036854775808, } #[repr(C)] @@ -828,6 +832,8 @@ pub struct perf_event_attr { pub aux_watermark: __u32, pub sample_max_stack: __u16, pub __reserved_2: __u16, + pub aux_sample_size: __u32, + pub __reserved_3: __u32, } #[repr(C)] #[derive(Copy, Clone)] @@ -1204,14 +1210,36 @@ impl perf_event_attr { } } #[inline] + pub fn cgroup(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(32usize, 1u8) as u64) } + } + #[inline] + pub fn set_cgroup(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(32usize, 1u8, val as u64) + } + } + #[inline] + pub fn text_poke(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(33usize, 1u8) as u64) } + } + #[inline] + pub fn set_text_poke(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(33usize, 1u8, val as u64) + } + } + #[inline] pub fn __reserved_1(&self) -> __u64 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(32usize, 32u8) as u64) } + unsafe { ::std::mem::transmute(self._bitfield_1.get(34usize, 30u8) as u64) } } #[inline] pub fn set___reserved_1(&mut self, val: __u64) { unsafe { let val: u64 = ::std::mem::transmute(val); - self._bitfield_1.set(32usize, 32u8, val as u64) + self._bitfield_1.set(34usize, 30u8, val as u64) } } #[inline] @@ -1247,6 +1275,8 @@ impl perf_event_attr { ksymbol: __u64, bpf_event: __u64, aux_output: __u64, + cgroup: __u64, + text_poke: __u64, __reserved_1: __u64, ) -> __BindgenBitfieldUnit<[u8; 8usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); @@ -1376,7 +1406,15 @@ impl perf_event_attr { let aux_output: u64 = unsafe { ::std::mem::transmute(aux_output) }; aux_output as u64 }); - __bindgen_bitfield_unit.set(32usize, 32u8, { + __bindgen_bitfield_unit.set(32usize, 1u8, { + let cgroup: u64 = unsafe { ::std::mem::transmute(cgroup) }; + cgroup as u64 + }); + __bindgen_bitfield_unit.set(33usize, 1u8, { + let text_poke: u64 = unsafe { ::std::mem::transmute(text_poke) }; + text_poke as u64 + }); + __bindgen_bitfield_unit.set(34usize, 30u8, { let __reserved_1: u64 = unsafe { ::std::mem::transmute(__reserved_1) }; __reserved_1 as u64 }); @@ -1400,7 +1438,10 @@ pub struct perf_event_mmap_page { pub time_offset: __u64, pub time_zero: __u64, pub size: __u32, - pub __reserved: [__u8; 948usize], + pub __reserved_1: __u32, + pub time_cycles: __u64, + pub time_mask: __u64, + pub __reserved: [__u8; 928usize], pub data_head: __u64, pub data_tail: __u64, pub data_offset: __u64, @@ -1481,14 
+1522,25 @@ impl perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1 { } } #[inline] + pub fn cap_user_time_short(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(5usize, 1u8) as u64) } + } + #[inline] + pub fn set_cap_user_time_short(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(5usize, 1u8, val as u64) + } + } + #[inline] pub fn cap_____res(&self) -> __u64 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(5usize, 59u8) as u64) } + unsafe { ::std::mem::transmute(self._bitfield_1.get(6usize, 58u8) as u64) } } #[inline] pub fn set_cap_____res(&mut self, val: __u64) { unsafe { let val: u64 = ::std::mem::transmute(val); - self._bitfield_1.set(5usize, 59u8, val as u64) + self._bitfield_1.set(6usize, 58u8, val as u64) } } #[inline] @@ -1498,6 +1550,7 @@ impl perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1 { cap_user_rdpmc: __u64, cap_user_time: __u64, cap_user_time_zero: __u64, + cap_user_time_short: __u64, cap_____res: __u64, ) -> __BindgenBitfieldUnit<[u8; 8usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); @@ -1522,7 +1575,11 @@ impl perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1 { let cap_user_time_zero: u64 = unsafe { ::std::mem::transmute(cap_user_time_zero) }; cap_user_time_zero as u64 }); - __bindgen_bitfield_unit.set(5usize, 59u8, { + __bindgen_bitfield_unit.set(5usize, 1u8, { + let cap_user_time_short: u64 = unsafe { ::std::mem::transmute(cap_user_time_short) }; + cap_user_time_short as u64 + }); + __bindgen_bitfield_unit.set(6usize, 58u8, { let cap_____res: u64 = unsafe { ::std::mem::transmute(cap_____res) }; cap_____res as u64 }); @@ -1557,7 +1614,9 @@ pub enum perf_event_type { PERF_RECORD_NAMESPACES = 16, PERF_RECORD_KSYMBOL = 17, PERF_RECORD_BPF_EVENT = 18, - PERF_RECORD_MAX = 19, + PERF_RECORD_CGROUP = 19, + PERF_RECORD_TEXT_POKE = 20, + PERF_RECORD_MAX = 21, } pub const IFLA_XDP_UNSPEC: _bindgen_ty_80 = _bindgen_ty_80::IFLA_XDP_UNSPEC; pub const IFLA_XDP_FD: _bindgen_ty_80 = _bindgen_ty_80::IFLA_XDP_FD; @@ -1604,25 +1663,26 @@ pub struct tcmsg { pub tcm_parent: __u32, pub tcm_info: __u32, } -pub const TCA_UNSPEC: _bindgen_ty_92 = _bindgen_ty_92::TCA_UNSPEC; -pub const TCA_KIND: _bindgen_ty_92 = _bindgen_ty_92::TCA_KIND; -pub const TCA_OPTIONS: _bindgen_ty_92 = _bindgen_ty_92::TCA_OPTIONS; -pub const TCA_STATS: _bindgen_ty_92 = _bindgen_ty_92::TCA_STATS; -pub const TCA_XSTATS: _bindgen_ty_92 = _bindgen_ty_92::TCA_XSTATS; -pub const TCA_RATE: _bindgen_ty_92 = _bindgen_ty_92::TCA_RATE; -pub const TCA_FCNT: _bindgen_ty_92 = _bindgen_ty_92::TCA_FCNT; -pub const TCA_STATS2: _bindgen_ty_92 = _bindgen_ty_92::TCA_STATS2; -pub const TCA_STAB: _bindgen_ty_92 = _bindgen_ty_92::TCA_STAB; -pub const TCA_PAD: _bindgen_ty_92 = _bindgen_ty_92::TCA_PAD; -pub const TCA_DUMP_INVISIBLE: _bindgen_ty_92 = _bindgen_ty_92::TCA_DUMP_INVISIBLE; -pub const TCA_CHAIN: _bindgen_ty_92 = _bindgen_ty_92::TCA_CHAIN; -pub const TCA_HW_OFFLOAD: _bindgen_ty_92 = _bindgen_ty_92::TCA_HW_OFFLOAD; -pub const TCA_INGRESS_BLOCK: _bindgen_ty_92 = _bindgen_ty_92::TCA_INGRESS_BLOCK; -pub const TCA_EGRESS_BLOCK: _bindgen_ty_92 = _bindgen_ty_92::TCA_EGRESS_BLOCK; -pub const __TCA_MAX: _bindgen_ty_92 = _bindgen_ty_92::__TCA_MAX; +pub const TCA_UNSPEC: _bindgen_ty_94 = _bindgen_ty_94::TCA_UNSPEC; +pub const TCA_KIND: _bindgen_ty_94 = _bindgen_ty_94::TCA_KIND; +pub const TCA_OPTIONS: _bindgen_ty_94 = _bindgen_ty_94::TCA_OPTIONS; +pub const TCA_STATS: _bindgen_ty_94 = _bindgen_ty_94::TCA_STATS; 
+pub const TCA_XSTATS: _bindgen_ty_94 = _bindgen_ty_94::TCA_XSTATS; +pub const TCA_RATE: _bindgen_ty_94 = _bindgen_ty_94::TCA_RATE; +pub const TCA_FCNT: _bindgen_ty_94 = _bindgen_ty_94::TCA_FCNT; +pub const TCA_STATS2: _bindgen_ty_94 = _bindgen_ty_94::TCA_STATS2; +pub const TCA_STAB: _bindgen_ty_94 = _bindgen_ty_94::TCA_STAB; +pub const TCA_PAD: _bindgen_ty_94 = _bindgen_ty_94::TCA_PAD; +pub const TCA_DUMP_INVISIBLE: _bindgen_ty_94 = _bindgen_ty_94::TCA_DUMP_INVISIBLE; +pub const TCA_CHAIN: _bindgen_ty_94 = _bindgen_ty_94::TCA_CHAIN; +pub const TCA_HW_OFFLOAD: _bindgen_ty_94 = _bindgen_ty_94::TCA_HW_OFFLOAD; +pub const TCA_INGRESS_BLOCK: _bindgen_ty_94 = _bindgen_ty_94::TCA_INGRESS_BLOCK; +pub const TCA_EGRESS_BLOCK: _bindgen_ty_94 = _bindgen_ty_94::TCA_EGRESS_BLOCK; +pub const TCA_DUMP_FLAGS: _bindgen_ty_94 = _bindgen_ty_94::TCA_DUMP_FLAGS; +pub const __TCA_MAX: _bindgen_ty_94 = _bindgen_ty_94::__TCA_MAX; #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub enum _bindgen_ty_92 { +pub enum _bindgen_ty_94 { TCA_UNSPEC = 0, TCA_KIND = 1, TCA_OPTIONS = 2, @@ -1638,24 +1698,25 @@ pub enum _bindgen_ty_92 { TCA_HW_OFFLOAD = 12, TCA_INGRESS_BLOCK = 13, TCA_EGRESS_BLOCK = 14, - __TCA_MAX = 15, -} -pub const TCA_BPF_UNSPEC: _bindgen_ty_147 = _bindgen_ty_147::TCA_BPF_UNSPEC; -pub const TCA_BPF_ACT: _bindgen_ty_147 = _bindgen_ty_147::TCA_BPF_ACT; -pub const TCA_BPF_POLICE: _bindgen_ty_147 = _bindgen_ty_147::TCA_BPF_POLICE; -pub const TCA_BPF_CLASSID: _bindgen_ty_147 = _bindgen_ty_147::TCA_BPF_CLASSID; -pub const TCA_BPF_OPS_LEN: _bindgen_ty_147 = _bindgen_ty_147::TCA_BPF_OPS_LEN; -pub const TCA_BPF_OPS: _bindgen_ty_147 = _bindgen_ty_147::TCA_BPF_OPS; -pub const TCA_BPF_FD: _bindgen_ty_147 = _bindgen_ty_147::TCA_BPF_FD; -pub const TCA_BPF_NAME: _bindgen_ty_147 = _bindgen_ty_147::TCA_BPF_NAME; -pub const TCA_BPF_FLAGS: _bindgen_ty_147 = _bindgen_ty_147::TCA_BPF_FLAGS; -pub const TCA_BPF_FLAGS_GEN: _bindgen_ty_147 = _bindgen_ty_147::TCA_BPF_FLAGS_GEN; -pub const TCA_BPF_TAG: _bindgen_ty_147 = _bindgen_ty_147::TCA_BPF_TAG; -pub const TCA_BPF_ID: _bindgen_ty_147 = _bindgen_ty_147::TCA_BPF_ID; -pub const __TCA_BPF_MAX: _bindgen_ty_147 = _bindgen_ty_147::__TCA_BPF_MAX; + TCA_DUMP_FLAGS = 15, + __TCA_MAX = 16, +} +pub const TCA_BPF_UNSPEC: _bindgen_ty_151 = _bindgen_ty_151::TCA_BPF_UNSPEC; +pub const TCA_BPF_ACT: _bindgen_ty_151 = _bindgen_ty_151::TCA_BPF_ACT; +pub const TCA_BPF_POLICE: _bindgen_ty_151 = _bindgen_ty_151::TCA_BPF_POLICE; +pub const TCA_BPF_CLASSID: _bindgen_ty_151 = _bindgen_ty_151::TCA_BPF_CLASSID; +pub const TCA_BPF_OPS_LEN: _bindgen_ty_151 = _bindgen_ty_151::TCA_BPF_OPS_LEN; +pub const TCA_BPF_OPS: _bindgen_ty_151 = _bindgen_ty_151::TCA_BPF_OPS; +pub const TCA_BPF_FD: _bindgen_ty_151 = _bindgen_ty_151::TCA_BPF_FD; +pub const TCA_BPF_NAME: _bindgen_ty_151 = _bindgen_ty_151::TCA_BPF_NAME; +pub const TCA_BPF_FLAGS: _bindgen_ty_151 = _bindgen_ty_151::TCA_BPF_FLAGS; +pub const TCA_BPF_FLAGS_GEN: _bindgen_ty_151 = _bindgen_ty_151::TCA_BPF_FLAGS_GEN; +pub const TCA_BPF_TAG: _bindgen_ty_151 = _bindgen_ty_151::TCA_BPF_TAG; +pub const TCA_BPF_ID: _bindgen_ty_151 = _bindgen_ty_151::TCA_BPF_ID; +pub const __TCA_BPF_MAX: _bindgen_ty_151 = _bindgen_ty_151::__TCA_BPF_MAX; #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub enum _bindgen_ty_147 { +pub enum _bindgen_ty_151 { TCA_BPF_UNSPEC = 0, TCA_BPF_ACT = 1, TCA_BPF_POLICE = 2, diff --git a/aya/src/generated/linux_bindings_armv7.rs b/aya/src/generated/linux_bindings_armv7.rs new file mode 100644 index 00000000..3509714c 
--- /dev/null +++ b/aya/src/generated/linux_bindings_armv7.rs @@ -0,0 +1,1736 @@ +/* automatically generated by rust-bindgen 0.57.0 */ + +#[repr(C)] +#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] +pub struct __BindgenBitfieldUnit { + storage: Storage, +} +impl __BindgenBitfieldUnit { + #[inline] + pub const fn new(storage: Storage) -> Self { + Self { storage } + } +} +impl __BindgenBitfieldUnit +where + Storage: AsRef<[u8]> + AsMut<[u8]>, +{ + #[inline] + pub fn get_bit(&self, index: usize) -> bool { + debug_assert!(index / 8 < self.storage.as_ref().len()); + let byte_index = index / 8; + let byte = self.storage.as_ref()[byte_index]; + let bit_index = if cfg!(target_endian = "big") { + 7 - (index % 8) + } else { + index % 8 + }; + let mask = 1 << bit_index; + byte & mask == mask + } + #[inline] + pub fn set_bit(&mut self, index: usize, val: bool) { + debug_assert!(index / 8 < self.storage.as_ref().len()); + let byte_index = index / 8; + let byte = &mut self.storage.as_mut()[byte_index]; + let bit_index = if cfg!(target_endian = "big") { + 7 - (index % 8) + } else { + index % 8 + }; + let mask = 1 << bit_index; + if val { + *byte |= mask; + } else { + *byte &= !mask; + } + } + #[inline] + pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 { + debug_assert!(bit_width <= 64); + debug_assert!(bit_offset / 8 < self.storage.as_ref().len()); + debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len()); + let mut val = 0; + for i in 0..(bit_width as usize) { + if self.get_bit(i + bit_offset) { + let index = if cfg!(target_endian = "big") { + bit_width as usize - 1 - i + } else { + i + }; + val |= 1 << index; + } + } + val + } + #[inline] + pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) { + debug_assert!(bit_width <= 64); + debug_assert!(bit_offset / 8 < self.storage.as_ref().len()); + debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len()); + for i in 0..(bit_width as usize) { + let mask = 1 << i; + let val_bit_is_set = val & mask == mask; + let index = if cfg!(target_endian = "big") { + bit_width as usize - 1 - i + } else { + i + }; + self.set_bit(index + bit_offset, val_bit_is_set); + } + } +} +pub const BPF_LD: u32 = 0; +pub const BPF_LDX: u32 = 1; +pub const BPF_ST: u32 = 2; +pub const BPF_STX: u32 = 3; +pub const BPF_ALU: u32 = 4; +pub const BPF_JMP: u32 = 5; +pub const BPF_W: u32 = 0; +pub const BPF_H: u32 = 8; +pub const BPF_B: u32 = 16; +pub const BPF_K: u32 = 0; +pub const BPF_ALU64: u32 = 7; +pub const BPF_DW: u32 = 24; +pub const BPF_CALL: u32 = 128; +pub const BPF_PSEUDO_MAP_FD: u32 = 1; +pub const BPF_PSEUDO_MAP_IDX: u32 = 5; +pub const BPF_PSEUDO_MAP_VALUE: u32 = 2; +pub const BPF_PSEUDO_MAP_IDX_VALUE: u32 = 6; +pub const BPF_PSEUDO_BTF_ID: u32 = 3; +pub const BPF_PSEUDO_FUNC: u32 = 4; +pub const BPF_PSEUDO_CALL: u32 = 1; +pub const BPF_PSEUDO_KFUNC_CALL: u32 = 2; +pub const BTF_KIND_UNKN: u32 = 0; +pub const BTF_KIND_INT: u32 = 1; +pub const BTF_KIND_PTR: u32 = 2; +pub const BTF_KIND_ARRAY: u32 = 3; +pub const BTF_KIND_STRUCT: u32 = 4; +pub const BTF_KIND_UNION: u32 = 5; +pub const BTF_KIND_ENUM: u32 = 6; +pub const BTF_KIND_FWD: u32 = 7; +pub const BTF_KIND_TYPEDEF: u32 = 8; +pub const BTF_KIND_VOLATILE: u32 = 9; +pub const BTF_KIND_CONST: u32 = 10; +pub const BTF_KIND_RESTRICT: u32 = 11; +pub const BTF_KIND_FUNC: u32 = 12; +pub const BTF_KIND_FUNC_PROTO: u32 = 13; +pub const BTF_KIND_VAR: u32 = 14; +pub const BTF_KIND_DATASEC: u32 = 15; +pub const BTF_KIND_FLOAT: u32 
= 16; +pub const BTF_KIND_MAX: u32 = 16; +pub const BTF_INT_SIGNED: u32 = 1; +pub const BTF_INT_CHAR: u32 = 2; +pub const BTF_INT_BOOL: u32 = 4; +pub const PERF_MAX_STACK_DEPTH: u32 = 127; +pub const PERF_MAX_CONTEXTS_PER_STACK: u32 = 8; +pub const PERF_FLAG_FD_NO_GROUP: u32 = 1; +pub const PERF_FLAG_FD_OUTPUT: u32 = 2; +pub const PERF_FLAG_PID_CGROUP: u32 = 4; +pub const PERF_FLAG_FD_CLOEXEC: u32 = 8; +pub const NLMSG_ALIGNTO: u32 = 4; +pub const XDP_FLAGS_UPDATE_IF_NOEXIST: u32 = 1; +pub const XDP_FLAGS_SKB_MODE: u32 = 2; +pub const XDP_FLAGS_DRV_MODE: u32 = 4; +pub const XDP_FLAGS_HW_MODE: u32 = 8; +pub const XDP_FLAGS_REPLACE: u32 = 16; +pub const XDP_FLAGS_MODES: u32 = 14; +pub const XDP_FLAGS_MASK: u32 = 31; +pub const SO_ATTACH_BPF: u32 = 50; +pub const SO_DETACH_BPF: u32 = 27; +pub const TC_H_MAJ_MASK: u32 = 4294901760; +pub const TC_H_MIN_MASK: u32 = 65535; +pub const TC_H_UNSPEC: u32 = 0; +pub const TC_H_ROOT: u32 = 4294967295; +pub const TC_H_INGRESS: u32 = 4294967281; +pub const TC_H_CLSACT: u32 = 4294967281; +pub const TC_H_MIN_PRIORITY: u32 = 65504; +pub const TC_H_MIN_INGRESS: u32 = 65522; +pub const TC_H_MIN_EGRESS: u32 = 65523; +pub const TCA_BPF_FLAG_ACT_DIRECT: u32 = 1; +pub type __u8 = ::std::os::raw::c_uchar; +pub type __s16 = ::std::os::raw::c_short; +pub type __u16 = ::std::os::raw::c_ushort; +pub type __s32 = ::std::os::raw::c_int; +pub type __u32 = ::std::os::raw::c_uint; +pub type __s64 = ::std::os::raw::c_longlong; +pub type __u64 = ::std::os::raw::c_ulonglong; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_insn { + pub code: __u8, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 1usize]>, + pub off: __s16, + pub imm: __s32, +} +impl bpf_insn { + #[inline] + pub fn dst_reg(&self) -> __u8 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 4u8) as u8) } + } + #[inline] + pub fn set_dst_reg(&mut self, val: __u8) { + unsafe { + let val: u8 = ::std::mem::transmute(val); + self._bitfield_1.set(0usize, 4u8, val as u64) + } + } + #[inline] + pub fn src_reg(&self) -> __u8 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(4usize, 4u8) as u8) } + } + #[inline] + pub fn set_src_reg(&mut self, val: __u8) { + unsafe { + let val: u8 = ::std::mem::transmute(val); + self._bitfield_1.set(4usize, 4u8, val as u64) + } + } + #[inline] + pub fn new_bitfield_1(dst_reg: __u8, src_reg: __u8) -> __BindgenBitfieldUnit<[u8; 1usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); + __bindgen_bitfield_unit.set(0usize, 4u8, { + let dst_reg: u8 = unsafe { ::std::mem::transmute(dst_reg) }; + dst_reg as u64 + }); + __bindgen_bitfield_unit.set(4usize, 4u8, { + let src_reg: u8 = unsafe { ::std::mem::transmute(src_reg) }; + src_reg as u64 + }); + __bindgen_bitfield_unit + } +} +impl bpf_cmd { + pub const BPF_PROG_RUN: bpf_cmd = bpf_cmd::BPF_PROG_TEST_RUN; +} +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum bpf_cmd { + BPF_MAP_CREATE = 0, + BPF_MAP_LOOKUP_ELEM = 1, + BPF_MAP_UPDATE_ELEM = 2, + BPF_MAP_DELETE_ELEM = 3, + BPF_MAP_GET_NEXT_KEY = 4, + BPF_PROG_LOAD = 5, + BPF_OBJ_PIN = 6, + BPF_OBJ_GET = 7, + BPF_PROG_ATTACH = 8, + BPF_PROG_DETACH = 9, + BPF_PROG_TEST_RUN = 10, + BPF_PROG_GET_NEXT_ID = 11, + BPF_MAP_GET_NEXT_ID = 12, + BPF_PROG_GET_FD_BY_ID = 13, + BPF_MAP_GET_FD_BY_ID = 14, + BPF_OBJ_GET_INFO_BY_FD = 15, + BPF_PROG_QUERY = 16, + BPF_RAW_TRACEPOINT_OPEN = 17, + BPF_BTF_LOAD = 18, + BPF_BTF_GET_FD_BY_ID = 19, + BPF_TASK_FD_QUERY = 20, + 
BPF_MAP_LOOKUP_AND_DELETE_ELEM = 21, + BPF_MAP_FREEZE = 22, + BPF_BTF_GET_NEXT_ID = 23, + BPF_MAP_LOOKUP_BATCH = 24, + BPF_MAP_LOOKUP_AND_DELETE_BATCH = 25, + BPF_MAP_UPDATE_BATCH = 26, + BPF_MAP_DELETE_BATCH = 27, + BPF_LINK_CREATE = 28, + BPF_LINK_UPDATE = 29, + BPF_LINK_GET_FD_BY_ID = 30, + BPF_LINK_GET_NEXT_ID = 31, + BPF_ENABLE_STATS = 32, + BPF_ITER_CREATE = 33, + BPF_LINK_DETACH = 34, + BPF_PROG_BIND_MAP = 35, +} +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum bpf_map_type { + BPF_MAP_TYPE_UNSPEC = 0, + BPF_MAP_TYPE_HASH = 1, + BPF_MAP_TYPE_ARRAY = 2, + BPF_MAP_TYPE_PROG_ARRAY = 3, + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 4, + BPF_MAP_TYPE_PERCPU_HASH = 5, + BPF_MAP_TYPE_PERCPU_ARRAY = 6, + BPF_MAP_TYPE_STACK_TRACE = 7, + BPF_MAP_TYPE_CGROUP_ARRAY = 8, + BPF_MAP_TYPE_LRU_HASH = 9, + BPF_MAP_TYPE_LRU_PERCPU_HASH = 10, + BPF_MAP_TYPE_LPM_TRIE = 11, + BPF_MAP_TYPE_ARRAY_OF_MAPS = 12, + BPF_MAP_TYPE_HASH_OF_MAPS = 13, + BPF_MAP_TYPE_DEVMAP = 14, + BPF_MAP_TYPE_SOCKMAP = 15, + BPF_MAP_TYPE_CPUMAP = 16, + BPF_MAP_TYPE_XSKMAP = 17, + BPF_MAP_TYPE_SOCKHASH = 18, + BPF_MAP_TYPE_CGROUP_STORAGE = 19, + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 20, + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 21, + BPF_MAP_TYPE_QUEUE = 22, + BPF_MAP_TYPE_STACK = 23, + BPF_MAP_TYPE_SK_STORAGE = 24, + BPF_MAP_TYPE_DEVMAP_HASH = 25, + BPF_MAP_TYPE_STRUCT_OPS = 26, + BPF_MAP_TYPE_RINGBUF = 27, + BPF_MAP_TYPE_INODE_STORAGE = 28, + BPF_MAP_TYPE_TASK_STORAGE = 29, +} +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum bpf_prog_type { + BPF_PROG_TYPE_UNSPEC = 0, + BPF_PROG_TYPE_SOCKET_FILTER = 1, + BPF_PROG_TYPE_KPROBE = 2, + BPF_PROG_TYPE_SCHED_CLS = 3, + BPF_PROG_TYPE_SCHED_ACT = 4, + BPF_PROG_TYPE_TRACEPOINT = 5, + BPF_PROG_TYPE_XDP = 6, + BPF_PROG_TYPE_PERF_EVENT = 7, + BPF_PROG_TYPE_CGROUP_SKB = 8, + BPF_PROG_TYPE_CGROUP_SOCK = 9, + BPF_PROG_TYPE_LWT_IN = 10, + BPF_PROG_TYPE_LWT_OUT = 11, + BPF_PROG_TYPE_LWT_XMIT = 12, + BPF_PROG_TYPE_SOCK_OPS = 13, + BPF_PROG_TYPE_SK_SKB = 14, + BPF_PROG_TYPE_CGROUP_DEVICE = 15, + BPF_PROG_TYPE_SK_MSG = 16, + BPF_PROG_TYPE_RAW_TRACEPOINT = 17, + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 18, + BPF_PROG_TYPE_LWT_SEG6LOCAL = 19, + BPF_PROG_TYPE_LIRC_MODE2 = 20, + BPF_PROG_TYPE_SK_REUSEPORT = 21, + BPF_PROG_TYPE_FLOW_DISSECTOR = 22, + BPF_PROG_TYPE_CGROUP_SYSCTL = 23, + BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 24, + BPF_PROG_TYPE_CGROUP_SOCKOPT = 25, + BPF_PROG_TYPE_TRACING = 26, + BPF_PROG_TYPE_STRUCT_OPS = 27, + BPF_PROG_TYPE_EXT = 28, + BPF_PROG_TYPE_LSM = 29, + BPF_PROG_TYPE_SK_LOOKUP = 30, + BPF_PROG_TYPE_SYSCALL = 31, +} +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum bpf_attach_type { + BPF_CGROUP_INET_INGRESS = 0, + BPF_CGROUP_INET_EGRESS = 1, + BPF_CGROUP_INET_SOCK_CREATE = 2, + BPF_CGROUP_SOCK_OPS = 3, + BPF_SK_SKB_STREAM_PARSER = 4, + BPF_SK_SKB_STREAM_VERDICT = 5, + BPF_CGROUP_DEVICE = 6, + BPF_SK_MSG_VERDICT = 7, + BPF_CGROUP_INET4_BIND = 8, + BPF_CGROUP_INET6_BIND = 9, + BPF_CGROUP_INET4_CONNECT = 10, + BPF_CGROUP_INET6_CONNECT = 11, + BPF_CGROUP_INET4_POST_BIND = 12, + BPF_CGROUP_INET6_POST_BIND = 13, + BPF_CGROUP_UDP4_SENDMSG = 14, + BPF_CGROUP_UDP6_SENDMSG = 15, + BPF_LIRC_MODE2 = 16, + BPF_FLOW_DISSECTOR = 17, + BPF_CGROUP_SYSCTL = 18, + BPF_CGROUP_UDP4_RECVMSG = 19, + BPF_CGROUP_UDP6_RECVMSG = 20, + BPF_CGROUP_GETSOCKOPT = 21, + BPF_CGROUP_SETSOCKOPT = 22, + BPF_TRACE_RAW_TP = 23, + BPF_TRACE_FENTRY = 24, + BPF_TRACE_FEXIT = 25, + BPF_MODIFY_RETURN = 26, + BPF_LSM_MAC = 27, + BPF_TRACE_ITER = 28, + 
BPF_CGROUP_INET4_GETPEERNAME = 29, + BPF_CGROUP_INET6_GETPEERNAME = 30, + BPF_CGROUP_INET4_GETSOCKNAME = 31, + BPF_CGROUP_INET6_GETSOCKNAME = 32, + BPF_XDP_DEVMAP = 33, + BPF_CGROUP_INET_SOCK_RELEASE = 34, + BPF_XDP_CPUMAP = 35, + BPF_SK_LOOKUP = 36, + BPF_XDP = 37, + BPF_SK_SKB_VERDICT = 38, + BPF_SK_REUSEPORT_SELECT = 39, + BPF_SK_REUSEPORT_SELECT_OR_MIGRATE = 40, + __MAX_BPF_ATTACH_TYPE = 41, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_attr { + pub __bindgen_anon_1: bpf_attr__bindgen_ty_1, + pub __bindgen_anon_2: bpf_attr__bindgen_ty_2, + pub batch: bpf_attr__bindgen_ty_3, + pub __bindgen_anon_3: bpf_attr__bindgen_ty_4, + pub __bindgen_anon_4: bpf_attr__bindgen_ty_5, + pub __bindgen_anon_5: bpf_attr__bindgen_ty_6, + pub test: bpf_attr__bindgen_ty_7, + pub __bindgen_anon_6: bpf_attr__bindgen_ty_8, + pub info: bpf_attr__bindgen_ty_9, + pub query: bpf_attr__bindgen_ty_10, + pub raw_tracepoint: bpf_attr__bindgen_ty_11, + pub __bindgen_anon_7: bpf_attr__bindgen_ty_12, + pub task_fd_query: bpf_attr__bindgen_ty_13, + pub link_create: bpf_attr__bindgen_ty_14, + pub link_update: bpf_attr__bindgen_ty_15, + pub link_detach: bpf_attr__bindgen_ty_16, + pub enable_stats: bpf_attr__bindgen_ty_17, + pub iter_create: bpf_attr__bindgen_ty_18, + pub prog_bind_map: bpf_attr__bindgen_ty_19, + _bindgen_union_align: [u64; 16usize], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_1 { + pub map_type: __u32, + pub key_size: __u32, + pub value_size: __u32, + pub max_entries: __u32, + pub map_flags: __u32, + pub inner_map_fd: __u32, + pub numa_node: __u32, + pub map_name: [::std::os::raw::c_char; 16usize], + pub map_ifindex: __u32, + pub btf_fd: __u32, + pub btf_key_type_id: __u32, + pub btf_value_type_id: __u32, + pub btf_vmlinux_value_type_id: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_2 { + pub map_fd: __u32, + pub key: __u64, + pub __bindgen_anon_1: bpf_attr__bindgen_ty_2__bindgen_ty_1, + pub flags: __u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_attr__bindgen_ty_2__bindgen_ty_1 { + pub value: __u64, + pub next_key: __u64, + _bindgen_union_align: u64, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_3 { + pub in_batch: __u64, + pub out_batch: __u64, + pub keys: __u64, + pub values: __u64, + pub count: __u32, + pub map_fd: __u32, + pub elem_flags: __u64, + pub flags: __u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_4 { + pub prog_type: __u32, + pub insn_cnt: __u32, + pub insns: __u64, + pub license: __u64, + pub log_level: __u32, + pub log_size: __u32, + pub log_buf: __u64, + pub kern_version: __u32, + pub prog_flags: __u32, + pub prog_name: [::std::os::raw::c_char; 16usize], + pub prog_ifindex: __u32, + pub expected_attach_type: __u32, + pub prog_btf_fd: __u32, + pub func_info_rec_size: __u32, + pub func_info: __u64, + pub func_info_cnt: __u32, + pub line_info_rec_size: __u32, + pub line_info: __u64, + pub line_info_cnt: __u32, + pub attach_btf_id: __u32, + pub __bindgen_anon_1: bpf_attr__bindgen_ty_4__bindgen_ty_1, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 4usize]>, + pub fd_array: __u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_attr__bindgen_ty_4__bindgen_ty_1 { + pub attach_prog_fd: __u32, + pub attach_btf_obj_fd: __u32, + _bindgen_union_align: u32, +} +impl bpf_attr__bindgen_ty_4 { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 4usize]> { + let mut __bindgen_bitfield_unit: 
__BindgenBitfieldUnit<[u8; 4usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_5 { + pub pathname: __u64, + pub bpf_fd: __u32, + pub file_flags: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_6 { + pub target_fd: __u32, + pub attach_bpf_fd: __u32, + pub attach_type: __u32, + pub attach_flags: __u32, + pub replace_bpf_fd: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_7 { + pub prog_fd: __u32, + pub retval: __u32, + pub data_size_in: __u32, + pub data_size_out: __u32, + pub data_in: __u64, + pub data_out: __u64, + pub repeat: __u32, + pub duration: __u32, + pub ctx_size_in: __u32, + pub ctx_size_out: __u32, + pub ctx_in: __u64, + pub ctx_out: __u64, + pub flags: __u32, + pub cpu: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_8 { + pub __bindgen_anon_1: bpf_attr__bindgen_ty_8__bindgen_ty_1, + pub next_id: __u32, + pub open_flags: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_attr__bindgen_ty_8__bindgen_ty_1 { + pub start_id: __u32, + pub prog_id: __u32, + pub map_id: __u32, + pub btf_id: __u32, + pub link_id: __u32, + _bindgen_union_align: u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_9 { + pub bpf_fd: __u32, + pub info_len: __u32, + pub info: __u64, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_10 { + pub target_fd: __u32, + pub attach_type: __u32, + pub query_flags: __u32, + pub attach_flags: __u32, + pub prog_ids: __u64, + pub prog_cnt: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_11 { + pub name: __u64, + pub prog_fd: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_12 { + pub btf: __u64, + pub btf_log_buf: __u64, + pub btf_size: __u32, + pub btf_log_size: __u32, + pub btf_log_level: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_13 { + pub pid: __u32, + pub fd: __u32, + pub flags: __u32, + pub buf_len: __u32, + pub buf: __u64, + pub prog_id: __u32, + pub fd_type: __u32, + pub probe_offset: __u64, + pub probe_addr: __u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_14 { + pub prog_fd: __u32, + pub __bindgen_anon_1: bpf_attr__bindgen_ty_14__bindgen_ty_1, + pub attach_type: __u32, + pub flags: __u32, + pub __bindgen_anon_2: bpf_attr__bindgen_ty_14__bindgen_ty_2, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_attr__bindgen_ty_14__bindgen_ty_1 { + pub target_fd: __u32, + pub target_ifindex: __u32, + _bindgen_union_align: u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_attr__bindgen_ty_14__bindgen_ty_2 { + pub target_btf_id: __u32, + pub __bindgen_anon_1: bpf_attr__bindgen_ty_14__bindgen_ty_2__bindgen_ty_1, + _bindgen_union_align: [u64; 2usize], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_14__bindgen_ty_2__bindgen_ty_1 { + pub iter_info: __u64, + pub iter_info_len: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_15 { + pub link_fd: __u32, + pub new_prog_fd: __u32, + pub flags: __u32, + pub old_prog_fd: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_16 { + pub link_fd: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_17 { + pub type_: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct 
bpf_attr__bindgen_ty_18 { + pub link_fd: __u32, + pub flags: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_19 { + pub prog_fd: __u32, + pub map_fd: __u32, + pub flags: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_prog_info { + pub type_: __u32, + pub id: __u32, + pub tag: [__u8; 8usize], + pub jited_prog_len: __u32, + pub xlated_prog_len: __u32, + pub jited_prog_insns: __u64, + pub xlated_prog_insns: __u64, + pub load_time: __u64, + pub created_by_uid: __u32, + pub nr_map_ids: __u32, + pub map_ids: __u64, + pub name: [::std::os::raw::c_char; 16usize], + pub ifindex: __u32, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 4usize]>, + pub netns_dev: __u64, + pub netns_ino: __u64, + pub nr_jited_ksyms: __u32, + pub nr_jited_func_lens: __u32, + pub jited_ksyms: __u64, + pub jited_func_lens: __u64, + pub btf_id: __u32, + pub func_info_rec_size: __u32, + pub func_info: __u64, + pub nr_func_info: __u32, + pub nr_line_info: __u32, + pub line_info: __u64, + pub jited_line_info: __u64, + pub nr_jited_line_info: __u32, + pub line_info_rec_size: __u32, + pub jited_line_info_rec_size: __u32, + pub nr_prog_tags: __u32, + pub prog_tags: __u64, + pub run_time_ns: __u64, + pub run_cnt: __u64, + pub recursion_misses: __u64, +} +impl bpf_prog_info { + #[inline] + pub fn gpl_compatible(&self) -> __u32 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u32) } + } + #[inline] + pub fn set_gpl_compatible(&mut self, val: __u32) { + unsafe { + let val: u32 = ::std::mem::transmute(val); + self._bitfield_1.set(0usize, 1u8, val as u64) + } + } + #[inline] + pub fn new_bitfield_1(gpl_compatible: __u32) -> __BindgenBitfieldUnit<[u8; 4usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default(); + __bindgen_bitfield_unit.set(0usize, 1u8, { + let gpl_compatible: u32 = unsafe { ::std::mem::transmute(gpl_compatible) }; + gpl_compatible as u64 + }); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct btf_header { + pub magic: __u16, + pub version: __u8, + pub flags: __u8, + pub hdr_len: __u32, + pub type_off: __u32, + pub type_len: __u32, + pub str_off: __u32, + pub str_len: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct btf_type { + pub name_off: __u32, + pub info: __u32, + pub __bindgen_anon_1: btf_type__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union btf_type__bindgen_ty_1 { + pub size: __u32, + pub type_: __u32, + _bindgen_union_align: u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct btf_enum { + pub name_off: __u32, + pub val: __s32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct btf_array { + pub type_: __u32, + pub index_type: __u32, + pub nelems: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct btf_member { + pub name_off: __u32, + pub type_: __u32, + pub offset: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct btf_param { + pub name_off: __u32, + pub type_: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct btf_var { + pub linkage: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct btf_var_secinfo { + pub type_: __u32, + pub offset: __u32, + pub size: __u32, +} +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum perf_type_id { + PERF_TYPE_HARDWARE = 0, + PERF_TYPE_SOFTWARE = 1, + PERF_TYPE_TRACEPOINT = 2, + PERF_TYPE_HW_CACHE = 3, + PERF_TYPE_RAW = 4, + PERF_TYPE_BREAKPOINT = 5, + 
PERF_TYPE_MAX = 6, +} +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum perf_sw_ids { + PERF_COUNT_SW_CPU_CLOCK = 0, + PERF_COUNT_SW_TASK_CLOCK = 1, + PERF_COUNT_SW_PAGE_FAULTS = 2, + PERF_COUNT_SW_CONTEXT_SWITCHES = 3, + PERF_COUNT_SW_CPU_MIGRATIONS = 4, + PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, + PERF_COUNT_SW_ALIGNMENT_FAULTS = 7, + PERF_COUNT_SW_EMULATION_FAULTS = 8, + PERF_COUNT_SW_DUMMY = 9, + PERF_COUNT_SW_BPF_OUTPUT = 10, + PERF_COUNT_SW_MAX = 11, +} +#[repr(u64)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum perf_event_sample_format { + PERF_SAMPLE_IP = 1, + PERF_SAMPLE_TID = 2, + PERF_SAMPLE_TIME = 4, + PERF_SAMPLE_ADDR = 8, + PERF_SAMPLE_READ = 16, + PERF_SAMPLE_CALLCHAIN = 32, + PERF_SAMPLE_ID = 64, + PERF_SAMPLE_CPU = 128, + PERF_SAMPLE_PERIOD = 256, + PERF_SAMPLE_STREAM_ID = 512, + PERF_SAMPLE_RAW = 1024, + PERF_SAMPLE_BRANCH_STACK = 2048, + PERF_SAMPLE_REGS_USER = 4096, + PERF_SAMPLE_STACK_USER = 8192, + PERF_SAMPLE_WEIGHT = 16384, + PERF_SAMPLE_DATA_SRC = 32768, + PERF_SAMPLE_IDENTIFIER = 65536, + PERF_SAMPLE_TRANSACTION = 131072, + PERF_SAMPLE_REGS_INTR = 262144, + PERF_SAMPLE_PHYS_ADDR = 524288, + PERF_SAMPLE_AUX = 1048576, + PERF_SAMPLE_CGROUP = 2097152, + PERF_SAMPLE_DATA_PAGE_SIZE = 4194304, + PERF_SAMPLE_CODE_PAGE_SIZE = 8388608, + PERF_SAMPLE_MAX = 16777216, + __PERF_SAMPLE_CALLCHAIN_EARLY = 9223372036854775808, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct perf_event_attr { + pub type_: __u32, + pub size: __u32, + pub config: __u64, + pub __bindgen_anon_1: perf_event_attr__bindgen_ty_1, + pub sample_type: __u64, + pub read_format: __u64, + pub _bitfield_align_1: [u32; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, + pub __bindgen_anon_2: perf_event_attr__bindgen_ty_2, + pub bp_type: __u32, + pub __bindgen_anon_3: perf_event_attr__bindgen_ty_3, + pub __bindgen_anon_4: perf_event_attr__bindgen_ty_4, + pub branch_sample_type: __u64, + pub sample_regs_user: __u64, + pub sample_stack_user: __u32, + pub clockid: __s32, + pub sample_regs_intr: __u64, + pub aux_watermark: __u32, + pub sample_max_stack: __u16, + pub __reserved_2: __u16, + pub aux_sample_size: __u32, + pub __reserved_3: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union perf_event_attr__bindgen_ty_1 { + pub sample_period: __u64, + pub sample_freq: __u64, + _bindgen_union_align: u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union perf_event_attr__bindgen_ty_2 { + pub wakeup_events: __u32, + pub wakeup_watermark: __u32, + _bindgen_union_align: u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union perf_event_attr__bindgen_ty_3 { + pub bp_addr: __u64, + pub kprobe_func: __u64, + pub uprobe_path: __u64, + pub config1: __u64, + _bindgen_union_align: u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union perf_event_attr__bindgen_ty_4 { + pub bp_len: __u64, + pub kprobe_addr: __u64, + pub probe_offset: __u64, + pub config2: __u64, + _bindgen_union_align: u64, +} +impl perf_event_attr { + #[inline] + pub fn disabled(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u64) } + } + #[inline] + pub fn set_disabled(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(0usize, 1u8, val as u64) + } + } + #[inline] + pub fn inherit(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u64) } + } + #[inline] + pub fn set_inherit(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); 
+ self._bitfield_1.set(1usize, 1u8, val as u64) + } + } + #[inline] + pub fn pinned(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u64) } + } + #[inline] + pub fn set_pinned(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(2usize, 1u8, val as u64) + } + } + #[inline] + pub fn exclusive(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(3usize, 1u8) as u64) } + } + #[inline] + pub fn set_exclusive(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(3usize, 1u8, val as u64) + } + } + #[inline] + pub fn exclude_user(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(4usize, 1u8) as u64) } + } + #[inline] + pub fn set_exclude_user(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(4usize, 1u8, val as u64) + } + } + #[inline] + pub fn exclude_kernel(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(5usize, 1u8) as u64) } + } + #[inline] + pub fn set_exclude_kernel(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(5usize, 1u8, val as u64) + } + } + #[inline] + pub fn exclude_hv(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(6usize, 1u8) as u64) } + } + #[inline] + pub fn set_exclude_hv(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(6usize, 1u8, val as u64) + } + } + #[inline] + pub fn exclude_idle(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(7usize, 1u8) as u64) } + } + #[inline] + pub fn set_exclude_idle(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(7usize, 1u8, val as u64) + } + } + #[inline] + pub fn mmap(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(8usize, 1u8) as u64) } + } + #[inline] + pub fn set_mmap(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(8usize, 1u8, val as u64) + } + } + #[inline] + pub fn comm(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(9usize, 1u8) as u64) } + } + #[inline] + pub fn set_comm(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(9usize, 1u8, val as u64) + } + } + #[inline] + pub fn freq(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(10usize, 1u8) as u64) } + } + #[inline] + pub fn set_freq(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(10usize, 1u8, val as u64) + } + } + #[inline] + pub fn inherit_stat(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(11usize, 1u8) as u64) } + } + #[inline] + pub fn set_inherit_stat(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(11usize, 1u8, val as u64) + } + } + #[inline] + pub fn enable_on_exec(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(12usize, 1u8) as u64) } + } + #[inline] + pub fn set_enable_on_exec(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(12usize, 1u8, val as u64) + } + } + #[inline] + pub fn task(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(13usize, 1u8) as u64) } + } + #[inline] + pub fn set_task(&mut self, 
val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(13usize, 1u8, val as u64) + } + } + #[inline] + pub fn watermark(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(14usize, 1u8) as u64) } + } + #[inline] + pub fn set_watermark(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(14usize, 1u8, val as u64) + } + } + #[inline] + pub fn precise_ip(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(15usize, 2u8) as u64) } + } + #[inline] + pub fn set_precise_ip(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(15usize, 2u8, val as u64) + } + } + #[inline] + pub fn mmap_data(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(17usize, 1u8) as u64) } + } + #[inline] + pub fn set_mmap_data(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(17usize, 1u8, val as u64) + } + } + #[inline] + pub fn sample_id_all(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(18usize, 1u8) as u64) } + } + #[inline] + pub fn set_sample_id_all(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(18usize, 1u8, val as u64) + } + } + #[inline] + pub fn exclude_host(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(19usize, 1u8) as u64) } + } + #[inline] + pub fn set_exclude_host(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(19usize, 1u8, val as u64) + } + } + #[inline] + pub fn exclude_guest(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(20usize, 1u8) as u64) } + } + #[inline] + pub fn set_exclude_guest(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(20usize, 1u8, val as u64) + } + } + #[inline] + pub fn exclude_callchain_kernel(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(21usize, 1u8) as u64) } + } + #[inline] + pub fn set_exclude_callchain_kernel(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(21usize, 1u8, val as u64) + } + } + #[inline] + pub fn exclude_callchain_user(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(22usize, 1u8) as u64) } + } + #[inline] + pub fn set_exclude_callchain_user(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(22usize, 1u8, val as u64) + } + } + #[inline] + pub fn mmap2(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(23usize, 1u8) as u64) } + } + #[inline] + pub fn set_mmap2(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(23usize, 1u8, val as u64) + } + } + #[inline] + pub fn comm_exec(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(24usize, 1u8) as u64) } + } + #[inline] + pub fn set_comm_exec(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(24usize, 1u8, val as u64) + } + } + #[inline] + pub fn use_clockid(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(25usize, 1u8) as u64) } + } + #[inline] + pub fn set_use_clockid(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(25usize, 1u8, val as u64) + } + } + 
#[inline] + pub fn context_switch(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(26usize, 1u8) as u64) } + } + #[inline] + pub fn set_context_switch(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(26usize, 1u8, val as u64) + } + } + #[inline] + pub fn write_backward(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(27usize, 1u8) as u64) } + } + #[inline] + pub fn set_write_backward(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(27usize, 1u8, val as u64) + } + } + #[inline] + pub fn namespaces(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(28usize, 1u8) as u64) } + } + #[inline] + pub fn set_namespaces(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(28usize, 1u8, val as u64) + } + } + #[inline] + pub fn ksymbol(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(29usize, 1u8) as u64) } + } + #[inline] + pub fn set_ksymbol(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(29usize, 1u8, val as u64) + } + } + #[inline] + pub fn bpf_event(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(30usize, 1u8) as u64) } + } + #[inline] + pub fn set_bpf_event(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(30usize, 1u8, val as u64) + } + } + #[inline] + pub fn aux_output(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(31usize, 1u8) as u64) } + } + #[inline] + pub fn set_aux_output(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(31usize, 1u8, val as u64) + } + } + #[inline] + pub fn cgroup(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(32usize, 1u8) as u64) } + } + #[inline] + pub fn set_cgroup(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(32usize, 1u8, val as u64) + } + } + #[inline] + pub fn text_poke(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(33usize, 1u8) as u64) } + } + #[inline] + pub fn set_text_poke(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(33usize, 1u8, val as u64) + } + } + #[inline] + pub fn __reserved_1(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(34usize, 30u8) as u64) } + } + #[inline] + pub fn set___reserved_1(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(34usize, 30u8, val as u64) + } + } + #[inline] + pub fn new_bitfield_1( + disabled: __u64, + inherit: __u64, + pinned: __u64, + exclusive: __u64, + exclude_user: __u64, + exclude_kernel: __u64, + exclude_hv: __u64, + exclude_idle: __u64, + mmap: __u64, + comm: __u64, + freq: __u64, + inherit_stat: __u64, + enable_on_exec: __u64, + task: __u64, + watermark: __u64, + precise_ip: __u64, + mmap_data: __u64, + sample_id_all: __u64, + exclude_host: __u64, + exclude_guest: __u64, + exclude_callchain_kernel: __u64, + exclude_callchain_user: __u64, + mmap2: __u64, + comm_exec: __u64, + use_clockid: __u64, + context_switch: __u64, + write_backward: __u64, + namespaces: __u64, + ksymbol: __u64, + bpf_event: __u64, + aux_output: __u64, + cgroup: __u64, + text_poke: __u64, + __reserved_1: __u64, + ) -> __BindgenBitfieldUnit<[u8; 
8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + __bindgen_bitfield_unit.set(0usize, 1u8, { + let disabled: u64 = unsafe { ::std::mem::transmute(disabled) }; + disabled as u64 + }); + __bindgen_bitfield_unit.set(1usize, 1u8, { + let inherit: u64 = unsafe { ::std::mem::transmute(inherit) }; + inherit as u64 + }); + __bindgen_bitfield_unit.set(2usize, 1u8, { + let pinned: u64 = unsafe { ::std::mem::transmute(pinned) }; + pinned as u64 + }); + __bindgen_bitfield_unit.set(3usize, 1u8, { + let exclusive: u64 = unsafe { ::std::mem::transmute(exclusive) }; + exclusive as u64 + }); + __bindgen_bitfield_unit.set(4usize, 1u8, { + let exclude_user: u64 = unsafe { ::std::mem::transmute(exclude_user) }; + exclude_user as u64 + }); + __bindgen_bitfield_unit.set(5usize, 1u8, { + let exclude_kernel: u64 = unsafe { ::std::mem::transmute(exclude_kernel) }; + exclude_kernel as u64 + }); + __bindgen_bitfield_unit.set(6usize, 1u8, { + let exclude_hv: u64 = unsafe { ::std::mem::transmute(exclude_hv) }; + exclude_hv as u64 + }); + __bindgen_bitfield_unit.set(7usize, 1u8, { + let exclude_idle: u64 = unsafe { ::std::mem::transmute(exclude_idle) }; + exclude_idle as u64 + }); + __bindgen_bitfield_unit.set(8usize, 1u8, { + let mmap: u64 = unsafe { ::std::mem::transmute(mmap) }; + mmap as u64 + }); + __bindgen_bitfield_unit.set(9usize, 1u8, { + let comm: u64 = unsafe { ::std::mem::transmute(comm) }; + comm as u64 + }); + __bindgen_bitfield_unit.set(10usize, 1u8, { + let freq: u64 = unsafe { ::std::mem::transmute(freq) }; + freq as u64 + }); + __bindgen_bitfield_unit.set(11usize, 1u8, { + let inherit_stat: u64 = unsafe { ::std::mem::transmute(inherit_stat) }; + inherit_stat as u64 + }); + __bindgen_bitfield_unit.set(12usize, 1u8, { + let enable_on_exec: u64 = unsafe { ::std::mem::transmute(enable_on_exec) }; + enable_on_exec as u64 + }); + __bindgen_bitfield_unit.set(13usize, 1u8, { + let task: u64 = unsafe { ::std::mem::transmute(task) }; + task as u64 + }); + __bindgen_bitfield_unit.set(14usize, 1u8, { + let watermark: u64 = unsafe { ::std::mem::transmute(watermark) }; + watermark as u64 + }); + __bindgen_bitfield_unit.set(15usize, 2u8, { + let precise_ip: u64 = unsafe { ::std::mem::transmute(precise_ip) }; + precise_ip as u64 + }); + __bindgen_bitfield_unit.set(17usize, 1u8, { + let mmap_data: u64 = unsafe { ::std::mem::transmute(mmap_data) }; + mmap_data as u64 + }); + __bindgen_bitfield_unit.set(18usize, 1u8, { + let sample_id_all: u64 = unsafe { ::std::mem::transmute(sample_id_all) }; + sample_id_all as u64 + }); + __bindgen_bitfield_unit.set(19usize, 1u8, { + let exclude_host: u64 = unsafe { ::std::mem::transmute(exclude_host) }; + exclude_host as u64 + }); + __bindgen_bitfield_unit.set(20usize, 1u8, { + let exclude_guest: u64 = unsafe { ::std::mem::transmute(exclude_guest) }; + exclude_guest as u64 + }); + __bindgen_bitfield_unit.set(21usize, 1u8, { + let exclude_callchain_kernel: u64 = + unsafe { ::std::mem::transmute(exclude_callchain_kernel) }; + exclude_callchain_kernel as u64 + }); + __bindgen_bitfield_unit.set(22usize, 1u8, { + let exclude_callchain_user: u64 = + unsafe { ::std::mem::transmute(exclude_callchain_user) }; + exclude_callchain_user as u64 + }); + __bindgen_bitfield_unit.set(23usize, 1u8, { + let mmap2: u64 = unsafe { ::std::mem::transmute(mmap2) }; + mmap2 as u64 + }); + __bindgen_bitfield_unit.set(24usize, 1u8, { + let comm_exec: u64 = unsafe { ::std::mem::transmute(comm_exec) }; + comm_exec as u64 + }); + 
__bindgen_bitfield_unit.set(25usize, 1u8, { + let use_clockid: u64 = unsafe { ::std::mem::transmute(use_clockid) }; + use_clockid as u64 + }); + __bindgen_bitfield_unit.set(26usize, 1u8, { + let context_switch: u64 = unsafe { ::std::mem::transmute(context_switch) }; + context_switch as u64 + }); + __bindgen_bitfield_unit.set(27usize, 1u8, { + let write_backward: u64 = unsafe { ::std::mem::transmute(write_backward) }; + write_backward as u64 + }); + __bindgen_bitfield_unit.set(28usize, 1u8, { + let namespaces: u64 = unsafe { ::std::mem::transmute(namespaces) }; + namespaces as u64 + }); + __bindgen_bitfield_unit.set(29usize, 1u8, { + let ksymbol: u64 = unsafe { ::std::mem::transmute(ksymbol) }; + ksymbol as u64 + }); + __bindgen_bitfield_unit.set(30usize, 1u8, { + let bpf_event: u64 = unsafe { ::std::mem::transmute(bpf_event) }; + bpf_event as u64 + }); + __bindgen_bitfield_unit.set(31usize, 1u8, { + let aux_output: u64 = unsafe { ::std::mem::transmute(aux_output) }; + aux_output as u64 + }); + __bindgen_bitfield_unit.set(32usize, 1u8, { + let cgroup: u64 = unsafe { ::std::mem::transmute(cgroup) }; + cgroup as u64 + }); + __bindgen_bitfield_unit.set(33usize, 1u8, { + let text_poke: u64 = unsafe { ::std::mem::transmute(text_poke) }; + text_poke as u64 + }); + __bindgen_bitfield_unit.set(34usize, 30u8, { + let __reserved_1: u64 = unsafe { ::std::mem::transmute(__reserved_1) }; + __reserved_1 as u64 + }); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct perf_event_mmap_page { + pub version: __u32, + pub compat_version: __u32, + pub lock: __u32, + pub index: __u32, + pub offset: __s64, + pub time_enabled: __u64, + pub time_running: __u64, + pub __bindgen_anon_1: perf_event_mmap_page__bindgen_ty_1, + pub pmc_width: __u16, + pub time_shift: __u16, + pub time_mult: __u32, + pub time_offset: __u64, + pub time_zero: __u64, + pub size: __u32, + pub __reserved_1: __u32, + pub time_cycles: __u64, + pub time_mask: __u64, + pub __reserved: [__u8; 928usize], + pub data_head: __u64, + pub data_tail: __u64, + pub data_offset: __u64, + pub data_size: __u64, + pub aux_head: __u64, + pub aux_tail: __u64, + pub aux_offset: __u64, + pub aux_size: __u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union perf_event_mmap_page__bindgen_ty_1 { + pub capabilities: __u64, + pub __bindgen_anon_1: perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1, + _bindgen_union_align: u64, +} +#[repr(C)] +#[repr(align(8))] +#[derive(Debug, Copy, Clone)] +pub struct perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1 { + pub _bitfield_align_1: [u64; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, +} +impl perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1 { + #[inline] + pub fn cap_bit0(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u64) } + } + #[inline] + pub fn set_cap_bit0(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(0usize, 1u8, val as u64) + } + } + #[inline] + pub fn cap_bit0_is_deprecated(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u64) } + } + #[inline] + pub fn set_cap_bit0_is_deprecated(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(1usize, 1u8, val as u64) + } + } + #[inline] + pub fn cap_user_rdpmc(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u64) } + } + #[inline] + pub fn set_cap_user_rdpmc(&mut self, val: __u64) { + unsafe { + let val: u64 = 
::std::mem::transmute(val); + self._bitfield_1.set(2usize, 1u8, val as u64) + } + } + #[inline] + pub fn cap_user_time(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(3usize, 1u8) as u64) } + } + #[inline] + pub fn set_cap_user_time(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(3usize, 1u8, val as u64) + } + } + #[inline] + pub fn cap_user_time_zero(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(4usize, 1u8) as u64) } + } + #[inline] + pub fn set_cap_user_time_zero(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(4usize, 1u8, val as u64) + } + } + #[inline] + pub fn cap_user_time_short(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(5usize, 1u8) as u64) } + } + #[inline] + pub fn set_cap_user_time_short(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(5usize, 1u8, val as u64) + } + } + #[inline] + pub fn cap_____res(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(6usize, 58u8) as u64) } + } + #[inline] + pub fn set_cap_____res(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(6usize, 58u8, val as u64) + } + } + #[inline] + pub fn new_bitfield_1( + cap_bit0: __u64, + cap_bit0_is_deprecated: __u64, + cap_user_rdpmc: __u64, + cap_user_time: __u64, + cap_user_time_zero: __u64, + cap_user_time_short: __u64, + cap_____res: __u64, + ) -> __BindgenBitfieldUnit<[u8; 8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + __bindgen_bitfield_unit.set(0usize, 1u8, { + let cap_bit0: u64 = unsafe { ::std::mem::transmute(cap_bit0) }; + cap_bit0 as u64 + }); + __bindgen_bitfield_unit.set(1usize, 1u8, { + let cap_bit0_is_deprecated: u64 = + unsafe { ::std::mem::transmute(cap_bit0_is_deprecated) }; + cap_bit0_is_deprecated as u64 + }); + __bindgen_bitfield_unit.set(2usize, 1u8, { + let cap_user_rdpmc: u64 = unsafe { ::std::mem::transmute(cap_user_rdpmc) }; + cap_user_rdpmc as u64 + }); + __bindgen_bitfield_unit.set(3usize, 1u8, { + let cap_user_time: u64 = unsafe { ::std::mem::transmute(cap_user_time) }; + cap_user_time as u64 + }); + __bindgen_bitfield_unit.set(4usize, 1u8, { + let cap_user_time_zero: u64 = unsafe { ::std::mem::transmute(cap_user_time_zero) }; + cap_user_time_zero as u64 + }); + __bindgen_bitfield_unit.set(5usize, 1u8, { + let cap_user_time_short: u64 = unsafe { ::std::mem::transmute(cap_user_time_short) }; + cap_user_time_short as u64 + }); + __bindgen_bitfield_unit.set(6usize, 58u8, { + let cap_____res: u64 = unsafe { ::std::mem::transmute(cap_____res) }; + cap_____res as u64 + }); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct perf_event_header { + pub type_: __u32, + pub misc: __u16, + pub size: __u16, +} +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum perf_event_type { + PERF_RECORD_MMAP = 1, + PERF_RECORD_LOST = 2, + PERF_RECORD_COMM = 3, + PERF_RECORD_EXIT = 4, + PERF_RECORD_THROTTLE = 5, + PERF_RECORD_UNTHROTTLE = 6, + PERF_RECORD_FORK = 7, + PERF_RECORD_READ = 8, + PERF_RECORD_SAMPLE = 9, + PERF_RECORD_MMAP2 = 10, + PERF_RECORD_AUX = 11, + PERF_RECORD_ITRACE_START = 12, + PERF_RECORD_LOST_SAMPLES = 13, + PERF_RECORD_SWITCH = 14, + PERF_RECORD_SWITCH_CPU_WIDE = 15, + PERF_RECORD_NAMESPACES = 16, + PERF_RECORD_KSYMBOL = 17, + PERF_RECORD_BPF_EVENT = 18, 
+ PERF_RECORD_CGROUP = 19, + PERF_RECORD_TEXT_POKE = 20, + PERF_RECORD_MAX = 21, +} +pub const IFLA_XDP_UNSPEC: _bindgen_ty_80 = _bindgen_ty_80::IFLA_XDP_UNSPEC; +pub const IFLA_XDP_FD: _bindgen_ty_80 = _bindgen_ty_80::IFLA_XDP_FD; +pub const IFLA_XDP_ATTACHED: _bindgen_ty_80 = _bindgen_ty_80::IFLA_XDP_ATTACHED; +pub const IFLA_XDP_FLAGS: _bindgen_ty_80 = _bindgen_ty_80::IFLA_XDP_FLAGS; +pub const IFLA_XDP_PROG_ID: _bindgen_ty_80 = _bindgen_ty_80::IFLA_XDP_PROG_ID; +pub const IFLA_XDP_DRV_PROG_ID: _bindgen_ty_80 = _bindgen_ty_80::IFLA_XDP_DRV_PROG_ID; +pub const IFLA_XDP_SKB_PROG_ID: _bindgen_ty_80 = _bindgen_ty_80::IFLA_XDP_SKB_PROG_ID; +pub const IFLA_XDP_HW_PROG_ID: _bindgen_ty_80 = _bindgen_ty_80::IFLA_XDP_HW_PROG_ID; +pub const IFLA_XDP_EXPECTED_FD: _bindgen_ty_80 = _bindgen_ty_80::IFLA_XDP_EXPECTED_FD; +pub const __IFLA_XDP_MAX: _bindgen_ty_80 = _bindgen_ty_80::__IFLA_XDP_MAX; +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum _bindgen_ty_80 { + IFLA_XDP_UNSPEC = 0, + IFLA_XDP_FD = 1, + IFLA_XDP_ATTACHED = 2, + IFLA_XDP_FLAGS = 3, + IFLA_XDP_PROG_ID = 4, + IFLA_XDP_DRV_PROG_ID = 5, + IFLA_XDP_SKB_PROG_ID = 6, + IFLA_XDP_HW_PROG_ID = 7, + IFLA_XDP_EXPECTED_FD = 8, + __IFLA_XDP_MAX = 9, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct ifinfomsg { + pub ifi_family: ::std::os::raw::c_uchar, + pub __ifi_pad: ::std::os::raw::c_uchar, + pub ifi_type: ::std::os::raw::c_ushort, + pub ifi_index: ::std::os::raw::c_int, + pub ifi_flags: ::std::os::raw::c_uint, + pub ifi_change: ::std::os::raw::c_uint, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct tcmsg { + pub tcm_family: ::std::os::raw::c_uchar, + pub tcm__pad1: ::std::os::raw::c_uchar, + pub tcm__pad2: ::std::os::raw::c_ushort, + pub tcm_ifindex: ::std::os::raw::c_int, + pub tcm_handle: __u32, + pub tcm_parent: __u32, + pub tcm_info: __u32, +} +pub const TCA_UNSPEC: _bindgen_ty_94 = _bindgen_ty_94::TCA_UNSPEC; +pub const TCA_KIND: _bindgen_ty_94 = _bindgen_ty_94::TCA_KIND; +pub const TCA_OPTIONS: _bindgen_ty_94 = _bindgen_ty_94::TCA_OPTIONS; +pub const TCA_STATS: _bindgen_ty_94 = _bindgen_ty_94::TCA_STATS; +pub const TCA_XSTATS: _bindgen_ty_94 = _bindgen_ty_94::TCA_XSTATS; +pub const TCA_RATE: _bindgen_ty_94 = _bindgen_ty_94::TCA_RATE; +pub const TCA_FCNT: _bindgen_ty_94 = _bindgen_ty_94::TCA_FCNT; +pub const TCA_STATS2: _bindgen_ty_94 = _bindgen_ty_94::TCA_STATS2; +pub const TCA_STAB: _bindgen_ty_94 = _bindgen_ty_94::TCA_STAB; +pub const TCA_PAD: _bindgen_ty_94 = _bindgen_ty_94::TCA_PAD; +pub const TCA_DUMP_INVISIBLE: _bindgen_ty_94 = _bindgen_ty_94::TCA_DUMP_INVISIBLE; +pub const TCA_CHAIN: _bindgen_ty_94 = _bindgen_ty_94::TCA_CHAIN; +pub const TCA_HW_OFFLOAD: _bindgen_ty_94 = _bindgen_ty_94::TCA_HW_OFFLOAD; +pub const TCA_INGRESS_BLOCK: _bindgen_ty_94 = _bindgen_ty_94::TCA_INGRESS_BLOCK; +pub const TCA_EGRESS_BLOCK: _bindgen_ty_94 = _bindgen_ty_94::TCA_EGRESS_BLOCK; +pub const TCA_DUMP_FLAGS: _bindgen_ty_94 = _bindgen_ty_94::TCA_DUMP_FLAGS; +pub const __TCA_MAX: _bindgen_ty_94 = _bindgen_ty_94::__TCA_MAX; +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum _bindgen_ty_94 { + TCA_UNSPEC = 0, + TCA_KIND = 1, + TCA_OPTIONS = 2, + TCA_STATS = 3, + TCA_XSTATS = 4, + TCA_RATE = 5, + TCA_FCNT = 6, + TCA_STATS2 = 7, + TCA_STAB = 8, + TCA_PAD = 9, + TCA_DUMP_INVISIBLE = 10, + TCA_CHAIN = 11, + TCA_HW_OFFLOAD = 12, + TCA_INGRESS_BLOCK = 13, + TCA_EGRESS_BLOCK = 14, + TCA_DUMP_FLAGS = 15, + __TCA_MAX = 16, +} +pub const TCA_BPF_UNSPEC: _bindgen_ty_151 = 
_bindgen_ty_151::TCA_BPF_UNSPEC; +pub const TCA_BPF_ACT: _bindgen_ty_151 = _bindgen_ty_151::TCA_BPF_ACT; +pub const TCA_BPF_POLICE: _bindgen_ty_151 = _bindgen_ty_151::TCA_BPF_POLICE; +pub const TCA_BPF_CLASSID: _bindgen_ty_151 = _bindgen_ty_151::TCA_BPF_CLASSID; +pub const TCA_BPF_OPS_LEN: _bindgen_ty_151 = _bindgen_ty_151::TCA_BPF_OPS_LEN; +pub const TCA_BPF_OPS: _bindgen_ty_151 = _bindgen_ty_151::TCA_BPF_OPS; +pub const TCA_BPF_FD: _bindgen_ty_151 = _bindgen_ty_151::TCA_BPF_FD; +pub const TCA_BPF_NAME: _bindgen_ty_151 = _bindgen_ty_151::TCA_BPF_NAME; +pub const TCA_BPF_FLAGS: _bindgen_ty_151 = _bindgen_ty_151::TCA_BPF_FLAGS; +pub const TCA_BPF_FLAGS_GEN: _bindgen_ty_151 = _bindgen_ty_151::TCA_BPF_FLAGS_GEN; +pub const TCA_BPF_TAG: _bindgen_ty_151 = _bindgen_ty_151::TCA_BPF_TAG; +pub const TCA_BPF_ID: _bindgen_ty_151 = _bindgen_ty_151::TCA_BPF_ID; +pub const __TCA_BPF_MAX: _bindgen_ty_151 = _bindgen_ty_151::__TCA_BPF_MAX; +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum _bindgen_ty_151 { + TCA_BPF_UNSPEC = 0, + TCA_BPF_ACT = 1, + TCA_BPF_POLICE = 2, + TCA_BPF_CLASSID = 3, + TCA_BPF_OPS_LEN = 4, + TCA_BPF_OPS = 5, + TCA_BPF_FD = 6, + TCA_BPF_NAME = 7, + TCA_BPF_FLAGS = 8, + TCA_BPF_FLAGS_GEN = 9, + TCA_BPF_TAG = 10, + TCA_BPF_ID = 11, + __TCA_BPF_MAX = 12, +} +pub const AYA_PERF_EVENT_IOC_ENABLE: ::std::os::raw::c_int = 9216; +pub const AYA_PERF_EVENT_IOC_DISABLE: ::std::os::raw::c_int = 9217; +pub const AYA_PERF_EVENT_IOC_SET_BPF: ::std::os::raw::c_int = 1074013192; diff --git a/aya/src/generated/linux_bindings_x86_64.rs b/aya/src/generated/linux_bindings_x86_64.rs index f7135f80..3509714c 100644 --- a/aya/src/generated/linux_bindings_x86_64.rs +++ b/aya/src/generated/linux_bindings_x86_64.rs @@ -802,7 +802,11 @@ pub enum perf_event_sample_format { PERF_SAMPLE_TRANSACTION = 131072, PERF_SAMPLE_REGS_INTR = 262144, PERF_SAMPLE_PHYS_ADDR = 524288, - PERF_SAMPLE_MAX = 1048576, + PERF_SAMPLE_AUX = 1048576, + PERF_SAMPLE_CGROUP = 2097152, + PERF_SAMPLE_DATA_PAGE_SIZE = 4194304, + PERF_SAMPLE_CODE_PAGE_SIZE = 8388608, + PERF_SAMPLE_MAX = 16777216, __PERF_SAMPLE_CALLCHAIN_EARLY = 9223372036854775808, } #[repr(C)] @@ -828,6 +832,8 @@ pub struct perf_event_attr { pub aux_watermark: __u32, pub sample_max_stack: __u16, pub __reserved_2: __u16, + pub aux_sample_size: __u32, + pub __reserved_3: __u32, } #[repr(C)] #[derive(Copy, Clone)] @@ -1204,14 +1210,36 @@ impl perf_event_attr { } } #[inline] + pub fn cgroup(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(32usize, 1u8) as u64) } + } + #[inline] + pub fn set_cgroup(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(32usize, 1u8, val as u64) + } + } + #[inline] + pub fn text_poke(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(33usize, 1u8) as u64) } + } + #[inline] + pub fn set_text_poke(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(33usize, 1u8, val as u64) + } + } + #[inline] pub fn __reserved_1(&self) -> __u64 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(32usize, 32u8) as u64) } + unsafe { ::std::mem::transmute(self._bitfield_1.get(34usize, 30u8) as u64) } } #[inline] pub fn set___reserved_1(&mut self, val: __u64) { unsafe { let val: u64 = ::std::mem::transmute(val); - self._bitfield_1.set(32usize, 32u8, val as u64) + self._bitfield_1.set(34usize, 30u8, val as u64) } } #[inline] @@ -1247,6 +1275,8 @@ impl perf_event_attr { ksymbol: __u64, bpf_event: 
__u64, aux_output: __u64, + cgroup: __u64, + text_poke: __u64, __reserved_1: __u64, ) -> __BindgenBitfieldUnit<[u8; 8usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); @@ -1376,7 +1406,15 @@ impl perf_event_attr { let aux_output: u64 = unsafe { ::std::mem::transmute(aux_output) }; aux_output as u64 }); - __bindgen_bitfield_unit.set(32usize, 32u8, { + __bindgen_bitfield_unit.set(32usize, 1u8, { + let cgroup: u64 = unsafe { ::std::mem::transmute(cgroup) }; + cgroup as u64 + }); + __bindgen_bitfield_unit.set(33usize, 1u8, { + let text_poke: u64 = unsafe { ::std::mem::transmute(text_poke) }; + text_poke as u64 + }); + __bindgen_bitfield_unit.set(34usize, 30u8, { let __reserved_1: u64 = unsafe { ::std::mem::transmute(__reserved_1) }; __reserved_1 as u64 }); @@ -1400,7 +1438,10 @@ pub struct perf_event_mmap_page { pub time_offset: __u64, pub time_zero: __u64, pub size: __u32, - pub __reserved: [__u8; 948usize], + pub __reserved_1: __u32, + pub time_cycles: __u64, + pub time_mask: __u64, + pub __reserved: [__u8; 928usize], pub data_head: __u64, pub data_tail: __u64, pub data_offset: __u64, @@ -1481,14 +1522,25 @@ impl perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1 { } } #[inline] + pub fn cap_user_time_short(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(5usize, 1u8) as u64) } + } + #[inline] + pub fn set_cap_user_time_short(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(5usize, 1u8, val as u64) + } + } + #[inline] pub fn cap_____res(&self) -> __u64 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(5usize, 59u8) as u64) } + unsafe { ::std::mem::transmute(self._bitfield_1.get(6usize, 58u8) as u64) } } #[inline] pub fn set_cap_____res(&mut self, val: __u64) { unsafe { let val: u64 = ::std::mem::transmute(val); - self._bitfield_1.set(5usize, 59u8, val as u64) + self._bitfield_1.set(6usize, 58u8, val as u64) } } #[inline] @@ -1498,6 +1550,7 @@ impl perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1 { cap_user_rdpmc: __u64, cap_user_time: __u64, cap_user_time_zero: __u64, + cap_user_time_short: __u64, cap_____res: __u64, ) -> __BindgenBitfieldUnit<[u8; 8usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); @@ -1522,7 +1575,11 @@ impl perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1 { let cap_user_time_zero: u64 = unsafe { ::std::mem::transmute(cap_user_time_zero) }; cap_user_time_zero as u64 }); - __bindgen_bitfield_unit.set(5usize, 59u8, { + __bindgen_bitfield_unit.set(5usize, 1u8, { + let cap_user_time_short: u64 = unsafe { ::std::mem::transmute(cap_user_time_short) }; + cap_user_time_short as u64 + }); + __bindgen_bitfield_unit.set(6usize, 58u8, { let cap_____res: u64 = unsafe { ::std::mem::transmute(cap_____res) }; cap_____res as u64 }); @@ -1557,7 +1614,9 @@ pub enum perf_event_type { PERF_RECORD_NAMESPACES = 16, PERF_RECORD_KSYMBOL = 17, PERF_RECORD_BPF_EVENT = 18, - PERF_RECORD_MAX = 19, + PERF_RECORD_CGROUP = 19, + PERF_RECORD_TEXT_POKE = 20, + PERF_RECORD_MAX = 21, } pub const IFLA_XDP_UNSPEC: _bindgen_ty_80 = _bindgen_ty_80::IFLA_XDP_UNSPEC; pub const IFLA_XDP_FD: _bindgen_ty_80 = _bindgen_ty_80::IFLA_XDP_FD; @@ -1604,25 +1663,26 @@ pub struct tcmsg { pub tcm_parent: __u32, pub tcm_info: __u32, } -pub const TCA_UNSPEC: _bindgen_ty_92 = _bindgen_ty_92::TCA_UNSPEC; -pub const TCA_KIND: _bindgen_ty_92 = _bindgen_ty_92::TCA_KIND; -pub const TCA_OPTIONS: _bindgen_ty_92 = _bindgen_ty_92::TCA_OPTIONS; -pub const 
TCA_STATS: _bindgen_ty_92 = _bindgen_ty_92::TCA_STATS; -pub const TCA_XSTATS: _bindgen_ty_92 = _bindgen_ty_92::TCA_XSTATS; -pub const TCA_RATE: _bindgen_ty_92 = _bindgen_ty_92::TCA_RATE; -pub const TCA_FCNT: _bindgen_ty_92 = _bindgen_ty_92::TCA_FCNT; -pub const TCA_STATS2: _bindgen_ty_92 = _bindgen_ty_92::TCA_STATS2; -pub const TCA_STAB: _bindgen_ty_92 = _bindgen_ty_92::TCA_STAB; -pub const TCA_PAD: _bindgen_ty_92 = _bindgen_ty_92::TCA_PAD; -pub const TCA_DUMP_INVISIBLE: _bindgen_ty_92 = _bindgen_ty_92::TCA_DUMP_INVISIBLE; -pub const TCA_CHAIN: _bindgen_ty_92 = _bindgen_ty_92::TCA_CHAIN; -pub const TCA_HW_OFFLOAD: _bindgen_ty_92 = _bindgen_ty_92::TCA_HW_OFFLOAD; -pub const TCA_INGRESS_BLOCK: _bindgen_ty_92 = _bindgen_ty_92::TCA_INGRESS_BLOCK; -pub const TCA_EGRESS_BLOCK: _bindgen_ty_92 = _bindgen_ty_92::TCA_EGRESS_BLOCK; -pub const __TCA_MAX: _bindgen_ty_92 = _bindgen_ty_92::__TCA_MAX; +pub const TCA_UNSPEC: _bindgen_ty_94 = _bindgen_ty_94::TCA_UNSPEC; +pub const TCA_KIND: _bindgen_ty_94 = _bindgen_ty_94::TCA_KIND; +pub const TCA_OPTIONS: _bindgen_ty_94 = _bindgen_ty_94::TCA_OPTIONS; +pub const TCA_STATS: _bindgen_ty_94 = _bindgen_ty_94::TCA_STATS; +pub const TCA_XSTATS: _bindgen_ty_94 = _bindgen_ty_94::TCA_XSTATS; +pub const TCA_RATE: _bindgen_ty_94 = _bindgen_ty_94::TCA_RATE; +pub const TCA_FCNT: _bindgen_ty_94 = _bindgen_ty_94::TCA_FCNT; +pub const TCA_STATS2: _bindgen_ty_94 = _bindgen_ty_94::TCA_STATS2; +pub const TCA_STAB: _bindgen_ty_94 = _bindgen_ty_94::TCA_STAB; +pub const TCA_PAD: _bindgen_ty_94 = _bindgen_ty_94::TCA_PAD; +pub const TCA_DUMP_INVISIBLE: _bindgen_ty_94 = _bindgen_ty_94::TCA_DUMP_INVISIBLE; +pub const TCA_CHAIN: _bindgen_ty_94 = _bindgen_ty_94::TCA_CHAIN; +pub const TCA_HW_OFFLOAD: _bindgen_ty_94 = _bindgen_ty_94::TCA_HW_OFFLOAD; +pub const TCA_INGRESS_BLOCK: _bindgen_ty_94 = _bindgen_ty_94::TCA_INGRESS_BLOCK; +pub const TCA_EGRESS_BLOCK: _bindgen_ty_94 = _bindgen_ty_94::TCA_EGRESS_BLOCK; +pub const TCA_DUMP_FLAGS: _bindgen_ty_94 = _bindgen_ty_94::TCA_DUMP_FLAGS; +pub const __TCA_MAX: _bindgen_ty_94 = _bindgen_ty_94::__TCA_MAX; #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub enum _bindgen_ty_92 { +pub enum _bindgen_ty_94 { TCA_UNSPEC = 0, TCA_KIND = 1, TCA_OPTIONS = 2, @@ -1638,24 +1698,25 @@ pub enum _bindgen_ty_92 { TCA_HW_OFFLOAD = 12, TCA_INGRESS_BLOCK = 13, TCA_EGRESS_BLOCK = 14, - __TCA_MAX = 15, -} -pub const TCA_BPF_UNSPEC: _bindgen_ty_147 = _bindgen_ty_147::TCA_BPF_UNSPEC; -pub const TCA_BPF_ACT: _bindgen_ty_147 = _bindgen_ty_147::TCA_BPF_ACT; -pub const TCA_BPF_POLICE: _bindgen_ty_147 = _bindgen_ty_147::TCA_BPF_POLICE; -pub const TCA_BPF_CLASSID: _bindgen_ty_147 = _bindgen_ty_147::TCA_BPF_CLASSID; -pub const TCA_BPF_OPS_LEN: _bindgen_ty_147 = _bindgen_ty_147::TCA_BPF_OPS_LEN; -pub const TCA_BPF_OPS: _bindgen_ty_147 = _bindgen_ty_147::TCA_BPF_OPS; -pub const TCA_BPF_FD: _bindgen_ty_147 = _bindgen_ty_147::TCA_BPF_FD; -pub const TCA_BPF_NAME: _bindgen_ty_147 = _bindgen_ty_147::TCA_BPF_NAME; -pub const TCA_BPF_FLAGS: _bindgen_ty_147 = _bindgen_ty_147::TCA_BPF_FLAGS; -pub const TCA_BPF_FLAGS_GEN: _bindgen_ty_147 = _bindgen_ty_147::TCA_BPF_FLAGS_GEN; -pub const TCA_BPF_TAG: _bindgen_ty_147 = _bindgen_ty_147::TCA_BPF_TAG; -pub const TCA_BPF_ID: _bindgen_ty_147 = _bindgen_ty_147::TCA_BPF_ID; -pub const __TCA_BPF_MAX: _bindgen_ty_147 = _bindgen_ty_147::__TCA_BPF_MAX; + TCA_DUMP_FLAGS = 15, + __TCA_MAX = 16, +} +pub const TCA_BPF_UNSPEC: _bindgen_ty_151 = _bindgen_ty_151::TCA_BPF_UNSPEC; +pub const TCA_BPF_ACT: _bindgen_ty_151 = 
_bindgen_ty_151::TCA_BPF_ACT; +pub const TCA_BPF_POLICE: _bindgen_ty_151 = _bindgen_ty_151::TCA_BPF_POLICE; +pub const TCA_BPF_CLASSID: _bindgen_ty_151 = _bindgen_ty_151::TCA_BPF_CLASSID; +pub const TCA_BPF_OPS_LEN: _bindgen_ty_151 = _bindgen_ty_151::TCA_BPF_OPS_LEN; +pub const TCA_BPF_OPS: _bindgen_ty_151 = _bindgen_ty_151::TCA_BPF_OPS; +pub const TCA_BPF_FD: _bindgen_ty_151 = _bindgen_ty_151::TCA_BPF_FD; +pub const TCA_BPF_NAME: _bindgen_ty_151 = _bindgen_ty_151::TCA_BPF_NAME; +pub const TCA_BPF_FLAGS: _bindgen_ty_151 = _bindgen_ty_151::TCA_BPF_FLAGS; +pub const TCA_BPF_FLAGS_GEN: _bindgen_ty_151 = _bindgen_ty_151::TCA_BPF_FLAGS_GEN; +pub const TCA_BPF_TAG: _bindgen_ty_151 = _bindgen_ty_151::TCA_BPF_TAG; +pub const TCA_BPF_ID: _bindgen_ty_151 = _bindgen_ty_151::TCA_BPF_ID; +pub const __TCA_BPF_MAX: _bindgen_ty_151 = _bindgen_ty_151::__TCA_BPF_MAX; #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub enum _bindgen_ty_147 { +pub enum _bindgen_ty_151 { TCA_BPF_UNSPEC = 0, TCA_BPF_ACT = 1, TCA_BPF_POLICE = 2, diff --git a/aya/src/generated/mod.rs b/aya/src/generated/mod.rs index 1deb535a..3c413f32 100644 --- a/aya/src/generated/mod.rs +++ b/aya/src/generated/mod.rs @@ -3,6 +3,8 @@ mod btf_internal_bindings; #[cfg(target_arch = "aarch64")] mod linux_bindings_aarch64; +#[cfg(target_arch = "arm")] +mod linux_bindings_armv7; #[cfg(target_arch = "x86_64")] mod linux_bindings_x86_64; @@ -11,5 +13,8 @@ pub use btf_internal_bindings::*; #[cfg(target_arch = "x86_64")] pub use linux_bindings_x86_64::*; +#[cfg(target_arch = "arm")] +pub use linux_bindings_armv7::*; + #[cfg(target_arch = "aarch64")] pub use linux_bindings_aarch64::*; diff --git a/aya/src/maps/mod.rs b/aya/src/maps/mod.rs index f96e4b32..d5cf2fa3 100644 --- a/aya/src/maps/mod.rs +++ b/aya/src/maps/mod.rs @@ -64,6 +64,7 @@ pub use queue::Queue; pub use sock::{SockHash, SockMap}; pub use stack::Stack; pub use stack_trace::StackTraceMap; + #[derive(Error, Debug)] pub enum MapError { #[error("map `{name}` not found ")] @@ -84,7 +85,7 @@ pub enum MapError { #[error("failed to create map `{name}`: {code}")] CreateError { name: String, - code: i64, + code: libc::c_long, io_error: io::Error, }, @@ -109,7 +110,7 @@ pub enum MapError { #[error("the `{call}` syscall failed with code {code} io_error {io_error}")] SyscallError { call: String, - code: i64, + code: libc::c_long, io_error: io::Error, }, diff --git a/aya/src/maps/perf/perf_buffer.rs b/aya/src/maps/perf/perf_buffer.rs index 485ab106..bdab9325 100644 --- a/aya/src/maps/perf/perf_buffer.rs +++ b/aya/src/maps/perf/perf_buffer.rs @@ -280,7 +280,7 @@ unsafe fn mmap( prot: c_int, flags: c_int, fd: i32, - offset: i64, + offset: libc::off_t, ) -> *mut c_void { #[cfg(not(test))] return libc::mmap(addr, len, prot, flags, fd, offset); diff --git a/aya/src/sys/mod.rs b/aya/src/sys/mod.rs index 18da818b..deee918f 100644 --- a/aya/src/sys/mod.rs +++ b/aya/src/sys/mod.rs @@ -64,7 +64,7 @@ unsafe fn syscall_impl(call: Syscall) -> SysResult { flags, } => libc::syscall(SYS_perf_event_open, &attr, pid, cpu, group, flags), PerfEventIoctl { fd, request, arg } => { - libc::ioctl(fd, request.try_into().unwrap(), arg) as i64 + libc::ioctl(fd, request.try_into().unwrap(), arg) as libc::c_long } }; diff --git a/bpf/aya-bpf-bindings/src/aarch64/bindings.rs b/bpf/aya-bpf-bindings/src/aarch64/bindings.rs index dc0a0703..0e8a177d 100644 --- a/bpf/aya-bpf-bindings/src/aarch64/bindings.rs +++ b/bpf/aya-bpf-bindings/src/aarch64/bindings.rs @@ -228,6 +228,8 @@ pub const SO_TIMESTAMPING_NEW: u32 = 65; pub 
const SO_RCVTIMEO_NEW: u32 = 66; pub const SO_SNDTIMEO_NEW: u32 = 67; pub const SO_DETACH_REUSEPORT_BPF: u32 = 68; +pub const SO_PREFER_BUSY_POLL: u32 = 69; +pub const SO_BUSY_POLL_BUDGET: u32 = 70; pub const SO_TIMESTAMP: u32 = 29; pub const SO_TIMESTAMPNS: u32 = 35; pub const SO_TIMESTAMPING: u32 = 37; diff --git a/bpf/aya-bpf-bindings/src/armv7/bindings.rs b/bpf/aya-bpf-bindings/src/armv7/bindings.rs new file mode 100644 index 00000000..0e8a177d --- /dev/null +++ b/bpf/aya-bpf-bindings/src/armv7/bindings.rs @@ -0,0 +1,1176 @@ +#[repr(C)] +#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] +pub struct __BindgenBitfieldUnit<Storage> { + storage: Storage, +} +impl<Storage> __BindgenBitfieldUnit<Storage> { + #[inline] + pub const fn new(storage: Storage) -> Self { + Self { storage } + } +} +impl<Storage> __BindgenBitfieldUnit<Storage> +where + Storage: AsRef<[u8]> + AsMut<[u8]>, +{ + #[inline] + pub fn get_bit(&self, index: usize) -> bool { + debug_assert!(index / 8 < self.storage.as_ref().len()); + let byte_index = index / 8; + let byte = self.storage.as_ref()[byte_index]; + let bit_index = if cfg!(target_endian = "big") { + 7 - (index % 8) + } else { + index % 8 + }; + let mask = 1 << bit_index; + byte & mask == mask + } + #[inline] + pub fn set_bit(&mut self, index: usize, val: bool) { + debug_assert!(index / 8 < self.storage.as_ref().len()); + let byte_index = index / 8; + let byte = &mut self.storage.as_mut()[byte_index]; + let bit_index = if cfg!(target_endian = "big") { + 7 - (index % 8) + } else { + index % 8 + }; + let mask = 1 << bit_index; + if val { + *byte |= mask; + } else { + *byte &= !mask; + } + } + #[inline] + pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 { + debug_assert!(bit_width <= 64); + debug_assert!(bit_offset / 8 < self.storage.as_ref().len()); + debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len()); + let mut val = 0; + for i in 0..(bit_width as usize) { + if self.get_bit(i + bit_offset) { + let index = if cfg!(target_endian = "big") { + bit_width as usize - 1 - i + } else { + i + }; + val |= 1 << index; + } + } + val + } + #[inline] + pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) { + debug_assert!(bit_width <= 64); + debug_assert!(bit_offset / 8 < self.storage.as_ref().len()); + debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len()); + for i in 0..(bit_width as usize) { + let mask = 1 << i; + let val_bit_is_set = val & mask == mask; + let index = if cfg!(target_endian = "big") { + bit_width as usize - 1 - i + } else { + i + }; + self.set_bit(index + bit_offset, val_bit_is_set); + } + } +} +pub const BPF_LD: u32 = 0; +pub const BPF_LDX: u32 = 1; +pub const BPF_ST: u32 = 2; +pub const BPF_STX: u32 = 3; +pub const BPF_ALU: u32 = 4; +pub const BPF_JMP: u32 = 5; +pub const BPF_RET: u32 = 6; +pub const BPF_MISC: u32 = 7; +pub const BPF_W: u32 = 0; +pub const BPF_H: u32 = 8; +pub const BPF_B: u32 = 16; +pub const BPF_IMM: u32 = 0; +pub const BPF_ABS: u32 = 32; +pub const BPF_IND: u32 = 64; +pub const BPF_MEM: u32 = 96; +pub const BPF_LEN: u32 = 128; +pub const BPF_MSH: u32 = 160; +pub const BPF_ADD: u32 = 0; +pub const BPF_SUB: u32 = 16; +pub const BPF_MUL: u32 = 32; +pub const BPF_DIV: u32 = 48; +pub const BPF_OR: u32 = 64; +pub const BPF_AND: u32 = 80; +pub const BPF_LSH: u32 = 96; +pub const BPF_RSH: u32 = 112; +pub const BPF_NEG: u32 = 128; +pub const BPF_MOD: u32 = 144; +pub const BPF_XOR: u32 = 160; +pub const BPF_JA: u32 = 0; +pub const BPF_JEQ: u32 = 16; +pub const BPF_JGT: u32 = 32; 
+pub const BPF_JGE: u32 = 48; +pub const BPF_JSET: u32 = 64; +pub const BPF_K: u32 = 0; +pub const BPF_X: u32 = 8; +pub const BPF_MAXINSNS: u32 = 4096; +pub const BPF_JMP32: u32 = 6; +pub const BPF_ALU64: u32 = 7; +pub const BPF_DW: u32 = 24; +pub const BPF_ATOMIC: u32 = 192; +pub const BPF_XADD: u32 = 192; +pub const BPF_MOV: u32 = 176; +pub const BPF_ARSH: u32 = 192; +pub const BPF_END: u32 = 208; +pub const BPF_TO_LE: u32 = 0; +pub const BPF_TO_BE: u32 = 8; +pub const BPF_FROM_LE: u32 = 0; +pub const BPF_FROM_BE: u32 = 8; +pub const BPF_JNE: u32 = 80; +pub const BPF_JLT: u32 = 160; +pub const BPF_JLE: u32 = 176; +pub const BPF_JSGT: u32 = 96; +pub const BPF_JSGE: u32 = 112; +pub const BPF_JSLT: u32 = 192; +pub const BPF_JSLE: u32 = 208; +pub const BPF_CALL: u32 = 128; +pub const BPF_EXIT: u32 = 144; +pub const BPF_FETCH: u32 = 1; +pub const BPF_XCHG: u32 = 225; +pub const BPF_CMPXCHG: u32 = 241; +pub const BPF_F_ALLOW_OVERRIDE: u32 = 1; +pub const BPF_F_ALLOW_MULTI: u32 = 2; +pub const BPF_F_REPLACE: u32 = 4; +pub const BPF_F_STRICT_ALIGNMENT: u32 = 1; +pub const BPF_F_ANY_ALIGNMENT: u32 = 2; +pub const BPF_F_TEST_RND_HI32: u32 = 4; +pub const BPF_F_TEST_STATE_FREQ: u32 = 8; +pub const BPF_F_SLEEPABLE: u32 = 16; +pub const BPF_PSEUDO_MAP_FD: u32 = 1; +pub const BPF_PSEUDO_MAP_IDX: u32 = 5; +pub const BPF_PSEUDO_MAP_VALUE: u32 = 2; +pub const BPF_PSEUDO_MAP_IDX_VALUE: u32 = 6; +pub const BPF_PSEUDO_BTF_ID: u32 = 3; +pub const BPF_PSEUDO_FUNC: u32 = 4; +pub const BPF_PSEUDO_CALL: u32 = 1; +pub const BPF_PSEUDO_KFUNC_CALL: u32 = 2; +pub const BPF_F_QUERY_EFFECTIVE: u32 = 1; +pub const BPF_F_TEST_RUN_ON_CPU: u32 = 1; +pub const BPF_BUILD_ID_SIZE: u32 = 20; +pub const BPF_OBJ_NAME_LEN: u32 = 16; +pub const BPF_TAG_SIZE: u32 = 8; +pub const SOL_SOCKET: u32 = 1; +pub const SO_DEBUG: u32 = 1; +pub const SO_REUSEADDR: u32 = 2; +pub const SO_TYPE: u32 = 3; +pub const SO_ERROR: u32 = 4; +pub const SO_DONTROUTE: u32 = 5; +pub const SO_BROADCAST: u32 = 6; +pub const SO_SNDBUF: u32 = 7; +pub const SO_RCVBUF: u32 = 8; +pub const SO_SNDBUFFORCE: u32 = 32; +pub const SO_RCVBUFFORCE: u32 = 33; +pub const SO_KEEPALIVE: u32 = 9; +pub const SO_OOBINLINE: u32 = 10; +pub const SO_NO_CHECK: u32 = 11; +pub const SO_PRIORITY: u32 = 12; +pub const SO_LINGER: u32 = 13; +pub const SO_BSDCOMPAT: u32 = 14; +pub const SO_REUSEPORT: u32 = 15; +pub const SO_PASSCRED: u32 = 16; +pub const SO_PEERCRED: u32 = 17; +pub const SO_RCVLOWAT: u32 = 18; +pub const SO_SNDLOWAT: u32 = 19; +pub const SO_RCVTIMEO_OLD: u32 = 20; +pub const SO_SNDTIMEO_OLD: u32 = 21; +pub const SO_SECURITY_AUTHENTICATION: u32 = 22; +pub const SO_SECURITY_ENCRYPTION_TRANSPORT: u32 = 23; +pub const SO_SECURITY_ENCRYPTION_NETWORK: u32 = 24; +pub const SO_BINDTODEVICE: u32 = 25; +pub const SO_ATTACH_FILTER: u32 = 26; +pub const SO_DETACH_FILTER: u32 = 27; +pub const SO_GET_FILTER: u32 = 26; +pub const SO_PEERNAME: u32 = 28; +pub const SO_ACCEPTCONN: u32 = 30; +pub const SO_PEERSEC: u32 = 31; +pub const SO_PASSSEC: u32 = 34; +pub const SO_MARK: u32 = 36; +pub const SO_PROTOCOL: u32 = 38; +pub const SO_DOMAIN: u32 = 39; +pub const SO_RXQ_OVFL: u32 = 40; +pub const SO_WIFI_STATUS: u32 = 41; +pub const SO_PEEK_OFF: u32 = 42; +pub const SO_NOFCS: u32 = 43; +pub const SO_LOCK_FILTER: u32 = 44; +pub const SO_SELECT_ERR_QUEUE: u32 = 45; +pub const SO_BUSY_POLL: u32 = 46; +pub const SO_MAX_PACING_RATE: u32 = 47; +pub const SO_BPF_EXTENSIONS: u32 = 48; +pub const SO_INCOMING_CPU: u32 = 49; +pub const SO_ATTACH_BPF: u32 = 50; +pub const SO_DETACH_BPF: u32 = 27; +pub 
const SO_ATTACH_REUSEPORT_CBPF: u32 = 51; +pub const SO_ATTACH_REUSEPORT_EBPF: u32 = 52; +pub const SO_CNX_ADVICE: u32 = 53; +pub const SO_MEMINFO: u32 = 55; +pub const SO_INCOMING_NAPI_ID: u32 = 56; +pub const SO_COOKIE: u32 = 57; +pub const SO_PEERGROUPS: u32 = 59; +pub const SO_ZEROCOPY: u32 = 60; +pub const SO_TXTIME: u32 = 61; +pub const SO_BINDTOIFINDEX: u32 = 62; +pub const SO_TIMESTAMP_OLD: u32 = 29; +pub const SO_TIMESTAMPNS_OLD: u32 = 35; +pub const SO_TIMESTAMPING_OLD: u32 = 37; +pub const SO_TIMESTAMP_NEW: u32 = 63; +pub const SO_TIMESTAMPNS_NEW: u32 = 64; +pub const SO_TIMESTAMPING_NEW: u32 = 65; +pub const SO_RCVTIMEO_NEW: u32 = 66; +pub const SO_SNDTIMEO_NEW: u32 = 67; +pub const SO_DETACH_REUSEPORT_BPF: u32 = 68; +pub const SO_PREFER_BUSY_POLL: u32 = 69; +pub const SO_BUSY_POLL_BUDGET: u32 = 70; +pub const SO_TIMESTAMP: u32 = 29; +pub const SO_TIMESTAMPNS: u32 = 35; +pub const SO_TIMESTAMPING: u32 = 37; +pub const SO_RCVTIMEO: u32 = 20; +pub const SO_SNDTIMEO: u32 = 21; +pub const TC_ACT_UNSPEC: i32 = -1; +pub const TC_ACT_OK: u32 = 0; +pub const TC_ACT_RECLASSIFY: u32 = 1; +pub const TC_ACT_SHOT: u32 = 2; +pub const TC_ACT_PIPE: u32 = 3; +pub const TC_ACT_STOLEN: u32 = 4; +pub const TC_ACT_QUEUED: u32 = 5; +pub const TC_ACT_REPEAT: u32 = 6; +pub const TC_ACT_REDIRECT: u32 = 7; +pub const TC_ACT_TRAP: u32 = 8; +pub const TC_ACT_VALUE_MAX: u32 = 8; +pub const TC_ACT_EXT_VAL_MASK: u32 = 268435455; +pub type __u8 = ::aya_bpf_cty::c_uchar; +pub type __u16 = ::aya_bpf_cty::c_ushort; +pub type __s32 = ::aya_bpf_cty::c_int; +pub type __u32 = ::aya_bpf_cty::c_uint; +pub type __s64 = ::aya_bpf_cty::c_longlong; +pub type __u64 = ::aya_bpf_cty::c_ulonglong; +pub type __be16 = __u16; +pub type __be32 = __u32; +pub type __wsum = __u32; +pub const BPF_REG_0: ::aya_bpf_cty::c_uint = 0; +pub const BPF_REG_1: ::aya_bpf_cty::c_uint = 1; +pub const BPF_REG_2: ::aya_bpf_cty::c_uint = 2; +pub const BPF_REG_3: ::aya_bpf_cty::c_uint = 3; +pub const BPF_REG_4: ::aya_bpf_cty::c_uint = 4; +pub const BPF_REG_5: ::aya_bpf_cty::c_uint = 5; +pub const BPF_REG_6: ::aya_bpf_cty::c_uint = 6; +pub const BPF_REG_7: ::aya_bpf_cty::c_uint = 7; +pub const BPF_REG_8: ::aya_bpf_cty::c_uint = 8; +pub const BPF_REG_9: ::aya_bpf_cty::c_uint = 9; +pub const BPF_REG_10: ::aya_bpf_cty::c_uint = 10; +pub const __MAX_BPF_REG: ::aya_bpf_cty::c_uint = 11; +pub type _bindgen_ty_1 = ::aya_bpf_cty::c_uint; +pub mod bpf_map_type { + pub type Type = ::aya_bpf_cty::c_uint; + pub const BPF_MAP_TYPE_UNSPEC: Type = 0; + pub const BPF_MAP_TYPE_HASH: Type = 1; + pub const BPF_MAP_TYPE_ARRAY: Type = 2; + pub const BPF_MAP_TYPE_PROG_ARRAY: Type = 3; + pub const BPF_MAP_TYPE_PERF_EVENT_ARRAY: Type = 4; + pub const BPF_MAP_TYPE_PERCPU_HASH: Type = 5; + pub const BPF_MAP_TYPE_PERCPU_ARRAY: Type = 6; + pub const BPF_MAP_TYPE_STACK_TRACE: Type = 7; + pub const BPF_MAP_TYPE_CGROUP_ARRAY: Type = 8; + pub const BPF_MAP_TYPE_LRU_HASH: Type = 9; + pub const BPF_MAP_TYPE_LRU_PERCPU_HASH: Type = 10; + pub const BPF_MAP_TYPE_LPM_TRIE: Type = 11; + pub const BPF_MAP_TYPE_ARRAY_OF_MAPS: Type = 12; + pub const BPF_MAP_TYPE_HASH_OF_MAPS: Type = 13; + pub const BPF_MAP_TYPE_DEVMAP: Type = 14; + pub const BPF_MAP_TYPE_SOCKMAP: Type = 15; + pub const BPF_MAP_TYPE_CPUMAP: Type = 16; + pub const BPF_MAP_TYPE_XSKMAP: Type = 17; + pub const BPF_MAP_TYPE_SOCKHASH: Type = 18; + pub const BPF_MAP_TYPE_CGROUP_STORAGE: Type = 19; + pub const BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: Type = 20; + pub const BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: Type = 21; + pub const 
BPF_MAP_TYPE_QUEUE: Type = 22; + pub const BPF_MAP_TYPE_STACK: Type = 23; + pub const BPF_MAP_TYPE_SK_STORAGE: Type = 24; + pub const BPF_MAP_TYPE_DEVMAP_HASH: Type = 25; + pub const BPF_MAP_TYPE_STRUCT_OPS: Type = 26; + pub const BPF_MAP_TYPE_RINGBUF: Type = 27; + pub const BPF_MAP_TYPE_INODE_STORAGE: Type = 28; + pub const BPF_MAP_TYPE_TASK_STORAGE: Type = 29; +} +pub const BPF_ANY: ::aya_bpf_cty::c_uint = 0; +pub const BPF_NOEXIST: ::aya_bpf_cty::c_uint = 1; +pub const BPF_EXIST: ::aya_bpf_cty::c_uint = 2; +pub const BPF_F_LOCK: ::aya_bpf_cty::c_uint = 4; +pub type _bindgen_ty_2 = ::aya_bpf_cty::c_uint; +pub const BPF_F_NO_PREALLOC: ::aya_bpf_cty::c_uint = 1; +pub const BPF_F_NO_COMMON_LRU: ::aya_bpf_cty::c_uint = 2; +pub const BPF_F_NUMA_NODE: ::aya_bpf_cty::c_uint = 4; +pub const BPF_F_RDONLY: ::aya_bpf_cty::c_uint = 8; +pub const BPF_F_WRONLY: ::aya_bpf_cty::c_uint = 16; +pub const BPF_F_STACK_BUILD_ID: ::aya_bpf_cty::c_uint = 32; +pub const BPF_F_ZERO_SEED: ::aya_bpf_cty::c_uint = 64; +pub const BPF_F_RDONLY_PROG: ::aya_bpf_cty::c_uint = 128; +pub const BPF_F_WRONLY_PROG: ::aya_bpf_cty::c_uint = 256; +pub const BPF_F_CLONE: ::aya_bpf_cty::c_uint = 512; +pub const BPF_F_MMAPABLE: ::aya_bpf_cty::c_uint = 1024; +pub const BPF_F_PRESERVE_ELEMS: ::aya_bpf_cty::c_uint = 2048; +pub const BPF_F_INNER_MAP: ::aya_bpf_cty::c_uint = 4096; +pub type _bindgen_ty_3 = ::aya_bpf_cty::c_uint; +pub const BPF_F_RECOMPUTE_CSUM: ::aya_bpf_cty::c_uint = 1; +pub const BPF_F_INVALIDATE_HASH: ::aya_bpf_cty::c_uint = 2; +pub type _bindgen_ty_4 = ::aya_bpf_cty::c_uint; +pub const BPF_F_HDR_FIELD_MASK: ::aya_bpf_cty::c_uint = 15; +pub type _bindgen_ty_5 = ::aya_bpf_cty::c_uint; +pub const BPF_F_PSEUDO_HDR: ::aya_bpf_cty::c_uint = 16; +pub const BPF_F_MARK_MANGLED_0: ::aya_bpf_cty::c_uint = 32; +pub const BPF_F_MARK_ENFORCE: ::aya_bpf_cty::c_uint = 64; +pub type _bindgen_ty_6 = ::aya_bpf_cty::c_uint; +pub const BPF_F_INGRESS: ::aya_bpf_cty::c_uint = 1; +pub type _bindgen_ty_7 = ::aya_bpf_cty::c_uint; +pub const BPF_F_TUNINFO_IPV6: ::aya_bpf_cty::c_uint = 1; +pub type _bindgen_ty_8 = ::aya_bpf_cty::c_uint; +pub const BPF_F_SKIP_FIELD_MASK: ::aya_bpf_cty::c_uint = 255; +pub const BPF_F_USER_STACK: ::aya_bpf_cty::c_uint = 256; +pub const BPF_F_FAST_STACK_CMP: ::aya_bpf_cty::c_uint = 512; +pub const BPF_F_REUSE_STACKID: ::aya_bpf_cty::c_uint = 1024; +pub const BPF_F_USER_BUILD_ID: ::aya_bpf_cty::c_uint = 2048; +pub type _bindgen_ty_9 = ::aya_bpf_cty::c_uint; +pub const BPF_F_ZERO_CSUM_TX: ::aya_bpf_cty::c_uint = 2; +pub const BPF_F_DONT_FRAGMENT: ::aya_bpf_cty::c_uint = 4; +pub const BPF_F_SEQ_NUMBER: ::aya_bpf_cty::c_uint = 8; +pub type _bindgen_ty_10 = ::aya_bpf_cty::c_uint; +pub const BPF_F_INDEX_MASK: ::aya_bpf_cty::c_ulong = 4294967295; +pub const BPF_F_CURRENT_CPU: ::aya_bpf_cty::c_ulong = 4294967295; +pub const BPF_F_CTXLEN_MASK: ::aya_bpf_cty::c_ulong = 4503595332403200; +pub type _bindgen_ty_11 = ::aya_bpf_cty::c_ulong; +pub const BPF_F_CURRENT_NETNS: ::aya_bpf_cty::c_int = -1; +pub type _bindgen_ty_12 = ::aya_bpf_cty::c_int; +pub const BPF_CSUM_LEVEL_QUERY: ::aya_bpf_cty::c_uint = 0; +pub const BPF_CSUM_LEVEL_INC: ::aya_bpf_cty::c_uint = 1; +pub const BPF_CSUM_LEVEL_DEC: ::aya_bpf_cty::c_uint = 2; +pub const BPF_CSUM_LEVEL_RESET: ::aya_bpf_cty::c_uint = 3; +pub type _bindgen_ty_13 = ::aya_bpf_cty::c_uint; +pub const BPF_F_ADJ_ROOM_FIXED_GSO: ::aya_bpf_cty::c_uint = 1; +pub const BPF_F_ADJ_ROOM_ENCAP_L3_IPV4: ::aya_bpf_cty::c_uint = 2; +pub const BPF_F_ADJ_ROOM_ENCAP_L3_IPV6: ::aya_bpf_cty::c_uint = 4; +pub 
const BPF_F_ADJ_ROOM_ENCAP_L4_GRE: ::aya_bpf_cty::c_uint = 8; +pub const BPF_F_ADJ_ROOM_ENCAP_L4_UDP: ::aya_bpf_cty::c_uint = 16; +pub const BPF_F_ADJ_ROOM_NO_CSUM_RESET: ::aya_bpf_cty::c_uint = 32; +pub const BPF_F_ADJ_ROOM_ENCAP_L2_ETH: ::aya_bpf_cty::c_uint = 64; +pub type _bindgen_ty_14 = ::aya_bpf_cty::c_uint; +pub const BPF_ADJ_ROOM_ENCAP_L2_MASK: ::aya_bpf_cty::c_uint = 255; +pub const BPF_ADJ_ROOM_ENCAP_L2_SHIFT: ::aya_bpf_cty::c_uint = 56; +pub type _bindgen_ty_15 = ::aya_bpf_cty::c_uint; +pub const BPF_F_SYSCTL_BASE_NAME: ::aya_bpf_cty::c_uint = 1; +pub type _bindgen_ty_16 = ::aya_bpf_cty::c_uint; +pub const BPF_LOCAL_STORAGE_GET_F_CREATE: ::aya_bpf_cty::c_uint = 1; +pub const BPF_SK_STORAGE_GET_F_CREATE: ::aya_bpf_cty::c_uint = 1; +pub type _bindgen_ty_17 = ::aya_bpf_cty::c_uint; +pub const BPF_F_GET_BRANCH_RECORDS_SIZE: ::aya_bpf_cty::c_uint = 1; +pub type _bindgen_ty_18 = ::aya_bpf_cty::c_uint; +pub const BPF_RB_NO_WAKEUP: ::aya_bpf_cty::c_uint = 1; +pub const BPF_RB_FORCE_WAKEUP: ::aya_bpf_cty::c_uint = 2; +pub type _bindgen_ty_19 = ::aya_bpf_cty::c_uint; +pub const BPF_RB_AVAIL_DATA: ::aya_bpf_cty::c_uint = 0; +pub const BPF_RB_RING_SIZE: ::aya_bpf_cty::c_uint = 1; +pub const BPF_RB_CONS_POS: ::aya_bpf_cty::c_uint = 2; +pub const BPF_RB_PROD_POS: ::aya_bpf_cty::c_uint = 3; +pub type _bindgen_ty_20 = ::aya_bpf_cty::c_uint; +pub const BPF_RINGBUF_BUSY_BIT: ::aya_bpf_cty::c_uint = 2147483648; +pub const BPF_RINGBUF_DISCARD_BIT: ::aya_bpf_cty::c_uint = 1073741824; +pub const BPF_RINGBUF_HDR_SZ: ::aya_bpf_cty::c_uint = 8; +pub type _bindgen_ty_21 = ::aya_bpf_cty::c_uint; +pub const BPF_SK_LOOKUP_F_REPLACE: ::aya_bpf_cty::c_uint = 1; +pub const BPF_SK_LOOKUP_F_NO_REUSEPORT: ::aya_bpf_cty::c_uint = 2; +pub type _bindgen_ty_22 = ::aya_bpf_cty::c_uint; +pub mod bpf_adj_room_mode { + pub type Type = ::aya_bpf_cty::c_uint; + pub const BPF_ADJ_ROOM_NET: Type = 0; + pub const BPF_ADJ_ROOM_MAC: Type = 1; +} +pub const BPF_F_BPRM_SECUREEXEC: ::aya_bpf_cty::c_uint = 1; +pub type _bindgen_ty_23 = ::aya_bpf_cty::c_uint; +pub const BPF_F_BROADCAST: ::aya_bpf_cty::c_uint = 8; +pub const BPF_F_EXCLUDE_INGRESS: ::aya_bpf_cty::c_uint = 16; +pub type _bindgen_ty_24 = ::aya_bpf_cty::c_uint; +#[repr(C)] +#[derive(Copy, Clone)] +pub struct __sk_buff { + pub len: __u32, + pub pkt_type: __u32, + pub mark: __u32, + pub queue_mapping: __u32, + pub protocol: __u32, + pub vlan_present: __u32, + pub vlan_tci: __u32, + pub vlan_proto: __u32, + pub priority: __u32, + pub ingress_ifindex: __u32, + pub ifindex: __u32, + pub tc_index: __u32, + pub cb: [__u32; 5usize], + pub hash: __u32, + pub tc_classid: __u32, + pub data: __u32, + pub data_end: __u32, + pub napi_id: __u32, + pub family: __u32, + pub remote_ip4: __u32, + pub local_ip4: __u32, + pub remote_ip6: [__u32; 4usize], + pub local_ip6: [__u32; 4usize], + pub remote_port: __u32, + pub local_port: __u32, + pub data_meta: __u32, + pub __bindgen_anon_1: __sk_buff__bindgen_ty_1, + pub tstamp: __u64, + pub wire_len: __u32, + pub gso_segs: __u32, + pub __bindgen_anon_2: __sk_buff__bindgen_ty_2, + pub gso_size: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union __sk_buff__bindgen_ty_1 { + pub flow_keys: *mut bpf_flow_keys, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, + _bindgen_union_align: u64, +} +impl __sk_buff__bindgen_ty_1 { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + 
__bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union __sk_buff__bindgen_ty_2 { + pub sk: *mut bpf_sock, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, + _bindgen_union_align: u64, +} +impl __sk_buff__bindgen_ty_2 { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_tunnel_key { + pub tunnel_id: __u32, + pub __bindgen_anon_1: bpf_tunnel_key__bindgen_ty_1, + pub tunnel_tos: __u8, + pub tunnel_ttl: __u8, + pub tunnel_ext: __u16, + pub tunnel_label: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_tunnel_key__bindgen_ty_1 { + pub remote_ipv4: __u32, + pub remote_ipv6: [__u32; 4usize], + _bindgen_union_align: [u32; 4usize], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_xfrm_state { + pub reqid: __u32, + pub spi: __u32, + pub family: __u16, + pub ext: __u16, + pub __bindgen_anon_1: bpf_xfrm_state__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_xfrm_state__bindgen_ty_1 { + pub remote_ipv4: __u32, + pub remote_ipv6: [__u32; 4usize], + _bindgen_union_align: [u32; 4usize], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_sock { + pub bound_dev_if: __u32, + pub family: __u32, + pub type_: __u32, + pub protocol: __u32, + pub mark: __u32, + pub priority: __u32, + pub src_ip4: __u32, + pub src_ip6: [__u32; 4usize], + pub src_port: __u32, + pub dst_port: __u32, + pub dst_ip4: __u32, + pub dst_ip6: [__u32; 4usize], + pub state: __u32, + pub rx_queue_mapping: __s32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_tcp_sock { + pub snd_cwnd: __u32, + pub srtt_us: __u32, + pub rtt_min: __u32, + pub snd_ssthresh: __u32, + pub rcv_nxt: __u32, + pub snd_nxt: __u32, + pub snd_una: __u32, + pub mss_cache: __u32, + pub ecn_flags: __u32, + pub rate_delivered: __u32, + pub rate_interval_us: __u32, + pub packets_out: __u32, + pub retrans_out: __u32, + pub total_retrans: __u32, + pub segs_in: __u32, + pub data_segs_in: __u32, + pub segs_out: __u32, + pub data_segs_out: __u32, + pub lost_out: __u32, + pub sacked_out: __u32, + pub bytes_received: __u64, + pub bytes_acked: __u64, + pub dsack_dups: __u32, + pub delivered: __u32, + pub delivered_ce: __u32, + pub icsk_retransmits: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_sock_tuple { + pub __bindgen_anon_1: bpf_sock_tuple__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_sock_tuple__bindgen_ty_1 { + pub ipv4: bpf_sock_tuple__bindgen_ty_1__bindgen_ty_1, + pub ipv6: bpf_sock_tuple__bindgen_ty_1__bindgen_ty_2, + _bindgen_union_align: [u32; 9usize], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_sock_tuple__bindgen_ty_1__bindgen_ty_1 { + pub saddr: __be32, + pub daddr: __be32, + pub sport: __be16, + pub dport: __be16, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_sock_tuple__bindgen_ty_1__bindgen_ty_2 { + pub saddr: [__be32; 4usize], + pub daddr: [__be32; 4usize], + pub sport: __be16, + pub dport: __be16, +} +pub mod xdp_action { + pub type Type = ::aya_bpf_cty::c_uint; + pub const XDP_ABORTED: Type = 0; + pub const XDP_DROP: Type = 1; + pub const XDP_PASS: Type = 2; + pub const XDP_TX: Type = 3; + pub const XDP_REDIRECT: Type = 4; +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct xdp_md { + pub data: __u32, + pub data_end: __u32, + pub data_meta: __u32, + pub 
ingress_ifindex: __u32, + pub rx_queue_index: __u32, + pub egress_ifindex: __u32, +} +pub mod sk_action { + pub type Type = ::aya_bpf_cty::c_uint; + pub const SK_DROP: Type = 0; + pub const SK_PASS: Type = 1; +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct sk_msg_md { + pub __bindgen_anon_1: sk_msg_md__bindgen_ty_1, + pub __bindgen_anon_2: sk_msg_md__bindgen_ty_2, + pub family: __u32, + pub remote_ip4: __u32, + pub local_ip4: __u32, + pub remote_ip6: [__u32; 4usize], + pub local_ip6: [__u32; 4usize], + pub remote_port: __u32, + pub local_port: __u32, + pub size: __u32, + pub __bindgen_anon_3: sk_msg_md__bindgen_ty_3, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union sk_msg_md__bindgen_ty_1 { + pub data: *mut ::aya_bpf_cty::c_void, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, + _bindgen_union_align: u64, +} +impl sk_msg_md__bindgen_ty_1 { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union sk_msg_md__bindgen_ty_2 { + pub data_end: *mut ::aya_bpf_cty::c_void, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, + _bindgen_union_align: u64, +} +impl sk_msg_md__bindgen_ty_2 { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union sk_msg_md__bindgen_ty_3 { + pub sk: *mut bpf_sock, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, + _bindgen_union_align: u64, +} +impl sk_msg_md__bindgen_ty_3 { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct sk_reuseport_md { + pub __bindgen_anon_1: sk_reuseport_md__bindgen_ty_1, + pub __bindgen_anon_2: sk_reuseport_md__bindgen_ty_2, + pub len: __u32, + pub eth_protocol: __u32, + pub ip_protocol: __u32, + pub bind_inany: __u32, + pub hash: __u32, + pub __bindgen_anon_3: sk_reuseport_md__bindgen_ty_3, + pub __bindgen_anon_4: sk_reuseport_md__bindgen_ty_4, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union sk_reuseport_md__bindgen_ty_1 { + pub data: *mut ::aya_bpf_cty::c_void, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, + _bindgen_union_align: u64, +} +impl sk_reuseport_md__bindgen_ty_1 { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union sk_reuseport_md__bindgen_ty_2 { + pub data_end: *mut ::aya_bpf_cty::c_void, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, + _bindgen_union_align: u64, +} +impl sk_reuseport_md__bindgen_ty_2 { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union sk_reuseport_md__bindgen_ty_3 { + pub sk: *mut bpf_sock, + pub _bitfield_align_1: [u8; 0], + 
pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, + _bindgen_union_align: u64, +} +impl sk_reuseport_md__bindgen_ty_3 { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union sk_reuseport_md__bindgen_ty_4 { + pub migrating_sk: *mut bpf_sock, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, + _bindgen_union_align: u64, +} +impl sk_reuseport_md__bindgen_ty_4 { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_map_info { + pub type_: __u32, + pub id: __u32, + pub key_size: __u32, + pub value_size: __u32, + pub max_entries: __u32, + pub map_flags: __u32, + pub name: [::aya_bpf_cty::c_char; 16usize], + pub ifindex: __u32, + pub btf_vmlinux_value_type_id: __u32, + pub netns_dev: __u64, + pub netns_ino: __u64, + pub btf_id: __u32, + pub btf_key_type_id: __u32, + pub btf_value_type_id: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_sock_addr { + pub user_family: __u32, + pub user_ip4: __u32, + pub user_ip6: [__u32; 4usize], + pub user_port: __u32, + pub family: __u32, + pub type_: __u32, + pub protocol: __u32, + pub msg_src_ip4: __u32, + pub msg_src_ip6: [__u32; 4usize], + pub __bindgen_anon_1: bpf_sock_addr__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_sock_addr__bindgen_ty_1 { + pub sk: *mut bpf_sock, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, + _bindgen_union_align: u64, +} +impl bpf_sock_addr__bindgen_ty_1 { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_sock_ops { + pub op: __u32, + pub __bindgen_anon_1: bpf_sock_ops__bindgen_ty_1, + pub family: __u32, + pub remote_ip4: __u32, + pub local_ip4: __u32, + pub remote_ip6: [__u32; 4usize], + pub local_ip6: [__u32; 4usize], + pub remote_port: __u32, + pub local_port: __u32, + pub is_fullsock: __u32, + pub snd_cwnd: __u32, + pub srtt_us: __u32, + pub bpf_sock_ops_cb_flags: __u32, + pub state: __u32, + pub rtt_min: __u32, + pub snd_ssthresh: __u32, + pub rcv_nxt: __u32, + pub snd_nxt: __u32, + pub snd_una: __u32, + pub mss_cache: __u32, + pub ecn_flags: __u32, + pub rate_delivered: __u32, + pub rate_interval_us: __u32, + pub packets_out: __u32, + pub retrans_out: __u32, + pub total_retrans: __u32, + pub segs_in: __u32, + pub data_segs_in: __u32, + pub segs_out: __u32, + pub data_segs_out: __u32, + pub lost_out: __u32, + pub sacked_out: __u32, + pub sk_txhash: __u32, + pub bytes_received: __u64, + pub bytes_acked: __u64, + pub __bindgen_anon_2: bpf_sock_ops__bindgen_ty_2, + pub __bindgen_anon_3: bpf_sock_ops__bindgen_ty_3, + pub __bindgen_anon_4: bpf_sock_ops__bindgen_ty_4, + pub skb_len: __u32, + pub skb_tcp_flags: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_sock_ops__bindgen_ty_1 { + pub args: [__u32; 4usize], + pub reply: __u32, + pub replylong: [__u32; 4usize], + _bindgen_union_align: [u32; 4usize], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_sock_ops__bindgen_ty_2 { + pub sk: *mut 
bpf_sock, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, + _bindgen_union_align: u64, +} +impl bpf_sock_ops__bindgen_ty_2 { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_sock_ops__bindgen_ty_3 { + pub skb_data: *mut ::aya_bpf_cty::c_void, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, + _bindgen_union_align: u64, +} +impl bpf_sock_ops__bindgen_ty_3 { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_sock_ops__bindgen_ty_4 { + pub skb_data_end: *mut ::aya_bpf_cty::c_void, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, + _bindgen_union_align: u64, +} +impl bpf_sock_ops__bindgen_ty_4 { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +pub const BPF_SOCK_OPS_RTO_CB_FLAG: ::aya_bpf_cty::c_uint = 1; +pub const BPF_SOCK_OPS_RETRANS_CB_FLAG: ::aya_bpf_cty::c_uint = 2; +pub const BPF_SOCK_OPS_STATE_CB_FLAG: ::aya_bpf_cty::c_uint = 4; +pub const BPF_SOCK_OPS_RTT_CB_FLAG: ::aya_bpf_cty::c_uint = 8; +pub const BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG: ::aya_bpf_cty::c_uint = 16; +pub const BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG: ::aya_bpf_cty::c_uint = 32; +pub const BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG: ::aya_bpf_cty::c_uint = 64; +pub const BPF_SOCK_OPS_ALL_CB_FLAGS: ::aya_bpf_cty::c_uint = 127; +pub type _bindgen_ty_25 = ::aya_bpf_cty::c_uint; +pub const BPF_SOCK_OPS_VOID: ::aya_bpf_cty::c_uint = 0; +pub const BPF_SOCK_OPS_TIMEOUT_INIT: ::aya_bpf_cty::c_uint = 1; +pub const BPF_SOCK_OPS_RWND_INIT: ::aya_bpf_cty::c_uint = 2; +pub const BPF_SOCK_OPS_TCP_CONNECT_CB: ::aya_bpf_cty::c_uint = 3; +pub const BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB: ::aya_bpf_cty::c_uint = 4; +pub const BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB: ::aya_bpf_cty::c_uint = 5; +pub const BPF_SOCK_OPS_NEEDS_ECN: ::aya_bpf_cty::c_uint = 6; +pub const BPF_SOCK_OPS_BASE_RTT: ::aya_bpf_cty::c_uint = 7; +pub const BPF_SOCK_OPS_RTO_CB: ::aya_bpf_cty::c_uint = 8; +pub const BPF_SOCK_OPS_RETRANS_CB: ::aya_bpf_cty::c_uint = 9; +pub const BPF_SOCK_OPS_STATE_CB: ::aya_bpf_cty::c_uint = 10; +pub const BPF_SOCK_OPS_TCP_LISTEN_CB: ::aya_bpf_cty::c_uint = 11; +pub const BPF_SOCK_OPS_RTT_CB: ::aya_bpf_cty::c_uint = 12; +pub const BPF_SOCK_OPS_PARSE_HDR_OPT_CB: ::aya_bpf_cty::c_uint = 13; +pub const BPF_SOCK_OPS_HDR_OPT_LEN_CB: ::aya_bpf_cty::c_uint = 14; +pub const BPF_SOCK_OPS_WRITE_HDR_OPT_CB: ::aya_bpf_cty::c_uint = 15; +pub type _bindgen_ty_26 = ::aya_bpf_cty::c_uint; +pub const BPF_TCP_ESTABLISHED: ::aya_bpf_cty::c_uint = 1; +pub const BPF_TCP_SYN_SENT: ::aya_bpf_cty::c_uint = 2; +pub const BPF_TCP_SYN_RECV: ::aya_bpf_cty::c_uint = 3; +pub const BPF_TCP_FIN_WAIT1: ::aya_bpf_cty::c_uint = 4; +pub const BPF_TCP_FIN_WAIT2: ::aya_bpf_cty::c_uint = 5; +pub const BPF_TCP_TIME_WAIT: ::aya_bpf_cty::c_uint = 6; +pub const BPF_TCP_CLOSE: ::aya_bpf_cty::c_uint = 7; +pub const BPF_TCP_CLOSE_WAIT: ::aya_bpf_cty::c_uint = 8; +pub const BPF_TCP_LAST_ACK: ::aya_bpf_cty::c_uint = 
9; +pub const BPF_TCP_LISTEN: ::aya_bpf_cty::c_uint = 10; +pub const BPF_TCP_CLOSING: ::aya_bpf_cty::c_uint = 11; +pub const BPF_TCP_NEW_SYN_RECV: ::aya_bpf_cty::c_uint = 12; +pub const BPF_TCP_MAX_STATES: ::aya_bpf_cty::c_uint = 13; +pub type _bindgen_ty_27 = ::aya_bpf_cty::c_uint; +pub mod _bindgen_ty_29 { + pub type Type = ::aya_bpf_cty::c_uint; + pub const BPF_LOAD_HDR_OPT_TCP_SYN: Type = 1; +} +pub mod _bindgen_ty_30 { + pub type Type = ::aya_bpf_cty::c_uint; + pub const BPF_WRITE_HDR_TCP_CURRENT_MSS: Type = 1; + pub const BPF_WRITE_HDR_TCP_SYNACK_COOKIE: Type = 2; +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_perf_event_value { + pub counter: __u64, + pub enabled: __u64, + pub running: __u64, +} +pub const BPF_DEVCG_ACC_MKNOD: ::aya_bpf_cty::c_uint = 1; +pub const BPF_DEVCG_ACC_READ: ::aya_bpf_cty::c_uint = 2; +pub const BPF_DEVCG_ACC_WRITE: ::aya_bpf_cty::c_uint = 4; +pub type _bindgen_ty_31 = ::aya_bpf_cty::c_uint; +pub const BPF_DEVCG_DEV_BLOCK: ::aya_bpf_cty::c_uint = 1; +pub const BPF_DEVCG_DEV_CHAR: ::aya_bpf_cty::c_uint = 2; +pub type _bindgen_ty_32 = ::aya_bpf_cty::c_uint; +pub const BPF_FIB_LOOKUP_DIRECT: ::aya_bpf_cty::c_uint = 1; +pub const BPF_FIB_LOOKUP_OUTPUT: ::aya_bpf_cty::c_uint = 2; +pub type _bindgen_ty_33 = ::aya_bpf_cty::c_uint; +pub const BPF_FIB_LKUP_RET_SUCCESS: ::aya_bpf_cty::c_uint = 0; +pub const BPF_FIB_LKUP_RET_BLACKHOLE: ::aya_bpf_cty::c_uint = 1; +pub const BPF_FIB_LKUP_RET_UNREACHABLE: ::aya_bpf_cty::c_uint = 2; +pub const BPF_FIB_LKUP_RET_PROHIBIT: ::aya_bpf_cty::c_uint = 3; +pub const BPF_FIB_LKUP_RET_NOT_FWDED: ::aya_bpf_cty::c_uint = 4; +pub const BPF_FIB_LKUP_RET_FWD_DISABLED: ::aya_bpf_cty::c_uint = 5; +pub const BPF_FIB_LKUP_RET_UNSUPP_LWT: ::aya_bpf_cty::c_uint = 6; +pub const BPF_FIB_LKUP_RET_NO_NEIGH: ::aya_bpf_cty::c_uint = 7; +pub const BPF_FIB_LKUP_RET_FRAG_NEEDED: ::aya_bpf_cty::c_uint = 8; +pub type _bindgen_ty_34 = ::aya_bpf_cty::c_uint; +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_fib_lookup { + pub family: __u8, + pub l4_protocol: __u8, + pub sport: __be16, + pub dport: __be16, + pub __bindgen_anon_1: bpf_fib_lookup__bindgen_ty_1, + pub ifindex: __u32, + pub __bindgen_anon_2: bpf_fib_lookup__bindgen_ty_2, + pub __bindgen_anon_3: bpf_fib_lookup__bindgen_ty_3, + pub __bindgen_anon_4: bpf_fib_lookup__bindgen_ty_4, + pub h_vlan_proto: __be16, + pub h_vlan_TCI: __be16, + pub smac: [__u8; 6usize], + pub dmac: [__u8; 6usize], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_fib_lookup__bindgen_ty_1 { + pub tot_len: __u16, + pub mtu_result: __u16, + _bindgen_union_align: u16, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_fib_lookup__bindgen_ty_2 { + pub tos: __u8, + pub flowinfo: __be32, + pub rt_metric: __u32, + _bindgen_union_align: u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_fib_lookup__bindgen_ty_3 { + pub ipv4_src: __be32, + pub ipv6_src: [__u32; 4usize], + _bindgen_union_align: [u32; 4usize], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_fib_lookup__bindgen_ty_4 { + pub ipv4_dst: __be32, + pub ipv6_dst: [__u32; 4usize], + _bindgen_union_align: [u32; 4usize], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_redir_neigh { + pub nh_family: __u32, + pub __bindgen_anon_1: bpf_redir_neigh__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_redir_neigh__bindgen_ty_1 { + pub ipv4_nh: __be32, + pub ipv6_nh: [__u32; 4usize], + _bindgen_union_align: [u32; 4usize], +} +pub const BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG: ::aya_bpf_cty::c_uint = 1; +pub const 
BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL: ::aya_bpf_cty::c_uint = 2; +pub const BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP: ::aya_bpf_cty::c_uint = 4; +pub type _bindgen_ty_35 = ::aya_bpf_cty::c_uint; +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_flow_keys { + pub nhoff: __u16, + pub thoff: __u16, + pub addr_proto: __u16, + pub is_frag: __u8, + pub is_first_frag: __u8, + pub is_encap: __u8, + pub ip_proto: __u8, + pub n_proto: __be16, + pub sport: __be16, + pub dport: __be16, + pub __bindgen_anon_1: bpf_flow_keys__bindgen_ty_1, + pub flags: __u32, + pub flow_label: __be32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_flow_keys__bindgen_ty_1 { + pub __bindgen_anon_1: bpf_flow_keys__bindgen_ty_1__bindgen_ty_1, + pub __bindgen_anon_2: bpf_flow_keys__bindgen_ty_1__bindgen_ty_2, + _bindgen_union_align: [u32; 8usize], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_flow_keys__bindgen_ty_1__bindgen_ty_1 { + pub ipv4_src: __be32, + pub ipv4_dst: __be32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_flow_keys__bindgen_ty_1__bindgen_ty_2 { + pub ipv6_src: [__u32; 4usize], + pub ipv6_dst: [__u32; 4usize], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_spin_lock { + pub val: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_sysctl { + pub write: __u32, + pub file_pos: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_pidns_info { + pub pid: __u32, + pub tgid: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct btf_ptr { + pub ptr: *mut ::aya_bpf_cty::c_void, + pub type_id: __u32, + pub flags: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct pt_regs { + pub r15: ::aya_bpf_cty::c_ulong, + pub r14: ::aya_bpf_cty::c_ulong, + pub r13: ::aya_bpf_cty::c_ulong, + pub r12: ::aya_bpf_cty::c_ulong, + pub rbp: ::aya_bpf_cty::c_ulong, + pub rbx: ::aya_bpf_cty::c_ulong, + pub r11: ::aya_bpf_cty::c_ulong, + pub r10: ::aya_bpf_cty::c_ulong, + pub r9: ::aya_bpf_cty::c_ulong, + pub r8: ::aya_bpf_cty::c_ulong, + pub rax: ::aya_bpf_cty::c_ulong, + pub rcx: ::aya_bpf_cty::c_ulong, + pub rdx: ::aya_bpf_cty::c_ulong, + pub rsi: ::aya_bpf_cty::c_ulong, + pub rdi: ::aya_bpf_cty::c_ulong, + pub orig_rax: ::aya_bpf_cty::c_ulong, + pub rip: ::aya_bpf_cty::c_ulong, + pub cs: ::aya_bpf_cty::c_ulong, + pub eflags: ::aya_bpf_cty::c_ulong, + pub rsp: ::aya_bpf_cty::c_ulong, + pub ss: ::aya_bpf_cty::c_ulong, +} +pub type sa_family_t = ::aya_bpf_cty::c_ushort; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct sockaddr { + pub sa_family: sa_family_t, + pub sa_data: [::aya_bpf_cty::c_char; 14usize], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_perf_event_data { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct linux_binprm { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct tcphdr { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct seq_file { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct tcp6_sock { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct tcp_sock { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct tcp_timewait_sock { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct tcp_request_sock { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct udp6_sock { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct task_struct { + _unused: [u8; 0], +} 
+#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct path { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct inode { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct socket { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct file { + _unused: [u8; 0], +} diff --git a/bpf/aya-bpf-bindings/src/armv7/getters.rs b/bpf/aya-bpf-bindings/src/armv7/getters.rs new file mode 100644 index 00000000..0bc7b54b --- /dev/null +++ b/bpf/aya-bpf-bindings/src/armv7/getters.rs @@ -0,0 +1,1146 @@ +use super::bindings::*; +impl<Storage> __BindgenBitfieldUnit<Storage> {} +impl __sk_buff { + pub fn len(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.len) }.ok() + } + pub fn pkt_type(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.pkt_type) }.ok() + } + pub fn mark(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.mark) }.ok() + } + pub fn queue_mapping(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.queue_mapping) }.ok() + } + pub fn protocol(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.protocol) }.ok() + } + pub fn vlan_present(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.vlan_present) }.ok() + } + pub fn vlan_tci(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.vlan_tci) }.ok() + } + pub fn vlan_proto(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.vlan_proto) }.ok() + } + pub fn priority(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.priority) }.ok() + } + pub fn ingress_ifindex(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.ingress_ifindex) }.ok() + } + pub fn ifindex(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.ifindex) }.ok() + } + pub fn tc_index(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.tc_index) }.ok() + } + pub fn cb(&self) -> Option<[__u32; 5usize]> { + unsafe { crate::bpf_probe_read(&self.cb) }.ok() + } + pub fn hash(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.hash) }.ok() + } + pub fn tc_classid(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.tc_classid) }.ok() + } + pub fn data(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.data) }.ok() + } + pub fn data_end(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.data_end) }.ok() + } + pub fn napi_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.napi_id) }.ok() + } + pub fn family(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.family) }.ok() + } + pub fn remote_ip4(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.remote_ip4) }.ok() + } + pub fn local_ip4(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.local_ip4) }.ok() + } + pub fn remote_ip6(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.remote_ip6) }.ok() + } + pub fn local_ip6(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.local_ip6) }.ok() + } + pub fn remote_port(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.remote_port) }.ok() + } + pub fn local_port(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.local_port) }.ok() + } + pub fn data_meta(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.data_meta) }.ok() + } + pub fn flow_keys(&self) -> Option<*mut bpf_flow_keys> { + let v = unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.flow_keys) }.ok()?; + if v.is_null() { + None 
+ } else { + Some(v) + } + } + pub fn tstamp(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.tstamp) }.ok() + } + pub fn wire_len(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.wire_len) }.ok() + } + pub fn gso_segs(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.gso_segs) }.ok() + } + pub fn sk(&self) -> Option<*mut bpf_sock> { + let v = unsafe { crate::bpf_probe_read(&self.__bindgen_anon_2.sk) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } + pub fn gso_size(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.gso_size) }.ok() + } +} +impl __sk_buff__bindgen_ty_1 { + pub fn flow_keys(&self) -> Option<*mut bpf_flow_keys> { + let v = unsafe { crate::bpf_probe_read(&self.flow_keys) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } +} +impl __sk_buff__bindgen_ty_2 { + pub fn sk(&self) -> Option<*mut bpf_sock> { + let v = unsafe { crate::bpf_probe_read(&self.sk) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } +} +impl bpf_tunnel_key { + pub fn tunnel_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.tunnel_id) }.ok() + } + pub fn remote_ipv4(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.remote_ipv4) }.ok() + } + pub fn remote_ipv6(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.remote_ipv6) }.ok() + } + pub fn tunnel_tos(&self) -> Option<__u8> { + unsafe { crate::bpf_probe_read(&self.tunnel_tos) }.ok() + } + pub fn tunnel_ttl(&self) -> Option<__u8> { + unsafe { crate::bpf_probe_read(&self.tunnel_ttl) }.ok() + } + pub fn tunnel_ext(&self) -> Option<__u16> { + unsafe { crate::bpf_probe_read(&self.tunnel_ext) }.ok() + } + pub fn tunnel_label(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.tunnel_label) }.ok() + } +} +impl bpf_tunnel_key__bindgen_ty_1 { + pub fn remote_ipv4(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.remote_ipv4) }.ok() + } + pub fn remote_ipv6(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.remote_ipv6) }.ok() + } +} +impl bpf_xfrm_state { + pub fn reqid(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.reqid) }.ok() + } + pub fn spi(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.spi) }.ok() + } + pub fn family(&self) -> Option<__u16> { + unsafe { crate::bpf_probe_read(&self.family) }.ok() + } + pub fn ext(&self) -> Option<__u16> { + unsafe { crate::bpf_probe_read(&self.ext) }.ok() + } + pub fn remote_ipv4(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.remote_ipv4) }.ok() + } + pub fn remote_ipv6(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.remote_ipv6) }.ok() + } +} +impl bpf_xfrm_state__bindgen_ty_1 { + pub fn remote_ipv4(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.remote_ipv4) }.ok() + } + pub fn remote_ipv6(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.remote_ipv6) }.ok() + } +} +impl bpf_sock { + pub fn bound_dev_if(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.bound_dev_if) }.ok() + } + pub fn family(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.family) }.ok() + } + pub fn type_(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.type_) }.ok() + } + pub fn protocol(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.protocol) }.ok() + } + pub fn mark(&self) -> Option<__u32> { + 
unsafe { crate::bpf_probe_read(&self.mark) }.ok() + } + pub fn priority(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.priority) }.ok() + } + pub fn src_ip4(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.src_ip4) }.ok() + } + pub fn src_ip6(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.src_ip6) }.ok() + } + pub fn src_port(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.src_port) }.ok() + } + pub fn dst_port(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.dst_port) }.ok() + } + pub fn dst_ip4(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.dst_ip4) }.ok() + } + pub fn dst_ip6(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.dst_ip6) }.ok() + } + pub fn state(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.state) }.ok() + } + pub fn rx_queue_mapping(&self) -> Option<__s32> { + unsafe { crate::bpf_probe_read(&self.rx_queue_mapping) }.ok() + } +} +impl bpf_tcp_sock { + pub fn snd_cwnd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.snd_cwnd) }.ok() + } + pub fn srtt_us(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.srtt_us) }.ok() + } + pub fn rtt_min(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.rtt_min) }.ok() + } + pub fn snd_ssthresh(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.snd_ssthresh) }.ok() + } + pub fn rcv_nxt(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.rcv_nxt) }.ok() + } + pub fn snd_nxt(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.snd_nxt) }.ok() + } + pub fn snd_una(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.snd_una) }.ok() + } + pub fn mss_cache(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.mss_cache) }.ok() + } + pub fn ecn_flags(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.ecn_flags) }.ok() + } + pub fn rate_delivered(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.rate_delivered) }.ok() + } + pub fn rate_interval_us(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.rate_interval_us) }.ok() + } + pub fn packets_out(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.packets_out) }.ok() + } + pub fn retrans_out(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.retrans_out) }.ok() + } + pub fn total_retrans(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.total_retrans) }.ok() + } + pub fn segs_in(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.segs_in) }.ok() + } + pub fn data_segs_in(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.data_segs_in) }.ok() + } + pub fn segs_out(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.segs_out) }.ok() + } + pub fn data_segs_out(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.data_segs_out) }.ok() + } + pub fn lost_out(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.lost_out) }.ok() + } + pub fn sacked_out(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.sacked_out) }.ok() + } + pub fn bytes_received(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.bytes_received) }.ok() + } + pub fn bytes_acked(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.bytes_acked) }.ok() + } + pub fn dsack_dups(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.dsack_dups) }.ok() + } + pub fn delivered(&self) -> 
Option<__u32> { + unsafe { crate::bpf_probe_read(&self.delivered) }.ok() + } + pub fn delivered_ce(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.delivered_ce) }.ok() + } + pub fn icsk_retransmits(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.icsk_retransmits) }.ok() + } +} +impl bpf_sock_tuple { + pub fn ipv4(&self) -> Option { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.ipv4) }.ok() + } + pub fn ipv6(&self) -> Option { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.ipv6) }.ok() + } +} +impl bpf_sock_tuple__bindgen_ty_1 { + pub fn ipv4(&self) -> Option { + unsafe { crate::bpf_probe_read(&self.ipv4) }.ok() + } + pub fn ipv6(&self) -> Option { + unsafe { crate::bpf_probe_read(&self.ipv6) }.ok() + } +} +impl bpf_sock_tuple__bindgen_ty_1__bindgen_ty_1 { + pub fn saddr(&self) -> Option<__be32> { + unsafe { crate::bpf_probe_read(&self.saddr) }.ok() + } + pub fn daddr(&self) -> Option<__be32> { + unsafe { crate::bpf_probe_read(&self.daddr) }.ok() + } + pub fn sport(&self) -> Option<__be16> { + unsafe { crate::bpf_probe_read(&self.sport) }.ok() + } + pub fn dport(&self) -> Option<__be16> { + unsafe { crate::bpf_probe_read(&self.dport) }.ok() + } +} +impl bpf_sock_tuple__bindgen_ty_1__bindgen_ty_2 { + pub fn saddr(&self) -> Option<[__be32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.saddr) }.ok() + } + pub fn daddr(&self) -> Option<[__be32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.daddr) }.ok() + } + pub fn sport(&self) -> Option<__be16> { + unsafe { crate::bpf_probe_read(&self.sport) }.ok() + } + pub fn dport(&self) -> Option<__be16> { + unsafe { crate::bpf_probe_read(&self.dport) }.ok() + } +} +impl xdp_md { + pub fn data(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.data) }.ok() + } + pub fn data_end(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.data_end) }.ok() + } + pub fn data_meta(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.data_meta) }.ok() + } + pub fn ingress_ifindex(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.ingress_ifindex) }.ok() + } + pub fn rx_queue_index(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.rx_queue_index) }.ok() + } + pub fn egress_ifindex(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.egress_ifindex) }.ok() + } +} +impl sk_msg_md { + pub fn data(&self) -> Option<*mut ::aya_bpf_cty::c_void> { + let v = unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.data) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } + pub fn data_end(&self) -> Option<*mut ::aya_bpf_cty::c_void> { + let v = unsafe { crate::bpf_probe_read(&self.__bindgen_anon_2.data_end) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } + pub fn family(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.family) }.ok() + } + pub fn remote_ip4(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.remote_ip4) }.ok() + } + pub fn local_ip4(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.local_ip4) }.ok() + } + pub fn remote_ip6(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.remote_ip6) }.ok() + } + pub fn local_ip6(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.local_ip6) }.ok() + } + pub fn remote_port(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.remote_port) }.ok() + } + pub fn local_port(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.local_port) }.ok() + } + pub fn 
size(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.size) }.ok() + } + pub fn sk(&self) -> Option<*mut bpf_sock> { + let v = unsafe { crate::bpf_probe_read(&self.__bindgen_anon_3.sk) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } +} +impl sk_msg_md__bindgen_ty_1 { + pub fn data(&self) -> Option<*mut ::aya_bpf_cty::c_void> { + let v = unsafe { crate::bpf_probe_read(&self.data) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } +} +impl sk_msg_md__bindgen_ty_2 { + pub fn data_end(&self) -> Option<*mut ::aya_bpf_cty::c_void> { + let v = unsafe { crate::bpf_probe_read(&self.data_end) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } +} +impl sk_msg_md__bindgen_ty_3 { + pub fn sk(&self) -> Option<*mut bpf_sock> { + let v = unsafe { crate::bpf_probe_read(&self.sk) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } +} +impl sk_reuseport_md { + pub fn data(&self) -> Option<*mut ::aya_bpf_cty::c_void> { + let v = unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.data) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } + pub fn data_end(&self) -> Option<*mut ::aya_bpf_cty::c_void> { + let v = unsafe { crate::bpf_probe_read(&self.__bindgen_anon_2.data_end) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } + pub fn len(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.len) }.ok() + } + pub fn eth_protocol(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.eth_protocol) }.ok() + } + pub fn ip_protocol(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.ip_protocol) }.ok() + } + pub fn bind_inany(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.bind_inany) }.ok() + } + pub fn hash(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.hash) }.ok() + } + pub fn sk(&self) -> Option<*mut bpf_sock> { + let v = unsafe { crate::bpf_probe_read(&self.__bindgen_anon_3.sk) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } + pub fn migrating_sk(&self) -> Option<*mut bpf_sock> { + let v = unsafe { crate::bpf_probe_read(&self.__bindgen_anon_4.migrating_sk) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } +} +impl sk_reuseport_md__bindgen_ty_1 { + pub fn data(&self) -> Option<*mut ::aya_bpf_cty::c_void> { + let v = unsafe { crate::bpf_probe_read(&self.data) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } +} +impl sk_reuseport_md__bindgen_ty_2 { + pub fn data_end(&self) -> Option<*mut ::aya_bpf_cty::c_void> { + let v = unsafe { crate::bpf_probe_read(&self.data_end) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } +} +impl sk_reuseport_md__bindgen_ty_3 { + pub fn sk(&self) -> Option<*mut bpf_sock> { + let v = unsafe { crate::bpf_probe_read(&self.sk) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } +} +impl sk_reuseport_md__bindgen_ty_4 { + pub fn migrating_sk(&self) -> Option<*mut bpf_sock> { + let v = unsafe { crate::bpf_probe_read(&self.migrating_sk) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } +} +impl bpf_map_info { + pub fn type_(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.type_) }.ok() + } + pub fn id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.id) }.ok() + } + pub fn key_size(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.key_size) }.ok() + } + pub fn value_size(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.value_size) }.ok() + } + pub fn max_entries(&self) -> 
Option<__u32> { + unsafe { crate::bpf_probe_read(&self.max_entries) }.ok() + } + pub fn map_flags(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.map_flags) }.ok() + } + pub fn name(&self) -> Option<[::aya_bpf_cty::c_char; 16usize]> { + unsafe { crate::bpf_probe_read(&self.name) }.ok() + } + pub fn ifindex(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.ifindex) }.ok() + } + pub fn btf_vmlinux_value_type_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.btf_vmlinux_value_type_id) }.ok() + } + pub fn netns_dev(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.netns_dev) }.ok() + } + pub fn netns_ino(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.netns_ino) }.ok() + } + pub fn btf_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.btf_id) }.ok() + } + pub fn btf_key_type_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.btf_key_type_id) }.ok() + } + pub fn btf_value_type_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.btf_value_type_id) }.ok() + } +} +impl bpf_sock_addr { + pub fn user_family(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.user_family) }.ok() + } + pub fn user_ip4(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.user_ip4) }.ok() + } + pub fn user_ip6(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.user_ip6) }.ok() + } + pub fn user_port(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.user_port) }.ok() + } + pub fn family(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.family) }.ok() + } + pub fn type_(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.type_) }.ok() + } + pub fn protocol(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.protocol) }.ok() + } + pub fn msg_src_ip4(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.msg_src_ip4) }.ok() + } + pub fn msg_src_ip6(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.msg_src_ip6) }.ok() + } + pub fn sk(&self) -> Option<*mut bpf_sock> { + let v = unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.sk) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } +} +impl bpf_sock_addr__bindgen_ty_1 { + pub fn sk(&self) -> Option<*mut bpf_sock> { + let v = unsafe { crate::bpf_probe_read(&self.sk) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } +} +impl bpf_sock_ops { + pub fn op(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.op) }.ok() + } + pub fn args(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.args) }.ok() + } + pub fn reply(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.reply) }.ok() + } + pub fn replylong(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.replylong) }.ok() + } + pub fn family(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.family) }.ok() + } + pub fn remote_ip4(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.remote_ip4) }.ok() + } + pub fn local_ip4(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.local_ip4) }.ok() + } + pub fn remote_ip6(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.remote_ip6) }.ok() + } + pub fn local_ip6(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.local_ip6) }.ok() + } + pub fn remote_port(&self) -> Option<__u32> { + 
unsafe { crate::bpf_probe_read(&self.remote_port) }.ok() + } + pub fn local_port(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.local_port) }.ok() + } + pub fn is_fullsock(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.is_fullsock) }.ok() + } + pub fn snd_cwnd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.snd_cwnd) }.ok() + } + pub fn srtt_us(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.srtt_us) }.ok() + } + pub fn bpf_sock_ops_cb_flags(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.bpf_sock_ops_cb_flags) }.ok() + } + pub fn state(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.state) }.ok() + } + pub fn rtt_min(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.rtt_min) }.ok() + } + pub fn snd_ssthresh(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.snd_ssthresh) }.ok() + } + pub fn rcv_nxt(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.rcv_nxt) }.ok() + } + pub fn snd_nxt(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.snd_nxt) }.ok() + } + pub fn snd_una(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.snd_una) }.ok() + } + pub fn mss_cache(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.mss_cache) }.ok() + } + pub fn ecn_flags(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.ecn_flags) }.ok() + } + pub fn rate_delivered(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.rate_delivered) }.ok() + } + pub fn rate_interval_us(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.rate_interval_us) }.ok() + } + pub fn packets_out(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.packets_out) }.ok() + } + pub fn retrans_out(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.retrans_out) }.ok() + } + pub fn total_retrans(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.total_retrans) }.ok() + } + pub fn segs_in(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.segs_in) }.ok() + } + pub fn data_segs_in(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.data_segs_in) }.ok() + } + pub fn segs_out(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.segs_out) }.ok() + } + pub fn data_segs_out(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.data_segs_out) }.ok() + } + pub fn lost_out(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.lost_out) }.ok() + } + pub fn sacked_out(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.sacked_out) }.ok() + } + pub fn sk_txhash(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.sk_txhash) }.ok() + } + pub fn bytes_received(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.bytes_received) }.ok() + } + pub fn bytes_acked(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.bytes_acked) }.ok() + } + pub fn sk(&self) -> Option<*mut bpf_sock> { + let v = unsafe { crate::bpf_probe_read(&self.__bindgen_anon_2.sk) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } + pub fn skb_data(&self) -> Option<*mut ::aya_bpf_cty::c_void> { + let v = unsafe { crate::bpf_probe_read(&self.__bindgen_anon_3.skb_data) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } + pub fn skb_data_end(&self) -> Option<*mut ::aya_bpf_cty::c_void> { + let v = unsafe { crate::bpf_probe_read(&self.__bindgen_anon_4.skb_data_end) }.ok()?; + if v.is_null() { + None 
+ } else { + Some(v) + } + } + pub fn skb_len(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.skb_len) }.ok() + } + pub fn skb_tcp_flags(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.skb_tcp_flags) }.ok() + } +} +impl bpf_sock_ops__bindgen_ty_1 { + pub fn args(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.args) }.ok() + } + pub fn reply(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.reply) }.ok() + } + pub fn replylong(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.replylong) }.ok() + } +} +impl bpf_sock_ops__bindgen_ty_2 { + pub fn sk(&self) -> Option<*mut bpf_sock> { + let v = unsafe { crate::bpf_probe_read(&self.sk) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } +} +impl bpf_sock_ops__bindgen_ty_3 { + pub fn skb_data(&self) -> Option<*mut ::aya_bpf_cty::c_void> { + let v = unsafe { crate::bpf_probe_read(&self.skb_data) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } +} +impl bpf_sock_ops__bindgen_ty_4 { + pub fn skb_data_end(&self) -> Option<*mut ::aya_bpf_cty::c_void> { + let v = unsafe { crate::bpf_probe_read(&self.skb_data_end) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } +} +impl bpf_perf_event_value { + pub fn counter(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.counter) }.ok() + } + pub fn enabled(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.enabled) }.ok() + } + pub fn running(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.running) }.ok() + } +} +impl bpf_fib_lookup { + pub fn family(&self) -> Option<__u8> { + unsafe { crate::bpf_probe_read(&self.family) }.ok() + } + pub fn l4_protocol(&self) -> Option<__u8> { + unsafe { crate::bpf_probe_read(&self.l4_protocol) }.ok() + } + pub fn sport(&self) -> Option<__be16> { + unsafe { crate::bpf_probe_read(&self.sport) }.ok() + } + pub fn dport(&self) -> Option<__be16> { + unsafe { crate::bpf_probe_read(&self.dport) }.ok() + } + pub fn tot_len(&self) -> Option<__u16> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.tot_len) }.ok() + } + pub fn mtu_result(&self) -> Option<__u16> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.mtu_result) }.ok() + } + pub fn ifindex(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.ifindex) }.ok() + } + pub fn tos(&self) -> Option<__u8> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_2.tos) }.ok() + } + pub fn flowinfo(&self) -> Option<__be32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_2.flowinfo) }.ok() + } + pub fn rt_metric(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_2.rt_metric) }.ok() + } + pub fn ipv4_src(&self) -> Option<__be32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_3.ipv4_src) }.ok() + } + pub fn ipv6_src(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_3.ipv6_src) }.ok() + } + pub fn ipv4_dst(&self) -> Option<__be32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_4.ipv4_dst) }.ok() + } + pub fn ipv6_dst(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_4.ipv6_dst) }.ok() + } + pub fn h_vlan_proto(&self) -> Option<__be16> { + unsafe { crate::bpf_probe_read(&self.h_vlan_proto) }.ok() + } + pub fn h_vlan_TCI(&self) -> Option<__be16> { + unsafe { crate::bpf_probe_read(&self.h_vlan_TCI) }.ok() + } + pub fn smac(&self) -> Option<[__u8; 6usize]> { + unsafe { crate::bpf_probe_read(&self.smac) 
}.ok() + } + pub fn dmac(&self) -> Option<[__u8; 6usize]> { + unsafe { crate::bpf_probe_read(&self.dmac) }.ok() + } +} +impl bpf_fib_lookup__bindgen_ty_1 { + pub fn tot_len(&self) -> Option<__u16> { + unsafe { crate::bpf_probe_read(&self.tot_len) }.ok() + } + pub fn mtu_result(&self) -> Option<__u16> { + unsafe { crate::bpf_probe_read(&self.mtu_result) }.ok() + } +} +impl bpf_fib_lookup__bindgen_ty_2 { + pub fn tos(&self) -> Option<__u8> { + unsafe { crate::bpf_probe_read(&self.tos) }.ok() + } + pub fn flowinfo(&self) -> Option<__be32> { + unsafe { crate::bpf_probe_read(&self.flowinfo) }.ok() + } + pub fn rt_metric(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.rt_metric) }.ok() + } +} +impl bpf_fib_lookup__bindgen_ty_3 { + pub fn ipv4_src(&self) -> Option<__be32> { + unsafe { crate::bpf_probe_read(&self.ipv4_src) }.ok() + } + pub fn ipv6_src(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.ipv6_src) }.ok() + } +} +impl bpf_fib_lookup__bindgen_ty_4 { + pub fn ipv4_dst(&self) -> Option<__be32> { + unsafe { crate::bpf_probe_read(&self.ipv4_dst) }.ok() + } + pub fn ipv6_dst(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.ipv6_dst) }.ok() + } +} +impl bpf_redir_neigh { + pub fn nh_family(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.nh_family) }.ok() + } + pub fn ipv4_nh(&self) -> Option<__be32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.ipv4_nh) }.ok() + } + pub fn ipv6_nh(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.ipv6_nh) }.ok() + } +} +impl bpf_redir_neigh__bindgen_ty_1 { + pub fn ipv4_nh(&self) -> Option<__be32> { + unsafe { crate::bpf_probe_read(&self.ipv4_nh) }.ok() + } + pub fn ipv6_nh(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.ipv6_nh) }.ok() + } +} +impl bpf_flow_keys { + pub fn nhoff(&self) -> Option<__u16> { + unsafe { crate::bpf_probe_read(&self.nhoff) }.ok() + } + pub fn thoff(&self) -> Option<__u16> { + unsafe { crate::bpf_probe_read(&self.thoff) }.ok() + } + pub fn addr_proto(&self) -> Option<__u16> { + unsafe { crate::bpf_probe_read(&self.addr_proto) }.ok() + } + pub fn is_frag(&self) -> Option<__u8> { + unsafe { crate::bpf_probe_read(&self.is_frag) }.ok() + } + pub fn is_first_frag(&self) -> Option<__u8> { + unsafe { crate::bpf_probe_read(&self.is_first_frag) }.ok() + } + pub fn is_encap(&self) -> Option<__u8> { + unsafe { crate::bpf_probe_read(&self.is_encap) }.ok() + } + pub fn ip_proto(&self) -> Option<__u8> { + unsafe { crate::bpf_probe_read(&self.ip_proto) }.ok() + } + pub fn n_proto(&self) -> Option<__be16> { + unsafe { crate::bpf_probe_read(&self.n_proto) }.ok() + } + pub fn sport(&self) -> Option<__be16> { + unsafe { crate::bpf_probe_read(&self.sport) }.ok() + } + pub fn dport(&self) -> Option<__be16> { + unsafe { crate::bpf_probe_read(&self.dport) }.ok() + } + pub fn ipv4_src(&self) -> Option<__be32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.__bindgen_anon_1.ipv4_src) }.ok() + } + pub fn ipv4_dst(&self) -> Option<__be32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.__bindgen_anon_1.ipv4_dst) }.ok() + } + pub fn ipv6_src(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.__bindgen_anon_2.ipv6_src) }.ok() + } + pub fn ipv6_dst(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.__bindgen_anon_2.ipv6_dst) }.ok() + } + pub fn flags(&self) -> Option<__u32> { + unsafe { 
crate::bpf_probe_read(&self.flags) }.ok() + } + pub fn flow_label(&self) -> Option<__be32> { + unsafe { crate::bpf_probe_read(&self.flow_label) }.ok() + } +} +impl bpf_flow_keys__bindgen_ty_1 { + pub fn ipv4_src(&self) -> Option<__be32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.ipv4_src) }.ok() + } + pub fn ipv4_dst(&self) -> Option<__be32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.ipv4_dst) }.ok() + } + pub fn ipv6_src(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_2.ipv6_src) }.ok() + } + pub fn ipv6_dst(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_2.ipv6_dst) }.ok() + } +} +impl bpf_flow_keys__bindgen_ty_1__bindgen_ty_1 { + pub fn ipv4_src(&self) -> Option<__be32> { + unsafe { crate::bpf_probe_read(&self.ipv4_src) }.ok() + } + pub fn ipv4_dst(&self) -> Option<__be32> { + unsafe { crate::bpf_probe_read(&self.ipv4_dst) }.ok() + } +} +impl bpf_flow_keys__bindgen_ty_1__bindgen_ty_2 { + pub fn ipv6_src(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.ipv6_src) }.ok() + } + pub fn ipv6_dst(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.ipv6_dst) }.ok() + } +} +impl bpf_spin_lock { + pub fn val(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.val) }.ok() + } +} +impl bpf_sysctl { + pub fn write(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.write) }.ok() + } + pub fn file_pos(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.file_pos) }.ok() + } +} +impl bpf_pidns_info { + pub fn pid(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.pid) }.ok() + } + pub fn tgid(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.tgid) }.ok() + } +} +impl btf_ptr { + pub fn ptr(&self) -> Option<*mut ::aya_bpf_cty::c_void> { + let v = unsafe { crate::bpf_probe_read(&self.ptr) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } + pub fn type_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.type_id) }.ok() + } + pub fn flags(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.flags) }.ok() + } +} +impl pt_regs { + pub fn r15(&self) -> Option<::aya_bpf_cty::c_ulong> { + unsafe { crate::bpf_probe_read(&self.r15) }.ok() + } + pub fn r14(&self) -> Option<::aya_bpf_cty::c_ulong> { + unsafe { crate::bpf_probe_read(&self.r14) }.ok() + } + pub fn r13(&self) -> Option<::aya_bpf_cty::c_ulong> { + unsafe { crate::bpf_probe_read(&self.r13) }.ok() + } + pub fn r12(&self) -> Option<::aya_bpf_cty::c_ulong> { + unsafe { crate::bpf_probe_read(&self.r12) }.ok() + } + pub fn rbp(&self) -> Option<::aya_bpf_cty::c_ulong> { + unsafe { crate::bpf_probe_read(&self.rbp) }.ok() + } + pub fn rbx(&self) -> Option<::aya_bpf_cty::c_ulong> { + unsafe { crate::bpf_probe_read(&self.rbx) }.ok() + } + pub fn r11(&self) -> Option<::aya_bpf_cty::c_ulong> { + unsafe { crate::bpf_probe_read(&self.r11) }.ok() + } + pub fn r10(&self) -> Option<::aya_bpf_cty::c_ulong> { + unsafe { crate::bpf_probe_read(&self.r10) }.ok() + } + pub fn r9(&self) -> Option<::aya_bpf_cty::c_ulong> { + unsafe { crate::bpf_probe_read(&self.r9) }.ok() + } + pub fn r8(&self) -> Option<::aya_bpf_cty::c_ulong> { + unsafe { crate::bpf_probe_read(&self.r8) }.ok() + } + pub fn rax(&self) -> Option<::aya_bpf_cty::c_ulong> { + unsafe { crate::bpf_probe_read(&self.rax) }.ok() + } + pub fn rcx(&self) -> Option<::aya_bpf_cty::c_ulong> { + unsafe { crate::bpf_probe_read(&self.rcx) }.ok() + } + pub fn 
rdx(&self) -> Option<::aya_bpf_cty::c_ulong> { + unsafe { crate::bpf_probe_read(&self.rdx) }.ok() + } + pub fn rsi(&self) -> Option<::aya_bpf_cty::c_ulong> { + unsafe { crate::bpf_probe_read(&self.rsi) }.ok() + } + pub fn rdi(&self) -> Option<::aya_bpf_cty::c_ulong> { + unsafe { crate::bpf_probe_read(&self.rdi) }.ok() + } + pub fn orig_rax(&self) -> Option<::aya_bpf_cty::c_ulong> { + unsafe { crate::bpf_probe_read(&self.orig_rax) }.ok() + } + pub fn rip(&self) -> Option<::aya_bpf_cty::c_ulong> { + unsafe { crate::bpf_probe_read(&self.rip) }.ok() + } + pub fn cs(&self) -> Option<::aya_bpf_cty::c_ulong> { + unsafe { crate::bpf_probe_read(&self.cs) }.ok() + } + pub fn eflags(&self) -> Option<::aya_bpf_cty::c_ulong> { + unsafe { crate::bpf_probe_read(&self.eflags) }.ok() + } + pub fn rsp(&self) -> Option<::aya_bpf_cty::c_ulong> { + unsafe { crate::bpf_probe_read(&self.rsp) }.ok() + } + pub fn ss(&self) -> Option<::aya_bpf_cty::c_ulong> { + unsafe { crate::bpf_probe_read(&self.ss) }.ok() + } +} +impl sockaddr { + pub fn sa_family(&self) -> Option { + unsafe { crate::bpf_probe_read(&self.sa_family) }.ok() + } + pub fn sa_data(&self) -> Option<[::aya_bpf_cty::c_char; 14usize]> { + unsafe { crate::bpf_probe_read(&self.sa_data) }.ok() + } +} +impl bpf_perf_event_data {} +impl linux_binprm {} +impl tcphdr {} +impl seq_file {} +impl tcp6_sock {} +impl tcp_sock {} +impl tcp_timewait_sock {} +impl tcp_request_sock {} +impl udp6_sock {} +impl task_struct {} +impl path {} +impl inode {} +impl socket {} +impl file {} diff --git a/bpf/aya-bpf-bindings/src/armv7/helpers.rs b/bpf/aya-bpf-bindings/src/armv7/helpers.rs new file mode 100644 index 00000000..01c5600e --- /dev/null +++ b/bpf/aya-bpf-bindings/src/armv7/helpers.rs @@ -0,0 +1,1712 @@ +use super::bindings::*; +pub unsafe fn bpf_map_lookup_elem( + map: *mut ::aya_bpf_cty::c_void, + key: *const ::aya_bpf_cty::c_void, +) -> *mut ::aya_bpf_cty::c_void { + let fun: unsafe extern "C" fn( + map: *mut ::aya_bpf_cty::c_void, + key: *const ::aya_bpf_cty::c_void, + ) -> *mut ::aya_bpf_cty::c_void = ::core::mem::transmute(1usize); + fun(map, key) +} +pub unsafe fn bpf_map_update_elem( + map: *mut ::aya_bpf_cty::c_void, + key: *const ::aya_bpf_cty::c_void, + value: *const ::aya_bpf_cty::c_void, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + map: *mut ::aya_bpf_cty::c_void, + key: *const ::aya_bpf_cty::c_void, + value: *const ::aya_bpf_cty::c_void, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(2usize); + fun(map, key, value, flags) +} +pub unsafe fn bpf_map_delete_elem( + map: *mut ::aya_bpf_cty::c_void, + key: *const ::aya_bpf_cty::c_void, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + map: *mut ::aya_bpf_cty::c_void, + key: *const ::aya_bpf_cty::c_void, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(3usize); + fun(map, key) +} +pub unsafe fn bpf_probe_read( + dst: *mut ::aya_bpf_cty::c_void, + size: __u32, + unsafe_ptr: *const ::aya_bpf_cty::c_void, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + dst: *mut ::aya_bpf_cty::c_void, + size: __u32, + unsafe_ptr: *const ::aya_bpf_cty::c_void, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(4usize); + fun(dst, size, unsafe_ptr) +} +pub unsafe fn bpf_ktime_get_ns() -> __u64 { + let fun: unsafe extern "C" fn() -> __u64 = ::core::mem::transmute(5usize); + fun() +} +pub unsafe fn bpf_get_prandom_u32() -> __u32 { + let fun: unsafe extern "C" fn() -> __u32 = ::core::mem::transmute(7usize); + fun() +} +pub unsafe fn 
bpf_get_smp_processor_id() -> __u32 { + let fun: unsafe extern "C" fn() -> __u32 = ::core::mem::transmute(8usize); + fun() +} +pub unsafe fn bpf_skb_store_bytes( + skb: *mut __sk_buff, + offset: __u32, + from: *const ::aya_bpf_cty::c_void, + len: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + offset: __u32, + from: *const ::aya_bpf_cty::c_void, + len: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(9usize); + fun(skb, offset, from, len, flags) +} +pub unsafe fn bpf_l3_csum_replace( + skb: *mut __sk_buff, + offset: __u32, + from: __u64, + to: __u64, + size: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + offset: __u32, + from: __u64, + to: __u64, + size: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(10usize); + fun(skb, offset, from, to, size) +} +pub unsafe fn bpf_l4_csum_replace( + skb: *mut __sk_buff, + offset: __u32, + from: __u64, + to: __u64, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + offset: __u32, + from: __u64, + to: __u64, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(11usize); + fun(skb, offset, from, to, flags) +} +pub unsafe fn bpf_tail_call( + ctx: *mut ::aya_bpf_cty::c_void, + prog_array_map: *mut ::aya_bpf_cty::c_void, + index: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut ::aya_bpf_cty::c_void, + prog_array_map: *mut ::aya_bpf_cty::c_void, + index: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(12usize); + fun(ctx, prog_array_map, index) +} +pub unsafe fn bpf_clone_redirect( + skb: *mut __sk_buff, + ifindex: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + ifindex: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(13usize); + fun(skb, ifindex, flags) +} +pub unsafe fn bpf_get_current_pid_tgid() -> __u64 { + let fun: unsafe extern "C" fn() -> __u64 = ::core::mem::transmute(14usize); + fun() +} +pub unsafe fn bpf_get_current_uid_gid() -> __u64 { + let fun: unsafe extern "C" fn() -> __u64 = ::core::mem::transmute(15usize); + fun() +} +pub unsafe fn bpf_get_current_comm( + buf: *mut ::aya_bpf_cty::c_void, + size_of_buf: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + buf: *mut ::aya_bpf_cty::c_void, + size_of_buf: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(16usize); + fun(buf, size_of_buf) +} +pub unsafe fn bpf_get_cgroup_classid(skb: *mut __sk_buff) -> __u32 { + let fun: unsafe extern "C" fn(skb: *mut __sk_buff) -> __u32 = ::core::mem::transmute(17usize); + fun(skb) +} +pub unsafe fn bpf_skb_vlan_push( + skb: *mut __sk_buff, + vlan_proto: __be16, + vlan_tci: __u16, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + vlan_proto: __be16, + vlan_tci: __u16, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(18usize); + fun(skb, vlan_proto, vlan_tci) +} +pub unsafe fn bpf_skb_vlan_pop(skb: *mut __sk_buff) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(skb: *mut __sk_buff) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(19usize); + fun(skb) +} +pub unsafe fn bpf_skb_get_tunnel_key( + skb: *mut __sk_buff, + key: *mut bpf_tunnel_key, + size: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + key: *mut bpf_tunnel_key, + size: __u32, + flags: __u64, + ) -> 
::aya_bpf_cty::c_long = ::core::mem::transmute(20usize); + fun(skb, key, size, flags) +} +pub unsafe fn bpf_skb_set_tunnel_key( + skb: *mut __sk_buff, + key: *mut bpf_tunnel_key, + size: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + key: *mut bpf_tunnel_key, + size: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(21usize); + fun(skb, key, size, flags) +} +pub unsafe fn bpf_perf_event_read(map: *mut ::aya_bpf_cty::c_void, flags: __u64) -> __u64 { + let fun: unsafe extern "C" fn(map: *mut ::aya_bpf_cty::c_void, flags: __u64) -> __u64 = + ::core::mem::transmute(22usize); + fun(map, flags) +} +pub unsafe fn bpf_redirect(ifindex: __u32, flags: __u64) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(ifindex: __u32, flags: __u64) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(23usize); + fun(ifindex, flags) +} +pub unsafe fn bpf_get_route_realm(skb: *mut __sk_buff) -> __u32 { + let fun: unsafe extern "C" fn(skb: *mut __sk_buff) -> __u32 = ::core::mem::transmute(24usize); + fun(skb) +} +pub unsafe fn bpf_perf_event_output( + ctx: *mut ::aya_bpf_cty::c_void, + map: *mut ::aya_bpf_cty::c_void, + flags: __u64, + data: *mut ::aya_bpf_cty::c_void, + size: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut ::aya_bpf_cty::c_void, + map: *mut ::aya_bpf_cty::c_void, + flags: __u64, + data: *mut ::aya_bpf_cty::c_void, + size: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(25usize); + fun(ctx, map, flags, data, size) +} +pub unsafe fn bpf_skb_load_bytes( + skb: *const ::aya_bpf_cty::c_void, + offset: __u32, + to: *mut ::aya_bpf_cty::c_void, + len: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *const ::aya_bpf_cty::c_void, + offset: __u32, + to: *mut ::aya_bpf_cty::c_void, + len: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(26usize); + fun(skb, offset, to, len) +} +pub unsafe fn bpf_get_stackid( + ctx: *mut ::aya_bpf_cty::c_void, + map: *mut ::aya_bpf_cty::c_void, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut ::aya_bpf_cty::c_void, + map: *mut ::aya_bpf_cty::c_void, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(27usize); + fun(ctx, map, flags) +} +pub unsafe fn bpf_csum_diff( + from: *mut __be32, + from_size: __u32, + to: *mut __be32, + to_size: __u32, + seed: __wsum, +) -> __s64 { + let fun: unsafe extern "C" fn( + from: *mut __be32, + from_size: __u32, + to: *mut __be32, + to_size: __u32, + seed: __wsum, + ) -> __s64 = ::core::mem::transmute(28usize); + fun(from, from_size, to, to_size, seed) +} +pub unsafe fn bpf_skb_get_tunnel_opt( + skb: *mut __sk_buff, + opt: *mut ::aya_bpf_cty::c_void, + size: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + opt: *mut ::aya_bpf_cty::c_void, + size: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(29usize); + fun(skb, opt, size) +} +pub unsafe fn bpf_skb_set_tunnel_opt( + skb: *mut __sk_buff, + opt: *mut ::aya_bpf_cty::c_void, + size: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + opt: *mut ::aya_bpf_cty::c_void, + size: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(30usize); + fun(skb, opt, size) +} +pub unsafe fn bpf_skb_change_proto( + skb: *mut __sk_buff, + proto: __be16, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + proto: 
__be16, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(31usize); + fun(skb, proto, flags) +} +pub unsafe fn bpf_skb_change_type(skb: *mut __sk_buff, type_: __u32) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(skb: *mut __sk_buff, type_: __u32) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(32usize); + fun(skb, type_) +} +pub unsafe fn bpf_skb_under_cgroup( + skb: *mut __sk_buff, + map: *mut ::aya_bpf_cty::c_void, + index: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + map: *mut ::aya_bpf_cty::c_void, + index: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(33usize); + fun(skb, map, index) +} +pub unsafe fn bpf_get_hash_recalc(skb: *mut __sk_buff) -> __u32 { + let fun: unsafe extern "C" fn(skb: *mut __sk_buff) -> __u32 = ::core::mem::transmute(34usize); + fun(skb) +} +pub unsafe fn bpf_get_current_task() -> __u64 { + let fun: unsafe extern "C" fn() -> __u64 = ::core::mem::transmute(35usize); + fun() +} +pub unsafe fn bpf_probe_write_user( + dst: *mut ::aya_bpf_cty::c_void, + src: *const ::aya_bpf_cty::c_void, + len: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + dst: *mut ::aya_bpf_cty::c_void, + src: *const ::aya_bpf_cty::c_void, + len: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(36usize); + fun(dst, src, len) +} +pub unsafe fn bpf_current_task_under_cgroup( + map: *mut ::aya_bpf_cty::c_void, + index: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + map: *mut ::aya_bpf_cty::c_void, + index: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(37usize); + fun(map, index) +} +pub unsafe fn bpf_skb_change_tail( + skb: *mut __sk_buff, + len: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + len: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(38usize); + fun(skb, len, flags) +} +pub unsafe fn bpf_skb_pull_data(skb: *mut __sk_buff, len: __u32) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(skb: *mut __sk_buff, len: __u32) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(39usize); + fun(skb, len) +} +pub unsafe fn bpf_csum_update(skb: *mut __sk_buff, csum: __wsum) -> __s64 { + let fun: unsafe extern "C" fn(skb: *mut __sk_buff, csum: __wsum) -> __s64 = + ::core::mem::transmute(40usize); + fun(skb, csum) +} +pub unsafe fn bpf_set_hash_invalid(skb: *mut __sk_buff) { + let fun: unsafe extern "C" fn(skb: *mut __sk_buff) = ::core::mem::transmute(41usize); + fun(skb) +} +pub unsafe fn bpf_get_numa_node_id() -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn() -> ::aya_bpf_cty::c_long = ::core::mem::transmute(42usize); + fun() +} +pub unsafe fn bpf_skb_change_head( + skb: *mut __sk_buff, + len: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + len: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(43usize); + fun(skb, len, flags) +} +pub unsafe fn bpf_xdp_adjust_head( + xdp_md: *mut xdp_md, + delta: ::aya_bpf_cty::c_int, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + xdp_md: *mut xdp_md, + delta: ::aya_bpf_cty::c_int, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(44usize); + fun(xdp_md, delta) +} +pub unsafe fn bpf_probe_read_str( + dst: *mut ::aya_bpf_cty::c_void, + size: __u32, + unsafe_ptr: *const ::aya_bpf_cty::c_void, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + dst: *mut 
::aya_bpf_cty::c_void, + size: __u32, + unsafe_ptr: *const ::aya_bpf_cty::c_void, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(45usize); + fun(dst, size, unsafe_ptr) +} +pub unsafe fn bpf_get_socket_cookie(ctx: *mut ::aya_bpf_cty::c_void) -> __u64 { + let fun: unsafe extern "C" fn(ctx: *mut ::aya_bpf_cty::c_void) -> __u64 = + ::core::mem::transmute(46usize); + fun(ctx) +} +pub unsafe fn bpf_get_socket_uid(skb: *mut __sk_buff) -> __u32 { + let fun: unsafe extern "C" fn(skb: *mut __sk_buff) -> __u32 = ::core::mem::transmute(47usize); + fun(skb) +} +pub unsafe fn bpf_set_hash(skb: *mut __sk_buff, hash: __u32) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(skb: *mut __sk_buff, hash: __u32) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(48usize); + fun(skb, hash) +} +pub unsafe fn bpf_setsockopt( + bpf_socket: *mut ::aya_bpf_cty::c_void, + level: ::aya_bpf_cty::c_int, + optname: ::aya_bpf_cty::c_int, + optval: *mut ::aya_bpf_cty::c_void, + optlen: ::aya_bpf_cty::c_int, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + bpf_socket: *mut ::aya_bpf_cty::c_void, + level: ::aya_bpf_cty::c_int, + optname: ::aya_bpf_cty::c_int, + optval: *mut ::aya_bpf_cty::c_void, + optlen: ::aya_bpf_cty::c_int, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(49usize); + fun(bpf_socket, level, optname, optval, optlen) +} +pub unsafe fn bpf_skb_adjust_room( + skb: *mut __sk_buff, + len_diff: __s32, + mode: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + len_diff: __s32, + mode: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(50usize); + fun(skb, len_diff, mode, flags) +} +pub unsafe fn bpf_redirect_map( + map: *mut ::aya_bpf_cty::c_void, + key: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + map: *mut ::aya_bpf_cty::c_void, + key: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(51usize); + fun(map, key, flags) +} +pub unsafe fn bpf_sk_redirect_map( + skb: *mut __sk_buff, + map: *mut ::aya_bpf_cty::c_void, + key: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + map: *mut ::aya_bpf_cty::c_void, + key: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(52usize); + fun(skb, map, key, flags) +} +pub unsafe fn bpf_sock_map_update( + skops: *mut bpf_sock_ops, + map: *mut ::aya_bpf_cty::c_void, + key: *mut ::aya_bpf_cty::c_void, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skops: *mut bpf_sock_ops, + map: *mut ::aya_bpf_cty::c_void, + key: *mut ::aya_bpf_cty::c_void, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(53usize); + fun(skops, map, key, flags) +} +pub unsafe fn bpf_xdp_adjust_meta( + xdp_md: *mut xdp_md, + delta: ::aya_bpf_cty::c_int, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + xdp_md: *mut xdp_md, + delta: ::aya_bpf_cty::c_int, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(54usize); + fun(xdp_md, delta) +} +pub unsafe fn bpf_perf_event_read_value( + map: *mut ::aya_bpf_cty::c_void, + flags: __u64, + buf: *mut bpf_perf_event_value, + buf_size: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + map: *mut ::aya_bpf_cty::c_void, + flags: __u64, + buf: *mut bpf_perf_event_value, + buf_size: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(55usize); + fun(map, flags, buf, buf_size) +} +pub unsafe fn 
bpf_perf_prog_read_value( + ctx: *mut bpf_perf_event_data, + buf: *mut bpf_perf_event_value, + buf_size: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut bpf_perf_event_data, + buf: *mut bpf_perf_event_value, + buf_size: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(56usize); + fun(ctx, buf, buf_size) +} +pub unsafe fn bpf_getsockopt( + bpf_socket: *mut ::aya_bpf_cty::c_void, + level: ::aya_bpf_cty::c_int, + optname: ::aya_bpf_cty::c_int, + optval: *mut ::aya_bpf_cty::c_void, + optlen: ::aya_bpf_cty::c_int, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + bpf_socket: *mut ::aya_bpf_cty::c_void, + level: ::aya_bpf_cty::c_int, + optname: ::aya_bpf_cty::c_int, + optval: *mut ::aya_bpf_cty::c_void, + optlen: ::aya_bpf_cty::c_int, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(57usize); + fun(bpf_socket, level, optname, optval, optlen) +} +pub unsafe fn bpf_override_return(regs: *mut pt_regs, rc: __u64) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(regs: *mut pt_regs, rc: __u64) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(58usize); + fun(regs, rc) +} +pub unsafe fn bpf_sock_ops_cb_flags_set( + bpf_sock: *mut bpf_sock_ops, + argval: ::aya_bpf_cty::c_int, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + bpf_sock: *mut bpf_sock_ops, + argval: ::aya_bpf_cty::c_int, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(59usize); + fun(bpf_sock, argval) +} +pub unsafe fn bpf_msg_redirect_map( + msg: *mut sk_msg_md, + map: *mut ::aya_bpf_cty::c_void, + key: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + msg: *mut sk_msg_md, + map: *mut ::aya_bpf_cty::c_void, + key: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(60usize); + fun(msg, map, key, flags) +} +pub unsafe fn bpf_msg_apply_bytes(msg: *mut sk_msg_md, bytes: __u32) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(msg: *mut sk_msg_md, bytes: __u32) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(61usize); + fun(msg, bytes) +} +pub unsafe fn bpf_msg_cork_bytes(msg: *mut sk_msg_md, bytes: __u32) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(msg: *mut sk_msg_md, bytes: __u32) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(62usize); + fun(msg, bytes) +} +pub unsafe fn bpf_msg_pull_data( + msg: *mut sk_msg_md, + start: __u32, + end: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + msg: *mut sk_msg_md, + start: __u32, + end: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(63usize); + fun(msg, start, end, flags) +} +pub unsafe fn bpf_bind( + ctx: *mut bpf_sock_addr, + addr: *mut sockaddr, + addr_len: ::aya_bpf_cty::c_int, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut bpf_sock_addr, + addr: *mut sockaddr, + addr_len: ::aya_bpf_cty::c_int, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(64usize); + fun(ctx, addr, addr_len) +} +pub unsafe fn bpf_xdp_adjust_tail( + xdp_md: *mut xdp_md, + delta: ::aya_bpf_cty::c_int, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + xdp_md: *mut xdp_md, + delta: ::aya_bpf_cty::c_int, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(65usize); + fun(xdp_md, delta) +} +pub unsafe fn bpf_skb_get_xfrm_state( + skb: *mut __sk_buff, + index: __u32, + xfrm_state: *mut bpf_xfrm_state, + size: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + 
index: __u32, + xfrm_state: *mut bpf_xfrm_state, + size: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(66usize); + fun(skb, index, xfrm_state, size, flags) +} +pub unsafe fn bpf_get_stack( + ctx: *mut ::aya_bpf_cty::c_void, + buf: *mut ::aya_bpf_cty::c_void, + size: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut ::aya_bpf_cty::c_void, + buf: *mut ::aya_bpf_cty::c_void, + size: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(67usize); + fun(ctx, buf, size, flags) +} +pub unsafe fn bpf_skb_load_bytes_relative( + skb: *const ::aya_bpf_cty::c_void, + offset: __u32, + to: *mut ::aya_bpf_cty::c_void, + len: __u32, + start_header: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *const ::aya_bpf_cty::c_void, + offset: __u32, + to: *mut ::aya_bpf_cty::c_void, + len: __u32, + start_header: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(68usize); + fun(skb, offset, to, len, start_header) +} +pub unsafe fn bpf_fib_lookup( + ctx: *mut ::aya_bpf_cty::c_void, + params: *mut bpf_fib_lookup, + plen: ::aya_bpf_cty::c_int, + flags: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut ::aya_bpf_cty::c_void, + params: *mut bpf_fib_lookup, + plen: ::aya_bpf_cty::c_int, + flags: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(69usize); + fun(ctx, params, plen, flags) +} +pub unsafe fn bpf_sock_hash_update( + skops: *mut bpf_sock_ops, + map: *mut ::aya_bpf_cty::c_void, + key: *mut ::aya_bpf_cty::c_void, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skops: *mut bpf_sock_ops, + map: *mut ::aya_bpf_cty::c_void, + key: *mut ::aya_bpf_cty::c_void, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(70usize); + fun(skops, map, key, flags) +} +pub unsafe fn bpf_msg_redirect_hash( + msg: *mut sk_msg_md, + map: *mut ::aya_bpf_cty::c_void, + key: *mut ::aya_bpf_cty::c_void, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + msg: *mut sk_msg_md, + map: *mut ::aya_bpf_cty::c_void, + key: *mut ::aya_bpf_cty::c_void, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(71usize); + fun(msg, map, key, flags) +} +pub unsafe fn bpf_sk_redirect_hash( + skb: *mut __sk_buff, + map: *mut ::aya_bpf_cty::c_void, + key: *mut ::aya_bpf_cty::c_void, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + map: *mut ::aya_bpf_cty::c_void, + key: *mut ::aya_bpf_cty::c_void, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(72usize); + fun(skb, map, key, flags) +} +pub unsafe fn bpf_lwt_push_encap( + skb: *mut __sk_buff, + type_: __u32, + hdr: *mut ::aya_bpf_cty::c_void, + len: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + type_: __u32, + hdr: *mut ::aya_bpf_cty::c_void, + len: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(73usize); + fun(skb, type_, hdr, len) +} +pub unsafe fn bpf_lwt_seg6_store_bytes( + skb: *mut __sk_buff, + offset: __u32, + from: *const ::aya_bpf_cty::c_void, + len: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + offset: __u32, + from: *const ::aya_bpf_cty::c_void, + len: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(74usize); + fun(skb, offset, from, len) +} +pub unsafe fn bpf_lwt_seg6_adjust_srh( + skb: *mut __sk_buff, + offset: __u32, + 
delta: __s32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + offset: __u32, + delta: __s32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(75usize); + fun(skb, offset, delta) +} +pub unsafe fn bpf_lwt_seg6_action( + skb: *mut __sk_buff, + action: __u32, + param: *mut ::aya_bpf_cty::c_void, + param_len: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + action: __u32, + param: *mut ::aya_bpf_cty::c_void, + param_len: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(76usize); + fun(skb, action, param, param_len) +} +pub unsafe fn bpf_rc_repeat(ctx: *mut ::aya_bpf_cty::c_void) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(ctx: *mut ::aya_bpf_cty::c_void) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(77usize); + fun(ctx) +} +pub unsafe fn bpf_rc_keydown( + ctx: *mut ::aya_bpf_cty::c_void, + protocol: __u32, + scancode: __u64, + toggle: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut ::aya_bpf_cty::c_void, + protocol: __u32, + scancode: __u64, + toggle: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(78usize); + fun(ctx, protocol, scancode, toggle) +} +pub unsafe fn bpf_skb_cgroup_id(skb: *mut __sk_buff) -> __u64 { + let fun: unsafe extern "C" fn(skb: *mut __sk_buff) -> __u64 = ::core::mem::transmute(79usize); + fun(skb) +} +pub unsafe fn bpf_get_current_cgroup_id() -> __u64 { + let fun: unsafe extern "C" fn() -> __u64 = ::core::mem::transmute(80usize); + fun() +} +pub unsafe fn bpf_get_local_storage( + map: *mut ::aya_bpf_cty::c_void, + flags: __u64, +) -> *mut ::aya_bpf_cty::c_void { + let fun: unsafe extern "C" fn( + map: *mut ::aya_bpf_cty::c_void, + flags: __u64, + ) -> *mut ::aya_bpf_cty::c_void = ::core::mem::transmute(81usize); + fun(map, flags) +} +pub unsafe fn bpf_sk_select_reuseport( + reuse: *mut sk_reuseport_md, + map: *mut ::aya_bpf_cty::c_void, + key: *mut ::aya_bpf_cty::c_void, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + reuse: *mut sk_reuseport_md, + map: *mut ::aya_bpf_cty::c_void, + key: *mut ::aya_bpf_cty::c_void, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(82usize); + fun(reuse, map, key, flags) +} +pub unsafe fn bpf_skb_ancestor_cgroup_id( + skb: *mut __sk_buff, + ancestor_level: ::aya_bpf_cty::c_int, +) -> __u64 { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + ancestor_level: ::aya_bpf_cty::c_int, + ) -> __u64 = ::core::mem::transmute(83usize); + fun(skb, ancestor_level) +} +pub unsafe fn bpf_sk_lookup_tcp( + ctx: *mut ::aya_bpf_cty::c_void, + tuple: *mut bpf_sock_tuple, + tuple_size: __u32, + netns: __u64, + flags: __u64, +) -> *mut bpf_sock { + let fun: unsafe extern "C" fn( + ctx: *mut ::aya_bpf_cty::c_void, + tuple: *mut bpf_sock_tuple, + tuple_size: __u32, + netns: __u64, + flags: __u64, + ) -> *mut bpf_sock = ::core::mem::transmute(84usize); + fun(ctx, tuple, tuple_size, netns, flags) +} +pub unsafe fn bpf_sk_lookup_udp( + ctx: *mut ::aya_bpf_cty::c_void, + tuple: *mut bpf_sock_tuple, + tuple_size: __u32, + netns: __u64, + flags: __u64, +) -> *mut bpf_sock { + let fun: unsafe extern "C" fn( + ctx: *mut ::aya_bpf_cty::c_void, + tuple: *mut bpf_sock_tuple, + tuple_size: __u32, + netns: __u64, + flags: __u64, + ) -> *mut bpf_sock = ::core::mem::transmute(85usize); + fun(ctx, tuple, tuple_size, netns, flags) +} +pub unsafe fn bpf_sk_release(sock: *mut ::aya_bpf_cty::c_void) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" 
fn(sock: *mut ::aya_bpf_cty::c_void) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(86usize); + fun(sock) +} +pub unsafe fn bpf_map_push_elem( + map: *mut ::aya_bpf_cty::c_void, + value: *const ::aya_bpf_cty::c_void, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + map: *mut ::aya_bpf_cty::c_void, + value: *const ::aya_bpf_cty::c_void, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(87usize); + fun(map, value, flags) +} +pub unsafe fn bpf_map_pop_elem( + map: *mut ::aya_bpf_cty::c_void, + value: *mut ::aya_bpf_cty::c_void, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + map: *mut ::aya_bpf_cty::c_void, + value: *mut ::aya_bpf_cty::c_void, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(88usize); + fun(map, value) +} +pub unsafe fn bpf_map_peek_elem( + map: *mut ::aya_bpf_cty::c_void, + value: *mut ::aya_bpf_cty::c_void, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + map: *mut ::aya_bpf_cty::c_void, + value: *mut ::aya_bpf_cty::c_void, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(89usize); + fun(map, value) +} +pub unsafe fn bpf_msg_push_data( + msg: *mut sk_msg_md, + start: __u32, + len: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + msg: *mut sk_msg_md, + start: __u32, + len: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(90usize); + fun(msg, start, len, flags) +} +pub unsafe fn bpf_msg_pop_data( + msg: *mut sk_msg_md, + start: __u32, + len: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + msg: *mut sk_msg_md, + start: __u32, + len: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(91usize); + fun(msg, start, len, flags) +} +pub unsafe fn bpf_rc_pointer_rel( + ctx: *mut ::aya_bpf_cty::c_void, + rel_x: __s32, + rel_y: __s32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut ::aya_bpf_cty::c_void, + rel_x: __s32, + rel_y: __s32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(92usize); + fun(ctx, rel_x, rel_y) +} +pub unsafe fn bpf_spin_lock(lock: *mut bpf_spin_lock) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(lock: *mut bpf_spin_lock) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(93usize); + fun(lock) +} +pub unsafe fn bpf_spin_unlock(lock: *mut bpf_spin_lock) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(lock: *mut bpf_spin_lock) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(94usize); + fun(lock) +} +pub unsafe fn bpf_sk_fullsock(sk: *mut bpf_sock) -> *mut bpf_sock { + let fun: unsafe extern "C" fn(sk: *mut bpf_sock) -> *mut bpf_sock = + ::core::mem::transmute(95usize); + fun(sk) +} +pub unsafe fn bpf_tcp_sock(sk: *mut bpf_sock) -> *mut bpf_tcp_sock { + let fun: unsafe extern "C" fn(sk: *mut bpf_sock) -> *mut bpf_tcp_sock = + ::core::mem::transmute(96usize); + fun(sk) +} +pub unsafe fn bpf_skb_ecn_set_ce(skb: *mut __sk_buff) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(skb: *mut __sk_buff) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(97usize); + fun(skb) +} +pub unsafe fn bpf_get_listener_sock(sk: *mut bpf_sock) -> *mut bpf_sock { + let fun: unsafe extern "C" fn(sk: *mut bpf_sock) -> *mut bpf_sock = + ::core::mem::transmute(98usize); + fun(sk) +} +pub unsafe fn bpf_skc_lookup_tcp( + ctx: *mut ::aya_bpf_cty::c_void, + tuple: *mut bpf_sock_tuple, + tuple_size: __u32, + netns: __u64, + flags: __u64, +) -> *mut bpf_sock { + let fun: unsafe extern "C" fn( + ctx: 
*mut ::aya_bpf_cty::c_void, + tuple: *mut bpf_sock_tuple, + tuple_size: __u32, + netns: __u64, + flags: __u64, + ) -> *mut bpf_sock = ::core::mem::transmute(99usize); + fun(ctx, tuple, tuple_size, netns, flags) +} +pub unsafe fn bpf_tcp_check_syncookie( + sk: *mut ::aya_bpf_cty::c_void, + iph: *mut ::aya_bpf_cty::c_void, + iph_len: __u32, + th: *mut tcphdr, + th_len: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + sk: *mut ::aya_bpf_cty::c_void, + iph: *mut ::aya_bpf_cty::c_void, + iph_len: __u32, + th: *mut tcphdr, + th_len: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(100usize); + fun(sk, iph, iph_len, th, th_len) +} +pub unsafe fn bpf_sysctl_get_name( + ctx: *mut bpf_sysctl, + buf: *mut ::aya_bpf_cty::c_char, + buf_len: ::aya_bpf_cty::c_ulong, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut bpf_sysctl, + buf: *mut ::aya_bpf_cty::c_char, + buf_len: ::aya_bpf_cty::c_ulong, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(101usize); + fun(ctx, buf, buf_len, flags) +} +pub unsafe fn bpf_sysctl_get_current_value( + ctx: *mut bpf_sysctl, + buf: *mut ::aya_bpf_cty::c_char, + buf_len: ::aya_bpf_cty::c_ulong, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut bpf_sysctl, + buf: *mut ::aya_bpf_cty::c_char, + buf_len: ::aya_bpf_cty::c_ulong, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(102usize); + fun(ctx, buf, buf_len) +} +pub unsafe fn bpf_sysctl_get_new_value( + ctx: *mut bpf_sysctl, + buf: *mut ::aya_bpf_cty::c_char, + buf_len: ::aya_bpf_cty::c_ulong, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut bpf_sysctl, + buf: *mut ::aya_bpf_cty::c_char, + buf_len: ::aya_bpf_cty::c_ulong, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(103usize); + fun(ctx, buf, buf_len) +} +pub unsafe fn bpf_sysctl_set_new_value( + ctx: *mut bpf_sysctl, + buf: *const ::aya_bpf_cty::c_char, + buf_len: ::aya_bpf_cty::c_ulong, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut bpf_sysctl, + buf: *const ::aya_bpf_cty::c_char, + buf_len: ::aya_bpf_cty::c_ulong, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(104usize); + fun(ctx, buf, buf_len) +} +pub unsafe fn bpf_strtol( + buf: *const ::aya_bpf_cty::c_char, + buf_len: ::aya_bpf_cty::c_ulong, + flags: __u64, + res: *mut ::aya_bpf_cty::c_long, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + buf: *const ::aya_bpf_cty::c_char, + buf_len: ::aya_bpf_cty::c_ulong, + flags: __u64, + res: *mut ::aya_bpf_cty::c_long, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(105usize); + fun(buf, buf_len, flags, res) +} +pub unsafe fn bpf_strtoul( + buf: *const ::aya_bpf_cty::c_char, + buf_len: ::aya_bpf_cty::c_ulong, + flags: __u64, + res: *mut ::aya_bpf_cty::c_ulong, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + buf: *const ::aya_bpf_cty::c_char, + buf_len: ::aya_bpf_cty::c_ulong, + flags: __u64, + res: *mut ::aya_bpf_cty::c_ulong, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(106usize); + fun(buf, buf_len, flags, res) +} +pub unsafe fn bpf_sk_storage_get( + map: *mut ::aya_bpf_cty::c_void, + sk: *mut ::aya_bpf_cty::c_void, + value: *mut ::aya_bpf_cty::c_void, + flags: __u64, +) -> *mut ::aya_bpf_cty::c_void { + let fun: unsafe extern "C" fn( + map: *mut ::aya_bpf_cty::c_void, + sk: *mut ::aya_bpf_cty::c_void, + value: *mut ::aya_bpf_cty::c_void, + flags: __u64, + ) -> *mut ::aya_bpf_cty::c_void = ::core::mem::transmute(107usize); + fun(map, 
sk, value, flags) +} +pub unsafe fn bpf_sk_storage_delete( + map: *mut ::aya_bpf_cty::c_void, + sk: *mut ::aya_bpf_cty::c_void, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + map: *mut ::aya_bpf_cty::c_void, + sk: *mut ::aya_bpf_cty::c_void, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(108usize); + fun(map, sk) +} +pub unsafe fn bpf_send_signal(sig: __u32) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(sig: __u32) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(109usize); + fun(sig) +} +pub unsafe fn bpf_tcp_gen_syncookie( + sk: *mut ::aya_bpf_cty::c_void, + iph: *mut ::aya_bpf_cty::c_void, + iph_len: __u32, + th: *mut tcphdr, + th_len: __u32, +) -> __s64 { + let fun: unsafe extern "C" fn( + sk: *mut ::aya_bpf_cty::c_void, + iph: *mut ::aya_bpf_cty::c_void, + iph_len: __u32, + th: *mut tcphdr, + th_len: __u32, + ) -> __s64 = ::core::mem::transmute(110usize); + fun(sk, iph, iph_len, th, th_len) +} +pub unsafe fn bpf_skb_output( + ctx: *mut ::aya_bpf_cty::c_void, + map: *mut ::aya_bpf_cty::c_void, + flags: __u64, + data: *mut ::aya_bpf_cty::c_void, + size: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut ::aya_bpf_cty::c_void, + map: *mut ::aya_bpf_cty::c_void, + flags: __u64, + data: *mut ::aya_bpf_cty::c_void, + size: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(111usize); + fun(ctx, map, flags, data, size) +} +pub unsafe fn bpf_probe_read_user( + dst: *mut ::aya_bpf_cty::c_void, + size: __u32, + unsafe_ptr: *const ::aya_bpf_cty::c_void, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + dst: *mut ::aya_bpf_cty::c_void, + size: __u32, + unsafe_ptr: *const ::aya_bpf_cty::c_void, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(112usize); + fun(dst, size, unsafe_ptr) +} +pub unsafe fn bpf_probe_read_kernel( + dst: *mut ::aya_bpf_cty::c_void, + size: __u32, + unsafe_ptr: *const ::aya_bpf_cty::c_void, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + dst: *mut ::aya_bpf_cty::c_void, + size: __u32, + unsafe_ptr: *const ::aya_bpf_cty::c_void, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(113usize); + fun(dst, size, unsafe_ptr) +} +pub unsafe fn bpf_probe_read_user_str( + dst: *mut ::aya_bpf_cty::c_void, + size: __u32, + unsafe_ptr: *const ::aya_bpf_cty::c_void, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + dst: *mut ::aya_bpf_cty::c_void, + size: __u32, + unsafe_ptr: *const ::aya_bpf_cty::c_void, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(114usize); + fun(dst, size, unsafe_ptr) +} +pub unsafe fn bpf_probe_read_kernel_str( + dst: *mut ::aya_bpf_cty::c_void, + size: __u32, + unsafe_ptr: *const ::aya_bpf_cty::c_void, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + dst: *mut ::aya_bpf_cty::c_void, + size: __u32, + unsafe_ptr: *const ::aya_bpf_cty::c_void, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(115usize); + fun(dst, size, unsafe_ptr) +} +pub unsafe fn bpf_tcp_send_ack( + tp: *mut ::aya_bpf_cty::c_void, + rcv_nxt: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + tp: *mut ::aya_bpf_cty::c_void, + rcv_nxt: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(116usize); + fun(tp, rcv_nxt) +} +pub unsafe fn bpf_send_signal_thread(sig: __u32) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(sig: __u32) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(117usize); + fun(sig) +} +pub unsafe fn bpf_jiffies64() -> __u64 { + let fun: unsafe extern "C" fn() -> __u64 = 
::core::mem::transmute(118usize); + fun() +} +pub unsafe fn bpf_read_branch_records( + ctx: *mut bpf_perf_event_data, + buf: *mut ::aya_bpf_cty::c_void, + size: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut bpf_perf_event_data, + buf: *mut ::aya_bpf_cty::c_void, + size: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(119usize); + fun(ctx, buf, size, flags) +} +pub unsafe fn bpf_get_ns_current_pid_tgid( + dev: __u64, + ino: __u64, + nsdata: *mut bpf_pidns_info, + size: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + dev: __u64, + ino: __u64, + nsdata: *mut bpf_pidns_info, + size: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(120usize); + fun(dev, ino, nsdata, size) +} +pub unsafe fn bpf_xdp_output( + ctx: *mut ::aya_bpf_cty::c_void, + map: *mut ::aya_bpf_cty::c_void, + flags: __u64, + data: *mut ::aya_bpf_cty::c_void, + size: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut ::aya_bpf_cty::c_void, + map: *mut ::aya_bpf_cty::c_void, + flags: __u64, + data: *mut ::aya_bpf_cty::c_void, + size: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(121usize); + fun(ctx, map, flags, data, size) +} +pub unsafe fn bpf_get_netns_cookie(ctx: *mut ::aya_bpf_cty::c_void) -> __u64 { + let fun: unsafe extern "C" fn(ctx: *mut ::aya_bpf_cty::c_void) -> __u64 = + ::core::mem::transmute(122usize); + fun(ctx) +} +pub unsafe fn bpf_get_current_ancestor_cgroup_id(ancestor_level: ::aya_bpf_cty::c_int) -> __u64 { + let fun: unsafe extern "C" fn(ancestor_level: ::aya_bpf_cty::c_int) -> __u64 = + ::core::mem::transmute(123usize); + fun(ancestor_level) +} +pub unsafe fn bpf_sk_assign( + ctx: *mut ::aya_bpf_cty::c_void, + sk: *mut ::aya_bpf_cty::c_void, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut ::aya_bpf_cty::c_void, + sk: *mut ::aya_bpf_cty::c_void, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(124usize); + fun(ctx, sk, flags) +} +pub unsafe fn bpf_ktime_get_boot_ns() -> __u64 { + let fun: unsafe extern "C" fn() -> __u64 = ::core::mem::transmute(125usize); + fun() +} +pub unsafe fn bpf_seq_printf( + m: *mut seq_file, + fmt: *const ::aya_bpf_cty::c_char, + fmt_size: __u32, + data: *const ::aya_bpf_cty::c_void, + data_len: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + m: *mut seq_file, + fmt: *const ::aya_bpf_cty::c_char, + fmt_size: __u32, + data: *const ::aya_bpf_cty::c_void, + data_len: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(126usize); + fun(m, fmt, fmt_size, data, data_len) +} +pub unsafe fn bpf_seq_write( + m: *mut seq_file, + data: *const ::aya_bpf_cty::c_void, + len: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + m: *mut seq_file, + data: *const ::aya_bpf_cty::c_void, + len: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(127usize); + fun(m, data, len) +} +pub unsafe fn bpf_sk_cgroup_id(sk: *mut ::aya_bpf_cty::c_void) -> __u64 { + let fun: unsafe extern "C" fn(sk: *mut ::aya_bpf_cty::c_void) -> __u64 = + ::core::mem::transmute(128usize); + fun(sk) +} +pub unsafe fn bpf_sk_ancestor_cgroup_id( + sk: *mut ::aya_bpf_cty::c_void, + ancestor_level: ::aya_bpf_cty::c_int, +) -> __u64 { + let fun: unsafe extern "C" fn( + sk: *mut ::aya_bpf_cty::c_void, + ancestor_level: ::aya_bpf_cty::c_int, + ) -> __u64 = ::core::mem::transmute(129usize); + fun(sk, ancestor_level) +} +pub unsafe fn bpf_ringbuf_output( + 
ringbuf: *mut ::aya_bpf_cty::c_void, + data: *mut ::aya_bpf_cty::c_void, + size: __u64, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ringbuf: *mut ::aya_bpf_cty::c_void, + data: *mut ::aya_bpf_cty::c_void, + size: __u64, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(130usize); + fun(ringbuf, data, size, flags) +} +pub unsafe fn bpf_ringbuf_reserve( + ringbuf: *mut ::aya_bpf_cty::c_void, + size: __u64, + flags: __u64, +) -> *mut ::aya_bpf_cty::c_void { + let fun: unsafe extern "C" fn( + ringbuf: *mut ::aya_bpf_cty::c_void, + size: __u64, + flags: __u64, + ) -> *mut ::aya_bpf_cty::c_void = ::core::mem::transmute(131usize); + fun(ringbuf, size, flags) +} +pub unsafe fn bpf_ringbuf_submit(data: *mut ::aya_bpf_cty::c_void, flags: __u64) { + let fun: unsafe extern "C" fn(data: *mut ::aya_bpf_cty::c_void, flags: __u64) = + ::core::mem::transmute(132usize); + fun(data, flags) +} +pub unsafe fn bpf_ringbuf_discard(data: *mut ::aya_bpf_cty::c_void, flags: __u64) { + let fun: unsafe extern "C" fn(data: *mut ::aya_bpf_cty::c_void, flags: __u64) = + ::core::mem::transmute(133usize); + fun(data, flags) +} +pub unsafe fn bpf_ringbuf_query(ringbuf: *mut ::aya_bpf_cty::c_void, flags: __u64) -> __u64 { + let fun: unsafe extern "C" fn(ringbuf: *mut ::aya_bpf_cty::c_void, flags: __u64) -> __u64 = + ::core::mem::transmute(134usize); + fun(ringbuf, flags) +} +pub unsafe fn bpf_csum_level(skb: *mut __sk_buff, level: __u64) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(skb: *mut __sk_buff, level: __u64) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(135usize); + fun(skb, level) +} +pub unsafe fn bpf_skc_to_tcp6_sock(sk: *mut ::aya_bpf_cty::c_void) -> *mut tcp6_sock { + let fun: unsafe extern "C" fn(sk: *mut ::aya_bpf_cty::c_void) -> *mut tcp6_sock = + ::core::mem::transmute(136usize); + fun(sk) +} +pub unsafe fn bpf_skc_to_tcp_sock(sk: *mut ::aya_bpf_cty::c_void) -> *mut tcp_sock { + let fun: unsafe extern "C" fn(sk: *mut ::aya_bpf_cty::c_void) -> *mut tcp_sock = + ::core::mem::transmute(137usize); + fun(sk) +} +pub unsafe fn bpf_skc_to_tcp_timewait_sock( + sk: *mut ::aya_bpf_cty::c_void, +) -> *mut tcp_timewait_sock { + let fun: unsafe extern "C" fn(sk: *mut ::aya_bpf_cty::c_void) -> *mut tcp_timewait_sock = + ::core::mem::transmute(138usize); + fun(sk) +} +pub unsafe fn bpf_skc_to_tcp_request_sock(sk: *mut ::aya_bpf_cty::c_void) -> *mut tcp_request_sock { + let fun: unsafe extern "C" fn(sk: *mut ::aya_bpf_cty::c_void) -> *mut tcp_request_sock = + ::core::mem::transmute(139usize); + fun(sk) +} +pub unsafe fn bpf_skc_to_udp6_sock(sk: *mut ::aya_bpf_cty::c_void) -> *mut udp6_sock { + let fun: unsafe extern "C" fn(sk: *mut ::aya_bpf_cty::c_void) -> *mut udp6_sock = + ::core::mem::transmute(140usize); + fun(sk) +} +pub unsafe fn bpf_get_task_stack( + task: *mut task_struct, + buf: *mut ::aya_bpf_cty::c_void, + size: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + task: *mut task_struct, + buf: *mut ::aya_bpf_cty::c_void, + size: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(141usize); + fun(task, buf, size, flags) +} +pub unsafe fn bpf_load_hdr_opt( + skops: *mut bpf_sock_ops, + searchby_res: *mut ::aya_bpf_cty::c_void, + len: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skops: *mut bpf_sock_ops, + searchby_res: *mut ::aya_bpf_cty::c_void, + len: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = 
::core::mem::transmute(142usize); + fun(skops, searchby_res, len, flags) +} +pub unsafe fn bpf_store_hdr_opt( + skops: *mut bpf_sock_ops, + from: *const ::aya_bpf_cty::c_void, + len: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skops: *mut bpf_sock_ops, + from: *const ::aya_bpf_cty::c_void, + len: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(143usize); + fun(skops, from, len, flags) +} +pub unsafe fn bpf_reserve_hdr_opt( + skops: *mut bpf_sock_ops, + len: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skops: *mut bpf_sock_ops, + len: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(144usize); + fun(skops, len, flags) +} +pub unsafe fn bpf_inode_storage_get( + map: *mut ::aya_bpf_cty::c_void, + inode: *mut ::aya_bpf_cty::c_void, + value: *mut ::aya_bpf_cty::c_void, + flags: __u64, +) -> *mut ::aya_bpf_cty::c_void { + let fun: unsafe extern "C" fn( + map: *mut ::aya_bpf_cty::c_void, + inode: *mut ::aya_bpf_cty::c_void, + value: *mut ::aya_bpf_cty::c_void, + flags: __u64, + ) -> *mut ::aya_bpf_cty::c_void = ::core::mem::transmute(145usize); + fun(map, inode, value, flags) +} +pub unsafe fn bpf_inode_storage_delete( + map: *mut ::aya_bpf_cty::c_void, + inode: *mut ::aya_bpf_cty::c_void, +) -> ::aya_bpf_cty::c_int { + let fun: unsafe extern "C" fn( + map: *mut ::aya_bpf_cty::c_void, + inode: *mut ::aya_bpf_cty::c_void, + ) -> ::aya_bpf_cty::c_int = ::core::mem::transmute(146usize); + fun(map, inode) +} +pub unsafe fn bpf_d_path( + path: *mut path, + buf: *mut ::aya_bpf_cty::c_char, + sz: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + path: *mut path, + buf: *mut ::aya_bpf_cty::c_char, + sz: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(147usize); + fun(path, buf, sz) +} +pub unsafe fn bpf_copy_from_user( + dst: *mut ::aya_bpf_cty::c_void, + size: __u32, + user_ptr: *const ::aya_bpf_cty::c_void, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + dst: *mut ::aya_bpf_cty::c_void, + size: __u32, + user_ptr: *const ::aya_bpf_cty::c_void, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(148usize); + fun(dst, size, user_ptr) +} +pub unsafe fn bpf_snprintf_btf( + str_: *mut ::aya_bpf_cty::c_char, + str_size: __u32, + ptr: *mut btf_ptr, + btf_ptr_size: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + str_: *mut ::aya_bpf_cty::c_char, + str_size: __u32, + ptr: *mut btf_ptr, + btf_ptr_size: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(149usize); + fun(str_, str_size, ptr, btf_ptr_size, flags) +} +pub unsafe fn bpf_seq_printf_btf( + m: *mut seq_file, + ptr: *mut btf_ptr, + ptr_size: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + m: *mut seq_file, + ptr: *mut btf_ptr, + ptr_size: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(150usize); + fun(m, ptr, ptr_size, flags) +} +pub unsafe fn bpf_skb_cgroup_classid(skb: *mut __sk_buff) -> __u64 { + let fun: unsafe extern "C" fn(skb: *mut __sk_buff) -> __u64 = ::core::mem::transmute(151usize); + fun(skb) +} +pub unsafe fn bpf_redirect_neigh( + ifindex: __u32, + params: *mut bpf_redir_neigh, + plen: ::aya_bpf_cty::c_int, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ifindex: __u32, + params: *mut bpf_redir_neigh, + plen: ::aya_bpf_cty::c_int, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = 
::core::mem::transmute(152usize); + fun(ifindex, params, plen, flags) +} +pub unsafe fn bpf_per_cpu_ptr( + percpu_ptr: *const ::aya_bpf_cty::c_void, + cpu: __u32, +) -> *mut ::aya_bpf_cty::c_void { + let fun: unsafe extern "C" fn( + percpu_ptr: *const ::aya_bpf_cty::c_void, + cpu: __u32, + ) -> *mut ::aya_bpf_cty::c_void = ::core::mem::transmute(153usize); + fun(percpu_ptr, cpu) +} +pub unsafe fn bpf_this_cpu_ptr( + percpu_ptr: *const ::aya_bpf_cty::c_void, +) -> *mut ::aya_bpf_cty::c_void { + let fun: unsafe extern "C" fn( + percpu_ptr: *const ::aya_bpf_cty::c_void, + ) -> *mut ::aya_bpf_cty::c_void = ::core::mem::transmute(154usize); + fun(percpu_ptr) +} +pub unsafe fn bpf_redirect_peer(ifindex: __u32, flags: __u64) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(ifindex: __u32, flags: __u64) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(155usize); + fun(ifindex, flags) +} +pub unsafe fn bpf_task_storage_get( + map: *mut ::aya_bpf_cty::c_void, + task: *mut task_struct, + value: *mut ::aya_bpf_cty::c_void, + flags: __u64, +) -> *mut ::aya_bpf_cty::c_void { + let fun: unsafe extern "C" fn( + map: *mut ::aya_bpf_cty::c_void, + task: *mut task_struct, + value: *mut ::aya_bpf_cty::c_void, + flags: __u64, + ) -> *mut ::aya_bpf_cty::c_void = ::core::mem::transmute(156usize); + fun(map, task, value, flags) +} +pub unsafe fn bpf_task_storage_delete( + map: *mut ::aya_bpf_cty::c_void, + task: *mut task_struct, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + map: *mut ::aya_bpf_cty::c_void, + task: *mut task_struct, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(157usize); + fun(map, task) +} +pub unsafe fn bpf_get_current_task_btf() -> *mut task_struct { + let fun: unsafe extern "C" fn() -> *mut task_struct = ::core::mem::transmute(158usize); + fun() +} +pub unsafe fn bpf_bprm_opts_set(bprm: *mut linux_binprm, flags: __u64) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(bprm: *mut linux_binprm, flags: __u64) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(159usize); + fun(bprm, flags) +} +pub unsafe fn bpf_ktime_get_coarse_ns() -> __u64 { + let fun: unsafe extern "C" fn() -> __u64 = ::core::mem::transmute(160usize); + fun() +} +pub unsafe fn bpf_ima_inode_hash( + inode: *mut inode, + dst: *mut ::aya_bpf_cty::c_void, + size: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + inode: *mut inode, + dst: *mut ::aya_bpf_cty::c_void, + size: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(161usize); + fun(inode, dst, size) +} +pub unsafe fn bpf_sock_from_file(file: *mut file) -> *mut socket { + let fun: unsafe extern "C" fn(file: *mut file) -> *mut socket = + ::core::mem::transmute(162usize); + fun(file) +} +pub unsafe fn bpf_check_mtu( + ctx: *mut ::aya_bpf_cty::c_void, + ifindex: __u32, + mtu_len: *mut __u32, + len_diff: __s32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut ::aya_bpf_cty::c_void, + ifindex: __u32, + mtu_len: *mut __u32, + len_diff: __s32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(163usize); + fun(ctx, ifindex, mtu_len, len_diff, flags) +} +pub unsafe fn bpf_for_each_map_elem( + map: *mut ::aya_bpf_cty::c_void, + callback_fn: *mut ::aya_bpf_cty::c_void, + callback_ctx: *mut ::aya_bpf_cty::c_void, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + map: *mut ::aya_bpf_cty::c_void, + callback_fn: *mut ::aya_bpf_cty::c_void, + callback_ctx: *mut ::aya_bpf_cty::c_void, + flags: __u64, + ) -> 
::aya_bpf_cty::c_long = ::core::mem::transmute(164usize); + fun(map, callback_fn, callback_ctx, flags) +} +pub unsafe fn bpf_snprintf( + str_: *mut ::aya_bpf_cty::c_char, + str_size: __u32, + fmt: *const ::aya_bpf_cty::c_char, + data: *mut __u64, + data_len: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + str_: *mut ::aya_bpf_cty::c_char, + str_size: __u32, + fmt: *const ::aya_bpf_cty::c_char, + data: *mut __u64, + data_len: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(165usize); + fun(str_, str_size, fmt, data, data_len) +} +pub unsafe fn bpf_sys_bpf( + cmd: __u32, + attr: *mut ::aya_bpf_cty::c_void, + attr_size: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + cmd: __u32, + attr: *mut ::aya_bpf_cty::c_void, + attr_size: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(166usize); + fun(cmd, attr, attr_size) +} +pub unsafe fn bpf_btf_find_by_name_kind( + name: *mut ::aya_bpf_cty::c_char, + name_sz: ::aya_bpf_cty::c_int, + kind: __u32, + flags: ::aya_bpf_cty::c_int, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + name: *mut ::aya_bpf_cty::c_char, + name_sz: ::aya_bpf_cty::c_int, + kind: __u32, + flags: ::aya_bpf_cty::c_int, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(167usize); + fun(name, name_sz, kind, flags) +} +pub unsafe fn bpf_sys_close(fd: __u32) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(fd: __u32) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(168usize); + fun(fd) +} diff --git a/bpf/aya-bpf-bindings/src/armv7/mod.rs b/bpf/aya-bpf-bindings/src/armv7/mod.rs new file mode 100644 index 00000000..bd6ebbcb --- /dev/null +++ b/bpf/aya-bpf-bindings/src/armv7/mod.rs @@ -0,0 +1,3 @@ +pub mod bindings; +pub mod getters; +pub mod helpers; diff --git a/bpf/aya-bpf-bindings/src/lib.rs b/bpf/aya-bpf-bindings/src/lib.rs index 1a4557a1..5100f6e8 100644 --- a/bpf/aya-bpf-bindings/src/lib.rs +++ b/bpf/aya-bpf-bindings/src/lib.rs @@ -4,6 +4,9 @@ #[cfg(bpf_target_arch = "x86_64")] mod x86_64; +#[cfg(bpf_target_arch = "arm")] +mod armv7; + #[cfg(bpf_target_arch = "aarch64")] mod aarch64; @@ -11,6 +14,9 @@ mod gen { #[cfg(bpf_target_arch = "x86_64")] pub use super::x86_64::*; + #[cfg(bpf_target_arch = "arm")] + pub use super::armv7::*; + #[cfg(bpf_target_arch = "aarch64")] pub use super::aarch64::*; } diff --git a/bpf/aya-bpf-bindings/src/x86_64/bindings.rs b/bpf/aya-bpf-bindings/src/x86_64/bindings.rs index dc0a0703..0e8a177d 100644 --- a/bpf/aya-bpf-bindings/src/x86_64/bindings.rs +++ b/bpf/aya-bpf-bindings/src/x86_64/bindings.rs @@ -228,6 +228,8 @@ pub const SO_TIMESTAMPING_NEW: u32 = 65; pub const SO_RCVTIMEO_NEW: u32 = 66; pub const SO_SNDTIMEO_NEW: u32 = 67; pub const SO_DETACH_REUSEPORT_BPF: u32 = 68; +pub const SO_PREFER_BUSY_POLL: u32 = 69; +pub const SO_BUSY_POLL_BUDGET: u32 = 70; pub const SO_TIMESTAMP: u32 = 29; pub const SO_TIMESTAMPNS: u32 = 35; pub const SO_TIMESTAMPING: u32 = 37; diff --git a/bpf/aya-bpf-cty/src/lib.rs b/bpf/aya-bpf-cty/src/lib.rs index 86bc3576..2588d86a 100644 --- a/bpf/aya-bpf-cty/src/lib.rs +++ b/bpf/aya-bpf-cty/src/lib.rs @@ -20,6 +20,9 @@ mod ad { pub type c_int = i32; pub type c_uint = u32; + #[cfg(bpf_target_arch = "arm")] + pub type c_char = super::c_uchar; + #[cfg(bpf_target_arch = "aarch64")] pub type c_char = super::c_uchar; diff --git a/xtask/src/codegen/aya.rs b/xtask/src/codegen/aya.rs index 082beb53..69d043ef 100644 --- a/xtask/src/codegen/aya.rs +++ b/xtask/src/codegen/aya.rs @@ -168,6 +168,9 @@ fn codegen_bindings(opts: &Options) -> 
Result<(), anyhow::Error> { Architecture::X86_64 => { bindgen = bindgen.clang_args(&["-I", "/usr/include/x86_64-linux-gnu"]); } + Architecture::ARMv7 => { + bindgen = bindgen.clang_args(&["-I", "/usr/arm-linux-gnueabi/include"]); + } Architecture::AArch64 => { bindgen = bindgen.clang_args(&["-I", "/usr/aarch64-linux-gnu/include"]); } diff --git a/xtask/src/codegen/mod.rs b/xtask/src/codegen/mod.rs index c603fdd9..d9d66726 100644 --- a/xtask/src/codegen/mod.rs +++ b/xtask/src/codegen/mod.rs @@ -6,11 +6,16 @@ use std::path::PathBuf; use structopt::StructOpt; -const SUPPORTED_ARCHS: &'static [Architecture] = &[Architecture::X86_64, Architecture::AArch64]; +const SUPPORTED_ARCHS: &'static [Architecture] = &[ + Architecture::X86_64, + Architecture::ARMv7, + Architecture::AArch64, +]; #[derive(Debug, Copy, Clone)] pub enum Architecture { X86_64, + ARMv7, AArch64, } @@ -26,6 +31,7 @@ impl std::str::FromStr for Architecture { fn from_str(s: &str) -> Result<Self, Self::Err> { Ok(match s { "x86_64" => Architecture::X86_64, + "armv7" => Architecture::ARMv7, "aarch64" => Architecture::AArch64, _ => return Err("invalid architecture".to_owned()), }) @@ -36,6 +42,7 @@ impl std::fmt::Display for Architecture { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str(match self { Architecture::X86_64 => "x86_64", + Architecture::ARMv7 => "armv7", Architecture::AArch64 => "aarch64", }) }
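
For reference, a minimal sketch of how one of the generated helper wrappers above is typically called from eBPF program code. The `aya_bpf_bindings::helpers` import path and the `read_kernel_str` function are illustrative assumptions, not part of this patch; the call itself matches the generated `bpf_probe_read_kernel_str` wrapper above, which transmutes helper id 115 into an extern "C" function pointer and forwards the arguments.

use aya_bpf_bindings::helpers::bpf_probe_read_kernel_str; // assumed re-export of the per-arch generated module
use aya_bpf_cty::{c_long, c_void};

// Copy a NUL-terminated string from kernel memory into `dst`.
// Returns the helper's result: on success the copied length including the
// trailing NUL, otherwise a negative error code.
unsafe fn read_kernel_str(dst: &mut [u8], src: *const c_void) -> c_long {
    bpf_probe_read_kernel_str(dst.as_mut_ptr() as *mut c_void, dst.len() as u32, src)
}

Because lib.rs now selects the armv7 module when bpf_target_arch = "arm", the same call compiles unchanged for the new target.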