diff --git a/aya/src/generated/linux_bindings_riscv64.rs b/aya/src/generated/linux_bindings_riscv64.rs
index 8b137891..6e248082 100644
--- a/aya/src/generated/linux_bindings_riscv64.rs
+++ b/aya/src/generated/linux_bindings_riscv64.rs
@@ -1 +1,2013 @@
+/* automatically generated by rust-bindgen 0.59.2 */
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct __BindgenBitfieldUnit<Storage> {
+    storage: Storage,
+}
+impl<Storage> __BindgenBitfieldUnit<Storage> {
+    #[inline]
+    pub const fn new(storage: Storage) -> Self {
+        Self { storage }
+    }
+}
+impl<Storage> __BindgenBitfieldUnit<Storage>
+where
+    Storage: AsRef<[u8]> + AsMut<[u8]>,
+{
+    #[inline]
+    pub fn get_bit(&self, index: usize) -> bool {
+        debug_assert!(index / 8 < self.storage.as_ref().len());
+        let byte_index = index / 8;
+        let byte = self.storage.as_ref()[byte_index];
+        let bit_index = if cfg!(target_endian = "big") {
+            7 - (index % 8)
+        } else {
+            index % 8
+        };
+        let mask = 1 << bit_index;
+        byte & mask == mask
+    }
+    #[inline]
+    pub fn set_bit(&mut self, index: usize, val: bool) {
+        debug_assert!(index / 8 < self.storage.as_ref().len());
+        let byte_index = index / 8;
+        let byte = &mut self.storage.as_mut()[byte_index];
+        let bit_index = if cfg!(target_endian = "big") {
+            7 - (index % 8)
+        } else {
+            index % 8
+        };
+        let mask = 1 << bit_index;
+        if val {
+            *byte |= mask;
+        } else {
+            *byte &= !mask;
+        }
+    }
+    #[inline]
+    pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 {
+        debug_assert!(bit_width <= 64);
+        debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
+        debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len());
+        let mut val = 0;
+        for i in 0..(bit_width as usize) {
+            if self.get_bit(i + bit_offset) {
+                let index = if cfg!(target_endian = "big") {
+                    bit_width as usize - 1 - i
+                } else {
+                    i
+                };
+                val |= 1 << index;
+            }
+        }
+        val
+    }
+    #[inline]
+    pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) {
+        debug_assert!(bit_width <= 64);
+        debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
+        debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len());
+        for i in 0..(bit_width as usize) {
+            let mask = 1 << i;
+            let val_bit_is_set = val & mask == mask;
+            let index = if cfg!(target_endian = "big") {
+                bit_width as usize - 1 - i
+            } else {
+                i
+            };
+            self.set_bit(index + bit_offset, val_bit_is_set);
+        }
+    }
+}
+#[repr(C)]
+#[derive(Default)]
+pub struct __IncompleteArrayField<T>(::std::marker::PhantomData<T>, [T; 0]);
+impl<T> __IncompleteArrayField<T> {
+    #[inline]
+    pub const fn new() -> Self {
+        __IncompleteArrayField(::std::marker::PhantomData, [])
+    }
+    #[inline]
+    pub fn as_ptr(&self) -> *const T {
+        self as *const _ as *const T
+    }
+    #[inline]
+    pub fn as_mut_ptr(&mut self) -> *mut T {
+        self as *mut _ as *mut T
+    }
+    #[inline]
+    pub unsafe fn as_slice(&self, len: usize) -> &[T] {
+        ::std::slice::from_raw_parts(self.as_ptr(), len)
+    }
+    #[inline]
+    pub unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] {
+        ::std::slice::from_raw_parts_mut(self.as_mut_ptr(), len)
+    }
+}
+impl<T> ::std::fmt::Debug for __IncompleteArrayField<T> {
+    fn fmt(&self, fmt: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
+        fmt.write_str("__IncompleteArrayField")
+    }
+}
+pub const BPF_LD: u32 = 0;
+pub const BPF_LDX: u32 = 1;
+pub const BPF_ST: u32 = 2;
+pub const BPF_STX: u32 = 3;
+pub const BPF_ALU: u32 = 4;
+pub const BPF_JMP: u32 = 5;
+pub const BPF_W: u32 = 0;
+pub const BPF_H: u32 = 8;
+pub const BPF_B: u32 = 16;
+pub const BPF_K: u32 = 0;
+pub const 
BPF_ALU64: u32 = 7; +pub const BPF_DW: u32 = 24; +pub const BPF_CALL: u32 = 128; +pub const BPF_F_ALLOW_OVERRIDE: u32 = 1; +pub const BPF_F_ALLOW_MULTI: u32 = 2; +pub const BPF_F_REPLACE: u32 = 4; +pub const BPF_F_STRICT_ALIGNMENT: u32 = 1; +pub const BPF_F_ANY_ALIGNMENT: u32 = 2; +pub const BPF_F_TEST_RND_HI32: u32 = 4; +pub const BPF_F_TEST_STATE_FREQ: u32 = 8; +pub const BPF_F_SLEEPABLE: u32 = 16; +pub const BPF_F_XDP_HAS_FRAGS: u32 = 32; +pub const BPF_F_KPROBE_MULTI_RETURN: u32 = 1; +pub const BPF_PSEUDO_MAP_FD: u32 = 1; +pub const BPF_PSEUDO_MAP_IDX: u32 = 5; +pub const BPF_PSEUDO_MAP_VALUE: u32 = 2; +pub const BPF_PSEUDO_MAP_IDX_VALUE: u32 = 6; +pub const BPF_PSEUDO_BTF_ID: u32 = 3; +pub const BPF_PSEUDO_FUNC: u32 = 4; +pub const BPF_PSEUDO_CALL: u32 = 1; +pub const BPF_PSEUDO_KFUNC_CALL: u32 = 2; +pub const BPF_F_QUERY_EFFECTIVE: u32 = 1; +pub const BPF_F_TEST_RUN_ON_CPU: u32 = 1; +pub const BPF_F_TEST_XDP_LIVE_FRAMES: u32 = 2; +pub const BTF_INT_SIGNED: u32 = 1; +pub const BTF_INT_CHAR: u32 = 2; +pub const BTF_INT_BOOL: u32 = 4; +pub const PERF_MAX_STACK_DEPTH: u32 = 127; +pub const PERF_MAX_CONTEXTS_PER_STACK: u32 = 8; +pub const PERF_FLAG_FD_NO_GROUP: u32 = 1; +pub const PERF_FLAG_FD_OUTPUT: u32 = 2; +pub const PERF_FLAG_PID_CGROUP: u32 = 4; +pub const PERF_FLAG_FD_CLOEXEC: u32 = 8; +pub const NLMSG_ALIGNTO: u32 = 4; +pub const XDP_FLAGS_UPDATE_IF_NOEXIST: u32 = 1; +pub const XDP_FLAGS_SKB_MODE: u32 = 2; +pub const XDP_FLAGS_DRV_MODE: u32 = 4; +pub const XDP_FLAGS_HW_MODE: u32 = 8; +pub const XDP_FLAGS_REPLACE: u32 = 16; +pub const XDP_FLAGS_MODES: u32 = 14; +pub const XDP_FLAGS_MASK: u32 = 31; +pub const SO_ATTACH_BPF: u32 = 50; +pub const SO_DETACH_BPF: u32 = 27; +pub const TC_H_MAJ_MASK: u32 = 4294901760; +pub const TC_H_MIN_MASK: u32 = 65535; +pub const TC_H_UNSPEC: u32 = 0; +pub const TC_H_ROOT: u32 = 4294967295; +pub const TC_H_INGRESS: u32 = 4294967281; +pub const TC_H_CLSACT: u32 = 4294967281; +pub const TC_H_MIN_PRIORITY: u32 = 65504; +pub const TC_H_MIN_INGRESS: u32 = 65522; +pub const TC_H_MIN_EGRESS: u32 = 65523; +pub const TCA_BPF_FLAG_ACT_DIRECT: u32 = 1; +pub type __u8 = ::std::os::raw::c_uchar; +pub type __s16 = ::std::os::raw::c_short; +pub type __u16 = ::std::os::raw::c_ushort; +pub type __s32 = ::std::os::raw::c_int; +pub type __u32 = ::std::os::raw::c_uint; +pub type __s64 = ::std::os::raw::c_longlong; +pub type __u64 = ::std::os::raw::c_ulonglong; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_insn { + pub code: __u8, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 1usize]>, + pub off: __s16, + pub imm: __s32, +} +impl bpf_insn { + #[inline] + pub fn dst_reg(&self) -> __u8 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 4u8) as u8) } + } + #[inline] + pub fn set_dst_reg(&mut self, val: __u8) { + unsafe { + let val: u8 = ::std::mem::transmute(val); + self._bitfield_1.set(0usize, 4u8, val as u64) + } + } + #[inline] + pub fn src_reg(&self) -> __u8 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(4usize, 4u8) as u8) } + } + #[inline] + pub fn set_src_reg(&mut self, val: __u8) { + unsafe { + let val: u8 = ::std::mem::transmute(val); + self._bitfield_1.set(4usize, 4u8, val as u64) + } + } + #[inline] + pub fn new_bitfield_1(dst_reg: __u8, src_reg: __u8) -> __BindgenBitfieldUnit<[u8; 1usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); + __bindgen_bitfield_unit.set(0usize, 4u8, { + let dst_reg: u8 = unsafe { ::std::mem::transmute(dst_reg) }; + 
dst_reg as u64 + }); + __bindgen_bitfield_unit.set(4usize, 4u8, { + let src_reg: u8 = unsafe { ::std::mem::transmute(src_reg) }; + src_reg as u64 + }); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Debug)] +pub struct bpf_lpm_trie_key { + pub prefixlen: __u32, + pub data: __IncompleteArrayField<__u8>, +} +impl bpf_cmd { + pub const BPF_PROG_RUN: bpf_cmd = bpf_cmd::BPF_PROG_TEST_RUN; +} +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum bpf_cmd { + BPF_MAP_CREATE = 0, + BPF_MAP_LOOKUP_ELEM = 1, + BPF_MAP_UPDATE_ELEM = 2, + BPF_MAP_DELETE_ELEM = 3, + BPF_MAP_GET_NEXT_KEY = 4, + BPF_PROG_LOAD = 5, + BPF_OBJ_PIN = 6, + BPF_OBJ_GET = 7, + BPF_PROG_ATTACH = 8, + BPF_PROG_DETACH = 9, + BPF_PROG_TEST_RUN = 10, + BPF_PROG_GET_NEXT_ID = 11, + BPF_MAP_GET_NEXT_ID = 12, + BPF_PROG_GET_FD_BY_ID = 13, + BPF_MAP_GET_FD_BY_ID = 14, + BPF_OBJ_GET_INFO_BY_FD = 15, + BPF_PROG_QUERY = 16, + BPF_RAW_TRACEPOINT_OPEN = 17, + BPF_BTF_LOAD = 18, + BPF_BTF_GET_FD_BY_ID = 19, + BPF_TASK_FD_QUERY = 20, + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 21, + BPF_MAP_FREEZE = 22, + BPF_BTF_GET_NEXT_ID = 23, + BPF_MAP_LOOKUP_BATCH = 24, + BPF_MAP_LOOKUP_AND_DELETE_BATCH = 25, + BPF_MAP_UPDATE_BATCH = 26, + BPF_MAP_DELETE_BATCH = 27, + BPF_LINK_CREATE = 28, + BPF_LINK_UPDATE = 29, + BPF_LINK_GET_FD_BY_ID = 30, + BPF_LINK_GET_NEXT_ID = 31, + BPF_ENABLE_STATS = 32, + BPF_ITER_CREATE = 33, + BPF_LINK_DETACH = 34, + BPF_PROG_BIND_MAP = 35, +} +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum bpf_map_type { + BPF_MAP_TYPE_UNSPEC = 0, + BPF_MAP_TYPE_HASH = 1, + BPF_MAP_TYPE_ARRAY = 2, + BPF_MAP_TYPE_PROG_ARRAY = 3, + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 4, + BPF_MAP_TYPE_PERCPU_HASH = 5, + BPF_MAP_TYPE_PERCPU_ARRAY = 6, + BPF_MAP_TYPE_STACK_TRACE = 7, + BPF_MAP_TYPE_CGROUP_ARRAY = 8, + BPF_MAP_TYPE_LRU_HASH = 9, + BPF_MAP_TYPE_LRU_PERCPU_HASH = 10, + BPF_MAP_TYPE_LPM_TRIE = 11, + BPF_MAP_TYPE_ARRAY_OF_MAPS = 12, + BPF_MAP_TYPE_HASH_OF_MAPS = 13, + BPF_MAP_TYPE_DEVMAP = 14, + BPF_MAP_TYPE_SOCKMAP = 15, + BPF_MAP_TYPE_CPUMAP = 16, + BPF_MAP_TYPE_XSKMAP = 17, + BPF_MAP_TYPE_SOCKHASH = 18, + BPF_MAP_TYPE_CGROUP_STORAGE = 19, + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 20, + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 21, + BPF_MAP_TYPE_QUEUE = 22, + BPF_MAP_TYPE_STACK = 23, + BPF_MAP_TYPE_SK_STORAGE = 24, + BPF_MAP_TYPE_DEVMAP_HASH = 25, + BPF_MAP_TYPE_STRUCT_OPS = 26, + BPF_MAP_TYPE_RINGBUF = 27, + BPF_MAP_TYPE_INODE_STORAGE = 28, + BPF_MAP_TYPE_TASK_STORAGE = 29, + BPF_MAP_TYPE_BLOOM_FILTER = 30, +} +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum bpf_prog_type { + BPF_PROG_TYPE_UNSPEC = 0, + BPF_PROG_TYPE_SOCKET_FILTER = 1, + BPF_PROG_TYPE_KPROBE = 2, + BPF_PROG_TYPE_SCHED_CLS = 3, + BPF_PROG_TYPE_SCHED_ACT = 4, + BPF_PROG_TYPE_TRACEPOINT = 5, + BPF_PROG_TYPE_XDP = 6, + BPF_PROG_TYPE_PERF_EVENT = 7, + BPF_PROG_TYPE_CGROUP_SKB = 8, + BPF_PROG_TYPE_CGROUP_SOCK = 9, + BPF_PROG_TYPE_LWT_IN = 10, + BPF_PROG_TYPE_LWT_OUT = 11, + BPF_PROG_TYPE_LWT_XMIT = 12, + BPF_PROG_TYPE_SOCK_OPS = 13, + BPF_PROG_TYPE_SK_SKB = 14, + BPF_PROG_TYPE_CGROUP_DEVICE = 15, + BPF_PROG_TYPE_SK_MSG = 16, + BPF_PROG_TYPE_RAW_TRACEPOINT = 17, + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 18, + BPF_PROG_TYPE_LWT_SEG6LOCAL = 19, + BPF_PROG_TYPE_LIRC_MODE2 = 20, + BPF_PROG_TYPE_SK_REUSEPORT = 21, + BPF_PROG_TYPE_FLOW_DISSECTOR = 22, + BPF_PROG_TYPE_CGROUP_SYSCTL = 23, + BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 24, + BPF_PROG_TYPE_CGROUP_SOCKOPT = 25, + BPF_PROG_TYPE_TRACING = 26, + BPF_PROG_TYPE_STRUCT_OPS = 27, + 
BPF_PROG_TYPE_EXT = 28, + BPF_PROG_TYPE_LSM = 29, + BPF_PROG_TYPE_SK_LOOKUP = 30, + BPF_PROG_TYPE_SYSCALL = 31, +} +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum bpf_attach_type { + BPF_CGROUP_INET_INGRESS = 0, + BPF_CGROUP_INET_EGRESS = 1, + BPF_CGROUP_INET_SOCK_CREATE = 2, + BPF_CGROUP_SOCK_OPS = 3, + BPF_SK_SKB_STREAM_PARSER = 4, + BPF_SK_SKB_STREAM_VERDICT = 5, + BPF_CGROUP_DEVICE = 6, + BPF_SK_MSG_VERDICT = 7, + BPF_CGROUP_INET4_BIND = 8, + BPF_CGROUP_INET6_BIND = 9, + BPF_CGROUP_INET4_CONNECT = 10, + BPF_CGROUP_INET6_CONNECT = 11, + BPF_CGROUP_INET4_POST_BIND = 12, + BPF_CGROUP_INET6_POST_BIND = 13, + BPF_CGROUP_UDP4_SENDMSG = 14, + BPF_CGROUP_UDP6_SENDMSG = 15, + BPF_LIRC_MODE2 = 16, + BPF_FLOW_DISSECTOR = 17, + BPF_CGROUP_SYSCTL = 18, + BPF_CGROUP_UDP4_RECVMSG = 19, + BPF_CGROUP_UDP6_RECVMSG = 20, + BPF_CGROUP_GETSOCKOPT = 21, + BPF_CGROUP_SETSOCKOPT = 22, + BPF_TRACE_RAW_TP = 23, + BPF_TRACE_FENTRY = 24, + BPF_TRACE_FEXIT = 25, + BPF_MODIFY_RETURN = 26, + BPF_LSM_MAC = 27, + BPF_TRACE_ITER = 28, + BPF_CGROUP_INET4_GETPEERNAME = 29, + BPF_CGROUP_INET6_GETPEERNAME = 30, + BPF_CGROUP_INET4_GETSOCKNAME = 31, + BPF_CGROUP_INET6_GETSOCKNAME = 32, + BPF_XDP_DEVMAP = 33, + BPF_CGROUP_INET_SOCK_RELEASE = 34, + BPF_XDP_CPUMAP = 35, + BPF_SK_LOOKUP = 36, + BPF_XDP = 37, + BPF_SK_SKB_VERDICT = 38, + BPF_SK_REUSEPORT_SELECT = 39, + BPF_SK_REUSEPORT_SELECT_OR_MIGRATE = 40, + BPF_PERF_EVENT = 41, + BPF_TRACE_KPROBE_MULTI = 42, + __MAX_BPF_ATTACH_TYPE = 43, +} +pub const BPF_ANY: ::std::os::raw::c_uint = 0; +pub const BPF_NOEXIST: ::std::os::raw::c_uint = 1; +pub const BPF_EXIST: ::std::os::raw::c_uint = 2; +pub const BPF_F_LOCK: ::std::os::raw::c_uint = 4; +pub type _bindgen_ty_2 = ::std::os::raw::c_uint; +pub const BPF_F_NO_PREALLOC: ::std::os::raw::c_uint = 1; +pub const BPF_F_NO_COMMON_LRU: ::std::os::raw::c_uint = 2; +pub const BPF_F_NUMA_NODE: ::std::os::raw::c_uint = 4; +pub const BPF_F_RDONLY: ::std::os::raw::c_uint = 8; +pub const BPF_F_WRONLY: ::std::os::raw::c_uint = 16; +pub const BPF_F_STACK_BUILD_ID: ::std::os::raw::c_uint = 32; +pub const BPF_F_ZERO_SEED: ::std::os::raw::c_uint = 64; +pub const BPF_F_RDONLY_PROG: ::std::os::raw::c_uint = 128; +pub const BPF_F_WRONLY_PROG: ::std::os::raw::c_uint = 256; +pub const BPF_F_CLONE: ::std::os::raw::c_uint = 512; +pub const BPF_F_MMAPABLE: ::std::os::raw::c_uint = 1024; +pub const BPF_F_PRESERVE_ELEMS: ::std::os::raw::c_uint = 2048; +pub const BPF_F_INNER_MAP: ::std::os::raw::c_uint = 4096; +pub type _bindgen_ty_3 = ::std::os::raw::c_uint; +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_attr { + pub __bindgen_anon_1: bpf_attr__bindgen_ty_1, + pub __bindgen_anon_2: bpf_attr__bindgen_ty_2, + pub batch: bpf_attr__bindgen_ty_3, + pub __bindgen_anon_3: bpf_attr__bindgen_ty_4, + pub __bindgen_anon_4: bpf_attr__bindgen_ty_5, + pub __bindgen_anon_5: bpf_attr__bindgen_ty_6, + pub test: bpf_attr__bindgen_ty_7, + pub __bindgen_anon_6: bpf_attr__bindgen_ty_8, + pub info: bpf_attr__bindgen_ty_9, + pub query: bpf_attr__bindgen_ty_10, + pub raw_tracepoint: bpf_attr__bindgen_ty_11, + pub __bindgen_anon_7: bpf_attr__bindgen_ty_12, + pub task_fd_query: bpf_attr__bindgen_ty_13, + pub link_create: bpf_attr__bindgen_ty_14, + pub link_update: bpf_attr__bindgen_ty_15, + pub link_detach: bpf_attr__bindgen_ty_16, + pub enable_stats: bpf_attr__bindgen_ty_17, + pub iter_create: bpf_attr__bindgen_ty_18, + pub prog_bind_map: bpf_attr__bindgen_ty_19, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_1 { + pub 
map_type: __u32, + pub key_size: __u32, + pub value_size: __u32, + pub max_entries: __u32, + pub map_flags: __u32, + pub inner_map_fd: __u32, + pub numa_node: __u32, + pub map_name: [::std::os::raw::c_char; 16usize], + pub map_ifindex: __u32, + pub btf_fd: __u32, + pub btf_key_type_id: __u32, + pub btf_value_type_id: __u32, + pub btf_vmlinux_value_type_id: __u32, + pub map_extra: __u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_2 { + pub map_fd: __u32, + pub key: __u64, + pub __bindgen_anon_1: bpf_attr__bindgen_ty_2__bindgen_ty_1, + pub flags: __u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_attr__bindgen_ty_2__bindgen_ty_1 { + pub value: __u64, + pub next_key: __u64, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_3 { + pub in_batch: __u64, + pub out_batch: __u64, + pub keys: __u64, + pub values: __u64, + pub count: __u32, + pub map_fd: __u32, + pub elem_flags: __u64, + pub flags: __u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_4 { + pub prog_type: __u32, + pub insn_cnt: __u32, + pub insns: __u64, + pub license: __u64, + pub log_level: __u32, + pub log_size: __u32, + pub log_buf: __u64, + pub kern_version: __u32, + pub prog_flags: __u32, + pub prog_name: [::std::os::raw::c_char; 16usize], + pub prog_ifindex: __u32, + pub expected_attach_type: __u32, + pub prog_btf_fd: __u32, + pub func_info_rec_size: __u32, + pub func_info: __u64, + pub func_info_cnt: __u32, + pub line_info_rec_size: __u32, + pub line_info: __u64, + pub line_info_cnt: __u32, + pub attach_btf_id: __u32, + pub __bindgen_anon_1: bpf_attr__bindgen_ty_4__bindgen_ty_1, + pub core_relo_cnt: __u32, + pub fd_array: __u64, + pub core_relos: __u64, + pub core_relo_rec_size: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_attr__bindgen_ty_4__bindgen_ty_1 { + pub attach_prog_fd: __u32, + pub attach_btf_obj_fd: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_5 { + pub pathname: __u64, + pub bpf_fd: __u32, + pub file_flags: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_6 { + pub target_fd: __u32, + pub attach_bpf_fd: __u32, + pub attach_type: __u32, + pub attach_flags: __u32, + pub replace_bpf_fd: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_7 { + pub prog_fd: __u32, + pub retval: __u32, + pub data_size_in: __u32, + pub data_size_out: __u32, + pub data_in: __u64, + pub data_out: __u64, + pub repeat: __u32, + pub duration: __u32, + pub ctx_size_in: __u32, + pub ctx_size_out: __u32, + pub ctx_in: __u64, + pub ctx_out: __u64, + pub flags: __u32, + pub cpu: __u32, + pub batch_size: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_8 { + pub __bindgen_anon_1: bpf_attr__bindgen_ty_8__bindgen_ty_1, + pub next_id: __u32, + pub open_flags: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_attr__bindgen_ty_8__bindgen_ty_1 { + pub start_id: __u32, + pub prog_id: __u32, + pub map_id: __u32, + pub btf_id: __u32, + pub link_id: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_9 { + pub bpf_fd: __u32, + pub info_len: __u32, + pub info: __u64, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_10 { + pub target_fd: __u32, + pub attach_type: __u32, + pub query_flags: __u32, + pub attach_flags: __u32, + pub prog_ids: __u64, + pub prog_cnt: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct 
bpf_attr__bindgen_ty_11 { + pub name: __u64, + pub prog_fd: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_12 { + pub btf: __u64, + pub btf_log_buf: __u64, + pub btf_size: __u32, + pub btf_log_size: __u32, + pub btf_log_level: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_13 { + pub pid: __u32, + pub fd: __u32, + pub flags: __u32, + pub buf_len: __u32, + pub buf: __u64, + pub prog_id: __u32, + pub fd_type: __u32, + pub probe_offset: __u64, + pub probe_addr: __u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_14 { + pub prog_fd: __u32, + pub __bindgen_anon_1: bpf_attr__bindgen_ty_14__bindgen_ty_1, + pub attach_type: __u32, + pub flags: __u32, + pub __bindgen_anon_2: bpf_attr__bindgen_ty_14__bindgen_ty_2, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_attr__bindgen_ty_14__bindgen_ty_1 { + pub target_fd: __u32, + pub target_ifindex: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_attr__bindgen_ty_14__bindgen_ty_2 { + pub target_btf_id: __u32, + pub __bindgen_anon_1: bpf_attr__bindgen_ty_14__bindgen_ty_2__bindgen_ty_1, + pub perf_event: bpf_attr__bindgen_ty_14__bindgen_ty_2__bindgen_ty_2, + pub kprobe_multi: bpf_attr__bindgen_ty_14__bindgen_ty_2__bindgen_ty_3, + pub tracing: bpf_attr__bindgen_ty_14__bindgen_ty_2__bindgen_ty_4, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_14__bindgen_ty_2__bindgen_ty_1 { + pub iter_info: __u64, + pub iter_info_len: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_14__bindgen_ty_2__bindgen_ty_2 { + pub bpf_cookie: __u64, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_14__bindgen_ty_2__bindgen_ty_3 { + pub flags: __u32, + pub cnt: __u32, + pub syms: __u64, + pub addrs: __u64, + pub cookies: __u64, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_14__bindgen_ty_2__bindgen_ty_4 { + pub target_btf_id: __u32, + pub cookie: __u64, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_15 { + pub link_fd: __u32, + pub new_prog_fd: __u32, + pub flags: __u32, + pub old_prog_fd: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_16 { + pub link_fd: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_17 { + pub type_: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_18 { + pub link_fd: __u32, + pub flags: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_19 { + pub prog_fd: __u32, + pub map_fd: __u32, + pub flags: __u32, +} +pub const BPF_F_RECOMPUTE_CSUM: ::std::os::raw::c_uint = 1; +pub const BPF_F_INVALIDATE_HASH: ::std::os::raw::c_uint = 2; +pub type _bindgen_ty_4 = ::std::os::raw::c_uint; +pub const BPF_F_HDR_FIELD_MASK: ::std::os::raw::c_uint = 15; +pub type _bindgen_ty_5 = ::std::os::raw::c_uint; +pub const BPF_F_PSEUDO_HDR: ::std::os::raw::c_uint = 16; +pub const BPF_F_MARK_MANGLED_0: ::std::os::raw::c_uint = 32; +pub const BPF_F_MARK_ENFORCE: ::std::os::raw::c_uint = 64; +pub type _bindgen_ty_6 = ::std::os::raw::c_uint; +pub const BPF_F_INGRESS: ::std::os::raw::c_uint = 1; +pub type _bindgen_ty_7 = ::std::os::raw::c_uint; +pub const BPF_F_TUNINFO_IPV6: ::std::os::raw::c_uint = 1; +pub type _bindgen_ty_8 = ::std::os::raw::c_uint; +pub const BPF_F_SKIP_FIELD_MASK: ::std::os::raw::c_uint = 255; +pub const BPF_F_USER_STACK: ::std::os::raw::c_uint = 
256; +pub const BPF_F_FAST_STACK_CMP: ::std::os::raw::c_uint = 512; +pub const BPF_F_REUSE_STACKID: ::std::os::raw::c_uint = 1024; +pub const BPF_F_USER_BUILD_ID: ::std::os::raw::c_uint = 2048; +pub type _bindgen_ty_9 = ::std::os::raw::c_uint; +pub const BPF_F_ZERO_CSUM_TX: ::std::os::raw::c_uint = 2; +pub const BPF_F_DONT_FRAGMENT: ::std::os::raw::c_uint = 4; +pub const BPF_F_SEQ_NUMBER: ::std::os::raw::c_uint = 8; +pub type _bindgen_ty_10 = ::std::os::raw::c_uint; +pub const BPF_F_INDEX_MASK: ::std::os::raw::c_ulong = 4294967295; +pub const BPF_F_CURRENT_CPU: ::std::os::raw::c_ulong = 4294967295; +pub const BPF_F_CTXLEN_MASK: ::std::os::raw::c_ulong = 4503595332403200; +pub type _bindgen_ty_11 = ::std::os::raw::c_ulong; +pub const BPF_F_CURRENT_NETNS: ::std::os::raw::c_int = -1; +pub type _bindgen_ty_12 = ::std::os::raw::c_int; +pub const BPF_F_ADJ_ROOM_FIXED_GSO: ::std::os::raw::c_uint = 1; +pub const BPF_F_ADJ_ROOM_ENCAP_L3_IPV4: ::std::os::raw::c_uint = 2; +pub const BPF_F_ADJ_ROOM_ENCAP_L3_IPV6: ::std::os::raw::c_uint = 4; +pub const BPF_F_ADJ_ROOM_ENCAP_L4_GRE: ::std::os::raw::c_uint = 8; +pub const BPF_F_ADJ_ROOM_ENCAP_L4_UDP: ::std::os::raw::c_uint = 16; +pub const BPF_F_ADJ_ROOM_NO_CSUM_RESET: ::std::os::raw::c_uint = 32; +pub const BPF_F_ADJ_ROOM_ENCAP_L2_ETH: ::std::os::raw::c_uint = 64; +pub type _bindgen_ty_14 = ::std::os::raw::c_uint; +pub const BPF_F_SYSCTL_BASE_NAME: ::std::os::raw::c_uint = 1; +pub type _bindgen_ty_16 = ::std::os::raw::c_uint; +pub const BPF_F_GET_BRANCH_RECORDS_SIZE: ::std::os::raw::c_uint = 1; +pub type _bindgen_ty_18 = ::std::os::raw::c_uint; +pub const BPF_F_BPRM_SECUREEXEC: ::std::os::raw::c_uint = 1; +pub type _bindgen_ty_23 = ::std::os::raw::c_uint; +pub const BPF_F_BROADCAST: ::std::os::raw::c_uint = 8; +pub const BPF_F_EXCLUDE_INGRESS: ::std::os::raw::c_uint = 16; +pub type _bindgen_ty_24 = ::std::os::raw::c_uint; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_prog_info { + pub type_: __u32, + pub id: __u32, + pub tag: [__u8; 8usize], + pub jited_prog_len: __u32, + pub xlated_prog_len: __u32, + pub jited_prog_insns: __u64, + pub xlated_prog_insns: __u64, + pub load_time: __u64, + pub created_by_uid: __u32, + pub nr_map_ids: __u32, + pub map_ids: __u64, + pub name: [::std::os::raw::c_char; 16usize], + pub ifindex: __u32, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 4usize]>, + pub netns_dev: __u64, + pub netns_ino: __u64, + pub nr_jited_ksyms: __u32, + pub nr_jited_func_lens: __u32, + pub jited_ksyms: __u64, + pub jited_func_lens: __u64, + pub btf_id: __u32, + pub func_info_rec_size: __u32, + pub func_info: __u64, + pub nr_func_info: __u32, + pub nr_line_info: __u32, + pub line_info: __u64, + pub jited_line_info: __u64, + pub nr_jited_line_info: __u32, + pub line_info_rec_size: __u32, + pub jited_line_info_rec_size: __u32, + pub nr_prog_tags: __u32, + pub prog_tags: __u64, + pub run_time_ns: __u64, + pub run_cnt: __u64, + pub recursion_misses: __u64, + pub verified_insns: __u32, +} +impl bpf_prog_info { + #[inline] + pub fn gpl_compatible(&self) -> __u32 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u32) } + } + #[inline] + pub fn set_gpl_compatible(&mut self, val: __u32) { + unsafe { + let val: u32 = ::std::mem::transmute(val); + self._bitfield_1.set(0usize, 1u8, val as u64) + } + } + #[inline] + pub fn new_bitfield_1(gpl_compatible: __u32) -> __BindgenBitfieldUnit<[u8; 4usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default(); + 
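For orientation, the bpf_cmd enum and the bpf_attr union generated above are the arguments of the bpf(2) syscall. A minimal, hypothetical sketch of creating a hash map with them, assuming the libc crate is available and that these generated items are reachable through a module named `bindings` (aya's own syscall wrappers are not shown here):

    use std::{io, mem};

    // Sketch only: create a BPF_MAP_TYPE_HASH map with 4-byte keys and 8-byte values.
    unsafe fn create_hash_map() -> Result<libc::c_long, io::Error> {
        let mut attr: bindings::bpf_attr = mem::zeroed();
        attr.__bindgen_anon_1.map_type = bindings::bpf_map_type::BPF_MAP_TYPE_HASH as u32;
        attr.__bindgen_anon_1.key_size = 4;
        attr.__bindgen_anon_1.value_size = 8;
        attr.__bindgen_anon_1.max_entries = 1024;
        let fd = libc::syscall(
            libc::SYS_bpf,
            bindings::bpf_cmd::BPF_MAP_CREATE as libc::c_int,
            &mut attr as *mut bindings::bpf_attr,
            mem::size_of::<bindings::bpf_attr>(),
        );
        if fd < 0 {
            Err(io::Error::last_os_error())
        } else {
            Ok(fd) // file descriptor of the new map
        }
    }
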
__bindgen_bitfield_unit.set(0usize, 1u8, { + let gpl_compatible: u32 = unsafe { ::std::mem::transmute(gpl_compatible) }; + gpl_compatible as u64 + }); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_btf_info { + pub btf: __u64, + pub btf_size: __u32, + pub id: __u32, + pub name: __u64, + pub name_len: __u32, + pub kernel_btf: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_func_info { + pub insn_off: __u32, + pub type_id: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_line_info { + pub insn_off: __u32, + pub file_name_off: __u32, + pub line_off: __u32, + pub line_col: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct btf_header { + pub magic: __u16, + pub version: __u8, + pub flags: __u8, + pub hdr_len: __u32, + pub type_off: __u32, + pub type_len: __u32, + pub str_off: __u32, + pub str_len: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct btf_type { + pub name_off: __u32, + pub info: __u32, + pub __bindgen_anon_1: btf_type__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union btf_type__bindgen_ty_1 { + pub size: __u32, + pub type_: __u32, +} +pub const BTF_KIND_UNKN: ::std::os::raw::c_uint = 0; +pub const BTF_KIND_INT: ::std::os::raw::c_uint = 1; +pub const BTF_KIND_PTR: ::std::os::raw::c_uint = 2; +pub const BTF_KIND_ARRAY: ::std::os::raw::c_uint = 3; +pub const BTF_KIND_STRUCT: ::std::os::raw::c_uint = 4; +pub const BTF_KIND_UNION: ::std::os::raw::c_uint = 5; +pub const BTF_KIND_ENUM: ::std::os::raw::c_uint = 6; +pub const BTF_KIND_FWD: ::std::os::raw::c_uint = 7; +pub const BTF_KIND_TYPEDEF: ::std::os::raw::c_uint = 8; +pub const BTF_KIND_VOLATILE: ::std::os::raw::c_uint = 9; +pub const BTF_KIND_CONST: ::std::os::raw::c_uint = 10; +pub const BTF_KIND_RESTRICT: ::std::os::raw::c_uint = 11; +pub const BTF_KIND_FUNC: ::std::os::raw::c_uint = 12; +pub const BTF_KIND_FUNC_PROTO: ::std::os::raw::c_uint = 13; +pub const BTF_KIND_VAR: ::std::os::raw::c_uint = 14; +pub const BTF_KIND_DATASEC: ::std::os::raw::c_uint = 15; +pub const BTF_KIND_FLOAT: ::std::os::raw::c_uint = 16; +pub const BTF_KIND_DECL_TAG: ::std::os::raw::c_uint = 17; +pub const BTF_KIND_TYPE_TAG: ::std::os::raw::c_uint = 18; +pub const NR_BTF_KINDS: ::std::os::raw::c_uint = 19; +pub const BTF_KIND_MAX: ::std::os::raw::c_uint = 18; +pub type _bindgen_ty_38 = ::std::os::raw::c_uint; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct btf_enum { + pub name_off: __u32, + pub val: __s32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct btf_array { + pub type_: __u32, + pub index_type: __u32, + pub nelems: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct btf_member { + pub name_off: __u32, + pub type_: __u32, + pub offset: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct btf_param { + pub name_off: __u32, + pub type_: __u32, +} +pub const BTF_VAR_STATIC: ::std::os::raw::c_uint = 0; +pub const BTF_VAR_GLOBAL_ALLOCATED: ::std::os::raw::c_uint = 1; +pub const BTF_VAR_GLOBAL_EXTERN: ::std::os::raw::c_uint = 2; +pub type _bindgen_ty_39 = ::std::os::raw::c_uint; +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum btf_func_linkage { + BTF_FUNC_STATIC = 0, + BTF_FUNC_GLOBAL = 1, + BTF_FUNC_EXTERN = 2, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct btf_var { + pub linkage: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct btf_var_secinfo { + pub type_: __u32, + pub offset: __u32, + pub size: __u32, +} +#[repr(C)] +#[derive(Debug, 
Copy, Clone)] +pub struct btf_decl_tag { + pub component_idx: __s32, +} +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum perf_type_id { + PERF_TYPE_HARDWARE = 0, + PERF_TYPE_SOFTWARE = 1, + PERF_TYPE_TRACEPOINT = 2, + PERF_TYPE_HW_CACHE = 3, + PERF_TYPE_RAW = 4, + PERF_TYPE_BREAKPOINT = 5, + PERF_TYPE_MAX = 6, +} +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum perf_hw_id { + PERF_COUNT_HW_CPU_CYCLES = 0, + PERF_COUNT_HW_INSTRUCTIONS = 1, + PERF_COUNT_HW_CACHE_REFERENCES = 2, + PERF_COUNT_HW_CACHE_MISSES = 3, + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, + PERF_COUNT_HW_BRANCH_MISSES = 5, + PERF_COUNT_HW_BUS_CYCLES = 6, + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7, + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8, + PERF_COUNT_HW_REF_CPU_CYCLES = 9, + PERF_COUNT_HW_MAX = 10, +} +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum perf_hw_cache_id { + PERF_COUNT_HW_CACHE_L1D = 0, + PERF_COUNT_HW_CACHE_L1I = 1, + PERF_COUNT_HW_CACHE_LL = 2, + PERF_COUNT_HW_CACHE_DTLB = 3, + PERF_COUNT_HW_CACHE_ITLB = 4, + PERF_COUNT_HW_CACHE_BPU = 5, + PERF_COUNT_HW_CACHE_NODE = 6, + PERF_COUNT_HW_CACHE_MAX = 7, +} +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum perf_hw_cache_op_id { + PERF_COUNT_HW_CACHE_OP_READ = 0, + PERF_COUNT_HW_CACHE_OP_WRITE = 1, + PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, + PERF_COUNT_HW_CACHE_OP_MAX = 3, +} +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum perf_hw_cache_op_result_id { + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, + PERF_COUNT_HW_CACHE_RESULT_MISS = 1, + PERF_COUNT_HW_CACHE_RESULT_MAX = 2, +} +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum perf_sw_ids { + PERF_COUNT_SW_CPU_CLOCK = 0, + PERF_COUNT_SW_TASK_CLOCK = 1, + PERF_COUNT_SW_PAGE_FAULTS = 2, + PERF_COUNT_SW_CONTEXT_SWITCHES = 3, + PERF_COUNT_SW_CPU_MIGRATIONS = 4, + PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, + PERF_COUNT_SW_ALIGNMENT_FAULTS = 7, + PERF_COUNT_SW_EMULATION_FAULTS = 8, + PERF_COUNT_SW_DUMMY = 9, + PERF_COUNT_SW_BPF_OUTPUT = 10, + PERF_COUNT_SW_CGROUP_SWITCHES = 11, + PERF_COUNT_SW_MAX = 12, +} +#[repr(u64)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum perf_event_sample_format { + PERF_SAMPLE_IP = 1, + PERF_SAMPLE_TID = 2, + PERF_SAMPLE_TIME = 4, + PERF_SAMPLE_ADDR = 8, + PERF_SAMPLE_READ = 16, + PERF_SAMPLE_CALLCHAIN = 32, + PERF_SAMPLE_ID = 64, + PERF_SAMPLE_CPU = 128, + PERF_SAMPLE_PERIOD = 256, + PERF_SAMPLE_STREAM_ID = 512, + PERF_SAMPLE_RAW = 1024, + PERF_SAMPLE_BRANCH_STACK = 2048, + PERF_SAMPLE_REGS_USER = 4096, + PERF_SAMPLE_STACK_USER = 8192, + PERF_SAMPLE_WEIGHT = 16384, + PERF_SAMPLE_DATA_SRC = 32768, + PERF_SAMPLE_IDENTIFIER = 65536, + PERF_SAMPLE_TRANSACTION = 131072, + PERF_SAMPLE_REGS_INTR = 262144, + PERF_SAMPLE_PHYS_ADDR = 524288, + PERF_SAMPLE_AUX = 1048576, + PERF_SAMPLE_CGROUP = 2097152, + PERF_SAMPLE_DATA_PAGE_SIZE = 4194304, + PERF_SAMPLE_CODE_PAGE_SIZE = 8388608, + PERF_SAMPLE_WEIGHT_STRUCT = 16777216, + PERF_SAMPLE_MAX = 33554432, + __PERF_SAMPLE_CALLCHAIN_EARLY = 9223372036854775808, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct perf_event_attr { + pub type_: __u32, + pub size: __u32, + pub config: __u64, + pub __bindgen_anon_1: perf_event_attr__bindgen_ty_1, + pub sample_type: __u64, + pub read_format: __u64, + pub _bitfield_align_1: [u32; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, + pub __bindgen_anon_2: perf_event_attr__bindgen_ty_2, + pub bp_type: __u32, + pub 
__bindgen_anon_3: perf_event_attr__bindgen_ty_3, + pub __bindgen_anon_4: perf_event_attr__bindgen_ty_4, + pub branch_sample_type: __u64, + pub sample_regs_user: __u64, + pub sample_stack_user: __u32, + pub clockid: __s32, + pub sample_regs_intr: __u64, + pub aux_watermark: __u32, + pub sample_max_stack: __u16, + pub __reserved_2: __u16, + pub aux_sample_size: __u32, + pub __reserved_3: __u32, + pub sig_data: __u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union perf_event_attr__bindgen_ty_1 { + pub sample_period: __u64, + pub sample_freq: __u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union perf_event_attr__bindgen_ty_2 { + pub wakeup_events: __u32, + pub wakeup_watermark: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union perf_event_attr__bindgen_ty_3 { + pub bp_addr: __u64, + pub kprobe_func: __u64, + pub uprobe_path: __u64, + pub config1: __u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union perf_event_attr__bindgen_ty_4 { + pub bp_len: __u64, + pub kprobe_addr: __u64, + pub probe_offset: __u64, + pub config2: __u64, +} +impl perf_event_attr { + #[inline] + pub fn disabled(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u64) } + } + #[inline] + pub fn set_disabled(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(0usize, 1u8, val as u64) + } + } + #[inline] + pub fn inherit(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u64) } + } + #[inline] + pub fn set_inherit(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(1usize, 1u8, val as u64) + } + } + #[inline] + pub fn pinned(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u64) } + } + #[inline] + pub fn set_pinned(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(2usize, 1u8, val as u64) + } + } + #[inline] + pub fn exclusive(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(3usize, 1u8) as u64) } + } + #[inline] + pub fn set_exclusive(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(3usize, 1u8, val as u64) + } + } + #[inline] + pub fn exclude_user(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(4usize, 1u8) as u64) } + } + #[inline] + pub fn set_exclude_user(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(4usize, 1u8, val as u64) + } + } + #[inline] + pub fn exclude_kernel(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(5usize, 1u8) as u64) } + } + #[inline] + pub fn set_exclude_kernel(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(5usize, 1u8, val as u64) + } + } + #[inline] + pub fn exclude_hv(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(6usize, 1u8) as u64) } + } + #[inline] + pub fn set_exclude_hv(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(6usize, 1u8, val as u64) + } + } + #[inline] + pub fn exclude_idle(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(7usize, 1u8) as u64) } + } + #[inline] + pub fn set_exclude_idle(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(7usize, 1u8, val as u64) + } + } + #[inline] + pub fn mmap(&self) -> __u64 { + 
unsafe { ::std::mem::transmute(self._bitfield_1.get(8usize, 1u8) as u64) } + } + #[inline] + pub fn set_mmap(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(8usize, 1u8, val as u64) + } + } + #[inline] + pub fn comm(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(9usize, 1u8) as u64) } + } + #[inline] + pub fn set_comm(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(9usize, 1u8, val as u64) + } + } + #[inline] + pub fn freq(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(10usize, 1u8) as u64) } + } + #[inline] + pub fn set_freq(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(10usize, 1u8, val as u64) + } + } + #[inline] + pub fn inherit_stat(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(11usize, 1u8) as u64) } + } + #[inline] + pub fn set_inherit_stat(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(11usize, 1u8, val as u64) + } + } + #[inline] + pub fn enable_on_exec(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(12usize, 1u8) as u64) } + } + #[inline] + pub fn set_enable_on_exec(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(12usize, 1u8, val as u64) + } + } + #[inline] + pub fn task(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(13usize, 1u8) as u64) } + } + #[inline] + pub fn set_task(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(13usize, 1u8, val as u64) + } + } + #[inline] + pub fn watermark(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(14usize, 1u8) as u64) } + } + #[inline] + pub fn set_watermark(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(14usize, 1u8, val as u64) + } + } + #[inline] + pub fn precise_ip(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(15usize, 2u8) as u64) } + } + #[inline] + pub fn set_precise_ip(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(15usize, 2u8, val as u64) + } + } + #[inline] + pub fn mmap_data(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(17usize, 1u8) as u64) } + } + #[inline] + pub fn set_mmap_data(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(17usize, 1u8, val as u64) + } + } + #[inline] + pub fn sample_id_all(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(18usize, 1u8) as u64) } + } + #[inline] + pub fn set_sample_id_all(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(18usize, 1u8, val as u64) + } + } + #[inline] + pub fn exclude_host(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(19usize, 1u8) as u64) } + } + #[inline] + pub fn set_exclude_host(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(19usize, 1u8, val as u64) + } + } + #[inline] + pub fn exclude_guest(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(20usize, 1u8) as u64) } + } + #[inline] + pub fn set_exclude_guest(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + 
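The perf_event_attr accessors being generated here (set_disabled, set_exclude_kernel, and so on) are how individual flags in the packed 64-bit flag word are toggled. A hypothetical sketch of opening a software CPU-clock event through perf_event_open(2), again assuming the libc crate and a `bindings` module as in the earlier sketch:

    use std::{io, mem};

    // Sketch only: per-CPU software clock event, created in the disabled state.
    unsafe fn open_cpu_clock(cpu: i32) -> Result<i32, io::Error> {
        let mut attr: bindings::perf_event_attr = mem::zeroed();
        attr.type_ = bindings::perf_type_id::PERF_TYPE_SOFTWARE as u32;
        attr.size = mem::size_of::<bindings::perf_event_attr>() as u32;
        attr.config = bindings::perf_sw_ids::PERF_COUNT_SW_CPU_CLOCK as u64;
        attr.__bindgen_anon_1.sample_period = 1;
        attr.set_disabled(1); // packed flag, set through the generated accessor
        let fd = libc::syscall(
            libc::SYS_perf_event_open,
            &attr as *const bindings::perf_event_attr,
            -1i32, // pid: any process
            cpu,   // only this CPU
            -1i32, // no group leader
            bindings::PERF_FLAG_FD_CLOEXEC as libc::c_ulong,
        );
        if fd < 0 {
            Err(io::Error::last_os_error())
        } else {
            Ok(fd as i32)
        }
    }
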
self._bitfield_1.set(20usize, 1u8, val as u64) + } + } + #[inline] + pub fn exclude_callchain_kernel(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(21usize, 1u8) as u64) } + } + #[inline] + pub fn set_exclude_callchain_kernel(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(21usize, 1u8, val as u64) + } + } + #[inline] + pub fn exclude_callchain_user(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(22usize, 1u8) as u64) } + } + #[inline] + pub fn set_exclude_callchain_user(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(22usize, 1u8, val as u64) + } + } + #[inline] + pub fn mmap2(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(23usize, 1u8) as u64) } + } + #[inline] + pub fn set_mmap2(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(23usize, 1u8, val as u64) + } + } + #[inline] + pub fn comm_exec(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(24usize, 1u8) as u64) } + } + #[inline] + pub fn set_comm_exec(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(24usize, 1u8, val as u64) + } + } + #[inline] + pub fn use_clockid(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(25usize, 1u8) as u64) } + } + #[inline] + pub fn set_use_clockid(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(25usize, 1u8, val as u64) + } + } + #[inline] + pub fn context_switch(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(26usize, 1u8) as u64) } + } + #[inline] + pub fn set_context_switch(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(26usize, 1u8, val as u64) + } + } + #[inline] + pub fn write_backward(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(27usize, 1u8) as u64) } + } + #[inline] + pub fn set_write_backward(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(27usize, 1u8, val as u64) + } + } + #[inline] + pub fn namespaces(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(28usize, 1u8) as u64) } + } + #[inline] + pub fn set_namespaces(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(28usize, 1u8, val as u64) + } + } + #[inline] + pub fn ksymbol(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(29usize, 1u8) as u64) } + } + #[inline] + pub fn set_ksymbol(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(29usize, 1u8, val as u64) + } + } + #[inline] + pub fn bpf_event(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(30usize, 1u8) as u64) } + } + #[inline] + pub fn set_bpf_event(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(30usize, 1u8, val as u64) + } + } + #[inline] + pub fn aux_output(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(31usize, 1u8) as u64) } + } + #[inline] + pub fn set_aux_output(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(31usize, 1u8, val as u64) + } + } + #[inline] + pub fn cgroup(&self) -> __u64 { + unsafe { 
::std::mem::transmute(self._bitfield_1.get(32usize, 1u8) as u64) } + } + #[inline] + pub fn set_cgroup(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(32usize, 1u8, val as u64) + } + } + #[inline] + pub fn text_poke(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(33usize, 1u8) as u64) } + } + #[inline] + pub fn set_text_poke(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(33usize, 1u8, val as u64) + } + } + #[inline] + pub fn build_id(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(34usize, 1u8) as u64) } + } + #[inline] + pub fn set_build_id(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(34usize, 1u8, val as u64) + } + } + #[inline] + pub fn inherit_thread(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(35usize, 1u8) as u64) } + } + #[inline] + pub fn set_inherit_thread(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(35usize, 1u8, val as u64) + } + } + #[inline] + pub fn remove_on_exec(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(36usize, 1u8) as u64) } + } + #[inline] + pub fn set_remove_on_exec(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(36usize, 1u8, val as u64) + } + } + #[inline] + pub fn sigtrap(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(37usize, 1u8) as u64) } + } + #[inline] + pub fn set_sigtrap(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(37usize, 1u8, val as u64) + } + } + #[inline] + pub fn __reserved_1(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(38usize, 26u8) as u64) } + } + #[inline] + pub fn set___reserved_1(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(38usize, 26u8, val as u64) + } + } + #[inline] + pub fn new_bitfield_1( + disabled: __u64, + inherit: __u64, + pinned: __u64, + exclusive: __u64, + exclude_user: __u64, + exclude_kernel: __u64, + exclude_hv: __u64, + exclude_idle: __u64, + mmap: __u64, + comm: __u64, + freq: __u64, + inherit_stat: __u64, + enable_on_exec: __u64, + task: __u64, + watermark: __u64, + precise_ip: __u64, + mmap_data: __u64, + sample_id_all: __u64, + exclude_host: __u64, + exclude_guest: __u64, + exclude_callchain_kernel: __u64, + exclude_callchain_user: __u64, + mmap2: __u64, + comm_exec: __u64, + use_clockid: __u64, + context_switch: __u64, + write_backward: __u64, + namespaces: __u64, + ksymbol: __u64, + bpf_event: __u64, + aux_output: __u64, + cgroup: __u64, + text_poke: __u64, + build_id: __u64, + inherit_thread: __u64, + remove_on_exec: __u64, + sigtrap: __u64, + __reserved_1: __u64, + ) -> __BindgenBitfieldUnit<[u8; 8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + __bindgen_bitfield_unit.set(0usize, 1u8, { + let disabled: u64 = unsafe { ::std::mem::transmute(disabled) }; + disabled as u64 + }); + __bindgen_bitfield_unit.set(1usize, 1u8, { + let inherit: u64 = unsafe { ::std::mem::transmute(inherit) }; + inherit as u64 + }); + __bindgen_bitfield_unit.set(2usize, 1u8, { + let pinned: u64 = unsafe { ::std::mem::transmute(pinned) }; + pinned as u64 + }); + __bindgen_bitfield_unit.set(3usize, 1u8, { + let exclusive: u64 = unsafe { 
::std::mem::transmute(exclusive) }; + exclusive as u64 + }); + __bindgen_bitfield_unit.set(4usize, 1u8, { + let exclude_user: u64 = unsafe { ::std::mem::transmute(exclude_user) }; + exclude_user as u64 + }); + __bindgen_bitfield_unit.set(5usize, 1u8, { + let exclude_kernel: u64 = unsafe { ::std::mem::transmute(exclude_kernel) }; + exclude_kernel as u64 + }); + __bindgen_bitfield_unit.set(6usize, 1u8, { + let exclude_hv: u64 = unsafe { ::std::mem::transmute(exclude_hv) }; + exclude_hv as u64 + }); + __bindgen_bitfield_unit.set(7usize, 1u8, { + let exclude_idle: u64 = unsafe { ::std::mem::transmute(exclude_idle) }; + exclude_idle as u64 + }); + __bindgen_bitfield_unit.set(8usize, 1u8, { + let mmap: u64 = unsafe { ::std::mem::transmute(mmap) }; + mmap as u64 + }); + __bindgen_bitfield_unit.set(9usize, 1u8, { + let comm: u64 = unsafe { ::std::mem::transmute(comm) }; + comm as u64 + }); + __bindgen_bitfield_unit.set(10usize, 1u8, { + let freq: u64 = unsafe { ::std::mem::transmute(freq) }; + freq as u64 + }); + __bindgen_bitfield_unit.set(11usize, 1u8, { + let inherit_stat: u64 = unsafe { ::std::mem::transmute(inherit_stat) }; + inherit_stat as u64 + }); + __bindgen_bitfield_unit.set(12usize, 1u8, { + let enable_on_exec: u64 = unsafe { ::std::mem::transmute(enable_on_exec) }; + enable_on_exec as u64 + }); + __bindgen_bitfield_unit.set(13usize, 1u8, { + let task: u64 = unsafe { ::std::mem::transmute(task) }; + task as u64 + }); + __bindgen_bitfield_unit.set(14usize, 1u8, { + let watermark: u64 = unsafe { ::std::mem::transmute(watermark) }; + watermark as u64 + }); + __bindgen_bitfield_unit.set(15usize, 2u8, { + let precise_ip: u64 = unsafe { ::std::mem::transmute(precise_ip) }; + precise_ip as u64 + }); + __bindgen_bitfield_unit.set(17usize, 1u8, { + let mmap_data: u64 = unsafe { ::std::mem::transmute(mmap_data) }; + mmap_data as u64 + }); + __bindgen_bitfield_unit.set(18usize, 1u8, { + let sample_id_all: u64 = unsafe { ::std::mem::transmute(sample_id_all) }; + sample_id_all as u64 + }); + __bindgen_bitfield_unit.set(19usize, 1u8, { + let exclude_host: u64 = unsafe { ::std::mem::transmute(exclude_host) }; + exclude_host as u64 + }); + __bindgen_bitfield_unit.set(20usize, 1u8, { + let exclude_guest: u64 = unsafe { ::std::mem::transmute(exclude_guest) }; + exclude_guest as u64 + }); + __bindgen_bitfield_unit.set(21usize, 1u8, { + let exclude_callchain_kernel: u64 = + unsafe { ::std::mem::transmute(exclude_callchain_kernel) }; + exclude_callchain_kernel as u64 + }); + __bindgen_bitfield_unit.set(22usize, 1u8, { + let exclude_callchain_user: u64 = + unsafe { ::std::mem::transmute(exclude_callchain_user) }; + exclude_callchain_user as u64 + }); + __bindgen_bitfield_unit.set(23usize, 1u8, { + let mmap2: u64 = unsafe { ::std::mem::transmute(mmap2) }; + mmap2 as u64 + }); + __bindgen_bitfield_unit.set(24usize, 1u8, { + let comm_exec: u64 = unsafe { ::std::mem::transmute(comm_exec) }; + comm_exec as u64 + }); + __bindgen_bitfield_unit.set(25usize, 1u8, { + let use_clockid: u64 = unsafe { ::std::mem::transmute(use_clockid) }; + use_clockid as u64 + }); + __bindgen_bitfield_unit.set(26usize, 1u8, { + let context_switch: u64 = unsafe { ::std::mem::transmute(context_switch) }; + context_switch as u64 + }); + __bindgen_bitfield_unit.set(27usize, 1u8, { + let write_backward: u64 = unsafe { ::std::mem::transmute(write_backward) }; + write_backward as u64 + }); + __bindgen_bitfield_unit.set(28usize, 1u8, { + let namespaces: u64 = unsafe { ::std::mem::transmute(namespaces) }; + namespaces as u64 + }); + 
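Continuing the sketch above: an event created disabled is switched on with the PERF_EVENT_IOC_ENABLE ioctl. The AYA_PERF_EVENT_IOC_* constants near the end of this file carry the pre-expanded request numbers for those ioctls, since bindgen does not expand the function-like _IO() macros. Hypothetical usage, assuming libc (the request argument type is c_ulong on glibc targets):

    use std::io;

    // Sketch only: enable the event opened by open_cpu_clock().
    unsafe fn enable_event(fd: i32) -> Result<(), io::Error> {
        if libc::ioctl(fd, bindings::AYA_PERF_EVENT_IOC_ENABLE as libc::c_ulong) < 0 {
            return Err(io::Error::last_os_error());
        }
        Ok(())
    }
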
__bindgen_bitfield_unit.set(29usize, 1u8, { + let ksymbol: u64 = unsafe { ::std::mem::transmute(ksymbol) }; + ksymbol as u64 + }); + __bindgen_bitfield_unit.set(30usize, 1u8, { + let bpf_event: u64 = unsafe { ::std::mem::transmute(bpf_event) }; + bpf_event as u64 + }); + __bindgen_bitfield_unit.set(31usize, 1u8, { + let aux_output: u64 = unsafe { ::std::mem::transmute(aux_output) }; + aux_output as u64 + }); + __bindgen_bitfield_unit.set(32usize, 1u8, { + let cgroup: u64 = unsafe { ::std::mem::transmute(cgroup) }; + cgroup as u64 + }); + __bindgen_bitfield_unit.set(33usize, 1u8, { + let text_poke: u64 = unsafe { ::std::mem::transmute(text_poke) }; + text_poke as u64 + }); + __bindgen_bitfield_unit.set(34usize, 1u8, { + let build_id: u64 = unsafe { ::std::mem::transmute(build_id) }; + build_id as u64 + }); + __bindgen_bitfield_unit.set(35usize, 1u8, { + let inherit_thread: u64 = unsafe { ::std::mem::transmute(inherit_thread) }; + inherit_thread as u64 + }); + __bindgen_bitfield_unit.set(36usize, 1u8, { + let remove_on_exec: u64 = unsafe { ::std::mem::transmute(remove_on_exec) }; + remove_on_exec as u64 + }); + __bindgen_bitfield_unit.set(37usize, 1u8, { + let sigtrap: u64 = unsafe { ::std::mem::transmute(sigtrap) }; + sigtrap as u64 + }); + __bindgen_bitfield_unit.set(38usize, 26u8, { + let __reserved_1: u64 = unsafe { ::std::mem::transmute(__reserved_1) }; + __reserved_1 as u64 + }); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct perf_event_mmap_page { + pub version: __u32, + pub compat_version: __u32, + pub lock: __u32, + pub index: __u32, + pub offset: __s64, + pub time_enabled: __u64, + pub time_running: __u64, + pub __bindgen_anon_1: perf_event_mmap_page__bindgen_ty_1, + pub pmc_width: __u16, + pub time_shift: __u16, + pub time_mult: __u32, + pub time_offset: __u64, + pub time_zero: __u64, + pub size: __u32, + pub __reserved_1: __u32, + pub time_cycles: __u64, + pub time_mask: __u64, + pub __reserved: [__u8; 928usize], + pub data_head: __u64, + pub data_tail: __u64, + pub data_offset: __u64, + pub data_size: __u64, + pub aux_head: __u64, + pub aux_tail: __u64, + pub aux_offset: __u64, + pub aux_size: __u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union perf_event_mmap_page__bindgen_ty_1 { + pub capabilities: __u64, + pub __bindgen_anon_1: perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1, +} +#[repr(C)] +#[repr(align(8))] +#[derive(Debug, Copy, Clone)] +pub struct perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1 { + pub _bitfield_align_1: [u64; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, +} +impl perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1 { + #[inline] + pub fn cap_bit0(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u64) } + } + #[inline] + pub fn set_cap_bit0(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(0usize, 1u8, val as u64) + } + } + #[inline] + pub fn cap_bit0_is_deprecated(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u64) } + } + #[inline] + pub fn set_cap_bit0_is_deprecated(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(1usize, 1u8, val as u64) + } + } + #[inline] + pub fn cap_user_rdpmc(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u64) } + } + #[inline] + pub fn set_cap_user_rdpmc(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + 
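The perf_event_mmap_page defined above is the metadata page of the mmapped perf ring buffer: the kernel advances data_head, userspace consumes records and then advances data_tail. A simplified, hypothetical sketch of checking for a pending record, assuming a kernel new enough to populate data_offset/data_size and ignoring records that wrap around the end of the ring:

    use std::{ptr, sync::atomic::{fence, Ordering}};

    // Sketch only: peek at the size of the next pending record, if any.
    // `page` points at the mmapped perf_event_mmap_page.
    unsafe fn next_record_size(page: *const bindings::perf_event_mmap_page) -> Option<u16> {
        let head = ptr::read_volatile(&(*page).data_head);
        let tail = ptr::read_volatile(&(*page).data_tail);
        fence(Ordering::Acquire); // pairs with the kernel's store to data_head
        if head == tail {
            return None; // ring is empty
        }
        let size = (*page).data_size;
        let base = (page as *const u8).add((*page).data_offset as usize);
        let header = base.add((tail % size) as usize) as *const bindings::perf_event_header;
        Some((*header).size)
    }
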
self._bitfield_1.set(2usize, 1u8, val as u64) + } + } + #[inline] + pub fn cap_user_time(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(3usize, 1u8) as u64) } + } + #[inline] + pub fn set_cap_user_time(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(3usize, 1u8, val as u64) + } + } + #[inline] + pub fn cap_user_time_zero(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(4usize, 1u8) as u64) } + } + #[inline] + pub fn set_cap_user_time_zero(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(4usize, 1u8, val as u64) + } + } + #[inline] + pub fn cap_user_time_short(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(5usize, 1u8) as u64) } + } + #[inline] + pub fn set_cap_user_time_short(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(5usize, 1u8, val as u64) + } + } + #[inline] + pub fn cap_____res(&self) -> __u64 { + unsafe { ::std::mem::transmute(self._bitfield_1.get(6usize, 58u8) as u64) } + } + #[inline] + pub fn set_cap_____res(&mut self, val: __u64) { + unsafe { + let val: u64 = ::std::mem::transmute(val); + self._bitfield_1.set(6usize, 58u8, val as u64) + } + } + #[inline] + pub fn new_bitfield_1( + cap_bit0: __u64, + cap_bit0_is_deprecated: __u64, + cap_user_rdpmc: __u64, + cap_user_time: __u64, + cap_user_time_zero: __u64, + cap_user_time_short: __u64, + cap_____res: __u64, + ) -> __BindgenBitfieldUnit<[u8; 8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + __bindgen_bitfield_unit.set(0usize, 1u8, { + let cap_bit0: u64 = unsafe { ::std::mem::transmute(cap_bit0) }; + cap_bit0 as u64 + }); + __bindgen_bitfield_unit.set(1usize, 1u8, { + let cap_bit0_is_deprecated: u64 = + unsafe { ::std::mem::transmute(cap_bit0_is_deprecated) }; + cap_bit0_is_deprecated as u64 + }); + __bindgen_bitfield_unit.set(2usize, 1u8, { + let cap_user_rdpmc: u64 = unsafe { ::std::mem::transmute(cap_user_rdpmc) }; + cap_user_rdpmc as u64 + }); + __bindgen_bitfield_unit.set(3usize, 1u8, { + let cap_user_time: u64 = unsafe { ::std::mem::transmute(cap_user_time) }; + cap_user_time as u64 + }); + __bindgen_bitfield_unit.set(4usize, 1u8, { + let cap_user_time_zero: u64 = unsafe { ::std::mem::transmute(cap_user_time_zero) }; + cap_user_time_zero as u64 + }); + __bindgen_bitfield_unit.set(5usize, 1u8, { + let cap_user_time_short: u64 = unsafe { ::std::mem::transmute(cap_user_time_short) }; + cap_user_time_short as u64 + }); + __bindgen_bitfield_unit.set(6usize, 58u8, { + let cap_____res: u64 = unsafe { ::std::mem::transmute(cap_____res) }; + cap_____res as u64 + }); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct perf_event_header { + pub type_: __u32, + pub misc: __u16, + pub size: __u16, +} +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum perf_event_type { + PERF_RECORD_MMAP = 1, + PERF_RECORD_LOST = 2, + PERF_RECORD_COMM = 3, + PERF_RECORD_EXIT = 4, + PERF_RECORD_THROTTLE = 5, + PERF_RECORD_UNTHROTTLE = 6, + PERF_RECORD_FORK = 7, + PERF_RECORD_READ = 8, + PERF_RECORD_SAMPLE = 9, + PERF_RECORD_MMAP2 = 10, + PERF_RECORD_AUX = 11, + PERF_RECORD_ITRACE_START = 12, + PERF_RECORD_LOST_SAMPLES = 13, + PERF_RECORD_SWITCH = 14, + PERF_RECORD_SWITCH_CPU_WIDE = 15, + PERF_RECORD_NAMESPACES = 16, + PERF_RECORD_KSYMBOL = 17, + PERF_RECORD_BPF_EVENT = 18, + PERF_RECORD_CGROUP = 19, + 
PERF_RECORD_TEXT_POKE = 20, + PERF_RECORD_AUX_OUTPUT_HW_ID = 21, + PERF_RECORD_MAX = 22, +} +pub const IFLA_XDP_UNSPEC: _bindgen_ty_85 = _bindgen_ty_85::IFLA_XDP_UNSPEC; +pub const IFLA_XDP_FD: _bindgen_ty_85 = _bindgen_ty_85::IFLA_XDP_FD; +pub const IFLA_XDP_ATTACHED: _bindgen_ty_85 = _bindgen_ty_85::IFLA_XDP_ATTACHED; +pub const IFLA_XDP_FLAGS: _bindgen_ty_85 = _bindgen_ty_85::IFLA_XDP_FLAGS; +pub const IFLA_XDP_PROG_ID: _bindgen_ty_85 = _bindgen_ty_85::IFLA_XDP_PROG_ID; +pub const IFLA_XDP_DRV_PROG_ID: _bindgen_ty_85 = _bindgen_ty_85::IFLA_XDP_DRV_PROG_ID; +pub const IFLA_XDP_SKB_PROG_ID: _bindgen_ty_85 = _bindgen_ty_85::IFLA_XDP_SKB_PROG_ID; +pub const IFLA_XDP_HW_PROG_ID: _bindgen_ty_85 = _bindgen_ty_85::IFLA_XDP_HW_PROG_ID; +pub const IFLA_XDP_EXPECTED_FD: _bindgen_ty_85 = _bindgen_ty_85::IFLA_XDP_EXPECTED_FD; +pub const __IFLA_XDP_MAX: _bindgen_ty_85 = _bindgen_ty_85::__IFLA_XDP_MAX; +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum _bindgen_ty_85 { + IFLA_XDP_UNSPEC = 0, + IFLA_XDP_FD = 1, + IFLA_XDP_ATTACHED = 2, + IFLA_XDP_FLAGS = 3, + IFLA_XDP_PROG_ID = 4, + IFLA_XDP_DRV_PROG_ID = 5, + IFLA_XDP_SKB_PROG_ID = 6, + IFLA_XDP_HW_PROG_ID = 7, + IFLA_XDP_EXPECTED_FD = 8, + __IFLA_XDP_MAX = 9, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct ifinfomsg { + pub ifi_family: ::std::os::raw::c_uchar, + pub __ifi_pad: ::std::os::raw::c_uchar, + pub ifi_type: ::std::os::raw::c_ushort, + pub ifi_index: ::std::os::raw::c_int, + pub ifi_flags: ::std::os::raw::c_uint, + pub ifi_change: ::std::os::raw::c_uint, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct tcmsg { + pub tcm_family: ::std::os::raw::c_uchar, + pub tcm__pad1: ::std::os::raw::c_uchar, + pub tcm__pad2: ::std::os::raw::c_ushort, + pub tcm_ifindex: ::std::os::raw::c_int, + pub tcm_handle: __u32, + pub tcm_parent: __u32, + pub tcm_info: __u32, +} +pub const TCA_UNSPEC: _bindgen_ty_98 = _bindgen_ty_98::TCA_UNSPEC; +pub const TCA_KIND: _bindgen_ty_98 = _bindgen_ty_98::TCA_KIND; +pub const TCA_OPTIONS: _bindgen_ty_98 = _bindgen_ty_98::TCA_OPTIONS; +pub const TCA_STATS: _bindgen_ty_98 = _bindgen_ty_98::TCA_STATS; +pub const TCA_XSTATS: _bindgen_ty_98 = _bindgen_ty_98::TCA_XSTATS; +pub const TCA_RATE: _bindgen_ty_98 = _bindgen_ty_98::TCA_RATE; +pub const TCA_FCNT: _bindgen_ty_98 = _bindgen_ty_98::TCA_FCNT; +pub const TCA_STATS2: _bindgen_ty_98 = _bindgen_ty_98::TCA_STATS2; +pub const TCA_STAB: _bindgen_ty_98 = _bindgen_ty_98::TCA_STAB; +pub const TCA_PAD: _bindgen_ty_98 = _bindgen_ty_98::TCA_PAD; +pub const TCA_DUMP_INVISIBLE: _bindgen_ty_98 = _bindgen_ty_98::TCA_DUMP_INVISIBLE; +pub const TCA_CHAIN: _bindgen_ty_98 = _bindgen_ty_98::TCA_CHAIN; +pub const TCA_HW_OFFLOAD: _bindgen_ty_98 = _bindgen_ty_98::TCA_HW_OFFLOAD; +pub const TCA_INGRESS_BLOCK: _bindgen_ty_98 = _bindgen_ty_98::TCA_INGRESS_BLOCK; +pub const TCA_EGRESS_BLOCK: _bindgen_ty_98 = _bindgen_ty_98::TCA_EGRESS_BLOCK; +pub const __TCA_MAX: _bindgen_ty_98 = _bindgen_ty_98::__TCA_MAX; +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum _bindgen_ty_98 { + TCA_UNSPEC = 0, + TCA_KIND = 1, + TCA_OPTIONS = 2, + TCA_STATS = 3, + TCA_XSTATS = 4, + TCA_RATE = 5, + TCA_FCNT = 6, + TCA_STATS2 = 7, + TCA_STAB = 8, + TCA_PAD = 9, + TCA_DUMP_INVISIBLE = 10, + TCA_CHAIN = 11, + TCA_HW_OFFLOAD = 12, + TCA_INGRESS_BLOCK = 13, + TCA_EGRESS_BLOCK = 14, + __TCA_MAX = 15, +} +pub const TCA_BPF_UNSPEC: _bindgen_ty_154 = _bindgen_ty_154::TCA_BPF_UNSPEC; +pub const TCA_BPF_ACT: _bindgen_ty_154 = _bindgen_ty_154::TCA_BPF_ACT; +pub const 
TCA_BPF_POLICE: _bindgen_ty_154 = _bindgen_ty_154::TCA_BPF_POLICE; +pub const TCA_BPF_CLASSID: _bindgen_ty_154 = _bindgen_ty_154::TCA_BPF_CLASSID; +pub const TCA_BPF_OPS_LEN: _bindgen_ty_154 = _bindgen_ty_154::TCA_BPF_OPS_LEN; +pub const TCA_BPF_OPS: _bindgen_ty_154 = _bindgen_ty_154::TCA_BPF_OPS; +pub const TCA_BPF_FD: _bindgen_ty_154 = _bindgen_ty_154::TCA_BPF_FD; +pub const TCA_BPF_NAME: _bindgen_ty_154 = _bindgen_ty_154::TCA_BPF_NAME; +pub const TCA_BPF_FLAGS: _bindgen_ty_154 = _bindgen_ty_154::TCA_BPF_FLAGS; +pub const TCA_BPF_FLAGS_GEN: _bindgen_ty_154 = _bindgen_ty_154::TCA_BPF_FLAGS_GEN; +pub const TCA_BPF_TAG: _bindgen_ty_154 = _bindgen_ty_154::TCA_BPF_TAG; +pub const TCA_BPF_ID: _bindgen_ty_154 = _bindgen_ty_154::TCA_BPF_ID; +pub const __TCA_BPF_MAX: _bindgen_ty_154 = _bindgen_ty_154::__TCA_BPF_MAX; +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum _bindgen_ty_154 { + TCA_BPF_UNSPEC = 0, + TCA_BPF_ACT = 1, + TCA_BPF_POLICE = 2, + TCA_BPF_CLASSID = 3, + TCA_BPF_OPS_LEN = 4, + TCA_BPF_OPS = 5, + TCA_BPF_FD = 6, + TCA_BPF_NAME = 7, + TCA_BPF_FLAGS = 8, + TCA_BPF_FLAGS_GEN = 9, + TCA_BPF_TAG = 10, + TCA_BPF_ID = 11, + __TCA_BPF_MAX = 12, +} +pub const AYA_PERF_EVENT_IOC_ENABLE: ::std::os::raw::c_int = 9216; +pub const AYA_PERF_EVENT_IOC_DISABLE: ::std::os::raw::c_int = 9217; +pub const AYA_PERF_EVENT_IOC_SET_BPF: ::std::os::raw::c_int = 1074013192; diff --git a/bpf/aya-bpf-bindings/src/riscv64/bindings.rs b/bpf/aya-bpf-bindings/src/riscv64/bindings.rs index e69de29b..13fb4a26 100644 --- a/bpf/aya-bpf-bindings/src/riscv64/bindings.rs +++ b/bpf/aya-bpf-bindings/src/riscv64/bindings.rs @@ -0,0 +1,2280 @@ +#[repr(C)] +#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] +pub struct __BindgenBitfieldUnit { + storage: Storage, +} +impl __BindgenBitfieldUnit { + #[inline] + pub const fn new(storage: Storage) -> Self { + Self { storage } + } +} +impl __BindgenBitfieldUnit +where + Storage: AsRef<[u8]> + AsMut<[u8]>, +{ + #[inline] + pub fn get_bit(&self, index: usize) -> bool { + debug_assert!(index / 8 < self.storage.as_ref().len()); + let byte_index = index / 8; + let byte = self.storage.as_ref()[byte_index]; + let bit_index = if cfg!(target_endian = "big") { + 7 - (index % 8) + } else { + index % 8 + }; + let mask = 1 << bit_index; + byte & mask == mask + } + #[inline] + pub fn set_bit(&mut self, index: usize, val: bool) { + debug_assert!(index / 8 < self.storage.as_ref().len()); + let byte_index = index / 8; + let byte = &mut self.storage.as_mut()[byte_index]; + let bit_index = if cfg!(target_endian = "big") { + 7 - (index % 8) + } else { + index % 8 + }; + let mask = 1 << bit_index; + if val { + *byte |= mask; + } else { + *byte &= !mask; + } + } + #[inline] + pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 { + debug_assert!(bit_width <= 64); + debug_assert!(bit_offset / 8 < self.storage.as_ref().len()); + debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len()); + let mut val = 0; + for i in 0..(bit_width as usize) { + if self.get_bit(i + bit_offset) { + let index = if cfg!(target_endian = "big") { + bit_width as usize - 1 - i + } else { + i + }; + val |= 1 << index; + } + } + val + } + #[inline] + pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) { + debug_assert!(bit_width <= 64); + debug_assert!(bit_offset / 8 < self.storage.as_ref().len()); + debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len()); + for i in 0..(bit_width as usize) { + let mask = 
1 << i; + let val_bit_is_set = val & mask == mask; + let index = if cfg!(target_endian = "big") { + bit_width as usize - 1 - i + } else { + i + }; + self.set_bit(index + bit_offset, val_bit_is_set); + } + } +} +#[repr(C)] +#[derive(Default)] +pub struct __IncompleteArrayField(::core::marker::PhantomData, [T; 0]); +impl __IncompleteArrayField { + #[inline] + pub const fn new() -> Self { + __IncompleteArrayField(::core::marker::PhantomData, []) + } + #[inline] + pub fn as_ptr(&self) -> *const T { + self as *const _ as *const T + } + #[inline] + pub fn as_mut_ptr(&mut self) -> *mut T { + self as *mut _ as *mut T + } + #[inline] + pub unsafe fn as_slice(&self, len: usize) -> &[T] { + ::core::slice::from_raw_parts(self.as_ptr(), len) + } + #[inline] + pub unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] { + ::core::slice::from_raw_parts_mut(self.as_mut_ptr(), len) + } +} +impl ::core::fmt::Debug for __IncompleteArrayField { + fn fmt(&self, fmt: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { + fmt.write_str("__IncompleteArrayField") + } +} +pub const BPF_LD: u32 = 0; +pub const BPF_LDX: u32 = 1; +pub const BPF_ST: u32 = 2; +pub const BPF_STX: u32 = 3; +pub const BPF_ALU: u32 = 4; +pub const BPF_JMP: u32 = 5; +pub const BPF_RET: u32 = 6; +pub const BPF_MISC: u32 = 7; +pub const BPF_W: u32 = 0; +pub const BPF_H: u32 = 8; +pub const BPF_B: u32 = 16; +pub const BPF_IMM: u32 = 0; +pub const BPF_ABS: u32 = 32; +pub const BPF_IND: u32 = 64; +pub const BPF_MEM: u32 = 96; +pub const BPF_LEN: u32 = 128; +pub const BPF_MSH: u32 = 160; +pub const BPF_ADD: u32 = 0; +pub const BPF_SUB: u32 = 16; +pub const BPF_MUL: u32 = 32; +pub const BPF_DIV: u32 = 48; +pub const BPF_OR: u32 = 64; +pub const BPF_AND: u32 = 80; +pub const BPF_LSH: u32 = 96; +pub const BPF_RSH: u32 = 112; +pub const BPF_NEG: u32 = 128; +pub const BPF_MOD: u32 = 144; +pub const BPF_XOR: u32 = 160; +pub const BPF_JA: u32 = 0; +pub const BPF_JEQ: u32 = 16; +pub const BPF_JGT: u32 = 32; +pub const BPF_JGE: u32 = 48; +pub const BPF_JSET: u32 = 64; +pub const BPF_K: u32 = 0; +pub const BPF_X: u32 = 8; +pub const BPF_MAXINSNS: u32 = 4096; +pub const BPF_JMP32: u32 = 6; +pub const BPF_ALU64: u32 = 7; +pub const BPF_DW: u32 = 24; +pub const BPF_ATOMIC: u32 = 192; +pub const BPF_XADD: u32 = 192; +pub const BPF_MOV: u32 = 176; +pub const BPF_ARSH: u32 = 192; +pub const BPF_END: u32 = 208; +pub const BPF_TO_LE: u32 = 0; +pub const BPF_TO_BE: u32 = 8; +pub const BPF_FROM_LE: u32 = 0; +pub const BPF_FROM_BE: u32 = 8; +pub const BPF_JNE: u32 = 80; +pub const BPF_JLT: u32 = 160; +pub const BPF_JLE: u32 = 176; +pub const BPF_JSGT: u32 = 96; +pub const BPF_JSGE: u32 = 112; +pub const BPF_JSLT: u32 = 192; +pub const BPF_JSLE: u32 = 208; +pub const BPF_CALL: u32 = 128; +pub const BPF_EXIT: u32 = 144; +pub const BPF_FETCH: u32 = 1; +pub const BPF_XCHG: u32 = 225; +pub const BPF_CMPXCHG: u32 = 241; +pub const BPF_F_ALLOW_OVERRIDE: u32 = 1; +pub const BPF_F_ALLOW_MULTI: u32 = 2; +pub const BPF_F_REPLACE: u32 = 4; +pub const BPF_F_STRICT_ALIGNMENT: u32 = 1; +pub const BPF_F_ANY_ALIGNMENT: u32 = 2; +pub const BPF_F_TEST_RND_HI32: u32 = 4; +pub const BPF_F_TEST_STATE_FREQ: u32 = 8; +pub const BPF_F_SLEEPABLE: u32 = 16; +pub const BPF_F_XDP_HAS_FRAGS: u32 = 32; +pub const BPF_F_KPROBE_MULTI_RETURN: u32 = 1; +pub const BPF_PSEUDO_MAP_FD: u32 = 1; +pub const BPF_PSEUDO_MAP_IDX: u32 = 5; +pub const BPF_PSEUDO_MAP_VALUE: u32 = 2; +pub const BPF_PSEUDO_MAP_IDX_VALUE: u32 = 6; +pub const BPF_PSEUDO_BTF_ID: u32 = 3; +pub const BPF_PSEUDO_FUNC: u32 = 4; 
+pub const BPF_PSEUDO_CALL: u32 = 1; +pub const BPF_PSEUDO_KFUNC_CALL: u32 = 2; +pub const BPF_F_QUERY_EFFECTIVE: u32 = 1; +pub const BPF_F_TEST_RUN_ON_CPU: u32 = 1; +pub const BPF_F_TEST_XDP_LIVE_FRAMES: u32 = 2; +pub const BPF_BUILD_ID_SIZE: u32 = 20; +pub const BPF_OBJ_NAME_LEN: u32 = 16; +pub const BPF_TAG_SIZE: u32 = 8; +pub const SOL_SOCKET: u32 = 1; +pub const SO_DEBUG: u32 = 1; +pub const SO_REUSEADDR: u32 = 2; +pub const SO_TYPE: u32 = 3; +pub const SO_ERROR: u32 = 4; +pub const SO_DONTROUTE: u32 = 5; +pub const SO_BROADCAST: u32 = 6; +pub const SO_SNDBUF: u32 = 7; +pub const SO_RCVBUF: u32 = 8; +pub const SO_SNDBUFFORCE: u32 = 32; +pub const SO_RCVBUFFORCE: u32 = 33; +pub const SO_KEEPALIVE: u32 = 9; +pub const SO_OOBINLINE: u32 = 10; +pub const SO_NO_CHECK: u32 = 11; +pub const SO_PRIORITY: u32 = 12; +pub const SO_LINGER: u32 = 13; +pub const SO_BSDCOMPAT: u32 = 14; +pub const SO_REUSEPORT: u32 = 15; +pub const SO_PASSCRED: u32 = 16; +pub const SO_PEERCRED: u32 = 17; +pub const SO_RCVLOWAT: u32 = 18; +pub const SO_SNDLOWAT: u32 = 19; +pub const SO_RCVTIMEO_OLD: u32 = 20; +pub const SO_SNDTIMEO_OLD: u32 = 21; +pub const SO_SECURITY_AUTHENTICATION: u32 = 22; +pub const SO_SECURITY_ENCRYPTION_TRANSPORT: u32 = 23; +pub const SO_SECURITY_ENCRYPTION_NETWORK: u32 = 24; +pub const SO_BINDTODEVICE: u32 = 25; +pub const SO_ATTACH_FILTER: u32 = 26; +pub const SO_DETACH_FILTER: u32 = 27; +pub const SO_GET_FILTER: u32 = 26; +pub const SO_PEERNAME: u32 = 28; +pub const SO_ACCEPTCONN: u32 = 30; +pub const SO_PEERSEC: u32 = 31; +pub const SO_PASSSEC: u32 = 34; +pub const SO_MARK: u32 = 36; +pub const SO_PROTOCOL: u32 = 38; +pub const SO_DOMAIN: u32 = 39; +pub const SO_RXQ_OVFL: u32 = 40; +pub const SO_WIFI_STATUS: u32 = 41; +pub const SO_PEEK_OFF: u32 = 42; +pub const SO_NOFCS: u32 = 43; +pub const SO_LOCK_FILTER: u32 = 44; +pub const SO_SELECT_ERR_QUEUE: u32 = 45; +pub const SO_BUSY_POLL: u32 = 46; +pub const SO_MAX_PACING_RATE: u32 = 47; +pub const SO_BPF_EXTENSIONS: u32 = 48; +pub const SO_INCOMING_CPU: u32 = 49; +pub const SO_ATTACH_BPF: u32 = 50; +pub const SO_DETACH_BPF: u32 = 27; +pub const SO_ATTACH_REUSEPORT_CBPF: u32 = 51; +pub const SO_ATTACH_REUSEPORT_EBPF: u32 = 52; +pub const SO_CNX_ADVICE: u32 = 53; +pub const SO_MEMINFO: u32 = 55; +pub const SO_INCOMING_NAPI_ID: u32 = 56; +pub const SO_COOKIE: u32 = 57; +pub const SO_PEERGROUPS: u32 = 59; +pub const SO_ZEROCOPY: u32 = 60; +pub const SO_TXTIME: u32 = 61; +pub const SO_BINDTOIFINDEX: u32 = 62; +pub const SO_TIMESTAMP_OLD: u32 = 29; +pub const SO_TIMESTAMPNS_OLD: u32 = 35; +pub const SO_TIMESTAMPING_OLD: u32 = 37; +pub const SO_TIMESTAMP_NEW: u32 = 63; +pub const SO_TIMESTAMPNS_NEW: u32 = 64; +pub const SO_TIMESTAMPING_NEW: u32 = 65; +pub const SO_RCVTIMEO_NEW: u32 = 66; +pub const SO_SNDTIMEO_NEW: u32 = 67; +pub const SO_DETACH_REUSEPORT_BPF: u32 = 68; +pub const SO_TIMESTAMP: u32 = 29; +pub const SO_TIMESTAMPNS: u32 = 35; +pub const SO_TIMESTAMPING: u32 = 37; +pub const SO_RCVTIMEO: u32 = 20; +pub const SO_SNDTIMEO: u32 = 21; +pub const TC_ACT_UNSPEC: i32 = -1; +pub const TC_ACT_OK: u32 = 0; +pub const TC_ACT_RECLASSIFY: u32 = 1; +pub const TC_ACT_SHOT: u32 = 2; +pub const TC_ACT_PIPE: u32 = 3; +pub const TC_ACT_STOLEN: u32 = 4; +pub const TC_ACT_QUEUED: u32 = 5; +pub const TC_ACT_REPEAT: u32 = 6; +pub const TC_ACT_REDIRECT: u32 = 7; +pub const TC_ACT_TRAP: u32 = 8; +pub const TC_ACT_VALUE_MAX: u32 = 8; +pub const TC_ACT_EXT_VAL_MASK: u32 = 268435455; +pub type __u8 = ::aya_bpf_cty::c_uchar; +pub type __s16 = 
::aya_bpf_cty::c_short; +pub type __u16 = ::aya_bpf_cty::c_ushort; +pub type __s32 = ::aya_bpf_cty::c_int; +pub type __u32 = ::aya_bpf_cty::c_uint; +pub type __s64 = ::aya_bpf_cty::c_longlong; +pub type __u64 = ::aya_bpf_cty::c_ulonglong; +pub type __be16 = __u16; +pub type __be32 = __u32; +pub type __wsum = __u32; +pub const BPF_REG_0: ::aya_bpf_cty::c_uint = 0; +pub const BPF_REG_1: ::aya_bpf_cty::c_uint = 1; +pub const BPF_REG_2: ::aya_bpf_cty::c_uint = 2; +pub const BPF_REG_3: ::aya_bpf_cty::c_uint = 3; +pub const BPF_REG_4: ::aya_bpf_cty::c_uint = 4; +pub const BPF_REG_5: ::aya_bpf_cty::c_uint = 5; +pub const BPF_REG_6: ::aya_bpf_cty::c_uint = 6; +pub const BPF_REG_7: ::aya_bpf_cty::c_uint = 7; +pub const BPF_REG_8: ::aya_bpf_cty::c_uint = 8; +pub const BPF_REG_9: ::aya_bpf_cty::c_uint = 9; +pub const BPF_REG_10: ::aya_bpf_cty::c_uint = 10; +pub const __MAX_BPF_REG: ::aya_bpf_cty::c_uint = 11; +pub type _bindgen_ty_1 = ::aya_bpf_cty::c_uint; +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_insn { + pub code: __u8, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 1usize]>, + pub off: __s16, + pub imm: __s32, +} +impl bpf_insn { + #[inline] + pub fn dst_reg(&self) -> __u8 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(0usize, 4u8) as u8) } + } + #[inline] + pub fn set_dst_reg(&mut self, val: __u8) { + unsafe { + let val: u8 = ::core::mem::transmute(val); + self._bitfield_1.set(0usize, 4u8, val as u64) + } + } + #[inline] + pub fn src_reg(&self) -> __u8 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(4usize, 4u8) as u8) } + } + #[inline] + pub fn set_src_reg(&mut self, val: __u8) { + unsafe { + let val: u8 = ::core::mem::transmute(val); + self._bitfield_1.set(4usize, 4u8, val as u64) + } + } + #[inline] + pub fn new_bitfield_1(dst_reg: __u8, src_reg: __u8) -> __BindgenBitfieldUnit<[u8; 1usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); + __bindgen_bitfield_unit.set(0usize, 4u8, { + let dst_reg: u8 = unsafe { ::core::mem::transmute(dst_reg) }; + dst_reg as u64 + }); + __bindgen_bitfield_unit.set(4usize, 4u8, { + let src_reg: u8 = unsafe { ::core::mem::transmute(src_reg) }; + src_reg as u64 + }); + __bindgen_bitfield_unit + } +} +#[repr(C)] +pub struct bpf_lpm_trie_key { + pub prefixlen: __u32, + pub data: __IncompleteArrayField<__u8>, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_cgroup_storage_key { + pub cgroup_inode_id: __u64, + pub attach_type: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_iter_link_info { + pub map: bpf_iter_link_info__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_iter_link_info__bindgen_ty_1 { + pub map_fd: __u32, +} +pub mod bpf_cmd { + pub type Type = ::aya_bpf_cty::c_uint; + pub const BPF_MAP_CREATE: Type = 0; + pub const BPF_MAP_LOOKUP_ELEM: Type = 1; + pub const BPF_MAP_UPDATE_ELEM: Type = 2; + pub const BPF_MAP_DELETE_ELEM: Type = 3; + pub const BPF_MAP_GET_NEXT_KEY: Type = 4; + pub const BPF_PROG_LOAD: Type = 5; + pub const BPF_OBJ_PIN: Type = 6; + pub const BPF_OBJ_GET: Type = 7; + pub const BPF_PROG_ATTACH: Type = 8; + pub const BPF_PROG_DETACH: Type = 9; + pub const BPF_PROG_TEST_RUN: Type = 10; + pub const BPF_PROG_RUN: Type = 10; + pub const BPF_PROG_GET_NEXT_ID: Type = 11; + pub const BPF_MAP_GET_NEXT_ID: Type = 12; + pub const BPF_PROG_GET_FD_BY_ID: Type = 13; + pub const BPF_MAP_GET_FD_BY_ID: Type = 14; + pub const BPF_OBJ_GET_INFO_BY_FD: Type = 15; + pub const BPF_PROG_QUERY: Type = 16; + pub const 
BPF_RAW_TRACEPOINT_OPEN: Type = 17; + pub const BPF_BTF_LOAD: Type = 18; + pub const BPF_BTF_GET_FD_BY_ID: Type = 19; + pub const BPF_TASK_FD_QUERY: Type = 20; + pub const BPF_MAP_LOOKUP_AND_DELETE_ELEM: Type = 21; + pub const BPF_MAP_FREEZE: Type = 22; + pub const BPF_BTF_GET_NEXT_ID: Type = 23; + pub const BPF_MAP_LOOKUP_BATCH: Type = 24; + pub const BPF_MAP_LOOKUP_AND_DELETE_BATCH: Type = 25; + pub const BPF_MAP_UPDATE_BATCH: Type = 26; + pub const BPF_MAP_DELETE_BATCH: Type = 27; + pub const BPF_LINK_CREATE: Type = 28; + pub const BPF_LINK_UPDATE: Type = 29; + pub const BPF_LINK_GET_FD_BY_ID: Type = 30; + pub const BPF_LINK_GET_NEXT_ID: Type = 31; + pub const BPF_ENABLE_STATS: Type = 32; + pub const BPF_ITER_CREATE: Type = 33; + pub const BPF_LINK_DETACH: Type = 34; + pub const BPF_PROG_BIND_MAP: Type = 35; +} +pub mod bpf_map_type { + pub type Type = ::aya_bpf_cty::c_uint; + pub const BPF_MAP_TYPE_UNSPEC: Type = 0; + pub const BPF_MAP_TYPE_HASH: Type = 1; + pub const BPF_MAP_TYPE_ARRAY: Type = 2; + pub const BPF_MAP_TYPE_PROG_ARRAY: Type = 3; + pub const BPF_MAP_TYPE_PERF_EVENT_ARRAY: Type = 4; + pub const BPF_MAP_TYPE_PERCPU_HASH: Type = 5; + pub const BPF_MAP_TYPE_PERCPU_ARRAY: Type = 6; + pub const BPF_MAP_TYPE_STACK_TRACE: Type = 7; + pub const BPF_MAP_TYPE_CGROUP_ARRAY: Type = 8; + pub const BPF_MAP_TYPE_LRU_HASH: Type = 9; + pub const BPF_MAP_TYPE_LRU_PERCPU_HASH: Type = 10; + pub const BPF_MAP_TYPE_LPM_TRIE: Type = 11; + pub const BPF_MAP_TYPE_ARRAY_OF_MAPS: Type = 12; + pub const BPF_MAP_TYPE_HASH_OF_MAPS: Type = 13; + pub const BPF_MAP_TYPE_DEVMAP: Type = 14; + pub const BPF_MAP_TYPE_SOCKMAP: Type = 15; + pub const BPF_MAP_TYPE_CPUMAP: Type = 16; + pub const BPF_MAP_TYPE_XSKMAP: Type = 17; + pub const BPF_MAP_TYPE_SOCKHASH: Type = 18; + pub const BPF_MAP_TYPE_CGROUP_STORAGE: Type = 19; + pub const BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: Type = 20; + pub const BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: Type = 21; + pub const BPF_MAP_TYPE_QUEUE: Type = 22; + pub const BPF_MAP_TYPE_STACK: Type = 23; + pub const BPF_MAP_TYPE_SK_STORAGE: Type = 24; + pub const BPF_MAP_TYPE_DEVMAP_HASH: Type = 25; + pub const BPF_MAP_TYPE_STRUCT_OPS: Type = 26; + pub const BPF_MAP_TYPE_RINGBUF: Type = 27; + pub const BPF_MAP_TYPE_INODE_STORAGE: Type = 28; + pub const BPF_MAP_TYPE_TASK_STORAGE: Type = 29; + pub const BPF_MAP_TYPE_BLOOM_FILTER: Type = 30; +} +pub mod bpf_prog_type { + pub type Type = ::aya_bpf_cty::c_uint; + pub const BPF_PROG_TYPE_UNSPEC: Type = 0; + pub const BPF_PROG_TYPE_SOCKET_FILTER: Type = 1; + pub const BPF_PROG_TYPE_KPROBE: Type = 2; + pub const BPF_PROG_TYPE_SCHED_CLS: Type = 3; + pub const BPF_PROG_TYPE_SCHED_ACT: Type = 4; + pub const BPF_PROG_TYPE_TRACEPOINT: Type = 5; + pub const BPF_PROG_TYPE_XDP: Type = 6; + pub const BPF_PROG_TYPE_PERF_EVENT: Type = 7; + pub const BPF_PROG_TYPE_CGROUP_SKB: Type = 8; + pub const BPF_PROG_TYPE_CGROUP_SOCK: Type = 9; + pub const BPF_PROG_TYPE_LWT_IN: Type = 10; + pub const BPF_PROG_TYPE_LWT_OUT: Type = 11; + pub const BPF_PROG_TYPE_LWT_XMIT: Type = 12; + pub const BPF_PROG_TYPE_SOCK_OPS: Type = 13; + pub const BPF_PROG_TYPE_SK_SKB: Type = 14; + pub const BPF_PROG_TYPE_CGROUP_DEVICE: Type = 15; + pub const BPF_PROG_TYPE_SK_MSG: Type = 16; + pub const BPF_PROG_TYPE_RAW_TRACEPOINT: Type = 17; + pub const BPF_PROG_TYPE_CGROUP_SOCK_ADDR: Type = 18; + pub const BPF_PROG_TYPE_LWT_SEG6LOCAL: Type = 19; + pub const BPF_PROG_TYPE_LIRC_MODE2: Type = 20; + pub const BPF_PROG_TYPE_SK_REUSEPORT: Type = 21; + pub const BPF_PROG_TYPE_FLOW_DISSECTOR: Type = 22; + 
pub const BPF_PROG_TYPE_CGROUP_SYSCTL: Type = 23; + pub const BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: Type = 24; + pub const BPF_PROG_TYPE_CGROUP_SOCKOPT: Type = 25; + pub const BPF_PROG_TYPE_TRACING: Type = 26; + pub const BPF_PROG_TYPE_STRUCT_OPS: Type = 27; + pub const BPF_PROG_TYPE_EXT: Type = 28; + pub const BPF_PROG_TYPE_LSM: Type = 29; + pub const BPF_PROG_TYPE_SK_LOOKUP: Type = 30; + pub const BPF_PROG_TYPE_SYSCALL: Type = 31; +} +pub mod bpf_attach_type { + pub type Type = ::aya_bpf_cty::c_uint; + pub const BPF_CGROUP_INET_INGRESS: Type = 0; + pub const BPF_CGROUP_INET_EGRESS: Type = 1; + pub const BPF_CGROUP_INET_SOCK_CREATE: Type = 2; + pub const BPF_CGROUP_SOCK_OPS: Type = 3; + pub const BPF_SK_SKB_STREAM_PARSER: Type = 4; + pub const BPF_SK_SKB_STREAM_VERDICT: Type = 5; + pub const BPF_CGROUP_DEVICE: Type = 6; + pub const BPF_SK_MSG_VERDICT: Type = 7; + pub const BPF_CGROUP_INET4_BIND: Type = 8; + pub const BPF_CGROUP_INET6_BIND: Type = 9; + pub const BPF_CGROUP_INET4_CONNECT: Type = 10; + pub const BPF_CGROUP_INET6_CONNECT: Type = 11; + pub const BPF_CGROUP_INET4_POST_BIND: Type = 12; + pub const BPF_CGROUP_INET6_POST_BIND: Type = 13; + pub const BPF_CGROUP_UDP4_SENDMSG: Type = 14; + pub const BPF_CGROUP_UDP6_SENDMSG: Type = 15; + pub const BPF_LIRC_MODE2: Type = 16; + pub const BPF_FLOW_DISSECTOR: Type = 17; + pub const BPF_CGROUP_SYSCTL: Type = 18; + pub const BPF_CGROUP_UDP4_RECVMSG: Type = 19; + pub const BPF_CGROUP_UDP6_RECVMSG: Type = 20; + pub const BPF_CGROUP_GETSOCKOPT: Type = 21; + pub const BPF_CGROUP_SETSOCKOPT: Type = 22; + pub const BPF_TRACE_RAW_TP: Type = 23; + pub const BPF_TRACE_FENTRY: Type = 24; + pub const BPF_TRACE_FEXIT: Type = 25; + pub const BPF_MODIFY_RETURN: Type = 26; + pub const BPF_LSM_MAC: Type = 27; + pub const BPF_TRACE_ITER: Type = 28; + pub const BPF_CGROUP_INET4_GETPEERNAME: Type = 29; + pub const BPF_CGROUP_INET6_GETPEERNAME: Type = 30; + pub const BPF_CGROUP_INET4_GETSOCKNAME: Type = 31; + pub const BPF_CGROUP_INET6_GETSOCKNAME: Type = 32; + pub const BPF_XDP_DEVMAP: Type = 33; + pub const BPF_CGROUP_INET_SOCK_RELEASE: Type = 34; + pub const BPF_XDP_CPUMAP: Type = 35; + pub const BPF_SK_LOOKUP: Type = 36; + pub const BPF_XDP: Type = 37; + pub const BPF_SK_SKB_VERDICT: Type = 38; + pub const BPF_SK_REUSEPORT_SELECT: Type = 39; + pub const BPF_SK_REUSEPORT_SELECT_OR_MIGRATE: Type = 40; + pub const BPF_PERF_EVENT: Type = 41; + pub const BPF_TRACE_KPROBE_MULTI: Type = 42; + pub const __MAX_BPF_ATTACH_TYPE: Type = 43; +} +pub mod bpf_link_type { + pub type Type = ::aya_bpf_cty::c_uint; + pub const BPF_LINK_TYPE_UNSPEC: Type = 0; + pub const BPF_LINK_TYPE_RAW_TRACEPOINT: Type = 1; + pub const BPF_LINK_TYPE_TRACING: Type = 2; + pub const BPF_LINK_TYPE_CGROUP: Type = 3; + pub const BPF_LINK_TYPE_ITER: Type = 4; + pub const BPF_LINK_TYPE_NETNS: Type = 5; + pub const BPF_LINK_TYPE_XDP: Type = 6; + pub const BPF_LINK_TYPE_PERF_EVENT: Type = 7; + pub const BPF_LINK_TYPE_KPROBE_MULTI: Type = 8; + pub const BPF_LINK_TYPE_STRUCT_OPS: Type = 9; + pub const MAX_BPF_LINK_TYPE: Type = 10; +} +pub const BPF_ANY: ::aya_bpf_cty::c_uint = 0; +pub const BPF_NOEXIST: ::aya_bpf_cty::c_uint = 1; +pub const BPF_EXIST: ::aya_bpf_cty::c_uint = 2; +pub const BPF_F_LOCK: ::aya_bpf_cty::c_uint = 4; +pub type _bindgen_ty_2 = ::aya_bpf_cty::c_uint; +pub const BPF_F_NO_PREALLOC: ::aya_bpf_cty::c_uint = 1; +pub const BPF_F_NO_COMMON_LRU: ::aya_bpf_cty::c_uint = 2; +pub const BPF_F_NUMA_NODE: ::aya_bpf_cty::c_uint = 4; +pub const BPF_F_RDONLY: ::aya_bpf_cty::c_uint = 8; +pub 
const BPF_F_WRONLY: ::aya_bpf_cty::c_uint = 16; +pub const BPF_F_STACK_BUILD_ID: ::aya_bpf_cty::c_uint = 32; +pub const BPF_F_ZERO_SEED: ::aya_bpf_cty::c_uint = 64; +pub const BPF_F_RDONLY_PROG: ::aya_bpf_cty::c_uint = 128; +pub const BPF_F_WRONLY_PROG: ::aya_bpf_cty::c_uint = 256; +pub const BPF_F_CLONE: ::aya_bpf_cty::c_uint = 512; +pub const BPF_F_MMAPABLE: ::aya_bpf_cty::c_uint = 1024; +pub const BPF_F_PRESERVE_ELEMS: ::aya_bpf_cty::c_uint = 2048; +pub const BPF_F_INNER_MAP: ::aya_bpf_cty::c_uint = 4096; +pub type _bindgen_ty_3 = ::aya_bpf_cty::c_uint; +pub mod bpf_stats_type { + pub type Type = ::aya_bpf_cty::c_uint; + pub const BPF_STATS_RUN_TIME: Type = 0; +} +pub mod bpf_stack_build_id_status { + pub type Type = ::aya_bpf_cty::c_uint; + pub const BPF_STACK_BUILD_ID_EMPTY: Type = 0; + pub const BPF_STACK_BUILD_ID_VALID: Type = 1; + pub const BPF_STACK_BUILD_ID_IP: Type = 2; +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_stack_build_id { + pub status: __s32, + pub build_id: [::aya_bpf_cty::c_uchar; 20usize], + pub __bindgen_anon_1: bpf_stack_build_id__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_stack_build_id__bindgen_ty_1 { + pub offset: __u64, + pub ip: __u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_attr { + pub __bindgen_anon_1: bpf_attr__bindgen_ty_1, + pub __bindgen_anon_2: bpf_attr__bindgen_ty_2, + pub batch: bpf_attr__bindgen_ty_3, + pub __bindgen_anon_3: bpf_attr__bindgen_ty_4, + pub __bindgen_anon_4: bpf_attr__bindgen_ty_5, + pub __bindgen_anon_5: bpf_attr__bindgen_ty_6, + pub test: bpf_attr__bindgen_ty_7, + pub __bindgen_anon_6: bpf_attr__bindgen_ty_8, + pub info: bpf_attr__bindgen_ty_9, + pub query: bpf_attr__bindgen_ty_10, + pub raw_tracepoint: bpf_attr__bindgen_ty_11, + pub __bindgen_anon_7: bpf_attr__bindgen_ty_12, + pub task_fd_query: bpf_attr__bindgen_ty_13, + pub link_create: bpf_attr__bindgen_ty_14, + pub link_update: bpf_attr__bindgen_ty_15, + pub link_detach: bpf_attr__bindgen_ty_16, + pub enable_stats: bpf_attr__bindgen_ty_17, + pub iter_create: bpf_attr__bindgen_ty_18, + pub prog_bind_map: bpf_attr__bindgen_ty_19, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_1 { + pub map_type: __u32, + pub key_size: __u32, + pub value_size: __u32, + pub max_entries: __u32, + pub map_flags: __u32, + pub inner_map_fd: __u32, + pub numa_node: __u32, + pub map_name: [::aya_bpf_cty::c_char; 16usize], + pub map_ifindex: __u32, + pub btf_fd: __u32, + pub btf_key_type_id: __u32, + pub btf_value_type_id: __u32, + pub btf_vmlinux_value_type_id: __u32, + pub map_extra: __u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_2 { + pub map_fd: __u32, + pub key: __u64, + pub __bindgen_anon_1: bpf_attr__bindgen_ty_2__bindgen_ty_1, + pub flags: __u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_attr__bindgen_ty_2__bindgen_ty_1 { + pub value: __u64, + pub next_key: __u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_3 { + pub in_batch: __u64, + pub out_batch: __u64, + pub keys: __u64, + pub values: __u64, + pub count: __u32, + pub map_fd: __u32, + pub elem_flags: __u64, + pub flags: __u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_4 { + pub prog_type: __u32, + pub insn_cnt: __u32, + pub insns: __u64, + pub license: __u64, + pub log_level: __u32, + pub log_size: __u32, + pub log_buf: __u64, + pub kern_version: __u32, + pub prog_flags: __u32, + pub prog_name: [::aya_bpf_cty::c_char; 16usize], + pub prog_ifindex: __u32, + pub 
expected_attach_type: __u32, + pub prog_btf_fd: __u32, + pub func_info_rec_size: __u32, + pub func_info: __u64, + pub func_info_cnt: __u32, + pub line_info_rec_size: __u32, + pub line_info: __u64, + pub line_info_cnt: __u32, + pub attach_btf_id: __u32, + pub __bindgen_anon_1: bpf_attr__bindgen_ty_4__bindgen_ty_1, + pub core_relo_cnt: __u32, + pub fd_array: __u64, + pub core_relos: __u64, + pub core_relo_rec_size: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_attr__bindgen_ty_4__bindgen_ty_1 { + pub attach_prog_fd: __u32, + pub attach_btf_obj_fd: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_5 { + pub pathname: __u64, + pub bpf_fd: __u32, + pub file_flags: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_6 { + pub target_fd: __u32, + pub attach_bpf_fd: __u32, + pub attach_type: __u32, + pub attach_flags: __u32, + pub replace_bpf_fd: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_7 { + pub prog_fd: __u32, + pub retval: __u32, + pub data_size_in: __u32, + pub data_size_out: __u32, + pub data_in: __u64, + pub data_out: __u64, + pub repeat: __u32, + pub duration: __u32, + pub ctx_size_in: __u32, + pub ctx_size_out: __u32, + pub ctx_in: __u64, + pub ctx_out: __u64, + pub flags: __u32, + pub cpu: __u32, + pub batch_size: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_8 { + pub __bindgen_anon_1: bpf_attr__bindgen_ty_8__bindgen_ty_1, + pub next_id: __u32, + pub open_flags: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_attr__bindgen_ty_8__bindgen_ty_1 { + pub start_id: __u32, + pub prog_id: __u32, + pub map_id: __u32, + pub btf_id: __u32, + pub link_id: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_9 { + pub bpf_fd: __u32, + pub info_len: __u32, + pub info: __u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_10 { + pub target_fd: __u32, + pub attach_type: __u32, + pub query_flags: __u32, + pub attach_flags: __u32, + pub prog_ids: __u64, + pub prog_cnt: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_11 { + pub name: __u64, + pub prog_fd: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_12 { + pub btf: __u64, + pub btf_log_buf: __u64, + pub btf_size: __u32, + pub btf_log_size: __u32, + pub btf_log_level: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_13 { + pub pid: __u32, + pub fd: __u32, + pub flags: __u32, + pub buf_len: __u32, + pub buf: __u64, + pub prog_id: __u32, + pub fd_type: __u32, + pub probe_offset: __u64, + pub probe_addr: __u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_14 { + pub prog_fd: __u32, + pub __bindgen_anon_1: bpf_attr__bindgen_ty_14__bindgen_ty_1, + pub attach_type: __u32, + pub flags: __u32, + pub __bindgen_anon_2: bpf_attr__bindgen_ty_14__bindgen_ty_2, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_attr__bindgen_ty_14__bindgen_ty_1 { + pub target_fd: __u32, + pub target_ifindex: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_attr__bindgen_ty_14__bindgen_ty_2 { + pub target_btf_id: __u32, + pub __bindgen_anon_1: bpf_attr__bindgen_ty_14__bindgen_ty_2__bindgen_ty_1, + pub perf_event: bpf_attr__bindgen_ty_14__bindgen_ty_2__bindgen_ty_2, + pub kprobe_multi: bpf_attr__bindgen_ty_14__bindgen_ty_2__bindgen_ty_3, + pub tracing: bpf_attr__bindgen_ty_14__bindgen_ty_2__bindgen_ty_4, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub 
struct bpf_attr__bindgen_ty_14__bindgen_ty_2__bindgen_ty_1 { + pub iter_info: __u64, + pub iter_info_len: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_14__bindgen_ty_2__bindgen_ty_2 { + pub bpf_cookie: __u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_14__bindgen_ty_2__bindgen_ty_3 { + pub flags: __u32, + pub cnt: __u32, + pub syms: __u64, + pub addrs: __u64, + pub cookies: __u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_14__bindgen_ty_2__bindgen_ty_4 { + pub target_btf_id: __u32, + pub cookie: __u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_15 { + pub link_fd: __u32, + pub new_prog_fd: __u32, + pub flags: __u32, + pub old_prog_fd: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_16 { + pub link_fd: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_17 { + pub type_: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_18 { + pub link_fd: __u32, + pub flags: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_19 { + pub prog_fd: __u32, + pub map_fd: __u32, + pub flags: __u32, +} +pub mod bpf_func_id { + pub type Type = ::aya_bpf_cty::c_uint; + pub const BPF_FUNC_unspec: Type = 0; + pub const BPF_FUNC_map_lookup_elem: Type = 1; + pub const BPF_FUNC_map_update_elem: Type = 2; + pub const BPF_FUNC_map_delete_elem: Type = 3; + pub const BPF_FUNC_probe_read: Type = 4; + pub const BPF_FUNC_ktime_get_ns: Type = 5; + pub const BPF_FUNC_trace_printk: Type = 6; + pub const BPF_FUNC_get_prandom_u32: Type = 7; + pub const BPF_FUNC_get_smp_processor_id: Type = 8; + pub const BPF_FUNC_skb_store_bytes: Type = 9; + pub const BPF_FUNC_l3_csum_replace: Type = 10; + pub const BPF_FUNC_l4_csum_replace: Type = 11; + pub const BPF_FUNC_tail_call: Type = 12; + pub const BPF_FUNC_clone_redirect: Type = 13; + pub const BPF_FUNC_get_current_pid_tgid: Type = 14; + pub const BPF_FUNC_get_current_uid_gid: Type = 15; + pub const BPF_FUNC_get_current_comm: Type = 16; + pub const BPF_FUNC_get_cgroup_classid: Type = 17; + pub const BPF_FUNC_skb_vlan_push: Type = 18; + pub const BPF_FUNC_skb_vlan_pop: Type = 19; + pub const BPF_FUNC_skb_get_tunnel_key: Type = 20; + pub const BPF_FUNC_skb_set_tunnel_key: Type = 21; + pub const BPF_FUNC_perf_event_read: Type = 22; + pub const BPF_FUNC_redirect: Type = 23; + pub const BPF_FUNC_get_route_realm: Type = 24; + pub const BPF_FUNC_perf_event_output: Type = 25; + pub const BPF_FUNC_skb_load_bytes: Type = 26; + pub const BPF_FUNC_get_stackid: Type = 27; + pub const BPF_FUNC_csum_diff: Type = 28; + pub const BPF_FUNC_skb_get_tunnel_opt: Type = 29; + pub const BPF_FUNC_skb_set_tunnel_opt: Type = 30; + pub const BPF_FUNC_skb_change_proto: Type = 31; + pub const BPF_FUNC_skb_change_type: Type = 32; + pub const BPF_FUNC_skb_under_cgroup: Type = 33; + pub const BPF_FUNC_get_hash_recalc: Type = 34; + pub const BPF_FUNC_get_current_task: Type = 35; + pub const BPF_FUNC_probe_write_user: Type = 36; + pub const BPF_FUNC_current_task_under_cgroup: Type = 37; + pub const BPF_FUNC_skb_change_tail: Type = 38; + pub const BPF_FUNC_skb_pull_data: Type = 39; + pub const BPF_FUNC_csum_update: Type = 40; + pub const BPF_FUNC_set_hash_invalid: Type = 41; + pub const BPF_FUNC_get_numa_node_id: Type = 42; + pub const BPF_FUNC_skb_change_head: Type = 43; + pub const BPF_FUNC_xdp_adjust_head: Type = 44; + pub const BPF_FUNC_probe_read_str: Type = 45; + pub const 
BPF_FUNC_get_socket_cookie: Type = 46; + pub const BPF_FUNC_get_socket_uid: Type = 47; + pub const BPF_FUNC_set_hash: Type = 48; + pub const BPF_FUNC_setsockopt: Type = 49; + pub const BPF_FUNC_skb_adjust_room: Type = 50; + pub const BPF_FUNC_redirect_map: Type = 51; + pub const BPF_FUNC_sk_redirect_map: Type = 52; + pub const BPF_FUNC_sock_map_update: Type = 53; + pub const BPF_FUNC_xdp_adjust_meta: Type = 54; + pub const BPF_FUNC_perf_event_read_value: Type = 55; + pub const BPF_FUNC_perf_prog_read_value: Type = 56; + pub const BPF_FUNC_getsockopt: Type = 57; + pub const BPF_FUNC_override_return: Type = 58; + pub const BPF_FUNC_sock_ops_cb_flags_set: Type = 59; + pub const BPF_FUNC_msg_redirect_map: Type = 60; + pub const BPF_FUNC_msg_apply_bytes: Type = 61; + pub const BPF_FUNC_msg_cork_bytes: Type = 62; + pub const BPF_FUNC_msg_pull_data: Type = 63; + pub const BPF_FUNC_bind: Type = 64; + pub const BPF_FUNC_xdp_adjust_tail: Type = 65; + pub const BPF_FUNC_skb_get_xfrm_state: Type = 66; + pub const BPF_FUNC_get_stack: Type = 67; + pub const BPF_FUNC_skb_load_bytes_relative: Type = 68; + pub const BPF_FUNC_fib_lookup: Type = 69; + pub const BPF_FUNC_sock_hash_update: Type = 70; + pub const BPF_FUNC_msg_redirect_hash: Type = 71; + pub const BPF_FUNC_sk_redirect_hash: Type = 72; + pub const BPF_FUNC_lwt_push_encap: Type = 73; + pub const BPF_FUNC_lwt_seg6_store_bytes: Type = 74; + pub const BPF_FUNC_lwt_seg6_adjust_srh: Type = 75; + pub const BPF_FUNC_lwt_seg6_action: Type = 76; + pub const BPF_FUNC_rc_repeat: Type = 77; + pub const BPF_FUNC_rc_keydown: Type = 78; + pub const BPF_FUNC_skb_cgroup_id: Type = 79; + pub const BPF_FUNC_get_current_cgroup_id: Type = 80; + pub const BPF_FUNC_get_local_storage: Type = 81; + pub const BPF_FUNC_sk_select_reuseport: Type = 82; + pub const BPF_FUNC_skb_ancestor_cgroup_id: Type = 83; + pub const BPF_FUNC_sk_lookup_tcp: Type = 84; + pub const BPF_FUNC_sk_lookup_udp: Type = 85; + pub const BPF_FUNC_sk_release: Type = 86; + pub const BPF_FUNC_map_push_elem: Type = 87; + pub const BPF_FUNC_map_pop_elem: Type = 88; + pub const BPF_FUNC_map_peek_elem: Type = 89; + pub const BPF_FUNC_msg_push_data: Type = 90; + pub const BPF_FUNC_msg_pop_data: Type = 91; + pub const BPF_FUNC_rc_pointer_rel: Type = 92; + pub const BPF_FUNC_spin_lock: Type = 93; + pub const BPF_FUNC_spin_unlock: Type = 94; + pub const BPF_FUNC_sk_fullsock: Type = 95; + pub const BPF_FUNC_tcp_sock: Type = 96; + pub const BPF_FUNC_skb_ecn_set_ce: Type = 97; + pub const BPF_FUNC_get_listener_sock: Type = 98; + pub const BPF_FUNC_skc_lookup_tcp: Type = 99; + pub const BPF_FUNC_tcp_check_syncookie: Type = 100; + pub const BPF_FUNC_sysctl_get_name: Type = 101; + pub const BPF_FUNC_sysctl_get_current_value: Type = 102; + pub const BPF_FUNC_sysctl_get_new_value: Type = 103; + pub const BPF_FUNC_sysctl_set_new_value: Type = 104; + pub const BPF_FUNC_strtol: Type = 105; + pub const BPF_FUNC_strtoul: Type = 106; + pub const BPF_FUNC_sk_storage_get: Type = 107; + pub const BPF_FUNC_sk_storage_delete: Type = 108; + pub const BPF_FUNC_send_signal: Type = 109; + pub const BPF_FUNC_tcp_gen_syncookie: Type = 110; + pub const BPF_FUNC_skb_output: Type = 111; + pub const BPF_FUNC_probe_read_user: Type = 112; + pub const BPF_FUNC_probe_read_kernel: Type = 113; + pub const BPF_FUNC_probe_read_user_str: Type = 114; + pub const BPF_FUNC_probe_read_kernel_str: Type = 115; + pub const BPF_FUNC_tcp_send_ack: Type = 116; + pub const BPF_FUNC_send_signal_thread: Type = 117; + pub const BPF_FUNC_jiffies64: Type = 118; + 
pub const BPF_FUNC_read_branch_records: Type = 119; + pub const BPF_FUNC_get_ns_current_pid_tgid: Type = 120; + pub const BPF_FUNC_xdp_output: Type = 121; + pub const BPF_FUNC_get_netns_cookie: Type = 122; + pub const BPF_FUNC_get_current_ancestor_cgroup_id: Type = 123; + pub const BPF_FUNC_sk_assign: Type = 124; + pub const BPF_FUNC_ktime_get_boot_ns: Type = 125; + pub const BPF_FUNC_seq_printf: Type = 126; + pub const BPF_FUNC_seq_write: Type = 127; + pub const BPF_FUNC_sk_cgroup_id: Type = 128; + pub const BPF_FUNC_sk_ancestor_cgroup_id: Type = 129; + pub const BPF_FUNC_ringbuf_output: Type = 130; + pub const BPF_FUNC_ringbuf_reserve: Type = 131; + pub const BPF_FUNC_ringbuf_submit: Type = 132; + pub const BPF_FUNC_ringbuf_discard: Type = 133; + pub const BPF_FUNC_ringbuf_query: Type = 134; + pub const BPF_FUNC_csum_level: Type = 135; + pub const BPF_FUNC_skc_to_tcp6_sock: Type = 136; + pub const BPF_FUNC_skc_to_tcp_sock: Type = 137; + pub const BPF_FUNC_skc_to_tcp_timewait_sock: Type = 138; + pub const BPF_FUNC_skc_to_tcp_request_sock: Type = 139; + pub const BPF_FUNC_skc_to_udp6_sock: Type = 140; + pub const BPF_FUNC_get_task_stack: Type = 141; + pub const BPF_FUNC_load_hdr_opt: Type = 142; + pub const BPF_FUNC_store_hdr_opt: Type = 143; + pub const BPF_FUNC_reserve_hdr_opt: Type = 144; + pub const BPF_FUNC_inode_storage_get: Type = 145; + pub const BPF_FUNC_inode_storage_delete: Type = 146; + pub const BPF_FUNC_d_path: Type = 147; + pub const BPF_FUNC_copy_from_user: Type = 148; + pub const BPF_FUNC_snprintf_btf: Type = 149; + pub const BPF_FUNC_seq_printf_btf: Type = 150; + pub const BPF_FUNC_skb_cgroup_classid: Type = 151; + pub const BPF_FUNC_redirect_neigh: Type = 152; + pub const BPF_FUNC_per_cpu_ptr: Type = 153; + pub const BPF_FUNC_this_cpu_ptr: Type = 154; + pub const BPF_FUNC_redirect_peer: Type = 155; + pub const BPF_FUNC_task_storage_get: Type = 156; + pub const BPF_FUNC_task_storage_delete: Type = 157; + pub const BPF_FUNC_get_current_task_btf: Type = 158; + pub const BPF_FUNC_bprm_opts_set: Type = 159; + pub const BPF_FUNC_ktime_get_coarse_ns: Type = 160; + pub const BPF_FUNC_ima_inode_hash: Type = 161; + pub const BPF_FUNC_sock_from_file: Type = 162; + pub const BPF_FUNC_check_mtu: Type = 163; + pub const BPF_FUNC_for_each_map_elem: Type = 164; + pub const BPF_FUNC_snprintf: Type = 165; + pub const BPF_FUNC_sys_bpf: Type = 166; + pub const BPF_FUNC_btf_find_by_name_kind: Type = 167; + pub const BPF_FUNC_sys_close: Type = 168; + pub const BPF_FUNC_timer_init: Type = 169; + pub const BPF_FUNC_timer_set_callback: Type = 170; + pub const BPF_FUNC_timer_start: Type = 171; + pub const BPF_FUNC_timer_cancel: Type = 172; + pub const BPF_FUNC_get_func_ip: Type = 173; + pub const BPF_FUNC_get_attach_cookie: Type = 174; + pub const BPF_FUNC_task_pt_regs: Type = 175; + pub const BPF_FUNC_get_branch_snapshot: Type = 176; + pub const BPF_FUNC_trace_vprintk: Type = 177; + pub const BPF_FUNC_skc_to_unix_sock: Type = 178; + pub const BPF_FUNC_kallsyms_lookup_name: Type = 179; + pub const BPF_FUNC_find_vma: Type = 180; + pub const BPF_FUNC_loop: Type = 181; + pub const BPF_FUNC_strncmp: Type = 182; + pub const BPF_FUNC_get_func_arg: Type = 183; + pub const BPF_FUNC_get_func_ret: Type = 184; + pub const BPF_FUNC_get_func_arg_cnt: Type = 185; + pub const BPF_FUNC_get_retval: Type = 186; + pub const BPF_FUNC_set_retval: Type = 187; + pub const BPF_FUNC_xdp_get_buff_len: Type = 188; + pub const BPF_FUNC_xdp_load_bytes: Type = 189; + pub const BPF_FUNC_xdp_store_bytes: Type = 190; + pub const 
BPF_FUNC_copy_from_user_task: Type = 191; + pub const BPF_FUNC_skb_set_tstamp: Type = 192; + pub const BPF_FUNC_ima_file_hash: Type = 193; + pub const BPF_FUNC_kptr_xchg: Type = 194; + pub const BPF_FUNC_map_lookup_percpu_elem: Type = 195; + pub const __BPF_FUNC_MAX_ID: Type = 196; +} +pub const BPF_F_RECOMPUTE_CSUM: ::aya_bpf_cty::c_uint = 1; +pub const BPF_F_INVALIDATE_HASH: ::aya_bpf_cty::c_uint = 2; +pub type _bindgen_ty_4 = ::aya_bpf_cty::c_uint; +pub const BPF_F_HDR_FIELD_MASK: ::aya_bpf_cty::c_uint = 15; +pub type _bindgen_ty_5 = ::aya_bpf_cty::c_uint; +pub const BPF_F_PSEUDO_HDR: ::aya_bpf_cty::c_uint = 16; +pub const BPF_F_MARK_MANGLED_0: ::aya_bpf_cty::c_uint = 32; +pub const BPF_F_MARK_ENFORCE: ::aya_bpf_cty::c_uint = 64; +pub type _bindgen_ty_6 = ::aya_bpf_cty::c_uint; +pub const BPF_F_INGRESS: ::aya_bpf_cty::c_uint = 1; +pub type _bindgen_ty_7 = ::aya_bpf_cty::c_uint; +pub const BPF_F_TUNINFO_IPV6: ::aya_bpf_cty::c_uint = 1; +pub type _bindgen_ty_8 = ::aya_bpf_cty::c_uint; +pub const BPF_F_SKIP_FIELD_MASK: ::aya_bpf_cty::c_uint = 255; +pub const BPF_F_USER_STACK: ::aya_bpf_cty::c_uint = 256; +pub const BPF_F_FAST_STACK_CMP: ::aya_bpf_cty::c_uint = 512; +pub const BPF_F_REUSE_STACKID: ::aya_bpf_cty::c_uint = 1024; +pub const BPF_F_USER_BUILD_ID: ::aya_bpf_cty::c_uint = 2048; +pub type _bindgen_ty_9 = ::aya_bpf_cty::c_uint; +pub const BPF_F_ZERO_CSUM_TX: ::aya_bpf_cty::c_uint = 2; +pub const BPF_F_DONT_FRAGMENT: ::aya_bpf_cty::c_uint = 4; +pub const BPF_F_SEQ_NUMBER: ::aya_bpf_cty::c_uint = 8; +pub type _bindgen_ty_10 = ::aya_bpf_cty::c_uint; +pub const BPF_F_INDEX_MASK: ::aya_bpf_cty::c_ulong = 4294967295; +pub const BPF_F_CURRENT_CPU: ::aya_bpf_cty::c_ulong = 4294967295; +pub const BPF_F_CTXLEN_MASK: ::aya_bpf_cty::c_ulong = 4503595332403200; +pub type _bindgen_ty_11 = ::aya_bpf_cty::c_ulong; +pub const BPF_F_CURRENT_NETNS: ::aya_bpf_cty::c_int = -1; +pub type _bindgen_ty_12 = ::aya_bpf_cty::c_int; +pub const BPF_CSUM_LEVEL_QUERY: ::aya_bpf_cty::c_uint = 0; +pub const BPF_CSUM_LEVEL_INC: ::aya_bpf_cty::c_uint = 1; +pub const BPF_CSUM_LEVEL_DEC: ::aya_bpf_cty::c_uint = 2; +pub const BPF_CSUM_LEVEL_RESET: ::aya_bpf_cty::c_uint = 3; +pub type _bindgen_ty_13 = ::aya_bpf_cty::c_uint; +pub const BPF_F_ADJ_ROOM_FIXED_GSO: ::aya_bpf_cty::c_uint = 1; +pub const BPF_F_ADJ_ROOM_ENCAP_L3_IPV4: ::aya_bpf_cty::c_uint = 2; +pub const BPF_F_ADJ_ROOM_ENCAP_L3_IPV6: ::aya_bpf_cty::c_uint = 4; +pub const BPF_F_ADJ_ROOM_ENCAP_L4_GRE: ::aya_bpf_cty::c_uint = 8; +pub const BPF_F_ADJ_ROOM_ENCAP_L4_UDP: ::aya_bpf_cty::c_uint = 16; +pub const BPF_F_ADJ_ROOM_NO_CSUM_RESET: ::aya_bpf_cty::c_uint = 32; +pub const BPF_F_ADJ_ROOM_ENCAP_L2_ETH: ::aya_bpf_cty::c_uint = 64; +pub type _bindgen_ty_14 = ::aya_bpf_cty::c_uint; +pub const BPF_ADJ_ROOM_ENCAP_L2_MASK: ::aya_bpf_cty::c_uint = 255; +pub const BPF_ADJ_ROOM_ENCAP_L2_SHIFT: ::aya_bpf_cty::c_uint = 56; +pub type _bindgen_ty_15 = ::aya_bpf_cty::c_uint; +pub const BPF_F_SYSCTL_BASE_NAME: ::aya_bpf_cty::c_uint = 1; +pub type _bindgen_ty_16 = ::aya_bpf_cty::c_uint; +pub const BPF_LOCAL_STORAGE_GET_F_CREATE: ::aya_bpf_cty::c_uint = 1; +pub const BPF_SK_STORAGE_GET_F_CREATE: ::aya_bpf_cty::c_uint = 1; +pub type _bindgen_ty_17 = ::aya_bpf_cty::c_uint; +pub const BPF_F_GET_BRANCH_RECORDS_SIZE: ::aya_bpf_cty::c_uint = 1; +pub type _bindgen_ty_18 = ::aya_bpf_cty::c_uint; +pub const BPF_RB_NO_WAKEUP: ::aya_bpf_cty::c_uint = 1; +pub const BPF_RB_FORCE_WAKEUP: ::aya_bpf_cty::c_uint = 2; +pub type _bindgen_ty_19 = ::aya_bpf_cty::c_uint; +pub const BPF_RB_AVAIL_DATA: 
::aya_bpf_cty::c_uint = 0; +pub const BPF_RB_RING_SIZE: ::aya_bpf_cty::c_uint = 1; +pub const BPF_RB_CONS_POS: ::aya_bpf_cty::c_uint = 2; +pub const BPF_RB_PROD_POS: ::aya_bpf_cty::c_uint = 3; +pub type _bindgen_ty_20 = ::aya_bpf_cty::c_uint; +pub const BPF_RINGBUF_BUSY_BIT: ::aya_bpf_cty::c_uint = 2147483648; +pub const BPF_RINGBUF_DISCARD_BIT: ::aya_bpf_cty::c_uint = 1073741824; +pub const BPF_RINGBUF_HDR_SZ: ::aya_bpf_cty::c_uint = 8; +pub type _bindgen_ty_21 = ::aya_bpf_cty::c_uint; +pub const BPF_SK_LOOKUP_F_REPLACE: ::aya_bpf_cty::c_uint = 1; +pub const BPF_SK_LOOKUP_F_NO_REUSEPORT: ::aya_bpf_cty::c_uint = 2; +pub type _bindgen_ty_22 = ::aya_bpf_cty::c_uint; +pub mod bpf_adj_room_mode { + pub type Type = ::aya_bpf_cty::c_uint; + pub const BPF_ADJ_ROOM_NET: Type = 0; + pub const BPF_ADJ_ROOM_MAC: Type = 1; +} +pub mod bpf_hdr_start_off { + pub type Type = ::aya_bpf_cty::c_uint; + pub const BPF_HDR_START_MAC: Type = 0; + pub const BPF_HDR_START_NET: Type = 1; +} +pub mod bpf_lwt_encap_mode { + pub type Type = ::aya_bpf_cty::c_uint; + pub const BPF_LWT_ENCAP_SEG6: Type = 0; + pub const BPF_LWT_ENCAP_SEG6_INLINE: Type = 1; + pub const BPF_LWT_ENCAP_IP: Type = 2; +} +pub const BPF_F_BPRM_SECUREEXEC: ::aya_bpf_cty::c_uint = 1; +pub type _bindgen_ty_23 = ::aya_bpf_cty::c_uint; +pub const BPF_F_BROADCAST: ::aya_bpf_cty::c_uint = 8; +pub const BPF_F_EXCLUDE_INGRESS: ::aya_bpf_cty::c_uint = 16; +pub type _bindgen_ty_24 = ::aya_bpf_cty::c_uint; +pub mod _bindgen_ty_25 { + pub type Type = ::aya_bpf_cty::c_uint; + pub const BPF_SKB_TSTAMP_UNSPEC: Type = 0; + pub const BPF_SKB_TSTAMP_DELIVERY_MONO: Type = 1; +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct __sk_buff { + pub len: __u32, + pub pkt_type: __u32, + pub mark: __u32, + pub queue_mapping: __u32, + pub protocol: __u32, + pub vlan_present: __u32, + pub vlan_tci: __u32, + pub vlan_proto: __u32, + pub priority: __u32, + pub ingress_ifindex: __u32, + pub ifindex: __u32, + pub tc_index: __u32, + pub cb: [__u32; 5usize], + pub hash: __u32, + pub tc_classid: __u32, + pub data: __u32, + pub data_end: __u32, + pub napi_id: __u32, + pub family: __u32, + pub remote_ip4: __u32, + pub local_ip4: __u32, + pub remote_ip6: [__u32; 4usize], + pub local_ip6: [__u32; 4usize], + pub remote_port: __u32, + pub local_port: __u32, + pub data_meta: __u32, + pub __bindgen_anon_1: __sk_buff__bindgen_ty_1, + pub tstamp: __u64, + pub wire_len: __u32, + pub gso_segs: __u32, + pub __bindgen_anon_2: __sk_buff__bindgen_ty_2, + pub gso_size: __u32, + pub tstamp_type: __u8, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 3usize]>, + pub hwtstamp: __u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union __sk_buff__bindgen_ty_1 { + pub flow_keys: *mut bpf_flow_keys, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, +} +impl __sk_buff__bindgen_ty_1 { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union __sk_buff__bindgen_ty_2 { + pub sk: *mut bpf_sock, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, +} +impl __sk_buff__bindgen_ty_2 { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +impl __sk_buff { + #[inline] + 
pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 3usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 3usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_tunnel_key { + pub tunnel_id: __u32, + pub __bindgen_anon_1: bpf_tunnel_key__bindgen_ty_1, + pub tunnel_tos: __u8, + pub tunnel_ttl: __u8, + pub tunnel_ext: __u16, + pub tunnel_label: __u32, + pub __bindgen_anon_2: bpf_tunnel_key__bindgen_ty_2, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_tunnel_key__bindgen_ty_1 { + pub remote_ipv4: __u32, + pub remote_ipv6: [__u32; 4usize], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_tunnel_key__bindgen_ty_2 { + pub local_ipv4: __u32, + pub local_ipv6: [__u32; 4usize], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_xfrm_state { + pub reqid: __u32, + pub spi: __u32, + pub family: __u16, + pub ext: __u16, + pub __bindgen_anon_1: bpf_xfrm_state__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_xfrm_state__bindgen_ty_1 { + pub remote_ipv4: __u32, + pub remote_ipv6: [__u32; 4usize], +} +pub mod bpf_ret_code { + pub type Type = ::aya_bpf_cty::c_uint; + pub const BPF_OK: Type = 0; + pub const BPF_DROP: Type = 2; + pub const BPF_REDIRECT: Type = 7; + pub const BPF_LWT_REROUTE: Type = 128; +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_sock { + pub bound_dev_if: __u32, + pub family: __u32, + pub type_: __u32, + pub protocol: __u32, + pub mark: __u32, + pub priority: __u32, + pub src_ip4: __u32, + pub src_ip6: [__u32; 4usize], + pub src_port: __u32, + pub dst_port: __be16, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 2usize]>, + pub dst_ip4: __u32, + pub dst_ip6: [__u32; 4usize], + pub state: __u32, + pub rx_queue_mapping: __s32, +} +impl bpf_sock { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 2usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 2usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_tcp_sock { + pub snd_cwnd: __u32, + pub srtt_us: __u32, + pub rtt_min: __u32, + pub snd_ssthresh: __u32, + pub rcv_nxt: __u32, + pub snd_nxt: __u32, + pub snd_una: __u32, + pub mss_cache: __u32, + pub ecn_flags: __u32, + pub rate_delivered: __u32, + pub rate_interval_us: __u32, + pub packets_out: __u32, + pub retrans_out: __u32, + pub total_retrans: __u32, + pub segs_in: __u32, + pub data_segs_in: __u32, + pub segs_out: __u32, + pub data_segs_out: __u32, + pub lost_out: __u32, + pub sacked_out: __u32, + pub bytes_received: __u64, + pub bytes_acked: __u64, + pub dsack_dups: __u32, + pub delivered: __u32, + pub delivered_ce: __u32, + pub icsk_retransmits: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_sock_tuple { + pub __bindgen_anon_1: bpf_sock_tuple__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_sock_tuple__bindgen_ty_1 { + pub ipv4: bpf_sock_tuple__bindgen_ty_1__bindgen_ty_1, + pub ipv6: bpf_sock_tuple__bindgen_ty_1__bindgen_ty_2, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_sock_tuple__bindgen_ty_1__bindgen_ty_1 { + pub saddr: __be32, + pub daddr: __be32, + pub sport: __be16, + pub dport: __be16, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_sock_tuple__bindgen_ty_1__bindgen_ty_2 { + pub saddr: [__be32; 4usize], + pub daddr: [__be32; 4usize], + pub sport: __be16, + pub dport: __be16, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_xdp_sock { + pub queue_id: __u32, +} +pub mod 
xdp_action { + pub type Type = ::aya_bpf_cty::c_uint; + pub const XDP_ABORTED: Type = 0; + pub const XDP_DROP: Type = 1; + pub const XDP_PASS: Type = 2; + pub const XDP_TX: Type = 3; + pub const XDP_REDIRECT: Type = 4; +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct xdp_md { + pub data: __u32, + pub data_end: __u32, + pub data_meta: __u32, + pub ingress_ifindex: __u32, + pub rx_queue_index: __u32, + pub egress_ifindex: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_devmap_val { + pub ifindex: __u32, + pub bpf_prog: bpf_devmap_val__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_devmap_val__bindgen_ty_1 { + pub fd: ::aya_bpf_cty::c_int, + pub id: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_cpumap_val { + pub qsize: __u32, + pub bpf_prog: bpf_cpumap_val__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_cpumap_val__bindgen_ty_1 { + pub fd: ::aya_bpf_cty::c_int, + pub id: __u32, +} +pub mod sk_action { + pub type Type = ::aya_bpf_cty::c_uint; + pub const SK_DROP: Type = 0; + pub const SK_PASS: Type = 1; +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct sk_msg_md { + pub __bindgen_anon_1: sk_msg_md__bindgen_ty_1, + pub __bindgen_anon_2: sk_msg_md__bindgen_ty_2, + pub family: __u32, + pub remote_ip4: __u32, + pub local_ip4: __u32, + pub remote_ip6: [__u32; 4usize], + pub local_ip6: [__u32; 4usize], + pub remote_port: __u32, + pub local_port: __u32, + pub size: __u32, + pub __bindgen_anon_3: sk_msg_md__bindgen_ty_3, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union sk_msg_md__bindgen_ty_1 { + pub data: *mut ::aya_bpf_cty::c_void, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, +} +impl sk_msg_md__bindgen_ty_1 { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union sk_msg_md__bindgen_ty_2 { + pub data_end: *mut ::aya_bpf_cty::c_void, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, +} +impl sk_msg_md__bindgen_ty_2 { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union sk_msg_md__bindgen_ty_3 { + pub sk: *mut bpf_sock, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, +} +impl sk_msg_md__bindgen_ty_3 { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct sk_reuseport_md { + pub __bindgen_anon_1: sk_reuseport_md__bindgen_ty_1, + pub __bindgen_anon_2: sk_reuseport_md__bindgen_ty_2, + pub len: __u32, + pub eth_protocol: __u32, + pub ip_protocol: __u32, + pub bind_inany: __u32, + pub hash: __u32, + pub __bindgen_anon_3: sk_reuseport_md__bindgen_ty_3, + pub __bindgen_anon_4: sk_reuseport_md__bindgen_ty_4, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union sk_reuseport_md__bindgen_ty_1 { + pub data: *mut ::aya_bpf_cty::c_void, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, +} +impl sk_reuseport_md__bindgen_ty_1 { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 
8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union sk_reuseport_md__bindgen_ty_2 { + pub data_end: *mut ::aya_bpf_cty::c_void, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, +} +impl sk_reuseport_md__bindgen_ty_2 { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union sk_reuseport_md__bindgen_ty_3 { + pub sk: *mut bpf_sock, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, +} +impl sk_reuseport_md__bindgen_ty_3 { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union sk_reuseport_md__bindgen_ty_4 { + pub migrating_sk: *mut bpf_sock, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, +} +impl sk_reuseport_md__bindgen_ty_4 { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_prog_info { + pub type_: __u32, + pub id: __u32, + pub tag: [__u8; 8usize], + pub jited_prog_len: __u32, + pub xlated_prog_len: __u32, + pub jited_prog_insns: __u64, + pub xlated_prog_insns: __u64, + pub load_time: __u64, + pub created_by_uid: __u32, + pub nr_map_ids: __u32, + pub map_ids: __u64, + pub name: [::aya_bpf_cty::c_char; 16usize], + pub ifindex: __u32, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 4usize]>, + pub netns_dev: __u64, + pub netns_ino: __u64, + pub nr_jited_ksyms: __u32, + pub nr_jited_func_lens: __u32, + pub jited_ksyms: __u64, + pub jited_func_lens: __u64, + pub btf_id: __u32, + pub func_info_rec_size: __u32, + pub func_info: __u64, + pub nr_func_info: __u32, + pub nr_line_info: __u32, + pub line_info: __u64, + pub jited_line_info: __u64, + pub nr_jited_line_info: __u32, + pub line_info_rec_size: __u32, + pub jited_line_info_rec_size: __u32, + pub nr_prog_tags: __u32, + pub prog_tags: __u64, + pub run_time_ns: __u64, + pub run_cnt: __u64, + pub recursion_misses: __u64, + pub verified_insns: __u32, +} +impl bpf_prog_info { + #[inline] + pub fn gpl_compatible(&self) -> __u32 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u32) } + } + #[inline] + pub fn set_gpl_compatible(&mut self, val: __u32) { + unsafe { + let val: u32 = ::core::mem::transmute(val); + self._bitfield_1.set(0usize, 1u8, val as u64) + } + } + #[inline] + pub fn new_bitfield_1(gpl_compatible: __u32) -> __BindgenBitfieldUnit<[u8; 4usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default(); + __bindgen_bitfield_unit.set(0usize, 1u8, { + let gpl_compatible: u32 = unsafe { ::core::mem::transmute(gpl_compatible) }; + gpl_compatible as u64 + }); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_map_info { + pub type_: __u32, + pub id: __u32, + pub key_size: __u32, + pub value_size: __u32, + pub max_entries: __u32, + pub map_flags: __u32, + pub name: 
[::aya_bpf_cty::c_char; 16usize], + pub ifindex: __u32, + pub btf_vmlinux_value_type_id: __u32, + pub netns_dev: __u64, + pub netns_ino: __u64, + pub btf_id: __u32, + pub btf_key_type_id: __u32, + pub btf_value_type_id: __u32, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 4usize]>, + pub map_extra: __u64, +} +impl bpf_map_info { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 4usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_btf_info { + pub btf: __u64, + pub btf_size: __u32, + pub id: __u32, + pub name: __u64, + pub name_len: __u32, + pub kernel_btf: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_link_info { + pub type_: __u32, + pub id: __u32, + pub prog_id: __u32, + pub __bindgen_anon_1: bpf_link_info__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_link_info__bindgen_ty_1 { + pub raw_tracepoint: bpf_link_info__bindgen_ty_1__bindgen_ty_1, + pub tracing: bpf_link_info__bindgen_ty_1__bindgen_ty_2, + pub cgroup: bpf_link_info__bindgen_ty_1__bindgen_ty_3, + pub iter: bpf_link_info__bindgen_ty_1__bindgen_ty_4, + pub netns: bpf_link_info__bindgen_ty_1__bindgen_ty_5, + pub xdp: bpf_link_info__bindgen_ty_1__bindgen_ty_6, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_1 { + pub tp_name: __u64, + pub tp_name_len: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_2 { + pub attach_type: __u32, + pub target_obj_id: __u32, + pub target_btf_id: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_3 { + pub cgroup_id: __u64, + pub attach_type: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_4 { + pub target_name: __u64, + pub target_name_len: __u32, + pub __bindgen_anon_1: bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_1 { + pub map: bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_1__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_1__bindgen_ty_1 { + pub map_id: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_5 { + pub netns_ino: __u32, + pub attach_type: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_6 { + pub ifindex: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_sock_addr { + pub user_family: __u32, + pub user_ip4: __u32, + pub user_ip6: [__u32; 4usize], + pub user_port: __u32, + pub family: __u32, + pub type_: __u32, + pub protocol: __u32, + pub msg_src_ip4: __u32, + pub msg_src_ip6: [__u32; 4usize], + pub __bindgen_anon_1: bpf_sock_addr__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_sock_addr__bindgen_ty_1 { + pub sk: *mut bpf_sock, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, +} +impl bpf_sock_addr__bindgen_ty_1 { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_sock_ops { + pub op: __u32, + pub __bindgen_anon_1: 
bpf_sock_ops__bindgen_ty_1, + pub family: __u32, + pub remote_ip4: __u32, + pub local_ip4: __u32, + pub remote_ip6: [__u32; 4usize], + pub local_ip6: [__u32; 4usize], + pub remote_port: __u32, + pub local_port: __u32, + pub is_fullsock: __u32, + pub snd_cwnd: __u32, + pub srtt_us: __u32, + pub bpf_sock_ops_cb_flags: __u32, + pub state: __u32, + pub rtt_min: __u32, + pub snd_ssthresh: __u32, + pub rcv_nxt: __u32, + pub snd_nxt: __u32, + pub snd_una: __u32, + pub mss_cache: __u32, + pub ecn_flags: __u32, + pub rate_delivered: __u32, + pub rate_interval_us: __u32, + pub packets_out: __u32, + pub retrans_out: __u32, + pub total_retrans: __u32, + pub segs_in: __u32, + pub data_segs_in: __u32, + pub segs_out: __u32, + pub data_segs_out: __u32, + pub lost_out: __u32, + pub sacked_out: __u32, + pub sk_txhash: __u32, + pub bytes_received: __u64, + pub bytes_acked: __u64, + pub __bindgen_anon_2: bpf_sock_ops__bindgen_ty_2, + pub __bindgen_anon_3: bpf_sock_ops__bindgen_ty_3, + pub __bindgen_anon_4: bpf_sock_ops__bindgen_ty_4, + pub skb_len: __u32, + pub skb_tcp_flags: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_sock_ops__bindgen_ty_1 { + pub args: [__u32; 4usize], + pub reply: __u32, + pub replylong: [__u32; 4usize], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_sock_ops__bindgen_ty_2 { + pub sk: *mut bpf_sock, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, +} +impl bpf_sock_ops__bindgen_ty_2 { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_sock_ops__bindgen_ty_3 { + pub skb_data: *mut ::aya_bpf_cty::c_void, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, +} +impl bpf_sock_ops__bindgen_ty_3 { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_sock_ops__bindgen_ty_4 { + pub skb_data_end: *mut ::aya_bpf_cty::c_void, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, +} +impl bpf_sock_ops__bindgen_ty_4 { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +pub const BPF_SOCK_OPS_RTO_CB_FLAG: ::aya_bpf_cty::c_uint = 1; +pub const BPF_SOCK_OPS_RETRANS_CB_FLAG: ::aya_bpf_cty::c_uint = 2; +pub const BPF_SOCK_OPS_STATE_CB_FLAG: ::aya_bpf_cty::c_uint = 4; +pub const BPF_SOCK_OPS_RTT_CB_FLAG: ::aya_bpf_cty::c_uint = 8; +pub const BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG: ::aya_bpf_cty::c_uint = 16; +pub const BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG: ::aya_bpf_cty::c_uint = 32; +pub const BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG: ::aya_bpf_cty::c_uint = 64; +pub const BPF_SOCK_OPS_ALL_CB_FLAGS: ::aya_bpf_cty::c_uint = 127; +pub type _bindgen_ty_26 = ::aya_bpf_cty::c_uint; +pub const BPF_SOCK_OPS_VOID: ::aya_bpf_cty::c_uint = 0; +pub const BPF_SOCK_OPS_TIMEOUT_INIT: ::aya_bpf_cty::c_uint = 1; +pub const BPF_SOCK_OPS_RWND_INIT: ::aya_bpf_cty::c_uint = 2; +pub const BPF_SOCK_OPS_TCP_CONNECT_CB: ::aya_bpf_cty::c_uint = 3; +pub const BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB: ::aya_bpf_cty::c_uint = 4; +pub const 
BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB: ::aya_bpf_cty::c_uint = 5; +pub const BPF_SOCK_OPS_NEEDS_ECN: ::aya_bpf_cty::c_uint = 6; +pub const BPF_SOCK_OPS_BASE_RTT: ::aya_bpf_cty::c_uint = 7; +pub const BPF_SOCK_OPS_RTO_CB: ::aya_bpf_cty::c_uint = 8; +pub const BPF_SOCK_OPS_RETRANS_CB: ::aya_bpf_cty::c_uint = 9; +pub const BPF_SOCK_OPS_STATE_CB: ::aya_bpf_cty::c_uint = 10; +pub const BPF_SOCK_OPS_TCP_LISTEN_CB: ::aya_bpf_cty::c_uint = 11; +pub const BPF_SOCK_OPS_RTT_CB: ::aya_bpf_cty::c_uint = 12; +pub const BPF_SOCK_OPS_PARSE_HDR_OPT_CB: ::aya_bpf_cty::c_uint = 13; +pub const BPF_SOCK_OPS_HDR_OPT_LEN_CB: ::aya_bpf_cty::c_uint = 14; +pub const BPF_SOCK_OPS_WRITE_HDR_OPT_CB: ::aya_bpf_cty::c_uint = 15; +pub type _bindgen_ty_27 = ::aya_bpf_cty::c_uint; +pub const BPF_TCP_ESTABLISHED: ::aya_bpf_cty::c_uint = 1; +pub const BPF_TCP_SYN_SENT: ::aya_bpf_cty::c_uint = 2; +pub const BPF_TCP_SYN_RECV: ::aya_bpf_cty::c_uint = 3; +pub const BPF_TCP_FIN_WAIT1: ::aya_bpf_cty::c_uint = 4; +pub const BPF_TCP_FIN_WAIT2: ::aya_bpf_cty::c_uint = 5; +pub const BPF_TCP_TIME_WAIT: ::aya_bpf_cty::c_uint = 6; +pub const BPF_TCP_CLOSE: ::aya_bpf_cty::c_uint = 7; +pub const BPF_TCP_CLOSE_WAIT: ::aya_bpf_cty::c_uint = 8; +pub const BPF_TCP_LAST_ACK: ::aya_bpf_cty::c_uint = 9; +pub const BPF_TCP_LISTEN: ::aya_bpf_cty::c_uint = 10; +pub const BPF_TCP_CLOSING: ::aya_bpf_cty::c_uint = 11; +pub const BPF_TCP_NEW_SYN_RECV: ::aya_bpf_cty::c_uint = 12; +pub const BPF_TCP_MAX_STATES: ::aya_bpf_cty::c_uint = 13; +pub type _bindgen_ty_28 = ::aya_bpf_cty::c_uint; +pub mod _bindgen_ty_30 { + pub type Type = ::aya_bpf_cty::c_uint; + pub const BPF_LOAD_HDR_OPT_TCP_SYN: Type = 1; +} +pub mod _bindgen_ty_31 { + pub type Type = ::aya_bpf_cty::c_uint; + pub const BPF_WRITE_HDR_TCP_CURRENT_MSS: Type = 1; + pub const BPF_WRITE_HDR_TCP_SYNACK_COOKIE: Type = 2; +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_perf_event_value { + pub counter: __u64, + pub enabled: __u64, + pub running: __u64, +} +pub const BPF_DEVCG_ACC_MKNOD: ::aya_bpf_cty::c_uint = 1; +pub const BPF_DEVCG_ACC_READ: ::aya_bpf_cty::c_uint = 2; +pub const BPF_DEVCG_ACC_WRITE: ::aya_bpf_cty::c_uint = 4; +pub type _bindgen_ty_32 = ::aya_bpf_cty::c_uint; +pub const BPF_DEVCG_DEV_BLOCK: ::aya_bpf_cty::c_uint = 1; +pub const BPF_DEVCG_DEV_CHAR: ::aya_bpf_cty::c_uint = 2; +pub type _bindgen_ty_33 = ::aya_bpf_cty::c_uint; +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_cgroup_dev_ctx { + pub access_type: __u32, + pub major: __u32, + pub minor: __u32, +} +#[repr(C)] +pub struct bpf_raw_tracepoint_args { + pub args: __IncompleteArrayField<__u64>, +} +pub const BPF_FIB_LOOKUP_DIRECT: ::aya_bpf_cty::c_uint = 1; +pub const BPF_FIB_LOOKUP_OUTPUT: ::aya_bpf_cty::c_uint = 2; +pub type _bindgen_ty_34 = ::aya_bpf_cty::c_uint; +pub const BPF_FIB_LKUP_RET_SUCCESS: ::aya_bpf_cty::c_uint = 0; +pub const BPF_FIB_LKUP_RET_BLACKHOLE: ::aya_bpf_cty::c_uint = 1; +pub const BPF_FIB_LKUP_RET_UNREACHABLE: ::aya_bpf_cty::c_uint = 2; +pub const BPF_FIB_LKUP_RET_PROHIBIT: ::aya_bpf_cty::c_uint = 3; +pub const BPF_FIB_LKUP_RET_NOT_FWDED: ::aya_bpf_cty::c_uint = 4; +pub const BPF_FIB_LKUP_RET_FWD_DISABLED: ::aya_bpf_cty::c_uint = 5; +pub const BPF_FIB_LKUP_RET_UNSUPP_LWT: ::aya_bpf_cty::c_uint = 6; +pub const BPF_FIB_LKUP_RET_NO_NEIGH: ::aya_bpf_cty::c_uint = 7; +pub const BPF_FIB_LKUP_RET_FRAG_NEEDED: ::aya_bpf_cty::c_uint = 8; +pub type _bindgen_ty_35 = ::aya_bpf_cty::c_uint; +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_fib_lookup { + pub family: __u8, + pub l4_protocol: __u8, + pub sport: 
__be16, + pub dport: __be16, + pub __bindgen_anon_1: bpf_fib_lookup__bindgen_ty_1, + pub ifindex: __u32, + pub __bindgen_anon_2: bpf_fib_lookup__bindgen_ty_2, + pub __bindgen_anon_3: bpf_fib_lookup__bindgen_ty_3, + pub __bindgen_anon_4: bpf_fib_lookup__bindgen_ty_4, + pub h_vlan_proto: __be16, + pub h_vlan_TCI: __be16, + pub smac: [__u8; 6usize], + pub dmac: [__u8; 6usize], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_fib_lookup__bindgen_ty_1 { + pub tot_len: __u16, + pub mtu_result: __u16, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_fib_lookup__bindgen_ty_2 { + pub tos: __u8, + pub flowinfo: __be32, + pub rt_metric: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_fib_lookup__bindgen_ty_3 { + pub ipv4_src: __be32, + pub ipv6_src: [__u32; 4usize], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_fib_lookup__bindgen_ty_4 { + pub ipv4_dst: __be32, + pub ipv6_dst: [__u32; 4usize], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_redir_neigh { + pub nh_family: __u32, + pub __bindgen_anon_1: bpf_redir_neigh__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_redir_neigh__bindgen_ty_1 { + pub ipv4_nh: __be32, + pub ipv6_nh: [__u32; 4usize], +} +pub mod bpf_check_mtu_flags { + pub type Type = ::aya_bpf_cty::c_uint; + pub const BPF_MTU_CHK_SEGS: Type = 1; +} +pub mod bpf_check_mtu_ret { + pub type Type = ::aya_bpf_cty::c_uint; + pub const BPF_MTU_CHK_RET_SUCCESS: Type = 0; + pub const BPF_MTU_CHK_RET_FRAG_NEEDED: Type = 1; + pub const BPF_MTU_CHK_RET_SEGS_TOOBIG: Type = 2; +} +pub mod bpf_task_fd_type { + pub type Type = ::aya_bpf_cty::c_uint; + pub const BPF_FD_TYPE_RAW_TRACEPOINT: Type = 0; + pub const BPF_FD_TYPE_TRACEPOINT: Type = 1; + pub const BPF_FD_TYPE_KPROBE: Type = 2; + pub const BPF_FD_TYPE_KRETPROBE: Type = 3; + pub const BPF_FD_TYPE_UPROBE: Type = 4; + pub const BPF_FD_TYPE_URETPROBE: Type = 5; +} +pub const BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG: ::aya_bpf_cty::c_uint = 1; +pub const BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL: ::aya_bpf_cty::c_uint = 2; +pub const BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP: ::aya_bpf_cty::c_uint = 4; +pub type _bindgen_ty_36 = ::aya_bpf_cty::c_uint; +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_flow_keys { + pub nhoff: __u16, + pub thoff: __u16, + pub addr_proto: __u16, + pub is_frag: __u8, + pub is_first_frag: __u8, + pub is_encap: __u8, + pub ip_proto: __u8, + pub n_proto: __be16, + pub sport: __be16, + pub dport: __be16, + pub __bindgen_anon_1: bpf_flow_keys__bindgen_ty_1, + pub flags: __u32, + pub flow_label: __be32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_flow_keys__bindgen_ty_1 { + pub __bindgen_anon_1: bpf_flow_keys__bindgen_ty_1__bindgen_ty_1, + pub __bindgen_anon_2: bpf_flow_keys__bindgen_ty_1__bindgen_ty_2, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_flow_keys__bindgen_ty_1__bindgen_ty_1 { + pub ipv4_src: __be32, + pub ipv4_dst: __be32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_flow_keys__bindgen_ty_1__bindgen_ty_2 { + pub ipv6_src: [__u32; 4usize], + pub ipv6_dst: [__u32; 4usize], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_func_info { + pub insn_off: __u32, + pub type_id: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_line_info { + pub insn_off: __u32, + pub file_name_off: __u32, + pub line_off: __u32, + pub line_col: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_spin_lock { + pub val: __u32, +} +#[repr(C)] +#[repr(align(8))] +#[derive(Copy, Clone)] +pub struct bpf_timer { + pub _bitfield_align_1: [u8; 0], + 
pub _bitfield_1: __BindgenBitfieldUnit<[u8; 16usize]>, +} +impl bpf_timer { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 16usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 16usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_sysctl { + pub write: __u32, + pub file_pos: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_sockopt { + pub __bindgen_anon_1: bpf_sockopt__bindgen_ty_1, + pub __bindgen_anon_2: bpf_sockopt__bindgen_ty_2, + pub __bindgen_anon_3: bpf_sockopt__bindgen_ty_3, + pub level: __s32, + pub optname: __s32, + pub optlen: __s32, + pub retval: __s32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_sockopt__bindgen_ty_1 { + pub sk: *mut bpf_sock, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, +} +impl bpf_sockopt__bindgen_ty_1 { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_sockopt__bindgen_ty_2 { + pub optval: *mut ::aya_bpf_cty::c_void, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, +} +impl bpf_sockopt__bindgen_ty_2 { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_sockopt__bindgen_ty_3 { + pub optval_end: *mut ::aya_bpf_cty::c_void, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, +} +impl bpf_sockopt__bindgen_ty_3 { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_pidns_info { + pub pid: __u32, + pub tgid: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_sk_lookup { + pub __bindgen_anon_1: bpf_sk_lookup__bindgen_ty_1, + pub family: __u32, + pub protocol: __u32, + pub remote_ip4: __u32, + pub remote_ip6: [__u32; 4usize], + pub remote_port: __be16, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 2usize]>, + pub local_ip4: __u32, + pub local_ip6: [__u32; 4usize], + pub local_port: __u32, + pub ingress_ifindex: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_sk_lookup__bindgen_ty_1 { + pub __bindgen_anon_1: bpf_sk_lookup__bindgen_ty_1__bindgen_ty_1, + pub cookie: __u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_sk_lookup__bindgen_ty_1__bindgen_ty_1 { + pub sk: *mut bpf_sock, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, +} +impl bpf_sk_lookup__bindgen_ty_1__bindgen_ty_1 { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +impl bpf_sk_lookup { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 2usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 2usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct btf_ptr { + pub ptr: *mut ::aya_bpf_cty::c_void, + pub 
type_id: __u32, + pub flags: __u32, +} +pub mod bpf_core_relo_kind { + pub type Type = ::aya_bpf_cty::c_uint; + pub const BPF_CORE_FIELD_BYTE_OFFSET: Type = 0; + pub const BPF_CORE_FIELD_BYTE_SIZE: Type = 1; + pub const BPF_CORE_FIELD_EXISTS: Type = 2; + pub const BPF_CORE_FIELD_SIGNED: Type = 3; + pub const BPF_CORE_FIELD_LSHIFT_U64: Type = 4; + pub const BPF_CORE_FIELD_RSHIFT_U64: Type = 5; + pub const BPF_CORE_TYPE_ID_LOCAL: Type = 6; + pub const BPF_CORE_TYPE_ID_TARGET: Type = 7; + pub const BPF_CORE_TYPE_EXISTS: Type = 8; + pub const BPF_CORE_TYPE_SIZE: Type = 9; + pub const BPF_CORE_ENUMVAL_EXISTS: Type = 10; + pub const BPF_CORE_ENUMVAL_VALUE: Type = 11; +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_core_relo { + pub insn_off: __u32, + pub type_id: __u32, + pub access_str_off: __u32, + pub kind: bpf_core_relo_kind::Type, +} +pub type sa_family_t = ::aya_bpf_cty::c_ushort; +#[repr(C)] +#[derive(Copy, Clone)] +pub struct sockaddr { + pub sa_family: sa_family_t, + pub sa_data: [::aya_bpf_cty::c_char; 14usize], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_perf_event_data { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct linux_binprm { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct pt_regs { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct tcphdr { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct seq_file { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct tcp6_sock { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct tcp_sock { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct tcp_timewait_sock { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct tcp_request_sock { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct udp6_sock { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct unix_sock { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct task_struct { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct path { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct inode { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct socket { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct file { + _unused: [u8; 0], +} diff --git a/bpf/aya-bpf-bindings/src/riscv64/getters.rs b/bpf/aya-bpf-bindings/src/riscv64/getters.rs index e69de29b..78aad2bd 100644 --- a/bpf/aya-bpf-bindings/src/riscv64/getters.rs +++ b/bpf/aya-bpf-bindings/src/riscv64/getters.rs @@ -0,0 +1,2390 @@ +use super::bindings::*; +impl __BindgenBitfieldUnit {} +impl bpf_insn { + pub fn code(&self) -> Option<__u8> { + unsafe { crate::bpf_probe_read(&self.code) }.ok() + } + pub fn off(&self) -> Option<__s16> { + unsafe { crate::bpf_probe_read(&self.off) }.ok() + } + pub fn imm(&self) -> Option<__s32> { + unsafe { crate::bpf_probe_read(&self.imm) }.ok() + } +} +impl bpf_lpm_trie_key { + pub fn prefixlen(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.prefixlen) }.ok() + } + pub fn data(&self) -> Option<__IncompleteArrayField<__u8>> { + unsafe { crate::bpf_probe_read(&self.data) }.ok() + } +} +impl bpf_cgroup_storage_key { + pub fn cgroup_inode_id(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.cgroup_inode_id) }.ok() + } + pub fn attach_type(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.attach_type) }.ok() + } +} +impl bpf_iter_link_info { + pub fn map(&self) -> 
Option<bpf_iter_link_info__bindgen_ty_1> { + unsafe { crate::bpf_probe_read(&self.map) }.ok() + } +} +impl bpf_iter_link_info__bindgen_ty_1 { + pub fn map_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.map_fd) }.ok() + } +} +impl bpf_stack_build_id { + pub fn status(&self) -> Option<__s32> { + unsafe { crate::bpf_probe_read(&self.status) }.ok() + } + pub fn build_id(&self) -> Option<[::aya_bpf_cty::c_uchar; 20usize]> { + unsafe { crate::bpf_probe_read(&self.build_id) }.ok() + } + pub fn offset(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.offset) }.ok() + } + pub fn ip(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.ip) }.ok() + } +} +impl bpf_stack_build_id__bindgen_ty_1 { + pub fn offset(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.offset) }.ok() + } + pub fn ip(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.ip) }.ok() + } +} +impl bpf_attr { + pub fn map_type(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.map_type) }.ok() + } + pub fn key_size(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.key_size) }.ok() + } + pub fn value_size(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.value_size) }.ok() + } + pub fn max_entries(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.max_entries) }.ok() + } + pub fn map_flags(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.map_flags) }.ok() + } + pub fn inner_map_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.inner_map_fd) }.ok() + } + pub fn numa_node(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.numa_node) }.ok() + } + pub fn map_name(&self) -> Option<[::aya_bpf_cty::c_char; 16usize]> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.map_name) }.ok() + } + pub fn map_ifindex(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.map_ifindex) }.ok() + } + pub fn btf_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.btf_fd) }.ok() + } + pub fn btf_key_type_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.btf_key_type_id) }.ok() + } + pub fn btf_value_type_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.btf_value_type_id) }.ok() + } + pub fn btf_vmlinux_value_type_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.btf_vmlinux_value_type_id) }.ok() + } + pub fn map_extra(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.map_extra) }.ok() + } + pub fn map_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_2.map_fd) }.ok() + } + pub fn key(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_2.key) }.ok() + } + pub fn value(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_2.__bindgen_anon_1.value) }.ok() + } + pub fn next_key(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_2.__bindgen_anon_1.next_key) }.ok() + } + pub fn flags(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_2.flags) }.ok() + } + pub fn batch(&self) -> Option<bpf_attr__bindgen_ty_3> { + unsafe { crate::bpf_probe_read(&self.batch) }.ok() + } + pub fn prog_type(&self) -> Option<__u32> { + unsafe { 
crate::bpf_probe_read(&self.__bindgen_anon_3.prog_type) }.ok() + } + pub fn insn_cnt(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_3.insn_cnt) }.ok() + } + pub fn insns(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_3.insns) }.ok() + } + pub fn license(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_3.license) }.ok() + } + pub fn log_level(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_3.log_level) }.ok() + } + pub fn log_size(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_3.log_size) }.ok() + } + pub fn log_buf(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_3.log_buf) }.ok() + } + pub fn kern_version(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_3.kern_version) }.ok() + } + pub fn prog_flags(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_3.prog_flags) }.ok() + } + pub fn prog_name(&self) -> Option<[::aya_bpf_cty::c_char; 16usize]> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_3.prog_name) }.ok() + } + pub fn prog_ifindex(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_3.prog_ifindex) }.ok() + } + pub fn expected_attach_type(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_3.expected_attach_type) }.ok() + } + pub fn prog_btf_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_3.prog_btf_fd) }.ok() + } + pub fn func_info_rec_size(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_3.func_info_rec_size) }.ok() + } + pub fn func_info(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_3.func_info) }.ok() + } + pub fn func_info_cnt(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_3.func_info_cnt) }.ok() + } + pub fn line_info_rec_size(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_3.line_info_rec_size) }.ok() + } + pub fn line_info(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_3.line_info) }.ok() + } + pub fn line_info_cnt(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_3.line_info_cnt) }.ok() + } + pub fn attach_btf_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_3.attach_btf_id) }.ok() + } + pub fn attach_prog_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_3.__bindgen_anon_1.attach_prog_fd) } + .ok() + } + pub fn attach_btf_obj_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_3.__bindgen_anon_1.attach_btf_obj_fd) } + .ok() + } + pub fn core_relo_cnt(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_3.core_relo_cnt) }.ok() + } + pub fn fd_array(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_3.fd_array) }.ok() + } + pub fn core_relos(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_3.core_relos) }.ok() + } + pub fn core_relo_rec_size(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_3.core_relo_rec_size) }.ok() + } + pub fn pathname(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_4.pathname) }.ok() + } + pub fn bpf_fd(&self) -> Option<__u32> { + unsafe { 
crate::bpf_probe_read(&self.__bindgen_anon_4.bpf_fd) }.ok() + } + pub fn file_flags(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_4.file_flags) }.ok() + } + pub fn target_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_5.target_fd) }.ok() + } + pub fn attach_bpf_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_5.attach_bpf_fd) }.ok() + } + pub fn attach_type(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_5.attach_type) }.ok() + } + pub fn attach_flags(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_5.attach_flags) }.ok() + } + pub fn replace_bpf_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_5.replace_bpf_fd) }.ok() + } + pub fn test(&self) -> Option<bpf_attr__bindgen_ty_7> { + unsafe { crate::bpf_probe_read(&self.test) }.ok() + } + pub fn start_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_6.__bindgen_anon_1.start_id) }.ok() + } + pub fn prog_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_6.__bindgen_anon_1.prog_id) }.ok() + } + pub fn map_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_6.__bindgen_anon_1.map_id) }.ok() + } + pub fn btf_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_6.__bindgen_anon_1.btf_id) }.ok() + } + pub fn link_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_6.__bindgen_anon_1.link_id) }.ok() + } + pub fn next_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_6.next_id) }.ok() + } + pub fn open_flags(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_6.open_flags) }.ok() + } + pub fn info(&self) -> Option<bpf_attr__bindgen_ty_9> { + unsafe { crate::bpf_probe_read(&self.info) }.ok() + } + pub fn query(&self) -> Option<bpf_attr__bindgen_ty_10> { + unsafe { crate::bpf_probe_read(&self.query) }.ok() + } + pub fn raw_tracepoint(&self) -> Option<bpf_attr__bindgen_ty_11> { + unsafe { crate::bpf_probe_read(&self.raw_tracepoint) }.ok() + } + pub fn btf(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_7.btf) }.ok() + } + pub fn btf_log_buf(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_7.btf_log_buf) }.ok() + } + pub fn btf_size(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_7.btf_size) }.ok() + } + pub fn btf_log_size(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_7.btf_log_size) }.ok() + } + pub fn btf_log_level(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_7.btf_log_level) }.ok() + } + pub fn task_fd_query(&self) -> Option<bpf_attr__bindgen_ty_13> { + unsafe { crate::bpf_probe_read(&self.task_fd_query) }.ok() + } + pub fn link_create(&self) -> Option<bpf_attr__bindgen_ty_14> { + unsafe { crate::bpf_probe_read(&self.link_create) }.ok() + } + pub fn link_update(&self) -> Option<bpf_attr__bindgen_ty_15> { + unsafe { crate::bpf_probe_read(&self.link_update) }.ok() + } + pub fn link_detach(&self) -> Option<bpf_attr__bindgen_ty_16> { + unsafe { crate::bpf_probe_read(&self.link_detach) }.ok() + } + pub fn enable_stats(&self) -> Option<bpf_attr__bindgen_ty_17> { + unsafe { crate::bpf_probe_read(&self.enable_stats) }.ok() + } + pub fn iter_create(&self) -> Option<bpf_attr__bindgen_ty_18> { + unsafe { crate::bpf_probe_read(&self.iter_create) }.ok() + } + pub fn prog_bind_map(&self) -> Option<bpf_attr__bindgen_ty_19> { + unsafe { crate::bpf_probe_read(&self.prog_bind_map) }.ok() + } +} +impl bpf_attr__bindgen_ty_1 { + pub fn map_type(&self) -> 
Option<__u32> { + unsafe { crate::bpf_probe_read(&self.map_type) }.ok() + } + pub fn key_size(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.key_size) }.ok() + } + pub fn value_size(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.value_size) }.ok() + } + pub fn max_entries(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.max_entries) }.ok() + } + pub fn map_flags(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.map_flags) }.ok() + } + pub fn inner_map_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.inner_map_fd) }.ok() + } + pub fn numa_node(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.numa_node) }.ok() + } + pub fn map_name(&self) -> Option<[::aya_bpf_cty::c_char; 16usize]> { + unsafe { crate::bpf_probe_read(&self.map_name) }.ok() + } + pub fn map_ifindex(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.map_ifindex) }.ok() + } + pub fn btf_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.btf_fd) }.ok() + } + pub fn btf_key_type_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.btf_key_type_id) }.ok() + } + pub fn btf_value_type_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.btf_value_type_id) }.ok() + } + pub fn btf_vmlinux_value_type_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.btf_vmlinux_value_type_id) }.ok() + } + pub fn map_extra(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.map_extra) }.ok() + } +} +impl bpf_attr__bindgen_ty_2 { + pub fn map_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.map_fd) }.ok() + } + pub fn key(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.key) }.ok() + } + pub fn value(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.value) }.ok() + } + pub fn next_key(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.next_key) }.ok() + } + pub fn flags(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.flags) }.ok() + } +} +impl bpf_attr__bindgen_ty_2__bindgen_ty_1 { + pub fn value(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.value) }.ok() + } + pub fn next_key(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.next_key) }.ok() + } +} +impl bpf_attr__bindgen_ty_3 { + pub fn in_batch(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.in_batch) }.ok() + } + pub fn out_batch(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.out_batch) }.ok() + } + pub fn keys(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.keys) }.ok() + } + pub fn values(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.values) }.ok() + } + pub fn count(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.count) }.ok() + } + pub fn map_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.map_fd) }.ok() + } + pub fn elem_flags(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.elem_flags) }.ok() + } + pub fn flags(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.flags) }.ok() + } +} +impl bpf_attr__bindgen_ty_4 { + pub fn prog_type(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.prog_type) }.ok() + } + pub fn insn_cnt(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.insn_cnt) }.ok() + } + pub fn insns(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.insns) }.ok() + } + pub fn 
license(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.license) }.ok() + } + pub fn log_level(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.log_level) }.ok() + } + pub fn log_size(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.log_size) }.ok() + } + pub fn log_buf(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.log_buf) }.ok() + } + pub fn kern_version(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.kern_version) }.ok() + } + pub fn prog_flags(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.prog_flags) }.ok() + } + pub fn prog_name(&self) -> Option<[::aya_bpf_cty::c_char; 16usize]> { + unsafe { crate::bpf_probe_read(&self.prog_name) }.ok() + } + pub fn prog_ifindex(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.prog_ifindex) }.ok() + } + pub fn expected_attach_type(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.expected_attach_type) }.ok() + } + pub fn prog_btf_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.prog_btf_fd) }.ok() + } + pub fn func_info_rec_size(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.func_info_rec_size) }.ok() + } + pub fn func_info(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.func_info) }.ok() + } + pub fn func_info_cnt(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.func_info_cnt) }.ok() + } + pub fn line_info_rec_size(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.line_info_rec_size) }.ok() + } + pub fn line_info(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.line_info) }.ok() + } + pub fn line_info_cnt(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.line_info_cnt) }.ok() + } + pub fn attach_btf_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.attach_btf_id) }.ok() + } + pub fn attach_prog_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.attach_prog_fd) }.ok() + } + pub fn attach_btf_obj_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.attach_btf_obj_fd) }.ok() + } + pub fn core_relo_cnt(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.core_relo_cnt) }.ok() + } + pub fn fd_array(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.fd_array) }.ok() + } + pub fn core_relos(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.core_relos) }.ok() + } + pub fn core_relo_rec_size(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.core_relo_rec_size) }.ok() + } +} +impl bpf_attr__bindgen_ty_4__bindgen_ty_1 { + pub fn attach_prog_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.attach_prog_fd) }.ok() + } + pub fn attach_btf_obj_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.attach_btf_obj_fd) }.ok() + } +} +impl bpf_attr__bindgen_ty_5 { + pub fn pathname(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.pathname) }.ok() + } + pub fn bpf_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.bpf_fd) }.ok() + } + pub fn file_flags(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.file_flags) }.ok() + } +} +impl bpf_attr__bindgen_ty_6 { + pub fn target_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.target_fd) }.ok() + } + pub fn attach_bpf_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.attach_bpf_fd) }.ok() + } + pub fn attach_type(&self) -> 
Option<__u32> { + unsafe { crate::bpf_probe_read(&self.attach_type) }.ok() + } + pub fn attach_flags(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.attach_flags) }.ok() + } + pub fn replace_bpf_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.replace_bpf_fd) }.ok() + } +} +impl bpf_attr__bindgen_ty_7 { + pub fn prog_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.prog_fd) }.ok() + } + pub fn retval(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.retval) }.ok() + } + pub fn data_size_in(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.data_size_in) }.ok() + } + pub fn data_size_out(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.data_size_out) }.ok() + } + pub fn data_in(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.data_in) }.ok() + } + pub fn data_out(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.data_out) }.ok() + } + pub fn repeat(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.repeat) }.ok() + } + pub fn duration(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.duration) }.ok() + } + pub fn ctx_size_in(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.ctx_size_in) }.ok() + } + pub fn ctx_size_out(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.ctx_size_out) }.ok() + } + pub fn ctx_in(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.ctx_in) }.ok() + } + pub fn ctx_out(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.ctx_out) }.ok() + } + pub fn flags(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.flags) }.ok() + } + pub fn cpu(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.cpu) }.ok() + } + pub fn batch_size(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.batch_size) }.ok() + } +} +impl bpf_attr__bindgen_ty_8 { + pub fn start_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.start_id) }.ok() + } + pub fn prog_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.prog_id) }.ok() + } + pub fn map_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.map_id) }.ok() + } + pub fn btf_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.btf_id) }.ok() + } + pub fn link_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.link_id) }.ok() + } + pub fn next_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.next_id) }.ok() + } + pub fn open_flags(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.open_flags) }.ok() + } +} +impl bpf_attr__bindgen_ty_8__bindgen_ty_1 { + pub fn start_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.start_id) }.ok() + } + pub fn prog_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.prog_id) }.ok() + } + pub fn map_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.map_id) }.ok() + } + pub fn btf_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.btf_id) }.ok() + } + pub fn link_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.link_id) }.ok() + } +} +impl bpf_attr__bindgen_ty_9 { + pub fn bpf_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.bpf_fd) }.ok() + } + pub fn info_len(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.info_len) }.ok() + } + pub fn info(&self) 
-> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.info) }.ok() + } +} +impl bpf_attr__bindgen_ty_10 { + pub fn target_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.target_fd) }.ok() + } + pub fn attach_type(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.attach_type) }.ok() + } + pub fn query_flags(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.query_flags) }.ok() + } + pub fn attach_flags(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.attach_flags) }.ok() + } + pub fn prog_ids(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.prog_ids) }.ok() + } + pub fn prog_cnt(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.prog_cnt) }.ok() + } +} +impl bpf_attr__bindgen_ty_11 { + pub fn name(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.name) }.ok() + } + pub fn prog_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.prog_fd) }.ok() + } +} +impl bpf_attr__bindgen_ty_12 { + pub fn btf(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.btf) }.ok() + } + pub fn btf_log_buf(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.btf_log_buf) }.ok() + } + pub fn btf_size(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.btf_size) }.ok() + } + pub fn btf_log_size(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.btf_log_size) }.ok() + } + pub fn btf_log_level(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.btf_log_level) }.ok() + } +} +impl bpf_attr__bindgen_ty_13 { + pub fn pid(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.pid) }.ok() + } + pub fn fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.fd) }.ok() + } + pub fn flags(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.flags) }.ok() + } + pub fn buf_len(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.buf_len) }.ok() + } + pub fn buf(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.buf) }.ok() + } + pub fn prog_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.prog_id) }.ok() + } + pub fn fd_type(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.fd_type) }.ok() + } + pub fn probe_offset(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.probe_offset) }.ok() + } + pub fn probe_addr(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.probe_addr) }.ok() + } +} +impl bpf_attr__bindgen_ty_14 { + pub fn prog_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.prog_fd) }.ok() + } + pub fn target_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.target_fd) }.ok() + } + pub fn target_ifindex(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.target_ifindex) }.ok() + } + pub fn attach_type(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.attach_type) }.ok() + } + pub fn flags(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.flags) }.ok() + } + pub fn target_btf_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_2.target_btf_id) }.ok() + } + pub fn iter_info(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_2.__bindgen_anon_1.iter_info) }.ok() + } + pub fn iter_info_len(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_2.__bindgen_anon_1.iter_info_len) }.ok() + } + pub fn perf_event(&self) -> Option 
<bpf_attr__bindgen_ty_14__bindgen_ty_2__bindgen_ty_2> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_2.perf_event) }.ok() + } + pub fn kprobe_multi(&self) -> Option<bpf_attr__bindgen_ty_14__bindgen_ty_2__bindgen_ty_3> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_2.kprobe_multi) }.ok() + } + pub fn tracing(&self) -> Option<bpf_attr__bindgen_ty_14__bindgen_ty_2__bindgen_ty_4> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_2.tracing) }.ok() + } +} +impl bpf_attr__bindgen_ty_14__bindgen_ty_1 { + pub fn target_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.target_fd) }.ok() + } + pub fn target_ifindex(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.target_ifindex) }.ok() + } +} +impl bpf_attr__bindgen_ty_14__bindgen_ty_2 { + pub fn target_btf_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.target_btf_id) }.ok() + } + pub fn iter_info(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.iter_info) }.ok() + } + pub fn iter_info_len(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.iter_info_len) }.ok() + } + pub fn perf_event(&self) -> Option<bpf_attr__bindgen_ty_14__bindgen_ty_2__bindgen_ty_2> { + unsafe { crate::bpf_probe_read(&self.perf_event) }.ok() + } + pub fn kprobe_multi(&self) -> Option<bpf_attr__bindgen_ty_14__bindgen_ty_2__bindgen_ty_3> { + unsafe { crate::bpf_probe_read(&self.kprobe_multi) }.ok() + } + pub fn tracing(&self) -> Option<bpf_attr__bindgen_ty_14__bindgen_ty_2__bindgen_ty_4> { + unsafe { crate::bpf_probe_read(&self.tracing) }.ok() + } +} +impl bpf_attr__bindgen_ty_14__bindgen_ty_2__bindgen_ty_1 { + pub fn iter_info(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.iter_info) }.ok() + } + pub fn iter_info_len(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.iter_info_len) }.ok() + } +} +impl bpf_attr__bindgen_ty_14__bindgen_ty_2__bindgen_ty_2 { + pub fn bpf_cookie(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.bpf_cookie) }.ok() + } +} +impl bpf_attr__bindgen_ty_14__bindgen_ty_2__bindgen_ty_3 { + pub fn flags(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.flags) }.ok() + } + pub fn cnt(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.cnt) }.ok() + } + pub fn syms(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.syms) }.ok() + } + pub fn addrs(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.addrs) }.ok() + } + pub fn cookies(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.cookies) }.ok() + } +} +impl bpf_attr__bindgen_ty_14__bindgen_ty_2__bindgen_ty_4 { + pub fn target_btf_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.target_btf_id) }.ok() + } + pub fn cookie(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.cookie) }.ok() + } +} +impl bpf_attr__bindgen_ty_15 { + pub fn link_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.link_fd) }.ok() + } + pub fn new_prog_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.new_prog_fd) }.ok() + } + pub fn flags(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.flags) }.ok() + } + pub fn old_prog_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.old_prog_fd) }.ok() + } +} +impl bpf_attr__bindgen_ty_16 { + pub fn link_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.link_fd) }.ok() + } +} +impl bpf_attr__bindgen_ty_17 { + pub fn type_(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.type_) }.ok() + } +} +impl bpf_attr__bindgen_ty_18 { + pub fn link_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.link_fd) }.ok() + } + pub fn flags(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.flags) }.ok() + } +} +impl 
bpf_attr__bindgen_ty_19 { + pub fn prog_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.prog_fd) }.ok() + } + pub fn map_fd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.map_fd) }.ok() + } + pub fn flags(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.flags) }.ok() + } +} +impl __sk_buff { + pub fn len(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.len) }.ok() + } + pub fn pkt_type(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.pkt_type) }.ok() + } + pub fn mark(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.mark) }.ok() + } + pub fn queue_mapping(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.queue_mapping) }.ok() + } + pub fn protocol(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.protocol) }.ok() + } + pub fn vlan_present(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.vlan_present) }.ok() + } + pub fn vlan_tci(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.vlan_tci) }.ok() + } + pub fn vlan_proto(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.vlan_proto) }.ok() + } + pub fn priority(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.priority) }.ok() + } + pub fn ingress_ifindex(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.ingress_ifindex) }.ok() + } + pub fn ifindex(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.ifindex) }.ok() + } + pub fn tc_index(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.tc_index) }.ok() + } + pub fn cb(&self) -> Option<[__u32; 5usize]> { + unsafe { crate::bpf_probe_read(&self.cb) }.ok() + } + pub fn hash(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.hash) }.ok() + } + pub fn tc_classid(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.tc_classid) }.ok() + } + pub fn data(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.data) }.ok() + } + pub fn data_end(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.data_end) }.ok() + } + pub fn napi_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.napi_id) }.ok() + } + pub fn family(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.family) }.ok() + } + pub fn remote_ip4(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.remote_ip4) }.ok() + } + pub fn local_ip4(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.local_ip4) }.ok() + } + pub fn remote_ip6(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.remote_ip6) }.ok() + } + pub fn local_ip6(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.local_ip6) }.ok() + } + pub fn remote_port(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.remote_port) }.ok() + } + pub fn local_port(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.local_port) }.ok() + } + pub fn data_meta(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.data_meta) }.ok() + } + pub fn flow_keys(&self) -> Option<*mut bpf_flow_keys> { + let v = unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.flow_keys) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } + pub fn tstamp(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.tstamp) }.ok() + } + pub fn wire_len(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.wire_len) }.ok() + } + pub fn gso_segs(&self) -> Option<__u32> { + unsafe { 
crate::bpf_probe_read(&self.gso_segs) }.ok() + } + pub fn sk(&self) -> Option<*mut bpf_sock> { + let v = unsafe { crate::bpf_probe_read(&self.__bindgen_anon_2.sk) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } + pub fn gso_size(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.gso_size) }.ok() + } + pub fn tstamp_type(&self) -> Option<__u8> { + unsafe { crate::bpf_probe_read(&self.tstamp_type) }.ok() + } + pub fn hwtstamp(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.hwtstamp) }.ok() + } +} +impl __sk_buff__bindgen_ty_1 { + pub fn flow_keys(&self) -> Option<*mut bpf_flow_keys> { + let v = unsafe { crate::bpf_probe_read(&self.flow_keys) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } +} +impl __sk_buff__bindgen_ty_2 { + pub fn sk(&self) -> Option<*mut bpf_sock> { + let v = unsafe { crate::bpf_probe_read(&self.sk) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } +} +impl bpf_tunnel_key { + pub fn tunnel_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.tunnel_id) }.ok() + } + pub fn remote_ipv4(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.remote_ipv4) }.ok() + } + pub fn remote_ipv6(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.remote_ipv6) }.ok() + } + pub fn tunnel_tos(&self) -> Option<__u8> { + unsafe { crate::bpf_probe_read(&self.tunnel_tos) }.ok() + } + pub fn tunnel_ttl(&self) -> Option<__u8> { + unsafe { crate::bpf_probe_read(&self.tunnel_ttl) }.ok() + } + pub fn tunnel_ext(&self) -> Option<__u16> { + unsafe { crate::bpf_probe_read(&self.tunnel_ext) }.ok() + } + pub fn tunnel_label(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.tunnel_label) }.ok() + } + pub fn local_ipv4(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_2.local_ipv4) }.ok() + } + pub fn local_ipv6(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_2.local_ipv6) }.ok() + } +} +impl bpf_tunnel_key__bindgen_ty_1 { + pub fn remote_ipv4(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.remote_ipv4) }.ok() + } + pub fn remote_ipv6(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.remote_ipv6) }.ok() + } +} +impl bpf_tunnel_key__bindgen_ty_2 { + pub fn local_ipv4(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.local_ipv4) }.ok() + } + pub fn local_ipv6(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.local_ipv6) }.ok() + } +} +impl bpf_xfrm_state { + pub fn reqid(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.reqid) }.ok() + } + pub fn spi(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.spi) }.ok() + } + pub fn family(&self) -> Option<__u16> { + unsafe { crate::bpf_probe_read(&self.family) }.ok() + } + pub fn ext(&self) -> Option<__u16> { + unsafe { crate::bpf_probe_read(&self.ext) }.ok() + } + pub fn remote_ipv4(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.remote_ipv4) }.ok() + } + pub fn remote_ipv6(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.remote_ipv6) }.ok() + } +} +impl bpf_xfrm_state__bindgen_ty_1 { + pub fn remote_ipv4(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.remote_ipv4) }.ok() + } + pub fn remote_ipv6(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.remote_ipv6) }.ok() + } +} +impl bpf_sock { + pub 
fn bound_dev_if(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.bound_dev_if) }.ok() + } + pub fn family(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.family) }.ok() + } + pub fn type_(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.type_) }.ok() + } + pub fn protocol(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.protocol) }.ok() + } + pub fn mark(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.mark) }.ok() + } + pub fn priority(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.priority) }.ok() + } + pub fn src_ip4(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.src_ip4) }.ok() + } + pub fn src_ip6(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.src_ip6) }.ok() + } + pub fn src_port(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.src_port) }.ok() + } + pub fn dst_port(&self) -> Option<__be16> { + unsafe { crate::bpf_probe_read(&self.dst_port) }.ok() + } + pub fn dst_ip4(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.dst_ip4) }.ok() + } + pub fn dst_ip6(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.dst_ip6) }.ok() + } + pub fn state(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.state) }.ok() + } + pub fn rx_queue_mapping(&self) -> Option<__s32> { + unsafe { crate::bpf_probe_read(&self.rx_queue_mapping) }.ok() + } +} +impl bpf_tcp_sock { + pub fn snd_cwnd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.snd_cwnd) }.ok() + } + pub fn srtt_us(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.srtt_us) }.ok() + } + pub fn rtt_min(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.rtt_min) }.ok() + } + pub fn snd_ssthresh(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.snd_ssthresh) }.ok() + } + pub fn rcv_nxt(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.rcv_nxt) }.ok() + } + pub fn snd_nxt(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.snd_nxt) }.ok() + } + pub fn snd_una(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.snd_una) }.ok() + } + pub fn mss_cache(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.mss_cache) }.ok() + } + pub fn ecn_flags(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.ecn_flags) }.ok() + } + pub fn rate_delivered(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.rate_delivered) }.ok() + } + pub fn rate_interval_us(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.rate_interval_us) }.ok() + } + pub fn packets_out(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.packets_out) }.ok() + } + pub fn retrans_out(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.retrans_out) }.ok() + } + pub fn total_retrans(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.total_retrans) }.ok() + } + pub fn segs_in(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.segs_in) }.ok() + } + pub fn data_segs_in(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.data_segs_in) }.ok() + } + pub fn segs_out(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.segs_out) }.ok() + } + pub fn data_segs_out(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.data_segs_out) }.ok() + } + pub fn lost_out(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.lost_out) }.ok() + } + pub fn 
sacked_out(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.sacked_out) }.ok() + } + pub fn bytes_received(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.bytes_received) }.ok() + } + pub fn bytes_acked(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.bytes_acked) }.ok() + } + pub fn dsack_dups(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.dsack_dups) }.ok() + } + pub fn delivered(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.delivered) }.ok() + } + pub fn delivered_ce(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.delivered_ce) }.ok() + } + pub fn icsk_retransmits(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.icsk_retransmits) }.ok() + } +} +impl bpf_sock_tuple { + pub fn ipv4(&self) -> Option<bpf_sock_tuple__bindgen_ty_1__bindgen_ty_1> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.ipv4) }.ok() + } + pub fn ipv6(&self) -> Option<bpf_sock_tuple__bindgen_ty_1__bindgen_ty_2> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.ipv6) }.ok() + } +} +impl bpf_sock_tuple__bindgen_ty_1 { + pub fn ipv4(&self) -> Option<bpf_sock_tuple__bindgen_ty_1__bindgen_ty_1> { + unsafe { crate::bpf_probe_read(&self.ipv4) }.ok() + } + pub fn ipv6(&self) -> Option<bpf_sock_tuple__bindgen_ty_1__bindgen_ty_2> { + unsafe { crate::bpf_probe_read(&self.ipv6) }.ok() + } +} +impl bpf_sock_tuple__bindgen_ty_1__bindgen_ty_1 { + pub fn saddr(&self) -> Option<__be32> { + unsafe { crate::bpf_probe_read(&self.saddr) }.ok() + } + pub fn daddr(&self) -> Option<__be32> { + unsafe { crate::bpf_probe_read(&self.daddr) }.ok() + } + pub fn sport(&self) -> Option<__be16> { + unsafe { crate::bpf_probe_read(&self.sport) }.ok() + } + pub fn dport(&self) -> Option<__be16> { + unsafe { crate::bpf_probe_read(&self.dport) }.ok() + } +} +impl bpf_sock_tuple__bindgen_ty_1__bindgen_ty_2 { + pub fn saddr(&self) -> Option<[__be32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.saddr) }.ok() + } + pub fn daddr(&self) -> Option<[__be32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.daddr) }.ok() + } + pub fn sport(&self) -> Option<__be16> { + unsafe { crate::bpf_probe_read(&self.sport) }.ok() + } + pub fn dport(&self) -> Option<__be16> { + unsafe { crate::bpf_probe_read(&self.dport) }.ok() + } +} +impl bpf_xdp_sock { + pub fn queue_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.queue_id) }.ok() + } +} +impl xdp_md { + pub fn data(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.data) }.ok() + } + pub fn data_end(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.data_end) }.ok() + } + pub fn data_meta(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.data_meta) }.ok() + } + pub fn ingress_ifindex(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.ingress_ifindex) }.ok() + } + pub fn rx_queue_index(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.rx_queue_index) }.ok() + } + pub fn egress_ifindex(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.egress_ifindex) }.ok() + } +} +impl bpf_devmap_val { + pub fn ifindex(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.ifindex) }.ok() + } + pub fn bpf_prog(&self) -> Option<bpf_devmap_val__bindgen_ty_1> { + unsafe { crate::bpf_probe_read(&self.bpf_prog) }.ok() + } +} +impl bpf_devmap_val__bindgen_ty_1 { + pub fn fd(&self) -> Option<::aya_bpf_cty::c_int> { + unsafe { crate::bpf_probe_read(&self.fd) }.ok() + } + pub fn id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.id) }.ok() + } +} +impl bpf_cpumap_val { + pub fn qsize(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.qsize) }.ok() + } + pub fn 
bpf_prog(&self) -> Option<bpf_cpumap_val__bindgen_ty_1> { + unsafe { crate::bpf_probe_read(&self.bpf_prog) }.ok() + } +} +impl bpf_cpumap_val__bindgen_ty_1 { + pub fn fd(&self) -> Option<::aya_bpf_cty::c_int> { + unsafe { crate::bpf_probe_read(&self.fd) }.ok() + } + pub fn id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.id) }.ok() + } +} +impl sk_msg_md { + pub fn data(&self) -> Option<*mut ::aya_bpf_cty::c_void> { + let v = unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.data) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } + pub fn data_end(&self) -> Option<*mut ::aya_bpf_cty::c_void> { + let v = unsafe { crate::bpf_probe_read(&self.__bindgen_anon_2.data_end) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } + pub fn family(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.family) }.ok() + } + pub fn remote_ip4(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.remote_ip4) }.ok() + } + pub fn local_ip4(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.local_ip4) }.ok() + } + pub fn remote_ip6(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.remote_ip6) }.ok() + } + pub fn local_ip6(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.local_ip6) }.ok() + } + pub fn remote_port(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.remote_port) }.ok() + } + pub fn local_port(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.local_port) }.ok() + } + pub fn size(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.size) }.ok() + } + pub fn sk(&self) -> Option<*mut bpf_sock> { + let v = unsafe { crate::bpf_probe_read(&self.__bindgen_anon_3.sk) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } +} +impl sk_msg_md__bindgen_ty_1 { + pub fn data(&self) -> Option<*mut ::aya_bpf_cty::c_void> { + let v = unsafe { crate::bpf_probe_read(&self.data) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } +} +impl sk_msg_md__bindgen_ty_2 { + pub fn data_end(&self) -> Option<*mut ::aya_bpf_cty::c_void> { + let v = unsafe { crate::bpf_probe_read(&self.data_end) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } +} +impl sk_msg_md__bindgen_ty_3 { + pub fn sk(&self) -> Option<*mut bpf_sock> { + let v = unsafe { crate::bpf_probe_read(&self.sk) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } +} +impl sk_reuseport_md { + pub fn data(&self) -> Option<*mut ::aya_bpf_cty::c_void> { + let v = unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.data) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } + pub fn data_end(&self) -> Option<*mut ::aya_bpf_cty::c_void> { + let v = unsafe { crate::bpf_probe_read(&self.__bindgen_anon_2.data_end) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } + pub fn len(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.len) }.ok() + } + pub fn eth_protocol(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.eth_protocol) }.ok() + } + pub fn ip_protocol(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.ip_protocol) }.ok() + } + pub fn bind_inany(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.bind_inany) }.ok() + } + pub fn hash(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.hash) }.ok() + } + pub fn sk(&self) -> Option<*mut bpf_sock> { + let v = unsafe { crate::bpf_probe_read(&self.__bindgen_anon_3.sk) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } + pub fn 
migrating_sk(&self) -> Option<*mut bpf_sock> { + let v = unsafe { crate::bpf_probe_read(&self.__bindgen_anon_4.migrating_sk) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } +} +impl sk_reuseport_md__bindgen_ty_1 { + pub fn data(&self) -> Option<*mut ::aya_bpf_cty::c_void> { + let v = unsafe { crate::bpf_probe_read(&self.data) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } +} +impl sk_reuseport_md__bindgen_ty_2 { + pub fn data_end(&self) -> Option<*mut ::aya_bpf_cty::c_void> { + let v = unsafe { crate::bpf_probe_read(&self.data_end) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } +} +impl sk_reuseport_md__bindgen_ty_3 { + pub fn sk(&self) -> Option<*mut bpf_sock> { + let v = unsafe { crate::bpf_probe_read(&self.sk) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } +} +impl sk_reuseport_md__bindgen_ty_4 { + pub fn migrating_sk(&self) -> Option<*mut bpf_sock> { + let v = unsafe { crate::bpf_probe_read(&self.migrating_sk) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } +} +impl bpf_prog_info { + pub fn type_(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.type_) }.ok() + } + pub fn id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.id) }.ok() + } + pub fn tag(&self) -> Option<[__u8; 8usize]> { + unsafe { crate::bpf_probe_read(&self.tag) }.ok() + } + pub fn jited_prog_len(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.jited_prog_len) }.ok() + } + pub fn xlated_prog_len(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.xlated_prog_len) }.ok() + } + pub fn jited_prog_insns(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.jited_prog_insns) }.ok() + } + pub fn xlated_prog_insns(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.xlated_prog_insns) }.ok() + } + pub fn load_time(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.load_time) }.ok() + } + pub fn created_by_uid(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.created_by_uid) }.ok() + } + pub fn nr_map_ids(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.nr_map_ids) }.ok() + } + pub fn map_ids(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.map_ids) }.ok() + } + pub fn name(&self) -> Option<[::aya_bpf_cty::c_char; 16usize]> { + unsafe { crate::bpf_probe_read(&self.name) }.ok() + } + pub fn ifindex(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.ifindex) }.ok() + } + pub fn netns_dev(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.netns_dev) }.ok() + } + pub fn netns_ino(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.netns_ino) }.ok() + } + pub fn nr_jited_ksyms(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.nr_jited_ksyms) }.ok() + } + pub fn nr_jited_func_lens(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.nr_jited_func_lens) }.ok() + } + pub fn jited_ksyms(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.jited_ksyms) }.ok() + } + pub fn jited_func_lens(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.jited_func_lens) }.ok() + } + pub fn btf_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.btf_id) }.ok() + } + pub fn func_info_rec_size(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.func_info_rec_size) }.ok() + } + pub fn func_info(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.func_info) }.ok() + } + pub fn nr_func_info(&self) -> 
Option<__u32> { + unsafe { crate::bpf_probe_read(&self.nr_func_info) }.ok() + } + pub fn nr_line_info(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.nr_line_info) }.ok() + } + pub fn line_info(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.line_info) }.ok() + } + pub fn jited_line_info(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.jited_line_info) }.ok() + } + pub fn nr_jited_line_info(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.nr_jited_line_info) }.ok() + } + pub fn line_info_rec_size(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.line_info_rec_size) }.ok() + } + pub fn jited_line_info_rec_size(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.jited_line_info_rec_size) }.ok() + } + pub fn nr_prog_tags(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.nr_prog_tags) }.ok() + } + pub fn prog_tags(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.prog_tags) }.ok() + } + pub fn run_time_ns(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.run_time_ns) }.ok() + } + pub fn run_cnt(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.run_cnt) }.ok() + } + pub fn recursion_misses(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.recursion_misses) }.ok() + } + pub fn verified_insns(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.verified_insns) }.ok() + } +} +impl bpf_map_info { + pub fn type_(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.type_) }.ok() + } + pub fn id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.id) }.ok() + } + pub fn key_size(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.key_size) }.ok() + } + pub fn value_size(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.value_size) }.ok() + } + pub fn max_entries(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.max_entries) }.ok() + } + pub fn map_flags(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.map_flags) }.ok() + } + pub fn name(&self) -> Option<[::aya_bpf_cty::c_char; 16usize]> { + unsafe { crate::bpf_probe_read(&self.name) }.ok() + } + pub fn ifindex(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.ifindex) }.ok() + } + pub fn btf_vmlinux_value_type_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.btf_vmlinux_value_type_id) }.ok() + } + pub fn netns_dev(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.netns_dev) }.ok() + } + pub fn netns_ino(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.netns_ino) }.ok() + } + pub fn btf_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.btf_id) }.ok() + } + pub fn btf_key_type_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.btf_key_type_id) }.ok() + } + pub fn btf_value_type_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.btf_value_type_id) }.ok() + } + pub fn map_extra(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.map_extra) }.ok() + } +} +impl bpf_btf_info { + pub fn btf(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.btf) }.ok() + } + pub fn btf_size(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.btf_size) }.ok() + } + pub fn id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.id) }.ok() + } + pub fn name(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.name) }.ok() + } + pub fn 
name_len(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.name_len) }.ok() + } + pub fn kernel_btf(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.kernel_btf) }.ok() + } +} +impl bpf_link_info { + pub fn type_(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.type_) }.ok() + } + pub fn id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.id) }.ok() + } + pub fn prog_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.prog_id) }.ok() + } + pub fn raw_tracepoint(&self) -> Option<bpf_link_info__bindgen_ty_1__bindgen_ty_1> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.raw_tracepoint) }.ok() + } + pub fn tracing(&self) -> Option<bpf_link_info__bindgen_ty_1__bindgen_ty_2> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.tracing) }.ok() + } + pub fn cgroup(&self) -> Option<bpf_link_info__bindgen_ty_1__bindgen_ty_3> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.cgroup) }.ok() + } + pub fn iter(&self) -> Option<bpf_link_info__bindgen_ty_1__bindgen_ty_4> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.iter) }.ok() + } + pub fn netns(&self) -> Option<bpf_link_info__bindgen_ty_1__bindgen_ty_5> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.netns) }.ok() + } + pub fn xdp(&self) -> Option<bpf_link_info__bindgen_ty_1__bindgen_ty_6> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.xdp) }.ok() + } +} +impl bpf_link_info__bindgen_ty_1 { + pub fn raw_tracepoint(&self) -> Option<bpf_link_info__bindgen_ty_1__bindgen_ty_1> { + unsafe { crate::bpf_probe_read(&self.raw_tracepoint) }.ok() + } + pub fn tracing(&self) -> Option<bpf_link_info__bindgen_ty_1__bindgen_ty_2> { + unsafe { crate::bpf_probe_read(&self.tracing) }.ok() + } + pub fn cgroup(&self) -> Option<bpf_link_info__bindgen_ty_1__bindgen_ty_3> { + unsafe { crate::bpf_probe_read(&self.cgroup) }.ok() + } + pub fn iter(&self) -> Option<bpf_link_info__bindgen_ty_1__bindgen_ty_4> { + unsafe { crate::bpf_probe_read(&self.iter) }.ok() + } + pub fn netns(&self) -> Option<bpf_link_info__bindgen_ty_1__bindgen_ty_5> { + unsafe { crate::bpf_probe_read(&self.netns) }.ok() + } + pub fn xdp(&self) -> Option<bpf_link_info__bindgen_ty_1__bindgen_ty_6> { + unsafe { crate::bpf_probe_read(&self.xdp) }.ok() + } +} +impl bpf_link_info__bindgen_ty_1__bindgen_ty_1 { + pub fn tp_name(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.tp_name) }.ok() + } + pub fn tp_name_len(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.tp_name_len) }.ok() + } +} +impl bpf_link_info__bindgen_ty_1__bindgen_ty_2 { + pub fn attach_type(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.attach_type) }.ok() + } + pub fn target_obj_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.target_obj_id) }.ok() + } + pub fn target_btf_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.target_btf_id) }.ok() + } +} +impl bpf_link_info__bindgen_ty_1__bindgen_ty_3 { + pub fn cgroup_id(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.cgroup_id) }.ok() + } + pub fn attach_type(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.attach_type) }.ok() + } +} +impl bpf_link_info__bindgen_ty_1__bindgen_ty_4 { + pub fn target_name(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.target_name) }.ok() + } + pub fn target_name_len(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.target_name_len) }.ok() + } + pub fn map( + &self, + ) -> Option<bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_1__bindgen_ty_1> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.map) }.ok() + } +} +impl bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_1 { + pub fn map( + &self, + ) -> Option<bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_1__bindgen_ty_1> { + unsafe { crate::bpf_probe_read(&self.map) }.ok() + } +} +impl bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_1__bindgen_ty_1 { + pub fn map_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.map_id) }.ok() + } +} +impl bpf_link_info__bindgen_ty_1__bindgen_ty_5 { + pub fn netns_ino(&self) -> Option<__u32> { + unsafe 
{ crate::bpf_probe_read(&self.netns_ino) }.ok() + } + pub fn attach_type(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.attach_type) }.ok() + } +} +impl bpf_link_info__bindgen_ty_1__bindgen_ty_6 { + pub fn ifindex(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.ifindex) }.ok() + } +} +impl bpf_sock_addr { + pub fn user_family(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.user_family) }.ok() + } + pub fn user_ip4(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.user_ip4) }.ok() + } + pub fn user_ip6(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.user_ip6) }.ok() + } + pub fn user_port(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.user_port) }.ok() + } + pub fn family(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.family) }.ok() + } + pub fn type_(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.type_) }.ok() + } + pub fn protocol(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.protocol) }.ok() + } + pub fn msg_src_ip4(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.msg_src_ip4) }.ok() + } + pub fn msg_src_ip6(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.msg_src_ip6) }.ok() + } + pub fn sk(&self) -> Option<*mut bpf_sock> { + let v = unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.sk) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } +} +impl bpf_sock_addr__bindgen_ty_1 { + pub fn sk(&self) -> Option<*mut bpf_sock> { + let v = unsafe { crate::bpf_probe_read(&self.sk) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } +} +impl bpf_sock_ops { + pub fn op(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.op) }.ok() + } + pub fn args(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.args) }.ok() + } + pub fn reply(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.reply) }.ok() + } + pub fn replylong(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.replylong) }.ok() + } + pub fn family(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.family) }.ok() + } + pub fn remote_ip4(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.remote_ip4) }.ok() + } + pub fn local_ip4(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.local_ip4) }.ok() + } + pub fn remote_ip6(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.remote_ip6) }.ok() + } + pub fn local_ip6(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.local_ip6) }.ok() + } + pub fn remote_port(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.remote_port) }.ok() + } + pub fn local_port(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.local_port) }.ok() + } + pub fn is_fullsock(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.is_fullsock) }.ok() + } + pub fn snd_cwnd(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.snd_cwnd) }.ok() + } + pub fn srtt_us(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.srtt_us) }.ok() + } + pub fn bpf_sock_ops_cb_flags(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.bpf_sock_ops_cb_flags) }.ok() + } + pub fn state(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.state) }.ok() + } + pub fn rtt_min(&self) -> Option<__u32> { + unsafe { 
crate::bpf_probe_read(&self.rtt_min) }.ok() + } + pub fn snd_ssthresh(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.snd_ssthresh) }.ok() + } + pub fn rcv_nxt(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.rcv_nxt) }.ok() + } + pub fn snd_nxt(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.snd_nxt) }.ok() + } + pub fn snd_una(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.snd_una) }.ok() + } + pub fn mss_cache(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.mss_cache) }.ok() + } + pub fn ecn_flags(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.ecn_flags) }.ok() + } + pub fn rate_delivered(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.rate_delivered) }.ok() + } + pub fn rate_interval_us(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.rate_interval_us) }.ok() + } + pub fn packets_out(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.packets_out) }.ok() + } + pub fn retrans_out(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.retrans_out) }.ok() + } + pub fn total_retrans(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.total_retrans) }.ok() + } + pub fn segs_in(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.segs_in) }.ok() + } + pub fn data_segs_in(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.data_segs_in) }.ok() + } + pub fn segs_out(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.segs_out) }.ok() + } + pub fn data_segs_out(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.data_segs_out) }.ok() + } + pub fn lost_out(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.lost_out) }.ok() + } + pub fn sacked_out(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.sacked_out) }.ok() + } + pub fn sk_txhash(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.sk_txhash) }.ok() + } + pub fn bytes_received(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.bytes_received) }.ok() + } + pub fn bytes_acked(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.bytes_acked) }.ok() + } + pub fn sk(&self) -> Option<*mut bpf_sock> { + let v = unsafe { crate::bpf_probe_read(&self.__bindgen_anon_2.sk) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } + pub fn skb_data(&self) -> Option<*mut ::aya_bpf_cty::c_void> { + let v = unsafe { crate::bpf_probe_read(&self.__bindgen_anon_3.skb_data) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } + pub fn skb_data_end(&self) -> Option<*mut ::aya_bpf_cty::c_void> { + let v = unsafe { crate::bpf_probe_read(&self.__bindgen_anon_4.skb_data_end) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } + pub fn skb_len(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.skb_len) }.ok() + } + pub fn skb_tcp_flags(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.skb_tcp_flags) }.ok() + } +} +impl bpf_sock_ops__bindgen_ty_1 { + pub fn args(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.args) }.ok() + } + pub fn reply(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.reply) }.ok() + } + pub fn replylong(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.replylong) }.ok() + } +} +impl bpf_sock_ops__bindgen_ty_2 { + pub fn sk(&self) -> Option<*mut bpf_sock> { + let v = unsafe { crate::bpf_probe_read(&self.sk) }.ok()?; + if v.is_null() { + 
None + } else { + Some(v) + } + } +} +impl bpf_sock_ops__bindgen_ty_3 { + pub fn skb_data(&self) -> Option<*mut ::aya_bpf_cty::c_void> { + let v = unsafe { crate::bpf_probe_read(&self.skb_data) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } +} +impl bpf_sock_ops__bindgen_ty_4 { + pub fn skb_data_end(&self) -> Option<*mut ::aya_bpf_cty::c_void> { + let v = unsafe { crate::bpf_probe_read(&self.skb_data_end) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } +} +impl bpf_perf_event_value { + pub fn counter(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.counter) }.ok() + } + pub fn enabled(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.enabled) }.ok() + } + pub fn running(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.running) }.ok() + } +} +impl bpf_cgroup_dev_ctx { + pub fn access_type(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.access_type) }.ok() + } + pub fn major(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.major) }.ok() + } + pub fn minor(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.minor) }.ok() + } +} +impl bpf_raw_tracepoint_args { + pub fn args(&self) -> Option<__IncompleteArrayField<__u64>> { + unsafe { crate::bpf_probe_read(&self.args) }.ok() + } +} +impl bpf_fib_lookup { + pub fn family(&self) -> Option<__u8> { + unsafe { crate::bpf_probe_read(&self.family) }.ok() + } + pub fn l4_protocol(&self) -> Option<__u8> { + unsafe { crate::bpf_probe_read(&self.l4_protocol) }.ok() + } + pub fn sport(&self) -> Option<__be16> { + unsafe { crate::bpf_probe_read(&self.sport) }.ok() + } + pub fn dport(&self) -> Option<__be16> { + unsafe { crate::bpf_probe_read(&self.dport) }.ok() + } + pub fn tot_len(&self) -> Option<__u16> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.tot_len) }.ok() + } + pub fn mtu_result(&self) -> Option<__u16> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.mtu_result) }.ok() + } + pub fn ifindex(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.ifindex) }.ok() + } + pub fn tos(&self) -> Option<__u8> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_2.tos) }.ok() + } + pub fn flowinfo(&self) -> Option<__be32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_2.flowinfo) }.ok() + } + pub fn rt_metric(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_2.rt_metric) }.ok() + } + pub fn ipv4_src(&self) -> Option<__be32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_3.ipv4_src) }.ok() + } + pub fn ipv6_src(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_3.ipv6_src) }.ok() + } + pub fn ipv4_dst(&self) -> Option<__be32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_4.ipv4_dst) }.ok() + } + pub fn ipv6_dst(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_4.ipv6_dst) }.ok() + } + pub fn h_vlan_proto(&self) -> Option<__be16> { + unsafe { crate::bpf_probe_read(&self.h_vlan_proto) }.ok() + } + pub fn h_vlan_TCI(&self) -> Option<__be16> { + unsafe { crate::bpf_probe_read(&self.h_vlan_TCI) }.ok() + } + pub fn smac(&self) -> Option<[__u8; 6usize]> { + unsafe { crate::bpf_probe_read(&self.smac) }.ok() + } + pub fn dmac(&self) -> Option<[__u8; 6usize]> { + unsafe { crate::bpf_probe_read(&self.dmac) }.ok() + } +} +impl bpf_fib_lookup__bindgen_ty_1 { + pub fn tot_len(&self) -> Option<__u16> { + unsafe { crate::bpf_probe_read(&self.tot_len) }.ok() + } + pub fn 
mtu_result(&self) -> Option<__u16> { + unsafe { crate::bpf_probe_read(&self.mtu_result) }.ok() + } +} +impl bpf_fib_lookup__bindgen_ty_2 { + pub fn tos(&self) -> Option<__u8> { + unsafe { crate::bpf_probe_read(&self.tos) }.ok() + } + pub fn flowinfo(&self) -> Option<__be32> { + unsafe { crate::bpf_probe_read(&self.flowinfo) }.ok() + } + pub fn rt_metric(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.rt_metric) }.ok() + } +} +impl bpf_fib_lookup__bindgen_ty_3 { + pub fn ipv4_src(&self) -> Option<__be32> { + unsafe { crate::bpf_probe_read(&self.ipv4_src) }.ok() + } + pub fn ipv6_src(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.ipv6_src) }.ok() + } +} +impl bpf_fib_lookup__bindgen_ty_4 { + pub fn ipv4_dst(&self) -> Option<__be32> { + unsafe { crate::bpf_probe_read(&self.ipv4_dst) }.ok() + } + pub fn ipv6_dst(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.ipv6_dst) }.ok() + } +} +impl bpf_redir_neigh { + pub fn nh_family(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.nh_family) }.ok() + } + pub fn ipv4_nh(&self) -> Option<__be32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.ipv4_nh) }.ok() + } + pub fn ipv6_nh(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.ipv6_nh) }.ok() + } +} +impl bpf_redir_neigh__bindgen_ty_1 { + pub fn ipv4_nh(&self) -> Option<__be32> { + unsafe { crate::bpf_probe_read(&self.ipv4_nh) }.ok() + } + pub fn ipv6_nh(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.ipv6_nh) }.ok() + } +} +impl bpf_flow_keys { + pub fn nhoff(&self) -> Option<__u16> { + unsafe { crate::bpf_probe_read(&self.nhoff) }.ok() + } + pub fn thoff(&self) -> Option<__u16> { + unsafe { crate::bpf_probe_read(&self.thoff) }.ok() + } + pub fn addr_proto(&self) -> Option<__u16> { + unsafe { crate::bpf_probe_read(&self.addr_proto) }.ok() + } + pub fn is_frag(&self) -> Option<__u8> { + unsafe { crate::bpf_probe_read(&self.is_frag) }.ok() + } + pub fn is_first_frag(&self) -> Option<__u8> { + unsafe { crate::bpf_probe_read(&self.is_first_frag) }.ok() + } + pub fn is_encap(&self) -> Option<__u8> { + unsafe { crate::bpf_probe_read(&self.is_encap) }.ok() + } + pub fn ip_proto(&self) -> Option<__u8> { + unsafe { crate::bpf_probe_read(&self.ip_proto) }.ok() + } + pub fn n_proto(&self) -> Option<__be16> { + unsafe { crate::bpf_probe_read(&self.n_proto) }.ok() + } + pub fn sport(&self) -> Option<__be16> { + unsafe { crate::bpf_probe_read(&self.sport) }.ok() + } + pub fn dport(&self) -> Option<__be16> { + unsafe { crate::bpf_probe_read(&self.dport) }.ok() + } + pub fn ipv4_src(&self) -> Option<__be32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.__bindgen_anon_1.ipv4_src) }.ok() + } + pub fn ipv4_dst(&self) -> Option<__be32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.__bindgen_anon_1.ipv4_dst) }.ok() + } + pub fn ipv6_src(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.__bindgen_anon_2.ipv6_src) }.ok() + } + pub fn ipv6_dst(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.__bindgen_anon_2.ipv6_dst) }.ok() + } + pub fn flags(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.flags) }.ok() + } + pub fn flow_label(&self) -> Option<__be32> { + unsafe { crate::bpf_probe_read(&self.flow_label) }.ok() + } +} +impl bpf_flow_keys__bindgen_ty_1 { + pub fn ipv4_src(&self) -> Option<__be32> { + unsafe { 
crate::bpf_probe_read(&self.__bindgen_anon_1.ipv4_src) }.ok() + } + pub fn ipv4_dst(&self) -> Option<__be32> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.ipv4_dst) }.ok() + } + pub fn ipv6_src(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_2.ipv6_src) }.ok() + } + pub fn ipv6_dst(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_2.ipv6_dst) }.ok() + } +} +impl bpf_flow_keys__bindgen_ty_1__bindgen_ty_1 { + pub fn ipv4_src(&self) -> Option<__be32> { + unsafe { crate::bpf_probe_read(&self.ipv4_src) }.ok() + } + pub fn ipv4_dst(&self) -> Option<__be32> { + unsafe { crate::bpf_probe_read(&self.ipv4_dst) }.ok() + } +} +impl bpf_flow_keys__bindgen_ty_1__bindgen_ty_2 { + pub fn ipv6_src(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.ipv6_src) }.ok() + } + pub fn ipv6_dst(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.ipv6_dst) }.ok() + } +} +impl bpf_func_info { + pub fn insn_off(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.insn_off) }.ok() + } + pub fn type_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.type_id) }.ok() + } +} +impl bpf_line_info { + pub fn insn_off(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.insn_off) }.ok() + } + pub fn file_name_off(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.file_name_off) }.ok() + } + pub fn line_off(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.line_off) }.ok() + } + pub fn line_col(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.line_col) }.ok() + } +} +impl bpf_spin_lock { + pub fn val(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.val) }.ok() + } +} +impl bpf_timer {} +impl bpf_sysctl { + pub fn write(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.write) }.ok() + } + pub fn file_pos(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.file_pos) }.ok() + } +} +impl bpf_sockopt { + pub fn sk(&self) -> Option<*mut bpf_sock> { + let v = unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.sk) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } + pub fn optval(&self) -> Option<*mut ::aya_bpf_cty::c_void> { + let v = unsafe { crate::bpf_probe_read(&self.__bindgen_anon_2.optval) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } + pub fn optval_end(&self) -> Option<*mut ::aya_bpf_cty::c_void> { + let v = unsafe { crate::bpf_probe_read(&self.__bindgen_anon_3.optval_end) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } + pub fn level(&self) -> Option<__s32> { + unsafe { crate::bpf_probe_read(&self.level) }.ok() + } + pub fn optname(&self) -> Option<__s32> { + unsafe { crate::bpf_probe_read(&self.optname) }.ok() + } + pub fn optlen(&self) -> Option<__s32> { + unsafe { crate::bpf_probe_read(&self.optlen) }.ok() + } + pub fn retval(&self) -> Option<__s32> { + unsafe { crate::bpf_probe_read(&self.retval) }.ok() + } +} +impl bpf_sockopt__bindgen_ty_1 { + pub fn sk(&self) -> Option<*mut bpf_sock> { + let v = unsafe { crate::bpf_probe_read(&self.sk) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } +} +impl bpf_sockopt__bindgen_ty_2 { + pub fn optval(&self) -> Option<*mut ::aya_bpf_cty::c_void> { + let v = unsafe { crate::bpf_probe_read(&self.optval) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } +} +impl bpf_sockopt__bindgen_ty_3 { + pub fn optval_end(&self) -> Option<*mut 
::aya_bpf_cty::c_void> { + let v = unsafe { crate::bpf_probe_read(&self.optval_end) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } +} +impl bpf_pidns_info { + pub fn pid(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.pid) }.ok() + } + pub fn tgid(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.tgid) }.ok() + } +} +impl bpf_sk_lookup { + pub fn sk(&self) -> Option<*mut bpf_sock> { + let v = + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.__bindgen_anon_1.sk) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } + pub fn cookie(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.cookie) }.ok() + } + pub fn family(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.family) }.ok() + } + pub fn protocol(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.protocol) }.ok() + } + pub fn remote_ip4(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.remote_ip4) }.ok() + } + pub fn remote_ip6(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.remote_ip6) }.ok() + } + pub fn remote_port(&self) -> Option<__be16> { + unsafe { crate::bpf_probe_read(&self.remote_port) }.ok() + } + pub fn local_ip4(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.local_ip4) }.ok() + } + pub fn local_ip6(&self) -> Option<[__u32; 4usize]> { + unsafe { crate::bpf_probe_read(&self.local_ip6) }.ok() + } + pub fn local_port(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.local_port) }.ok() + } + pub fn ingress_ifindex(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.ingress_ifindex) }.ok() + } +} +impl bpf_sk_lookup__bindgen_ty_1 { + pub fn sk(&self) -> Option<*mut bpf_sock> { + let v = unsafe { crate::bpf_probe_read(&self.__bindgen_anon_1.sk) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } + pub fn cookie(&self) -> Option<__u64> { + unsafe { crate::bpf_probe_read(&self.cookie) }.ok() + } +} +impl bpf_sk_lookup__bindgen_ty_1__bindgen_ty_1 { + pub fn sk(&self) -> Option<*mut bpf_sock> { + let v = unsafe { crate::bpf_probe_read(&self.sk) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } +} +impl btf_ptr { + pub fn ptr(&self) -> Option<*mut ::aya_bpf_cty::c_void> { + let v = unsafe { crate::bpf_probe_read(&self.ptr) }.ok()?; + if v.is_null() { + None + } else { + Some(v) + } + } + pub fn type_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.type_id) }.ok() + } + pub fn flags(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.flags) }.ok() + } +} +impl bpf_core_relo { + pub fn insn_off(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.insn_off) }.ok() + } + pub fn type_id(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.type_id) }.ok() + } + pub fn access_str_off(&self) -> Option<__u32> { + unsafe { crate::bpf_probe_read(&self.access_str_off) }.ok() + } + pub fn kind(&self) -> Option<bpf_core_relo_kind::Type> { + unsafe { crate::bpf_probe_read(&self.kind) }.ok() + } +} +impl sockaddr { + pub fn sa_family(&self) -> Option<sa_family_t> { + unsafe { crate::bpf_probe_read(&self.sa_family) }.ok() + } + pub fn sa_data(&self) -> Option<[::aya_bpf_cty::c_char; 14usize]> { + unsafe { crate::bpf_probe_read(&self.sa_data) }.ok() + } +} +impl bpf_perf_event_data {} +impl linux_binprm {} +impl pt_regs {} +impl tcphdr {} +impl seq_file {} +impl tcp6_sock {} +impl tcp_sock {} +impl tcp_timewait_sock {} +impl tcp_request_sock {} +impl udp6_sock {} +impl unix_sock {} +impl task_struct 
{} +impl path {} +impl inode {} +impl socket {} +impl file {} diff --git a/bpf/aya-bpf-bindings/src/riscv64/helpers.rs b/bpf/aya-bpf-bindings/src/riscv64/helpers.rs index e69de29b..454652a6 100644 --- a/bpf/aya-bpf-bindings/src/riscv64/helpers.rs +++ b/bpf/aya-bpf-bindings/src/riscv64/helpers.rs @@ -0,0 +1,1983 @@ +use super::bindings::*; +pub unsafe fn bpf_map_lookup_elem( + map: *mut ::aya_bpf_cty::c_void, + key: *const ::aya_bpf_cty::c_void, +) -> *mut ::aya_bpf_cty::c_void { + let fun: unsafe extern "C" fn( + map: *mut ::aya_bpf_cty::c_void, + key: *const ::aya_bpf_cty::c_void, + ) -> *mut ::aya_bpf_cty::c_void = ::core::mem::transmute(1usize); + fun(map, key) +} +pub unsafe fn bpf_map_update_elem( + map: *mut ::aya_bpf_cty::c_void, + key: *const ::aya_bpf_cty::c_void, + value: *const ::aya_bpf_cty::c_void, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + map: *mut ::aya_bpf_cty::c_void, + key: *const ::aya_bpf_cty::c_void, + value: *const ::aya_bpf_cty::c_void, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(2usize); + fun(map, key, value, flags) +} +pub unsafe fn bpf_map_delete_elem( + map: *mut ::aya_bpf_cty::c_void, + key: *const ::aya_bpf_cty::c_void, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + map: *mut ::aya_bpf_cty::c_void, + key: *const ::aya_bpf_cty::c_void, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(3usize); + fun(map, key) +} +pub unsafe fn bpf_probe_read( + dst: *mut ::aya_bpf_cty::c_void, + size: __u32, + unsafe_ptr: *const ::aya_bpf_cty::c_void, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + dst: *mut ::aya_bpf_cty::c_void, + size: __u32, + unsafe_ptr: *const ::aya_bpf_cty::c_void, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(4usize); + fun(dst, size, unsafe_ptr) +} +pub unsafe fn bpf_ktime_get_ns() -> __u64 { + let fun: unsafe extern "C" fn() -> __u64 = ::core::mem::transmute(5usize); + fun() +} +pub unsafe fn bpf_get_prandom_u32() -> __u32 { + let fun: unsafe extern "C" fn() -> __u32 = ::core::mem::transmute(7usize); + fun() +} +pub unsafe fn bpf_get_smp_processor_id() -> __u32 { + let fun: unsafe extern "C" fn() -> __u32 = ::core::mem::transmute(8usize); + fun() +} +pub unsafe fn bpf_skb_store_bytes( + skb: *mut __sk_buff, + offset: __u32, + from: *const ::aya_bpf_cty::c_void, + len: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + offset: __u32, + from: *const ::aya_bpf_cty::c_void, + len: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(9usize); + fun(skb, offset, from, len, flags) +} +pub unsafe fn bpf_l3_csum_replace( + skb: *mut __sk_buff, + offset: __u32, + from: __u64, + to: __u64, + size: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + offset: __u32, + from: __u64, + to: __u64, + size: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(10usize); + fun(skb, offset, from, to, size) +} +pub unsafe fn bpf_l4_csum_replace( + skb: *mut __sk_buff, + offset: __u32, + from: __u64, + to: __u64, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + offset: __u32, + from: __u64, + to: __u64, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(11usize); + fun(skb, offset, from, to, flags) +} +pub unsafe fn bpf_tail_call( + ctx: *mut ::aya_bpf_cty::c_void, + prog_array_map: *mut ::aya_bpf_cty::c_void, + index: __u32, +) -> ::aya_bpf_cty::c_long { + let 
fun: unsafe extern "C" fn( + ctx: *mut ::aya_bpf_cty::c_void, + prog_array_map: *mut ::aya_bpf_cty::c_void, + index: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(12usize); + fun(ctx, prog_array_map, index) +} +pub unsafe fn bpf_clone_redirect( + skb: *mut __sk_buff, + ifindex: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + ifindex: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(13usize); + fun(skb, ifindex, flags) +} +pub unsafe fn bpf_get_current_pid_tgid() -> __u64 { + let fun: unsafe extern "C" fn() -> __u64 = ::core::mem::transmute(14usize); + fun() +} +pub unsafe fn bpf_get_current_uid_gid() -> __u64 { + let fun: unsafe extern "C" fn() -> __u64 = ::core::mem::transmute(15usize); + fun() +} +pub unsafe fn bpf_get_current_comm( + buf: *mut ::aya_bpf_cty::c_void, + size_of_buf: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + buf: *mut ::aya_bpf_cty::c_void, + size_of_buf: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(16usize); + fun(buf, size_of_buf) +} +pub unsafe fn bpf_get_cgroup_classid(skb: *mut __sk_buff) -> __u32 { + let fun: unsafe extern "C" fn(skb: *mut __sk_buff) -> __u32 = ::core::mem::transmute(17usize); + fun(skb) +} +pub unsafe fn bpf_skb_vlan_push( + skb: *mut __sk_buff, + vlan_proto: __be16, + vlan_tci: __u16, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + vlan_proto: __be16, + vlan_tci: __u16, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(18usize); + fun(skb, vlan_proto, vlan_tci) +} +pub unsafe fn bpf_skb_vlan_pop(skb: *mut __sk_buff) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(skb: *mut __sk_buff) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(19usize); + fun(skb) +} +pub unsafe fn bpf_skb_get_tunnel_key( + skb: *mut __sk_buff, + key: *mut bpf_tunnel_key, + size: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + key: *mut bpf_tunnel_key, + size: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(20usize); + fun(skb, key, size, flags) +} +pub unsafe fn bpf_skb_set_tunnel_key( + skb: *mut __sk_buff, + key: *mut bpf_tunnel_key, + size: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + key: *mut bpf_tunnel_key, + size: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(21usize); + fun(skb, key, size, flags) +} +pub unsafe fn bpf_perf_event_read(map: *mut ::aya_bpf_cty::c_void, flags: __u64) -> __u64 { + let fun: unsafe extern "C" fn(map: *mut ::aya_bpf_cty::c_void, flags: __u64) -> __u64 = + ::core::mem::transmute(22usize); + fun(map, flags) +} +pub unsafe fn bpf_redirect(ifindex: __u32, flags: __u64) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(ifindex: __u32, flags: __u64) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(23usize); + fun(ifindex, flags) +} +pub unsafe fn bpf_get_route_realm(skb: *mut __sk_buff) -> __u32 { + let fun: unsafe extern "C" fn(skb: *mut __sk_buff) -> __u32 = ::core::mem::transmute(24usize); + fun(skb) +} +pub unsafe fn bpf_perf_event_output( + ctx: *mut ::aya_bpf_cty::c_void, + map: *mut ::aya_bpf_cty::c_void, + flags: __u64, + data: *mut ::aya_bpf_cty::c_void, + size: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut ::aya_bpf_cty::c_void, + map: *mut ::aya_bpf_cty::c_void, + flags: __u64, + data: *mut 
::aya_bpf_cty::c_void, + size: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(25usize); + fun(ctx, map, flags, data, size) +} +pub unsafe fn bpf_skb_load_bytes( + skb: *const ::aya_bpf_cty::c_void, + offset: __u32, + to: *mut ::aya_bpf_cty::c_void, + len: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *const ::aya_bpf_cty::c_void, + offset: __u32, + to: *mut ::aya_bpf_cty::c_void, + len: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(26usize); + fun(skb, offset, to, len) +} +pub unsafe fn bpf_get_stackid( + ctx: *mut ::aya_bpf_cty::c_void, + map: *mut ::aya_bpf_cty::c_void, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut ::aya_bpf_cty::c_void, + map: *mut ::aya_bpf_cty::c_void, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(27usize); + fun(ctx, map, flags) +} +pub unsafe fn bpf_csum_diff( + from: *mut __be32, + from_size: __u32, + to: *mut __be32, + to_size: __u32, + seed: __wsum, +) -> __s64 { + let fun: unsafe extern "C" fn( + from: *mut __be32, + from_size: __u32, + to: *mut __be32, + to_size: __u32, + seed: __wsum, + ) -> __s64 = ::core::mem::transmute(28usize); + fun(from, from_size, to, to_size, seed) +} +pub unsafe fn bpf_skb_get_tunnel_opt( + skb: *mut __sk_buff, + opt: *mut ::aya_bpf_cty::c_void, + size: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + opt: *mut ::aya_bpf_cty::c_void, + size: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(29usize); + fun(skb, opt, size) +} +pub unsafe fn bpf_skb_set_tunnel_opt( + skb: *mut __sk_buff, + opt: *mut ::aya_bpf_cty::c_void, + size: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + opt: *mut ::aya_bpf_cty::c_void, + size: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(30usize); + fun(skb, opt, size) +} +pub unsafe fn bpf_skb_change_proto( + skb: *mut __sk_buff, + proto: __be16, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + proto: __be16, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(31usize); + fun(skb, proto, flags) +} +pub unsafe fn bpf_skb_change_type(skb: *mut __sk_buff, type_: __u32) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(skb: *mut __sk_buff, type_: __u32) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(32usize); + fun(skb, type_) +} +pub unsafe fn bpf_skb_under_cgroup( + skb: *mut __sk_buff, + map: *mut ::aya_bpf_cty::c_void, + index: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + map: *mut ::aya_bpf_cty::c_void, + index: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(33usize); + fun(skb, map, index) +} +pub unsafe fn bpf_get_hash_recalc(skb: *mut __sk_buff) -> __u32 { + let fun: unsafe extern "C" fn(skb: *mut __sk_buff) -> __u32 = ::core::mem::transmute(34usize); + fun(skb) +} +pub unsafe fn bpf_get_current_task() -> __u64 { + let fun: unsafe extern "C" fn() -> __u64 = ::core::mem::transmute(35usize); + fun() +} +pub unsafe fn bpf_probe_write_user( + dst: *mut ::aya_bpf_cty::c_void, + src: *const ::aya_bpf_cty::c_void, + len: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + dst: *mut ::aya_bpf_cty::c_void, + src: *const ::aya_bpf_cty::c_void, + len: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(36usize); + fun(dst, src, len) +} +pub unsafe fn bpf_current_task_under_cgroup( + map: *mut 
::aya_bpf_cty::c_void, + index: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + map: *mut ::aya_bpf_cty::c_void, + index: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(37usize); + fun(map, index) +} +pub unsafe fn bpf_skb_change_tail( + skb: *mut __sk_buff, + len: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + len: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(38usize); + fun(skb, len, flags) +} +pub unsafe fn bpf_skb_pull_data(skb: *mut __sk_buff, len: __u32) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(skb: *mut __sk_buff, len: __u32) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(39usize); + fun(skb, len) +} +pub unsafe fn bpf_csum_update(skb: *mut __sk_buff, csum: __wsum) -> __s64 { + let fun: unsafe extern "C" fn(skb: *mut __sk_buff, csum: __wsum) -> __s64 = + ::core::mem::transmute(40usize); + fun(skb, csum) +} +pub unsafe fn bpf_set_hash_invalid(skb: *mut __sk_buff) { + let fun: unsafe extern "C" fn(skb: *mut __sk_buff) = ::core::mem::transmute(41usize); + fun(skb) +} +pub unsafe fn bpf_get_numa_node_id() -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn() -> ::aya_bpf_cty::c_long = ::core::mem::transmute(42usize); + fun() +} +pub unsafe fn bpf_skb_change_head( + skb: *mut __sk_buff, + len: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + len: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(43usize); + fun(skb, len, flags) +} +pub unsafe fn bpf_xdp_adjust_head( + xdp_md: *mut xdp_md, + delta: ::aya_bpf_cty::c_int, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + xdp_md: *mut xdp_md, + delta: ::aya_bpf_cty::c_int, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(44usize); + fun(xdp_md, delta) +} +pub unsafe fn bpf_probe_read_str( + dst: *mut ::aya_bpf_cty::c_void, + size: __u32, + unsafe_ptr: *const ::aya_bpf_cty::c_void, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + dst: *mut ::aya_bpf_cty::c_void, + size: __u32, + unsafe_ptr: *const ::aya_bpf_cty::c_void, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(45usize); + fun(dst, size, unsafe_ptr) +} +pub unsafe fn bpf_get_socket_cookie(ctx: *mut ::aya_bpf_cty::c_void) -> __u64 { + let fun: unsafe extern "C" fn(ctx: *mut ::aya_bpf_cty::c_void) -> __u64 = + ::core::mem::transmute(46usize); + fun(ctx) +} +pub unsafe fn bpf_get_socket_uid(skb: *mut __sk_buff) -> __u32 { + let fun: unsafe extern "C" fn(skb: *mut __sk_buff) -> __u32 = ::core::mem::transmute(47usize); + fun(skb) +} +pub unsafe fn bpf_set_hash(skb: *mut __sk_buff, hash: __u32) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(skb: *mut __sk_buff, hash: __u32) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(48usize); + fun(skb, hash) +} +pub unsafe fn bpf_setsockopt( + bpf_socket: *mut ::aya_bpf_cty::c_void, + level: ::aya_bpf_cty::c_int, + optname: ::aya_bpf_cty::c_int, + optval: *mut ::aya_bpf_cty::c_void, + optlen: ::aya_bpf_cty::c_int, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + bpf_socket: *mut ::aya_bpf_cty::c_void, + level: ::aya_bpf_cty::c_int, + optname: ::aya_bpf_cty::c_int, + optval: *mut ::aya_bpf_cty::c_void, + optlen: ::aya_bpf_cty::c_int, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(49usize); + fun(bpf_socket, level, optname, optval, optlen) +} +pub unsafe fn bpf_skb_adjust_room( + skb: *mut __sk_buff, + len_diff: __s32, + mode: 
__u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + len_diff: __s32, + mode: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(50usize); + fun(skb, len_diff, mode, flags) +} +pub unsafe fn bpf_redirect_map( + map: *mut ::aya_bpf_cty::c_void, + key: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + map: *mut ::aya_bpf_cty::c_void, + key: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(51usize); + fun(map, key, flags) +} +pub unsafe fn bpf_sk_redirect_map( + skb: *mut __sk_buff, + map: *mut ::aya_bpf_cty::c_void, + key: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + map: *mut ::aya_bpf_cty::c_void, + key: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(52usize); + fun(skb, map, key, flags) +} +pub unsafe fn bpf_sock_map_update( + skops: *mut bpf_sock_ops, + map: *mut ::aya_bpf_cty::c_void, + key: *mut ::aya_bpf_cty::c_void, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skops: *mut bpf_sock_ops, + map: *mut ::aya_bpf_cty::c_void, + key: *mut ::aya_bpf_cty::c_void, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(53usize); + fun(skops, map, key, flags) +} +pub unsafe fn bpf_xdp_adjust_meta( + xdp_md: *mut xdp_md, + delta: ::aya_bpf_cty::c_int, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + xdp_md: *mut xdp_md, + delta: ::aya_bpf_cty::c_int, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(54usize); + fun(xdp_md, delta) +} +pub unsafe fn bpf_perf_event_read_value( + map: *mut ::aya_bpf_cty::c_void, + flags: __u64, + buf: *mut bpf_perf_event_value, + buf_size: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + map: *mut ::aya_bpf_cty::c_void, + flags: __u64, + buf: *mut bpf_perf_event_value, + buf_size: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(55usize); + fun(map, flags, buf, buf_size) +} +pub unsafe fn bpf_perf_prog_read_value( + ctx: *mut bpf_perf_event_data, + buf: *mut bpf_perf_event_value, + buf_size: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut bpf_perf_event_data, + buf: *mut bpf_perf_event_value, + buf_size: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(56usize); + fun(ctx, buf, buf_size) +} +pub unsafe fn bpf_getsockopt( + bpf_socket: *mut ::aya_bpf_cty::c_void, + level: ::aya_bpf_cty::c_int, + optname: ::aya_bpf_cty::c_int, + optval: *mut ::aya_bpf_cty::c_void, + optlen: ::aya_bpf_cty::c_int, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + bpf_socket: *mut ::aya_bpf_cty::c_void, + level: ::aya_bpf_cty::c_int, + optname: ::aya_bpf_cty::c_int, + optval: *mut ::aya_bpf_cty::c_void, + optlen: ::aya_bpf_cty::c_int, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(57usize); + fun(bpf_socket, level, optname, optval, optlen) +} +pub unsafe fn bpf_override_return(regs: *mut pt_regs, rc: __u64) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(regs: *mut pt_regs, rc: __u64) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(58usize); + fun(regs, rc) +} +pub unsafe fn bpf_sock_ops_cb_flags_set( + bpf_sock: *mut bpf_sock_ops, + argval: ::aya_bpf_cty::c_int, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + bpf_sock: *mut bpf_sock_ops, + argval: ::aya_bpf_cty::c_int, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(59usize); + 
fun(bpf_sock, argval) +} +pub unsafe fn bpf_msg_redirect_map( + msg: *mut sk_msg_md, + map: *mut ::aya_bpf_cty::c_void, + key: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + msg: *mut sk_msg_md, + map: *mut ::aya_bpf_cty::c_void, + key: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(60usize); + fun(msg, map, key, flags) +} +pub unsafe fn bpf_msg_apply_bytes(msg: *mut sk_msg_md, bytes: __u32) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(msg: *mut sk_msg_md, bytes: __u32) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(61usize); + fun(msg, bytes) +} +pub unsafe fn bpf_msg_cork_bytes(msg: *mut sk_msg_md, bytes: __u32) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(msg: *mut sk_msg_md, bytes: __u32) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(62usize); + fun(msg, bytes) +} +pub unsafe fn bpf_msg_pull_data( + msg: *mut sk_msg_md, + start: __u32, + end: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + msg: *mut sk_msg_md, + start: __u32, + end: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(63usize); + fun(msg, start, end, flags) +} +pub unsafe fn bpf_bind( + ctx: *mut bpf_sock_addr, + addr: *mut sockaddr, + addr_len: ::aya_bpf_cty::c_int, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut bpf_sock_addr, + addr: *mut sockaddr, + addr_len: ::aya_bpf_cty::c_int, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(64usize); + fun(ctx, addr, addr_len) +} +pub unsafe fn bpf_xdp_adjust_tail( + xdp_md: *mut xdp_md, + delta: ::aya_bpf_cty::c_int, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + xdp_md: *mut xdp_md, + delta: ::aya_bpf_cty::c_int, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(65usize); + fun(xdp_md, delta) +} +pub unsafe fn bpf_skb_get_xfrm_state( + skb: *mut __sk_buff, + index: __u32, + xfrm_state: *mut bpf_xfrm_state, + size: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + index: __u32, + xfrm_state: *mut bpf_xfrm_state, + size: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(66usize); + fun(skb, index, xfrm_state, size, flags) +} +pub unsafe fn bpf_get_stack( + ctx: *mut ::aya_bpf_cty::c_void, + buf: *mut ::aya_bpf_cty::c_void, + size: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut ::aya_bpf_cty::c_void, + buf: *mut ::aya_bpf_cty::c_void, + size: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(67usize); + fun(ctx, buf, size, flags) +} +pub unsafe fn bpf_skb_load_bytes_relative( + skb: *const ::aya_bpf_cty::c_void, + offset: __u32, + to: *mut ::aya_bpf_cty::c_void, + len: __u32, + start_header: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *const ::aya_bpf_cty::c_void, + offset: __u32, + to: *mut ::aya_bpf_cty::c_void, + len: __u32, + start_header: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(68usize); + fun(skb, offset, to, len, start_header) +} +pub unsafe fn bpf_fib_lookup( + ctx: *mut ::aya_bpf_cty::c_void, + params: *mut bpf_fib_lookup, + plen: ::aya_bpf_cty::c_int, + flags: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut ::aya_bpf_cty::c_void, + params: *mut bpf_fib_lookup, + plen: ::aya_bpf_cty::c_int, + flags: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(69usize); + fun(ctx, params, plen, 
flags) +} +pub unsafe fn bpf_sock_hash_update( + skops: *mut bpf_sock_ops, + map: *mut ::aya_bpf_cty::c_void, + key: *mut ::aya_bpf_cty::c_void, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skops: *mut bpf_sock_ops, + map: *mut ::aya_bpf_cty::c_void, + key: *mut ::aya_bpf_cty::c_void, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(70usize); + fun(skops, map, key, flags) +} +pub unsafe fn bpf_msg_redirect_hash( + msg: *mut sk_msg_md, + map: *mut ::aya_bpf_cty::c_void, + key: *mut ::aya_bpf_cty::c_void, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + msg: *mut sk_msg_md, + map: *mut ::aya_bpf_cty::c_void, + key: *mut ::aya_bpf_cty::c_void, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(71usize); + fun(msg, map, key, flags) +} +pub unsafe fn bpf_sk_redirect_hash( + skb: *mut __sk_buff, + map: *mut ::aya_bpf_cty::c_void, + key: *mut ::aya_bpf_cty::c_void, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + map: *mut ::aya_bpf_cty::c_void, + key: *mut ::aya_bpf_cty::c_void, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(72usize); + fun(skb, map, key, flags) +} +pub unsafe fn bpf_lwt_push_encap( + skb: *mut __sk_buff, + type_: __u32, + hdr: *mut ::aya_bpf_cty::c_void, + len: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + type_: __u32, + hdr: *mut ::aya_bpf_cty::c_void, + len: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(73usize); + fun(skb, type_, hdr, len) +} +pub unsafe fn bpf_lwt_seg6_store_bytes( + skb: *mut __sk_buff, + offset: __u32, + from: *const ::aya_bpf_cty::c_void, + len: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + offset: __u32, + from: *const ::aya_bpf_cty::c_void, + len: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(74usize); + fun(skb, offset, from, len) +} +pub unsafe fn bpf_lwt_seg6_adjust_srh( + skb: *mut __sk_buff, + offset: __u32, + delta: __s32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + offset: __u32, + delta: __s32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(75usize); + fun(skb, offset, delta) +} +pub unsafe fn bpf_lwt_seg6_action( + skb: *mut __sk_buff, + action: __u32, + param: *mut ::aya_bpf_cty::c_void, + param_len: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + action: __u32, + param: *mut ::aya_bpf_cty::c_void, + param_len: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(76usize); + fun(skb, action, param, param_len) +} +pub unsafe fn bpf_rc_repeat(ctx: *mut ::aya_bpf_cty::c_void) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(ctx: *mut ::aya_bpf_cty::c_void) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(77usize); + fun(ctx) +} +pub unsafe fn bpf_rc_keydown( + ctx: *mut ::aya_bpf_cty::c_void, + protocol: __u32, + scancode: __u64, + toggle: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut ::aya_bpf_cty::c_void, + protocol: __u32, + scancode: __u64, + toggle: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(78usize); + fun(ctx, protocol, scancode, toggle) +} +pub unsafe fn bpf_skb_cgroup_id(skb: *mut __sk_buff) -> __u64 { + let fun: unsafe extern "C" fn(skb: *mut __sk_buff) -> __u64 = ::core::mem::transmute(79usize); + fun(skb) +} +pub unsafe fn bpf_get_current_cgroup_id() -> 
__u64 { + let fun: unsafe extern "C" fn() -> __u64 = ::core::mem::transmute(80usize); + fun() +} +pub unsafe fn bpf_get_local_storage( + map: *mut ::aya_bpf_cty::c_void, + flags: __u64, +) -> *mut ::aya_bpf_cty::c_void { + let fun: unsafe extern "C" fn( + map: *mut ::aya_bpf_cty::c_void, + flags: __u64, + ) -> *mut ::aya_bpf_cty::c_void = ::core::mem::transmute(81usize); + fun(map, flags) +} +pub unsafe fn bpf_sk_select_reuseport( + reuse: *mut sk_reuseport_md, + map: *mut ::aya_bpf_cty::c_void, + key: *mut ::aya_bpf_cty::c_void, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + reuse: *mut sk_reuseport_md, + map: *mut ::aya_bpf_cty::c_void, + key: *mut ::aya_bpf_cty::c_void, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(82usize); + fun(reuse, map, key, flags) +} +pub unsafe fn bpf_skb_ancestor_cgroup_id( + skb: *mut __sk_buff, + ancestor_level: ::aya_bpf_cty::c_int, +) -> __u64 { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + ancestor_level: ::aya_bpf_cty::c_int, + ) -> __u64 = ::core::mem::transmute(83usize); + fun(skb, ancestor_level) +} +pub unsafe fn bpf_sk_lookup_tcp( + ctx: *mut ::aya_bpf_cty::c_void, + tuple: *mut bpf_sock_tuple, + tuple_size: __u32, + netns: __u64, + flags: __u64, +) -> *mut bpf_sock { + let fun: unsafe extern "C" fn( + ctx: *mut ::aya_bpf_cty::c_void, + tuple: *mut bpf_sock_tuple, + tuple_size: __u32, + netns: __u64, + flags: __u64, + ) -> *mut bpf_sock = ::core::mem::transmute(84usize); + fun(ctx, tuple, tuple_size, netns, flags) +} +pub unsafe fn bpf_sk_lookup_udp( + ctx: *mut ::aya_bpf_cty::c_void, + tuple: *mut bpf_sock_tuple, + tuple_size: __u32, + netns: __u64, + flags: __u64, +) -> *mut bpf_sock { + let fun: unsafe extern "C" fn( + ctx: *mut ::aya_bpf_cty::c_void, + tuple: *mut bpf_sock_tuple, + tuple_size: __u32, + netns: __u64, + flags: __u64, + ) -> *mut bpf_sock = ::core::mem::transmute(85usize); + fun(ctx, tuple, tuple_size, netns, flags) +} +pub unsafe fn bpf_sk_release(sock: *mut ::aya_bpf_cty::c_void) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(sock: *mut ::aya_bpf_cty::c_void) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(86usize); + fun(sock) +} +pub unsafe fn bpf_map_push_elem( + map: *mut ::aya_bpf_cty::c_void, + value: *const ::aya_bpf_cty::c_void, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + map: *mut ::aya_bpf_cty::c_void, + value: *const ::aya_bpf_cty::c_void, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(87usize); + fun(map, value, flags) +} +pub unsafe fn bpf_map_pop_elem( + map: *mut ::aya_bpf_cty::c_void, + value: *mut ::aya_bpf_cty::c_void, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + map: *mut ::aya_bpf_cty::c_void, + value: *mut ::aya_bpf_cty::c_void, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(88usize); + fun(map, value) +} +pub unsafe fn bpf_map_peek_elem( + map: *mut ::aya_bpf_cty::c_void, + value: *mut ::aya_bpf_cty::c_void, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + map: *mut ::aya_bpf_cty::c_void, + value: *mut ::aya_bpf_cty::c_void, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(89usize); + fun(map, value) +} +pub unsafe fn bpf_msg_push_data( + msg: *mut sk_msg_md, + start: __u32, + len: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + msg: *mut sk_msg_md, + start: __u32, + len: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(90usize); + fun(msg, start, len, 
flags) +} +pub unsafe fn bpf_msg_pop_data( + msg: *mut sk_msg_md, + start: __u32, + len: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + msg: *mut sk_msg_md, + start: __u32, + len: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(91usize); + fun(msg, start, len, flags) +} +pub unsafe fn bpf_rc_pointer_rel( + ctx: *mut ::aya_bpf_cty::c_void, + rel_x: __s32, + rel_y: __s32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut ::aya_bpf_cty::c_void, + rel_x: __s32, + rel_y: __s32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(92usize); + fun(ctx, rel_x, rel_y) +} +pub unsafe fn bpf_spin_lock(lock: *mut bpf_spin_lock) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(lock: *mut bpf_spin_lock) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(93usize); + fun(lock) +} +pub unsafe fn bpf_spin_unlock(lock: *mut bpf_spin_lock) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(lock: *mut bpf_spin_lock) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(94usize); + fun(lock) +} +pub unsafe fn bpf_sk_fullsock(sk: *mut bpf_sock) -> *mut bpf_sock { + let fun: unsafe extern "C" fn(sk: *mut bpf_sock) -> *mut bpf_sock = + ::core::mem::transmute(95usize); + fun(sk) +} +pub unsafe fn bpf_tcp_sock(sk: *mut bpf_sock) -> *mut bpf_tcp_sock { + let fun: unsafe extern "C" fn(sk: *mut bpf_sock) -> *mut bpf_tcp_sock = + ::core::mem::transmute(96usize); + fun(sk) +} +pub unsafe fn bpf_skb_ecn_set_ce(skb: *mut __sk_buff) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(skb: *mut __sk_buff) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(97usize); + fun(skb) +} +pub unsafe fn bpf_get_listener_sock(sk: *mut bpf_sock) -> *mut bpf_sock { + let fun: unsafe extern "C" fn(sk: *mut bpf_sock) -> *mut bpf_sock = + ::core::mem::transmute(98usize); + fun(sk) +} +pub unsafe fn bpf_skc_lookup_tcp( + ctx: *mut ::aya_bpf_cty::c_void, + tuple: *mut bpf_sock_tuple, + tuple_size: __u32, + netns: __u64, + flags: __u64, +) -> *mut bpf_sock { + let fun: unsafe extern "C" fn( + ctx: *mut ::aya_bpf_cty::c_void, + tuple: *mut bpf_sock_tuple, + tuple_size: __u32, + netns: __u64, + flags: __u64, + ) -> *mut bpf_sock = ::core::mem::transmute(99usize); + fun(ctx, tuple, tuple_size, netns, flags) +} +pub unsafe fn bpf_tcp_check_syncookie( + sk: *mut ::aya_bpf_cty::c_void, + iph: *mut ::aya_bpf_cty::c_void, + iph_len: __u32, + th: *mut tcphdr, + th_len: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + sk: *mut ::aya_bpf_cty::c_void, + iph: *mut ::aya_bpf_cty::c_void, + iph_len: __u32, + th: *mut tcphdr, + th_len: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(100usize); + fun(sk, iph, iph_len, th, th_len) +} +pub unsafe fn bpf_sysctl_get_name( + ctx: *mut bpf_sysctl, + buf: *mut ::aya_bpf_cty::c_char, + buf_len: ::aya_bpf_cty::c_ulong, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut bpf_sysctl, + buf: *mut ::aya_bpf_cty::c_char, + buf_len: ::aya_bpf_cty::c_ulong, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(101usize); + fun(ctx, buf, buf_len, flags) +} +pub unsafe fn bpf_sysctl_get_current_value( + ctx: *mut bpf_sysctl, + buf: *mut ::aya_bpf_cty::c_char, + buf_len: ::aya_bpf_cty::c_ulong, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut bpf_sysctl, + buf: *mut ::aya_bpf_cty::c_char, + buf_len: ::aya_bpf_cty::c_ulong, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(102usize); + 
fun(ctx, buf, buf_len) +} +pub unsafe fn bpf_sysctl_get_new_value( + ctx: *mut bpf_sysctl, + buf: *mut ::aya_bpf_cty::c_char, + buf_len: ::aya_bpf_cty::c_ulong, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut bpf_sysctl, + buf: *mut ::aya_bpf_cty::c_char, + buf_len: ::aya_bpf_cty::c_ulong, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(103usize); + fun(ctx, buf, buf_len) +} +pub unsafe fn bpf_sysctl_set_new_value( + ctx: *mut bpf_sysctl, + buf: *const ::aya_bpf_cty::c_char, + buf_len: ::aya_bpf_cty::c_ulong, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut bpf_sysctl, + buf: *const ::aya_bpf_cty::c_char, + buf_len: ::aya_bpf_cty::c_ulong, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(104usize); + fun(ctx, buf, buf_len) +} +pub unsafe fn bpf_strtol( + buf: *const ::aya_bpf_cty::c_char, + buf_len: ::aya_bpf_cty::c_ulong, + flags: __u64, + res: *mut ::aya_bpf_cty::c_long, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + buf: *const ::aya_bpf_cty::c_char, + buf_len: ::aya_bpf_cty::c_ulong, + flags: __u64, + res: *mut ::aya_bpf_cty::c_long, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(105usize); + fun(buf, buf_len, flags, res) +} +pub unsafe fn bpf_strtoul( + buf: *const ::aya_bpf_cty::c_char, + buf_len: ::aya_bpf_cty::c_ulong, + flags: __u64, + res: *mut ::aya_bpf_cty::c_ulong, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + buf: *const ::aya_bpf_cty::c_char, + buf_len: ::aya_bpf_cty::c_ulong, + flags: __u64, + res: *mut ::aya_bpf_cty::c_ulong, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(106usize); + fun(buf, buf_len, flags, res) +} +pub unsafe fn bpf_sk_storage_get( + map: *mut ::aya_bpf_cty::c_void, + sk: *mut ::aya_bpf_cty::c_void, + value: *mut ::aya_bpf_cty::c_void, + flags: __u64, +) -> *mut ::aya_bpf_cty::c_void { + let fun: unsafe extern "C" fn( + map: *mut ::aya_bpf_cty::c_void, + sk: *mut ::aya_bpf_cty::c_void, + value: *mut ::aya_bpf_cty::c_void, + flags: __u64, + ) -> *mut ::aya_bpf_cty::c_void = ::core::mem::transmute(107usize); + fun(map, sk, value, flags) +} +pub unsafe fn bpf_sk_storage_delete( + map: *mut ::aya_bpf_cty::c_void, + sk: *mut ::aya_bpf_cty::c_void, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + map: *mut ::aya_bpf_cty::c_void, + sk: *mut ::aya_bpf_cty::c_void, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(108usize); + fun(map, sk) +} +pub unsafe fn bpf_send_signal(sig: __u32) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(sig: __u32) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(109usize); + fun(sig) +} +pub unsafe fn bpf_tcp_gen_syncookie( + sk: *mut ::aya_bpf_cty::c_void, + iph: *mut ::aya_bpf_cty::c_void, + iph_len: __u32, + th: *mut tcphdr, + th_len: __u32, +) -> __s64 { + let fun: unsafe extern "C" fn( + sk: *mut ::aya_bpf_cty::c_void, + iph: *mut ::aya_bpf_cty::c_void, + iph_len: __u32, + th: *mut tcphdr, + th_len: __u32, + ) -> __s64 = ::core::mem::transmute(110usize); + fun(sk, iph, iph_len, th, th_len) +} +pub unsafe fn bpf_skb_output( + ctx: *mut ::aya_bpf_cty::c_void, + map: *mut ::aya_bpf_cty::c_void, + flags: __u64, + data: *mut ::aya_bpf_cty::c_void, + size: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut ::aya_bpf_cty::c_void, + map: *mut ::aya_bpf_cty::c_void, + flags: __u64, + data: *mut ::aya_bpf_cty::c_void, + size: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(111usize); + fun(ctx, map, flags, data, size) +} +pub unsafe fn 
bpf_probe_read_user( + dst: *mut ::aya_bpf_cty::c_void, + size: __u32, + unsafe_ptr: *const ::aya_bpf_cty::c_void, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + dst: *mut ::aya_bpf_cty::c_void, + size: __u32, + unsafe_ptr: *const ::aya_bpf_cty::c_void, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(112usize); + fun(dst, size, unsafe_ptr) +} +pub unsafe fn bpf_probe_read_kernel( + dst: *mut ::aya_bpf_cty::c_void, + size: __u32, + unsafe_ptr: *const ::aya_bpf_cty::c_void, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + dst: *mut ::aya_bpf_cty::c_void, + size: __u32, + unsafe_ptr: *const ::aya_bpf_cty::c_void, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(113usize); + fun(dst, size, unsafe_ptr) +} +pub unsafe fn bpf_probe_read_user_str( + dst: *mut ::aya_bpf_cty::c_void, + size: __u32, + unsafe_ptr: *const ::aya_bpf_cty::c_void, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + dst: *mut ::aya_bpf_cty::c_void, + size: __u32, + unsafe_ptr: *const ::aya_bpf_cty::c_void, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(114usize); + fun(dst, size, unsafe_ptr) +} +pub unsafe fn bpf_probe_read_kernel_str( + dst: *mut ::aya_bpf_cty::c_void, + size: __u32, + unsafe_ptr: *const ::aya_bpf_cty::c_void, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + dst: *mut ::aya_bpf_cty::c_void, + size: __u32, + unsafe_ptr: *const ::aya_bpf_cty::c_void, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(115usize); + fun(dst, size, unsafe_ptr) +} +pub unsafe fn bpf_tcp_send_ack( + tp: *mut ::aya_bpf_cty::c_void, + rcv_nxt: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + tp: *mut ::aya_bpf_cty::c_void, + rcv_nxt: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(116usize); + fun(tp, rcv_nxt) +} +pub unsafe fn bpf_send_signal_thread(sig: __u32) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(sig: __u32) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(117usize); + fun(sig) +} +pub unsafe fn bpf_jiffies64() -> __u64 { + let fun: unsafe extern "C" fn() -> __u64 = ::core::mem::transmute(118usize); + fun() +} +pub unsafe fn bpf_read_branch_records( + ctx: *mut bpf_perf_event_data, + buf: *mut ::aya_bpf_cty::c_void, + size: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut bpf_perf_event_data, + buf: *mut ::aya_bpf_cty::c_void, + size: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(119usize); + fun(ctx, buf, size, flags) +} +pub unsafe fn bpf_get_ns_current_pid_tgid( + dev: __u64, + ino: __u64, + nsdata: *mut bpf_pidns_info, + size: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + dev: __u64, + ino: __u64, + nsdata: *mut bpf_pidns_info, + size: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(120usize); + fun(dev, ino, nsdata, size) +} +pub unsafe fn bpf_xdp_output( + ctx: *mut ::aya_bpf_cty::c_void, + map: *mut ::aya_bpf_cty::c_void, + flags: __u64, + data: *mut ::aya_bpf_cty::c_void, + size: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut ::aya_bpf_cty::c_void, + map: *mut ::aya_bpf_cty::c_void, + flags: __u64, + data: *mut ::aya_bpf_cty::c_void, + size: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(121usize); + fun(ctx, map, flags, data, size) +} +pub unsafe fn bpf_get_netns_cookie(ctx: *mut ::aya_bpf_cty::c_void) -> __u64 { + let fun: unsafe extern "C" fn(ctx: *mut ::aya_bpf_cty::c_void) -> __u64 = + 
::core::mem::transmute(122usize); + fun(ctx) +} +pub unsafe fn bpf_get_current_ancestor_cgroup_id(ancestor_level: ::aya_bpf_cty::c_int) -> __u64 { + let fun: unsafe extern "C" fn(ancestor_level: ::aya_bpf_cty::c_int) -> __u64 = + ::core::mem::transmute(123usize); + fun(ancestor_level) +} +pub unsafe fn bpf_sk_assign( + ctx: *mut ::aya_bpf_cty::c_void, + sk: *mut ::aya_bpf_cty::c_void, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut ::aya_bpf_cty::c_void, + sk: *mut ::aya_bpf_cty::c_void, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(124usize); + fun(ctx, sk, flags) +} +pub unsafe fn bpf_ktime_get_boot_ns() -> __u64 { + let fun: unsafe extern "C" fn() -> __u64 = ::core::mem::transmute(125usize); + fun() +} +pub unsafe fn bpf_seq_printf( + m: *mut seq_file, + fmt: *const ::aya_bpf_cty::c_char, + fmt_size: __u32, + data: *const ::aya_bpf_cty::c_void, + data_len: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + m: *mut seq_file, + fmt: *const ::aya_bpf_cty::c_char, + fmt_size: __u32, + data: *const ::aya_bpf_cty::c_void, + data_len: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(126usize); + fun(m, fmt, fmt_size, data, data_len) +} +pub unsafe fn bpf_seq_write( + m: *mut seq_file, + data: *const ::aya_bpf_cty::c_void, + len: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + m: *mut seq_file, + data: *const ::aya_bpf_cty::c_void, + len: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(127usize); + fun(m, data, len) +} +pub unsafe fn bpf_sk_cgroup_id(sk: *mut ::aya_bpf_cty::c_void) -> __u64 { + let fun: unsafe extern "C" fn(sk: *mut ::aya_bpf_cty::c_void) -> __u64 = + ::core::mem::transmute(128usize); + fun(sk) +} +pub unsafe fn bpf_sk_ancestor_cgroup_id( + sk: *mut ::aya_bpf_cty::c_void, + ancestor_level: ::aya_bpf_cty::c_int, +) -> __u64 { + let fun: unsafe extern "C" fn( + sk: *mut ::aya_bpf_cty::c_void, + ancestor_level: ::aya_bpf_cty::c_int, + ) -> __u64 = ::core::mem::transmute(129usize); + fun(sk, ancestor_level) +} +pub unsafe fn bpf_ringbuf_output( + ringbuf: *mut ::aya_bpf_cty::c_void, + data: *mut ::aya_bpf_cty::c_void, + size: __u64, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ringbuf: *mut ::aya_bpf_cty::c_void, + data: *mut ::aya_bpf_cty::c_void, + size: __u64, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(130usize); + fun(ringbuf, data, size, flags) +} +pub unsafe fn bpf_ringbuf_reserve( + ringbuf: *mut ::aya_bpf_cty::c_void, + size: __u64, + flags: __u64, +) -> *mut ::aya_bpf_cty::c_void { + let fun: unsafe extern "C" fn( + ringbuf: *mut ::aya_bpf_cty::c_void, + size: __u64, + flags: __u64, + ) -> *mut ::aya_bpf_cty::c_void = ::core::mem::transmute(131usize); + fun(ringbuf, size, flags) +} +pub unsafe fn bpf_ringbuf_submit(data: *mut ::aya_bpf_cty::c_void, flags: __u64) { + let fun: unsafe extern "C" fn(data: *mut ::aya_bpf_cty::c_void, flags: __u64) = + ::core::mem::transmute(132usize); + fun(data, flags) +} +pub unsafe fn bpf_ringbuf_discard(data: *mut ::aya_bpf_cty::c_void, flags: __u64) { + let fun: unsafe extern "C" fn(data: *mut ::aya_bpf_cty::c_void, flags: __u64) = + ::core::mem::transmute(133usize); + fun(data, flags) +} +pub unsafe fn bpf_ringbuf_query(ringbuf: *mut ::aya_bpf_cty::c_void, flags: __u64) -> __u64 { + let fun: unsafe extern "C" fn(ringbuf: *mut ::aya_bpf_cty::c_void, flags: __u64) -> __u64 = + ::core::mem::transmute(134usize); + fun(ringbuf, flags) +} +pub 
unsafe fn bpf_csum_level(skb: *mut __sk_buff, level: __u64) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(skb: *mut __sk_buff, level: __u64) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(135usize); + fun(skb, level) +} +pub unsafe fn bpf_skc_to_tcp6_sock(sk: *mut ::aya_bpf_cty::c_void) -> *mut tcp6_sock { + let fun: unsafe extern "C" fn(sk: *mut ::aya_bpf_cty::c_void) -> *mut tcp6_sock = + ::core::mem::transmute(136usize); + fun(sk) +} +pub unsafe fn bpf_skc_to_tcp_sock(sk: *mut ::aya_bpf_cty::c_void) -> *mut tcp_sock { + let fun: unsafe extern "C" fn(sk: *mut ::aya_bpf_cty::c_void) -> *mut tcp_sock = + ::core::mem::transmute(137usize); + fun(sk) +} +pub unsafe fn bpf_skc_to_tcp_timewait_sock( + sk: *mut ::aya_bpf_cty::c_void, +) -> *mut tcp_timewait_sock { + let fun: unsafe extern "C" fn(sk: *mut ::aya_bpf_cty::c_void) -> *mut tcp_timewait_sock = + ::core::mem::transmute(138usize); + fun(sk) +} +pub unsafe fn bpf_skc_to_tcp_request_sock(sk: *mut ::aya_bpf_cty::c_void) -> *mut tcp_request_sock { + let fun: unsafe extern "C" fn(sk: *mut ::aya_bpf_cty::c_void) -> *mut tcp_request_sock = + ::core::mem::transmute(139usize); + fun(sk) +} +pub unsafe fn bpf_skc_to_udp6_sock(sk: *mut ::aya_bpf_cty::c_void) -> *mut udp6_sock { + let fun: unsafe extern "C" fn(sk: *mut ::aya_bpf_cty::c_void) -> *mut udp6_sock = + ::core::mem::transmute(140usize); + fun(sk) +} +pub unsafe fn bpf_get_task_stack( + task: *mut task_struct, + buf: *mut ::aya_bpf_cty::c_void, + size: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + task: *mut task_struct, + buf: *mut ::aya_bpf_cty::c_void, + size: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(141usize); + fun(task, buf, size, flags) +} +pub unsafe fn bpf_load_hdr_opt( + skops: *mut bpf_sock_ops, + searchby_res: *mut ::aya_bpf_cty::c_void, + len: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skops: *mut bpf_sock_ops, + searchby_res: *mut ::aya_bpf_cty::c_void, + len: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(142usize); + fun(skops, searchby_res, len, flags) +} +pub unsafe fn bpf_store_hdr_opt( + skops: *mut bpf_sock_ops, + from: *const ::aya_bpf_cty::c_void, + len: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skops: *mut bpf_sock_ops, + from: *const ::aya_bpf_cty::c_void, + len: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(143usize); + fun(skops, from, len, flags) +} +pub unsafe fn bpf_reserve_hdr_opt( + skops: *mut bpf_sock_ops, + len: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skops: *mut bpf_sock_ops, + len: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(144usize); + fun(skops, len, flags) +} +pub unsafe fn bpf_inode_storage_get( + map: *mut ::aya_bpf_cty::c_void, + inode: *mut ::aya_bpf_cty::c_void, + value: *mut ::aya_bpf_cty::c_void, + flags: __u64, +) -> *mut ::aya_bpf_cty::c_void { + let fun: unsafe extern "C" fn( + map: *mut ::aya_bpf_cty::c_void, + inode: *mut ::aya_bpf_cty::c_void, + value: *mut ::aya_bpf_cty::c_void, + flags: __u64, + ) -> *mut ::aya_bpf_cty::c_void = ::core::mem::transmute(145usize); + fun(map, inode, value, flags) +} +pub unsafe fn bpf_inode_storage_delete( + map: *mut ::aya_bpf_cty::c_void, + inode: *mut ::aya_bpf_cty::c_void, +) -> ::aya_bpf_cty::c_int { + let fun: unsafe extern "C" fn( + map: *mut ::aya_bpf_cty::c_void, + inode: 
*mut ::aya_bpf_cty::c_void, + ) -> ::aya_bpf_cty::c_int = ::core::mem::transmute(146usize); + fun(map, inode) +} +pub unsafe fn bpf_d_path( + path: *mut path, + buf: *mut ::aya_bpf_cty::c_char, + sz: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + path: *mut path, + buf: *mut ::aya_bpf_cty::c_char, + sz: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(147usize); + fun(path, buf, sz) +} +pub unsafe fn bpf_copy_from_user( + dst: *mut ::aya_bpf_cty::c_void, + size: __u32, + user_ptr: *const ::aya_bpf_cty::c_void, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + dst: *mut ::aya_bpf_cty::c_void, + size: __u32, + user_ptr: *const ::aya_bpf_cty::c_void, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(148usize); + fun(dst, size, user_ptr) +} +pub unsafe fn bpf_snprintf_btf( + str_: *mut ::aya_bpf_cty::c_char, + str_size: __u32, + ptr: *mut btf_ptr, + btf_ptr_size: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + str_: *mut ::aya_bpf_cty::c_char, + str_size: __u32, + ptr: *mut btf_ptr, + btf_ptr_size: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(149usize); + fun(str_, str_size, ptr, btf_ptr_size, flags) +} +pub unsafe fn bpf_seq_printf_btf( + m: *mut seq_file, + ptr: *mut btf_ptr, + ptr_size: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + m: *mut seq_file, + ptr: *mut btf_ptr, + ptr_size: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(150usize); + fun(m, ptr, ptr_size, flags) +} +pub unsafe fn bpf_skb_cgroup_classid(skb: *mut __sk_buff) -> __u64 { + let fun: unsafe extern "C" fn(skb: *mut __sk_buff) -> __u64 = ::core::mem::transmute(151usize); + fun(skb) +} +pub unsafe fn bpf_redirect_neigh( + ifindex: __u32, + params: *mut bpf_redir_neigh, + plen: ::aya_bpf_cty::c_int, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ifindex: __u32, + params: *mut bpf_redir_neigh, + plen: ::aya_bpf_cty::c_int, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(152usize); + fun(ifindex, params, plen, flags) +} +pub unsafe fn bpf_per_cpu_ptr( + percpu_ptr: *const ::aya_bpf_cty::c_void, + cpu: __u32, +) -> *mut ::aya_bpf_cty::c_void { + let fun: unsafe extern "C" fn( + percpu_ptr: *const ::aya_bpf_cty::c_void, + cpu: __u32, + ) -> *mut ::aya_bpf_cty::c_void = ::core::mem::transmute(153usize); + fun(percpu_ptr, cpu) +} +pub unsafe fn bpf_this_cpu_ptr( + percpu_ptr: *const ::aya_bpf_cty::c_void, +) -> *mut ::aya_bpf_cty::c_void { + let fun: unsafe extern "C" fn( + percpu_ptr: *const ::aya_bpf_cty::c_void, + ) -> *mut ::aya_bpf_cty::c_void = ::core::mem::transmute(154usize); + fun(percpu_ptr) +} +pub unsafe fn bpf_redirect_peer(ifindex: __u32, flags: __u64) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(ifindex: __u32, flags: __u64) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(155usize); + fun(ifindex, flags) +} +pub unsafe fn bpf_task_storage_get( + map: *mut ::aya_bpf_cty::c_void, + task: *mut task_struct, + value: *mut ::aya_bpf_cty::c_void, + flags: __u64, +) -> *mut ::aya_bpf_cty::c_void { + let fun: unsafe extern "C" fn( + map: *mut ::aya_bpf_cty::c_void, + task: *mut task_struct, + value: *mut ::aya_bpf_cty::c_void, + flags: __u64, + ) -> *mut ::aya_bpf_cty::c_void = ::core::mem::transmute(156usize); + fun(map, task, value, flags) +} +pub unsafe fn bpf_task_storage_delete( + map: *mut ::aya_bpf_cty::c_void, + task: *mut task_struct, +) -> 
::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + map: *mut ::aya_bpf_cty::c_void, + task: *mut task_struct, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(157usize); + fun(map, task) +} +pub unsafe fn bpf_get_current_task_btf() -> *mut task_struct { + let fun: unsafe extern "C" fn() -> *mut task_struct = ::core::mem::transmute(158usize); + fun() +} +pub unsafe fn bpf_bprm_opts_set(bprm: *mut linux_binprm, flags: __u64) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(bprm: *mut linux_binprm, flags: __u64) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(159usize); + fun(bprm, flags) +} +pub unsafe fn bpf_ktime_get_coarse_ns() -> __u64 { + let fun: unsafe extern "C" fn() -> __u64 = ::core::mem::transmute(160usize); + fun() +} +pub unsafe fn bpf_ima_inode_hash( + inode: *mut inode, + dst: *mut ::aya_bpf_cty::c_void, + size: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + inode: *mut inode, + dst: *mut ::aya_bpf_cty::c_void, + size: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(161usize); + fun(inode, dst, size) +} +pub unsafe fn bpf_sock_from_file(file: *mut file) -> *mut socket { + let fun: unsafe extern "C" fn(file: *mut file) -> *mut socket = + ::core::mem::transmute(162usize); + fun(file) +} +pub unsafe fn bpf_check_mtu( + ctx: *mut ::aya_bpf_cty::c_void, + ifindex: __u32, + mtu_len: *mut __u32, + len_diff: __s32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut ::aya_bpf_cty::c_void, + ifindex: __u32, + mtu_len: *mut __u32, + len_diff: __s32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(163usize); + fun(ctx, ifindex, mtu_len, len_diff, flags) +} +pub unsafe fn bpf_for_each_map_elem( + map: *mut ::aya_bpf_cty::c_void, + callback_fn: *mut ::aya_bpf_cty::c_void, + callback_ctx: *mut ::aya_bpf_cty::c_void, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + map: *mut ::aya_bpf_cty::c_void, + callback_fn: *mut ::aya_bpf_cty::c_void, + callback_ctx: *mut ::aya_bpf_cty::c_void, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(164usize); + fun(map, callback_fn, callback_ctx, flags) +} +pub unsafe fn bpf_snprintf( + str_: *mut ::aya_bpf_cty::c_char, + str_size: __u32, + fmt: *const ::aya_bpf_cty::c_char, + data: *mut __u64, + data_len: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + str_: *mut ::aya_bpf_cty::c_char, + str_size: __u32, + fmt: *const ::aya_bpf_cty::c_char, + data: *mut __u64, + data_len: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(165usize); + fun(str_, str_size, fmt, data, data_len) +} +pub unsafe fn bpf_sys_bpf( + cmd: __u32, + attr: *mut ::aya_bpf_cty::c_void, + attr_size: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + cmd: __u32, + attr: *mut ::aya_bpf_cty::c_void, + attr_size: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(166usize); + fun(cmd, attr, attr_size) +} +pub unsafe fn bpf_btf_find_by_name_kind( + name: *mut ::aya_bpf_cty::c_char, + name_sz: ::aya_bpf_cty::c_int, + kind: __u32, + flags: ::aya_bpf_cty::c_int, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + name: *mut ::aya_bpf_cty::c_char, + name_sz: ::aya_bpf_cty::c_int, + kind: __u32, + flags: ::aya_bpf_cty::c_int, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(167usize); + fun(name, name_sz, kind, flags) +} +pub unsafe fn bpf_sys_close(fd: __u32) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(fd: __u32) -> 
::aya_bpf_cty::c_long = + ::core::mem::transmute(168usize); + fun(fd) +} +pub unsafe fn bpf_timer_init( + timer: *mut bpf_timer, + map: *mut ::aya_bpf_cty::c_void, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + timer: *mut bpf_timer, + map: *mut ::aya_bpf_cty::c_void, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(169usize); + fun(timer, map, flags) +} +pub unsafe fn bpf_timer_set_callback( + timer: *mut bpf_timer, + callback_fn: *mut ::aya_bpf_cty::c_void, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + timer: *mut bpf_timer, + callback_fn: *mut ::aya_bpf_cty::c_void, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(170usize); + fun(timer, callback_fn) +} +pub unsafe fn bpf_timer_start( + timer: *mut bpf_timer, + nsecs: __u64, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + timer: *mut bpf_timer, + nsecs: __u64, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(171usize); + fun(timer, nsecs, flags) +} +pub unsafe fn bpf_timer_cancel(timer: *mut bpf_timer) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(timer: *mut bpf_timer) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(172usize); + fun(timer) +} +pub unsafe fn bpf_get_func_ip(ctx: *mut ::aya_bpf_cty::c_void) -> __u64 { + let fun: unsafe extern "C" fn(ctx: *mut ::aya_bpf_cty::c_void) -> __u64 = + ::core::mem::transmute(173usize); + fun(ctx) +} +pub unsafe fn bpf_get_attach_cookie(ctx: *mut ::aya_bpf_cty::c_void) -> __u64 { + let fun: unsafe extern "C" fn(ctx: *mut ::aya_bpf_cty::c_void) -> __u64 = + ::core::mem::transmute(174usize); + fun(ctx) +} +pub unsafe fn bpf_task_pt_regs(task: *mut task_struct) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(task: *mut task_struct) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(175usize); + fun(task) +} +pub unsafe fn bpf_get_branch_snapshot( + entries: *mut ::aya_bpf_cty::c_void, + size: __u32, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + entries: *mut ::aya_bpf_cty::c_void, + size: __u32, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(176usize); + fun(entries, size, flags) +} +pub unsafe fn bpf_trace_vprintk( + fmt: *const ::aya_bpf_cty::c_char, + fmt_size: __u32, + data: *const ::aya_bpf_cty::c_void, + data_len: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + fmt: *const ::aya_bpf_cty::c_char, + fmt_size: __u32, + data: *const ::aya_bpf_cty::c_void, + data_len: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(177usize); + fun(fmt, fmt_size, data, data_len) +} +pub unsafe fn bpf_skc_to_unix_sock(sk: *mut ::aya_bpf_cty::c_void) -> *mut unix_sock { + let fun: unsafe extern "C" fn(sk: *mut ::aya_bpf_cty::c_void) -> *mut unix_sock = + ::core::mem::transmute(178usize); + fun(sk) +} +pub unsafe fn bpf_kallsyms_lookup_name( + name: *const ::aya_bpf_cty::c_char, + name_sz: ::aya_bpf_cty::c_int, + flags: ::aya_bpf_cty::c_int, + res: *mut __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + name: *const ::aya_bpf_cty::c_char, + name_sz: ::aya_bpf_cty::c_int, + flags: ::aya_bpf_cty::c_int, + res: *mut __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(179usize); + fun(name, name_sz, flags, res) +} +pub unsafe fn bpf_find_vma( + task: *mut task_struct, + addr: __u64, + callback_fn: *mut ::aya_bpf_cty::c_void, + callback_ctx: *mut ::aya_bpf_cty::c_void, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern 
"C" fn( + task: *mut task_struct, + addr: __u64, + callback_fn: *mut ::aya_bpf_cty::c_void, + callback_ctx: *mut ::aya_bpf_cty::c_void, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(180usize); + fun(task, addr, callback_fn, callback_ctx, flags) +} +pub unsafe fn bpf_loop( + nr_loops: __u32, + callback_fn: *mut ::aya_bpf_cty::c_void, + callback_ctx: *mut ::aya_bpf_cty::c_void, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + nr_loops: __u32, + callback_fn: *mut ::aya_bpf_cty::c_void, + callback_ctx: *mut ::aya_bpf_cty::c_void, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(181usize); + fun(nr_loops, callback_fn, callback_ctx, flags) +} +pub unsafe fn bpf_strncmp( + s1: *const ::aya_bpf_cty::c_char, + s1_sz: __u32, + s2: *const ::aya_bpf_cty::c_char, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + s1: *const ::aya_bpf_cty::c_char, + s1_sz: __u32, + s2: *const ::aya_bpf_cty::c_char, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(182usize); + fun(s1, s1_sz, s2) +} +pub unsafe fn bpf_get_func_arg( + ctx: *mut ::aya_bpf_cty::c_void, + n: __u32, + value: *mut __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut ::aya_bpf_cty::c_void, + n: __u32, + value: *mut __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(183usize); + fun(ctx, n, value) +} +pub unsafe fn bpf_get_func_ret( + ctx: *mut ::aya_bpf_cty::c_void, + value: *mut __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + ctx: *mut ::aya_bpf_cty::c_void, + value: *mut __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(184usize); + fun(ctx, value) +} +pub unsafe fn bpf_get_func_arg_cnt(ctx: *mut ::aya_bpf_cty::c_void) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn(ctx: *mut ::aya_bpf_cty::c_void) -> ::aya_bpf_cty::c_long = + ::core::mem::transmute(185usize); + fun(ctx) +} +pub unsafe fn bpf_get_retval() -> ::aya_bpf_cty::c_int { + let fun: unsafe extern "C" fn() -> ::aya_bpf_cty::c_int = ::core::mem::transmute(186usize); + fun() +} +pub unsafe fn bpf_set_retval(retval: ::aya_bpf_cty::c_int) -> ::aya_bpf_cty::c_int { + let fun: unsafe extern "C" fn(retval: ::aya_bpf_cty::c_int) -> ::aya_bpf_cty::c_int = + ::core::mem::transmute(187usize); + fun(retval) +} +pub unsafe fn bpf_xdp_get_buff_len(xdp_md: *mut xdp_md) -> __u64 { + let fun: unsafe extern "C" fn(xdp_md: *mut xdp_md) -> __u64 = ::core::mem::transmute(188usize); + fun(xdp_md) +} +pub unsafe fn bpf_xdp_load_bytes( + xdp_md: *mut xdp_md, + offset: __u32, + buf: *mut ::aya_bpf_cty::c_void, + len: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + xdp_md: *mut xdp_md, + offset: __u32, + buf: *mut ::aya_bpf_cty::c_void, + len: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(189usize); + fun(xdp_md, offset, buf, len) +} +pub unsafe fn bpf_xdp_store_bytes( + xdp_md: *mut xdp_md, + offset: __u32, + buf: *mut ::aya_bpf_cty::c_void, + len: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + xdp_md: *mut xdp_md, + offset: __u32, + buf: *mut ::aya_bpf_cty::c_void, + len: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(190usize); + fun(xdp_md, offset, buf, len) +} +pub unsafe fn bpf_copy_from_user_task( + dst: *mut ::aya_bpf_cty::c_void, + size: __u32, + user_ptr: *const ::aya_bpf_cty::c_void, + tsk: *mut task_struct, + flags: __u64, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + dst: *mut ::aya_bpf_cty::c_void, + size: __u32, + user_ptr: 
*const ::aya_bpf_cty::c_void, + tsk: *mut task_struct, + flags: __u64, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(191usize); + fun(dst, size, user_ptr, tsk, flags) +} +pub unsafe fn bpf_skb_set_tstamp( + skb: *mut __sk_buff, + tstamp: __u64, + tstamp_type: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + skb: *mut __sk_buff, + tstamp: __u64, + tstamp_type: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(192usize); + fun(skb, tstamp, tstamp_type) +} +pub unsafe fn bpf_ima_file_hash( + file: *mut file, + dst: *mut ::aya_bpf_cty::c_void, + size: __u32, +) -> ::aya_bpf_cty::c_long { + let fun: unsafe extern "C" fn( + file: *mut file, + dst: *mut ::aya_bpf_cty::c_void, + size: __u32, + ) -> ::aya_bpf_cty::c_long = ::core::mem::transmute(193usize); + fun(file, dst, size) +} +pub unsafe fn bpf_kptr_xchg( + map_value: *mut ::aya_bpf_cty::c_void, + ptr: *mut ::aya_bpf_cty::c_void, +) -> *mut ::aya_bpf_cty::c_void { + let fun: unsafe extern "C" fn( + map_value: *mut ::aya_bpf_cty::c_void, + ptr: *mut ::aya_bpf_cty::c_void, + ) -> *mut ::aya_bpf_cty::c_void = ::core::mem::transmute(194usize); + fun(map_value, ptr) +} +pub unsafe fn bpf_map_lookup_percpu_elem( + map: *mut ::aya_bpf_cty::c_void, + key: *const ::aya_bpf_cty::c_void, + cpu: __u32, +) -> *mut ::aya_bpf_cty::c_void { + let fun: unsafe extern "C" fn( + map: *mut ::aya_bpf_cty::c_void, + key: *const ::aya_bpf_cty::c_void, + cpu: __u32, + ) -> *mut ::aya_bpf_cty::c_void = ::core::mem::transmute(195usize); + fun(map, key, cpu) +}