Merge pull request #1155 from aya-rs/codegen

Update libbpf to 324f3c3846d99c8a1e1384a55591f893f0ae5de4
reviewable/pr690/r3
Dave Tucker 3 weeks ago committed by GitHub
commit 66da8742fe
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

@ -1,4 +1,4 @@
/* automatically generated by rust-bindgen 0.70.1 */
/* automatically generated by rust-bindgen 0.71.1 */
pub type __u8 = ::core::ffi::c_uchar;
pub type __u16 = ::core::ffi::c_ushort;

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

@ -14,10 +14,7 @@ where
Storage: AsRef<[u8]> + AsMut<[u8]>,
{
#[inline]
pub fn get_bit(&self, index: usize) -> bool {
debug_assert!(index / 8 < self.storage.as_ref().len());
let byte_index = index / 8;
let byte = self.storage.as_ref()[byte_index];
fn extract_bit(byte: u8, index: usize) -> bool {
let bit_index = if cfg!(target_endian = "big") {
7 - (index % 8)
} else {
@ -27,10 +24,21 @@ where
byte & mask == mask
}
#[inline]
pub fn set_bit(&mut self, index: usize, val: bool) {
pub fn get_bit(&self, index: usize) -> bool {
debug_assert!(index / 8 < self.storage.as_ref().len());
let byte_index = index / 8;
let byte = &mut self.storage.as_mut()[byte_index];
let byte = self.storage.as_ref()[byte_index];
Self::extract_bit(byte, index)
}
/// Raw-pointer variant of `get_bit`: reads the bit at `index` without
/// creating an intermediate `&self` reference (uses `addr_of!` so no
/// reference to the `storage` field is ever materialized).
///
/// # Safety
/// `this` must point to a valid, initialized `Self` that is readable.
#[inline]
pub unsafe fn raw_get_bit(this: *const Self, index: usize) -> bool {
// Bounds checked only in debug builds; release builds trust the caller.
debug_assert!(index / 8 < core::mem::size_of::<Storage>());
let byte_index = index / 8;
// Read the byte containing the requested bit directly out of `storage`.
let byte = *(core::ptr::addr_of!((*this).storage) as *const u8).offset(byte_index as isize);
Self::extract_bit(byte, index)
}
#[inline]
fn change_bit(byte: u8, index: usize, val: bool) -> u8 {
let bit_index = if cfg!(target_endian = "big") {
7 - (index % 8)
} else {
@ -38,10 +46,25 @@ where
};
let mask = 1 << bit_index;
if val {
*byte |= mask;
byte | mask
} else {
*byte &= !mask;
byte & !mask
}
}
/// Sets (`val == true`) or clears (`val == false`) the bit at `index`
/// in the backing `storage`, delegating the bit arithmetic to
/// `change_bit`. Debug-asserts that `index` is within the storage.
#[inline]
pub fn set_bit(&mut self, index: usize, val: bool) {
debug_assert!(index / 8 < self.storage.as_ref().len());
let byte_index = index / 8;
let byte = &mut self.storage.as_mut()[byte_index];
*byte = Self::change_bit(*byte, index, val);
}
/// Raw-pointer variant of `set_bit`: writes the bit at `index` through
/// `this` without creating a `&mut self` reference (`addr_of_mut!`
/// avoids materializing a reference to the `storage` field).
///
/// # Safety
/// `this` must point to a valid, initialized `Self` that is writable.
#[inline]
pub unsafe fn raw_set_bit(this: *mut Self, index: usize, val: bool) {
// Bounds checked only in debug builds; release builds trust the caller.
debug_assert!(index / 8 < core::mem::size_of::<Storage>());
let byte_index = index / 8;
let byte =
(core::ptr::addr_of_mut!((*this).storage) as *mut u8).offset(byte_index as isize);
*byte = Self::change_bit(*byte, index, val);
}
#[inline]
pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 {
@ -62,6 +85,24 @@ where
val
}
/// Raw-pointer variant of `get`: assembles the `bit_width`-bit value
/// that starts at `bit_offset`, reading each bit via `raw_get_bit`.
/// On big-endian targets the bit order within the field is reversed so
/// the returned integer has the same numeric meaning on both endians.
///
/// # Safety
/// `this` must point to a valid, initialized `Self` that is readable.
#[inline]
pub unsafe fn raw_get(this: *const Self, bit_offset: usize, bit_width: u8) -> u64 {
debug_assert!(bit_width <= 64);
debug_assert!(bit_offset / 8 < core::mem::size_of::<Storage>());
debug_assert!((bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::<Storage>());
let mut val = 0;
for i in 0..(bit_width as usize) {
if Self::raw_get_bit(this, i + bit_offset) {
// Mirror the bit position on big-endian so the result value
// is target-independent.
let index = if cfg!(target_endian = "big") {
bit_width as usize - 1 - i
} else {
i
};
val |= 1 << index;
}
}
val
}
#[inline]
pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) {
debug_assert!(bit_width <= 64);
debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
@ -77,6 +118,22 @@ where
self.set_bit(index + bit_offset, val_bit_is_set);
}
}
/// Raw-pointer variant of `set`: writes the low `bit_width` bits of
/// `val` into the bitfield starting at `bit_offset`, one bit at a time
/// via `raw_set_bit`. Big-endian targets mirror the bit position so the
/// stored field matches the little-endian layout semantics.
///
/// # Safety
/// `this` must point to a valid, initialized `Self` that is writable.
#[inline]
pub unsafe fn raw_set(this: *mut Self, bit_offset: usize, bit_width: u8, val: u64) {
debug_assert!(bit_width <= 64);
debug_assert!(bit_offset / 8 < core::mem::size_of::<Storage>());
debug_assert!((bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::<Storage>());
for i in 0..(bit_width as usize) {
let mask = 1 << i;
let val_bit_is_set = val & mask == mask;
// Mirror the bit position on big-endian targets (see `raw_get`).
let index = if cfg!(target_endian = "big") {
bit_width as usize - 1 - i
} else {
i
};
Self::raw_set_bit(this, index + bit_offset, val_bit_is_set);
}
}
}
#[repr(C)]
#[derive(Default)]
@ -211,6 +268,9 @@ pub const TC_ACT_REDIRECT: u32 = 7;
pub const TC_ACT_TRAP: u32 = 8;
pub const TC_ACT_VALUE_MAX: u32 = 8;
pub const TC_ACT_EXT_VAL_MASK: u32 = 268435455;
pub const TC_ACT_JUMP: u32 = 268435456;
pub const TC_ACT_GOTO_CHAIN: u32 = 536870912;
pub const TC_ACT_EXT_OPCODE_MAX: u32 = 536870912;
pub const SOL_SOCKET: u32 = 1;
pub const SO_DEBUG: u32 = 1;
pub const SO_REUSEADDR: u32 = 2;
@ -434,6 +494,28 @@ impl bpf_insn {
}
}
/// Raw-pointer getter for the 4-bit `dst_reg` bitfield (bits 0..4 of
/// `_bitfield_1`), readable without constructing a `&self`.
///
/// # Safety
/// `this` must point to a valid, initialized value that is readable.
#[inline]
pub unsafe fn dst_reg_raw(this: *const Self) -> __u8 {
unsafe {
::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 1usize]>>::raw_get(
::core::ptr::addr_of!((*this)._bitfield_1),
0usize,
4u8,
) as u8)
}
}
/// Raw-pointer setter for the 4-bit `dst_reg` bitfield (bits 0..4 of
/// `_bitfield_1`); only the low 4 bits of `val` are stored.
///
/// # Safety
/// `this` must point to a valid, initialized value that is writable.
#[inline]
pub unsafe fn set_dst_reg_raw(this: *mut Self, val: __u8) {
unsafe {
let val: u8 = ::core::mem::transmute(val);
<__BindgenBitfieldUnit<[u8; 1usize]>>::raw_set(
::core::ptr::addr_of_mut!((*this)._bitfield_1),
0usize,
4u8,
val as u64,
)
}
}
/// Returns the 4-bit `src_reg` bitfield (bits 4..8 of `_bitfield_1`).
#[inline]
pub fn src_reg(&self) -> __u8 {
unsafe { ::core::mem::transmute(self._bitfield_1.get(4usize, 4u8) as u8) }
}
@ -445,6 +527,28 @@ impl bpf_insn {
}
}
/// Raw-pointer getter for the 4-bit `src_reg` bitfield (bits 4..8 of
/// `_bitfield_1`), readable without constructing a `&self`.
///
/// # Safety
/// `this` must point to a valid, initialized value that is readable.
#[inline]
pub unsafe fn src_reg_raw(this: *const Self) -> __u8 {
unsafe {
::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 1usize]>>::raw_get(
::core::ptr::addr_of!((*this)._bitfield_1),
4usize,
4u8,
) as u8)
}
}
/// Raw-pointer setter for the 4-bit `src_reg` bitfield (bits 4..8 of
/// `_bitfield_1`); only the low 4 bits of `val` are stored.
///
/// # Safety
/// `this` must point to a valid, initialized value that is writable.
#[inline]
pub unsafe fn set_src_reg_raw(this: *mut Self, val: __u8) {
unsafe {
let val: u8 = ::core::mem::transmute(val);
<__BindgenBitfieldUnit<[u8; 1usize]>>::raw_set(
::core::ptr::addr_of_mut!((*this)._bitfield_1),
4usize,
4u8,
val as u64,
)
}
}
#[inline]
pub fn new_bitfield_1(dst_reg: __u8, src_reg: __u8) -> __BindgenBitfieldUnit<[u8; 1usize]> {
let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default();
__bindgen_bitfield_unit.set(0usize, 4u8, {
@ -1957,6 +2061,28 @@ impl bpf_prog_info {
}
}
/// Raw-pointer getter for the 1-bit `gpl_compatible` bitfield (bit 0 of
/// the 4-byte `_bitfield_1` unit), readable without a `&self`.
///
/// # Safety
/// `this` must point to a valid, initialized value that is readable.
#[inline]
pub unsafe fn gpl_compatible_raw(this: *const Self) -> __u32 {
unsafe {
::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 4usize]>>::raw_get(
::core::ptr::addr_of!((*this)._bitfield_1),
0usize,
1u8,
) as u32)
}
}
/// Raw-pointer setter for the 1-bit `gpl_compatible` bitfield (bit 0 of
/// the 4-byte `_bitfield_1` unit); only the low bit of `val` is stored.
///
/// # Safety
/// `this` must point to a valid, initialized value that is writable.
#[inline]
pub unsafe fn set_gpl_compatible_raw(this: *mut Self, val: __u32) {
unsafe {
let val: u32 = ::core::mem::transmute(val);
<__BindgenBitfieldUnit<[u8; 4usize]>>::raw_set(
::core::ptr::addr_of_mut!((*this)._bitfield_1),
0usize,
1u8,
val as u64,
)
}
}
#[inline]
pub fn new_bitfield_1(gpl_compatible: __u32) -> __BindgenBitfieldUnit<[u8; 4usize]> {
let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default();
__bindgen_bitfield_unit.set(0usize, 1u8, {

@ -14,10 +14,7 @@ where
Storage: AsRef<[u8]> + AsMut<[u8]>,
{
#[inline]
pub fn get_bit(&self, index: usize) -> bool {
debug_assert!(index / 8 < self.storage.as_ref().len());
let byte_index = index / 8;
let byte = self.storage.as_ref()[byte_index];
fn extract_bit(byte: u8, index: usize) -> bool {
let bit_index = if cfg!(target_endian = "big") {
7 - (index % 8)
} else {
@ -27,10 +24,21 @@ where
byte & mask == mask
}
#[inline]
pub fn set_bit(&mut self, index: usize, val: bool) {
pub fn get_bit(&self, index: usize) -> bool {
debug_assert!(index / 8 < self.storage.as_ref().len());
let byte_index = index / 8;
let byte = &mut self.storage.as_mut()[byte_index];
let byte = self.storage.as_ref()[byte_index];
Self::extract_bit(byte, index)
}
#[inline]
pub unsafe fn raw_get_bit(this: *const Self, index: usize) -> bool {
debug_assert!(index / 8 < core::mem::size_of::<Storage>());
let byte_index = index / 8;
let byte = *(core::ptr::addr_of!((*this).storage) as *const u8).offset(byte_index as isize);
Self::extract_bit(byte, index)
}
#[inline]
fn change_bit(byte: u8, index: usize, val: bool) -> u8 {
let bit_index = if cfg!(target_endian = "big") {
7 - (index % 8)
} else {
@ -38,10 +46,25 @@ where
};
let mask = 1 << bit_index;
if val {
*byte |= mask;
byte | mask
} else {
*byte &= !mask;
byte & !mask
}
}
#[inline]
pub fn set_bit(&mut self, index: usize, val: bool) {
debug_assert!(index / 8 < self.storage.as_ref().len());
let byte_index = index / 8;
let byte = &mut self.storage.as_mut()[byte_index];
*byte = Self::change_bit(*byte, index, val);
}
#[inline]
pub unsafe fn raw_set_bit(this: *mut Self, index: usize, val: bool) {
debug_assert!(index / 8 < core::mem::size_of::<Storage>());
let byte_index = index / 8;
let byte =
(core::ptr::addr_of_mut!((*this).storage) as *mut u8).offset(byte_index as isize);
*byte = Self::change_bit(*byte, index, val);
}
#[inline]
pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 {
@ -62,6 +85,24 @@ where
val
}
#[inline]
pub unsafe fn raw_get(this: *const Self, bit_offset: usize, bit_width: u8) -> u64 {
debug_assert!(bit_width <= 64);
debug_assert!(bit_offset / 8 < core::mem::size_of::<Storage>());
debug_assert!((bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::<Storage>());
let mut val = 0;
for i in 0..(bit_width as usize) {
if Self::raw_get_bit(this, i + bit_offset) {
let index = if cfg!(target_endian = "big") {
bit_width as usize - 1 - i
} else {
i
};
val |= 1 << index;
}
}
val
}
#[inline]
pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) {
debug_assert!(bit_width <= 64);
debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
@ -77,6 +118,22 @@ where
self.set_bit(index + bit_offset, val_bit_is_set);
}
}
#[inline]
pub unsafe fn raw_set(this: *mut Self, bit_offset: usize, bit_width: u8, val: u64) {
debug_assert!(bit_width <= 64);
debug_assert!(bit_offset / 8 < core::mem::size_of::<Storage>());
debug_assert!((bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::<Storage>());
for i in 0..(bit_width as usize) {
let mask = 1 << i;
let val_bit_is_set = val & mask == mask;
let index = if cfg!(target_endian = "big") {
bit_width as usize - 1 - i
} else {
i
};
Self::raw_set_bit(this, index + bit_offset, val_bit_is_set);
}
}
}
#[repr(C)]
#[derive(Default)]
@ -211,6 +268,9 @@ pub const TC_ACT_REDIRECT: u32 = 7;
pub const TC_ACT_TRAP: u32 = 8;
pub const TC_ACT_VALUE_MAX: u32 = 8;
pub const TC_ACT_EXT_VAL_MASK: u32 = 268435455;
pub const TC_ACT_JUMP: u32 = 268435456;
pub const TC_ACT_GOTO_CHAIN: u32 = 536870912;
pub const TC_ACT_EXT_OPCODE_MAX: u32 = 536870912;
pub const SOL_SOCKET: u32 = 1;
pub const SO_DEBUG: u32 = 1;
pub const SO_REUSEADDR: u32 = 2;
@ -280,6 +340,11 @@ pub const SO_TIMESTAMPING_NEW: u32 = 65;
pub const SO_RCVTIMEO_NEW: u32 = 66;
pub const SO_SNDTIMEO_NEW: u32 = 67;
pub const SO_DETACH_REUSEPORT_BPF: u32 = 68;
pub const SO_TIMESTAMP: u32 = 29;
pub const SO_TIMESTAMPNS: u32 = 35;
pub const SO_TIMESTAMPING: u32 = 37;
pub const SO_RCVTIMEO: u32 = 20;
pub const SO_SNDTIMEO: u32 = 21;
pub type __u8 = ::aya_ebpf_cty::c_uchar;
pub type __s16 = ::aya_ebpf_cty::c_short;
pub type __u16 = ::aya_ebpf_cty::c_ushort;
@ -424,6 +489,28 @@ impl bpf_insn {
}
}
#[inline]
pub unsafe fn dst_reg_raw(this: *const Self) -> __u8 {
unsafe {
::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 1usize]>>::raw_get(
::core::ptr::addr_of!((*this)._bitfield_1),
0usize,
4u8,
) as u8)
}
}
#[inline]
pub unsafe fn set_dst_reg_raw(this: *mut Self, val: __u8) {
unsafe {
let val: u8 = ::core::mem::transmute(val);
<__BindgenBitfieldUnit<[u8; 1usize]>>::raw_set(
::core::ptr::addr_of_mut!((*this)._bitfield_1),
0usize,
4u8,
val as u64,
)
}
}
#[inline]
pub fn src_reg(&self) -> __u8 {
unsafe { ::core::mem::transmute(self._bitfield_1.get(4usize, 4u8) as u8) }
}
@ -435,6 +522,28 @@ impl bpf_insn {
}
}
#[inline]
pub unsafe fn src_reg_raw(this: *const Self) -> __u8 {
unsafe {
::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 1usize]>>::raw_get(
::core::ptr::addr_of!((*this)._bitfield_1),
4usize,
4u8,
) as u8)
}
}
#[inline]
pub unsafe fn set_src_reg_raw(this: *mut Self, val: __u8) {
unsafe {
let val: u8 = ::core::mem::transmute(val);
<__BindgenBitfieldUnit<[u8; 1usize]>>::raw_set(
::core::ptr::addr_of_mut!((*this)._bitfield_1),
4usize,
4u8,
val as u64,
)
}
}
#[inline]
pub fn new_bitfield_1(dst_reg: __u8, src_reg: __u8) -> __BindgenBitfieldUnit<[u8; 1usize]> {
let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default();
__bindgen_bitfield_unit.set(0usize, 4u8, {
@ -1956,6 +2065,28 @@ impl bpf_prog_info {
}
}
#[inline]
pub unsafe fn gpl_compatible_raw(this: *const Self) -> __u32 {
unsafe {
::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 4usize]>>::raw_get(
::core::ptr::addr_of!((*this)._bitfield_1),
0usize,
1u8,
) as u32)
}
}
#[inline]
pub unsafe fn set_gpl_compatible_raw(this: *mut Self, val: __u32) {
unsafe {
let val: u32 = ::core::mem::transmute(val);
<__BindgenBitfieldUnit<[u8; 4usize]>>::raw_set(
::core::ptr::addr_of_mut!((*this)._bitfield_1),
0usize,
1u8,
val as u64,
)
}
}
#[inline]
pub fn new_bitfield_1(gpl_compatible: __u32) -> __BindgenBitfieldUnit<[u8; 4usize]> {
let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default();
__bindgen_bitfield_unit.set(0usize, 1u8, {

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

@ -14,10 +14,7 @@ where
Storage: AsRef<[u8]> + AsMut<[u8]>,
{
#[inline]
pub fn get_bit(&self, index: usize) -> bool {
debug_assert!(index / 8 < self.storage.as_ref().len());
let byte_index = index / 8;
let byte = self.storage.as_ref()[byte_index];
fn extract_bit(byte: u8, index: usize) -> bool {
let bit_index = if cfg!(target_endian = "big") {
7 - (index % 8)
} else {
@ -27,10 +24,21 @@ where
byte & mask == mask
}
#[inline]
pub fn set_bit(&mut self, index: usize, val: bool) {
pub fn get_bit(&self, index: usize) -> bool {
debug_assert!(index / 8 < self.storage.as_ref().len());
let byte_index = index / 8;
let byte = &mut self.storage.as_mut()[byte_index];
let byte = self.storage.as_ref()[byte_index];
Self::extract_bit(byte, index)
}
#[inline]
pub unsafe fn raw_get_bit(this: *const Self, index: usize) -> bool {
debug_assert!(index / 8 < core::mem::size_of::<Storage>());
let byte_index = index / 8;
let byte = *(core::ptr::addr_of!((*this).storage) as *const u8).offset(byte_index as isize);
Self::extract_bit(byte, index)
}
#[inline]
fn change_bit(byte: u8, index: usize, val: bool) -> u8 {
let bit_index = if cfg!(target_endian = "big") {
7 - (index % 8)
} else {
@ -38,10 +46,25 @@ where
};
let mask = 1 << bit_index;
if val {
*byte |= mask;
byte | mask
} else {
*byte &= !mask;
byte & !mask
}
}
#[inline]
pub fn set_bit(&mut self, index: usize, val: bool) {
debug_assert!(index / 8 < self.storage.as_ref().len());
let byte_index = index / 8;
let byte = &mut self.storage.as_mut()[byte_index];
*byte = Self::change_bit(*byte, index, val);
}
#[inline]
pub unsafe fn raw_set_bit(this: *mut Self, index: usize, val: bool) {
debug_assert!(index / 8 < core::mem::size_of::<Storage>());
let byte_index = index / 8;
let byte =
(core::ptr::addr_of_mut!((*this).storage) as *mut u8).offset(byte_index as isize);
*byte = Self::change_bit(*byte, index, val);
}
#[inline]
pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 {
@ -62,6 +85,24 @@ where
val
}
#[inline]
pub unsafe fn raw_get(this: *const Self, bit_offset: usize, bit_width: u8) -> u64 {
debug_assert!(bit_width <= 64);
debug_assert!(bit_offset / 8 < core::mem::size_of::<Storage>());
debug_assert!((bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::<Storage>());
let mut val = 0;
for i in 0..(bit_width as usize) {
if Self::raw_get_bit(this, i + bit_offset) {
let index = if cfg!(target_endian = "big") {
bit_width as usize - 1 - i
} else {
i
};
val |= 1 << index;
}
}
val
}
#[inline]
pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) {
debug_assert!(bit_width <= 64);
debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
@ -77,6 +118,22 @@ where
self.set_bit(index + bit_offset, val_bit_is_set);
}
}
#[inline]
pub unsafe fn raw_set(this: *mut Self, bit_offset: usize, bit_width: u8, val: u64) {
debug_assert!(bit_width <= 64);
debug_assert!(bit_offset / 8 < core::mem::size_of::<Storage>());
debug_assert!((bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::<Storage>());
for i in 0..(bit_width as usize) {
let mask = 1 << i;
let val_bit_is_set = val & mask == mask;
let index = if cfg!(target_endian = "big") {
bit_width as usize - 1 - i
} else {
i
};
Self::raw_set_bit(this, index + bit_offset, val_bit_is_set);
}
}
}
#[repr(C)]
#[derive(Default)]
@ -211,6 +268,9 @@ pub const TC_ACT_REDIRECT: u32 = 7;
pub const TC_ACT_TRAP: u32 = 8;
pub const TC_ACT_VALUE_MAX: u32 = 8;
pub const TC_ACT_EXT_VAL_MASK: u32 = 268435455;
pub const TC_ACT_JUMP: u32 = 268435456;
pub const TC_ACT_GOTO_CHAIN: u32 = 536870912;
pub const TC_ACT_EXT_OPCODE_MAX: u32 = 536870912;
pub const SO_RCVLOWAT: u32 = 16;
pub const SO_SNDLOWAT: u32 = 17;
pub const SO_RCVTIMEO_OLD: u32 = 18;
@ -429,6 +489,28 @@ impl bpf_insn {
}
}
#[inline]
pub unsafe fn dst_reg_raw(this: *const Self) -> __u8 {
unsafe {
::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 1usize]>>::raw_get(
::core::ptr::addr_of!((*this)._bitfield_1),
0usize,
4u8,
) as u8)
}
}
#[inline]
pub unsafe fn set_dst_reg_raw(this: *mut Self, val: __u8) {
unsafe {
let val: u8 = ::core::mem::transmute(val);
<__BindgenBitfieldUnit<[u8; 1usize]>>::raw_set(
::core::ptr::addr_of_mut!((*this)._bitfield_1),
0usize,
4u8,
val as u64,
)
}
}
#[inline]
pub fn src_reg(&self) -> __u8 {
unsafe { ::core::mem::transmute(self._bitfield_1.get(4usize, 4u8) as u8) }
}
@ -440,6 +522,28 @@ impl bpf_insn {
}
}
#[inline]
pub unsafe fn src_reg_raw(this: *const Self) -> __u8 {
unsafe {
::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 1usize]>>::raw_get(
::core::ptr::addr_of!((*this)._bitfield_1),
4usize,
4u8,
) as u8)
}
}
#[inline]
pub unsafe fn set_src_reg_raw(this: *mut Self, val: __u8) {
unsafe {
let val: u8 = ::core::mem::transmute(val);
<__BindgenBitfieldUnit<[u8; 1usize]>>::raw_set(
::core::ptr::addr_of_mut!((*this)._bitfield_1),
4usize,
4u8,
val as u64,
)
}
}
#[inline]
pub fn new_bitfield_1(dst_reg: __u8, src_reg: __u8) -> __BindgenBitfieldUnit<[u8; 1usize]> {
let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default();
__bindgen_bitfield_unit.set(0usize, 4u8, {
@ -1952,6 +2056,28 @@ impl bpf_prog_info {
}
}
#[inline]
pub unsafe fn gpl_compatible_raw(this: *const Self) -> __u32 {
unsafe {
::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 4usize]>>::raw_get(
::core::ptr::addr_of!((*this)._bitfield_1),
0usize,
1u8,
) as u32)
}
}
#[inline]
pub unsafe fn set_gpl_compatible_raw(this: *mut Self, val: __u32) {
unsafe {
let val: u32 = ::core::mem::transmute(val);
<__BindgenBitfieldUnit<[u8; 4usize]>>::raw_set(
::core::ptr::addr_of_mut!((*this)._bitfield_1),
0usize,
1u8,
val as u64,
)
}
}
#[inline]
pub fn new_bitfield_1(gpl_compatible: __u32) -> __BindgenBitfieldUnit<[u8; 4usize]> {
let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default();
__bindgen_bitfield_unit.set(0usize, 1u8, {

@ -14,10 +14,7 @@ where
Storage: AsRef<[u8]> + AsMut<[u8]>,
{
#[inline]
pub fn get_bit(&self, index: usize) -> bool {
debug_assert!(index / 8 < self.storage.as_ref().len());
let byte_index = index / 8;
let byte = self.storage.as_ref()[byte_index];
fn extract_bit(byte: u8, index: usize) -> bool {
let bit_index = if cfg!(target_endian = "big") {
7 - (index % 8)
} else {
@ -27,10 +24,21 @@ where
byte & mask == mask
}
#[inline]
pub fn set_bit(&mut self, index: usize, val: bool) {
pub fn get_bit(&self, index: usize) -> bool {
debug_assert!(index / 8 < self.storage.as_ref().len());
let byte_index = index / 8;
let byte = &mut self.storage.as_mut()[byte_index];
let byte = self.storage.as_ref()[byte_index];
Self::extract_bit(byte, index)
}
#[inline]
pub unsafe fn raw_get_bit(this: *const Self, index: usize) -> bool {
debug_assert!(index / 8 < core::mem::size_of::<Storage>());
let byte_index = index / 8;
let byte = *(core::ptr::addr_of!((*this).storage) as *const u8).offset(byte_index as isize);
Self::extract_bit(byte, index)
}
#[inline]
fn change_bit(byte: u8, index: usize, val: bool) -> u8 {
let bit_index = if cfg!(target_endian = "big") {
7 - (index % 8)
} else {
@ -38,10 +46,25 @@ where
};
let mask = 1 << bit_index;
if val {
*byte |= mask;
byte | mask
} else {
*byte &= !mask;
byte & !mask
}
}
#[inline]
pub fn set_bit(&mut self, index: usize, val: bool) {
debug_assert!(index / 8 < self.storage.as_ref().len());
let byte_index = index / 8;
let byte = &mut self.storage.as_mut()[byte_index];
*byte = Self::change_bit(*byte, index, val);
}
#[inline]
pub unsafe fn raw_set_bit(this: *mut Self, index: usize, val: bool) {
debug_assert!(index / 8 < core::mem::size_of::<Storage>());
let byte_index = index / 8;
let byte =
(core::ptr::addr_of_mut!((*this).storage) as *mut u8).offset(byte_index as isize);
*byte = Self::change_bit(*byte, index, val);
}
#[inline]
pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 {
@ -62,6 +85,24 @@ where
val
}
#[inline]
pub unsafe fn raw_get(this: *const Self, bit_offset: usize, bit_width: u8) -> u64 {
debug_assert!(bit_width <= 64);
debug_assert!(bit_offset / 8 < core::mem::size_of::<Storage>());
debug_assert!((bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::<Storage>());
let mut val = 0;
for i in 0..(bit_width as usize) {
if Self::raw_get_bit(this, i + bit_offset) {
let index = if cfg!(target_endian = "big") {
bit_width as usize - 1 - i
} else {
i
};
val |= 1 << index;
}
}
val
}
#[inline]
pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) {
debug_assert!(bit_width <= 64);
debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
@ -77,6 +118,22 @@ where
self.set_bit(index + bit_offset, val_bit_is_set);
}
}
#[inline]
pub unsafe fn raw_set(this: *mut Self, bit_offset: usize, bit_width: u8, val: u64) {
debug_assert!(bit_width <= 64);
debug_assert!(bit_offset / 8 < core::mem::size_of::<Storage>());
debug_assert!((bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::<Storage>());
for i in 0..(bit_width as usize) {
let mask = 1 << i;
let val_bit_is_set = val & mask == mask;
let index = if cfg!(target_endian = "big") {
bit_width as usize - 1 - i
} else {
i
};
Self::raw_set_bit(this, index + bit_offset, val_bit_is_set);
}
}
}
#[repr(C)]
#[derive(Default)]
@ -211,6 +268,9 @@ pub const TC_ACT_REDIRECT: u32 = 7;
pub const TC_ACT_TRAP: u32 = 8;
pub const TC_ACT_VALUE_MAX: u32 = 8;
pub const TC_ACT_EXT_VAL_MASK: u32 = 268435455;
pub const TC_ACT_JUMP: u32 = 268435456;
pub const TC_ACT_GOTO_CHAIN: u32 = 536870912;
pub const TC_ACT_EXT_OPCODE_MAX: u32 = 536870912;
pub const SOL_SOCKET: u32 = 1;
pub const SO_DEBUG: u32 = 1;
pub const SO_REUSEADDR: u32 = 2;
@ -434,6 +494,28 @@ impl bpf_insn {
}
}
#[inline]
pub unsafe fn dst_reg_raw(this: *const Self) -> __u8 {
unsafe {
::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 1usize]>>::raw_get(
::core::ptr::addr_of!((*this)._bitfield_1),
0usize,
4u8,
) as u8)
}
}
#[inline]
pub unsafe fn set_dst_reg_raw(this: *mut Self, val: __u8) {
unsafe {
let val: u8 = ::core::mem::transmute(val);
<__BindgenBitfieldUnit<[u8; 1usize]>>::raw_set(
::core::ptr::addr_of_mut!((*this)._bitfield_1),
0usize,
4u8,
val as u64,
)
}
}
#[inline]
pub fn src_reg(&self) -> __u8 {
unsafe { ::core::mem::transmute(self._bitfield_1.get(4usize, 4u8) as u8) }
}
@ -445,6 +527,28 @@ impl bpf_insn {
}
}
#[inline]
pub unsafe fn src_reg_raw(this: *const Self) -> __u8 {
unsafe {
::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 1usize]>>::raw_get(
::core::ptr::addr_of!((*this)._bitfield_1),
4usize,
4u8,
) as u8)
}
}
#[inline]
pub unsafe fn set_src_reg_raw(this: *mut Self, val: __u8) {
unsafe {
let val: u8 = ::core::mem::transmute(val);
<__BindgenBitfieldUnit<[u8; 1usize]>>::raw_set(
::core::ptr::addr_of_mut!((*this)._bitfield_1),
4usize,
4u8,
val as u64,
)
}
}
#[inline]
pub fn new_bitfield_1(dst_reg: __u8, src_reg: __u8) -> __BindgenBitfieldUnit<[u8; 1usize]> {
let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default();
__bindgen_bitfield_unit.set(0usize, 4u8, {
@ -1957,6 +2061,28 @@ impl bpf_prog_info {
}
}
#[inline]
pub unsafe fn gpl_compatible_raw(this: *const Self) -> __u32 {
unsafe {
::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 4usize]>>::raw_get(
::core::ptr::addr_of!((*this)._bitfield_1),
0usize,
1u8,
) as u32)
}
}
#[inline]
pub unsafe fn set_gpl_compatible_raw(this: *mut Self, val: __u32) {
unsafe {
let val: u32 = ::core::mem::transmute(val);
<__BindgenBitfieldUnit<[u8; 4usize]>>::raw_set(
::core::ptr::addr_of_mut!((*this)._bitfield_1),
0usize,
1u8,
val as u64,
)
}
}
#[inline]
pub fn new_bitfield_1(gpl_compatible: __u32) -> __BindgenBitfieldUnit<[u8; 4usize]> {
let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default();
__bindgen_bitfield_unit.set(0usize, 1u8, {

@ -14,10 +14,7 @@ where
Storage: AsRef<[u8]> + AsMut<[u8]>,
{
#[inline]
pub fn get_bit(&self, index: usize) -> bool {
debug_assert!(index / 8 < self.storage.as_ref().len());
let byte_index = index / 8;
let byte = self.storage.as_ref()[byte_index];
fn extract_bit(byte: u8, index: usize) -> bool {
let bit_index = if cfg!(target_endian = "big") {
7 - (index % 8)
} else {
@ -27,10 +24,21 @@ where
byte & mask == mask
}
#[inline]
pub fn set_bit(&mut self, index: usize, val: bool) {
pub fn get_bit(&self, index: usize) -> bool {
debug_assert!(index / 8 < self.storage.as_ref().len());
let byte_index = index / 8;
let byte = &mut self.storage.as_mut()[byte_index];
let byte = self.storage.as_ref()[byte_index];
Self::extract_bit(byte, index)
}
#[inline]
pub unsafe fn raw_get_bit(this: *const Self, index: usize) -> bool {
debug_assert!(index / 8 < core::mem::size_of::<Storage>());
let byte_index = index / 8;
let byte = *(core::ptr::addr_of!((*this).storage) as *const u8).offset(byte_index as isize);
Self::extract_bit(byte, index)
}
#[inline]
fn change_bit(byte: u8, index: usize, val: bool) -> u8 {
let bit_index = if cfg!(target_endian = "big") {
7 - (index % 8)
} else {
@ -38,12 +46,27 @@ where
};
let mask = 1 << bit_index;
if val {
*byte |= mask;
byte | mask
} else {
*byte &= !mask;
byte & !mask
}
}
#[inline]
pub fn set_bit(&mut self, index: usize, val: bool) {
debug_assert!(index / 8 < self.storage.as_ref().len());
let byte_index = index / 8;
let byte = &mut self.storage.as_mut()[byte_index];
*byte = Self::change_bit(*byte, index, val);
}
#[inline]
pub unsafe fn raw_set_bit(this: *mut Self, index: usize, val: bool) {
debug_assert!(index / 8 < core::mem::size_of::<Storage>());
let byte_index = index / 8;
let byte =
(core::ptr::addr_of_mut!((*this).storage) as *mut u8).offset(byte_index as isize);
*byte = Self::change_bit(*byte, index, val);
}
#[inline]
pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 {
debug_assert!(bit_width <= 64);
debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
@ -62,6 +85,24 @@ where
val
}
#[inline]
pub unsafe fn raw_get(this: *const Self, bit_offset: usize, bit_width: u8) -> u64 {
debug_assert!(bit_width <= 64);
debug_assert!(bit_offset / 8 < core::mem::size_of::<Storage>());
debug_assert!((bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::<Storage>());
let mut val = 0;
for i in 0..(bit_width as usize) {
if Self::raw_get_bit(this, i + bit_offset) {
let index = if cfg!(target_endian = "big") {
bit_width as usize - 1 - i
} else {
i
};
val |= 1 << index;
}
}
val
}
#[inline]
pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) {
debug_assert!(bit_width <= 64);
debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
@ -77,6 +118,22 @@ where
self.set_bit(index + bit_offset, val_bit_is_set);
}
}
#[inline]
pub unsafe fn raw_set(this: *mut Self, bit_offset: usize, bit_width: u8, val: u64) {
debug_assert!(bit_width <= 64);
debug_assert!(bit_offset / 8 < core::mem::size_of::<Storage>());
debug_assert!((bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::<Storage>());
for i in 0..(bit_width as usize) {
let mask = 1 << i;
let val_bit_is_set = val & mask == mask;
let index = if cfg!(target_endian = "big") {
bit_width as usize - 1 - i
} else {
i
};
Self::raw_set_bit(this, index + bit_offset, val_bit_is_set);
}
}
}
#[repr(C)]
#[derive(Default)]
@ -211,6 +268,9 @@ pub const TC_ACT_REDIRECT: u32 = 7;
pub const TC_ACT_TRAP: u32 = 8;
pub const TC_ACT_VALUE_MAX: u32 = 8;
pub const TC_ACT_EXT_VAL_MASK: u32 = 268435455;
pub const TC_ACT_JUMP: u32 = 268435456;
pub const TC_ACT_GOTO_CHAIN: u32 = 536870912;
pub const TC_ACT_EXT_OPCODE_MAX: u32 = 536870912;
pub const SOL_SOCKET: u32 = 1;
pub const SO_DEBUG: u32 = 1;
pub const SO_REUSEADDR: u32 = 2;
@ -434,6 +494,28 @@ impl bpf_insn {
}
}
#[inline]
pub unsafe fn dst_reg_raw(this: *const Self) -> __u8 {
unsafe {
::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 1usize]>>::raw_get(
::core::ptr::addr_of!((*this)._bitfield_1),
0usize,
4u8,
) as u8)
}
}
#[inline]
pub unsafe fn set_dst_reg_raw(this: *mut Self, val: __u8) {
unsafe {
let val: u8 = ::core::mem::transmute(val);
<__BindgenBitfieldUnit<[u8; 1usize]>>::raw_set(
::core::ptr::addr_of_mut!((*this)._bitfield_1),
0usize,
4u8,
val as u64,
)
}
}
#[inline]
pub fn src_reg(&self) -> __u8 {
unsafe { ::core::mem::transmute(self._bitfield_1.get(4usize, 4u8) as u8) }
}
@ -445,6 +527,28 @@ impl bpf_insn {
}
}
#[inline]
pub unsafe fn src_reg_raw(this: *const Self) -> __u8 {
unsafe {
::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 1usize]>>::raw_get(
::core::ptr::addr_of!((*this)._bitfield_1),
4usize,
4u8,
) as u8)
}
}
#[inline]
pub unsafe fn set_src_reg_raw(this: *mut Self, val: __u8) {
unsafe {
let val: u8 = ::core::mem::transmute(val);
<__BindgenBitfieldUnit<[u8; 1usize]>>::raw_set(
::core::ptr::addr_of_mut!((*this)._bitfield_1),
4usize,
4u8,
val as u64,
)
}
}
#[inline]
pub fn new_bitfield_1(dst_reg: __u8, src_reg: __u8) -> __BindgenBitfieldUnit<[u8; 1usize]> {
let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default();
__bindgen_bitfield_unit.set(0usize, 4u8, {
@ -1957,6 +2061,28 @@ impl bpf_prog_info {
}
}
#[inline]
pub unsafe fn gpl_compatible_raw(this: *const Self) -> __u32 {
unsafe {
::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 4usize]>>::raw_get(
::core::ptr::addr_of!((*this)._bitfield_1),
0usize,
1u8,
) as u32)
}
}
#[inline]
pub unsafe fn set_gpl_compatible_raw(this: *mut Self, val: __u32) {
unsafe {
let val: u32 = ::core::mem::transmute(val);
<__BindgenBitfieldUnit<[u8; 4usize]>>::raw_set(
::core::ptr::addr_of_mut!((*this)._bitfield_1),
0usize,
1u8,
val as u64,
)
}
}
#[inline]
pub fn new_bitfield_1(gpl_compatible: __u32) -> __BindgenBitfieldUnit<[u8; 4usize]> {
let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default();
__bindgen_bitfield_unit.set(0usize, 1u8, {
@ -2805,6 +2931,28 @@ impl per_cr_bits {
}
}
#[inline]
pub unsafe fn em_branching_raw(this: *const Self) -> ::aya_ebpf_cty::c_uint {
unsafe {
::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
::core::ptr::addr_of!((*this)._bitfield_1),
32usize,
1u8,
) as u32)
}
}
#[inline]
pub unsafe fn set_em_branching_raw(this: *mut Self, val: ::aya_ebpf_cty::c_uint) {
unsafe {
let val: u32 = ::core::mem::transmute(val);
<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
::core::ptr::addr_of_mut!((*this)._bitfield_1),
32usize,
1u8,
val as u64,
)
}
}
#[inline]
pub fn em_instruction_fetch(&self) -> ::aya_ebpf_cty::c_uint {
unsafe { ::core::mem::transmute(self._bitfield_1.get(33usize, 1u8) as u32) }
}
@ -2816,6 +2964,28 @@ impl per_cr_bits {
}
}
#[inline]
pub unsafe fn em_instruction_fetch_raw(this: *const Self) -> ::aya_ebpf_cty::c_uint {
unsafe {
::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
::core::ptr::addr_of!((*this)._bitfield_1),
33usize,
1u8,
) as u32)
}
}
#[inline]
pub unsafe fn set_em_instruction_fetch_raw(this: *mut Self, val: ::aya_ebpf_cty::c_uint) {
unsafe {
let val: u32 = ::core::mem::transmute(val);
<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
::core::ptr::addr_of_mut!((*this)._bitfield_1),
33usize,
1u8,
val as u64,
)
}
}
#[inline]
pub fn em_storage_alteration(&self) -> ::aya_ebpf_cty::c_uint {
unsafe { ::core::mem::transmute(self._bitfield_1.get(34usize, 1u8) as u32) }
}
@ -2827,6 +2997,28 @@ impl per_cr_bits {
}
}
#[inline]
// Raw-pointer read of the `em_storage_alteration` bitfield (bit offset 34, width 1).
// SAFETY: `this` must point to valid memory containing an initialized `Self`.
pub unsafe fn em_storage_alteration_raw(this: *const Self) -> ::aya_ebpf_cty::c_uint {
    unsafe {
        ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
            ::core::ptr::addr_of!((*this)._bitfield_1),
            34usize,
            1u8,
        ) as u32)
    }
}
#[inline]
// Raw-pointer write of the `em_storage_alteration` bitfield (bit offset 34, width 1).
// SAFETY: `this` must point to valid, writable memory for `Self`.
pub unsafe fn set_em_storage_alteration_raw(this: *mut Self, val: ::aya_ebpf_cty::c_uint) {
    unsafe {
        let val: u32 = ::core::mem::transmute(val);
        <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
            ::core::ptr::addr_of_mut!((*this)._bitfield_1),
            34usize,
            1u8,
            val as u64,
        )
    }
}
#[inline]
// Reads the `em_gpr_alt_unused` bitfield (bit offset 35, width 1).
pub fn em_gpr_alt_unused(&self) -> ::aya_ebpf_cty::c_uint {
    unsafe { ::core::mem::transmute(self._bitfield_1.get(35usize, 1u8) as u32) }
}
@ -2838,6 +3030,28 @@ impl per_cr_bits {
}
}
#[inline]
// Raw-pointer read of the `em_gpr_alt_unused` bitfield (bit offset 35, width 1).
// SAFETY: `this` must point to valid memory containing an initialized `Self`.
pub unsafe fn em_gpr_alt_unused_raw(this: *const Self) -> ::aya_ebpf_cty::c_uint {
    unsafe {
        ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
            ::core::ptr::addr_of!((*this)._bitfield_1),
            35usize,
            1u8,
        ) as u32)
    }
}
#[inline]
// Raw-pointer write of the `em_gpr_alt_unused` bitfield (bit offset 35, width 1).
// SAFETY: `this` must point to valid, writable memory for `Self`.
pub unsafe fn set_em_gpr_alt_unused_raw(this: *mut Self, val: ::aya_ebpf_cty::c_uint) {
    unsafe {
        let val: u32 = ::core::mem::transmute(val);
        <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
            ::core::ptr::addr_of_mut!((*this)._bitfield_1),
            35usize,
            1u8,
            val as u64,
        )
    }
}
#[inline]
// Reads the `em_store_real_address` bitfield (bit offset 36, width 1).
pub fn em_store_real_address(&self) -> ::aya_ebpf_cty::c_uint {
    unsafe { ::core::mem::transmute(self._bitfield_1.get(36usize, 1u8) as u32) }
}
@ -2849,6 +3063,28 @@ impl per_cr_bits {
}
}
#[inline]
// Raw-pointer read of the `em_store_real_address` bitfield (bit offset 36, width 1).
// SAFETY: `this` must point to valid memory containing an initialized `Self`.
pub unsafe fn em_store_real_address_raw(this: *const Self) -> ::aya_ebpf_cty::c_uint {
    unsafe {
        ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
            ::core::ptr::addr_of!((*this)._bitfield_1),
            36usize,
            1u8,
        ) as u32)
    }
}
#[inline]
// Raw-pointer write of the `em_store_real_address` bitfield (bit offset 36, width 1).
// SAFETY: `this` must point to valid, writable memory for `Self`.
pub unsafe fn set_em_store_real_address_raw(this: *mut Self, val: ::aya_ebpf_cty::c_uint) {
    unsafe {
        let val: u32 = ::core::mem::transmute(val);
        <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
            ::core::ptr::addr_of_mut!((*this)._bitfield_1),
            36usize,
            1u8,
            val as u64,
        )
    }
}
#[inline]
// Reads the `branch_addr_ctl` bitfield (bit offset 40, width 1).
pub fn branch_addr_ctl(&self) -> ::aya_ebpf_cty::c_uint {
    unsafe { ::core::mem::transmute(self._bitfield_1.get(40usize, 1u8) as u32) }
}
@ -2860,6 +3096,28 @@ impl per_cr_bits {
}
}
#[inline]
// Raw-pointer read of the `branch_addr_ctl` bitfield (bit offset 40, width 1).
// SAFETY: `this` must point to valid memory containing an initialized `Self`.
pub unsafe fn branch_addr_ctl_raw(this: *const Self) -> ::aya_ebpf_cty::c_uint {
    unsafe {
        ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
            ::core::ptr::addr_of!((*this)._bitfield_1),
            40usize,
            1u8,
        ) as u32)
    }
}
#[inline]
// Raw-pointer write of the `branch_addr_ctl` bitfield (bit offset 40, width 1).
// SAFETY: `this` must point to valid, writable memory for `Self`.
pub unsafe fn set_branch_addr_ctl_raw(this: *mut Self, val: ::aya_ebpf_cty::c_uint) {
    unsafe {
        let val: u32 = ::core::mem::transmute(val);
        <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
            ::core::ptr::addr_of_mut!((*this)._bitfield_1),
            40usize,
            1u8,
            val as u64,
        )
    }
}
#[inline]
// Reads the `storage_alt_space_ctl` bitfield (bit offset 42, width 1).
pub fn storage_alt_space_ctl(&self) -> ::aya_ebpf_cty::c_uint {
    unsafe { ::core::mem::transmute(self._bitfield_1.get(42usize, 1u8) as u32) }
}
@ -2871,6 +3129,28 @@ impl per_cr_bits {
}
}
#[inline]
// Raw-pointer read of the `storage_alt_space_ctl` bitfield (bit offset 42, width 1).
// SAFETY: `this` must point to valid memory containing an initialized `Self`.
pub unsafe fn storage_alt_space_ctl_raw(this: *const Self) -> ::aya_ebpf_cty::c_uint {
    unsafe {
        ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
            ::core::ptr::addr_of!((*this)._bitfield_1),
            42usize,
            1u8,
        ) as u32)
    }
}
#[inline]
// Raw-pointer write of the `storage_alt_space_ctl` bitfield (bit offset 42, width 1).
// SAFETY: `this` must point to valid, writable memory for `Self`.
pub unsafe fn set_storage_alt_space_ctl_raw(this: *mut Self, val: ::aya_ebpf_cty::c_uint) {
    unsafe {
        let val: u32 = ::core::mem::transmute(val);
        <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
            ::core::ptr::addr_of_mut!((*this)._bitfield_1),
            42usize,
            1u8,
            val as u64,
        )
    }
}
#[inline]
pub fn new_bitfield_1(
em_branching: ::aya_ebpf_cty::c_uint,
em_instruction_fetch: ::aya_ebpf_cty::c_uint,
@ -2945,6 +3225,28 @@ impl per_lowcore_bits {
}
}
#[inline]
// Raw-pointer read of the `perc_branching` bitfield (bit offset 0, width 1).
// SAFETY: `this` must point to valid memory containing an initialized `Self`.
pub unsafe fn perc_branching_raw(this: *const Self) -> ::aya_ebpf_cty::c_uint {
    unsafe {
        ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 2usize]>>::raw_get(
            ::core::ptr::addr_of!((*this)._bitfield_1),
            0usize,
            1u8,
        ) as u32)
    }
}
#[inline]
// Raw-pointer write of the `perc_branching` bitfield (bit offset 0, width 1).
// SAFETY: `this` must point to valid, writable memory for `Self`.
pub unsafe fn set_perc_branching_raw(this: *mut Self, val: ::aya_ebpf_cty::c_uint) {
    unsafe {
        let val: u32 = ::core::mem::transmute(val);
        <__BindgenBitfieldUnit<[u8; 2usize]>>::raw_set(
            ::core::ptr::addr_of_mut!((*this)._bitfield_1),
            0usize,
            1u8,
            val as u64,
        )
    }
}
#[inline]
// Reads the `perc_instruction_fetch` bitfield (bit offset 1, width 1).
pub fn perc_instruction_fetch(&self) -> ::aya_ebpf_cty::c_uint {
    unsafe { ::core::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u32) }
}
@ -2956,6 +3258,28 @@ impl per_lowcore_bits {
}
}
#[inline]
// Raw-pointer read of the `perc_instruction_fetch` bitfield (bit offset 1, width 1).
// SAFETY: `this` must point to valid memory containing an initialized `Self`.
pub unsafe fn perc_instruction_fetch_raw(this: *const Self) -> ::aya_ebpf_cty::c_uint {
    unsafe {
        ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 2usize]>>::raw_get(
            ::core::ptr::addr_of!((*this)._bitfield_1),
            1usize,
            1u8,
        ) as u32)
    }
}
#[inline]
// Raw-pointer write of the `perc_instruction_fetch` bitfield (bit offset 1, width 1).
// SAFETY: `this` must point to valid, writable memory for `Self`.
pub unsafe fn set_perc_instruction_fetch_raw(this: *mut Self, val: ::aya_ebpf_cty::c_uint) {
    unsafe {
        let val: u32 = ::core::mem::transmute(val);
        <__BindgenBitfieldUnit<[u8; 2usize]>>::raw_set(
            ::core::ptr::addr_of_mut!((*this)._bitfield_1),
            1usize,
            1u8,
            val as u64,
        )
    }
}
#[inline]
// Reads the `perc_storage_alteration` bitfield (bit offset 2, width 1).
pub fn perc_storage_alteration(&self) -> ::aya_ebpf_cty::c_uint {
    unsafe { ::core::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u32) }
}
@ -2967,6 +3291,28 @@ impl per_lowcore_bits {
}
}
#[inline]
// Raw-pointer read of the `perc_storage_alteration` bitfield (bit offset 2, width 1).
// SAFETY: `this` must point to valid memory containing an initialized `Self`.
pub unsafe fn perc_storage_alteration_raw(this: *const Self) -> ::aya_ebpf_cty::c_uint {
    unsafe {
        ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 2usize]>>::raw_get(
            ::core::ptr::addr_of!((*this)._bitfield_1),
            2usize,
            1u8,
        ) as u32)
    }
}
#[inline]
// Raw-pointer write of the `perc_storage_alteration` bitfield (bit offset 2, width 1).
// SAFETY: `this` must point to valid, writable memory for `Self`.
pub unsafe fn set_perc_storage_alteration_raw(this: *mut Self, val: ::aya_ebpf_cty::c_uint) {
    unsafe {
        let val: u32 = ::core::mem::transmute(val);
        <__BindgenBitfieldUnit<[u8; 2usize]>>::raw_set(
            ::core::ptr::addr_of_mut!((*this)._bitfield_1),
            2usize,
            1u8,
            val as u64,
        )
    }
}
#[inline]
// Reads the `perc_gpr_alt_unused` bitfield (bit offset 3, width 1).
pub fn perc_gpr_alt_unused(&self) -> ::aya_ebpf_cty::c_uint {
    unsafe { ::core::mem::transmute(self._bitfield_1.get(3usize, 1u8) as u32) }
}
@ -2978,6 +3324,28 @@ impl per_lowcore_bits {
}
}
#[inline]
// Raw-pointer read of the `perc_gpr_alt_unused` bitfield (bit offset 3, width 1).
// SAFETY: `this` must point to valid memory containing an initialized `Self`.
pub unsafe fn perc_gpr_alt_unused_raw(this: *const Self) -> ::aya_ebpf_cty::c_uint {
    unsafe {
        ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 2usize]>>::raw_get(
            ::core::ptr::addr_of!((*this)._bitfield_1),
            3usize,
            1u8,
        ) as u32)
    }
}
#[inline]
// Raw-pointer write of the `perc_gpr_alt_unused` bitfield (bit offset 3, width 1).
// SAFETY: `this` must point to valid, writable memory for `Self`.
pub unsafe fn set_perc_gpr_alt_unused_raw(this: *mut Self, val: ::aya_ebpf_cty::c_uint) {
    unsafe {
        let val: u32 = ::core::mem::transmute(val);
        <__BindgenBitfieldUnit<[u8; 2usize]>>::raw_set(
            ::core::ptr::addr_of_mut!((*this)._bitfield_1),
            3usize,
            1u8,
            val as u64,
        )
    }
}
#[inline]
// Reads the `perc_store_real_address` bitfield (bit offset 4, width 1).
pub fn perc_store_real_address(&self) -> ::aya_ebpf_cty::c_uint {
    unsafe { ::core::mem::transmute(self._bitfield_1.get(4usize, 1u8) as u32) }
}
@ -2989,6 +3357,28 @@ impl per_lowcore_bits {
}
}
#[inline]
// Raw-pointer read of the `perc_store_real_address` bitfield (bit offset 4, width 1).
// SAFETY: `this` must point to valid memory containing an initialized `Self`.
pub unsafe fn perc_store_real_address_raw(this: *const Self) -> ::aya_ebpf_cty::c_uint {
    unsafe {
        ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 2usize]>>::raw_get(
            ::core::ptr::addr_of!((*this)._bitfield_1),
            4usize,
            1u8,
        ) as u32)
    }
}
#[inline]
// Raw-pointer write of the `perc_store_real_address` bitfield (bit offset 4, width 1).
// SAFETY: `this` must point to valid, writable memory for `Self`.
pub unsafe fn set_perc_store_real_address_raw(this: *mut Self, val: ::aya_ebpf_cty::c_uint) {
    unsafe {
        let val: u32 = ::core::mem::transmute(val);
        <__BindgenBitfieldUnit<[u8; 2usize]>>::raw_set(
            ::core::ptr::addr_of_mut!((*this)._bitfield_1),
            4usize,
            1u8,
            val as u64,
        )
    }
}
#[inline]
// Reads the `atmid_psw_bit_31` bitfield (bit offset 8, width 1).
pub fn atmid_psw_bit_31(&self) -> ::aya_ebpf_cty::c_uint {
    unsafe { ::core::mem::transmute(self._bitfield_1.get(8usize, 1u8) as u32) }
}
@ -3000,6 +3390,28 @@ impl per_lowcore_bits {
}
}
#[inline]
// Raw-pointer read of the `atmid_psw_bit_31` bitfield (bit offset 8, width 1).
// SAFETY: `this` must point to valid memory containing an initialized `Self`.
pub unsafe fn atmid_psw_bit_31_raw(this: *const Self) -> ::aya_ebpf_cty::c_uint {
    unsafe {
        ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 2usize]>>::raw_get(
            ::core::ptr::addr_of!((*this)._bitfield_1),
            8usize,
            1u8,
        ) as u32)
    }
}
#[inline]
// Raw-pointer write of the `atmid_psw_bit_31` bitfield (bit offset 8, width 1).
// SAFETY: `this` must point to valid, writable memory for `Self`.
pub unsafe fn set_atmid_psw_bit_31_raw(this: *mut Self, val: ::aya_ebpf_cty::c_uint) {
    unsafe {
        let val: u32 = ::core::mem::transmute(val);
        <__BindgenBitfieldUnit<[u8; 2usize]>>::raw_set(
            ::core::ptr::addr_of_mut!((*this)._bitfield_1),
            8usize,
            1u8,
            val as u64,
        )
    }
}
#[inline]
// Reads the `atmid_validity_bit` bitfield (bit offset 9, width 1).
pub fn atmid_validity_bit(&self) -> ::aya_ebpf_cty::c_uint {
    unsafe { ::core::mem::transmute(self._bitfield_1.get(9usize, 1u8) as u32) }
}
@ -3011,6 +3423,28 @@ impl per_lowcore_bits {
}
}
#[inline]
// Raw-pointer read of the `atmid_validity_bit` bitfield (bit offset 9, width 1).
// SAFETY: `this` must point to valid memory containing an initialized `Self`.
pub unsafe fn atmid_validity_bit_raw(this: *const Self) -> ::aya_ebpf_cty::c_uint {
    unsafe {
        ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 2usize]>>::raw_get(
            ::core::ptr::addr_of!((*this)._bitfield_1),
            9usize,
            1u8,
        ) as u32)
    }
}
#[inline]
// Raw-pointer write of the `atmid_validity_bit` bitfield (bit offset 9, width 1).
// SAFETY: `this` must point to valid, writable memory for `Self`.
pub unsafe fn set_atmid_validity_bit_raw(this: *mut Self, val: ::aya_ebpf_cty::c_uint) {
    unsafe {
        let val: u32 = ::core::mem::transmute(val);
        <__BindgenBitfieldUnit<[u8; 2usize]>>::raw_set(
            ::core::ptr::addr_of_mut!((*this)._bitfield_1),
            9usize,
            1u8,
            val as u64,
        )
    }
}
#[inline]
// Reads the `atmid_psw_bit_32` bitfield (bit offset 10, width 1).
pub fn atmid_psw_bit_32(&self) -> ::aya_ebpf_cty::c_uint {
    unsafe { ::core::mem::transmute(self._bitfield_1.get(10usize, 1u8) as u32) }
}
@ -3022,6 +3456,28 @@ impl per_lowcore_bits {
}
}
#[inline]
// Raw-pointer read of the `atmid_psw_bit_32` bitfield (bit offset 10, width 1).
// SAFETY: `this` must point to valid memory containing an initialized `Self`.
pub unsafe fn atmid_psw_bit_32_raw(this: *const Self) -> ::aya_ebpf_cty::c_uint {
    unsafe {
        ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 2usize]>>::raw_get(
            ::core::ptr::addr_of!((*this)._bitfield_1),
            10usize,
            1u8,
        ) as u32)
    }
}
#[inline]
// Raw-pointer write of the `atmid_psw_bit_32` bitfield (bit offset 10, width 1).
// SAFETY: `this` must point to valid, writable memory for `Self`.
pub unsafe fn set_atmid_psw_bit_32_raw(this: *mut Self, val: ::aya_ebpf_cty::c_uint) {
    unsafe {
        let val: u32 = ::core::mem::transmute(val);
        <__BindgenBitfieldUnit<[u8; 2usize]>>::raw_set(
            ::core::ptr::addr_of_mut!((*this)._bitfield_1),
            10usize,
            1u8,
            val as u64,
        )
    }
}
#[inline]
// Reads the `atmid_psw_bit_5` bitfield (bit offset 11, width 1).
pub fn atmid_psw_bit_5(&self) -> ::aya_ebpf_cty::c_uint {
    unsafe { ::core::mem::transmute(self._bitfield_1.get(11usize, 1u8) as u32) }
}
@ -3033,6 +3489,28 @@ impl per_lowcore_bits {
}
}
#[inline]
// Raw-pointer read of the `atmid_psw_bit_5` bitfield (bit offset 11, width 1).
// SAFETY: `this` must point to valid memory containing an initialized `Self`.
pub unsafe fn atmid_psw_bit_5_raw(this: *const Self) -> ::aya_ebpf_cty::c_uint {
    unsafe {
        ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 2usize]>>::raw_get(
            ::core::ptr::addr_of!((*this)._bitfield_1),
            11usize,
            1u8,
        ) as u32)
    }
}
#[inline]
// Raw-pointer write of the `atmid_psw_bit_5` bitfield (bit offset 11, width 1).
// SAFETY: `this` must point to valid, writable memory for `Self`.
pub unsafe fn set_atmid_psw_bit_5_raw(this: *mut Self, val: ::aya_ebpf_cty::c_uint) {
    unsafe {
        let val: u32 = ::core::mem::transmute(val);
        <__BindgenBitfieldUnit<[u8; 2usize]>>::raw_set(
            ::core::ptr::addr_of_mut!((*this)._bitfield_1),
            11usize,
            1u8,
            val as u64,
        )
    }
}
#[inline]
// Reads the `atmid_psw_bit_16` bitfield (bit offset 12, width 1).
pub fn atmid_psw_bit_16(&self) -> ::aya_ebpf_cty::c_uint {
    unsafe { ::core::mem::transmute(self._bitfield_1.get(12usize, 1u8) as u32) }
}
@ -3044,6 +3522,28 @@ impl per_lowcore_bits {
}
}
#[inline]
// Raw-pointer read of the `atmid_psw_bit_16` bitfield (bit offset 12, width 1).
// SAFETY: `this` must point to valid memory containing an initialized `Self`.
pub unsafe fn atmid_psw_bit_16_raw(this: *const Self) -> ::aya_ebpf_cty::c_uint {
    unsafe {
        ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 2usize]>>::raw_get(
            ::core::ptr::addr_of!((*this)._bitfield_1),
            12usize,
            1u8,
        ) as u32)
    }
}
#[inline]
// Raw-pointer write of the `atmid_psw_bit_16` bitfield (bit offset 12, width 1).
// SAFETY: `this` must point to valid, writable memory for `Self`.
pub unsafe fn set_atmid_psw_bit_16_raw(this: *mut Self, val: ::aya_ebpf_cty::c_uint) {
    unsafe {
        let val: u32 = ::core::mem::transmute(val);
        <__BindgenBitfieldUnit<[u8; 2usize]>>::raw_set(
            ::core::ptr::addr_of_mut!((*this)._bitfield_1),
            12usize,
            1u8,
            val as u64,
        )
    }
}
#[inline]
// Reads the `atmid_psw_bit_17` bitfield (bit offset 13, width 1).
pub fn atmid_psw_bit_17(&self) -> ::aya_ebpf_cty::c_uint {
    unsafe { ::core::mem::transmute(self._bitfield_1.get(13usize, 1u8) as u32) }
}
@ -3055,6 +3555,28 @@ impl per_lowcore_bits {
}
}
#[inline]
// Raw-pointer read of the `atmid_psw_bit_17` bitfield (bit offset 13, width 1).
// SAFETY: `this` must point to valid memory containing an initialized `Self`.
pub unsafe fn atmid_psw_bit_17_raw(this: *const Self) -> ::aya_ebpf_cty::c_uint {
    unsafe {
        ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 2usize]>>::raw_get(
            ::core::ptr::addr_of!((*this)._bitfield_1),
            13usize,
            1u8,
        ) as u32)
    }
}
#[inline]
// Raw-pointer write of the `atmid_psw_bit_17` bitfield (bit offset 13, width 1).
// SAFETY: `this` must point to valid, writable memory for `Self`.
pub unsafe fn set_atmid_psw_bit_17_raw(this: *mut Self, val: ::aya_ebpf_cty::c_uint) {
    unsafe {
        let val: u32 = ::core::mem::transmute(val);
        <__BindgenBitfieldUnit<[u8; 2usize]>>::raw_set(
            ::core::ptr::addr_of_mut!((*this)._bitfield_1),
            13usize,
            1u8,
            val as u64,
        )
    }
}
#[inline]
// Reads the 2-bit `si` bitfield (bit offset 14, width 2).
pub fn si(&self) -> ::aya_ebpf_cty::c_uint {
    unsafe { ::core::mem::transmute(self._bitfield_1.get(14usize, 2u8) as u32) }
}
@ -3066,6 +3588,28 @@ impl per_lowcore_bits {
}
}
#[inline]
// Raw-pointer read of the 2-bit `si` bitfield (bit offset 14, width 2).
// SAFETY: `this` must point to valid memory containing an initialized `Self`.
pub unsafe fn si_raw(this: *const Self) -> ::aya_ebpf_cty::c_uint {
    unsafe {
        ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 2usize]>>::raw_get(
            ::core::ptr::addr_of!((*this)._bitfield_1),
            14usize,
            2u8,
        ) as u32)
    }
}
#[inline]
// Raw-pointer write of the 2-bit `si` bitfield (bit offset 14, width 2).
// SAFETY: `this` must point to valid, writable memory for `Self`.
pub unsafe fn set_si_raw(this: *mut Self, val: ::aya_ebpf_cty::c_uint) {
    unsafe {
        let val: u32 = ::core::mem::transmute(val);
        <__BindgenBitfieldUnit<[u8; 2usize]>>::raw_set(
            ::core::ptr::addr_of_mut!((*this)._bitfield_1),
            14usize,
            2u8,
            val as u64,
        )
    }
}
#[inline]
pub fn new_bitfield_1(
perc_branching: ::aya_ebpf_cty::c_uint,
perc_instruction_fetch: ::aya_ebpf_cty::c_uint,
@ -3146,6 +3690,28 @@ impl per_lowcore_bits {
}
}
#[inline]
// Raw-pointer read of the 4-bit `access_id` field stored in `_bitfield_2`
// (bit offset 4, width 4).
// SAFETY: `this` must point to valid memory containing an initialized `Self`.
pub unsafe fn access_id_raw(this: *const Self) -> ::aya_ebpf_cty::c_uint {
    unsafe {
        ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 1usize]>>::raw_get(
            ::core::ptr::addr_of!((*this)._bitfield_2),
            4usize,
            4u8,
        ) as u32)
    }
}
#[inline]
// Raw-pointer write of the 4-bit `access_id` field in `_bitfield_2`
// (bit offset 4, width 4).
// SAFETY: `this` must point to valid, writable memory for `Self`.
pub unsafe fn set_access_id_raw(this: *mut Self, val: ::aya_ebpf_cty::c_uint) {
    unsafe {
        let val: u32 = ::core::mem::transmute(val);
        <__BindgenBitfieldUnit<[u8; 1usize]>>::raw_set(
            ::core::ptr::addr_of_mut!((*this)._bitfield_2),
            4usize,
            4u8,
            val as u64,
        )
    }
}
#[inline]
pub fn new_bitfield_2(
access_id: ::aya_ebpf_cty::c_uint,
) -> __BindgenBitfieldUnit<[u8; 1usize]> {
@ -3192,6 +3758,28 @@ impl per_struct {
}
}
#[inline]
// Raw-pointer read of the `single_step` bitfield (bit offset 0, width 1).
// SAFETY: `this` must point to valid memory containing an initialized `Self`.
pub unsafe fn single_step_raw(this: *const Self) -> ::aya_ebpf_cty::c_uint {
    unsafe {
        ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 4usize]>>::raw_get(
            ::core::ptr::addr_of!((*this)._bitfield_1),
            0usize,
            1u8,
        ) as u32)
    }
}
#[inline]
// Raw-pointer write of the `single_step` bitfield (bit offset 0, width 1).
// SAFETY: `this` must point to valid, writable memory for `Self`.
pub unsafe fn set_single_step_raw(this: *mut Self, val: ::aya_ebpf_cty::c_uint) {
    unsafe {
        let val: u32 = ::core::mem::transmute(val);
        <__BindgenBitfieldUnit<[u8; 4usize]>>::raw_set(
            ::core::ptr::addr_of_mut!((*this)._bitfield_1),
            0usize,
            1u8,
            val as u64,
        )
    }
}
#[inline]
// Reads the `instruction_fetch` bitfield (bit offset 1, width 1).
pub fn instruction_fetch(&self) -> ::aya_ebpf_cty::c_uint {
    unsafe { ::core::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u32) }
}
@ -3203,6 +3791,28 @@ impl per_struct {
}
}
#[inline]
// Raw-pointer read of the `instruction_fetch` bitfield (bit offset 1, width 1).
// SAFETY: `this` must point to valid memory containing an initialized `Self`.
pub unsafe fn instruction_fetch_raw(this: *const Self) -> ::aya_ebpf_cty::c_uint {
    unsafe {
        ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 4usize]>>::raw_get(
            ::core::ptr::addr_of!((*this)._bitfield_1),
            1usize,
            1u8,
        ) as u32)
    }
}
#[inline]
// Raw-pointer write of the `instruction_fetch` bitfield (bit offset 1, width 1).
// SAFETY: `this` must point to valid, writable memory for `Self`.
pub unsafe fn set_instruction_fetch_raw(this: *mut Self, val: ::aya_ebpf_cty::c_uint) {
    unsafe {
        let val: u32 = ::core::mem::transmute(val);
        <__BindgenBitfieldUnit<[u8; 4usize]>>::raw_set(
            ::core::ptr::addr_of_mut!((*this)._bitfield_1),
            1usize,
            1u8,
            val as u64,
        )
    }
}
#[inline]
pub fn new_bitfield_1(
single_step: ::aya_ebpf_cty::c_uint,
instruction_fetch: ::aya_ebpf_cty::c_uint,

@ -14,10 +14,7 @@ where
Storage: AsRef<[u8]> + AsMut<[u8]>,
{
#[inline]
pub fn get_bit(&self, index: usize) -> bool {
debug_assert!(index / 8 < self.storage.as_ref().len());
let byte_index = index / 8;
let byte = self.storage.as_ref()[byte_index];
fn extract_bit(byte: u8, index: usize) -> bool {
let bit_index = if cfg!(target_endian = "big") {
7 - (index % 8)
} else {
@ -27,10 +24,21 @@ where
byte & mask == mask
}
#[inline]
pub fn set_bit(&mut self, index: usize, val: bool) {
pub fn get_bit(&self, index: usize) -> bool {
debug_assert!(index / 8 < self.storage.as_ref().len());
let byte_index = index / 8;
let byte = &mut self.storage.as_mut()[byte_index];
let byte = self.storage.as_ref()[byte_index];
Self::extract_bit(byte, index)
}
#[inline]
// Raw-pointer analogue of `get_bit`: reads the byte holding bit `index`
// through `addr_of!` pointer arithmetic, so no `&Self` reference is created
// (NOTE(review): presumably so it is sound for packed/unaligned structs —
// confirm against bindgen's generated-code docs).
// SAFETY: `this` must point to valid memory for `Self`; `index` must be
// within `size_of::<Storage>() * 8` bits (checked only by debug_assert).
pub unsafe fn raw_get_bit(this: *const Self, index: usize) -> bool {
    debug_assert!(index / 8 < core::mem::size_of::<Storage>());
    let byte_index = index / 8;
    let byte = *(core::ptr::addr_of!((*this).storage) as *const u8).offset(byte_index as isize);
    Self::extract_bit(byte, index)
}
#[inline]
fn change_bit(byte: u8, index: usize, val: bool) -> u8 {
let bit_index = if cfg!(target_endian = "big") {
7 - (index % 8)
} else {
@ -38,10 +46,25 @@ where
};
let mask = 1 << bit_index;
if val {
*byte |= mask;
byte | mask
} else {
*byte &= !mask;
byte & !mask
}
}
#[inline]
// Sets (val = true) or clears (val = false) bit `index` of `storage`
// in place, delegating the bit arithmetic to `change_bit`.
pub fn set_bit(&mut self, index: usize, val: bool) {
    debug_assert!(index / 8 < self.storage.as_ref().len());
    let byte_index = index / 8;
    let byte = &mut self.storage.as_mut()[byte_index];
    *byte = Self::change_bit(*byte, index, val);
}
#[inline]
// Raw-pointer analogue of `set_bit`: writes through a `*mut u8` derived
// with `addr_of_mut!`, so no `&mut Self` reference is formed.
// SAFETY: `this` must point to valid, writable memory for `Self`; the
// bit index bound is only debug-asserted.
pub unsafe fn raw_set_bit(this: *mut Self, index: usize, val: bool) {
    debug_assert!(index / 8 < core::mem::size_of::<Storage>());
    let byte_index = index / 8;
    let byte =
        (core::ptr::addr_of_mut!((*this).storage) as *mut u8).offset(byte_index as isize);
    *byte = Self::change_bit(*byte, index, val);
}
#[inline]
pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 {
@ -62,6 +85,24 @@ where
val
}
#[inline]
// Raw-pointer analogue of `get`: assembles `bit_width` bits starting at
// `bit_offset` into a u64, reading each bit via `raw_get_bit`. On big-endian
// targets the destination bit position is mirrored (bit_width - 1 - i) so
// the numeric value matches the little-endian layout.
// SAFETY: `this` must point to valid memory for `Self`; offset/width bounds
// are only debug-asserted.
pub unsafe fn raw_get(this: *const Self, bit_offset: usize, bit_width: u8) -> u64 {
    debug_assert!(bit_width <= 64);
    debug_assert!(bit_offset / 8 < core::mem::size_of::<Storage>());
    debug_assert!((bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::<Storage>());
    let mut val = 0;
    for i in 0..(bit_width as usize) {
        if Self::raw_get_bit(this, i + bit_offset) {
            let index = if cfg!(target_endian = "big") {
                bit_width as usize - 1 - i
            } else {
                i
            };
            val |= 1 << index;
        }
    }
    val
}
#[inline]
pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) {
debug_assert!(bit_width <= 64);
debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
@ -77,6 +118,22 @@ where
self.set_bit(index + bit_offset, val_bit_is_set);
}
}
#[inline]
// Raw-pointer analogue of `set`: scatters the low `bit_width` bits of `val`
// into storage starting at `bit_offset`, writing each bit via `raw_set_bit`.
// The big-endian branch mirrors the source bit position, matching `raw_get`.
// SAFETY: `this` must point to valid, writable memory for `Self`;
// offset/width bounds are only debug-asserted.
pub unsafe fn raw_set(this: *mut Self, bit_offset: usize, bit_width: u8, val: u64) {
    debug_assert!(bit_width <= 64);
    debug_assert!(bit_offset / 8 < core::mem::size_of::<Storage>());
    debug_assert!((bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::<Storage>());
    for i in 0..(bit_width as usize) {
        let mask = 1 << i;
        let val_bit_is_set = val & mask == mask;
        let index = if cfg!(target_endian = "big") {
            bit_width as usize - 1 - i
        } else {
            i
        };
        Self::raw_set_bit(this, index + bit_offset, val_bit_is_set);
    }
}
}
#[repr(C)]
#[derive(Default)]
@ -211,6 +268,9 @@ pub const TC_ACT_REDIRECT: u32 = 7;
pub const TC_ACT_TRAP: u32 = 8;
pub const TC_ACT_VALUE_MAX: u32 = 8;
pub const TC_ACT_EXT_VAL_MASK: u32 = 268435455;
pub const TC_ACT_JUMP: u32 = 268435456;
pub const TC_ACT_GOTO_CHAIN: u32 = 536870912;
pub const TC_ACT_EXT_OPCODE_MAX: u32 = 536870912;
pub const SOL_SOCKET: u32 = 1;
pub const SO_DEBUG: u32 = 1;
pub const SO_REUSEADDR: u32 = 2;
@ -429,6 +489,28 @@ impl bpf_insn {
}
}
#[inline]
// Raw-pointer read of the 4-bit `dst_reg` field of a BPF instruction
// (bit offset 0, width 4).
// SAFETY: `this` must point to valid memory containing an initialized `Self`.
pub unsafe fn dst_reg_raw(this: *const Self) -> __u8 {
    unsafe {
        ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 1usize]>>::raw_get(
            ::core::ptr::addr_of!((*this)._bitfield_1),
            0usize,
            4u8,
        ) as u8)
    }
}
#[inline]
// Raw-pointer write of the 4-bit `dst_reg` field (bit offset 0, width 4).
// SAFETY: `this` must point to valid, writable memory for `Self`.
pub unsafe fn set_dst_reg_raw(this: *mut Self, val: __u8) {
    unsafe {
        let val: u8 = ::core::mem::transmute(val);
        <__BindgenBitfieldUnit<[u8; 1usize]>>::raw_set(
            ::core::ptr::addr_of_mut!((*this)._bitfield_1),
            0usize,
            4u8,
            val as u64,
        )
    }
}
#[inline]
// Reads the 4-bit `src_reg` field (bit offset 4, width 4).
pub fn src_reg(&self) -> __u8 {
    unsafe { ::core::mem::transmute(self._bitfield_1.get(4usize, 4u8) as u8) }
}
@ -440,6 +522,28 @@ impl bpf_insn {
}
}
#[inline]
// Raw-pointer read of the 4-bit `src_reg` field (bit offset 4, width 4).
// SAFETY: `this` must point to valid memory containing an initialized `Self`.
pub unsafe fn src_reg_raw(this: *const Self) -> __u8 {
    unsafe {
        ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 1usize]>>::raw_get(
            ::core::ptr::addr_of!((*this)._bitfield_1),
            4usize,
            4u8,
        ) as u8)
    }
}
#[inline]
// Raw-pointer write of the 4-bit `src_reg` field (bit offset 4, width 4).
// SAFETY: `this` must point to valid, writable memory for `Self`.
pub unsafe fn set_src_reg_raw(this: *mut Self, val: __u8) {
    unsafe {
        let val: u8 = ::core::mem::transmute(val);
        <__BindgenBitfieldUnit<[u8; 1usize]>>::raw_set(
            ::core::ptr::addr_of_mut!((*this)._bitfield_1),
            4usize,
            4u8,
            val as u64,
        )
    }
}
#[inline]
pub fn new_bitfield_1(dst_reg: __u8, src_reg: __u8) -> __BindgenBitfieldUnit<[u8; 1usize]> {
let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default();
__bindgen_bitfield_unit.set(0usize, 4u8, {
@ -1952,6 +2056,28 @@ impl bpf_prog_info {
}
}
#[inline]
// Raw-pointer read of the `gpl_compatible` bitfield (bit offset 0, width 1).
// SAFETY: `this` must point to valid memory containing an initialized `Self`.
pub unsafe fn gpl_compatible_raw(this: *const Self) -> __u32 {
    unsafe {
        ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 4usize]>>::raw_get(
            ::core::ptr::addr_of!((*this)._bitfield_1),
            0usize,
            1u8,
        ) as u32)
    }
}
#[inline]
// Raw-pointer write of the `gpl_compatible` bitfield (bit offset 0, width 1).
// SAFETY: `this` must point to valid, writable memory for `Self`.
pub unsafe fn set_gpl_compatible_raw(this: *mut Self, val: __u32) {
    unsafe {
        let val: u32 = ::core::mem::transmute(val);
        <__BindgenBitfieldUnit<[u8; 4usize]>>::raw_set(
            ::core::ptr::addr_of_mut!((*this)._bitfield_1),
            0usize,
            1u8,
            val as u64,
        )
    }
}
#[inline]
pub fn new_bitfield_1(gpl_compatible: __u32) -> __BindgenBitfieldUnit<[u8; 4usize]> {
let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default();
__bindgen_bitfield_unit.set(0usize, 1u8, {

@ -2264,6 +2264,10 @@ pub fn aya_ebpf_bindings::bindings::sk_reuseport_md__bindgen_ty_4::from(t: T) ->
impl<Storage> aya_ebpf_bindings::bindings::__BindgenBitfieldUnit<Storage> where Storage: core::convert::AsRef<[u8]> + core::convert::AsMut<[u8]>
pub fn aya_ebpf_bindings::bindings::__BindgenBitfieldUnit<Storage>::get(&self, bit_offset: usize, bit_width: u8) -> u64
pub fn aya_ebpf_bindings::bindings::__BindgenBitfieldUnit<Storage>::get_bit(&self, index: usize) -> bool
pub unsafe fn aya_ebpf_bindings::bindings::__BindgenBitfieldUnit<Storage>::raw_get(this: *const Self, bit_offset: usize, bit_width: u8) -> u64
pub unsafe fn aya_ebpf_bindings::bindings::__BindgenBitfieldUnit<Storage>::raw_get_bit(this: *const Self, index: usize) -> bool
pub unsafe fn aya_ebpf_bindings::bindings::__BindgenBitfieldUnit<Storage>::raw_set(this: *mut Self, bit_offset: usize, bit_width: u8, val: u64)
pub unsafe fn aya_ebpf_bindings::bindings::__BindgenBitfieldUnit<Storage>::raw_set_bit(this: *mut Self, index: usize, val: bool)
pub fn aya_ebpf_bindings::bindings::__BindgenBitfieldUnit<Storage>::set(&mut self, bit_offset: usize, bit_width: u8, val: u64)
pub fn aya_ebpf_bindings::bindings::__BindgenBitfieldUnit<Storage>::set_bit(&mut self, index: usize, val: bool)
impl<Storage> aya_ebpf_bindings::bindings::__BindgenBitfieldUnit<Storage>
@ -3841,10 +3845,14 @@ pub aya_ebpf_bindings::bindings::bpf_insn::imm: aya_ebpf_bindings::bindings::__s
pub aya_ebpf_bindings::bindings::bpf_insn::off: aya_ebpf_bindings::bindings::__s16
impl aya_ebpf_bindings::bindings::bpf_insn
pub fn aya_ebpf_bindings::bindings::bpf_insn::dst_reg(&self) -> aya_ebpf_bindings::bindings::__u8
pub unsafe fn aya_ebpf_bindings::bindings::bpf_insn::dst_reg_raw(this: *const Self) -> aya_ebpf_bindings::bindings::__u8
pub fn aya_ebpf_bindings::bindings::bpf_insn::new_bitfield_1(dst_reg: aya_ebpf_bindings::bindings::__u8, src_reg: aya_ebpf_bindings::bindings::__u8) -> aya_ebpf_bindings::bindings::__BindgenBitfieldUnit<[u8; 1]>
pub fn aya_ebpf_bindings::bindings::bpf_insn::set_dst_reg(&mut self, val: aya_ebpf_bindings::bindings::__u8)
pub unsafe fn aya_ebpf_bindings::bindings::bpf_insn::set_dst_reg_raw(this: *mut Self, val: aya_ebpf_bindings::bindings::__u8)
pub fn aya_ebpf_bindings::bindings::bpf_insn::set_src_reg(&mut self, val: aya_ebpf_bindings::bindings::__u8)
pub unsafe fn aya_ebpf_bindings::bindings::bpf_insn::set_src_reg_raw(this: *mut Self, val: aya_ebpf_bindings::bindings::__u8)
pub fn aya_ebpf_bindings::bindings::bpf_insn::src_reg(&self) -> aya_ebpf_bindings::bindings::__u8
pub unsafe fn aya_ebpf_bindings::bindings::bpf_insn::src_reg_raw(this: *const Self) -> aya_ebpf_bindings::bindings::__u8
impl core::clone::Clone for aya_ebpf_bindings::bindings::bpf_insn
pub fn aya_ebpf_bindings::bindings::bpf_insn::clone(&self) -> aya_ebpf_bindings::bindings::bpf_insn
impl core::fmt::Debug for aya_ebpf_bindings::bindings::bpf_insn
@ -5100,8 +5108,10 @@ pub aya_ebpf_bindings::bindings::bpf_prog_info::xlated_prog_insns: aya_ebpf_bind
pub aya_ebpf_bindings::bindings::bpf_prog_info::xlated_prog_len: aya_ebpf_bindings::bindings::__u32
impl aya_ebpf_bindings::bindings::bpf_prog_info
pub fn aya_ebpf_bindings::bindings::bpf_prog_info::gpl_compatible(&self) -> aya_ebpf_bindings::bindings::__u32
pub unsafe fn aya_ebpf_bindings::bindings::bpf_prog_info::gpl_compatible_raw(this: *const Self) -> aya_ebpf_bindings::bindings::__u32
pub fn aya_ebpf_bindings::bindings::bpf_prog_info::new_bitfield_1(gpl_compatible: aya_ebpf_bindings::bindings::__u32) -> aya_ebpf_bindings::bindings::__BindgenBitfieldUnit<[u8; 4]>
pub fn aya_ebpf_bindings::bindings::bpf_prog_info::set_gpl_compatible(&mut self, val: aya_ebpf_bindings::bindings::__u32)
pub unsafe fn aya_ebpf_bindings::bindings::bpf_prog_info::set_gpl_compatible_raw(this: *mut Self, val: aya_ebpf_bindings::bindings::__u32)
impl core::clone::Clone for aya_ebpf_bindings::bindings::bpf_prog_info
pub fn aya_ebpf_bindings::bindings::bpf_prog_info::clone(&self) -> aya_ebpf_bindings::bindings::bpf_prog_info
impl core::fmt::Debug for aya_ebpf_bindings::bindings::bpf_prog_info
@ -6976,7 +6986,10 @@ pub const aya_ebpf_bindings::bindings::SO_TXTIME: u32
pub const aya_ebpf_bindings::bindings::SO_TYPE: u32
pub const aya_ebpf_bindings::bindings::SO_WIFI_STATUS: u32
pub const aya_ebpf_bindings::bindings::SO_ZEROCOPY: u32
pub const aya_ebpf_bindings::bindings::TC_ACT_EXT_OPCODE_MAX: u32
pub const aya_ebpf_bindings::bindings::TC_ACT_EXT_VAL_MASK: i32
pub const aya_ebpf_bindings::bindings::TC_ACT_GOTO_CHAIN: u32
pub const aya_ebpf_bindings::bindings::TC_ACT_JUMP: u32
pub const aya_ebpf_bindings::bindings::TC_ACT_OK: i32
pub const aya_ebpf_bindings::bindings::TC_ACT_PIPE: i32
pub const aya_ebpf_bindings::bindings::TC_ACT_QUEUED: i32

@ -1925,6 +1925,52 @@ impl<T> core::clone::CloneToUninit for aya_obj::generated::bpf_stats_type where
pub unsafe fn aya_obj::generated::bpf_stats_type::clone_to_uninit(&self, dst: *mut u8)
impl<T> core::convert::From<T> for aya_obj::generated::bpf_stats_type
pub fn aya_obj::generated::bpf_stats_type::from(t: T) -> T
#[repr(u32)] pub enum aya_obj::generated::bpf_task_fd_type
pub aya_obj::generated::bpf_task_fd_type::BPF_FD_TYPE_KPROBE = 2
pub aya_obj::generated::bpf_task_fd_type::BPF_FD_TYPE_KRETPROBE = 3
pub aya_obj::generated::bpf_task_fd_type::BPF_FD_TYPE_RAW_TRACEPOINT = 0
pub aya_obj::generated::bpf_task_fd_type::BPF_FD_TYPE_TRACEPOINT = 1
pub aya_obj::generated::bpf_task_fd_type::BPF_FD_TYPE_UPROBE = 4
pub aya_obj::generated::bpf_task_fd_type::BPF_FD_TYPE_URETPROBE = 5
impl core::clone::Clone for aya_obj::generated::bpf_task_fd_type
pub fn aya_obj::generated::bpf_task_fd_type::clone(&self) -> aya_obj::generated::bpf_task_fd_type
impl core::cmp::Eq for aya_obj::generated::bpf_task_fd_type
impl core::cmp::PartialEq for aya_obj::generated::bpf_task_fd_type
pub fn aya_obj::generated::bpf_task_fd_type::eq(&self, other: &aya_obj::generated::bpf_task_fd_type) -> bool
impl core::fmt::Debug for aya_obj::generated::bpf_task_fd_type
pub fn aya_obj::generated::bpf_task_fd_type::fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result
impl core::hash::Hash for aya_obj::generated::bpf_task_fd_type
pub fn aya_obj::generated::bpf_task_fd_type::hash<__H: core::hash::Hasher>(&self, state: &mut __H)
impl core::marker::Copy for aya_obj::generated::bpf_task_fd_type
impl core::marker::StructuralPartialEq for aya_obj::generated::bpf_task_fd_type
impl core::marker::Freeze for aya_obj::generated::bpf_task_fd_type
impl core::marker::Send for aya_obj::generated::bpf_task_fd_type
impl core::marker::Sync for aya_obj::generated::bpf_task_fd_type
impl core::marker::Unpin for aya_obj::generated::bpf_task_fd_type
impl core::panic::unwind_safe::RefUnwindSafe for aya_obj::generated::bpf_task_fd_type
impl core::panic::unwind_safe::UnwindSafe for aya_obj::generated::bpf_task_fd_type
impl<T, U> core::convert::Into<U> for aya_obj::generated::bpf_task_fd_type where U: core::convert::From<T>
pub fn aya_obj::generated::bpf_task_fd_type::into(self) -> U
impl<T, U> core::convert::TryFrom<U> for aya_obj::generated::bpf_task_fd_type where U: core::convert::Into<T>
pub type aya_obj::generated::bpf_task_fd_type::Error = core::convert::Infallible
pub fn aya_obj::generated::bpf_task_fd_type::try_from(value: U) -> core::result::Result<T, <T as core::convert::TryFrom<U>>::Error>
impl<T, U> core::convert::TryInto<U> for aya_obj::generated::bpf_task_fd_type where U: core::convert::TryFrom<T>
pub type aya_obj::generated::bpf_task_fd_type::Error = <U as core::convert::TryFrom<T>>::Error
pub fn aya_obj::generated::bpf_task_fd_type::try_into(self) -> core::result::Result<U, <U as core::convert::TryFrom<T>>::Error>
impl<T> alloc::borrow::ToOwned for aya_obj::generated::bpf_task_fd_type where T: core::clone::Clone
pub type aya_obj::generated::bpf_task_fd_type::Owned = T
pub fn aya_obj::generated::bpf_task_fd_type::clone_into(&self, target: &mut T)
pub fn aya_obj::generated::bpf_task_fd_type::to_owned(&self) -> T
impl<T> core::any::Any for aya_obj::generated::bpf_task_fd_type where T: 'static + ?core::marker::Sized
pub fn aya_obj::generated::bpf_task_fd_type::type_id(&self) -> core::any::TypeId
impl<T> core::borrow::Borrow<T> for aya_obj::generated::bpf_task_fd_type where T: ?core::marker::Sized
pub fn aya_obj::generated::bpf_task_fd_type::borrow(&self) -> &T
impl<T> core::borrow::BorrowMut<T> for aya_obj::generated::bpf_task_fd_type where T: ?core::marker::Sized
pub fn aya_obj::generated::bpf_task_fd_type::borrow_mut(&mut self) -> &mut T
impl<T> core::clone::CloneToUninit for aya_obj::generated::bpf_task_fd_type where T: core::clone::Clone
pub unsafe fn aya_obj::generated::bpf_task_fd_type::clone_to_uninit(&self, dst: *mut u8)
impl<T> core::convert::From<T> for aya_obj::generated::bpf_task_fd_type
pub fn aya_obj::generated::bpf_task_fd_type::from(t: T) -> T
#[repr(u32)] pub enum aya_obj::generated::btf_func_linkage
pub aya_obj::generated::btf_func_linkage::BTF_FUNC_EXTERN = 2
pub aya_obj::generated::btf_func_linkage::BTF_FUNC_GLOBAL = 1
@ -3394,6 +3440,10 @@ pub fn aya_obj::generated::perf_event_mmap_page__bindgen_ty_1::from(t: T) -> T
impl<Storage> aya_obj::generated::__BindgenBitfieldUnit<Storage> where Storage: core::convert::AsRef<[u8]> + core::convert::AsMut<[u8]>
pub fn aya_obj::generated::__BindgenBitfieldUnit<Storage>::get(&self, bit_offset: usize, bit_width: u8) -> u64
pub fn aya_obj::generated::__BindgenBitfieldUnit<Storage>::get_bit(&self, index: usize) -> bool
pub unsafe fn aya_obj::generated::__BindgenBitfieldUnit<Storage>::raw_get(this: *const Self, bit_offset: usize, bit_width: u8) -> u64
pub unsafe fn aya_obj::generated::__BindgenBitfieldUnit<Storage>::raw_get_bit(this: *const Self, index: usize) -> bool
pub unsafe fn aya_obj::generated::__BindgenBitfieldUnit<Storage>::raw_set(this: *mut Self, bit_offset: usize, bit_width: u8, val: u64)
pub unsafe fn aya_obj::generated::__BindgenBitfieldUnit<Storage>::raw_set_bit(this: *mut Self, index: usize, val: bool)
pub fn aya_obj::generated::__BindgenBitfieldUnit<Storage>::set(&mut self, bit_offset: usize, bit_width: u8, val: u64)
pub fn aya_obj::generated::__BindgenBitfieldUnit<Storage>::set_bit(&mut self, index: usize, val: bool)
impl<Storage> aya_obj::generated::__BindgenBitfieldUnit<Storage>
@ -4768,10 +4818,14 @@ pub aya_obj::generated::bpf_insn::imm: aya_obj::generated::__s32
pub aya_obj::generated::bpf_insn::off: aya_obj::generated::__s16
impl aya_obj::generated::bpf_insn
pub fn aya_obj::generated::bpf_insn::dst_reg(&self) -> aya_obj::generated::__u8
pub unsafe fn aya_obj::generated::bpf_insn::dst_reg_raw(this: *const Self) -> aya_obj::generated::__u8
pub fn aya_obj::generated::bpf_insn::new_bitfield_1(dst_reg: aya_obj::generated::__u8, src_reg: aya_obj::generated::__u8) -> aya_obj::generated::__BindgenBitfieldUnit<[u8; 1]>
pub fn aya_obj::generated::bpf_insn::set_dst_reg(&mut self, val: aya_obj::generated::__u8)
pub unsafe fn aya_obj::generated::bpf_insn::set_dst_reg_raw(this: *mut Self, val: aya_obj::generated::__u8)
pub fn aya_obj::generated::bpf_insn::set_src_reg(&mut self, val: aya_obj::generated::__u8)
pub unsafe fn aya_obj::generated::bpf_insn::set_src_reg_raw(this: *mut Self, val: aya_obj::generated::__u8)
pub fn aya_obj::generated::bpf_insn::src_reg(&self) -> aya_obj::generated::__u8
pub unsafe fn aya_obj::generated::bpf_insn::src_reg_raw(this: *const Self) -> aya_obj::generated::__u8
impl core::clone::Clone for aya_obj::generated::bpf_insn
pub fn aya_obj::generated::bpf_insn::clone(&self) -> aya_obj::generated::bpf_insn
impl core::fmt::Debug for aya_obj::generated::bpf_insn
@ -5745,8 +5799,10 @@ pub aya_obj::generated::bpf_prog_info::xlated_prog_insns: aya_obj::generated::__
pub aya_obj::generated::bpf_prog_info::xlated_prog_len: aya_obj::generated::__u32
impl aya_obj::generated::bpf_prog_info
pub fn aya_obj::generated::bpf_prog_info::gpl_compatible(&self) -> aya_obj::generated::__u32
pub unsafe fn aya_obj::generated::bpf_prog_info::gpl_compatible_raw(this: *const Self) -> aya_obj::generated::__u32
pub fn aya_obj::generated::bpf_prog_info::new_bitfield_1(gpl_compatible: aya_obj::generated::__u32) -> aya_obj::generated::__BindgenBitfieldUnit<[u8; 4]>
pub fn aya_obj::generated::bpf_prog_info::set_gpl_compatible(&mut self, val: aya_obj::generated::__u32)
pub unsafe fn aya_obj::generated::bpf_prog_info::set_gpl_compatible_raw(this: *mut Self, val: aya_obj::generated::__u32)
impl core::clone::Clone for aya_obj::generated::bpf_prog_info
pub fn aya_obj::generated::bpf_prog_info::clone(&self) -> aya_obj::generated::bpf_prog_info
impl core::fmt::Debug for aya_obj::generated::bpf_prog_info
@ -6221,82 +6277,158 @@ pub aya_obj::generated::perf_event_attr::size: aya_obj::generated::__u32
pub aya_obj::generated::perf_event_attr::type_: aya_obj::generated::__u32
impl aya_obj::generated::perf_event_attr
pub fn aya_obj::generated::perf_event_attr::__reserved_1(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::__reserved_1_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::aux_output(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::aux_output_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::bpf_event(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::bpf_event_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::build_id(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::build_id_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::cgroup(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::cgroup_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::comm(&self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::comm_exec(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::comm_exec_raw(this: *const Self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::comm_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::context_switch(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::context_switch_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::disabled(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::disabled_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::enable_on_exec(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::enable_on_exec_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::exclude_callchain_kernel(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::exclude_callchain_kernel_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::exclude_callchain_user(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::exclude_callchain_user_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::exclude_guest(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::exclude_guest_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::exclude_host(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::exclude_host_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::exclude_hv(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::exclude_hv_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::exclude_idle(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::exclude_idle_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::exclude_kernel(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::exclude_kernel_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::exclude_user(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::exclude_user_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::exclusive(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::exclusive_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::freq(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::freq_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::inherit(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::inherit_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::inherit_stat(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::inherit_stat_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::inherit_thread(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::inherit_thread_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::ksymbol(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::ksymbol_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::mmap(&self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::mmap2(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::mmap2_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::mmap_data(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::mmap_data_raw(this: *const Self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::mmap_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::namespaces(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::namespaces_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::new_bitfield_1(disabled: aya_obj::generated::__u64, inherit: aya_obj::generated::__u64, pinned: aya_obj::generated::__u64, exclusive: aya_obj::generated::__u64, exclude_user: aya_obj::generated::__u64, exclude_kernel: aya_obj::generated::__u64, exclude_hv: aya_obj::generated::__u64, exclude_idle: aya_obj::generated::__u64, mmap: aya_obj::generated::__u64, comm: aya_obj::generated::__u64, freq: aya_obj::generated::__u64, inherit_stat: aya_obj::generated::__u64, enable_on_exec: aya_obj::generated::__u64, task: aya_obj::generated::__u64, watermark: aya_obj::generated::__u64, precise_ip: aya_obj::generated::__u64, mmap_data: aya_obj::generated::__u64, sample_id_all: aya_obj::generated::__u64, exclude_host: aya_obj::generated::__u64, exclude_guest: aya_obj::generated::__u64, exclude_callchain_kernel: aya_obj::generated::__u64, exclude_callchain_user: aya_obj::generated::__u64, mmap2: aya_obj::generated::__u64, comm_exec: aya_obj::generated::__u64, use_clockid: aya_obj::generated::__u64, context_switch: aya_obj::generated::__u64, write_backward: aya_obj::generated::__u64, namespaces: aya_obj::generated::__u64, ksymbol: aya_obj::generated::__u64, bpf_event: aya_obj::generated::__u64, aux_output: aya_obj::generated::__u64, cgroup: aya_obj::generated::__u64, text_poke: aya_obj::generated::__u64, build_id: aya_obj::generated::__u64, inherit_thread: aya_obj::generated::__u64, remove_on_exec: aya_obj::generated::__u64, sigtrap: aya_obj::generated::__u64, __reserved_1: aya_obj::generated::__u64) -> aya_obj::generated::__BindgenBitfieldUnit<[u8; 8]>
pub fn aya_obj::generated::perf_event_attr::pinned(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::pinned_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::precise_ip(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::precise_ip_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::remove_on_exec(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::remove_on_exec_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::sample_id_all(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::sample_id_all_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::set___reserved_1(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set___reserved_1_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_aux_output(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_aux_output_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_bpf_event(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_bpf_event_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_build_id(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_build_id_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_cgroup(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_cgroup_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_comm(&mut self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_comm_exec(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_comm_exec_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_comm_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_context_switch(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_context_switch_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_disabled(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_disabled_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_enable_on_exec(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_enable_on_exec_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_exclude_callchain_kernel(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_exclude_callchain_kernel_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_exclude_callchain_user(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_exclude_callchain_user_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_exclude_guest(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_exclude_guest_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_exclude_host(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_exclude_host_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_exclude_hv(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_exclude_hv_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_exclude_idle(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_exclude_idle_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_exclude_kernel(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_exclude_kernel_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_exclude_user(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_exclude_user_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_exclusive(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_exclusive_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_freq(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_freq_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_inherit(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_inherit_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_inherit_stat(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_inherit_stat_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_inherit_thread(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_inherit_thread_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_ksymbol(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_ksymbol_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_mmap(&mut self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_mmap2(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_mmap2_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_mmap_data(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_mmap_data_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_mmap_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_namespaces(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_namespaces_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_pinned(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_pinned_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_precise_ip(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_precise_ip_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_remove_on_exec(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_remove_on_exec_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_sample_id_all(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_sample_id_all_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_sigtrap(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_sigtrap_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_task(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_task_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_text_poke(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_text_poke_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_use_clockid(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_use_clockid_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_watermark(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_watermark_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::set_write_backward(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_attr::set_write_backward_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_attr::sigtrap(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::sigtrap_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::task(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::task_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::text_poke(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::text_poke_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::use_clockid(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::use_clockid_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::watermark(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::watermark_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_attr::write_backward(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_attr::write_backward_raw(this: *const Self) -> aya_obj::generated::__u64
impl core::clone::Clone for aya_obj::generated::perf_event_attr
pub fn aya_obj::generated::perf_event_attr::clone(&self) -> aya_obj::generated::perf_event_attr
impl core::marker::Copy for aya_obj::generated::perf_event_attr
@ -6428,20 +6560,34 @@ pub aya_obj::generated::perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1::_bitfi
pub aya_obj::generated::perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1::_bitfield_align_1: [u64; 0]
impl aya_obj::generated::perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1
pub fn aya_obj::generated::perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1::cap_____res(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1::cap_____res_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1::cap_bit0(&self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1::cap_bit0_is_deprecated(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1::cap_bit0_is_deprecated_raw(this: *const Self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1::cap_bit0_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1::cap_user_rdpmc(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1::cap_user_rdpmc_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1::cap_user_time(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1::cap_user_time_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1::cap_user_time_short(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1::cap_user_time_short_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1::cap_user_time_zero(&self) -> aya_obj::generated::__u64
pub unsafe fn aya_obj::generated::perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1::cap_user_time_zero_raw(this: *const Self) -> aya_obj::generated::__u64
pub fn aya_obj::generated::perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1::new_bitfield_1(cap_bit0: aya_obj::generated::__u64, cap_bit0_is_deprecated: aya_obj::generated::__u64, cap_user_rdpmc: aya_obj::generated::__u64, cap_user_time: aya_obj::generated::__u64, cap_user_time_zero: aya_obj::generated::__u64, cap_user_time_short: aya_obj::generated::__u64, cap_____res: aya_obj::generated::__u64) -> aya_obj::generated::__BindgenBitfieldUnit<[u8; 8]>
pub fn aya_obj::generated::perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1::set_cap_____res(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1::set_cap_____res_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1::set_cap_bit0(&mut self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1::set_cap_bit0_is_deprecated(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1::set_cap_bit0_is_deprecated_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1::set_cap_bit0_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1::set_cap_user_rdpmc(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1::set_cap_user_rdpmc_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1::set_cap_user_time(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1::set_cap_user_time_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1::set_cap_user_time_short(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1::set_cap_user_time_short_raw(this: *mut Self, val: aya_obj::generated::__u64)
pub fn aya_obj::generated::perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1::set_cap_user_time_zero(&mut self, val: aya_obj::generated::__u64)
pub unsafe fn aya_obj::generated::perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1::set_cap_user_time_zero_raw(this: *mut Self, val: aya_obj::generated::__u64)
impl core::clone::Clone for aya_obj::generated::perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1
pub fn aya_obj::generated::perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1::clone(&self) -> aya_obj::generated::perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1
impl core::fmt::Debug for aya_obj::generated::perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1
@ -6671,6 +6817,18 @@ pub const aya_obj::generated::NFPROTO_NUMPROTO: aya_obj::generated::_bindgen_ty_
pub const aya_obj::generated::NFPROTO_UNSPEC: aya_obj::generated::_bindgen_ty_99
pub const aya_obj::generated::NLMSG_ALIGNTO: u32
pub const aya_obj::generated::NR_BTF_KINDS: aya_obj::generated::_bindgen_ty_42
pub const aya_obj::generated::PERF_EVENT_IOC_DISABLE: u32
pub const aya_obj::generated::PERF_EVENT_IOC_ENABLE: u32
pub const aya_obj::generated::PERF_EVENT_IOC_ID: u32
pub const aya_obj::generated::PERF_EVENT_IOC_MODIFY_ATTRIBUTES: u32
pub const aya_obj::generated::PERF_EVENT_IOC_PAUSE_OUTPUT: u32
pub const aya_obj::generated::PERF_EVENT_IOC_PERIOD: u32
pub const aya_obj::generated::PERF_EVENT_IOC_QUERY_BPF: u32
pub const aya_obj::generated::PERF_EVENT_IOC_REFRESH: u32
pub const aya_obj::generated::PERF_EVENT_IOC_RESET: u32
pub const aya_obj::generated::PERF_EVENT_IOC_SET_BPF: u32
pub const aya_obj::generated::PERF_EVENT_IOC_SET_FILTER: u32
pub const aya_obj::generated::PERF_EVENT_IOC_SET_OUTPUT: u32
pub const aya_obj::generated::PERF_FLAG_FD_CLOEXEC: u32
pub const aya_obj::generated::PERF_FLAG_FD_NO_GROUP: u32
pub const aya_obj::generated::PERF_FLAG_FD_OUTPUT: u32
