Merge pull request #650 from aya-rs/test-cleanup

Remove async feature; misc test cleanup
reviewable/pr629/r30
Tamir Duberstein 2 years ago committed by GitHub
commit 61608e6458
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -1,7 +1,5 @@
[alias] [alias]
xtask = "run --package xtask --" xtask = "run --package xtask --"
build-bpfel = "build -Zbuild-std=core --target=bpfel-unknown-none"
build-bpfeb = "build -Zbuild-std=core --target=bpfeb-unknown-none"
[target.armv7-unknown-linux-gnueabi] [target.armv7-unknown-linux-gnueabi]
linker = "arm-linux-gnueabi-gcc" linker = "arm-linux-gnueabi-gcc"

@ -16,12 +16,16 @@ env:
jobs: jobs:
build: build:
strategy: strategy:
fail-fast: false
matrix: matrix:
arch: arch:
- x86_64 - x86_64
- aarch64 - aarch64
- arm - arm
- riscv64 - riscv64
target:
- bpfel-unknown-none
- bpfeb-unknown-none
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
steps: steps:
@ -37,11 +41,12 @@ jobs:
- name: Prereqs - name: Prereqs
run: cargo install bpf-linker run: cargo install bpf-linker
- uses: taiki-e/install-action@cargo-hack
- name: Build - name: Build
env: env:
CARGO_CFG_BPF_TARGET_ARCH: ${{ matrix.arch }} CARGO_CFG_BPF_TARGET_ARCH: ${{ matrix.arch }}
run: | run: |
- cargo build-bpfel -p aya-bpf --verbose
- cargo build-bpfeb -p aya-bpf --verbose
- cargo build-bpfel -p aya-log-ebpf --verbose
- cargo build-bpfeb -p aya-log-ebpf --verbose
+ cargo hack build --package aya-bpf --package aya-log-ebpf \
+   --feature-powerset \
+   --target ${{ matrix.target }} \
+   -Z build-std=core
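The workflow exports CARGO_CFG_BPF_TARGET_ARCH so the eBPF crates can select architecture-specific code even though the compilation target is bpfel/bpfeb-unknown-none. A minimal sketch of the kind of build-script plumbing this relies on (illustrative only, not aya-bpf's exact build.rs):

    // build.rs (sketch): turn the CARGO_CFG_BPF_TARGET_ARCH environment
    // variable set in CI into a `bpf_target_arch` cfg for the crate sources.
    use std::env;

    fn main() {
        println!("cargo:rerun-if-env-changed=CARGO_CFG_BPF_TARGET_ARCH");
        if let Ok(arch) = env::var("CARGO_CFG_BPF_TARGET_ARCH") {
            println!("cargo:rustc-cfg=bpf_target_arch=\"{arch}\"");
        }
    }

Crate code can then gate per-architecture items with cfgs such as #[cfg(bpf_target_arch = "x86_64")].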

@ -16,6 +16,7 @@ env:
jobs: jobs:
build-test: build-test:
strategy: strategy:
fail-fast: false
matrix: matrix:
arch: arch:
- x86_64-unknown-linux-gnu - x86_64-unknown-linux-gnu
@ -29,20 +30,33 @@ jobs:
- uses: dtolnay/rust-toolchain@master - uses: dtolnay/rust-toolchain@master
with: with:
toolchain: stable toolchain: stable
+         targets: ${{ matrix.arch }}
+     - uses: Swatinem/rust-cache@v2
      - uses: taiki-e/install-action@cargo-hack
-     - uses: Swatinem/rust-cache@v2
-     - name: Prereqs
-       run: cargo install cross --git https://github.com/cross-rs/cross
+     - name: Check
+       run: cargo hack check --all-targets --feature-powerset --ignore-private
+     - uses: taiki-e/setup-cross-toolchain-action@v1
+       with:
+         target: ${{ matrix.arch }}
      - name: Build
-       run: cross build --verbose --target ${{matrix.arch}}
-     - name: Run test
+       run: |
+         cargo hack build --all-targets --feature-powerset \
+           --exclude aya-bpf \
+           --exclude aya-bpf-bindings \
+           --exclude aya-log-ebpf \
+           --exclude integration-ebpf \
+           --workspace
+     - name: Test
        env:
          RUST_BACKTRACE: full
        run: |
-         cross test --verbose --target ${{matrix.arch}}
+         cargo hack test --all-targets --feature-powerset \
+           --exclude aya-bpf \
+           --exclude aya-bpf-bindings \
+           --exclude aya-log-ebpf \
+           --exclude integration-ebpf \
+           --exclude integration-test \
+           --workspace

@ -25,11 +25,14 @@ jobs:
toolchain: nightly toolchain: nightly
components: rustfmt, clippy, miri, rust-src components: rustfmt, clippy, miri, rust-src
- uses: Swatinem/rust-cache@v2
- name: Check formatting - name: Check formatting
run: cargo fmt --all -- --check run: cargo fmt --all -- --check
- uses: taiki-e/install-action@cargo-hack
- name: Run clippy - name: Run clippy
run: cargo clippy --all-targets --workspace -- --deny warnings run: cargo hack clippy --all-targets --feature-powerset --workspace -- --deny warnings
- name: Run miri - name: Run miri
run: cargo miri test --all-targets run: cargo miri test --all-targets

@ -29,6 +29,6 @@ jobs:
with: with:
tag_name: ${{ github.ref }} tag_name: ${{ github.ref }}
release_name: ${{ github.ref }} release_name: ${{ github.ref }}
body: ${{steps.github_release.outputs.changelog}} body: ${{ steps.github_release.outputs.changelog }}
env: env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

@ -1,11 +1,11 @@
[workspace] [workspace]
members = [ members = [
"aya", "aya",
"aya-obj",
"aya-tool",
"aya-log", "aya-log",
"aya-log-common", "aya-log-common",
"aya-log-parser", "aya-log-parser",
"aya-obj",
"aya-tool",
"test/integration-test", "test/integration-test",
"xtask", "xtask",
@ -19,15 +19,26 @@ members = [
"bpf/aya-log-ebpf", "bpf/aya-log-ebpf",
"test/integration-ebpf", "test/integration-ebpf",
] ]
resolver = "2" resolver = "2"
default-members = [ default-members = [
"aya", "aya",
"aya-bpf-macros",
"aya-log",
"aya-log-common",
"aya-log-parser",
"aya-obj", "aya-obj",
"aya-tool", "aya-tool",
"aya-log", # test/integration-test is omitted; it must be built with xtask.
"xtask",
"aya-bpf-macros", "aya-bpf-macros",
"aya-log-ebpf-macros", "aya-log-ebpf-macros",
# ebpf crates are omitted; they must be built with:
# --target bpfe{b,l}-unknown-none
# CARGO_CFG_BPF_TARGET_ARCH={x86_64,aarch64,arm,riscv64}
] ]
[profile.dev] [profile.dev]

@ -298,8 +298,8 @@ mod test {
#[test] #[test]
fn log_value_length_sufficient() { fn log_value_length_sufficient() {
assert!( assert!(
LOG_BUF_CAPACITY >= LogValueLength::MAX.into(), LOG_BUF_CAPACITY <= LogValueLength::MAX.into(),
"{} < {}", "{} > {}",
LOG_BUF_CAPACITY, LOG_BUF_CAPACITY,
LogValueLength::MAX LogValueLength::MAX
); );
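The corrected assertion encodes the invariant that any value length that fits in the log buffer is representable by LogValueLength. A small sketch of why the direction matters; the concrete type width and capacity below are assumptions for illustration, not taken from the diff:

    // Sketch: value lengths are stored as a LogValueLength, so the conversion
    // below must never fail for lengths up to the buffer capacity. That holds
    // exactly when LOG_BUF_CAPACITY <= LogValueLength::MAX.
    type LogValueLength = u16; // assumed width, for illustration
    const LOG_BUF_CAPACITY: usize = 8192; // assumed capacity, for illustration

    fn encode_len(len: usize) -> LogValueLength {
        assert!(len <= LOG_BUF_CAPACITY);
        LogValueLength::try_from(len).expect("LOG_BUF_CAPACITY fits in LogValueLength")
    }

    fn main() {
        assert_eq!(encode_len(8192), 8192);
    }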

@ -163,8 +163,7 @@ mod test {
Fragment::Parameter(Parameter { Fragment::Parameter(Parameter {
hint: DisplayHint::Ip hint: DisplayHint::Ip
}), }),
Fragment::Literal(" lmao ".into()), Fragment::Literal(" lmao {} {something}".into()),
Fragment::Literal(" {{}} {{something}}".into()),
]) ])
); );
assert!(parse("foo {:}").is_err()); assert!(parse("foo {:}").is_err());

@ -68,7 +68,7 @@ use thiserror::Error;
use aya::{ use aya::{
maps::{ maps::{
perf::{AsyncPerfEventArray, PerfBufferError}, perf::{AsyncPerfEventArray, Events, PerfBufferError},
MapError, MapError,
}, },
util::online_cpus, util::online_cpus,
@ -121,12 +121,10 @@ impl BpfLogger {
let mut buffers = vec![BytesMut::with_capacity(LOG_BUF_CAPACITY); 10]; let mut buffers = vec![BytesMut::with_capacity(LOG_BUF_CAPACITY); 10];
loop { loop {
- let events = buf.read_events(&mut buffers).await.unwrap();
- #[allow(clippy::needless_range_loop)]
- for i in 0..events.read {
-     let buf = &mut buffers[i];
-     log_buf(buf, &*log).unwrap();
+ let Events { read, lost: _ } = buf.read_events(&mut buffers).await.unwrap();
+ for buf in buffers.iter().take(read) {
+     log_buf(buf.as_ref(), &*log).unwrap();
} }
} }
}); });
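The logger now destructures Events and walks only the buffers that were actually filled. The same destructuring lets a consumer report the lost-event count instead of discarding it; a hedged sketch against the API shown in this diff (error handling and the event format are elided):

    use aya::maps::{
        perf::{AsyncPerfEventArrayBuffer, Events, PerfBufferError},
        MapData,
    };
    use bytes::BytesMut;
    use std::borrow::{Borrow, BorrowMut};

    // Sketch: drain one per-CPU buffer, surfacing lost events rather than
    // ignoring them with `lost: _`.
    async fn drain<T: BorrowMut<MapData> + Borrow<MapData>>(
        buf: &mut AsyncPerfEventArrayBuffer<T>,
    ) -> Result<(), PerfBufferError> {
        let mut buffers = vec![BytesMut::with_capacity(512); 10];
        loop {
            let Events { read, lost } = buf.read_events(&mut buffers).await?;
            if lost > 0 {
                eprintln!("perf ring buffer overflowed; {lost} events lost");
            }
            for buf in buffers.iter().take(read) {
                let _event: &[u8] = buf.as_ref(); // hand off to a decoder here
            }
        }
    }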

@ -37,7 +37,10 @@
//! let bytes = std::fs::read("program.o").unwrap(); //! let bytes = std::fs::read("program.o").unwrap();
//! let mut object = Object::parse(&bytes).unwrap(); //! let mut object = Object::parse(&bytes).unwrap();
//! // Relocate the programs //! // Relocate the programs
//! #[cfg(feature = "std")]
//! let text_sections = std::collections::HashSet::new(); //! let text_sections = std::collections::HashSet::new();
//! #[cfg(not(feature = "std"))]
//! let text_sections = hashbrown::HashSet::new();
//! object.relocate_calls(&text_sections).unwrap(); //! object.relocate_calls(&text_sections).unwrap();
//! object.relocate_maps(std::iter::empty(), &text_sections).unwrap(); //! object.relocate_maps(std::iter::empty(), &text_sections).unwrap();
//! //!
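With aya-obj usable without std, callers pick the HashSet implementation via the std feature, as the doc example now shows. One way to avoid repeating the cfg at every call site is a feature-gated alias; a sketch, not necessarily how aya-obj spells it internally:

    // Sketch: one alias, selected by the `std` feature, used everywhere else.
    #[cfg(feature = "std")]
    type HashSet<T> = std::collections::HashSet<T>;
    #[cfg(not(feature = "std"))]
    type HashSet<T> = hashbrown::HashSet<T>;

    fn empty_text_sections() -> HashSet<usize> {
        HashSet::default()
    }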

@ -2,7 +2,7 @@
name = "aya" name = "aya"
version = "0.11.0" version = "0.11.0"
description = "An eBPF library with a focus on developer experience and operability." description = "An eBPF library with a focus on developer experience and operability."
keywords = ["ebpf", "bpf", "linux", "kernel"] keywords = ["bpf", "ebpf", "kernel", "linux"]
license = "MIT OR Apache-2.0" license = "MIT OR Apache-2.0"
authors = ["The Aya Contributors"] authors = ["The Aya Contributors"]
repository = "https://github.com/aya-rs/aya" repository = "https://github.com/aya-rs/aya"
@ -19,30 +19,25 @@ lazy_static = "1"
libc = { version = "0.2.105" } libc = { version = "0.2.105" }
log = "0.4" log = "0.4"
object = { version = "0.31", default-features = false, features = [ object = { version = "0.31", default-features = false, features = [
"std",
"read_core",
"elf", "elf",
"read_core",
"std",
] } ] }
parking_lot = { version = "0.12.0", features = ["send_guard"] } parking_lot = { version = "0.12.0", features = ["send_guard"] }
text_io = "0.1.12" text_io = "0.1.12"
thiserror = "1" thiserror = "1"
tokio = { version = "1.24.0", features = [ tokio = { version = "1.24.0", features = ["rt"], optional = true }
"macros",
"rt",
"rt-multi-thread",
"net",
], optional = true }
[dev-dependencies] [dev-dependencies]
futures = { version = "0.3.12", default-features = false, features = ["std"] } futures = { version = "0.3.12", default-features = false, features = ["std"] }
matches = "0.1.8" matches = "0.1.8"
tempfile = "3"
[features] [features]
default = [] default = []
- async = []
- async_tokio = ["tokio", "async"]
- async_std = ["async-io", "async"]
+ async_tokio = ["tokio/net"]
+ async_std = ["async-io"]
[package.metadata.docs.rs] [package.metadata.docs.rs]
all-features = true all-features = true
rustdoc-args = ["--cfg", "docsrs", "-D", "warnings"] rustdoc-args = ["--cfg", "-D", "docsrs", "warnings"]

@ -77,8 +77,8 @@ pub use array::{Array, PerCpuArray, ProgramArray};
pub use bloom_filter::BloomFilter; pub use bloom_filter::BloomFilter;
pub use hash_map::{HashMap, PerCpuHashMap}; pub use hash_map::{HashMap, PerCpuHashMap};
pub use lpm_trie::LpmTrie; pub use lpm_trie::LpmTrie;
#[cfg(feature = "async")] #[cfg(any(feature = "async_tokio", feature = "async_std"))]
#[cfg_attr(docsrs, doc(cfg(feature = "async")))] #[cfg_attr(docsrs, doc(cfg(any(feature = "async_tokio", feature = "async_std"))))]
pub use perf::AsyncPerfEventArray; pub use perf::AsyncPerfEventArray;
pub use perf::PerfEventArray; pub use perf::PerfEventArray;
pub use queue::Queue; pub use queue::Queue;
@ -349,8 +349,8 @@ impl_try_from_map!(
StackTraceMap from Map::StackTraceMap, StackTraceMap from Map::StackTraceMap,
); );
#[cfg(feature = "async")] #[cfg(any(feature = "async_tokio", feature = "async_std"))]
#[cfg_attr(docsrs, doc(cfg(feature = "async")))] #[cfg_attr(docsrs, doc(cfg(any(feature = "async_tokio", feature = "async_std"))))]
impl_try_from_map!( impl_try_from_map!(
AsyncPerfEventArray from Map::PerfEventArray, AsyncPerfEventArray from Map::PerfEventArray,
); );

@ -4,6 +4,10 @@ use std::{
os::fd::{AsRawFd, RawFd}, os::fd::{AsRawFd, RawFd},
}; };
// See https://doc.rust-lang.org/cargo/reference/features.html#mutually-exclusive-features.
//
// We should eventually split async functionality out into separate crates "aya-async-tokio" and
// "async-async-std". Presently we arbitrarily choose tokio over async-std when both are requested.
#[cfg(all(not(feature = "async_tokio"), feature = "async_std"))] #[cfg(all(not(feature = "async_tokio"), feature = "async_std"))]
use async_io::Async; use async_io::Async;
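The comment documents the workaround for Cargo features not being mutually exclusive: when both async_tokio and async_std are enabled, the tokio path wins. A generic sketch of that priority pattern (the block_on helpers are illustrative, not aya APIs):

    // Sketch: exactly one definition compiles, even with both features enabled,
    // because the async-std arm also requires that async_tokio is *not* enabled.
    #[cfg(feature = "async_tokio")]
    fn block_on<F: std::future::Future>(fut: F) -> F::Output {
        tokio::runtime::Builder::new_current_thread()
            .enable_all()
            .build()
            .unwrap()
            .block_on(fut)
    }

    #[cfg(all(not(feature = "async_tokio"), feature = "async_std"))]
    fn block_on<F: std::future::Future>(fut: F) -> F::Output {
        async_io::block_on(fut)
    }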
@ -98,16 +102,17 @@ impl<T: BorrowMut<MapData> + Borrow<MapData>> AsyncPerfEventArray<T> {
index: u32, index: u32,
page_count: Option<usize>, page_count: Option<usize>,
) -> Result<AsyncPerfEventArrayBuffer<T>, PerfBufferError> { ) -> Result<AsyncPerfEventArrayBuffer<T>, PerfBufferError> {
let buf = self.perf_map.open(index, page_count)?; let Self { perf_map } = self;
let buf = perf_map.open(index, page_count)?;
let fd = buf.as_raw_fd(); let fd = buf.as_raw_fd();
Ok(AsyncPerfEventArrayBuffer { Ok(AsyncPerfEventArrayBuffer {
buf, buf,
#[cfg(feature = "async_tokio")] #[cfg(feature = "async_tokio")]
async_fd: AsyncFd::new(fd)?, async_tokio_fd: AsyncFd::new(fd)?,
#[cfg(all(not(feature = "async_tokio"), feature = "async_std"))] #[cfg(all(not(feature = "async_tokio"), feature = "async_std"))]
async_fd: Async::new(fd)?, async_std_fd: Async::new(fd)?,
}) })
} }
} }
@ -131,13 +136,12 @@ pub struct AsyncPerfEventArrayBuffer<T> {
buf: PerfEventArrayBuffer<T>, buf: PerfEventArrayBuffer<T>,
#[cfg(feature = "async_tokio")] #[cfg(feature = "async_tokio")]
async_fd: AsyncFd<RawFd>, async_tokio_fd: AsyncFd<RawFd>,
#[cfg(all(not(feature = "async_tokio"), feature = "async_std"))] #[cfg(all(not(feature = "async_tokio"), feature = "async_std"))]
async_fd: Async<RawFd>, async_std_fd: Async<RawFd>,
} }
#[cfg(feature = "async_tokio")]
impl<T: BorrowMut<MapData> + Borrow<MapData>> AsyncPerfEventArrayBuffer<T> { impl<T: BorrowMut<MapData> + Borrow<MapData>> AsyncPerfEventArrayBuffer<T> {
/// Reads events from the buffer. /// Reads events from the buffer.
/// ///
@ -152,46 +156,30 @@ impl<T: BorrowMut<MapData> + Borrow<MapData>> AsyncPerfEventArrayBuffer<T> {
&mut self, &mut self,
buffers: &mut [BytesMut], buffers: &mut [BytesMut],
) -> Result<Events, PerfBufferError> { ) -> Result<Events, PerfBufferError> {
+         let Self {
+             buf,
+             #[cfg(feature = "async_tokio")]
+             async_tokio_fd,
+             #[cfg(all(not(feature = "async_tokio"), feature = "async_std"))]
+             async_std_fd,
+         } = self;
          loop {
-             let mut guard = self.async_fd.readable_mut().await?;
-             match self.buf.read_events(buffers) {
-                 Ok(events) if events.read > 0 || events.lost > 0 => return Ok(events),
-                 Ok(_) => {
-                     guard.clear_ready();
-                     continue;
-                 }
-                 Err(e) => return Err(e),
-             }
-         }
-     }
- }
-
- #[cfg(all(not(feature = "async_tokio"), feature = "async_std"))]
- impl<T: BorrowMut<MapData> + Borrow<MapData>> AsyncPerfEventArrayBuffer<T> {
-     /// Reads events from the buffer.
-     ///
-     /// This method reads events into the provided slice of buffers, filling
-     /// each buffer in order stopping when there are no more events to read or
-     /// all the buffers have been filled.
-     ///
-     /// Returns the number of events read and the number of events lost. Events
-     /// are lost when user space doesn't read events fast enough and the ring
-     /// buffer fills up.
-     pub async fn read_events(
-         &mut self,
-         buffers: &mut [BytesMut],
-     ) -> Result<Events, PerfBufferError> {
-         loop {
-             if !self.buf.readable() {
-                 let _ = self.async_fd.readable().await?;
-             }
-             match self.buf.read_events(buffers) {
-                 Ok(events) if events.read > 0 || events.lost > 0 => return Ok(events),
-                 Ok(_) => continue,
-                 Err(e) => return Err(e),
-             }
+             #[cfg(feature = "async_tokio")]
+             let mut guard = async_tokio_fd.readable_mut().await?;
+             #[cfg(all(not(feature = "async_tokio"), feature = "async_std"))]
+             if !buf.readable() {
+                 async_std_fd.readable().await?;
+             }
+             let events = buf.read_events(buffers)?;
+             const EMPTY: Events = Events { read: 0, lost: 0 };
+             if events != EMPTY {
+                 break Ok(events);
+             }
+             #[cfg(feature = "async_tokio")]
+             guard.clear_ready();
          }
      }
  }

@ -1,14 +1,14 @@
//! Ring buffer types used to receive events from eBPF programs using the linux `perf` API. //! Ring buffer types used to receive events from eBPF programs using the linux `perf` API.
//! //!
//! See the [`PerfEventArray`](crate::maps::PerfEventArray) and [`AsyncPerfEventArray`](crate::maps::perf::AsyncPerfEventArray). //! See the [`PerfEventArray`](crate::maps::PerfEventArray) and [`AsyncPerfEventArray`](crate::maps::perf::AsyncPerfEventArray).
#[cfg(feature = "async")] #[cfg(any(feature = "async_tokio", feature = "async_std"))]
#[cfg_attr(docsrs, doc(cfg(feature = "async")))] #[cfg_attr(docsrs, doc(cfg(any(feature = "async_tokio", feature = "async_std"))))]
mod async_perf_event_array; mod async_perf_event_array;
mod perf_buffer; mod perf_buffer;
mod perf_event_array; mod perf_event_array;
#[cfg(feature = "async")] #[cfg(any(feature = "async_tokio", feature = "async_std"))]
#[cfg_attr(docsrs, doc(cfg(feature = "async")))] #[cfg_attr(docsrs, doc(cfg(any(feature = "async_tokio", feature = "async_std"))))]
pub use async_perf_event_array::*; pub use async_perf_event_array::*;
pub use perf_buffer::*; pub use perf_buffer::*;
pub use perf_event_array::*; pub use perf_event_array::*;

@ -17,10 +17,6 @@ use crate::{
sys::{bpf_get_object, bpf_pin_object, bpf_prog_detach}, sys::{bpf_get_object, bpf_pin_object, bpf_prog_detach},
}; };
// for docs link
#[allow(unused)]
use crate::programs::cgroup_skb::CgroupSkb;
/// A Link. /// A Link.
pub trait Link: std::fmt::Debug + 'static { pub trait Link: std::fmt::Debug + 'static {
/// Unique Id /// Unique Id
@ -88,8 +84,8 @@ pub struct FdLinkId(pub(crate) RawFd);
/// A file descriptor link. /// A file descriptor link.
/// ///
/// Fd links are returned directly when attaching some program types (for /// Fd links are returned directly when attaching some program types (for
/// instance [`CgroupSkb`]), or can be obtained by converting other link /// instance [`crate::programs::cgroup_skb::CgroupSkb`]), or can be obtained by
/// types (see the `TryFrom` implementations). /// converting other link types (see the `TryFrom` implementations).
/// ///
/// An important property of fd links is that they can be pinned. Pinning /// An important property of fd links is that they can be pinned. Pinning
/// can be used keep a link attached "in background" even after the program /// can be used keep a link attached "in background" even after the program
@ -358,7 +354,8 @@ pub enum LinkError {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use matches::assert_matches; use matches::assert_matches;
use std::{cell::RefCell, env, fs::File, mem, os::unix::io::AsRawFd, rc::Rc}; use std::{cell::RefCell, fs::File, mem, os::unix::io::AsRawFd, rc::Rc};
use tempfile::tempdir;
use crate::{programs::ProgramError, sys::override_syscall}; use crate::{programs::ProgramError, sys::override_syscall};
@ -493,8 +490,8 @@ mod tests {
#[test] #[test]
#[cfg_attr(miri, ignore)] #[cfg_attr(miri, ignore)]
fn test_pin() { fn test_pin() {
let dir = env::temp_dir(); let dir = tempdir().unwrap();
let f1 = File::create(dir.join("f1")).expect("unable to create file in tmpdir"); let f1 = File::create(dir.path().join("f1")).expect("unable to create file in tmpdir");
let fd_link = FdLink::new(f1.as_raw_fd()); let fd_link = FdLink::new(f1.as_raw_fd());
// leak the fd, it will get closed when our pinned link is dropped // leak the fd, it will get closed when our pinned link is dropped
@ -503,11 +500,12 @@ mod tests {
// override syscall to allow for pin to happen in our tmpdir // override syscall to allow for pin to happen in our tmpdir
override_syscall(|_| Ok(0)); override_syscall(|_| Ok(0));
// create the file that would have happened as a side-effect of a real pin operation // create the file that would have happened as a side-effect of a real pin operation
File::create(dir.join("f1-pin")).expect("unable to create file in tmpdir"); let pin = dir.path().join("f1-pin");
assert!(dir.join("f1-pin").exists()); File::create(&pin).expect("unable to create file in tmpdir");
assert!(pin.exists());
let pinned_link = fd_link.pin(dir.join("f1-pin")).expect("pin failed"); let pinned_link = fd_link.pin(&pin).expect("pin failed");
pinned_link.unpin().expect("unpin failed"); pinned_link.unpin().expect("unpin failed");
assert!(!dir.join("f1-pin").exists()); assert!(!pin.exists());
} }
} }
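Switching from env::temp_dir() to tempfile::tempdir() gives each test run its own directory that is removed on drop, so parallel test runs can't collide on the shared "f1-pin" path. A small standalone illustration of that behaviour:

    use tempfile::tempdir;

    fn main() -> std::io::Result<()> {
        let dir = tempdir()?; // unique, private directory
        let pin = dir.path().join("f1-pin");
        std::fs::File::create(&pin)?;
        assert!(pin.exists());
        drop(dir); // the directory and its contents are removed here
        assert!(!pin.exists());
        Ok(())
    }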

@ -585,7 +585,6 @@ pub unsafe fn bpf_probe_read_kernel_str_bytes(
/// # Errors /// # Errors
/// ///
/// On failure, this function returns a negative value wrapped in an `Err`. /// On failure, this function returns a negative value wrapped in an `Err`.
#[allow(clippy::fn_to_numeric_cast_with_truncation)]
#[inline] #[inline]
pub unsafe fn bpf_probe_write_user<T>(dst: *mut T, src: *const T) -> Result<(), c_long> { pub unsafe fn bpf_probe_write_user<T>(dst: *mut T, src: *const T) -> Result<(), c_long> {
let ret = gen::bpf_probe_write_user( let ret = gen::bpf_probe_write_user(

@ -404,8 +404,13 @@ impl SkBuffContext {
/// # Examples /// # Examples
/// ///
/// ```no_run /// ```no_run
/// mod bindings; /// use aya_bpf::programs::SkBuffContext;
/// use bindings::{ethhdr, iphdr, udphdr}; /// # #[allow(non_camel_case_types)]
/// # struct ethhdr {};
/// # #[allow(non_camel_case_types)]
/// # struct iphdr {};
/// # #[allow(non_camel_case_types)]
/// # struct udphdr {};
/// ///
/// const ETH_HLEN: usize = core::mem::size_of::<ethhdr>(); /// const ETH_HLEN: usize = core::mem::size_of::<ethhdr>();
/// const IP_HLEN: usize = core::mem::size_of::<iphdr>(); /// const IP_HLEN: usize = core::mem::size_of::<iphdr>();

@ -161,8 +161,13 @@ impl TcContext {
/// # Examples /// # Examples
/// ///
/// ```no_run /// ```no_run
/// mod bindings; /// use aya_bpf::programs::TcContext;
/// use bindings::{ethhdr, iphdr, udphdr}; /// # #[allow(non_camel_case_types)]
/// # struct ethhdr {};
/// # #[allow(non_camel_case_types)]
/// # struct iphdr {};
/// # #[allow(non_camel_case_types)]
/// # struct udphdr {};
/// ///
/// const ETH_HLEN: usize = core::mem::size_of::<ethhdr>(); /// const ETH_HLEN: usize = core::mem::size_of::<ethhdr>();
/// const IP_HLEN: usize = core::mem::size_of::<iphdr>(); /// const IP_HLEN: usize = core::mem::size_of::<iphdr>();

@ -13,12 +13,15 @@ libc = { version = "0.2.105" }
log = "0.4" log = "0.4"
matches = "0.1.8" matches = "0.1.8"
object = { version = "0.31", default-features = false, features = [ object = { version = "0.31", default-features = false, features = [
"std",
"read_core",
"elf", "elf",
"read_core",
"std",
] } ] }
rbpf = "0.2.0" rbpf = "0.2.0"
tokio = { version = "1.24", default-features = false, features = ["time"] } tokio = { version = "1.24", default-features = false, features = [
"macros",
"time",
] }
[build-dependencies] [build-dependencies]
cargo_metadata = "0.15.4" cargo_metadata = "0.15.4"
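The integration-test crate now enables tokio's "macros" feature alongside "time"; "macros" is what provides select!, join!, and the #[tokio::main]/#[tokio::test] attributes. A small illustration of a macro that needs it (whether the integration tests use exactly this form is not shown in the diff):

    use std::time::Duration;
    use tokio::time::sleep;

    // Sketch: `select!` comes from the "macros" feature; `sleep` from "time".
    async fn first_tick() {
        tokio::select! {
            _ = sleep(Duration::from_millis(10)) => println!("short timer fired"),
            _ = sleep(Duration::from_millis(20)) => println!("long timer fired"),
        }
    }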
