From c06a55088bd09d49c73aa32cd3fef3ed14778721 Mon Sep 17 00:00:00 2001 From: Bryan Baron Date: Mon, 11 Dec 2023 11:50:24 -0500 Subject: [PATCH 001/162] coop: fix typo in docs for `task::unconstrained` (#6212) --- tokio/src/task/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio/src/task/mod.rs b/tokio/src/task/mod.rs index 5dd0584338d..6b352b16fa9 100644 --- a/tokio/src/task/mod.rs +++ b/tokio/src/task/mod.rs @@ -283,7 +283,7 @@ //! //! #### unconstrained //! -//! If necessary, [`task::unconstrained`] lets you opt a future out of of Tokio's cooperative +//! If necessary, [`task::unconstrained`] lets you opt a future out of Tokio's cooperative //! scheduling. When a future is wrapped with `unconstrained`, it will never be forced to yield to //! Tokio. For example: //! From 4aa7bbff4c70b7c43c931b1e18af5048da191f40 Mon Sep 17 00:00:00 2001 From: oliver <151407407+kwfn@users.noreply.github.com> Date: Mon, 11 Dec 2023 15:22:55 -0400 Subject: [PATCH 002/162] chore: typo fixes (#6213) Co-authored-by: kwfn --- benches/copy.rs | 2 +- tokio-macros/src/entry.rs | 2 +- tokio/CHANGELOG.md | 6 +++--- tokio/src/process/mod.rs | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/benches/copy.rs b/benches/copy.rs index 1da55f349f7..478cd6e8a5a 100644 --- a/benches/copy.rs +++ b/benches/copy.rs @@ -29,7 +29,7 @@ const WRITE_SERVICE_PERIOD: Duration = Duration::from_millis(20); // because another writer claimed the buffer space const PROBABILITY_FLUSH_WAIT: f64 = 0.1; -/// A slow writer that aims to simulate HDD behaviour under heavy load. +/// A slow writer that aims to simulate HDD behavior under heavy load. /// /// There is a limited buffer, which is fully drained on the next write after /// a time limit is reached. 
Flush waits for the time limit to be reached diff --git a/tokio-macros/src/entry.rs b/tokio-macros/src/entry.rs index 3706026d2c4..ed782ad38f6 100644 --- a/tokio-macros/src/entry.rs +++ b/tokio-macros/src/entry.rs @@ -507,7 +507,7 @@ impl ItemFn { // Inner attributes require extra care, since they're not supported on // blocks (which is what we're expanded into) we instead lift them - // outside of the function. This matches the behaviour of `syn`. + // outside of the function. This matches the behavior of `syn`. for mut attr in self.inner_attrs { attr.style = syn::AttrStyle::Outer; attr.to_tokens(&mut tokens); diff --git a/tokio/CHANGELOG.md b/tokio/CHANGELOG.md index 7a0ccde4380..147b62d332d 100644 --- a/tokio/CHANGELOG.md +++ b/tokio/CHANGELOG.md @@ -1616,7 +1616,7 @@ Forward ports 1.5.1 fixes. - net: add ready/try methods to `NamedPipe{Client,Server}` ([#3866], [#3899]) - sync: add `watch::Receiver::borrow_and_update` ([#3813]) - sync: implement `From` for `OnceCell` ([#3877]) -- time: allow users to specify Interval behaviour when delayed ([#3721]) +- time: allow users to specify Interval behavior when delayed ([#3721]) ### Added (unstable) @@ -1630,7 +1630,7 @@ Forward ports 1.5.1 fixes. - doc: document cancellation safety ([#3900]) - time: add wait alias to sleep ([#3897]) -- time: document auto-advancing behaviour of runtime ([#3763]) +- time: document auto-advancing behavior of runtime ([#3763]) [#3163]: https://github.com/tokio-rs/tokio/pull/3163 [#3721]: https://github.com/tokio-rs/tokio/pull/3721 @@ -1877,7 +1877,7 @@ a kernel bug. 
([#3803]) - doc: doc aliases for pre-1.0 function names ([#3523]) - io: fix typos ([#3541]) -- io: note the EOF behaviour of `read_until` ([#3536]) +- io: note the EOF behavior of `read_until` ([#3536]) - io: update `AsyncRead::poll_read` doc ([#3557]) - net: update `UdpSocket` splitting doc ([#3517]) - runtime: add link to `LocalSet` on `new_current_thread` ([#3508]) diff --git a/tokio/src/process/mod.rs b/tokio/src/process/mod.rs index 60c34304796..a688f63f213 100644 --- a/tokio/src/process/mod.rs +++ b/tokio/src/process/mod.rs @@ -747,7 +747,7 @@ impl Command { /// tokio's MSRV is sufficiently new. See [the documentation on /// unstable features][unstable] for details about using unstable features. /// - /// If you want similar behaviour without using this unstable feature you can + /// If you want similar behavior without using this unstable feature you can /// create a [`std::process::Command`] and convert that into a /// [`tokio::process::Command`] using the `From` trait. /// From bd15bace3505a1945a21cb7c8b3db013320fd0f8 Mon Sep 17 00:00:00 2001 From: snek Date: Tue, 19 Dec 2023 10:51:48 -0800 Subject: [PATCH 003/162] io: add `tokio::io::Join` (#6220) --- tokio/src/io/join.rs | 117 +++++++++++++++++++++++++++++++++++++++++ tokio/src/io/mod.rs | 2 + tokio/tests/io_join.rs | 83 +++++++++++++++++++++++++++++ 3 files changed, 202 insertions(+) create mode 100644 tokio/src/io/join.rs create mode 100644 tokio/tests/io_join.rs diff --git a/tokio/src/io/join.rs b/tokio/src/io/join.rs new file mode 100644 index 00000000000..dbc7043b67e --- /dev/null +++ b/tokio/src/io/join.rs @@ -0,0 +1,117 @@ +//! Join two values implementing `AsyncRead` and `AsyncWrite` into a single one. + +use crate::io::{AsyncRead, AsyncWrite, ReadBuf}; + +use std::io; +use std::pin::Pin; +use std::task::{Context, Poll}; + +/// Join two values implementing `AsyncRead` and `AsyncWrite` into a +/// single handle. 
+pub fn join(reader: R, writer: W) -> Join +where + R: AsyncRead, + W: AsyncWrite, +{ + Join { reader, writer } +} + +pin_project_lite::pin_project! { + /// Joins two values implementing `AsyncRead` and `AsyncWrite` into a + /// single handle. + #[derive(Debug)] + pub struct Join { + #[pin] + reader: R, + #[pin] + writer: W, + } +} + +impl Join +where + R: AsyncRead, + W: AsyncWrite, +{ + /// Splits this `Join` back into its `AsyncRead` and `AsyncWrite` + /// components. + pub fn into_inner(self) -> (R, W) { + (self.reader, self.writer) + } + + /// Returns a reference to the inner reader. + pub fn reader(&self) -> &R { + &self.reader + } + + /// Returns a reference to the inner writer. + pub fn writer(&self) -> &W { + &self.writer + } + + /// Returns a mutable reference to the inner reader. + pub fn reader_mut(&mut self) -> &mut R { + &mut self.reader + } + + /// Returns a mutable reference to the inner writer. + pub fn writer_mut(&mut self) -> &mut W { + &mut self.writer + } + + /// Returns a pinned mutable reference to the inner reader. + pub fn reader_pin_mut(self: Pin<&mut Self>) -> Pin<&mut R> { + self.project().reader + } + + /// Returns a pinned mutable reference to the inner writer. 
+ pub fn writer_pin_mut(self: Pin<&mut Self>) -> Pin<&mut W> { + self.project().writer + } +} + +impl AsyncRead for Join +where + R: AsyncRead, +{ + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut ReadBuf<'_>, + ) -> Poll> { + self.project().reader.poll_read(cx, buf) + } +} + +impl AsyncWrite for Join +where + W: AsyncWrite, +{ + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + self.project().writer.poll_write(cx, buf) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.project().writer.poll_flush(cx) + } + + fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.project().writer.poll_shutdown(cx) + } + + fn poll_write_vectored( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &[io::IoSlice<'_>], + ) -> Poll> { + self.project().writer.poll_write_vectored(cx, bufs) + } + + fn is_write_vectored(&self) -> bool { + self.writer.is_write_vectored() + } +} diff --git a/tokio/src/io/mod.rs b/tokio/src/io/mod.rs index 0fd6cc2c5cb..ff35a0e0f7e 100644 --- a/tokio/src/io/mod.rs +++ b/tokio/src/io/mod.rs @@ -265,6 +265,8 @@ cfg_io_std! { cfg_io_util! 
{ mod split; pub use split::{split, ReadHalf, WriteHalf}; + mod join; + pub use join::{join, Join}; pub(crate) mod seek; pub(crate) mod util; diff --git a/tokio/tests/io_join.rs b/tokio/tests/io_join.rs new file mode 100644 index 00000000000..69b09393311 --- /dev/null +++ b/tokio/tests/io_join.rs @@ -0,0 +1,83 @@ +#![warn(rust_2018_idioms)] +#![cfg(feature = "full")] + +use tokio::io::{join, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, Join, ReadBuf}; + +use std::io; +use std::pin::Pin; +use std::task::{Context, Poll}; + +struct R; + +impl AsyncRead for R { + fn poll_read( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + buf: &mut ReadBuf<'_>, + ) -> Poll> { + buf.put_slice(&[b'z']); + Poll::Ready(Ok(())) + } +} + +struct W; + +impl AsyncWrite for W { + fn poll_write( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + _buf: &[u8], + ) -> Poll> { + Poll::Ready(Ok(1)) + } + + fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn poll_write_vectored( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + _bufs: &[io::IoSlice<'_>], + ) -> Poll> { + Poll::Ready(Ok(2)) + } + + fn is_write_vectored(&self) -> bool { + true + } +} + +#[test] +fn is_send_and_sync() { + fn assert_bound() {} + + assert_bound::>(); +} + +#[test] +fn method_delegation() { + let mut rw = join(R, W); + let mut buf = [0; 1]; + + tokio_test::block_on(async move { + assert_eq!(1, rw.read(&mut buf).await.unwrap()); + assert_eq!(b'z', buf[0]); + + assert_eq!(1, rw.write(&[b'x']).await.unwrap()); + assert_eq!( + 2, + rw.write_vectored(&[io::IoSlice::new(&[b'x'])]) + .await + .unwrap() + ); + assert!(rw.is_write_vectored()); + + assert!(rw.flush().await.is_ok()); + assert!(rw.shutdown().await.is_ok()); + }); +} From 433545782e3cb69832bdf74d815bee2b955429ce Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Tue, 19 Dec 2023 20:02:02 +0100 Subject: [PATCH 
004/162] tests: fix name of `coop_budget.rs` (#6234) --- tokio/tests/{coop_budger.rs => coop_budget.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename tokio/tests/{coop_budger.rs => coop_budget.rs} (100%) diff --git a/tokio/tests/coop_budger.rs b/tokio/tests/coop_budget.rs similarity index 100% rename from tokio/tests/coop_budger.rs rename to tokio/tests/coop_budget.rs From e7214e3670233a76411d06b25ad8dc7b0bfd617a Mon Sep 17 00:00:00 2001 From: Miccah Date: Tue, 19 Dec 2023 16:05:53 -0800 Subject: [PATCH 005/162] io: implement `AsyncWrite` for `Empty` (#6235) --- tokio/src/io/util/empty.rs | 69 +++++++++++++++++++++++++++++++++++--- 1 file changed, 65 insertions(+), 4 deletions(-) diff --git a/tokio/src/io/util/empty.rs b/tokio/src/io/util/empty.rs index b96fabbaabe..06be4ff3073 100644 --- a/tokio/src/io/util/empty.rs +++ b/tokio/src/io/util/empty.rs @@ -1,4 +1,4 @@ -use crate::io::{AsyncBufRead, AsyncRead, ReadBuf}; +use crate::io::{AsyncBufRead, AsyncRead, AsyncWrite, ReadBuf}; use std::fmt; use std::io; @@ -6,7 +6,8 @@ use std::pin::Pin; use std::task::{Context, Poll}; cfg_io_util! { - /// An async reader which is always at EOF. + /// `Empty` ignores any data written via [`AsyncWrite`], and will always be empty + /// (returning zero bytes) when read via [`AsyncRead`]. /// /// This struct is generally created by calling [`empty`]. Please see /// the documentation of [`empty()`][`empty`] for more details. @@ -19,9 +20,12 @@ cfg_io_util! { _p: (), } - /// Creates a new empty async reader. + /// Creates a value that is always at EOF for reads, and ignores all data written. /// - /// All reads from the returned reader will return `Poll::Ready(Ok(0))`. + /// All writes on the returned instance will return `Poll::Ready(Ok(buf.len()))` + /// and the contents of the buffer will not be inspected. + /// + /// All reads from the returned instance will return `Poll::Ready(Ok(0))`. /// /// This is an asynchronous version of [`std::io::empty`][std]. 
/// @@ -41,6 +45,19 @@ cfg_io_util! { /// assert!(buffer.is_empty()); /// } /// ``` + /// + /// A convoluted way of getting the length of a buffer: + /// + /// ``` + /// use tokio::io::{self, AsyncWriteExt}; + /// + /// #[tokio::main] + /// async fn main() { + /// let buffer = vec![1, 2, 3, 5, 8]; + /// let num_bytes = io::empty().write(&buffer).await.unwrap(); + /// assert_eq!(num_bytes, 5); + /// } + /// ``` pub fn empty() -> Empty { Empty { _p: () } } @@ -71,6 +88,50 @@ impl AsyncBufRead for Empty { fn consume(self: Pin<&mut Self>, _: usize) {} } +impl AsyncWrite for Empty { + #[inline] + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + ready!(crate::trace::trace_leaf(cx)); + ready!(poll_proceed_and_make_progress(cx)); + Poll::Ready(Ok(buf.len())) + } + + #[inline] + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + ready!(crate::trace::trace_leaf(cx)); + ready!(poll_proceed_and_make_progress(cx)); + Poll::Ready(Ok(())) + } + + #[inline] + fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + ready!(crate::trace::trace_leaf(cx)); + ready!(poll_proceed_and_make_progress(cx)); + Poll::Ready(Ok(())) + } + + #[inline] + fn is_write_vectored(&self) -> bool { + true + } + + #[inline] + fn poll_write_vectored( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &[io::IoSlice<'_>], + ) -> Poll> { + ready!(crate::trace::trace_leaf(cx)); + ready!(poll_proceed_and_make_progress(cx)); + let num_bytes = bufs.iter().map(|b| b.len()).sum(); + Poll::Ready(Ok(num_bytes)) + } +} + impl fmt::Debug for Empty { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.pad("Empty { .. 
}") From 53ad102cf0e99e91445d9aea0b5ba3cc768b7566 Mon Sep 17 00:00:00 2001 From: Eddy Oyieko <67474838+mobley-trent@users.noreply.github.com> Date: Fri, 22 Dec 2023 20:23:04 +0300 Subject: [PATCH 006/162] util: implement `Sink` for `Either` (#6239) --- tokio-util/src/either.rs | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/tokio-util/src/either.rs b/tokio-util/src/either.rs index 8a02398bc18..e7fec9546b3 100644 --- a/tokio-util/src/either.rs +++ b/tokio-util/src/either.rs @@ -164,6 +164,39 @@ where } } +impl futures_sink::Sink for Either +where + L: futures_sink::Sink, + R: futures_sink::Sink, +{ + type Error = Error; + + fn poll_ready( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + delegate_call!(self.poll_ready(cx)) + } + + fn start_send(self: Pin<&mut Self>, item: Item) -> std::result::Result<(), Self::Error> { + delegate_call!(self.start_send(item)) + } + + fn poll_flush( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + delegate_call!(self.poll_flush(cx)) + } + + fn poll_close( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + delegate_call!(self.poll_close(cx)) + } +} + #[cfg(test)] mod tests { use super::*; From 7cae89af47ffe75e2bd396ea2adcd806e5184dd4 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Fri, 22 Dec 2023 23:43:05 +0100 Subject: [PATCH 007/162] benches: fix benchmarking conflicts (#6243) --- benches/rt_current_thread.rs | 4 ++-- benches/rt_multi_threaded.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/benches/rt_current_thread.rs b/benches/rt_current_thread.rs index 821207638f1..7f3414003af 100644 --- a/benches/rt_current_thread.rs +++ b/benches/rt_current_thread.rs @@ -80,10 +80,10 @@ fn rt() -> Runtime { } criterion_group!( - scheduler, + rt_curr_scheduler, spawn_many_local, spawn_many_remote_idle, spawn_many_remote_busy ); -criterion_main!(scheduler); +criterion_main!(rt_curr_scheduler); diff --git a/benches/rt_multi_threaded.rs 
b/benches/rt_multi_threaded.rs index 324fb60961f..14f81dc102f 100644 --- a/benches/rt_multi_threaded.rs +++ b/benches/rt_multi_threaded.rs @@ -263,7 +263,7 @@ fn stall() { } criterion_group!( - scheduler, + rt_multi_scheduler, spawn_many_local, spawn_many_remote_idle, spawn_many_remote_busy1, @@ -273,4 +273,4 @@ criterion_group!( chained_spawn, ); -criterion_main!(scheduler); +criterion_main!(rt_multi_scheduler); From d51308b0359a3a3f4d3c11fd71bb402646afe0b5 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Sat, 23 Dec 2023 01:28:14 +0100 Subject: [PATCH 008/162] ci: fix configured clippy msrv (#6244) --- .clippy.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.clippy.toml b/.clippy.toml index 62ca7423409..550d4759a1a 100644 --- a/.clippy.toml +++ b/.clippy.toml @@ -1 +1 @@ -msrv = "1.56" +msrv = "1.63" From 52f28dcb4f12dd6b626776b3297f0bf3c11b9169 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Thu, 28 Dec 2023 10:52:18 +0100 Subject: [PATCH 009/162] benches: fix benchmarking conflicts for real this time (#6246) --- benches/rt_current_thread.rs | 12 ++++++------ benches/rt_multi_threaded.rs | 28 ++++++++++++++-------------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/benches/rt_current_thread.rs b/benches/rt_current_thread.rs index 7f3414003af..c62fd152fe6 100644 --- a/benches/rt_current_thread.rs +++ b/benches/rt_current_thread.rs @@ -8,7 +8,7 @@ use criterion::{criterion_group, criterion_main, Criterion}; const NUM_SPAWN: usize = 1_000; -fn spawn_many_local(c: &mut Criterion) { +fn rt_curr_spawn_many_local(c: &mut Criterion) { let rt = rt(); let mut handles = Vec::with_capacity(NUM_SPAWN); @@ -27,7 +27,7 @@ fn spawn_many_local(c: &mut Criterion) { }); } -fn spawn_many_remote_idle(c: &mut Criterion) { +fn rt_curr_spawn_many_remote_idle(c: &mut Criterion) { let rt = rt(); let rt_handle = rt.handle(); let mut handles = Vec::with_capacity(NUM_SPAWN); @@ -47,7 +47,7 @@ fn spawn_many_remote_idle(c: &mut Criterion) { }); } -fn 
spawn_many_remote_busy(c: &mut Criterion) { +fn rt_curr_spawn_many_remote_busy(c: &mut Criterion) { let rt = rt(); let rt_handle = rt.handle(); let mut handles = Vec::with_capacity(NUM_SPAWN); @@ -81,9 +81,9 @@ fn rt() -> Runtime { criterion_group!( rt_curr_scheduler, - spawn_many_local, - spawn_many_remote_idle, - spawn_many_remote_busy + rt_curr_spawn_many_local, + rt_curr_spawn_many_remote_idle, + rt_curr_spawn_many_remote_busy ); criterion_main!(rt_curr_scheduler); diff --git a/benches/rt_multi_threaded.rs b/benches/rt_multi_threaded.rs index 14f81dc102f..2502b619c2d 100644 --- a/benches/rt_multi_threaded.rs +++ b/benches/rt_multi_threaded.rs @@ -16,7 +16,7 @@ const NUM_WORKERS: usize = 4; const NUM_SPAWN: usize = 10_000; const STALL_DUR: Duration = Duration::from_micros(10); -fn spawn_many_local(c: &mut Criterion) { +fn rt_multi_spawn_many_local(c: &mut Criterion) { let rt = rt(); let (tx, rx) = mpsc::sync_channel(1000); @@ -44,7 +44,7 @@ fn spawn_many_local(c: &mut Criterion) { }); } -fn spawn_many_remote_idle(c: &mut Criterion) { +fn rt_multi_spawn_many_remote_idle(c: &mut Criterion) { let rt = rt(); let mut handles = Vec::with_capacity(NUM_SPAWN); @@ -66,7 +66,7 @@ fn spawn_many_remote_idle(c: &mut Criterion) { // The runtime is busy with tasks that consume CPU time and yield. Yielding is a // lower notification priority than spawning / regular notification. -fn spawn_many_remote_busy1(c: &mut Criterion) { +fn rt_multi_spawn_many_remote_busy1(c: &mut Criterion) { let rt = rt(); let rt_handle = rt.handle(); let mut handles = Vec::with_capacity(NUM_SPAWN); @@ -102,7 +102,7 @@ fn spawn_many_remote_busy1(c: &mut Criterion) { // The runtime is busy with tasks that consume CPU time and spawn new high-CPU // tasks. Spawning goes via a higher notification priority than yielding. 
-fn spawn_many_remote_busy2(c: &mut Criterion) { +fn rt_multi_spawn_many_remote_busy2(c: &mut Criterion) { const NUM_SPAWN: usize = 1_000; let rt = rt(); @@ -143,7 +143,7 @@ fn spawn_many_remote_busy2(c: &mut Criterion) { flag.store(false, Relaxed); } -fn yield_many(c: &mut Criterion) { +fn rt_multi_yield_many(c: &mut Criterion) { const NUM_YIELD: usize = 1_000; const TASKS: usize = 200; @@ -171,7 +171,7 @@ fn yield_many(c: &mut Criterion) { }); } -fn ping_pong(c: &mut Criterion) { +fn rt_multi_ping_pong(c: &mut Criterion) { const NUM_PINGS: usize = 1_000; let rt = rt(); @@ -216,7 +216,7 @@ fn ping_pong(c: &mut Criterion) { }); } -fn chained_spawn(c: &mut Criterion) { +fn rt_multi_chained_spawn(c: &mut Criterion) { const ITER: usize = 1_000; fn iter(done_tx: mpsc::SyncSender<()>, n: usize) { @@ -264,13 +264,13 @@ fn stall() { criterion_group!( rt_multi_scheduler, - spawn_many_local, - spawn_many_remote_idle, - spawn_many_remote_busy1, - spawn_many_remote_busy2, - ping_pong, - yield_many, - chained_spawn, + rt_multi_spawn_many_local, + rt_multi_spawn_many_remote_idle, + rt_multi_spawn_many_remote_busy1, + rt_multi_spawn_many_remote_busy2, + rt_multi_ping_pong, + rt_multi_yield_many, + rt_multi_chained_spawn, ); criterion_main!(rt_multi_scheduler); From 5f7fe8fd0d3d529a7282e674240cdd76b11d3730 Mon Sep 17 00:00:00 2001 From: kim / Motoyuki Kimura <55653825+mox692@users.noreply.github.com> Date: Sat, 30 Dec 2023 01:22:54 +0900 Subject: [PATCH 010/162] ci: fix new warnings on 1.75.0 (#6255) --- .../tests/fail/macros_type_mismatch.stderr | 22 ++++++++++++++----- tokio/src/future/mod.rs | 1 + 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/tests-build/tests/fail/macros_type_mismatch.stderr b/tests-build/tests/fail/macros_type_mismatch.stderr index d900ac2330c..579c241559b 100644 --- a/tests-build/tests/fail/macros_type_mismatch.stderr +++ b/tests-build/tests/fail/macros_type_mismatch.stderr @@ -1,24 +1,36 @@ error[E0308]: mismatched types --> 
tests/fail/macros_type_mismatch.rs:5:5 | -4 | async fn missing_semicolon_or_return_type() { - | - help: a return type might be missing here: `-> _` 5 | Ok(()) | ^^^^^^ expected `()`, found `Result<(), _>` | = note: expected unit type `()` found enum `Result<(), _>` +help: a return type might be missing here + | +4 | async fn missing_semicolon_or_return_type() -> _ { + | ++++ +help: consider using `Result::expect` to unwrap the `Result<(), _>` value, panicking if the value is a `Result::Err` + | +5 | Ok(()).expect("REASON") + | +++++++++++++++++ error[E0308]: mismatched types --> tests/fail/macros_type_mismatch.rs:10:5 | -9 | async fn missing_return_type() { - | - help: a return type might be missing here: `-> _` 10 | return Ok(()); | ^^^^^^^^^^^^^^ expected `()`, found `Result<(), _>` | = note: expected unit type `()` found enum `Result<(), _>` +help: a return type might be missing here + | +9 | async fn missing_return_type() -> _ { + | ++++ +help: consider using `Result::expect` to unwrap the `Result<(), _>` value, panicking if the value is a `Result::Err` + | +10 | return Ok(());.expect("REASON") + | +++++++++++++++++ error[E0308]: mismatched types --> tests/fail/macros_type_mismatch.rs:23:5 @@ -41,7 +53,7 @@ error[E0308]: mismatched types --> tests/fail/macros_type_mismatch.rs:32:5 | 30 | async fn issue_4635() { - | - help: try adding a return type: `-> i32` + | - help: try adding a return type: `-> i32` 31 | return 1; 32 | ; | ^ expected `()`, found integer diff --git a/tokio/src/future/mod.rs b/tokio/src/future/mod.rs index 7c883eb3c34..12b6bbc4945 100644 --- a/tokio/src/future/mod.rs +++ b/tokio/src/future/mod.rs @@ -6,6 +6,7 @@ pub(crate) mod maybe_done; mod poll_fn; +#[allow(unused_imports)] pub use poll_fn::poll_fn; cfg_process! 
{ From 581cd41d79e3c3253490663ea854b904bf39eff0 Mon Sep 17 00:00:00 2001 From: kim / Motoyuki Kimura <55653825+mox692@users.noreply.github.com> Date: Sat, 30 Dec 2023 23:26:16 +0900 Subject: [PATCH 011/162] io: make `repeat` and `sink` cooperative (#6254) --- tokio/src/io/util/empty.rs | 15 +------------ tokio/src/io/util/mod.rs | 14 ++++++++++++ tokio/src/io/util/repeat.rs | 5 ++++- tokio/src/io/util/sink.rs | 13 ++++++++--- tokio/tests/io_repeat.rs | 18 +++++++++++++++ tokio/tests/io_sink.rs | 44 +++++++++++++++++++++++++++++++++++++ 6 files changed, 91 insertions(+), 18 deletions(-) create mode 100644 tokio/tests/io_repeat.rs create mode 100644 tokio/tests/io_sink.rs diff --git a/tokio/src/io/util/empty.rs b/tokio/src/io/util/empty.rs index 06be4ff3073..289725ce49f 100644 --- a/tokio/src/io/util/empty.rs +++ b/tokio/src/io/util/empty.rs @@ -1,3 +1,4 @@ +use crate::io::util::poll_proceed_and_make_progress; use crate::io::{AsyncBufRead, AsyncRead, AsyncWrite, ReadBuf}; use std::fmt; @@ -138,20 +139,6 @@ impl fmt::Debug for Empty { } } -cfg_coop! { - fn poll_proceed_and_make_progress(cx: &mut Context<'_>) -> Poll<()> { - let coop = ready!(crate::runtime::coop::poll_proceed(cx)); - coop.made_progress(); - Poll::Ready(()) - } -} - -cfg_not_coop! { - fn poll_proceed_and_make_progress(_: &mut Context<'_>) -> Poll<()> { - Poll::Ready(()) - } -} - #[cfg(test)] mod tests { use super::*; diff --git a/tokio/src/io/util/mod.rs b/tokio/src/io/util/mod.rs index 21199d0be84..47b951f2b83 100644 --- a/tokio/src/io/util/mod.rs +++ b/tokio/src/io/util/mod.rs @@ -85,6 +85,20 @@ cfg_io_util! { // used by `BufReader` and `BufWriter` // https://github.com/rust-lang/rust/blob/master/library/std/src/sys_common/io.rs#L1 const DEFAULT_BUF_SIZE: usize = 8 * 1024; + + cfg_coop! 
{ + fn poll_proceed_and_make_progress(cx: &mut std::task::Context<'_>) -> std::task::Poll<()> { + let coop = ready!(crate::runtime::coop::poll_proceed(cx)); + coop.made_progress(); + std::task::Poll::Ready(()) + } + } + + cfg_not_coop! { + fn poll_proceed_and_make_progress(_: &mut std::task::Context<'_>) -> std::task::Poll<()> { + std::task::Poll::Ready(()) + } + } } cfg_not_io_util! { diff --git a/tokio/src/io/util/repeat.rs b/tokio/src/io/util/repeat.rs index 1142765df5c..4a3ac78e49e 100644 --- a/tokio/src/io/util/repeat.rs +++ b/tokio/src/io/util/repeat.rs @@ -1,3 +1,4 @@ +use crate::io::util::poll_proceed_and_make_progress; use crate::io::{AsyncRead, ReadBuf}; use std::io; @@ -50,9 +51,11 @@ impl AsyncRead for Repeat { #[inline] fn poll_read( self: Pin<&mut Self>, - _: &mut Context<'_>, + cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll> { + ready!(crate::trace::trace_leaf(cx)); + ready!(poll_proceed_and_make_progress(cx)); // TODO: could be faster, but should we unsafe it? while buf.remaining() != 0 { buf.put_slice(&[self.byte]); diff --git a/tokio/src/io/util/sink.rs b/tokio/src/io/util/sink.rs index 05ee773fa38..1c0102d4b2f 100644 --- a/tokio/src/io/util/sink.rs +++ b/tokio/src/io/util/sink.rs @@ -1,3 +1,4 @@ +use crate::io::util::poll_proceed_and_make_progress; use crate::io::AsyncWrite; use std::fmt; @@ -53,19 +54,25 @@ impl AsyncWrite for Sink { #[inline] fn poll_write( self: Pin<&mut Self>, - _: &mut Context<'_>, + cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { + ready!(crate::trace::trace_leaf(cx)); + ready!(poll_proceed_and_make_progress(cx)); Poll::Ready(Ok(buf.len())) } #[inline] - fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + ready!(crate::trace::trace_leaf(cx)); + ready!(poll_proceed_and_make_progress(cx)); Poll::Ready(Ok(())) } #[inline] - fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + fn poll_shutdown(self: Pin<&mut 
Self>, cx: &mut Context<'_>) -> Poll> { + ready!(crate::trace::trace_leaf(cx)); + ready!(poll_proceed_and_make_progress(cx)); Poll::Ready(Ok(())) } } diff --git a/tokio/tests/io_repeat.rs b/tokio/tests/io_repeat.rs new file mode 100644 index 00000000000..8094ffe7dae --- /dev/null +++ b/tokio/tests/io_repeat.rs @@ -0,0 +1,18 @@ +#![warn(rust_2018_idioms)] +#![cfg(all(feature = "full"))] + +use tokio::io::AsyncReadExt; + +#[tokio::test] +async fn repeat_poll_read_is_cooperative() { + tokio::select! { + biased; + _ = async { + loop { + let mut buf = [0u8; 4096]; + tokio::io::repeat(0b101).read_exact(&mut buf).await.unwrap(); + } + } => {}, + _ = tokio::task::yield_now() => {} + } +} diff --git a/tokio/tests/io_sink.rs b/tokio/tests/io_sink.rs new file mode 100644 index 00000000000..9b4fb31f30f --- /dev/null +++ b/tokio/tests/io_sink.rs @@ -0,0 +1,44 @@ +#![warn(rust_2018_idioms)] +#![cfg(all(feature = "full"))] + +use tokio::io::AsyncWriteExt; + +#[tokio::test] +async fn sink_poll_write_is_cooperative() { + tokio::select! { + biased; + _ = async { + loop { + let buf = vec![1, 2, 3]; + tokio::io::sink().write_all(&buf).await.unwrap(); + } + } => {}, + _ = tokio::task::yield_now() => {} + } +} + +#[tokio::test] +async fn sink_poll_flush_is_cooperative() { + tokio::select! { + biased; + _ = async { + loop { + tokio::io::sink().flush().await.unwrap(); + } + } => {}, + _ = tokio::task::yield_now() => {} + } +} + +#[tokio::test] +async fn sink_poll_shutdown_is_cooperative() { + tokio::select! 
{ + biased; + _ = async { + loop { + tokio::io::sink().shutdown().await.unwrap(); + } + } => {}, + _ = tokio::task::yield_now() => {} + } +} From 48345d6e4822b4c0ea00d5c1c075a6b5ac663acf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tymoteusz=20Wi=C5=9Bniewski?= Date: Sat, 30 Dec 2023 16:37:47 +0100 Subject: [PATCH 012/162] net: add support for anonymous unix pipes (#6127) --- tokio/src/net/unix/pipe.rs | 281 +++++++++++++++++++++++++++++++---- tokio/tests/net_unix_pipe.rs | 105 +++++++++++++ 2 files changed, 358 insertions(+), 28 deletions(-) diff --git a/tokio/src/net/unix/pipe.rs b/tokio/src/net/unix/pipe.rs index 0b2508a9257..7c279134dbf 100644 --- a/tokio/src/net/unix/pipe.rs +++ b/tokio/src/net/unix/pipe.rs @@ -6,8 +6,8 @@ use crate::io::{AsyncRead, AsyncWrite, PollEvented, ReadBuf, Ready}; use mio::unix::pipe as mio_pipe; use std::fs::File; use std::io::{self, Read, Write}; -use std::os::unix::fs::{FileTypeExt, OpenOptionsExt}; -use std::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, RawFd}; +use std::os::unix::fs::OpenOptionsExt; +use std::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, OwnedFd, RawFd}; use std::path::Path; use std::pin::Pin; use std::task::{Context, Poll}; @@ -16,6 +16,59 @@ cfg_io_util! { use bytes::BufMut; } +/// Creates a new anonymous Unix pipe. +/// +/// This function will open a new pipe and associate both pipe ends with the default +/// event loop. +/// +/// If you need to create a pipe for communication with a spawned process, you can +/// use [`Stdio::piped()`] instead. +/// +/// [`Stdio::piped()`]: std::process::Stdio::piped +/// +/// # Errors +/// +/// If creating a pipe fails, this function will return with the related OS error. +/// +/// # Examples +/// +/// Create a pipe and pass the writing end to a spawned process. 
+/// +/// ```no_run +/// use tokio::net::unix::pipe; +/// use tokio::process::Command; +/// # use tokio::io::AsyncReadExt; +/// # use std::error::Error; +/// +/// # async fn dox() -> Result<(), Box> { +/// let (tx, mut rx) = pipe::pipe()?; +/// let mut buffer = String::new(); +/// +/// let status = Command::new("echo") +/// .arg("Hello, world!") +/// .stdout(tx.into_blocking_fd()?) +/// .status(); +/// rx.read_to_string(&mut buffer).await?; +/// +/// assert!(status.await?.success()); +/// assert_eq!(buffer, "Hello, world!\n"); +/// # Ok(()) +/// # } +/// ``` +/// +/// # Panics +/// +/// This function panics if it is not called from within a runtime with +/// IO enabled. +/// +/// The runtime is usually set implicitly when this function is called +/// from a future driven by a tokio runtime, otherwise runtime can be set +/// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. +pub fn pipe() -> io::Result<(Sender, Receiver)> { + let (tx, rx) = mio_pipe::new()?; + Ok((Sender::from_mio(tx)?, Receiver::from_mio(rx)?)) +} + /// Options and flags which can be used to configure how a FIFO file is opened. /// /// This builder allows configuring how to create a pipe end from a FIFO file. @@ -218,7 +271,7 @@ impl OpenOptions { let file = options.open(path)?; - if !self.unchecked && !is_fifo(&file)? { + if !self.unchecked && !is_pipe(file.as_fd())? { return Err(io::Error::new(io::ErrorKind::InvalidInput, "not a pipe")); } @@ -338,15 +391,40 @@ impl Sender { /// The runtime is usually set implicitly when this function is called /// from a future driven by a tokio runtime, otherwise runtime can be set /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. - pub fn from_file(mut file: File) -> io::Result { - if !is_fifo(&file)? { + pub fn from_file(file: File) -> io::Result { + Sender::from_owned_fd(file.into()) + } + + /// Creates a new `Sender` from an [`OwnedFd`]. 
+ /// + /// This function is intended to construct a pipe from an [`OwnedFd`] representing + /// an anonymous pipe or a special FIFO file. It will check if the file descriptor + /// is a pipe and has write access, set it in non-blocking mode and perform the + /// conversion. + /// + /// # Errors + /// + /// Fails with `io::ErrorKind::InvalidInput` if the file descriptor is not a pipe + /// or it does not have write access. Also fails with any standard OS error if it + /// occurs. + /// + /// # Panics + /// + /// This function panics if it is not called from within a runtime with + /// IO enabled. + /// + /// The runtime is usually set implicitly when this function is called + /// from a future driven by a tokio runtime, otherwise runtime can be set + /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. + pub fn from_owned_fd(owned_fd: OwnedFd) -> io::Result { + if !is_pipe(owned_fd.as_fd())? { return Err(io::Error::new(io::ErrorKind::InvalidInput, "not a pipe")); } - let flags = get_file_flags(&file)?; + let flags = get_file_flags(owned_fd.as_fd())?; if has_write_access(flags) { - set_nonblocking(&mut file, flags)?; - Sender::from_file_unchecked(file) + set_nonblocking(owned_fd.as_fd(), flags)?; + Sender::from_owned_fd_unchecked(owned_fd) } else { Err(io::Error::new( io::ErrorKind::InvalidInput, @@ -394,8 +472,28 @@ impl Sender { /// from a future driven by a tokio runtime, otherwise runtime can be set /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. pub fn from_file_unchecked(file: File) -> io::Result { - let raw_fd = file.into_raw_fd(); - let mio_tx = unsafe { mio_pipe::Sender::from_raw_fd(raw_fd) }; + Sender::from_owned_fd_unchecked(file.into()) + } + + /// Creates a new `Sender` from an [`OwnedFd`] without checking pipe properties. + /// + /// This function is intended to construct a pipe from an [`OwnedFd`] representing + /// an anonymous pipe or a special FIFO file. 
The conversion assumes nothing about + /// the underlying pipe; it is left up to the user to make sure that the file + /// descriptor represents the writing end of a pipe and the pipe is set in + /// non-blocking mode. + /// + /// # Panics + /// + /// This function panics if it is not called from within a runtime with + /// IO enabled. + /// + /// The runtime is usually set implicitly when this function is called + /// from a future driven by a tokio runtime, otherwise runtime can be set + /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. + pub fn from_owned_fd_unchecked(owned_fd: OwnedFd) -> io::Result { + // Safety: OwnedFd represents a valid, open file descriptor. + let mio_tx = unsafe { mio_pipe::Sender::from_raw_fd(owned_fd.into_raw_fd()) }; Sender::from_mio(mio_tx) } @@ -623,6 +721,31 @@ impl Sender { .registration() .try_io(Interest::WRITABLE, || (&*self.io).write_vectored(buf)) } + + /// Converts the pipe into an [`OwnedFd`] in blocking mode. + /// + /// This function will deregister this pipe end from the event loop, set + /// it in blocking mode and perform the conversion. + pub fn into_blocking_fd(self) -> io::Result { + let fd = self.into_nonblocking_fd()?; + set_blocking(&fd)?; + Ok(fd) + } + + /// Converts the pipe into an [`OwnedFd`] in nonblocking mode. + /// + /// This function will deregister this pipe end from the event loop and + /// perform the conversion. The returned file descriptor will be in nonblocking + /// mode. + pub fn into_nonblocking_fd(self) -> io::Result { + let mio_pipe = self.io.into_inner()?; + + // Safety: the pipe is now deregistered from the event loop + // and we are the only owner of this pipe end. 
+ let owned_fd = unsafe { OwnedFd::from_raw_fd(mio_pipe.into_raw_fd()) }; + + Ok(owned_fd) + } } impl AsyncWrite for Sender { @@ -764,15 +887,40 @@ impl Receiver { /// The runtime is usually set implicitly when this function is called /// from a future driven by a tokio runtime, otherwise runtime can be set /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. - pub fn from_file(mut file: File) -> io::Result { - if !is_fifo(&file)? { + pub fn from_file(file: File) -> io::Result { + Receiver::from_owned_fd(file.into()) + } + + /// Creates a new `Receiver` from an [`OwnedFd`]. + /// + /// This function is intended to construct a pipe from an [`OwnedFd`] representing + /// an anonymous pipe or a special FIFO file. It will check if the file descriptor + /// is a pipe and has read access, set it in non-blocking mode and perform the + /// conversion. + /// + /// # Errors + /// + /// Fails with `io::ErrorKind::InvalidInput` if the file descriptor is not a pipe + /// or it does not have read access. Also fails with any standard OS error if it + /// occurs. + /// + /// # Panics + /// + /// This function panics if it is not called from within a runtime with + /// IO enabled. + /// + /// The runtime is usually set implicitly when this function is called + /// from a future driven by a tokio runtime, otherwise runtime can be set + /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. + pub fn from_owned_fd(owned_fd: OwnedFd) -> io::Result { + if !is_pipe(owned_fd.as_fd())? 
{ return Err(io::Error::new(io::ErrorKind::InvalidInput, "not a pipe")); } - let flags = get_file_flags(&file)?; + let flags = get_file_flags(owned_fd.as_fd())?; if has_read_access(flags) { - set_nonblocking(&mut file, flags)?; - Receiver::from_file_unchecked(file) + set_nonblocking(owned_fd.as_fd(), flags)?; + Receiver::from_owned_fd_unchecked(owned_fd) } else { Err(io::Error::new( io::ErrorKind::InvalidInput, @@ -820,8 +968,28 @@ impl Receiver { /// from a future driven by a tokio runtime, otherwise runtime can be set /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. pub fn from_file_unchecked(file: File) -> io::Result { - let raw_fd = file.into_raw_fd(); - let mio_rx = unsafe { mio_pipe::Receiver::from_raw_fd(raw_fd) }; + Receiver::from_owned_fd_unchecked(file.into()) + } + + /// Creates a new `Receiver` from an [`OwnedFd`] without checking pipe properties. + /// + /// This function is intended to construct a pipe from an [`OwnedFd`] representing + /// an anonymous pipe or a special FIFO file. The conversion assumes nothing about + /// the underlying pipe; it is left up to the user to make sure that the file + /// descriptor represents the reading end of a pipe and the pipe is set in + /// non-blocking mode. + /// + /// # Panics + /// + /// This function panics if it is not called from within a runtime with + /// IO enabled. + /// + /// The runtime is usually set implicitly when this function is called + /// from a future driven by a tokio runtime, otherwise runtime can be set + /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. + pub fn from_owned_fd_unchecked(owned_fd: OwnedFd) -> io::Result { + // Safety: OwnedFd represents a valid, open file descriptor. + let mio_rx = unsafe { mio_pipe::Receiver::from_raw_fd(owned_fd.into_raw_fd()) }; Receiver::from_mio(mio_rx) } @@ -1146,6 +1314,31 @@ impl Receiver { }) } } + + /// Converts the pipe into an [`OwnedFd`] in blocking mode. 
+ /// + /// This function will deregister this pipe end from the event loop, set + /// it in blocking mode and perform the conversion. + pub fn into_blocking_fd(self) -> io::Result { + let fd = self.into_nonblocking_fd()?; + set_blocking(&fd)?; + Ok(fd) + } + + /// Converts the pipe into an [`OwnedFd`] in nonblocking mode. + /// + /// This function will deregister this pipe end from the event loop and + /// perform the conversion. Returned file descriptor will be in nonblocking + /// mode. + pub fn into_nonblocking_fd(self) -> io::Result { + let mio_pipe = self.io.into_inner()?; + + // Safety: the pipe is now deregistered from the event loop + // and we are the only owner of this pipe end. + let owned_fd = unsafe { OwnedFd::from_raw_fd(mio_pipe.into_raw_fd()) }; + + Ok(owned_fd) + } } impl AsyncRead for Receiver { @@ -1172,15 +1365,27 @@ impl AsFd for Receiver { } } -/// Checks if file is a FIFO -fn is_fifo(file: &File) -> io::Result { - Ok(file.metadata()?.file_type().is_fifo()) +/// Checks if the file descriptor is a pipe or a FIFO. +fn is_pipe(fd: BorrowedFd<'_>) -> io::Result { + // Safety: `libc::stat` is C-like struct used for syscalls and all-zero + // byte pattern forms a valid value. + let mut stat: libc::stat = unsafe { std::mem::zeroed() }; + + // Safety: it's safe to call `fstat` with a valid, open file descriptor + // and a valid pointer to a `stat` struct. + let r = unsafe { libc::fstat(fd.as_raw_fd(), &mut stat) }; + + if r == -1 { + Err(io::Error::last_os_error()) + } else { + Ok((stat.st_mode as libc::mode_t & libc::S_IFMT) == libc::S_IFIFO) + } } /// Gets file descriptor's flags by fcntl. -fn get_file_flags(file: &File) -> io::Result { - let fd = file.as_raw_fd(); - let flags = unsafe { libc::fcntl(fd, libc::F_GETFL) }; +fn get_file_flags(fd: BorrowedFd<'_>) -> io::Result { + // Safety: it's safe to use `fcntl` to read flags of a valid, open file descriptor. 
+ let flags = unsafe { libc::fcntl(fd.as_raw_fd(), libc::F_GETFL) }; if flags < 0 { Err(io::Error::last_os_error()) } else { @@ -1200,14 +1405,14 @@ fn has_write_access(flags: libc::c_int) -> bool { mode == libc::O_WRONLY || mode == libc::O_RDWR } -/// Sets file's flags with `O_NONBLOCK` by fcntl. -fn set_nonblocking(file: &mut File, current_flags: libc::c_int) -> io::Result<()> { - let fd = file.as_raw_fd(); - +/// Sets file descriptor's flags with `O_NONBLOCK` by fcntl. +fn set_nonblocking(fd: BorrowedFd<'_>, current_flags: libc::c_int) -> io::Result<()> { let flags = current_flags | libc::O_NONBLOCK; if flags != current_flags { - let ret = unsafe { libc::fcntl(fd, libc::F_SETFL, flags) }; + // Safety: it's safe to use `fcntl` to set the `O_NONBLOCK` flag of a valid, + // open file descriptor. + let ret = unsafe { libc::fcntl(fd.as_raw_fd(), libc::F_SETFL, flags) }; if ret < 0 { return Err(io::Error::last_os_error()); } @@ -1215,3 +1420,23 @@ fn set_nonblocking(file: &mut File, current_flags: libc::c_int) -> io::Result<() Ok(()) } + +/// Removes `O_NONBLOCK` from fd's flags. +fn set_blocking(fd: &T) -> io::Result<()> { + // Safety: it's safe to use `fcntl` to read flags of a valid, open file descriptor. + let previous = unsafe { libc::fcntl(fd.as_raw_fd(), libc::F_GETFL) }; + if previous == -1 { + return Err(io::Error::last_os_error()); + } + + let new = previous & !libc::O_NONBLOCK; + + // Safety: it's safe to use `fcntl` to unset the `O_NONBLOCK` flag of a valid, + // open file descriptor. 
+ let r = unsafe { libc::fcntl(fd.as_raw_fd(), libc::F_SETFL, new) }; + if r == -1 { + Err(io::Error::last_os_error()) + } else { + Ok(()) + } +} diff --git a/tokio/tests/net_unix_pipe.rs b/tokio/tests/net_unix_pipe.rs index c96d6e70fbd..6706880ed1b 100644 --- a/tokio/tests/net_unix_pipe.rs +++ b/tokio/tests/net_unix_pipe.rs @@ -427,3 +427,108 @@ async fn try_read_buf() -> std::io::Result<()> { Ok(()) } + +#[tokio::test] +async fn anon_pipe_simple_send() -> io::Result<()> { + const DATA: &[u8] = b"this is some data to write to the pipe"; + + let (mut writer, mut reader) = pipe::pipe()?; + + // Create a reading task which should wait for data from the pipe. + let mut read_fut = task::spawn(async move { + let mut buf = vec![0; DATA.len()]; + reader.read_exact(&mut buf).await?; + Ok::<_, io::Error>(buf) + }); + assert_pending!(read_fut.poll()); + + writer.write_all(DATA).await?; + + // Let the IO driver poll events for the reader. + while !read_fut.is_woken() { + tokio::task::yield_now().await; + } + + // Reading task should be ready now. + let read_data = assert_ready_ok!(read_fut.poll()); + assert_eq!(&read_data, DATA); + + Ok(()) +} + +#[tokio::test] +async fn anon_pipe_spawn_echo() -> std::io::Result<()> { + use tokio::process::Command; + + const DATA: &str = "this is some data to write to the pipe"; + + let (tx, mut rx) = pipe::pipe()?; + + let status = Command::new("echo") + .arg("-n") + .arg(DATA) + .stdout(tx.into_blocking_fd()?) + .status(); + + let mut buf = vec![0; DATA.len()]; + rx.read_exact(&mut buf).await?; + assert_eq!(String::from_utf8(buf).unwrap(), DATA); + + let exit_code = status.await?; + assert!(exit_code.success()); + + // Check if the pipe is closed. 
+ buf = Vec::new(); + let total = assert_ok!(rx.try_read(&mut buf)); + assert_eq!(total, 0); + + Ok(()) +} + +#[tokio::test] +#[cfg(target_os = "linux")] +async fn anon_pipe_from_owned_fd() -> std::io::Result<()> { + use nix::fcntl::OFlag; + use std::os::unix::io::{FromRawFd, OwnedFd}; + + const DATA: &[u8] = b"this is some data to write to the pipe"; + + let fds = nix::unistd::pipe2(OFlag::O_CLOEXEC | OFlag::O_NONBLOCK)?; + let (rx_fd, tx_fd) = unsafe { (OwnedFd::from_raw_fd(fds.0), OwnedFd::from_raw_fd(fds.1)) }; + + let mut rx = pipe::Receiver::from_owned_fd(rx_fd)?; + let mut tx = pipe::Sender::from_owned_fd(tx_fd)?; + + let mut buf = vec![0; DATA.len()]; + tx.write_all(DATA).await?; + rx.read_exact(&mut buf).await?; + assert_eq!(buf, DATA); + + Ok(()) +} + +#[tokio::test] +async fn anon_pipe_into_nonblocking_fd() -> std::io::Result<()> { + let (tx, rx) = pipe::pipe()?; + + let tx_fd = tx.into_nonblocking_fd()?; + let rx_fd = rx.into_nonblocking_fd()?; + + assert!(is_nonblocking(&tx_fd)?); + assert!(is_nonblocking(&rx_fd)?); + + Ok(()) +} + +#[tokio::test] +async fn anon_pipe_into_blocking_fd() -> std::io::Result<()> { + let (tx, rx) = pipe::pipe()?; + + let tx_fd = tx.into_blocking_fd()?; + let rx_fd = rx.into_blocking_fd()?; + + assert!(!is_nonblocking(&tx_fd)?); + assert!(!is_nonblocking(&rx_fd)?); + + Ok(()) +} From 02b779e315c5c5f0dbbc8b56fc711cc8e665ee1e Mon Sep 17 00:00:00 2001 From: Paul Olteanu Date: Sat, 30 Dec 2023 15:05:37 -0500 Subject: [PATCH 013/162] sync: add `watch::Receiver::mark_unchanged` (#6252) --- tokio/src/sync/watch.rs | 11 +++++++++++ tokio/tests/sync_watch.rs | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+) diff --git a/tokio/src/sync/watch.rs b/tokio/src/sync/watch.rs index 587aa795aef..3979b07202f 100644 --- a/tokio/src/sync/watch.rs +++ b/tokio/src/sync/watch.rs @@ -669,6 +669,17 @@ impl Receiver { self.version.decrement(); } + /// Marks the state as unchanged. 
+ /// + /// The current value will be considered seen by the receiver. + /// + /// This is useful if you are not interested in the current value + /// visible in the receiver. + pub fn mark_unchanged(&mut self) { + let current_version = self.shared.state.load().version(); + self.version = current_version; + } + /// Waits for a change notification, then marks the newest value as seen. /// /// If the newest value in the channel has not yet been marked seen when diff --git a/tokio/tests/sync_watch.rs b/tokio/tests/sync_watch.rs index 70cc110b937..a5b229f7ddc 100644 --- a/tokio/tests/sync_watch.rs +++ b/tokio/tests/sync_watch.rs @@ -102,6 +102,39 @@ fn rx_mark_changed() { assert_eq!(*rx.borrow(), "two"); } +#[test] +fn rx_mark_unchanged() { + let (tx, mut rx) = watch::channel("one"); + + let mut rx2 = rx.clone(); + + { + assert!(!rx.has_changed().unwrap()); + + rx.mark_changed(); + assert!(rx.has_changed().unwrap()); + + rx.mark_unchanged(); + assert!(!rx.has_changed().unwrap()); + + let mut t = spawn(rx.changed()); + assert_pending!(t.poll()); + } + + { + assert!(!rx2.has_changed().unwrap()); + + tx.send("two").unwrap(); + assert!(rx2.has_changed().unwrap()); + + rx2.mark_unchanged(); + assert!(!rx2.has_changed().unwrap()); + assert_eq!(*rx2.borrow_and_update(), "two"); + } + + assert_eq!(*rx.borrow(), "two"); +} + #[test] fn multi_rx() { let (tx, mut rx1) = watch::channel("one"); From 7341004535ffccc05ee8bd1fd856e587509335bf Mon Sep 17 00:00:00 2001 From: Owen Leung Date: Mon, 1 Jan 2024 21:26:58 +0800 Subject: [PATCH 014/162] sync: add `{Receiver,UnboundedReceiver}::poll_recv_many` (#6236) --- tokio/src/sync/mpsc/bounded.rs | 81 +++++++++++++++++++++++++++++++- tokio/src/sync/mpsc/unbounded.rs | 81 +++++++++++++++++++++++++++++++- 2 files changed, 158 insertions(+), 4 deletions(-) diff --git a/tokio/src/sync/mpsc/bounded.rs b/tokio/src/sync/mpsc/bounded.rs index 4aa8b6377ca..3a795d55774 100644 --- a/tokio/src/sync/mpsc/bounded.rs +++ b/tokio/src/sync/mpsc/bounded.rs 
@@ -464,8 +464,8 @@ impl Receiver { /// When the method returns `Poll::Pending`, the `Waker` in the provided /// `Context` is scheduled to receive a wakeup when a message is sent on any /// receiver, or when the channel is closed. Note that on multiple calls to - /// `poll_recv`, only the `Waker` from the `Context` passed to the most - /// recent call is scheduled to receive a wakeup. + /// `poll_recv` or `poll_recv_many`, only the `Waker` from the `Context` + /// passed to the most recent call is scheduled to receive a wakeup. /// /// If this method returns `Poll::Pending` due to a spurious failure, then /// the `Waker` will be notified when the situation causing the spurious @@ -475,6 +475,83 @@ impl Receiver { pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll> { self.chan.recv(cx) } + + /// Polls to receive multiple messages on this channel, extending the provided buffer. + /// + /// This method returns: + /// * `Poll::Pending` if no messages are available but the channel is not closed, or if a + /// spurious failure happens. + /// * `Poll::Ready(count)` where `count` is the number of messages successfully received and + /// stored in `buffer`. This can be less than, or equal to, `limit`. + /// * `Poll::Ready(0)` if `limit` is set to zero or when the channel is closed. + /// + /// When the method returns `Poll::Pending`, the `Waker` in the provided + /// `Context` is scheduled to receive a wakeup when a message is sent on any + /// receiver, or when the channel is closed. Note that on multiple calls to + /// `poll_recv` or `poll_recv_many`, only the `Waker` from the `Context` + /// passed to the most recent call is scheduled to receive a wakeup. + /// + /// Note that this method does not guarantee that exactly `limit` messages + /// are received. Rather, if at least one message is available, it returns + /// as many messages as it can up to the given limit. This method returns + /// zero only if the channel is closed (or if `limit` is zero). 
+ /// + /// # Examples + /// + /// ``` + /// use std::task::{Context, Poll}; + /// use std::pin::Pin; + /// use tokio::sync::mpsc; + /// use futures::Future; + /// + /// struct MyReceiverFuture<'a> { + /// receiver: mpsc::Receiver, + /// buffer: &'a mut Vec, + /// limit: usize, + /// } + /// + /// impl<'a> Future for MyReceiverFuture<'a> { + /// type Output = usize; // Number of messages received + /// + /// fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + /// let MyReceiverFuture { receiver, buffer, limit } = &mut *self; + /// + /// // Now `receiver` and `buffer` are mutable references, and `limit` is copied + /// match receiver.poll_recv_many(cx, *buffer, *limit) { + /// Poll::Pending => Poll::Pending, + /// Poll::Ready(count) => Poll::Ready(count), + /// } + /// } + /// } + /// + /// #[tokio::main] + /// async fn main() { + /// let (tx, rx) = mpsc::channel(32); + /// let mut buffer = Vec::new(); + /// + /// let my_receiver_future = MyReceiverFuture { + /// receiver: rx, + /// buffer: &mut buffer, + /// limit: 3, + /// }; + /// + /// for i in 0..10 { + /// tx.send(i).await.unwrap(); + /// } + /// + /// let count = my_receiver_future.await; + /// assert_eq!(count, 3); + /// assert_eq!(buffer, vec![0,1,2]) + /// } + /// ``` + pub fn poll_recv_many( + &mut self, + cx: &mut Context<'_>, + buffer: &mut Vec, + limit: usize, + ) -> Poll { + self.chan.recv_many(cx, buffer, limit) + } } impl fmt::Debug for Receiver { diff --git a/tokio/src/sync/mpsc/unbounded.rs b/tokio/src/sync/mpsc/unbounded.rs index d996b8564af..7dff942ee70 100644 --- a/tokio/src/sync/mpsc/unbounded.rs +++ b/tokio/src/sync/mpsc/unbounded.rs @@ -343,8 +343,8 @@ impl UnboundedReceiver { /// When the method returns `Poll::Pending`, the `Waker` in the provided /// `Context` is scheduled to receive a wakeup when a message is sent on any /// receiver, or when the channel is closed. 
Note that on multiple calls to - /// `poll_recv`, only the `Waker` from the `Context` passed to the most - /// recent call is scheduled to receive a wakeup. + /// `poll_recv` or `poll_recv_many`, only the `Waker` from the `Context` + /// passed to the most recent call is scheduled to receive a wakeup. /// /// If this method returns `Poll::Pending` due to a spurious failure, then /// the `Waker` will be notified when the situation causing the spurious @@ -354,6 +354,83 @@ impl UnboundedReceiver { pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll> { self.chan.recv(cx) } + + /// Polls to receive multiple messages on this channel, extending the provided buffer. + /// + /// This method returns: + /// * `Poll::Pending` if no messages are available but the channel is not closed, or if a + /// spurious failure happens. + /// * `Poll::Ready(count)` where `count` is the number of messages successfully received and + /// stored in `buffer`. This can be less than, or equal to, `limit`. + /// * `Poll::Ready(0)` if `limit` is set to zero or when the channel is closed. + /// + /// When the method returns `Poll::Pending`, the `Waker` in the provided + /// `Context` is scheduled to receive a wakeup when a message is sent on any + /// receiver, or when the channel is closed. Note that on multiple calls to + /// `poll_recv` or `poll_recv_many`, only the `Waker` from the `Context` + /// passed to the most recent call is scheduled to receive a wakeup. + /// + /// Note that this method does not guarantee that exactly `limit` messages + /// are received. Rather, if at least one message is available, it returns + /// as many messages as it can up to the given limit. This method returns + /// zero only if the channel is closed (or if `limit` is zero). 
+ /// + /// # Examples + /// + /// ``` + /// use std::task::{Context, Poll}; + /// use std::pin::Pin; + /// use tokio::sync::mpsc; + /// use futures::Future; + /// + /// struct MyReceiverFuture<'a> { + /// receiver: mpsc::UnboundedReceiver, + /// buffer: &'a mut Vec, + /// limit: usize, + /// } + /// + /// impl<'a> Future for MyReceiverFuture<'a> { + /// type Output = usize; // Number of messages received + /// + /// fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + /// let MyReceiverFuture { receiver, buffer, limit } = &mut *self; + /// + /// // Now `receiver` and `buffer` are mutable references, and `limit` is copied + /// match receiver.poll_recv_many(cx, *buffer, *limit) { + /// Poll::Pending => Poll::Pending, + /// Poll::Ready(count) => Poll::Ready(count), + /// } + /// } + /// } + /// + /// #[tokio::main] + /// async fn main() { + /// let (tx, rx) = mpsc::unbounded_channel::(); + /// let mut buffer = Vec::new(); + /// + /// let my_receiver_future = MyReceiverFuture { + /// receiver: rx, + /// buffer: &mut buffer, + /// limit: 3, + /// }; + /// + /// for i in 0..10 { + /// tx.send(i).expect("Unable to send integer"); + /// } + /// + /// let count = my_receiver_future.await; + /// assert_eq!(count, 3); + /// assert_eq!(buffer, vec![0,1,2]) + /// } + /// ``` + pub fn poll_recv_many( + &mut self, + cx: &mut Context<'_>, + buffer: &mut Vec, + limit: usize, + ) -> Poll { + self.chan.recv_many(cx, buffer, limit) + } } impl UnboundedSender { From c029771247e31bfba61fd62400986c0d155ef0d0 Mon Sep 17 00:00:00 2001 From: Jesse Schalken Date: Tue, 2 Jan 2024 22:08:11 +1100 Subject: [PATCH 015/162] task: fix typo (#6261) --- tokio/src/task/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio/src/task/mod.rs b/tokio/src/task/mod.rs index 6b352b16fa9..aefa395c044 100644 --- a/tokio/src/task/mod.rs +++ b/tokio/src/task/mod.rs @@ -123,7 +123,7 @@ //! all tasks on it. //! //! 
When tasks are shut down, it will stop running at whichever `.await` it has -//! yielded at. All local variables are destroyed by running their detructor. +//! yielded at. All local variables are destroyed by running their destructor. //! Once shutdown has completed, awaiting the [`JoinHandle`] will fail with a //! [cancelled error](crate::task::JoinError::is_cancelled). //! From 2d2faf6014ea3ba8329afcdb311fabbc04dd71e1 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Tue, 2 Jan 2024 17:25:51 +0100 Subject: [PATCH 016/162] rt: improve robustness of `wake_in_drop_after_panic` test (#6238) --- tokio/tests/rt_basic.rs | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/tokio/tests/rt_basic.rs b/tokio/tests/rt_basic.rs index 789c0475da4..47bf2dfdc12 100644 --- a/tokio/tests/rt_basic.rs +++ b/tokio/tests/rt_basic.rs @@ -181,25 +181,35 @@ fn drop_tasks_in_context() { #[cfg_attr(target_os = "wasi", ignore = "Wasi does not support panic recovery")] #[should_panic(expected = "boom")] fn wake_in_drop_after_panic() { - let (tx, rx) = oneshot::channel::<()>(); - struct WakeOnDrop(Option>); impl Drop for WakeOnDrop { fn drop(&mut self) { - self.0.take().unwrap().send(()).unwrap(); + let _ = self.0.take().unwrap().send(()); } } let rt = rt(); + let (tx1, rx1) = oneshot::channel::<()>(); + let (tx2, rx2) = oneshot::channel::<()>(); + + // Spawn two tasks. We don't know the order in which they are dropped, so we + // make both tasks identical. When the first task is dropped, we wake up the + // second task. This ensures that we trigger a wakeup on a live task while + // handling the "boom" panic, no matter the order in which the tasks are + // dropped. 
rt.spawn(async move { - let _wake_on_drop = WakeOnDrop(Some(tx)); - // wait forever - futures::future::pending::<()>().await; + let _wake_on_drop = WakeOnDrop(Some(tx2)); + let _ = rx1.await; + unreachable!() }); - let _join = rt.spawn(async move { rx.await }); + rt.spawn(async move { + let _wake_on_drop = WakeOnDrop(Some(tx1)); + let _ = rx2.await; + unreachable!() + }); rt.block_on(async { tokio::task::yield_now().await; From 7c606ab44aa9f0c33dfcf5bc8678411dddeaa9e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Th=C3=A9odore=20Pr=C3=A9vot?= Date: Tue, 2 Jan 2024 17:34:56 +0100 Subject: [PATCH 017/162] sync: add `Sender::{try_,}reserve_many` (#6205) --- tokio/src/sync/batch_semaphore.rs | 26 ++-- tokio/src/sync/mpsc/bounded.rs | 224 +++++++++++++++++++++++++++++- tokio/src/sync/mpsc/mod.rs | 4 +- tokio/src/sync/rwlock.rs | 8 +- tokio/src/sync/semaphore.rs | 12 +- tokio/tests/sync_mpsc.rs | 179 ++++++++++++++++++++++++ 6 files changed, 425 insertions(+), 28 deletions(-) diff --git a/tokio/src/sync/batch_semaphore.rs b/tokio/src/sync/batch_semaphore.rs index 35de9a57436..aa23dea7d3c 100644 --- a/tokio/src/sync/batch_semaphore.rs +++ b/tokio/src/sync/batch_semaphore.rs @@ -71,7 +71,7 @@ pub struct AcquireError(()); pub(crate) struct Acquire<'a> { node: Waiter, semaphore: &'a Semaphore, - num_permits: u32, + num_permits: usize, queued: bool, } @@ -262,13 +262,13 @@ impl Semaphore { self.permits.load(Acquire) & Self::CLOSED == Self::CLOSED } - pub(crate) fn try_acquire(&self, num_permits: u32) -> Result<(), TryAcquireError> { + pub(crate) fn try_acquire(&self, num_permits: usize) -> Result<(), TryAcquireError> { assert!( - num_permits as usize <= Self::MAX_PERMITS, + num_permits <= Self::MAX_PERMITS, "a semaphore may not have more than MAX_PERMITS permits ({})", Self::MAX_PERMITS ); - let num_permits = (num_permits as usize) << Self::PERMIT_SHIFT; + let num_permits = num_permits << Self::PERMIT_SHIFT; let mut curr = self.permits.load(Acquire); loop { // Has the semaphore 
closed? @@ -293,7 +293,7 @@ impl Semaphore { } } - pub(crate) fn acquire(&self, num_permits: u32) -> Acquire<'_> { + pub(crate) fn acquire(&self, num_permits: usize) -> Acquire<'_> { Acquire::new(self, num_permits) } @@ -371,7 +371,7 @@ impl Semaphore { fn poll_acquire( &self, cx: &mut Context<'_>, - num_permits: u32, + num_permits: usize, node: Pin<&mut Waiter>, queued: bool, ) -> Poll> { @@ -380,7 +380,7 @@ impl Semaphore { let needed = if queued { node.state.load(Acquire) << Self::PERMIT_SHIFT } else { - (num_permits as usize) << Self::PERMIT_SHIFT + num_permits << Self::PERMIT_SHIFT }; let mut lock = None; @@ -506,12 +506,12 @@ impl fmt::Debug for Semaphore { impl Waiter { fn new( - num_permits: u32, + num_permits: usize, #[cfg(all(tokio_unstable, feature = "tracing"))] ctx: trace::AsyncOpTracingCtx, ) -> Self { Waiter { waker: UnsafeCell::new(None), - state: AtomicUsize::new(num_permits as usize), + state: AtomicUsize::new(num_permits), pointers: linked_list::Pointers::new(), #[cfg(all(tokio_unstable, feature = "tracing"))] ctx, @@ -591,7 +591,7 @@ impl Future for Acquire<'_> { } impl<'a> Acquire<'a> { - fn new(semaphore: &'a Semaphore, num_permits: u32) -> Self { + fn new(semaphore: &'a Semaphore, num_permits: usize) -> Self { #[cfg(any(not(tokio_unstable), not(feature = "tracing")))] return Self { node: Waiter::new(num_permits), @@ -635,14 +635,14 @@ impl<'a> Acquire<'a> { }); } - fn project(self: Pin<&mut Self>) -> (Pin<&mut Waiter>, &Semaphore, u32, &mut bool) { + fn project(self: Pin<&mut Self>) -> (Pin<&mut Waiter>, &Semaphore, usize, &mut bool) { fn is_unpin() {} unsafe { // Safety: all fields other than `node` are `Unpin` is_unpin::<&Semaphore>(); is_unpin::<&mut bool>(); - is_unpin::(); + is_unpin::(); let this = self.get_unchecked_mut(); ( @@ -673,7 +673,7 @@ impl Drop for Acquire<'_> { // Safety: we have locked the wait list. 
unsafe { waiters.queue.remove(node) }; - let acquired_permits = self.num_permits as usize - self.node.state.load(Acquire); + let acquired_permits = self.num_permits - self.node.state.load(Acquire); if acquired_permits > 0 { self.semaphore.add_permits_locked(acquired_permits, waiters); } diff --git a/tokio/src/sync/mpsc/bounded.rs b/tokio/src/sync/mpsc/bounded.rs index 3a795d55774..a1e0a82d9e2 100644 --- a/tokio/src/sync/mpsc/bounded.rs +++ b/tokio/src/sync/mpsc/bounded.rs @@ -68,6 +68,18 @@ pub struct Permit<'a, T> { chan: &'a chan::Tx, } +/// An [`Iterator`] of [`Permit`] that can be used to hold `n` slots in the channel. +/// +/// `PermitIterator` values are returned by [`Sender::reserve_many()`] and [`Sender::try_reserve_many()`] +/// and are used to guarantee channel capacity before generating `n` messages to send. +/// +/// [`Sender::reserve_many()`]: Sender::reserve_many +/// [`Sender::try_reserve_many()`]: Sender::try_reserve_many +pub struct PermitIterator<'a, T> { + chan: &'a chan::Tx, + n: usize, +} + /// Owned permit to send one value into the channel. /// /// This is identical to the [`Permit`] type, except that it moves the sender @@ -926,10 +938,74 @@ impl Sender { /// } /// ``` pub async fn reserve(&self) -> Result, SendError<()>> { - self.reserve_inner().await?; + self.reserve_inner(1).await?; Ok(Permit { chan: &self.chan }) } + /// Waits for channel capacity. Once capacity to send `n` messages is + /// available, it is reserved for the caller. + /// + /// If the channel is full or if there are fewer than `n` permits available, the function waits + /// for the number of unreceived messages to become `n` less than the channel capacity. + /// Capacity to send `n` message is then reserved for the caller. + /// + /// A [`PermitIterator`] is returned to track the reserved capacity. + /// You can call this [`Iterator`] until it is exhausted to + /// get a [`Permit`] and then call [`Permit::send`]. 
This function is similar to + /// [`try_reserve_many`] except it awaits for the slots to become available. + /// + /// If the channel is closed, the function returns a [`SendError`]. + /// + /// Dropping [`PermitIterator`] without consuming it entirely releases the remaining + /// permits back to the channel. + /// + /// [`PermitIterator`]: PermitIterator + /// [`Permit`]: Permit + /// [`send`]: Permit::send + /// [`try_reserve_many`]: Sender::try_reserve_many + /// + /// # Cancel safety + /// + /// This channel uses a queue to ensure that calls to `send` and `reserve_many` + /// complete in the order they were requested. Cancelling a call to + /// `reserve_many` makes you lose your place in the queue. + /// + /// # Examples + /// + /// ``` + /// use tokio::sync::mpsc; + /// + /// #[tokio::main] + /// async fn main() { + /// let (tx, mut rx) = mpsc::channel(2); + /// + /// // Reserve capacity + /// let mut permit = tx.reserve_many(2).await.unwrap(); + /// + /// // Trying to send directly on the `tx` will fail due to no + /// // available capacity. + /// assert!(tx.try_send(123).is_err()); + /// + /// // Sending with the permit iterator succeeds + /// permit.next().unwrap().send(456); + /// permit.next().unwrap().send(457); + /// + /// // The iterator should now be exhausted + /// assert!(permit.next().is_none()); + /// + /// // The value sent on the permit is received + /// assert_eq!(rx.recv().await.unwrap(), 456); + /// assert_eq!(rx.recv().await.unwrap(), 457); + /// } + /// ``` + pub async fn reserve_many(&self, n: usize) -> Result, SendError<()>> { + self.reserve_inner(n).await?; + Ok(PermitIterator { + chan: &self.chan, + n, + }) + } + /// Waits for channel capacity, moving the `Sender` and returning an owned /// permit. Once capacity to send one message is available, it is reserved /// for the caller. 
@@ -1011,16 +1087,19 @@ impl Sender { /// [`send`]: OwnedPermit::send /// [`Arc::clone`]: std::sync::Arc::clone pub async fn reserve_owned(self) -> Result, SendError<()>> { - self.reserve_inner().await?; + self.reserve_inner(1).await?; Ok(OwnedPermit { chan: Some(self.chan), }) } - async fn reserve_inner(&self) -> Result<(), SendError<()>> { + async fn reserve_inner(&self, n: usize) -> Result<(), SendError<()>> { crate::trace::async_trace_leaf().await; - match self.chan.semaphore().semaphore.acquire(1).await { + if n > self.max_capacity() { + return Err(SendError(())); + } + match self.chan.semaphore().semaphore.acquire(n).await { Ok(()) => Ok(()), Err(_) => Err(SendError(())), } @@ -1079,6 +1158,91 @@ impl Sender { Ok(Permit { chan: &self.chan }) } + /// Tries to acquire `n` slots in the channel without waiting for the slot to become + /// available. + /// + /// A [`PermitIterator`] is returned to track the reserved capacity. + /// You can call this [`Iterator`] until it is exhausted to + /// get a [`Permit`] and then call [`Permit::send`]. This function is similar to + /// [`reserve_many`] except it does not await for the slots to become available. + /// + /// If there are fewer than `n` permits available on the channel, then + /// this function will return a [`TrySendError::Full`]. If the channel is closed + /// this function will return a [`TrySendError::Closed`]. + /// + /// Dropping [`PermitIterator`] without consuming it entirely releases the remaining + /// permits back to the channel. + /// + /// [`PermitIterator`]: PermitIterator + /// [`send`]: Permit::send + /// [`reserve_many`]: Sender::reserve_many + /// + /// # Examples + /// + /// ``` + /// use tokio::sync::mpsc; + /// + /// #[tokio::main] + /// async fn main() { + /// let (tx, mut rx) = mpsc::channel(2); + /// + /// // Reserve capacity + /// let mut permit = tx.try_reserve_many(2).unwrap(); + /// + /// // Trying to send directly on the `tx` will fail due to no + /// // available capacity. 
+ /// assert!(tx.try_send(123).is_err()); + /// + /// // Trying to reserve an additional slot on the `tx` will + /// // fail because there is no capacity. + /// assert!(tx.try_reserve().is_err()); + /// + /// // Sending with the permit iterator succeeds + /// permit.next().unwrap().send(456); + /// permit.next().unwrap().send(457); + /// + /// // The iterator should now be exhausted + /// assert!(permit.next().is_none()); + /// + /// // The value sent on the permit is received + /// assert_eq!(rx.recv().await.unwrap(), 456); + /// assert_eq!(rx.recv().await.unwrap(), 457); + /// + /// // Trying to call try_reserve_many with 0 will return an empty iterator + /// let mut permit = tx.try_reserve_many(0).unwrap(); + /// assert!(permit.next().is_none()); + /// + /// // Trying to call try_reserve_many with a number greater than the channel + /// // capacity will return an error + /// let permit = tx.try_reserve_many(3); + /// assert!(permit.is_err()); + /// + /// // Trying to call try_reserve_many on a closed channel will return an error + /// drop(rx); + /// let permit = tx.try_reserve_many(1); + /// assert!(permit.is_err()); + /// + /// let permit = tx.try_reserve_many(0); + /// assert!(permit.is_err()); + /// } + /// ``` + pub fn try_reserve_many(&self, n: usize) -> Result, TrySendError<()>> { + if n > self.max_capacity() { + return Err(TrySendError::Full(())); + } + + match self.chan.semaphore().semaphore.try_acquire(n) { + Ok(()) => {} + Err(TryAcquireError::Closed) => return Err(TrySendError::Closed(())), + Err(TryAcquireError::NoPermits) => return Err(TrySendError::Full(())), + } + + Ok(PermitIterator { + chan: &self.chan, + n, + }) + } + /// Tries to acquire a slot in the channel without waiting for the slot to become /// available, returning an owned permit. 
/// @@ -1355,6 +1519,58 @@ impl fmt::Debug for Permit<'_, T> { } } +// ===== impl PermitIterator ===== + +impl<'a, T> Iterator for PermitIterator<'a, T> { + type Item = Permit<'a, T>; + + fn next(&mut self) -> Option { + if self.n == 0 { + return None; + } + + self.n -= 1; + Some(Permit { chan: self.chan }) + } + + fn size_hint(&self) -> (usize, Option) { + let n = self.n; + (n, Some(n)) + } +} +impl ExactSizeIterator for PermitIterator<'_, T> {} +impl std::iter::FusedIterator for PermitIterator<'_, T> {} + +impl Drop for PermitIterator<'_, T> { + fn drop(&mut self) { + use chan::Semaphore; + + if self.n == 0 { + return; + } + + let semaphore = self.chan.semaphore(); + + // Add the remaining permits back to the semaphore + semaphore.add_permits(self.n); + + // If this is the last sender for this channel, wake the receiver so + // that it can be notified that the channel is closed. + if semaphore.is_closed() && semaphore.is_idle() { + self.chan.wake_rx(); + } + } +} + +impl fmt::Debug for PermitIterator<'_, T> { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("PermitIterator") + .field("chan", &self.chan) + .field("capacity", &self.n) + .finish() + } +} + // ===== impl Permit ===== impl OwnedPermit { diff --git a/tokio/src/sync/mpsc/mod.rs b/tokio/src/sync/mpsc/mod.rs index b2af084b2ae..052620be1a9 100644 --- a/tokio/src/sync/mpsc/mod.rs +++ b/tokio/src/sync/mpsc/mod.rs @@ -95,7 +95,9 @@ pub(super) mod block; mod bounded; -pub use self::bounded::{channel, OwnedPermit, Permit, Receiver, Sender, WeakSender}; +pub use self::bounded::{ + channel, OwnedPermit, Permit, PermitIterator, Receiver, Sender, WeakSender, +}; mod chan; diff --git a/tokio/src/sync/rwlock.rs b/tokio/src/sync/rwlock.rs index 877458a57fb..37cf73c5905 100644 --- a/tokio/src/sync/rwlock.rs +++ b/tokio/src/sync/rwlock.rs @@ -772,7 +772,7 @@ impl RwLock { /// ``` pub async fn write(&self) -> RwLockWriteGuard<'_, T> { let acquire_fut = async { - 
self.s.acquire(self.mr).await.unwrap_or_else(|_| { + self.s.acquire(self.mr as usize).await.unwrap_or_else(|_| { // The semaphore was closed. but, we never explicitly close it, and we have a // handle to it through the Arc, which means that this can never happen. unreachable!() @@ -907,7 +907,7 @@ impl RwLock { let resource_span = self.resource_span.clone(); let acquire_fut = async { - self.s.acquire(self.mr).await.unwrap_or_else(|_| { + self.s.acquire(self.mr as usize).await.unwrap_or_else(|_| { // The semaphore was closed. but, we never explicitly close it, and we have a // handle to it through the Arc, which means that this can never happen. unreachable!() @@ -971,7 +971,7 @@ impl RwLock { /// } /// ``` pub fn try_write(&self) -> Result, TryLockError> { - match self.s.try_acquire(self.mr) { + match self.s.try_acquire(self.mr as usize) { Ok(permit) => permit, Err(TryAcquireError::NoPermits) => return Err(TryLockError(())), Err(TryAcquireError::Closed) => unreachable!(), @@ -1029,7 +1029,7 @@ impl RwLock { /// } /// ``` pub fn try_write_owned(self: Arc) -> Result, TryLockError> { - match self.s.try_acquire(self.mr) { + match self.s.try_acquire(self.mr as usize) { Ok(permit) => permit, Err(TryAcquireError::NoPermits) => return Err(TryLockError(())), Err(TryAcquireError::Closed) => unreachable!(), diff --git a/tokio/src/sync/semaphore.rs b/tokio/src/sync/semaphore.rs index 8b8fdb23871..25e4134373c 100644 --- a/tokio/src/sync/semaphore.rs +++ b/tokio/src/sync/semaphore.rs @@ -565,7 +565,7 @@ impl Semaphore { pub async fn acquire_many(&self, n: u32) -> Result, AcquireError> { #[cfg(all(tokio_unstable, feature = "tracing"))] trace::async_op( - || self.ll_sem.acquire(n), + || self.ll_sem.acquire(n as usize), self.resource_span.clone(), "Semaphore::acquire_many", "poll", @@ -574,7 +574,7 @@ impl Semaphore { .await?; #[cfg(not(all(tokio_unstable, feature = "tracing")))] - self.ll_sem.acquire(n).await?; + self.ll_sem.acquire(n as usize).await?; Ok(SemaphorePermit { sem: 
self, @@ -646,7 +646,7 @@ impl Semaphore { /// [`TryAcquireError::NoPermits`]: crate::sync::TryAcquireError::NoPermits /// [`SemaphorePermit`]: crate::sync::SemaphorePermit pub fn try_acquire_many(&self, n: u32) -> Result, TryAcquireError> { - match self.ll_sem.try_acquire(n) { + match self.ll_sem.try_acquire(n as usize) { Ok(()) => Ok(SemaphorePermit { sem: self, permits: n, @@ -764,14 +764,14 @@ impl Semaphore { ) -> Result { #[cfg(all(tokio_unstable, feature = "tracing"))] let inner = trace::async_op( - || self.ll_sem.acquire(n), + || self.ll_sem.acquire(n as usize), self.resource_span.clone(), "Semaphore::acquire_many_owned", "poll", true, ); #[cfg(not(all(tokio_unstable, feature = "tracing")))] - let inner = self.ll_sem.acquire(n); + let inner = self.ll_sem.acquire(n as usize); inner.await?; Ok(OwnedSemaphorePermit { @@ -855,7 +855,7 @@ impl Semaphore { self: Arc, n: u32, ) -> Result { - match self.ll_sem.try_acquire(n) { + match self.ll_sem.try_acquire(n as usize) { Ok(()) => Ok(OwnedSemaphorePermit { sem: self, permits: n, diff --git a/tokio/tests/sync_mpsc.rs b/tokio/tests/sync_mpsc.rs index a5c15a4cfc6..1b581ce98c1 100644 --- a/tokio/tests/sync_mpsc.rs +++ b/tokio/tests/sync_mpsc.rs @@ -522,6 +522,79 @@ async fn try_send_fail_with_try_recv() { assert_eq!(rx.try_recv(), Err(TryRecvError::Disconnected)); } +#[maybe_tokio_test] +async fn reserve_many_above_cap() { + const MAX_PERMITS: usize = tokio::sync::Semaphore::MAX_PERMITS; + let (tx, _rx) = mpsc::channel::<()>(1); + + assert_err!(tx.reserve_many(2).await); + assert_err!(tx.reserve_many(MAX_PERMITS + 1).await); + assert_err!(tx.reserve_many(usize::MAX).await); +} + +#[test] +fn try_reserve_many_zero() { + let (tx, rx) = mpsc::channel::<()>(1); + + // Succeeds when not closed. + assert!(assert_ok!(tx.try_reserve_many(0)).next().is_none()); + + // Even when channel is full. 
+ tx.try_send(()).unwrap(); + assert!(assert_ok!(tx.try_reserve_many(0)).next().is_none()); + + drop(rx); + + // Closed error when closed. + assert_eq!( + assert_err!(tx.try_reserve_many(0)), + TrySendError::Closed(()) + ); +} + +#[maybe_tokio_test] +async fn reserve_many_zero() { + let (tx, rx) = mpsc::channel::<()>(1); + + // Succeeds when not closed. + assert!(assert_ok!(tx.reserve_many(0).await).next().is_none()); + + // Even when channel is full. + tx.send(()).await.unwrap(); + assert!(assert_ok!(tx.reserve_many(0).await).next().is_none()); + + drop(rx); + + // Closed error when closed. + assert_err!(tx.reserve_many(0).await); +} + +#[maybe_tokio_test] +async fn try_reserve_many_edge_cases() { + const MAX_PERMITS: usize = tokio::sync::Semaphore::MAX_PERMITS; + + let (tx, rx) = mpsc::channel::<()>(1); + + let mut permit = assert_ok!(tx.try_reserve_many(0)); + assert!(permit.next().is_none()); + + let permit = tx.try_reserve_many(MAX_PERMITS + 1); + match assert_err!(permit) { + TrySendError::Full(..) => {} + _ => panic!(), + } + + let permit = tx.try_reserve_many(usize::MAX); + match assert_err!(permit) { + TrySendError::Full(..) 
=> {} + _ => panic!(), + } + + // Dropping the receiver should close the channel + drop(rx); + assert_err!(tx.reserve_many(0).await); +} + #[maybe_tokio_test] async fn try_reserve_fails() { let (tx, mut rx) = mpsc::channel(1); @@ -545,6 +618,87 @@ async fn try_reserve_fails() { let _permit = tx.try_reserve().unwrap(); } +#[maybe_tokio_test] +async fn reserve_many_and_send() { + let (tx, mut rx) = mpsc::channel(100); + for i in 0..100 { + for permit in assert_ok!(tx.reserve_many(i).await) { + permit.send("foo"); + assert_eq!(rx.recv().await, Some("foo")); + } + assert_eq!(rx.try_recv(), Err(TryRecvError::Empty)); + } +} +#[maybe_tokio_test] +async fn try_reserve_many_and_send() { + let (tx, mut rx) = mpsc::channel(100); + for i in 0..100 { + for permit in assert_ok!(tx.try_reserve_many(i)) { + permit.send("foo"); + assert_eq!(rx.recv().await, Some("foo")); + } + assert_eq!(rx.try_recv(), Err(TryRecvError::Empty)); + } +} + +#[maybe_tokio_test] +async fn reserve_many_on_closed_channel() { + let (tx, rx) = mpsc::channel::<()>(100); + drop(rx); + assert_err!(tx.reserve_many(10).await); +} + +#[maybe_tokio_test] +async fn try_reserve_many_on_closed_channel() { + let (tx, rx) = mpsc::channel::(100); + drop(rx); + match assert_err!(tx.try_reserve_many(10)) { + TrySendError::Closed(()) => {} + _ => panic!(), + }; +} + +#[maybe_tokio_test] +async fn try_reserve_many_full() { + // Reserve n capacity and send k messages + for n in 1..100 { + for k in 0..n { + let (tx, mut rx) = mpsc::channel::(n); + let permits = assert_ok!(tx.try_reserve_many(n)); + + assert_eq!(permits.len(), n); + assert_eq!(tx.capacity(), 0); + + match assert_err!(tx.try_reserve_many(1)) { + TrySendError::Full(..) 
=> {} + _ => panic!(), + }; + + for permit in permits.take(k) { + permit.send(0); + } + // We only used k permits on the n reserved + assert_eq!(tx.capacity(), n - k); + + // We can reserve more permits + assert_ok!(tx.try_reserve_many(1)); + + // But not more than the current capacity + match assert_err!(tx.try_reserve_many(n - k + 1)) { + TrySendError::Full(..) => {} + _ => panic!(), + }; + + for _i in 0..k { + assert_eq!(rx.recv().await, Some(0)); + } + + // Now that we've received everything, capacity should be back to n + assert_eq!(tx.capacity(), n); + } + } +} + #[tokio::test] #[cfg(feature = "full")] async fn drop_permit_releases_permit() { @@ -564,6 +718,30 @@ async fn drop_permit_releases_permit() { assert_ready_ok!(reserve2.poll()); } +#[maybe_tokio_test] +async fn drop_permit_iterator_releases_permits() { + // poll_ready reserves capacity, ensure that the capacity is released if tx + // is dropped w/o sending a value. + for n in 1..100 { + let (tx1, _rx) = mpsc::channel::(n); + let tx2 = tx1.clone(); + + let permits = assert_ok!(tx1.reserve_many(n).await); + + let mut reserve2 = tokio_test::task::spawn(tx2.reserve_many(n)); + assert_pending!(reserve2.poll()); + + drop(permits); + + assert!(reserve2.is_woken()); + + let permits = assert_ready_ok!(reserve2.poll()); + drop(permits); + + assert_eq!(tx1.capacity(), n); + } +} + #[maybe_tokio_test] async fn dropping_rx_closes_channel() { let (tx, rx) = mpsc::channel(100); @@ -573,6 +751,7 @@ async fn dropping_rx_closes_channel() { drop(rx); assert_err!(tx.reserve().await); + assert_err!(tx.reserve_many(10).await); assert_eq!(1, Arc::strong_count(&msg)); } From c7e7f203eeb3190a556cf312bc4770e6d3363b69 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Wed, 3 Jan 2024 19:01:27 +0100 Subject: [PATCH 018/162] chore: typographic improvements (#6262) --- tokio-stream/src/stream_map.rs | 10 +++++----- tokio-test/src/stream_mock.rs | 2 +- tokio-util/src/task/join_map.rs | 2 +- tokio/src/io/util/flush.rs | 1 + 
tokio/src/runtime/task/core.rs | 8 ++++---- 5 files changed, 12 insertions(+), 11 deletions(-) diff --git a/tokio-stream/src/stream_map.rs b/tokio-stream/src/stream_map.rs index da021c795f6..041e477aa51 100644 --- a/tokio-stream/src/stream_map.rs +++ b/tokio-stream/src/stream_map.rs @@ -209,7 +209,7 @@ pub struct StreamMap { impl StreamMap { /// An iterator visiting all key-value pairs in arbitrary order. /// - /// The iterator element type is &'a (K, V). + /// The iterator element type is `&'a (K, V)`. /// /// # Examples /// @@ -232,7 +232,7 @@ impl StreamMap { /// An iterator visiting all key-value pairs mutably in arbitrary order. /// - /// The iterator element type is &'a mut (K, V). + /// The iterator element type is `&'a mut (K, V)`. /// /// # Examples /// @@ -289,7 +289,7 @@ impl StreamMap { /// Returns an iterator visiting all keys in arbitrary order. /// - /// The iterator element type is &'a K. + /// The iterator element type is `&'a K`. /// /// # Examples /// @@ -312,7 +312,7 @@ impl StreamMap { /// An iterator visiting all values in arbitrary order. /// - /// The iterator element type is &'a V. + /// The iterator element type is `&'a V`. /// /// # Examples /// @@ -335,7 +335,7 @@ impl StreamMap { /// An iterator visiting all values mutably in arbitrary order. /// - /// The iterator element type is &'a mut V. + /// The iterator element type is `&'a mut V`. /// /// # Examples /// diff --git a/tokio-test/src/stream_mock.rs b/tokio-test/src/stream_mock.rs index 0426470af27..a3f3c776502 100644 --- a/tokio-test/src/stream_mock.rs +++ b/tokio-test/src/stream_mock.rs @@ -8,7 +8,7 @@ //! intervals between items. //! //! # Usage -//! To use the `StreamMock`, you need to create a builder using[`StreamMockBuilder`]. The builder +//! To use the `StreamMock`, you need to create a builder using [`StreamMockBuilder`]. The builder //! allows you to enqueue actions such as returning items or waiting for a certain duration. //! //! 
# Example diff --git a/tokio-util/src/task/join_map.rs b/tokio-util/src/task/join_map.rs index 1fbe274a2f8..412aa96c10b 100644 --- a/tokio-util/src/task/join_map.rs +++ b/tokio-util/src/task/join_map.rs @@ -462,7 +462,7 @@ where /// * `Some((key, Ok(value)))` if one of the tasks in this `JoinMap` has /// completed. The `value` is the return value of that ask, and `key` is /// the key associated with the task. - /// * `Some((key, Err(err))` if one of the tasks in this JoinMap` has + /// * `Some((key, Err(err))` if one of the tasks in this `JoinMap` has /// panicked or been aborted. `key` is the key associated with the task /// that panicked or was aborted. /// * `None` if the `JoinMap` is empty. diff --git a/tokio/src/io/util/flush.rs b/tokio/src/io/util/flush.rs index 88d60b868d4..42e06bcbb01 100644 --- a/tokio/src/io/util/flush.rs +++ b/tokio/src/io/util/flush.rs @@ -11,6 +11,7 @@ pin_project! { /// A future used to fully flush an I/O object. /// /// Created by the [`AsyncWriteExt::flush`][flush] function. + /// /// [flush]: crate::io::AsyncWriteExt::flush #[derive(Debug)] #[must_use = "futures do nothing unless you `.await` or poll them"] diff --git a/tokio/src/runtime/task/core.rs b/tokio/src/runtime/task/core.rs index 1903a01aa41..e61bbe5061d 100644 --- a/tokio/src/runtime/task/core.rs +++ b/tokio/src/runtime/task/core.rs @@ -157,10 +157,10 @@ pub(crate) struct Header { /// Table of function pointers for executing actions on the task. pub(super) vtable: &'static Vtable, - /// This integer contains the id of the OwnedTasks or LocalOwnedTasks that - /// this task is stored in. If the task is not in any list, should be the - /// id of the list that it was previously in, or `None` if it has never been - /// in any list. + /// This integer contains the id of the `OwnedTasks` or `LocalOwnedTasks` + /// that this task is stored in. 
If the task is not in any list, should be + /// the id of the list that it was previously in, or `None` if it has never + /// been in any list. /// /// Once a task has been bound to a list, it can never be bound to another /// list, even if removed from the first list. From 2f730d4d5a4810088428f6e6397bda42d376deec Mon Sep 17 00:00:00 2001 From: Alan Somers Date: Sat, 6 Jan 2024 03:59:04 -0700 Subject: [PATCH 019/162] ci: update FreeBSD CI environment to 13.2 (#6270) Because 13.1 is EoL --- .cirrus.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.cirrus.yml b/.cirrus.yml index 05f7f82b40f..b61a6a83f11 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -1,7 +1,7 @@ only_if: $CIRRUS_TAG == '' && ($CIRRUS_PR != '' || $CIRRUS_BRANCH == 'master' || $CIRRUS_BRANCH =~ 'tokio-.*') auto_cancellation: $CIRRUS_BRANCH != 'master' && $CIRRUS_BRANCH !=~ 'tokio-.*' freebsd_instance: - image_family: freebsd-13-1 + image_family: freebsd-13-2 env: RUST_STABLE: stable RUST_NIGHTLY: nightly-2023-10-21 From e929d0e8b9aa46d4b7cad0f5d692ad94f5b2da45 Mon Sep 17 00:00:00 2001 From: Alan Somers Date: Sat, 6 Jan 2024 04:14:32 -0700 Subject: [PATCH 020/162] tests: update mio-aio to 0.8 (#6269) This is a test-only dependency. The main reason for the update is to avoid transitively depending on a Nix version with a CVE. mio-aio 0.8.0 has a substantially different API than 0.7.0. Notably, it no longer includes any lio_listio functionality. So to test Tokio's handling of EVFILT_LIO events we must go low-level and call libc::lio_listio directly. 
--- .cargo/audit.toml | 8 - tokio/Cargo.toml | 2 +- tokio/tests/io_poll_aio.rs | 389 +++++++++++++++++-------------------- 3 files changed, 177 insertions(+), 222 deletions(-) delete mode 100644 .cargo/audit.toml diff --git a/.cargo/audit.toml b/.cargo/audit.toml deleted file mode 100644 index 25e764be2b1..00000000000 --- a/.cargo/audit.toml +++ /dev/null @@ -1,8 +0,0 @@ -# See https://github.com/rustsec/rustsec/blob/59e1d2ad0b9cbc6892c26de233d4925074b4b97b/cargo-audit/audit.toml.example for example. - -[advisories] -ignore = [ - # We depend on nix 0.22 only via mio-aio, a dev-dependency. - # https://github.com/tokio-rs/tokio/pull/4255#issuecomment-974786349 - "RUSTSEC-2021-0119", -] diff --git a/tokio/Cargo.toml b/tokio/Cargo.toml index 05157cdb5a4..87d2a460c8d 100644 --- a/tokio/Cargo.toml +++ b/tokio/Cargo.toml @@ -149,7 +149,7 @@ rand = "0.8.0" wasm-bindgen-test = "0.3.0" [target.'cfg(target_os = "freebsd")'.dev-dependencies] -mio-aio = { version = "0.7.0", features = ["tokio"] } +mio-aio = { version = "0.8.0", features = ["tokio"] } [target.'cfg(loom)'.dev-dependencies] loom = { version = "0.7", features = ["futures", "checkpoint"] } diff --git a/tokio/tests/io_poll_aio.rs b/tokio/tests/io_poll_aio.rs index f044af5cc4e..e83859f5c98 100644 --- a/tokio/tests/io_poll_aio.rs +++ b/tokio/tests/io_poll_aio.rs @@ -1,12 +1,12 @@ #![warn(rust_2018_idioms)] #![cfg(all(target_os = "freebsd", feature = "net"))] -use mio_aio::{AioCb, AioFsyncMode, LioCb}; +use mio_aio::{AioFsyncMode, SourceApi}; use std::{ future::Future, - mem, + io, mem, os::unix::io::{AsRawFd, RawFd}, - pin::Pin, + pin::{pin, Pin}, task::{Context, Poll}, }; use tempfile::tempfile; @@ -16,9 +16,10 @@ use tokio_test::assert_pending; mod aio { use super::*; - /// Adapts mio_aio::AioCb (which implements mio::event::Source) to AioSource - struct WrappedAioCb<'a>(AioCb<'a>); - impl<'a> AioSource for WrappedAioCb<'a> { + #[derive(Debug)] + struct TokioSource(mio_aio::Source); + + impl AioSource for TokioSource 
{ fn register(&mut self, kq: RawFd, token: usize) { self.0.register_raw(kq, token) } @@ -28,12 +29,22 @@ mod aio { } /// A very crude implementation of an AIO-based future - struct FsyncFut(Aio>); + struct FsyncFut(Aio); + + impl FsyncFut { + pub fn submit(self: Pin<&mut Self>) -> io::Result<()> { + let p = unsafe { self.map_unchecked_mut(|s| &mut s.0 .0) }; + match p.submit() { + Ok(()) => Ok(()), + Err(e) => Err(io::Error::from_raw_os_error(e as i32)), + } + } + } impl Future for FsyncFut { - type Output = std::io::Result<()>; + type Output = io::Result<()>; - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let poll_result = self.0.poll_ready(cx); match poll_result { Poll::Pending => Poll::Pending, @@ -41,10 +52,11 @@ mod aio { Poll::Ready(Ok(_ev)) => { // At this point, we could clear readiness. But there's no // point, since we're about to drop the Aio. - let result = (*self.0).0.aio_return(); + let p = unsafe { self.map_unchecked_mut(|s| &mut s.0 .0) }; + let result = p.aio_return(); match result { - Ok(_) => Poll::Ready(Ok(())), - Err(e) => Poll::Ready(Err(e.into())), + Ok(r) => Poll::Ready(Ok(r)), + Err(e) => Poll::Ready(Err(io::Error::from_raw_os_error(e as i32))), } } } @@ -57,6 +69,16 @@ mod aio { /// registration actually works, under the hood. 
struct LlSource(Pin>); + impl LlSource { + fn fsync(mut self: Pin<&mut Self>) { + let r = unsafe { + let p = self.0.as_mut().get_unchecked_mut(); + libc::aio_fsync(libc::O_SYNC, p) + }; + assert_eq!(0, r); + } + } + impl AioSource for LlSource { fn register(&mut self, kq: RawFd, token: usize) { let mut sev: libc::sigevent = unsafe { mem::MaybeUninit::zeroed().assume_init() }; @@ -77,62 +99,15 @@ mod aio { struct LlFut(Aio); - impl Future for LlFut { - type Output = std::io::Result<()>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let poll_result = self.0.poll_ready(cx); - match poll_result { - Poll::Pending => Poll::Pending, - Poll::Ready(Err(e)) => Poll::Ready(Err(e)), - Poll::Ready(Ok(_ev)) => { - let r = unsafe { libc::aio_return(self.0 .0.as_mut().get_unchecked_mut()) }; - assert_eq!(0, r); - Poll::Ready(Ok(())) - } - } + impl LlFut { + pub fn fsync(self: Pin<&mut Self>) { + let p = unsafe { self.map_unchecked_mut(|s| &mut *(s.0)) }; + p.fsync(); } } - /// A very simple object that can implement AioSource and can be reused. - /// - /// mio_aio normally assumes that each AioCb will be consumed on completion. - /// This somewhat contrived example shows how an Aio object can be reused - /// anyway. 
- struct ReusableFsyncSource { - aiocb: Pin>>, - fd: RawFd, - token: usize, - } - impl ReusableFsyncSource { - fn fsync(&mut self) { - self.aiocb.register_raw(self.fd, self.token); - self.aiocb.fsync(AioFsyncMode::O_SYNC).unwrap(); - } - fn new(aiocb: AioCb<'static>) -> Self { - ReusableFsyncSource { - aiocb: Box::pin(aiocb), - fd: 0, - token: 0, - } - } - fn reset(&mut self, aiocb: AioCb<'static>) { - self.aiocb = Box::pin(aiocb); - } - } - impl AioSource for ReusableFsyncSource { - fn register(&mut self, kq: RawFd, token: usize) { - self.fd = kq; - self.token = token; - } - fn deregister(&mut self) { - self.fd = 0; - } - } - - struct ReusableFsyncFut<'a>(&'a mut Aio); - impl<'a> Future for ReusableFsyncFut<'a> { - type Output = std::io::Result<()>; + impl Future for LlFut { + type Output = std::io::Result; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let poll_result = self.0.poll_ready(cx); @@ -140,16 +115,16 @@ mod aio { Poll::Pending => Poll::Pending, Poll::Ready(Err(e)) => Poll::Ready(Err(e)), Poll::Ready(Ok(ev)) => { - // Since this future uses a reusable Aio, we must clear - // its readiness here. That makes the future - // non-idempotent; the caller can't poll it repeatedly after - // it has already returned Ready. But that's ok; most - // futures behave this way. + // Clearing readiness makes the future non-idempotent; the + // caller can't poll it repeatedly after it has already + // returned Ready. But that's ok; most futures behave this + // way. 
self.0.clear_ready(ev); - let result = (*self.0).aiocb.aio_return(); - match result { - Ok(_) => Poll::Ready(Ok(())), - Err(e) => Poll::Ready(Err(e.into())), + let r = unsafe { libc::aio_return(self.0 .0.as_mut().get_unchecked_mut()) }; + if r >= 0 { + Poll::Ready(Ok(r as usize)) + } else { + Poll::Ready(Err(io::Error::last_os_error())) } } } @@ -160,11 +135,11 @@ mod aio { async fn fsync() { let f = tempfile().unwrap(); let fd = f.as_raw_fd(); - let aiocb = AioCb::from_fd(fd, 0); - let source = WrappedAioCb(aiocb); - let mut poll_aio = Aio::new_for_aio(source).unwrap(); - (*poll_aio).0.fsync(AioFsyncMode::O_SYNC).unwrap(); - let fut = FsyncFut(poll_aio); + let mode = AioFsyncMode::O_SYNC; + let source = TokioSource(mio_aio::Fsync::fsync(fd, mode, 0)); + let poll_aio = Aio::new_for_aio(source).unwrap(); + let mut fut = pin!(FsyncFut(poll_aio)); + fut.as_mut().submit().unwrap(); fut.await.unwrap(); } @@ -177,7 +152,7 @@ mod aio { let source = LlSource(Box::pin(aiocb)); let mut poll_aio = Aio::new_for_aio(source).unwrap(); let r = unsafe { - let p = (*poll_aio).0.as_mut().get_unchecked_mut(); + let p = poll_aio.0.as_mut().get_unchecked_mut(); libc::aio_fsync(libc::O_SYNC, p) }; assert_eq!(0, r); @@ -190,144 +165,140 @@ mod aio { async fn reuse() { let f = tempfile().unwrap(); let fd = f.as_raw_fd(); - let aiocb0 = AioCb::from_fd(fd, 0); - let source = ReusableFsyncSource::new(aiocb0); - let mut poll_aio = Aio::new_for_aio(source).unwrap(); - poll_aio.fsync(); - let fut0 = ReusableFsyncFut(&mut poll_aio); - fut0.await.unwrap(); + let mut aiocb: libc::aiocb = unsafe { mem::MaybeUninit::zeroed().assume_init() }; + aiocb.aio_fildes = fd; + let source = LlSource(Box::pin(aiocb)); + let poll_aio = Aio::new_for_aio(source).unwrap(); + + // Send the operation to the kernel the first time + let mut fut = LlFut(poll_aio); + { + let mut pfut = Pin::new(&mut fut); + pfut.as_mut().fsync(); + pfut.as_mut().await.unwrap(); + } - let aiocb1 = AioCb::from_fd(fd, 0); - 
poll_aio.reset(aiocb1); + // Check that readiness was cleared let mut ctx = Context::from_waker(futures::task::noop_waker_ref()); - assert_pending!(poll_aio.poll_ready(&mut ctx)); - poll_aio.fsync(); - let fut1 = ReusableFsyncFut(&mut poll_aio); - fut1.await.unwrap(); + assert_pending!(fut.0.poll_ready(&mut ctx)); + + // and reuse the future and its Aio object + { + let mut pfut = Pin::new(&mut fut); + pfut.as_mut().fsync(); + pfut.as_mut().await.unwrap(); + } } } mod lio { use super::*; - struct WrappedLioCb<'a>(LioCb<'a>); - impl<'a> AioSource for WrappedLioCb<'a> { - fn register(&mut self, kq: RawFd, token: usize) { - self.0.register_raw(kq, token) - } - fn deregister(&mut self) { - self.0.deregister_raw() - } + /// Low-level source based on lio_listio + /// + /// An example demonstrating using AIO with `Interest::Lio`. mio_aio 0.8 + /// doesn't include any bindings for lio_listio, so we've got to go + /// low-level. + struct LioSource<'a> { + aiocb: Pin<&'a mut [&'a mut libc::aiocb; 1]>, + sev: libc::sigevent, } - /// A very crude lio_listio-based Future - struct LioFut(Option>>); - - impl Future for LioFut { - type Output = std::io::Result>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let poll_result = self.0.as_mut().unwrap().poll_ready(cx); - match poll_result { - Poll::Pending => Poll::Pending, - Poll::Ready(Err(e)) => Poll::Ready(Err(e)), - Poll::Ready(Ok(_ev)) => { - // At this point, we could clear readiness. But there's no - // point, since we're about to drop the Aio. - let r = self.0.take().unwrap().into_inner().0.into_results(|iter| { - iter.map(|lr| lr.result.unwrap()).collect::>() - }); - Poll::Ready(Ok(r)) - } + impl<'a> LioSource<'a> { + fn new(aiocb: Pin<&'a mut [&'a mut libc::aiocb; 1]>) -> Self { + LioSource { + aiocb, + sev: unsafe { mem::zeroed() }, } } - } - /// Minimal example demonstrating reuse of an Aio object with lio - /// readiness. mio_aio::LioCb actually does something similar under the - /// hood. 
- struct ReusableLioSource { - liocb: Option>, - fd: RawFd, - token: usize, - } - impl ReusableLioSource { - fn new(liocb: LioCb<'static>) -> Self { - ReusableLioSource { - liocb: Some(liocb), - fd: 0, - token: 0, - } - } - fn reset(&mut self, liocb: LioCb<'static>) { - self.liocb = Some(liocb); - } - fn submit(&mut self) { - self.liocb - .as_mut() - .unwrap() - .register_raw(self.fd, self.token); - self.liocb.as_mut().unwrap().submit().unwrap(); + fn submit(mut self: Pin<&mut Self>) { + let p: *const *mut libc::aiocb = + unsafe { self.aiocb.as_mut().get_unchecked_mut() } as *const _ as *const *mut _; + let r = unsafe { libc::lio_listio(libc::LIO_NOWAIT, p, 1, &mut self.sev) }; + assert_eq!(r, 0); } } - impl AioSource for ReusableLioSource { + + impl<'a> AioSource for LioSource<'a> { fn register(&mut self, kq: RawFd, token: usize) { - self.fd = kq; - self.token = token; + let mut sev: libc::sigevent = unsafe { mem::MaybeUninit::zeroed().assume_init() }; + sev.sigev_notify = libc::SIGEV_KEVENT; + sev.sigev_signo = kq; + sev.sigev_value = libc::sigval { + sival_ptr: token as *mut libc::c_void, + }; + self.sev = sev; } + fn deregister(&mut self) { - self.fd = 0; + unsafe { + self.sev = mem::zeroed(); + } } } - struct ReusableLioFut<'a>(&'a mut Aio); - impl<'a> Future for ReusableLioFut<'a> { - type Output = std::io::Result>; - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + struct LioFut<'a>(Aio>); + + impl<'a> LioFut<'a> { + pub fn submit(self: Pin<&mut Self>) { + let p = unsafe { self.map_unchecked_mut(|s| &mut *(s.0)) }; + p.submit(); + } + } + + impl<'a> Future for LioFut<'a> { + type Output = std::io::Result; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let poll_result = self.0.poll_ready(cx); match poll_result { Poll::Pending => Poll::Pending, Poll::Ready(Err(e)) => Poll::Ready(Err(e)), Poll::Ready(Ok(ev)) => { - // Since this future uses a reusable Aio, we must clear - // its readiness here. 
That makes the future - // non-idempotent; the caller can't poll it repeatedly after - // it has already returned Ready. But that's ok; most - // futures behave this way. + // Clearing readiness makes the future non-idempotent; the + // caller can't poll it repeatedly after it has already + // returned Ready. But that's ok; most futures behave this + // way. Clearing readiness is especially useful for + // lio_listio, because sometimes some operations will be + // ready but not all. self.0.clear_ready(ev); - let r = (*self.0).liocb.take().unwrap().into_results(|iter| { - iter.map(|lr| lr.result.unwrap()).collect::>() - }); - Poll::Ready(Ok(r)) + let r = unsafe { + let p1 = self.get_unchecked_mut(); + let p2: &mut [&mut libc::aiocb; 1] = + p1.0.aiocb.as_mut().get_unchecked_mut(); + let p3: &mut libc::aiocb = p2[0]; + libc::aio_return(p3) + }; + if r >= 0 { + Poll::Ready(Ok(r as usize)) + } else { + Poll::Ready(Err(io::Error::last_os_error())) + } } } } } - /// An lio_listio operation with one write element + /// An lio_listio operation with one fsync element #[tokio::test] async fn onewrite() { const WBUF: &[u8] = b"abcdef"; let f = tempfile().unwrap(); - let mut builder = mio_aio::LioCbBuilder::with_capacity(1); - builder = builder.emplace_slice( - f.as_raw_fd(), - 0, - &WBUF[..], - 0, - mio_aio::LioOpcode::LIO_WRITE, - ); - let liocb = builder.finish(); - let source = WrappedLioCb(liocb); - let mut poll_aio = Aio::new_for_lio(source).unwrap(); + let mut aiocb: libc::aiocb = unsafe { mem::zeroed() }; + aiocb.aio_fildes = f.as_raw_fd(); + aiocb.aio_lio_opcode = libc::LIO_WRITE; + aiocb.aio_nbytes = WBUF.len(); + aiocb.aio_buf = WBUF.as_ptr() as *mut _; + let aiocb = pin!([&mut aiocb]); + let source = LioSource::new(aiocb); + let poll_aio = Aio::new_for_lio(source).unwrap(); // Send the operation to the kernel - (*poll_aio).0.submit().unwrap(); - let fut = LioFut(Some(poll_aio)); - let v = fut.await.unwrap(); - assert_eq!(v.len(), 1); - assert_eq!(v[0] as usize, 
WBUF.len()); + let mut fut = pin!(LioFut(poll_aio)); + fut.as_mut().submit(); + fut.await.unwrap(); } /// A suitably crafted future type can reuse an Aio object @@ -336,40 +307,32 @@ mod lio { const WBUF: &[u8] = b"abcdef"; let f = tempfile().unwrap(); - let mut builder0 = mio_aio::LioCbBuilder::with_capacity(1); - builder0 = builder0.emplace_slice( - f.as_raw_fd(), - 0, - &WBUF[..], - 0, - mio_aio::LioOpcode::LIO_WRITE, - ); - let liocb0 = builder0.finish(); - let source = ReusableLioSource::new(liocb0); - let mut poll_aio = Aio::new_for_aio(source).unwrap(); - poll_aio.submit(); - let fut0 = ReusableLioFut(&mut poll_aio); - let v = fut0.await.unwrap(); - assert_eq!(v.len(), 1); - assert_eq!(v[0] as usize, WBUF.len()); - - // Now reuse the same Aio - let mut builder1 = mio_aio::LioCbBuilder::with_capacity(1); - builder1 = builder1.emplace_slice( - f.as_raw_fd(), - 0, - &WBUF[..], - 0, - mio_aio::LioOpcode::LIO_WRITE, - ); - let liocb1 = builder1.finish(); - poll_aio.reset(liocb1); + let mut aiocb: libc::aiocb = unsafe { mem::zeroed() }; + aiocb.aio_fildes = f.as_raw_fd(); + aiocb.aio_lio_opcode = libc::LIO_WRITE; + aiocb.aio_nbytes = WBUF.len(); + aiocb.aio_buf = WBUF.as_ptr() as *mut _; + let aiocb = pin!([&mut aiocb]); + let source = LioSource::new(aiocb); + let poll_aio = Aio::new_for_lio(source).unwrap(); + + // Send the operation to the kernel the first time + let mut fut = LioFut(poll_aio); + { + let mut pfut = Pin::new(&mut fut); + pfut.as_mut().submit(); + pfut.as_mut().await.unwrap(); + } + + // Check that readiness was cleared let mut ctx = Context::from_waker(futures::task::noop_waker_ref()); - assert_pending!(poll_aio.poll_ready(&mut ctx)); - poll_aio.submit(); - let fut1 = ReusableLioFut(&mut poll_aio); - let v = fut1.await.unwrap(); - assert_eq!(v.len(), 1); - assert_eq!(v[0] as usize, WBUF.len()); + assert_pending!(fut.0.poll_ready(&mut ctx)); + + // and reuse the future and its Aio object + { + let mut pfut = Pin::new(&mut fut); + 
pfut.as_mut().submit(); + pfut.as_mut().await.unwrap(); + } } } From d6ba535ceb327869ad149dc638af8cf3146fba81 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Sat, 6 Jan 2024 20:30:28 +0900 Subject: [PATCH 021/162] ci: update actions/checkout action to v4 (#6272) --- .github/workflows/audit.yml | 2 +- .github/workflows/ci.yml | 66 +++++++++++++++---------------- .github/workflows/loom.yml | 10 ++--- .github/workflows/pr-audit.yml | 2 +- .github/workflows/stress-test.yml | 2 +- 5 files changed, 41 insertions(+), 41 deletions(-) diff --git a/.github/workflows/audit.yml b/.github/workflows/audit.yml index cb124aabdb6..2a5cb641a9a 100644 --- a/.github/workflows/audit.yml +++ b/.github/workflows/audit.yml @@ -21,7 +21,7 @@ jobs: runs-on: ubuntu-latest if: "!contains(github.event.head_commit.message, 'ci skip')" steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Audit Check # https://github.com/rustsec/audit-check/issues/2 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 35d202dd689..b22f53c8a43 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -97,7 +97,7 @@ jobs: - ubuntu-latest - macos-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust ${{ env.rust_stable }} uses: dtolnay/rust-toolchain@stable with: @@ -129,7 +129,7 @@ jobs: - ubuntu-latest - macos-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust ${{ env.rust_stable }} uses: dtolnay/rust-toolchain@stable with: @@ -159,7 +159,7 @@ jobs: - ubuntu-latest - macos-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust ${{ env.rust_stable }} uses: dtolnay/rust-toolchain@stable with: @@ -200,7 +200,7 @@ jobs: needs: basics runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust ${{ env.rust_stable }} uses: dtolnay/rust-toolchain@stable with: @@ -222,7 +222,7 @@ jobs: name: test 
tokio instrumentation runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust ${{ env.rust_stable }} uses: dtolnay/rust-toolchain@stable with: @@ -247,7 +247,7 @@ jobs: needs: basics runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust ${{ env.rust_stable }} uses: dtolnay/rust-toolchain@stable with: @@ -286,7 +286,7 @@ jobs: - os: ubuntu-latest - os: macos-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust ${{ env.rust_stable }} uses: dtolnay/rust-toolchain@stable with: @@ -320,7 +320,7 @@ jobs: include: - os: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust ${{ env.rust_stable }} uses: dtolnay/rust-toolchain@stable with: @@ -354,7 +354,7 @@ jobs: include: - os: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust ${{ env.rust_stable }} uses: dtolnay/rust-toolchain@stable with: @@ -382,7 +382,7 @@ jobs: needs: basics runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust ${{ env.rust_nightly }} uses: dtolnay/rust-toolchain@stable with: @@ -403,7 +403,7 @@ jobs: needs: basics runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install llvm # Required to resolve symbols in sanitizer output run: sudo apt-get install -y llvm @@ -425,7 +425,7 @@ jobs: needs: basics runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Check semver uses: obi1kenobi/cargo-semver-checks-action@v2 with: @@ -443,7 +443,7 @@ jobs: - powerpc64-unknown-linux-gnu - arm-linux-androideabi steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust ${{ env.rust_stable }} uses: dtolnay/rust-toolchain@stable with: @@ -465,7 +465,7 @@ jobs: - name: armv7-sony-vita-newlibeabihf 
exclude_features: "process,signal,rt-process-signal,full" steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust ${{ env.rust_nightly }} uses: dtolnay/rust-toolchain@nightly with: @@ -493,7 +493,7 @@ jobs: - target: aarch64-unknown-linux-gnu rustflags: --cfg tokio_taskdump steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust stable uses: dtolnay/rust-toolchain@stable with: @@ -532,7 +532,7 @@ jobs: - target: aarch64-unknown-linux-gnu rustflags: --cfg tokio_taskdump steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust stable uses: dtolnay/rust-toolchain@stable with: @@ -568,7 +568,7 @@ jobs: needs: basics runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust ${{ env.rust_nightly }} uses: dtolnay/rust-toolchain@stable with: @@ -598,7 +598,7 @@ jobs: needs: basics runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust ${{ env.rust_nightly }} uses: dtolnay/rust-toolchain@stable with: @@ -631,7 +631,7 @@ jobs: # Try with unstable and taskdump feature flags - { name: "--unstable --taskdump", rustflags: "--cfg tokio_unstable -Dwarnings --cfg tokio_taskdump" } steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust ${{ env.rust_nightly }} uses: dtolnay/rust-toolchain@stable with: @@ -650,7 +650,7 @@ jobs: name: minrust runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust ${{ env.rust_min }} uses: dtolnay/rust-toolchain@stable with: @@ -666,7 +666,7 @@ jobs: needs: basics runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust ${{ env.rust_nightly }} uses: dtolnay/rust-toolchain@stable with: @@ -698,7 +698,7 @@ jobs: name: fmt runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - 
name: Install Rust ${{ env.rust_stable }} uses: dtolnay/rust-toolchain@stable with: @@ -718,7 +718,7 @@ jobs: name: clippy runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust ${{ env.rust_clippy }} uses: dtolnay/rust-toolchain@stable with: @@ -733,7 +733,7 @@ jobs: name: docs runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust ${{ env.rust_nightly }} uses: dtolnay/rust-toolchain@stable with: @@ -751,7 +751,7 @@ jobs: needs: basics runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust ${{ env.rust_stable }} uses: dtolnay/rust-toolchain@stable with: @@ -767,7 +767,7 @@ jobs: name: Check README runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Verify that both READMEs are identical run: diff README.md tokio/README.md @@ -786,7 +786,7 @@ jobs: - ubuntu-latest - macos-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust ${{ env.rust_stable }} uses: dtolnay/rust-toolchain@stable with: @@ -832,7 +832,7 @@ jobs: needs: basics runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust ${{ env.rust_nightly }} uses: dtolnay/rust-toolchain@stable with: @@ -849,7 +849,7 @@ jobs: needs: basics runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust ${{ env.rust_nightly }} uses: dtolnay/rust-toolchain@master with: @@ -864,7 +864,7 @@ jobs: needs: basics runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust ${{ env.rust_stable }} uses: dtolnay/rust-toolchain@stable with: @@ -882,7 +882,7 @@ jobs: needs: basics runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust ${{ env.rust_stable }} uses: 
dtolnay/rust-toolchain@stable with: @@ -933,7 +933,7 @@ jobs: # the README for details: https://github.com/awslabs/cargo-check-external-types - nightly-2023-05-31 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust ${{ matrix.rust }} uses: dtolnay/rust-toolchain@stable with: @@ -952,7 +952,7 @@ jobs: needs: basics runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust ${{ env.rust_nightly }} uses: dtolnay/rust-toolchain@stable with: diff --git a/.github/workflows/loom.yml b/.github/workflows/loom.yml index e7528090205..b25f9ac5e30 100644 --- a/.github/workflows/loom.yml +++ b/.github/workflows/loom.yml @@ -29,7 +29,7 @@ jobs: if: github.repository_owner == 'tokio-rs' && (contains(github.event.pull_request.labels.*.name, 'R-loom-sync') || (github.base_ref == null)) runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust ${{ env.rust_stable }} uses: dtolnay/rust-toolchain@master with: @@ -45,7 +45,7 @@ jobs: if: github.repository_owner == 'tokio-rs' && (contains(github.event.pull_request.labels.*.name, 'R-loom-time-driver') || (github.base_ref == null)) runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust ${{ env.rust_stable }} uses: dtolnay/rust-toolchain@master with: @@ -61,7 +61,7 @@ jobs: if: github.repository_owner == 'tokio-rs' && (contains(github.event.pull_request.labels.*.name, 'R-loom-current-thread') || (github.base_ref == null)) runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust ${{ env.rust_stable }} uses: dtolnay/rust-toolchain@master with: @@ -84,7 +84,7 @@ jobs: - scope: loom_multi_thread::group_c - scope: loom_multi_thread::group_d steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust ${{ env.rust_stable }} uses: dtolnay/rust-toolchain@master with: @@ 
-109,7 +109,7 @@ jobs: - scope: loom_multi_thread_alt::group_c - scope: loom_multi_thread_alt::group_d steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust ${{ env.rust_stable }} uses: dtolnay/rust-toolchain@master with: diff --git a/.github/workflows/pr-audit.yml b/.github/workflows/pr-audit.yml index 892cb0cfa5c..ce2e81cda43 100644 --- a/.github/workflows/pr-audit.yml +++ b/.github/workflows/pr-audit.yml @@ -20,7 +20,7 @@ jobs: runs-on: ubuntu-latest if: "!contains(github.event.head_commit.message, 'ci skip')" steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install cargo-audit run: cargo install cargo-audit diff --git a/.github/workflows/stress-test.yml b/.github/workflows/stress-test.yml index 1c21bc91486..e5c13f6f432 100644 --- a/.github/workflows/stress-test.yml +++ b/.github/workflows/stress-test.yml @@ -27,7 +27,7 @@ jobs: stress-test: - simple_echo_tcp steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust ${{ env.rust_stable }} uses: dtolnay/rust-toolchain@master with: From 9780bf491f0a69986f9f35b9c6a81ac951356aff Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Sun, 7 Jan 2024 00:53:08 +0900 Subject: [PATCH 022/162] ci: update cargo-check-external-types to 0.1.10 (#6274) --- .github/workflows/ci.yml | 6 +++--- tokio/Cargo.toml | 10 ++++++++++ tokio/external-types.toml | 11 ----------- 3 files changed, 13 insertions(+), 14 deletions(-) delete mode 100644 tokio/external-types.toml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b22f53c8a43..a2242d3b873 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -931,7 +931,7 @@ jobs: rust: # `check-external-types` requires a specific Rust nightly version. 
See # the README for details: https://github.com/awslabs/cargo-check-external-types - - nightly-2023-05-31 + - nightly-2023-10-21 steps: - uses: actions/checkout@v4 - name: Install Rust ${{ matrix.rust }} @@ -942,9 +942,9 @@ jobs: - name: Install cargo-check-external-types uses: taiki-e/cache-cargo-install-action@v1 with: - tool: cargo-check-external-types@0.1.7 + tool: cargo-check-external-types@0.1.10 - name: check-external-types - run: cargo check-external-types --all-features --config external-types.toml + run: cargo check-external-types --all-features working-directory: tokio check-fuzzing: diff --git a/tokio/Cargo.toml b/tokio/Cargo.toml index 87d2a460c8d..2efbca02dbc 100644 --- a/tokio/Cargo.toml +++ b/tokio/Cargo.toml @@ -164,3 +164,13 @@ rustc-args = ["--cfg", "tokio_unstable", "--cfg", "tokio_taskdump"] [package.metadata.playground] features = ["full", "test-util"] + +[package.metadata.cargo_check_external_types] +# The following are types that are allowed to be exposed in Tokio's public API. +# The standard library is allowed by default. +allowed_external_types = [ + "bytes::buf::buf_impl::Buf", + "bytes::buf::buf_mut::BufMut", + + "tokio_macros::*", +] diff --git a/tokio/external-types.toml b/tokio/external-types.toml deleted file mode 100644 index a5bde8ed10c..00000000000 --- a/tokio/external-types.toml +++ /dev/null @@ -1,11 +0,0 @@ -# This config file is for the `cargo-check-external-types` tool that is run in CI. - -# The following are types that are allowed to be exposed in Tokio's public API. -# The standard library is allowed by default. 
-allowed_external_types = [ - "bytes::buf::buf_impl::Buf", - "bytes::buf::buf_mut::BufMut", - - "tokio_macros::*", -] - From 3275cfb638fe6cac6b0bbb1f60ee59eb499f6c2a Mon Sep 17 00:00:00 2001 From: Rustin Date: Sun, 7 Jan 2024 00:22:26 +0800 Subject: [PATCH 023/162] io: make `copy` cooperative (#6265) --- tokio/src/io/util/copy.rs | 75 +++++++++++++++++++++++++++- tokio/tests/io_copy.rs | 15 ++++++ tokio/tests/io_copy_bidirectional.rs | 25 ++++++++++ 3 files changed, 113 insertions(+), 2 deletions(-) diff --git a/tokio/src/io/util/copy.rs b/tokio/src/io/util/copy.rs index 8bd0bff7f2b..56310c86f59 100644 --- a/tokio/src/io/util/copy.rs +++ b/tokio/src/io/util/copy.rs @@ -82,6 +82,19 @@ impl CopyBuffer { R: AsyncRead + ?Sized, W: AsyncWrite + ?Sized, { + ready!(crate::trace::trace_leaf(cx)); + #[cfg(any( + feature = "fs", + feature = "io-std", + feature = "net", + feature = "process", + feature = "rt", + feature = "signal", + feature = "sync", + feature = "time", + ))] + // Keep track of task budget + let coop = ready!(crate::runtime::coop::poll_proceed(cx)); loop { // If our buffer is empty, then we need to read some data to // continue. @@ -90,13 +103,49 @@ impl CopyBuffer { self.cap = 0; match self.poll_fill_buf(cx, reader.as_mut()) { - Poll::Ready(Ok(())) => (), - Poll::Ready(Err(err)) => return Poll::Ready(Err(err)), + Poll::Ready(Ok(())) => { + #[cfg(any( + feature = "fs", + feature = "io-std", + feature = "net", + feature = "process", + feature = "rt", + feature = "signal", + feature = "sync", + feature = "time", + ))] + coop.made_progress(); + } + Poll::Ready(Err(err)) => { + #[cfg(any( + feature = "fs", + feature = "io-std", + feature = "net", + feature = "process", + feature = "rt", + feature = "signal", + feature = "sync", + feature = "time", + ))] + coop.made_progress(); + return Poll::Ready(Err(err)); + } Poll::Pending => { // Try flushing when the reader has no progress to avoid deadlock // when the reader depends on buffered writer. 
if self.need_flush { ready!(writer.as_mut().poll_flush(cx))?; + #[cfg(any( + feature = "fs", + feature = "io-std", + feature = "net", + feature = "process", + feature = "rt", + feature = "signal", + feature = "sync", + feature = "time", + ))] + coop.made_progress(); self.need_flush = false; } @@ -108,6 +157,17 @@ impl CopyBuffer { // If our buffer has some data, let's write it out! while self.pos < self.cap { let i = ready!(self.poll_write_buf(cx, reader.as_mut(), writer.as_mut()))?; + #[cfg(any( + feature = "fs", + feature = "io-std", + feature = "net", + feature = "process", + feature = "rt", + feature = "signal", + feature = "sync", + feature = "time", + ))] + coop.made_progress(); if i == 0 { return Poll::Ready(Err(io::Error::new( io::ErrorKind::WriteZero, @@ -132,6 +192,17 @@ impl CopyBuffer { // data and finish the transfer. if self.pos == self.cap && self.read_done { ready!(writer.as_mut().poll_flush(cx))?; + #[cfg(any( + feature = "fs", + feature = "io-std", + feature = "net", + feature = "process", + feature = "rt", + feature = "signal", + feature = "sync", + feature = "time", + ))] + coop.made_progress(); return Poll::Ready(Ok(self.amt)); } } diff --git a/tokio/tests/io_copy.rs b/tokio/tests/io_copy.rs index 005e1701191..82d92a9688b 100644 --- a/tokio/tests/io_copy.rs +++ b/tokio/tests/io_copy.rs @@ -85,3 +85,18 @@ async fn proxy() { assert_eq!(n, 1024); } + +#[tokio::test] +async fn copy_is_cooperative() { + tokio::select! 
{ + biased; + _ = async { + loop { + let mut reader: &[u8] = b"hello"; + let mut writer: Vec = vec![]; + let _ = io::copy(&mut reader, &mut writer).await; + } + } => {}, + _ = tokio::task::yield_now() => {} + } +} diff --git a/tokio/tests/io_copy_bidirectional.rs b/tokio/tests/io_copy_bidirectional.rs index 10eba3166ac..3cdce32d0ce 100644 --- a/tokio/tests/io_copy_bidirectional.rs +++ b/tokio/tests/io_copy_bidirectional.rs @@ -138,3 +138,28 @@ async fn immediate_exit_on_read_error() { assert!(copy_bidirectional(&mut a, &mut b).await.is_err()); } + +#[tokio::test] +async fn copy_bidirectional_is_cooperative() { + tokio::select! { + biased; + _ = async { + loop { + let payload = b"here, take this"; + + let mut a = tokio_test::io::Builder::new() + .read(payload) + .write(payload) + .build(); + + let mut b = tokio_test::io::Builder::new() + .read(payload) + .write(payload) + .build(); + + let _ = copy_bidirectional(&mut a, &mut b).await; + } + } => {}, + _ = tokio::task::yield_now() => {} + } +} From 84c5674c601dfc36ab417ff0ec01763c2dd30a5c Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Sun, 7 Jan 2024 02:33:37 +0900 Subject: [PATCH 024/162] ci: update clippy version to 1.75 (#6273) --- .clippy.toml | 1 - .github/workflows/ci.yml | 4 ++-- CONTRIBUTING.md | 2 +- tokio-stream/src/wrappers/lines.rs | 1 - tokio-stream/src/wrappers/split.rs | 1 - tokio-test/src/task.rs | 2 -- tokio-util/src/sync/cancellation_token/tree_node.rs | 2 ++ tokio-util/src/time/wheel/level.rs | 2 +- tokio-util/tests/compat.rs | 2 +- tokio-util/tests/mpsc.rs | 2 +- tokio-util/tests/spawn_pinned.rs | 4 ++-- tokio/src/io/bsd/poll_aio.rs | 2 +- tokio/src/lib.rs | 3 +-- tokio/src/runtime/scheduler/current_thread/mod.rs | 6 ------ tokio/src/runtime/task/state.rs | 3 --- tokio/src/runtime/time/wheel/level.rs | 2 +- tokio/src/signal/registry.rs | 2 -- tokio/tests/io_async_fd.rs | 2 +- tokio/tests/io_repeat.rs | 2 +- tokio/tests/io_sink.rs | 2 +- tokio/tests/macros_try_join.rs | 9 ++------- 
tokio/tests/net_named_pipe.rs | 6 +++--- tokio/tests/net_panic.rs | 4 ++-- tokio/tests/rt_time_start_paused.rs | 2 +- tokio/tests/task_id.rs | 1 - tokio/tests/task_join_set.rs | 6 +++--- tokio/tests/task_local.rs | 2 +- tokio/tests/task_panic.rs | 3 +-- tokio/tests/tcp_into_split.rs | 2 +- tokio/tests/time_panic.rs | 2 +- 30 files changed, 31 insertions(+), 53 deletions(-) delete mode 100644 .clippy.toml diff --git a/.clippy.toml b/.clippy.toml deleted file mode 100644 index 550d4759a1a..00000000000 --- a/.clippy.toml +++ /dev/null @@ -1 +0,0 @@ -msrv = "1.63" diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a2242d3b873..2b79ec21c22 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -16,7 +16,7 @@ env: # Change to specific Rust release to pin rust_stable: stable rust_nightly: nightly-2023-10-21 - rust_clippy: 1.65.0 + rust_clippy: '1.75' # When updating this, also update: # - README.md # - tokio/README.md @@ -25,7 +25,7 @@ env: # - tokio-util/Cargo.toml # - tokio-test/Cargo.toml # - tokio-stream/Cargo.toml - rust_min: 1.63.0 + rust_min: '1.63' defaults: run: diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 57a3bb36648..a1fd3bf0d28 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -149,7 +149,7 @@ When updating this, also update: --> ``` -cargo +1.65.0 clippy --all --tests --all-features +cargo +1.75 clippy --all --tests --all-features ``` When building documentation normally, the markers that list the features diff --git a/tokio-stream/src/wrappers/lines.rs b/tokio-stream/src/wrappers/lines.rs index ad3c25349f6..4850429a72d 100644 --- a/tokio-stream/src/wrappers/lines.rs +++ b/tokio-stream/src/wrappers/lines.rs @@ -30,7 +30,6 @@ impl LinesStream { } /// Obtain a pinned reference to the inner `Lines`. 
- #[allow(clippy::wrong_self_convention)] // https://github.com/rust-lang/rust-clippy/issues/4546 pub fn as_pin_mut(self: Pin<&mut Self>) -> Pin<&mut Lines> { self.project().inner } diff --git a/tokio-stream/src/wrappers/split.rs b/tokio-stream/src/wrappers/split.rs index 5a6bb2d408c..ac46a8ba6ff 100644 --- a/tokio-stream/src/wrappers/split.rs +++ b/tokio-stream/src/wrappers/split.rs @@ -30,7 +30,6 @@ impl SplitStream { } /// Obtain a pinned reference to the inner `Split`. - #[allow(clippy::wrong_self_convention)] // https://github.com/rust-lang/rust-clippy/issues/4546 pub fn as_pin_mut(self: Pin<&mut Self>) -> Pin<&mut Split> { self.project().inner } diff --git a/tokio-test/src/task.rs b/tokio-test/src/task.rs index 67d558dde2f..a9cf50f52f9 100644 --- a/tokio-test/src/task.rs +++ b/tokio-test/src/task.rs @@ -25,8 +25,6 @@ //! assert!(task.poll().is_ready(), "Task was not ready!"); //! ``` -#![allow(clippy::mutex_atomic)] - use std::future::Future; use std::mem; use std::ops; diff --git a/tokio-util/src/sync/cancellation_token/tree_node.rs b/tokio-util/src/sync/cancellation_token/tree_node.rs index b7a98059e13..0263f311164 100644 --- a/tokio-util/src/sync/cancellation_token/tree_node.rs +++ b/tokio-util/src/sync/cancellation_token/tree_node.rs @@ -178,6 +178,8 @@ where locked_node = node.inner.lock().unwrap(); locked_parent } + // https://github.com/tokio-rs/tokio/pull/6273#discussion_r1443752911 + #[allow(clippy::unnecessary_literal_unwrap)] Err(TryLockError::Poisoned(err)) => Err(err).unwrap(), }; diff --git a/tokio-util/src/time/wheel/level.rs b/tokio-util/src/time/wheel/level.rs index 4290acf9c59..a69aee918aa 100644 --- a/tokio-util/src/time/wheel/level.rs +++ b/tokio-util/src/time/wheel/level.rs @@ -270,7 +270,7 @@ mod test { for level in 1..5 { for pos in level..64 { let a = pos * 64_usize.pow(level as u32); - assert_eq!(pos as usize, slot_for(a as u64, level)); + assert_eq!(pos, slot_for(a as u64, level)); } } } diff --git a/tokio-util/tests/compat.rs 
b/tokio-util/tests/compat.rs index 278ebfcfb66..8a0eab3407d 100644 --- a/tokio-util/tests/compat.rs +++ b/tokio-util/tests/compat.rs @@ -1,4 +1,4 @@ -#![cfg(all(feature = "compat"))] +#![cfg(feature = "compat")] #![cfg(not(target_os = "wasi"))] // WASI does not support all fs operations #![warn(rust_2018_idioms)] diff --git a/tokio-util/tests/mpsc.rs b/tokio-util/tests/mpsc.rs index 74b83c21186..545a580318d 100644 --- a/tokio-util/tests/mpsc.rs +++ b/tokio-util/tests/mpsc.rs @@ -29,7 +29,7 @@ async fn simple() { #[tokio::test] async fn simple_ref() { - let v = vec![1, 2, 3i32]; + let v = [1, 2, 3i32]; let (send, mut recv) = channel(3); let mut send = PollSender::new(send); diff --git a/tokio-util/tests/spawn_pinned.rs b/tokio-util/tests/spawn_pinned.rs index 9ea8cd27830..9eeeecfb0c0 100644 --- a/tokio-util/tests/spawn_pinned.rs +++ b/tokio-util/tests/spawn_pinned.rs @@ -31,7 +31,7 @@ fn can_drop_future_and_still_get_output() { let pool = task::LocalPoolHandle::new(1); let (sender, receiver) = std::sync::mpsc::channel(); - let _ = pool.spawn_pinned(move || { + pool.spawn_pinned(move || { // Rc is !Send + !Sync let local_data = Rc::new("test"); @@ -209,7 +209,7 @@ async fn spawn_by_idx() { }, 0, ); - let _ = pool.spawn_pinned_by_idx( + pool.spawn_pinned_by_idx( || async move { barrier2.wait().await; std::thread::current().id() diff --git a/tokio/src/io/bsd/poll_aio.rs b/tokio/src/io/bsd/poll_aio.rs index 6ac9e2880e1..708ca7484e7 100644 --- a/tokio/src/io/bsd/poll_aio.rs +++ b/tokio/src/io/bsd/poll_aio.rs @@ -164,7 +164,7 @@ impl Aio { /// is scheduled to receive a wakeup when the underlying operation /// completes. Note that on multiple calls to `poll_ready`, only the `Waker` from the /// `Context` passed to the most recent call is scheduled to receive a wakeup. 
- pub fn poll_ready<'a>(&'a self, cx: &mut Context<'_>) -> Poll> { + pub fn poll_ready(&self, cx: &mut Context<'_>) -> Poll> { let ev = ready!(self.registration.poll_read_ready(cx))?; Poll::Ready(Ok(AioEvent(ev))) } diff --git a/tokio/src/lib.rs b/tokio/src/lib.rs index 21d19bac9d9..3a979396831 100644 --- a/tokio/src/lib.rs +++ b/tokio/src/lib.rs @@ -2,8 +2,7 @@ clippy::cognitive_complexity, clippy::large_enum_variant, clippy::module_inception, - clippy::needless_doctest_main, - clippy::declare_interior_mutable_const + clippy::needless_doctest_main )] #![warn( missing_debug_implementations, diff --git a/tokio/src/runtime/scheduler/current_thread/mod.rs b/tokio/src/runtime/scheduler/current_thread/mod.rs index 3ae3d7accfc..55a43970195 100644 --- a/tokio/src/runtime/scheduler/current_thread/mod.rs +++ b/tokio/src/runtime/scheduler/current_thread/mod.rs @@ -351,9 +351,6 @@ impl Context { let mut driver = core.driver.take().expect("driver missing"); if let Some(f) = &handle.shared.config.before_park { - // Incorrect lint, the closures are actually different types so `f` - // cannot be passed as an argument to `enter`. - #[allow(clippy::redundant_closure)] let (c, ()) = self.enter(core, || f()); core = c; } @@ -374,9 +371,6 @@ impl Context { } if let Some(f) = &handle.shared.config.after_unpark { - // Incorrect lint, the closures are actually different types so `f` - // cannot be passed as an argument to `enter`. - #[allow(clippy::redundant_closure)] let (c, ()) = self.enter(core, || f()); core = c; } diff --git a/tokio/src/runtime/task/state.rs b/tokio/src/runtime/task/state.rs index 25c6b434a34..24cb4338b96 100644 --- a/tokio/src/runtime/task/state.rs +++ b/tokio/src/runtime/task/state.rs @@ -29,15 +29,12 @@ const LIFECYCLE_MASK: usize = 0b11; const NOTIFIED: usize = 0b100; /// The join handle is still around. 
-#[allow(clippy::unusual_byte_groupings)] // https://github.com/rust-lang/rust-clippy/issues/6556 const JOIN_INTEREST: usize = 0b1_000; /// A join handle waker has been set. -#[allow(clippy::unusual_byte_groupings)] // https://github.com/rust-lang/rust-clippy/issues/6556 const JOIN_WAKER: usize = 0b10_000; /// The task has been forcibly cancelled. -#[allow(clippy::unusual_byte_groupings)] // https://github.com/rust-lang/rust-clippy/issues/6556 const CANCELLED: usize = 0b100_000; /// All bits. diff --git a/tokio/src/runtime/time/wheel/level.rs b/tokio/src/runtime/time/wheel/level.rs index 7e48ff5c57d..4c9ba18cd89 100644 --- a/tokio/src/runtime/time/wheel/level.rs +++ b/tokio/src/runtime/time/wheel/level.rs @@ -267,7 +267,7 @@ mod test { for level in 1..5 { for pos in level..64 { let a = pos * 64_usize.pow(level as u32); - assert_eq!(pos as usize, slot_for(a as u64, level)); + assert_eq!(pos, slot_for(a as u64, level)); } } } diff --git a/tokio/src/signal/registry.rs b/tokio/src/signal/registry.rs index 022ad082b35..74973293a2d 100644 --- a/tokio/src/signal/registry.rs +++ b/tokio/src/signal/registry.rs @@ -1,5 +1,3 @@ -#![allow(clippy::unit_arg)] - use crate::signal::os::{OsExtraData, OsStorage}; use crate::sync::watch; use crate::util::once_cell::OnceCell; diff --git a/tokio/tests/io_async_fd.rs b/tokio/tests/io_async_fd.rs index bacf8e843c9..1fb203a6524 100644 --- a/tokio/tests/io_async_fd.rs +++ b/tokio/tests/io_async_fd.rs @@ -825,7 +825,7 @@ async fn await_error_readiness_invalid_address() { msg.msg_iovlen = 1; if unsafe { libc::sendmsg(socket_fd, &msg, 0) } == -1 { - Err(std::io::Error::last_os_error()).unwrap() + panic!("{:?}", std::io::Error::last_os_error()) } }); diff --git a/tokio/tests/io_repeat.rs b/tokio/tests/io_repeat.rs index 8094ffe7dae..b3745877cd5 100644 --- a/tokio/tests/io_repeat.rs +++ b/tokio/tests/io_repeat.rs @@ -1,5 +1,5 @@ #![warn(rust_2018_idioms)] -#![cfg(all(feature = "full"))] +#![cfg(feature = "full")] use tokio::io::AsyncReadExt; 
diff --git a/tokio/tests/io_sink.rs b/tokio/tests/io_sink.rs index 9b4fb31f30f..fb085c51561 100644 --- a/tokio/tests/io_sink.rs +++ b/tokio/tests/io_sink.rs @@ -1,5 +1,5 @@ #![warn(rust_2018_idioms)] -#![cfg(all(feature = "full"))] +#![cfg(feature = "full")] use tokio::io::AsyncWriteExt; diff --git a/tokio/tests/macros_try_join.rs b/tokio/tests/macros_try_join.rs index c8ed00bcd13..76958f167cc 100644 --- a/tokio/tests/macros_try_join.rs +++ b/tokio/tests/macros_try_join.rs @@ -45,8 +45,7 @@ async fn two_await() { let (tx1, rx1) = oneshot::channel::<&str>(); let (tx2, rx2) = oneshot::channel::(); - let mut join = - task::spawn(async { tokio::try_join!(async { rx1.await }, async { rx2.await }) }); + let mut join = task::spawn(async { tokio::try_join!(rx1, rx2) }); assert_pending!(join.poll()); @@ -67,11 +66,7 @@ async fn err_abort_early() { let (tx2, rx2) = oneshot::channel::(); let (_tx3, rx3) = oneshot::channel::(); - let mut join = task::spawn(async { - tokio::try_join!(async { rx1.await }, async { rx2.await }, async { - rx3.await - }) - }); + let mut join = task::spawn(async { tokio::try_join!(rx1, rx2, rx3) }); assert_pending!(join.poll()); diff --git a/tokio/tests/net_named_pipe.rs b/tokio/tests/net_named_pipe.rs index 02eda48e57b..48b9da82955 100644 --- a/tokio/tests/net_named_pipe.rs +++ b/tokio/tests/net_named_pipe.rs @@ -1,5 +1,5 @@ #![cfg(feature = "full")] -#![cfg(all(windows))] +#![cfg(windows)] use std::io; use std::time::Duration; @@ -92,7 +92,7 @@ async fn test_named_pipe_multi_client() -> io::Result<()> { // `io::ErrorKind::NotFound`. server = ServerOptions::new().create(PIPE_NAME)?; - let _ = tokio::spawn(async move { + tokio::spawn(async move { let mut buf = String::new(); inner.read_line(&mut buf).await?; inner.write_all(b"pong\n").await?; @@ -170,7 +170,7 @@ async fn test_named_pipe_multi_client_ready() -> io::Result<()> { // `io::ErrorKind::NotFound`. 
server = ServerOptions::new().create(PIPE_NAME)?; - let _ = tokio::spawn(async move { + tokio::spawn(async move { let server = inner_server; { diff --git a/tokio/tests/net_panic.rs b/tokio/tests/net_panic.rs index a7ce0afc0ab..81c9df55e3b 100644 --- a/tokio/tests/net_panic.rs +++ b/tokio/tests/net_panic.rs @@ -101,7 +101,7 @@ fn unix_listener_from_std_panic_caller() -> Result<(), Box> { let dir = tempfile::tempdir().unwrap(); let sock_path = dir.path().join("socket"); - let std_listener = std::os::unix::net::UnixListener::bind(&sock_path).unwrap(); + let std_listener = std::os::unix::net::UnixListener::bind(sock_path).unwrap(); let panic_location_file = test_panic(|| { let rt = runtime_without_io(); @@ -150,7 +150,7 @@ fn unix_datagram_from_std_panic_caller() -> Result<(), Box> { // Bind the socket to a filesystem path // /let socket_path = tmp.path().join("socket"); - let std_socket = StdUDS::bind(&sock_path).unwrap(); + let std_socket = StdUDS::bind(sock_path).unwrap(); std_socket.set_nonblocking(true).unwrap(); let panic_location_file = test_panic(move || { diff --git a/tokio/tests/rt_time_start_paused.rs b/tokio/tests/rt_time_start_paused.rs index 283f4748a8a..1765d625e19 100644 --- a/tokio/tests/rt_time_start_paused.rs +++ b/tokio/tests/rt_time_start_paused.rs @@ -1,4 +1,4 @@ -#![cfg(all(feature = "full"))] +#![cfg(feature = "full")] use tokio::time::{Duration, Instant}; diff --git a/tokio/tests/task_id.rs b/tokio/tests/task_id.rs index e10f24be99c..95e48f4901d 100644 --- a/tokio/tests/task_id.rs +++ b/tokio/tests/task_id.rs @@ -1,5 +1,4 @@ #![warn(rust_2018_idioms)] -#![allow(clippy::declare_interior_mutable_const)] #![cfg(all(feature = "full", tokio_unstable))] #[cfg(not(target_os = "wasi"))] diff --git a/tokio/tests/task_join_set.rs b/tokio/tests/task_join_set.rs index d236aa1fca1..bed9b7dad82 100644 --- a/tokio/tests/task_join_set.rs +++ b/tokio/tests/task_join_set.rs @@ -23,7 +23,7 @@ async fn test_with_sleep() { set.detach_all(); assert_eq!(set.len(), 
0); - assert!(matches!(set.join_next().await, None)); + assert!(set.join_next().await.is_none()); for i in 0..10 { set.spawn(async move { @@ -41,7 +41,7 @@ async fn test_with_sleep() { for was_seen in &seen { assert!(was_seen); } - assert!(matches!(set.join_next().await, None)); + assert!(set.join_next().await.is_none()); // Do it again. for i in 0..10 { @@ -59,7 +59,7 @@ async fn test_with_sleep() { for was_seen in &seen { assert!(was_seen); } - assert!(matches!(set.join_next().await, None)); + assert!(set.join_next().await.is_none()); } #[tokio::test] diff --git a/tokio/tests/task_local.rs b/tokio/tests/task_local.rs index a9ffaa15acc..fbc885c3599 100644 --- a/tokio/tests/task_local.rs +++ b/tokio/tests/task_local.rs @@ -1,5 +1,5 @@ #![cfg(all(feature = "full", not(target_os = "wasi")))] // Wasi doesn't support threads -#![allow(clippy::declare_interior_mutable_const)] + use std::future::Future; use std::pin::Pin; use std::task::{Context, Poll}; diff --git a/tokio/tests/task_panic.rs b/tokio/tests/task_panic.rs index 47f3d6f0f46..eb302d632c0 100644 --- a/tokio/tests/task_panic.rs +++ b/tokio/tests/task_panic.rs @@ -1,5 +1,4 @@ #![warn(rust_2018_idioms)] -#![allow(clippy::declare_interior_mutable_const)] #![cfg(all(feature = "full", not(target_os = "wasi")))] use futures::future; @@ -34,7 +33,7 @@ fn local_set_spawn_local_panic_caller() -> Result<(), Box> { let panic_location_file = test_panic(|| { let _local = task::LocalSet::new(); - let _ = task::spawn_local(async {}); + task::spawn_local(async {}); }); // The panic location should be in this file diff --git a/tokio/tests/tcp_into_split.rs b/tokio/tests/tcp_into_split.rs index 2a030691f64..df8efadb50f 100644 --- a/tokio/tests/tcp_into_split.rs +++ b/tokio/tests/tcp_into_split.rs @@ -38,7 +38,7 @@ async fn split() -> Result<()> { Ok(()) }, async { - let mut read_buf = vec![0u8; 32]; + let mut read_buf = [0u8; 32]; let peek_len1 = read_half.peek(&mut read_buf[..]).await?; let peek_len2 = read_half.peek(&mut 
read_buf[..]).await?; assert_eq!(peek_len1, peek_len2); diff --git a/tokio/tests/time_panic.rs b/tokio/tests/time_panic.rs index 2b8e09573c5..0532812d3ee 100644 --- a/tokio/tests/time_panic.rs +++ b/tokio/tests/time_panic.rs @@ -80,7 +80,7 @@ fn timeout_panic_caller() -> Result<(), Box> { // Runtime without `enable_time` so it has no current timer set. let rt = Builder::new_current_thread().build().unwrap(); rt.block_on(async { - let _ = timeout(Duration::from_millis(5), future::pending::<()>()); + let _timeout = timeout(Duration::from_millis(5), future::pending::<()>()); }); }); From 8463af92afb398aa94600eaac84eafe3ee83b737 Mon Sep 17 00:00:00 2001 From: zixuan zhao Date: Thu, 11 Jan 2024 05:15:57 -0500 Subject: [PATCH 025/162] sync: document FIFO behavior of `tokio::sync::Mutex` (#6279) Signed-off-by: azuredream --- tokio/src/sync/mutex.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tokio/src/sync/mutex.rs b/tokio/src/sync/mutex.rs index 52ba2d34fcd..30f0bdecedb 100644 --- a/tokio/src/sync/mutex.rs +++ b/tokio/src/sync/mutex.rs @@ -17,6 +17,10 @@ use std::{fmt, mem, ptr}; /// differences: [`lock`] is an async method so does not block, and the lock /// guard is designed to be held across `.await` points. /// +/// Tokio's Mutex operates on a guaranteed FIFO basis. +/// This means that the order in which tasks call the [`lock`] method is +/// the exact order in which they will acquire the lock. +/// /// # Which kind of mutex should you use? 
/// /// Contrary to popular belief, it is ok and often preferred to use the ordinary From e4f9bcb5775a8cbbc848aedea3ad49aa60dd1dae Mon Sep 17 00:00:00 2001 From: Jiahao XU Date: Thu, 11 Jan 2024 22:58:23 +1000 Subject: [PATCH 026/162] process: use pidfd on Linux when available (#6152) Signed-off-by: Jiahao XU --- tokio/src/io/poll_evented.rs | 19 ++ tokio/src/process/unix/mod.rs | 57 +++- tokio/src/process/unix/pidfd_reaper.rs | 317 +++++++++++++++++++++++ tokio/tests/process_change_of_runtime.rs | 34 +++ 4 files changed, 414 insertions(+), 13 deletions(-) create mode 100644 tokio/src/process/unix/pidfd_reaper.rs create mode 100644 tokio/tests/process_change_of_runtime.rs diff --git a/tokio/src/io/poll_evented.rs b/tokio/src/io/poll_evented.rs index cb5bffd54a9..67beb5b1551 100644 --- a/tokio/src/io/poll_evented.rs +++ b/tokio/src/io/poll_evented.rs @@ -136,6 +136,25 @@ impl PollEvented { self.registration.deregister(&mut inner)?; Ok(inner) } + + #[cfg(all(feature = "process", target_os = "linux"))] + pub(crate) fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll> { + self.registration + .poll_read_ready(cx) + .map_err(io::Error::from) + .map_ok(|_| ()) + } + + /// Re-register under new runtime with `interest`. + #[cfg(all(feature = "process", target_os = "linux"))] + pub(crate) fn reregister(&mut self, interest: Interest) -> io::Result<()> { + let io = self.io.as_mut().unwrap(); // As io shouldn't ever be None, just unwrap here. + let _ = self.registration.deregister(io); + self.registration = + Registration::new_with_interest_and_handle(io, interest, scheduler::Handle::current())?; + + Ok(()) + } } feature! 
{ diff --git a/tokio/src/process/unix/mod.rs b/tokio/src/process/unix/mod.rs index 5b55b7a52f7..c9d1035f53d 100644 --- a/tokio/src/process/unix/mod.rs +++ b/tokio/src/process/unix/mod.rs @@ -27,6 +27,9 @@ use orphan::{OrphanQueue, OrphanQueueImpl, Wait}; mod reap; use reap::Reaper; +#[cfg(all(target_os = "linux", feature = "rt"))] +mod pidfd_reaper; + use crate::io::{AsyncRead, AsyncWrite, PollEvented, ReadBuf}; use crate::process::kill::Kill; use crate::process::SpawnedChild; @@ -100,15 +103,15 @@ impl OrphanQueue for GlobalOrphanQueue { } #[must_use = "futures do nothing unless polled"] -pub(crate) struct Child { - inner: Reaper, +pub(crate) enum Child { + SignalReaper(Reaper), + #[cfg(all(target_os = "linux", feature = "rt"))] + PidfdReaper(pidfd_reaper::PidfdReaper), } impl fmt::Debug for Child { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Child") - .field("pid", &self.inner.id()) - .finish() + fmt.debug_struct("Child").field("pid", &self.id()).finish() } } @@ -118,12 +121,24 @@ pub(crate) fn spawn_child(cmd: &mut std::process::Command) -> io::Result { + return Ok(SpawnedChild { + child: Child::PidfdReaper(pidfd_reaper), + stdin, + stdout, + stderr, + }) + } + Err((Some(err), _child)) => return Err(err), + Err((None, child_returned)) => child = child_returned, + } + let signal = signal(SignalKind::child())?; Ok(SpawnedChild { - child: Child { - inner: Reaper::new(child, GlobalOrphanQueue, signal), - }, + child: Child::SignalReaper(Reaper::new(child, GlobalOrphanQueue, signal)), stdin, stdout, stderr, @@ -132,25 +147,41 @@ pub(crate) fn spawn_child(cmd: &mut std::process::Command) -> io::Result u32 { - self.inner.id() + match self { + Self::SignalReaper(signal_reaper) => signal_reaper.id(), + #[cfg(all(target_os = "linux", feature = "rt"))] + Self::PidfdReaper(pidfd_reaper) => pidfd_reaper.id(), + } + } + + fn std_child(&mut self) -> &mut StdChild { + match self { + Self::SignalReaper(signal_reaper) => 
signal_reaper.inner_mut(), + #[cfg(all(target_os = "linux", feature = "rt"))] + Self::PidfdReaper(pidfd_reaper) => pidfd_reaper.inner_mut(), + } } pub(crate) fn try_wait(&mut self) -> io::Result> { - self.inner.inner_mut().try_wait() + self.std_child().try_wait() } } impl Kill for Child { fn kill(&mut self) -> io::Result<()> { - self.inner.kill() + self.std_child().kill() } } impl Future for Child { type Output = io::Result; - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - Pin::new(&mut self.inner).poll(cx) + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + match Pin::into_inner(self) { + Self::SignalReaper(signal_reaper) => Pin::new(signal_reaper).poll(cx), + #[cfg(all(target_os = "linux", feature = "rt"))] + Self::PidfdReaper(pidfd_reaper) => Pin::new(pidfd_reaper).poll(cx), + } } } diff --git a/tokio/src/process/unix/pidfd_reaper.rs b/tokio/src/process/unix/pidfd_reaper.rs new file mode 100644 index 00000000000..45d23471f84 --- /dev/null +++ b/tokio/src/process/unix/pidfd_reaper.rs @@ -0,0 +1,317 @@ +use crate::{ + io::{interest::Interest, PollEvented}, + process::{ + imp::{orphan::Wait, OrphanQueue}, + kill::Kill, + }, + util::error::RUNTIME_SHUTTING_DOWN_ERROR, +}; + +use libc::{syscall, SYS_pidfd_open, ENOSYS, PIDFD_NONBLOCK}; +use mio::{event::Source, unix::SourceFd}; +use std::{ + fs::File, + future::Future, + io, + marker::Unpin, + ops::Deref, + os::unix::io::{AsRawFd, FromRawFd, RawFd}, + pin::Pin, + process::ExitStatus, + sync::atomic::{AtomicBool, Ordering::Relaxed}, + task::{Context, Poll}, +}; + +#[derive(Debug)] +struct Pidfd { + fd: File, +} + +impl Pidfd { + fn open(pid: u32) -> Option { + // Store false (0) to reduce executable size + static NO_PIDFD_SUPPORT: AtomicBool = AtomicBool::new(false); + + if NO_PIDFD_SUPPORT.load(Relaxed) { + return None; + } + + // Safety: The following function calls invovkes syscall pidfd_open, + // which takes two parameter: pidfd_open(fd: c_int, flag: c_int) + let fd = unsafe { 
syscall(SYS_pidfd_open, pid, PIDFD_NONBLOCK) }; + if fd == -1 { + let errno = io::Error::last_os_error().raw_os_error().unwrap(); + + if errno == ENOSYS { + NO_PIDFD_SUPPORT.store(true, Relaxed) + } + + None + } else { + // Safety: pidfd_open returns -1 on error or a valid fd with ownership. + Some(Pidfd { + fd: unsafe { File::from_raw_fd(fd as i32) }, + }) + } + } +} + +impl AsRawFd for Pidfd { + fn as_raw_fd(&self) -> RawFd { + self.fd.as_raw_fd() + } +} + +impl Source for Pidfd { + fn register( + &mut self, + registry: &mio::Registry, + token: mio::Token, + interest: mio::Interest, + ) -> io::Result<()> { + SourceFd(&self.as_raw_fd()).register(registry, token, interest) + } + + fn reregister( + &mut self, + registry: &mio::Registry, + token: mio::Token, + interest: mio::Interest, + ) -> io::Result<()> { + SourceFd(&self.as_raw_fd()).reregister(registry, token, interest) + } + + fn deregister(&mut self, registry: &mio::Registry) -> io::Result<()> { + SourceFd(&self.as_raw_fd()).deregister(registry) + } +} + +#[derive(Debug)] +struct PidfdReaperInner +where + W: Unpin, +{ + inner: W, + pidfd: PollEvented, +} + +#[allow(deprecated)] +fn is_rt_shutdown_err(err: &io::Error) -> bool { + if let Some(inner) = err.get_ref() { + // Using `Error::description()` is more efficient than `format!("{inner}")`, + // so we use it here even if it is deprecated. + err.kind() == io::ErrorKind::Other + && inner.source().is_none() + && inner.description() == RUNTIME_SHUTTING_DOWN_ERROR + } else { + false + } +} + +impl Future for PidfdReaperInner +where + W: Wait + Unpin, +{ + type Output = io::Result; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = Pin::into_inner(self); + + match ready!(this.pidfd.poll_read_ready(cx)) { + Err(err) if is_rt_shutdown_err(&err) => { + this.pidfd.reregister(Interest::READABLE)?; + ready!(this.pidfd.poll_read_ready(cx))? + } + res => res?, + } + Poll::Ready(Ok(this + .inner + .try_wait()? 
+ .expect("pidfd is ready to read, the process should have exited"))) + } +} + +#[derive(Debug)] +pub(crate) struct PidfdReaper +where + W: Wait + Unpin, + Q: OrphanQueue + Unpin, +{ + inner: Option>, + orphan_queue: Q, +} + +impl Deref for PidfdReaper +where + W: Wait + Unpin, + Q: OrphanQueue + Unpin, +{ + type Target = W; + + fn deref(&self) -> &Self::Target { + &self.inner.as_ref().expect("inner has gone away").inner + } +} + +impl PidfdReaper +where + W: Wait + Unpin, + Q: OrphanQueue + Unpin, +{ + pub(crate) fn new(inner: W, orphan_queue: Q) -> Result, W)> { + if let Some(pidfd) = Pidfd::open(inner.id()) { + match PollEvented::new_with_interest(pidfd, Interest::READABLE) { + Ok(pidfd) => Ok(Self { + inner: Some(PidfdReaperInner { pidfd, inner }), + orphan_queue, + }), + Err(io_error) => Err((Some(io_error), inner)), + } + } else { + Err((None, inner)) + } + } + + pub(crate) fn inner_mut(&mut self) -> &mut W { + &mut self.inner.as_mut().expect("inner has gone away").inner + } +} + +impl Future for PidfdReaper +where + W: Wait + Unpin, + Q: OrphanQueue + Unpin, +{ + type Output = io::Result; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + Pin::new( + Pin::into_inner(self) + .inner + .as_mut() + .expect("inner has gone away"), + ) + .poll(cx) + } +} + +impl Kill for PidfdReaper +where + W: Wait + Unpin + Kill, + Q: OrphanQueue + Unpin, +{ + fn kill(&mut self) -> io::Result<()> { + self.inner_mut().kill() + } +} + +impl Drop for PidfdReaper +where + W: Wait + Unpin, + Q: OrphanQueue + Unpin, +{ + fn drop(&mut self) { + let mut orphan = self.inner.take().expect("inner has gone away").inner; + if let Ok(Some(_)) = orphan.try_wait() { + return; + } + + self.orphan_queue.push_orphan(orphan); + } +} + +#[cfg(all(test, not(loom), not(miri)))] +mod test { + use super::*; + use crate::{ + process::unix::orphan::test::MockQueue, + runtime::{Builder as RuntimeBuilder, Runtime}, + }; + use std::process::{Command, Output}; + + fn create_runtime() -> 
Runtime { + RuntimeBuilder::new_current_thread() + .enable_io() + .build() + .unwrap() + } + + fn run_test(fut: impl Future) { + create_runtime().block_on(fut) + } + + fn is_pidfd_available() -> bool { + let Output { stdout, status, .. } = Command::new("uname").arg("-r").output().unwrap(); + assert!(status.success()); + let stdout = String::from_utf8_lossy(&stdout); + + let mut kernel_version_iter = stdout.split_once('-').unwrap().0.split('.'); + let major: u32 = kernel_version_iter.next().unwrap().parse().unwrap(); + let minor: u32 = kernel_version_iter.next().unwrap().parse().unwrap(); + + major >= 6 || (major == 5 && minor >= 10) + } + + #[test] + fn test_pidfd_reaper_poll() { + if !is_pidfd_available() { + eprintln!("pidfd is not available on this linux kernel, skip this test"); + return; + } + + let queue = MockQueue::new(); + + run_test(async { + let child = Command::new("true").spawn().unwrap(); + let pidfd_reaper = PidfdReaper::new(child, &queue).unwrap(); + + let exit_status = pidfd_reaper.await.unwrap(); + assert!(exit_status.success()); + }); + + assert!(queue.all_enqueued.borrow().is_empty()); + } + + #[test] + fn test_pidfd_reaper_kill() { + if !is_pidfd_available() { + eprintln!("pidfd is not available on this linux kernel, skip this test"); + return; + } + + let queue = MockQueue::new(); + + run_test(async { + let child = Command::new("sleep").arg("1800").spawn().unwrap(); + let mut pidfd_reaper = PidfdReaper::new(child, &queue).unwrap(); + + pidfd_reaper.kill().unwrap(); + + let exit_status = pidfd_reaper.await.unwrap(); + assert!(!exit_status.success()); + }); + + assert!(queue.all_enqueued.borrow().is_empty()); + } + + #[test] + fn test_pidfd_reaper_drop() { + if !is_pidfd_available() { + eprintln!("pidfd is not available on this linux kernel, skip this test"); + return; + } + + let queue = MockQueue::new(); + + let mut child = Command::new("sleep").arg("1800").spawn().unwrap(); + + run_test(async { + let _pidfd_reaper = PidfdReaper::new(&mut 
child, &queue).unwrap(); + }); + + assert_eq!(queue.all_enqueued.borrow().len(), 1); + + child.kill().unwrap(); + child.wait().unwrap(); + } +} diff --git a/tokio/tests/process_change_of_runtime.rs b/tokio/tests/process_change_of_runtime.rs new file mode 100644 index 00000000000..94efe35b146 --- /dev/null +++ b/tokio/tests/process_change_of_runtime.rs @@ -0,0 +1,34 @@ +#![cfg(feature = "process")] +#![warn(rust_2018_idioms)] +// This tests test the behavior of `process::Command::spawn` when it is used +// outside runtime, and when `process::Child::wait ` is used in a different +// runtime from which `process::Command::spawn` is used. +#![cfg(all(unix, not(target_os = "freebsd")))] + +use std::process::Stdio; +use tokio::{process::Command, runtime::Runtime}; + +#[test] +fn process_spawned_and_wait_in_different_runtime() { + let mut child = Runtime::new().unwrap().block_on(async { + Command::new("true") + .stdin(Stdio::piped()) + .stdout(Stdio::null()) + .spawn() + .unwrap() + }); + Runtime::new().unwrap().block_on(async { + let _ = child.wait().await.unwrap(); + }); +} + +#[test] +#[should_panic( + expected = "there is no reactor running, must be called from the context of a Tokio 1.x runtime" +)] +fn process_spawned_outside_runtime() { + let _ = Command::new("true") + .stdin(Stdio::piped()) + .stdout(Stdio::null()) + .spawn(); +} From 12ce924fb9c1ffe0340b979fefa00d13ebf631c3 Mon Sep 17 00:00:00 2001 From: "M.Amin Rayej" Date: Sun, 14 Jan 2024 14:04:30 +0330 Subject: [PATCH 027/162] task: add `JoinSet::try_join_next` (#6280) --- tokio/src/task/join_set.rs | 55 ++++++++++++++++++++- tokio/src/util/idle_notified_set.rs | 28 +++++++++++ tokio/tests/task_join_set.rs | 77 +++++++++++++++++++++++++++++ 3 files changed, 159 insertions(+), 1 deletion(-) diff --git a/tokio/src/task/join_set.rs b/tokio/src/task/join_set.rs index 4eb15a24d5f..7aace14d850 100644 --- a/tokio/src/task/join_set.rs +++ b/tokio/src/task/join_set.rs @@ -12,7 +12,7 @@ use std::task::{Context, Poll}; 
use crate::runtime::Handle; #[cfg(tokio_unstable)] use crate::task::Id; -use crate::task::{AbortHandle, JoinError, JoinHandle, LocalSet}; +use crate::task::{unconstrained, AbortHandle, JoinError, JoinHandle, LocalSet}; use crate::util::IdleNotifiedSet; /// A collection of tasks spawned on a Tokio runtime. @@ -306,6 +306,59 @@ impl JoinSet { crate::future::poll_fn(|cx| self.poll_join_next_with_id(cx)).await } + /// Tries to join one of the tasks in the set that has completed and return its output. + /// + /// Returns `None` if the set is empty. + pub fn try_join_next(&mut self) -> Option> { + // Loop over all notified `JoinHandle`s to find one that's ready, or until none are left. + loop { + let mut entry = self.inner.try_pop_notified()?; + + let res = entry.with_value_and_context(|jh, ctx| { + // Since this function is not async and cannot be forced to yield, we should + // disable budgeting when we want to check for the `JoinHandle` readiness. + Pin::new(&mut unconstrained(jh)).poll(ctx) + }); + + if let Poll::Ready(res) = res { + let _entry = entry.remove(); + + return Some(res); + } + } + } + + /// Tries to join one of the tasks in the set that has completed and return its output, + /// along with the [task ID] of the completed task. + /// + /// Returns `None` if the set is empty. + /// + /// When this method returns an error, then the id of the task that failed can be accessed + /// using the [`JoinError::id`] method. + /// + /// [task ID]: crate::task::Id + /// [`JoinError::id`]: fn@crate::task::JoinError::id + #[cfg(tokio_unstable)] + #[cfg_attr(docsrs, doc(cfg(tokio_unstable)))] + pub fn try_join_next_with_id(&mut self) -> Option> { + // Loop over all notified `JoinHandle`s to find one that's ready, or until none are left. 
+ loop { + let mut entry = self.inner.try_pop_notified()?; + + let res = entry.with_value_and_context(|jh, ctx| { + // Since this function is not async and cannot be forced to yield, we should + // disable budgeting when we want to check for the `JoinHandle` readiness. + Pin::new(&mut unconstrained(jh)).poll(ctx) + }); + + if let Poll::Ready(res) = res { + let entry = entry.remove(); + + return Some(res.map(|output| (entry.id(), output))); + } + } + } + /// Aborts all tasks and waits for them to finish shutting down. /// /// Calling this method is equivalent to calling [`abort_all`] and then calling [`join_next`] in diff --git a/tokio/src/util/idle_notified_set.rs b/tokio/src/util/idle_notified_set.rs index 430f2e7568b..bd9c2ef1bbc 100644 --- a/tokio/src/util/idle_notified_set.rs +++ b/tokio/src/util/idle_notified_set.rs @@ -203,6 +203,34 @@ impl IdleNotifiedSet { Some(EntryInOneOfTheLists { entry, set: self }) } + /// Tries to pop an entry from the notified list to poll it. The entry is moved to + /// the idle list atomically. + pub(crate) fn try_pop_notified(&mut self) -> Option> { + // We don't decrement the length because this call moves the entry to + // the idle list rather than removing it. + if self.length == 0 { + // Fast path. + return None; + } + + let mut lock = self.lists.lock(); + + // Pop the entry, returning None if empty. + let entry = lock.notified.pop_back()?; + + lock.idle.push_front(entry.clone()); + + // Safety: We are holding the lock. + entry.my_list.with_mut(|ptr| unsafe { + *ptr = List::Idle; + }); + + drop(lock); + + // Safety: We just put the entry in the idle list, so it is in one of the lists. + Some(EntryInOneOfTheLists { entry, set: self }) + } + /// Call a function on every element in this list. 
pub(crate) fn for_each(&mut self, mut func: F) { fn get_ptrs(list: &mut LinkedList, ptrs: &mut Vec<*mut T>) { diff --git a/tokio/tests/task_join_set.rs b/tokio/tests/task_join_set.rs index bed9b7dad82..8a42be17b49 100644 --- a/tokio/tests/task_join_set.rs +++ b/tokio/tests/task_join_set.rs @@ -227,3 +227,80 @@ async fn join_set_coop() { assert!(coop_count >= 1); assert_eq!(count, TASK_NUM); } + +#[tokio::test(flavor = "current_thread")] +async fn try_join_next() { + const TASK_NUM: u32 = 1000; + + let (send, recv) = tokio::sync::watch::channel(()); + + let mut set = JoinSet::new(); + + for _ in 0..TASK_NUM { + let mut recv = recv.clone(); + set.spawn(async move { recv.changed().await.unwrap() }); + } + drop(recv); + + assert!(set.try_join_next().is_none()); + + send.send_replace(()); + send.closed().await; + + let mut count = 0; + loop { + match set.try_join_next() { + Some(Ok(())) => { + count += 1; + } + Some(Err(err)) => panic!("failed: {}", err), + None => { + break; + } + } + } + + assert_eq!(count, TASK_NUM); +} + +#[cfg(tokio_unstable)] +#[tokio::test(flavor = "current_thread")] +async fn try_join_next_with_id() { + const TASK_NUM: u32 = 1000; + + let (send, recv) = tokio::sync::watch::channel(()); + + let mut set = JoinSet::new(); + let mut spawned = std::collections::HashSet::with_capacity(TASK_NUM as usize); + + for _ in 0..TASK_NUM { + let mut recv = recv.clone(); + let handle = set.spawn(async move { recv.changed().await.unwrap() }); + + spawned.insert(handle.id()); + } + drop(recv); + + assert!(set.try_join_next_with_id().is_none()); + + send.send_replace(()); + send.closed().await; + + let mut count = 0; + let mut joined = std::collections::HashSet::with_capacity(TASK_NUM as usize); + loop { + match set.try_join_next_with_id() { + Some(Ok((id, ()))) => { + count += 1; + joined.insert(id); + } + Some(Err(err)) => panic!("failed: {}", err), + None => { + break; + } + } + } + + assert_eq!(count, TASK_NUM); + assert_eq!(joined, spawned); +} From 
bfd7b080678c03d868a5a5761873031f1e696b73 Mon Sep 17 00:00:00 2001 From: Evan Rittenhouse Date: Tue, 16 Jan 2024 02:51:38 -0600 Subject: [PATCH 028/162] time: add `FutureExt::timeout` (#6276) --- tokio-util/src/time/mod.rs | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/tokio-util/src/time/mod.rs b/tokio-util/src/time/mod.rs index 2d340083604..06d4bd2697e 100644 --- a/tokio-util/src/time/mod.rs +++ b/tokio-util/src/time/mod.rs @@ -8,7 +8,9 @@ //! //! This type must be used from within the context of the `Runtime`. +use futures_core::Future; use std::time::Duration; +use tokio::time::Timeout; mod wheel; @@ -17,6 +19,34 @@ pub mod delay_queue; #[doc(inline)] pub use delay_queue::DelayQueue; +/// A trait which contains a variety of convenient adapters and utilities for `Future`s. +pub trait FutureExt: Future { + /// A wrapper around [`tokio::time::timeout`], with the advantage that it is easier to write + /// fluent call chains. + /// + /// # Examples + /// + /// ```rust + /// use tokio::{sync::oneshot, time::Duration}; + /// use tokio_util::time::FutureExt; + /// + /// # async fn dox() { + /// let (tx, rx) = oneshot::channel::<()>(); + /// + /// let res = rx.timeout(Duration::from_millis(10)).await; + /// assert!(res.is_err()); + /// # } + /// ``` + fn timeout(self, timeout: Duration) -> Timeout + where + Self: Sized, + { + tokio::time::timeout(timeout, self) + } +} + +impl FutureExt for T {} + // ===== Internal utils ===== enum Round { From 58edfc61abe4acdc2dda485c1d66715790b9315c Mon Sep 17 00:00:00 2001 From: "M.Amin Rayej" Date: Tue, 16 Jan 2024 18:40:23 +0330 Subject: [PATCH 029/162] ci: verify that tests work with panic=abort (#6283) --- .github/workflows/ci.yml | 28 ++++++++++++++++++++ tokio-stream/tests/stream_panic.rs | 1 + tokio-util/tests/panic.rs | 3 +-- tokio-util/tests/spawn_pinned.rs | 2 ++ tokio-util/tests/time_delay_queue.rs | 2 ++ tokio/src/runtime/tests/task_combinations.rs | 2 +- tokio/tests/io_panic.rs | 6 +---- 
tokio/tests/join_handle_panic.rs | 2 +- tokio/tests/net_panic.rs | 5 +--- tokio/tests/process_issue_2174.rs | 1 + tokio/tests/rt_handle.rs | 8 +++--- tokio/tests/rt_panic.rs | 5 +--- tokio/tests/signal_panic.rs | 1 + tokio/tests/sync_panic.rs | 13 +-------- tokio/tests/task_panic.rs | 8 +----- tokio/tests/time_panic.rs | 6 +---- tokio/tests/time_pause.rs | 2 +- 17 files changed, 49 insertions(+), 46 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2b79ec21c22..60a962f8b25 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -148,6 +148,34 @@ jobs: cargo nextest run --workspace --all-features cargo test --doc --workspace --all-features + test-workspace-all-features-panic-abort: + needs: basics + name: test all crates in the workspace with all features and panic=abort + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: + - windows-latest + - ubuntu-latest + - macos-latest + steps: + - uses: actions/checkout@v4 + - name: Install Rust ${{ env.rust_nightly }} + uses: dtolnay/rust-toolchain@stable + with: + toolchain: ${{ env.rust_nightly }} + - name: Install cargo-nextest + uses: taiki-e/install-action@v2 + with: + tool: cargo-nextest + + - uses: Swatinem/rust-cache@v2 + + - name: test all --all-features panic=abort + run: | + set -euxo pipefail + RUSTFLAGS="$RUSTFLAGS -C panic=abort -Zpanic-abort-tests" cargo nextest run --workspace --exclude tokio-macros --exclude tests-build --all-features --tests + test-integration-tests-per-feature: needs: basics name: Run integration tests for each feature diff --git a/tokio-stream/tests/stream_panic.rs b/tokio-stream/tests/stream_panic.rs index 22c1c208001..6ec737083d5 100644 --- a/tokio-stream/tests/stream_panic.rs +++ b/tokio-stream/tests/stream_panic.rs @@ -1,5 +1,6 @@ #![warn(rust_2018_idioms)] #![cfg(all(feature = "time", not(target_os = "wasi")))] // Wasi does not support panic recovery +#![cfg(panic = "unwind")] use parking_lot::{const_mutex, Mutex}; use 
std::error::Error; diff --git a/tokio-util/tests/panic.rs b/tokio-util/tests/panic.rs index e4fcb47ef69..853f132fc2f 100644 --- a/tokio-util/tests/panic.rs +++ b/tokio-util/tests/panic.rs @@ -1,5 +1,6 @@ #![warn(rust_2018_idioms)] #![cfg(all(feature = "full", not(target_os = "wasi")))] // Wasi doesn't support panic recovery +#![cfg(panic = "unwind")] use parking_lot::{const_mutex, Mutex}; use std::error::Error; @@ -76,7 +77,6 @@ fn poll_sender_send_item_panic_caller() -> Result<(), Box> { } #[test] - fn local_pool_handle_new_panic_caller() -> Result<(), Box> { let panic_location_file = test_panic(|| { let _ = LocalPoolHandle::new(0); @@ -89,7 +89,6 @@ fn local_pool_handle_new_panic_caller() -> Result<(), Box> { } #[test] - fn local_pool_handle_spawn_pinned_by_idx_panic_caller() -> Result<(), Box> { let panic_location_file = test_panic(|| { let rt = basic(); diff --git a/tokio-util/tests/spawn_pinned.rs b/tokio-util/tests/spawn_pinned.rs index 9eeeecfb0c0..e05b4095eb4 100644 --- a/tokio-util/tests/spawn_pinned.rs +++ b/tokio-util/tests/spawn_pinned.rs @@ -71,6 +71,7 @@ async fn can_spawn_multiple_futures() { /// A panic in the spawned task causes the join handle to return an error. /// But, you can continue to spawn tasks. #[tokio::test] +#[cfg(panic = "unwind")] async fn task_panic_propagates() { let pool = task::LocalPoolHandle::new(1); @@ -95,6 +96,7 @@ async fn task_panic_propagates() { /// A panic during task creation causes the join handle to return an error. /// But, you can continue to spawn tasks. 
#[tokio::test] +#[cfg(panic = "unwind")] async fn callback_panic_does_not_kill_worker() { let pool = task::LocalPoolHandle::new(1); diff --git a/tokio-util/tests/time_delay_queue.rs b/tokio-util/tests/time_delay_queue.rs index 9b7b6cc85da..6616327d41c 100644 --- a/tokio-util/tests/time_delay_queue.rs +++ b/tokio-util/tests/time_delay_queue.rs @@ -805,6 +805,7 @@ async fn item_expiry_greater_than_wheel() { #[cfg_attr(target_os = "wasi", ignore = "FIXME: Does not seem to work with WASI")] #[tokio::test(start_paused = true)] +#[cfg(panic = "unwind")] async fn remove_after_compact() { let now = Instant::now(); let mut queue = DelayQueue::new(); @@ -822,6 +823,7 @@ async fn remove_after_compact() { #[cfg_attr(target_os = "wasi", ignore = "FIXME: Does not seem to work with WASI")] #[tokio::test(start_paused = true)] +#[cfg(panic = "unwind")] async fn remove_after_compact_poll() { let now = Instant::now(); let mut queue = task::spawn(DelayQueue::new()); diff --git a/tokio/src/runtime/tests/task_combinations.rs b/tokio/src/runtime/tests/task_combinations.rs index ca13c8c95f3..0f99ed66247 100644 --- a/tokio/src/runtime/tests/task_combinations.rs +++ b/tokio/src/runtime/tests/task_combinations.rs @@ -64,8 +64,8 @@ enum CombiAbortSource { AbortHandle, } -#[cfg(panic = "unwind")] #[test] +#[cfg_attr(panic = "abort", ignore)] fn test_combinations() { let mut rt = &[ CombiRuntime::CurrentThread, diff --git a/tokio/tests/io_panic.rs b/tokio/tests/io_panic.rs index 89e504f461c..b2cbad2751d 100644 --- a/tokio/tests/io_panic.rs +++ b/tokio/tests/io_panic.rs @@ -1,5 +1,6 @@ #![warn(rust_2018_idioms)] #![cfg(all(feature = "full", not(target_os = "wasi")))] // Wasi does not support panic recovery +#![cfg(panic = "unwind")] use std::task::{Context, Poll}; use std::{error::Error, pin::Pin}; @@ -54,7 +55,6 @@ mod unix { } } -#[cfg(panic = "unwind")] #[test] fn read_buf_initialize_unfilled_to_panic_caller() -> Result<(), Box> { let panic_location_file = test_panic(|| { @@ -70,7 +70,6 @@ fn 
read_buf_initialize_unfilled_to_panic_caller() -> Result<(), Box> Ok(()) } -#[cfg(panic = "unwind")] #[test] fn read_buf_advance_panic_caller() -> Result<(), Box> { let panic_location_file = test_panic(|| { @@ -86,7 +85,6 @@ fn read_buf_advance_panic_caller() -> Result<(), Box> { Ok(()) } -#[cfg(panic = "unwind")] #[test] fn read_buf_set_filled_panic_caller() -> Result<(), Box> { let panic_location_file = test_panic(|| { @@ -102,7 +100,6 @@ fn read_buf_set_filled_panic_caller() -> Result<(), Box> { Ok(()) } -#[cfg(panic = "unwind")] #[test] fn read_buf_put_slice_panic_caller() -> Result<(), Box> { let panic_location_file = test_panic(|| { @@ -120,7 +117,6 @@ fn read_buf_put_slice_panic_caller() -> Result<(), Box> { Ok(()) } -#[cfg(panic = "unwind")] #[test] fn unsplit_panic_caller() -> Result<(), Box> { let panic_location_file = test_panic(|| { diff --git a/tokio/tests/join_handle_panic.rs b/tokio/tests/join_handle_panic.rs index 94a1b6112c0..248d5702f68 100644 --- a/tokio/tests/join_handle_panic.rs +++ b/tokio/tests/join_handle_panic.rs @@ -1,5 +1,6 @@ #![warn(rust_2018_idioms)] #![cfg(all(feature = "full", not(target_os = "wasi")))] // Wasi doesn't support panic recovery +#![cfg(panic = "unwind")] struct PanicsOnDrop; @@ -9,7 +10,6 @@ impl Drop for PanicsOnDrop { } } -#[cfg(panic = "unwind")] #[tokio::test] async fn test_panics_do_not_propagate_when_dropping_join_handle() { let join_handle = tokio::spawn(async move { PanicsOnDrop }); diff --git a/tokio/tests/net_panic.rs b/tokio/tests/net_panic.rs index 81c9df55e3b..9d6e87d9aee 100644 --- a/tokio/tests/net_panic.rs +++ b/tokio/tests/net_panic.rs @@ -1,5 +1,6 @@ #![warn(rust_2018_idioms)] #![cfg(all(feature = "full", not(target_os = "wasi")))] +#![cfg(panic = "unwind")] use std::error::Error; use tokio::net::{TcpListener, TcpStream}; @@ -10,7 +11,6 @@ mod support { } use support::panic::test_panic; -#[cfg(panic = "unwind")] #[test] fn udp_socket_from_std_panic_caller() -> Result<(), Box> { use 
std::net::SocketAddr; @@ -33,7 +33,6 @@ fn udp_socket_from_std_panic_caller() -> Result<(), Box> { Ok(()) } -#[cfg(panic = "unwind")] #[test] fn tcp_listener_from_std_panic_caller() -> Result<(), Box> { let std_listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap(); @@ -52,7 +51,6 @@ fn tcp_listener_from_std_panic_caller() -> Result<(), Box> { Ok(()) } -#[cfg(panic = "unwind")] #[test] fn tcp_stream_from_std_panic_caller() -> Result<(), Box> { let std_listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap(); @@ -166,7 +164,6 @@ fn unix_datagram_from_std_panic_caller() -> Result<(), Box> { Ok(()) } -#[cfg(panic = "unwind")] #[test] #[cfg(windows)] fn server_options_max_instances_panic_caller() -> Result<(), Box> { diff --git a/tokio/tests/process_issue_2174.rs b/tokio/tests/process_issue_2174.rs index 5ee9dc0a4b4..2f8c73a58d0 100644 --- a/tokio/tests/process_issue_2174.rs +++ b/tokio/tests/process_issue_2174.rs @@ -17,6 +17,7 @@ use tokio::time; use tokio_test::assert_err; #[tokio::test] +#[cfg_attr(panic = "abort", ignore)] async fn issue_2174() { let mut child = Command::new("sleep") .arg("2") diff --git a/tokio/tests/rt_handle.rs b/tokio/tests/rt_handle.rs index a571b98eaf6..92fa777e321 100644 --- a/tokio/tests/rt_handle.rs +++ b/tokio/tests/rt_handle.rs @@ -3,8 +3,8 @@ use tokio::runtime::Runtime; -#[cfg(panic = "unwind")] #[test] +#[cfg_attr(panic = "abort", ignore)] fn basic_enter() { let rt1 = rt(); let rt2 = rt(); @@ -16,9 +16,9 @@ fn basic_enter() { drop(enter1); } -#[cfg(panic = "unwind")] #[test] #[should_panic] +#[cfg_attr(panic = "abort", ignore)] fn interleave_enter_different_rt() { let rt1 = rt(); let rt2 = rt(); @@ -30,9 +30,9 @@ fn interleave_enter_different_rt() { drop(enter2); } -#[cfg(panic = "unwind")] #[test] #[should_panic] +#[cfg_attr(panic = "abort", ignore)] fn interleave_enter_same_rt() { let rt1 = rt(); @@ -44,9 +44,9 @@ fn interleave_enter_same_rt() { drop(enter3); } -#[cfg(panic = "unwind")] #[test] #[cfg(not(target_os = 
"wasi"))] +#[cfg_attr(panic = "abort", ignore)] fn interleave_then_enter() { let _ = std::panic::catch_unwind(|| { let rt1 = rt(); diff --git a/tokio/tests/rt_panic.rs b/tokio/tests/rt_panic.rs index 5b5ee48a960..ecaf977c881 100644 --- a/tokio/tests/rt_panic.rs +++ b/tokio/tests/rt_panic.rs @@ -1,6 +1,7 @@ #![warn(rust_2018_idioms)] #![cfg(feature = "full")] #![cfg(not(target_os = "wasi"))] // Wasi doesn't support panic recovery +#![cfg(panic = "unwind")] use futures::future; use std::error::Error; @@ -11,7 +12,6 @@ mod support { } use support::panic::test_panic; -#[cfg(panic = "unwind")] #[test] fn current_handle_panic_caller() -> Result<(), Box> { let panic_location_file = test_panic(|| { @@ -24,7 +24,6 @@ fn current_handle_panic_caller() -> Result<(), Box> { Ok(()) } -#[cfg(panic = "unwind")] #[test] fn into_panic_panic_caller() -> Result<(), Box> { let panic_location_file = test_panic(move || { @@ -47,7 +46,6 @@ fn into_panic_panic_caller() -> Result<(), Box> { Ok(()) } -#[cfg(panic = "unwind")] #[test] fn builder_worker_threads_panic_caller() -> Result<(), Box> { let panic_location_file = test_panic(|| { @@ -60,7 +58,6 @@ fn builder_worker_threads_panic_caller() -> Result<(), Box> { Ok(()) } -#[cfg(panic = "unwind")] #[test] fn builder_max_blocking_threads_panic_caller() -> Result<(), Box> { let panic_location_file = test_panic(|| { diff --git a/tokio/tests/signal_panic.rs b/tokio/tests/signal_panic.rs index ce1ec3e4a73..6b662e54b96 100644 --- a/tokio/tests/signal_panic.rs +++ b/tokio/tests/signal_panic.rs @@ -1,6 +1,7 @@ #![warn(rust_2018_idioms)] #![cfg(feature = "full")] #![cfg(unix)] +#![cfg(panic = "unwind")] use std::error::Error; use tokio::runtime::Builder; diff --git a/tokio/tests/sync_panic.rs b/tokio/tests/sync_panic.rs index ae261f993fc..41bf0850068 100644 --- a/tokio/tests/sync_panic.rs +++ b/tokio/tests/sync_panic.rs @@ -1,5 +1,6 @@ #![warn(rust_2018_idioms)] #![cfg(all(feature = "full", not(target_os = "wasi")))] +#![cfg(panic = "unwind")] use 
std::{error::Error, sync::Arc}; use tokio::{ @@ -12,7 +13,6 @@ mod support { } use support::panic::test_panic; -#[cfg(panic = "unwind")] #[test] fn broadcast_channel_panic_caller() -> Result<(), Box> { let panic_location_file = test_panic(|| { @@ -25,7 +25,6 @@ fn broadcast_channel_panic_caller() -> Result<(), Box> { Ok(()) } -#[cfg(panic = "unwind")] #[test] fn mutex_blocking_lock_panic_caller() -> Result<(), Box> { let panic_location_file = test_panic(|| { @@ -42,7 +41,6 @@ fn mutex_blocking_lock_panic_caller() -> Result<(), Box> { Ok(()) } -#[cfg(panic = "unwind")] #[test] fn oneshot_blocking_recv_panic_caller() -> Result<(), Box> { let panic_location_file = test_panic(|| { @@ -59,7 +57,6 @@ fn oneshot_blocking_recv_panic_caller() -> Result<(), Box> { Ok(()) } -#[cfg(panic = "unwind")] #[test] fn rwlock_with_max_readers_panic_caller() -> Result<(), Box> { let panic_location_file = test_panic(|| { @@ -72,7 +69,6 @@ fn rwlock_with_max_readers_panic_caller() -> Result<(), Box> { Ok(()) } -#[cfg(panic = "unwind")] #[test] fn rwlock_blocking_read_panic_caller() -> Result<(), Box> { let panic_location_file = test_panic(|| { @@ -89,7 +85,6 @@ fn rwlock_blocking_read_panic_caller() -> Result<(), Box> { Ok(()) } -#[cfg(panic = "unwind")] #[test] fn rwlock_blocking_write_panic_caller() -> Result<(), Box> { let panic_location_file = test_panic(|| { @@ -106,7 +101,6 @@ fn rwlock_blocking_write_panic_caller() -> Result<(), Box> { Ok(()) } -#[cfg(panic = "unwind")] #[test] fn mpsc_bounded_channel_panic_caller() -> Result<(), Box> { let panic_location_file = test_panic(|| { @@ -119,7 +113,6 @@ fn mpsc_bounded_channel_panic_caller() -> Result<(), Box> { Ok(()) } -#[cfg(panic = "unwind")] #[test] fn mpsc_bounded_receiver_blocking_recv_panic_caller() -> Result<(), Box> { let panic_location_file = test_panic(|| { @@ -136,7 +129,6 @@ fn mpsc_bounded_receiver_blocking_recv_panic_caller() -> Result<(), Box Result<(), Box> { let panic_location_file = test_panic(|| { @@ -153,7 +145,6 
@@ fn mpsc_bounded_sender_blocking_send_panic_caller() -> Result<(), Box Ok(()) } -#[cfg(panic = "unwind")] #[test] fn mpsc_unbounded_receiver_blocking_recv_panic_caller() -> Result<(), Box> { let panic_location_file = test_panic(|| { @@ -170,7 +161,6 @@ fn mpsc_unbounded_receiver_blocking_recv_panic_caller() -> Result<(), Box Result<(), Box> { let panic_location_file = test_panic(|| { @@ -187,7 +177,6 @@ fn semaphore_merge_unrelated_owned_permits() -> Result<(), Box> { Ok(()) } -#[cfg(panic = "unwind")] #[test] fn semaphore_merge_unrelated_permits() -> Result<(), Box> { let panic_location_file = test_panic(|| { diff --git a/tokio/tests/task_panic.rs b/tokio/tests/task_panic.rs index eb302d632c0..8b4de2ada54 100644 --- a/tokio/tests/task_panic.rs +++ b/tokio/tests/task_panic.rs @@ -1,5 +1,6 @@ #![warn(rust_2018_idioms)] #![cfg(all(feature = "full", not(target_os = "wasi")))] +#![cfg(panic = "unwind")] use futures::future; use std::error::Error; @@ -11,7 +12,6 @@ mod support { } use support::panic::test_panic; -#[cfg(panic = "unwind")] #[test] fn block_in_place_panic_caller() -> Result<(), Box> { let panic_location_file = test_panic(|| { @@ -27,7 +27,6 @@ fn block_in_place_panic_caller() -> Result<(), Box> { Ok(()) } -#[cfg(panic = "unwind")] #[test] fn local_set_spawn_local_panic_caller() -> Result<(), Box> { let panic_location_file = test_panic(|| { @@ -42,7 +41,6 @@ fn local_set_spawn_local_panic_caller() -> Result<(), Box> { Ok(()) } -#[cfg(panic = "unwind")] #[test] fn local_set_block_on_panic_caller() -> Result<(), Box> { let panic_location_file = test_panic(|| { @@ -60,7 +58,6 @@ fn local_set_block_on_panic_caller() -> Result<(), Box> { Ok(()) } -#[cfg(panic = "unwind")] #[test] fn spawn_panic_caller() -> Result<(), Box> { let panic_location_file = test_panic(|| { @@ -73,7 +70,6 @@ fn spawn_panic_caller() -> Result<(), Box> { Ok(()) } -#[cfg(panic = "unwind")] #[test] fn local_key_sync_scope_panic_caller() -> Result<(), Box> { tokio::task_local! 
{ @@ -94,7 +90,6 @@ fn local_key_sync_scope_panic_caller() -> Result<(), Box> { Ok(()) } -#[cfg(panic = "unwind")] #[test] fn local_key_with_panic_caller() -> Result<(), Box> { tokio::task_local! { @@ -111,7 +106,6 @@ fn local_key_with_panic_caller() -> Result<(), Box> { Ok(()) } -#[cfg(panic = "unwind")] #[test] fn local_key_get_panic_caller() -> Result<(), Box> { tokio::task_local! { diff --git a/tokio/tests/time_panic.rs b/tokio/tests/time_panic.rs index 0532812d3ee..8a997f04529 100644 --- a/tokio/tests/time_panic.rs +++ b/tokio/tests/time_panic.rs @@ -1,5 +1,6 @@ #![warn(rust_2018_idioms)] #![cfg(all(feature = "full", not(target_os = "wasi")))] // Wasi doesn't support panic recovery +#![cfg(panic = "unwind")] use futures::future; use std::error::Error; @@ -12,7 +13,6 @@ mod support { } use support::panic::test_panic; -#[cfg(panic = "unwind")] #[test] fn pause_panic_caller() -> Result<(), Box> { let panic_location_file = test_panic(|| { @@ -30,7 +30,6 @@ fn pause_panic_caller() -> Result<(), Box> { Ok(()) } -#[cfg(panic = "unwind")] #[test] fn resume_panic_caller() -> Result<(), Box> { let panic_location_file = test_panic(|| { @@ -47,7 +46,6 @@ fn resume_panic_caller() -> Result<(), Box> { Ok(()) } -#[cfg(panic = "unwind")] #[test] fn interval_panic_caller() -> Result<(), Box> { let panic_location_file = test_panic(|| { @@ -60,7 +58,6 @@ fn interval_panic_caller() -> Result<(), Box> { Ok(()) } -#[cfg(panic = "unwind")] #[test] fn interval_at_panic_caller() -> Result<(), Box> { let panic_location_file = test_panic(|| { @@ -73,7 +70,6 @@ fn interval_at_panic_caller() -> Result<(), Box> { Ok(()) } -#[cfg(panic = "unwind")] #[test] fn timeout_panic_caller() -> Result<(), Box> { let panic_location_file = test_panic(|| { diff --git a/tokio/tests/time_pause.rs b/tokio/tests/time_pause.rs index 2750fc5c825..a14f7e22c61 100644 --- a/tokio/tests/time_pause.rs +++ b/tokio/tests/time_pause.rs @@ -36,7 +36,7 @@ async fn pause_time_in_main_threads() { tokio::time::pause(); } 
-#[cfg(panic = "unwind")] +#[cfg_attr(panic = "abort", ignore)] #[cfg(all(feature = "full", not(target_os = "wasi")))] // Wasi doesn't support threads #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn pause_time_in_spawn_threads() { From eab26a662c5716575bb0cd9c31d6ae79355e1c55 Mon Sep 17 00:00:00 2001 From: "M.Amin Rayej" Date: Thu, 18 Jan 2024 13:05:46 +0330 Subject: [PATCH 030/162] net: document that `*Fd` traits on `TcpSocket` are unix-only (#6294) --- tokio/src/macros/cfg.rs | 12 +++++++++ tokio/src/net/tcp/socket.rs | 50 ++++++++++++++++++------------------- 2 files changed, 36 insertions(+), 26 deletions(-) diff --git a/tokio/src/macros/cfg.rs b/tokio/src/macros/cfg.rs index 443cc14b373..5f0c6638857 100644 --- a/tokio/src/macros/cfg.rs +++ b/tokio/src/macros/cfg.rs @@ -25,6 +25,18 @@ macro_rules! cfg_windows { } } +/// Enables Unix-specific code. +/// Use this macro instead of `cfg(unix)` to generate docs properly. +macro_rules! cfg_unix { + ($($item:item)*) => { + $( + #[cfg(any(all(doc, docsrs), unix))] + #[cfg_attr(docsrs, doc(cfg(unix)))] + $item + )* + } +} + /// Enables unstable Windows-specific code. /// Use this macro instead of `cfg(windows)` to generate docs properly. macro_rules! cfg_unstable_windows { diff --git a/tokio/src/net/tcp/socket.rs b/tokio/src/net/tcp/socket.rs index aa9639a64cb..679e95866c5 100644 --- a/tokio/src/net/tcp/socket.rs +++ b/tokio/src/net/tcp/socket.rs @@ -777,38 +777,36 @@ impl fmt::Debug for TcpSocket { } } -#[cfg(unix)] -impl AsRawFd for TcpSocket { - fn as_raw_fd(&self) -> RawFd { - self.inner.as_raw_fd() +cfg_unix! 
{ + impl AsRawFd for TcpSocket { + fn as_raw_fd(&self) -> RawFd { + self.inner.as_raw_fd() + } } -} -#[cfg(unix)] -impl AsFd for TcpSocket { - fn as_fd(&self) -> BorrowedFd<'_> { - unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) } + impl AsFd for TcpSocket { + fn as_fd(&self) -> BorrowedFd<'_> { + unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) } + } } -} -#[cfg(unix)] -impl FromRawFd for TcpSocket { - /// Converts a `RawFd` to a `TcpSocket`. - /// - /// # Notes - /// - /// The caller is responsible for ensuring that the socket is in - /// non-blocking mode. - unsafe fn from_raw_fd(fd: RawFd) -> TcpSocket { - let inner = socket2::Socket::from_raw_fd(fd); - TcpSocket { inner } + impl FromRawFd for TcpSocket { + /// Converts a `RawFd` to a `TcpSocket`. + /// + /// # Notes + /// + /// The caller is responsible for ensuring that the socket is in + /// non-blocking mode. + unsafe fn from_raw_fd(fd: RawFd) -> TcpSocket { + let inner = socket2::Socket::from_raw_fd(fd); + TcpSocket { inner } + } } -} -#[cfg(unix)] -impl IntoRawFd for TcpSocket { - fn into_raw_fd(self) -> RawFd { - self.inner.into_raw_fd() + impl IntoRawFd for TcpSocket { + fn into_raw_fd(self) -> RawFd { + self.inner.into_raw_fd() + } } } From f80bbec28ff790d15481a29583c9b778bf0cc40e Mon Sep 17 00:00:00 2001 From: Daniel Sedlak Date: Thu, 18 Jan 2024 11:59:22 +0100 Subject: [PATCH 031/162] io: simplify check for empty slice (#6293) --- tokio/src/io/util/chain.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio/src/io/util/chain.rs b/tokio/src/io/util/chain.rs index 84f37fc7d46..f96f42f4a99 100644 --- a/tokio/src/io/util/chain.rs +++ b/tokio/src/io/util/chain.rs @@ -114,7 +114,7 @@ where if !*me.done_first { match ready!(me.first.poll_fill_buf(cx)?) 
{ - buf if buf.is_empty() => { + [] => { *me.done_first = true; } buf => return Poll::Ready(Ok(buf)), From ec3038357f8dbbae0ab4daf55ff27937ad42abb2 Mon Sep 17 00:00:00 2001 From: "M.Amin Rayej" Date: Mon, 22 Jan 2024 14:05:02 +0330 Subject: [PATCH 032/162] net: add `UnixSocket` (#6290) --- tokio/src/net/mod.rs | 1 + tokio/src/net/unix/datagram/socket.rs | 10 + tokio/src/net/unix/listener.rs | 5 + tokio/src/net/unix/mod.rs | 2 + tokio/src/net/unix/socket.rs | 271 ++++++++++++++++++++++++++ tokio/src/net/unix/stream.rs | 18 ++ tokio/tests/uds_socket.rs | 118 +++++++++++ 7 files changed, 425 insertions(+) create mode 100644 tokio/src/net/unix/socket.rs create mode 100644 tokio/tests/uds_socket.rs diff --git a/tokio/src/net/mod.rs b/tokio/src/net/mod.rs index 2d317a8a219..abc270bd0d8 100644 --- a/tokio/src/net/mod.rs +++ b/tokio/src/net/mod.rs @@ -49,6 +49,7 @@ cfg_net_unix! { pub use unix::datagram::socket::UnixDatagram; pub use unix::listener::UnixListener; pub use unix::stream::UnixStream; + pub use unix::socket::UnixSocket; } cfg_net_windows! { diff --git a/tokio/src/net/unix/datagram/socket.rs b/tokio/src/net/unix/datagram/socket.rs index d92ad5940e0..bec4bf983d5 100644 --- a/tokio/src/net/unix/datagram/socket.rs +++ b/tokio/src/net/unix/datagram/socket.rs @@ -96,6 +96,16 @@ cfg_net_unix! { } impl UnixDatagram { + pub(crate) fn from_mio(sys: mio::net::UnixDatagram) -> io::Result { + let datagram = UnixDatagram::new(sys)?; + + if let Some(e) = datagram.io.take_error()? { + return Err(e); + } + + Ok(datagram) + } + /// Waits for any of the requested ready states. /// /// This function is usually paired with `try_recv()` or `try_send()`. It diff --git a/tokio/src/net/unix/listener.rs b/tokio/src/net/unix/listener.rs index a7e9115eadd..bc7b53b3b53 100644 --- a/tokio/src/net/unix/listener.rs +++ b/tokio/src/net/unix/listener.rs @@ -50,6 +50,11 @@ cfg_net_unix! 
{ } impl UnixListener { + pub(crate) fn new(listener: mio::net::UnixListener) -> io::Result { + let io = PollEvented::new(listener)?; + Ok(UnixListener { io }) + } + /// Creates a new `UnixListener` bound to the specified path. /// /// # Panics diff --git a/tokio/src/net/unix/mod.rs b/tokio/src/net/unix/mod.rs index a49b70af34a..a94fc7b2711 100644 --- a/tokio/src/net/unix/mod.rs +++ b/tokio/src/net/unix/mod.rs @@ -7,6 +7,8 @@ pub mod datagram; pub(crate) mod listener; +pub(crate) mod socket; + mod split; pub use split::{ReadHalf, WriteHalf}; diff --git a/tokio/src/net/unix/socket.rs b/tokio/src/net/unix/socket.rs new file mode 100644 index 00000000000..cb383b09a59 --- /dev/null +++ b/tokio/src/net/unix/socket.rs @@ -0,0 +1,271 @@ +use std::io; +use std::path::Path; + +use std::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, RawFd}; + +use crate::net::{UnixDatagram, UnixListener, UnixStream}; + +cfg_net_unix! { + /// A Unix socket that has not yet been converted to a [`UnixStream`], [`UnixDatagram`], or + /// [`UnixListener`]. + /// + /// `UnixSocket` wraps an operating system socket and enables the caller to + /// configure the socket before establishing a connection or accepting + /// inbound connections. The caller is able to set socket option and explicitly + /// bind the socket with a socket address. + /// + /// The underlying socket is closed when the `UnixSocket` value is dropped. + /// + /// `UnixSocket` should only be used directly if the default configuration used + /// by [`UnixStream::connect`], [`UnixDatagram::bind`], and [`UnixListener::bind`] + /// does not meet the required use case. 
+ /// + /// Calling `UnixStream::connect(path)` effectively performs the same function as: + /// + /// ```no_run + /// use tokio::net::UnixSocket; + /// use std::error::Error; + /// + /// #[tokio::main] + /// async fn main() -> Result<(), Box> { + /// let dir = tempfile::tempdir().unwrap(); + /// let path = dir.path().join("bind_path"); + /// let socket = UnixSocket::new_stream()?; + /// + /// let stream = socket.connect(path).await?; + /// + /// Ok(()) + /// } + /// ``` + /// + /// Calling `UnixDatagram::bind(path)` effectively performs the same function as: + /// + /// ```no_run + /// use tokio::net::UnixSocket; + /// use std::error::Error; + /// + /// #[tokio::main] + /// async fn main() -> Result<(), Box> { + /// let dir = tempfile::tempdir().unwrap(); + /// let path = dir.path().join("bind_path"); + /// let socket = UnixSocket::new_datagram()?; + /// socket.bind(path)?; + /// + /// let datagram = socket.datagram()?; + /// + /// Ok(()) + /// } + /// ``` + /// + /// Calling `UnixListener::bind(path)` effectively performs the same function as: + /// + /// ```no_run + /// use tokio::net::UnixSocket; + /// use std::error::Error; + /// + /// #[tokio::main] + /// async fn main() -> Result<(), Box> { + /// let dir = tempfile::tempdir().unwrap(); + /// let path = dir.path().join("bind_path"); + /// let socket = UnixSocket::new_stream()?; + /// socket.bind(path)?; + /// + /// let listener = socket.listen(1024)?; + /// + /// Ok(()) + /// } + /// ``` + /// + /// Setting socket options not explicitly provided by `UnixSocket` may be done by + /// accessing the [`RawFd`]/[`RawSocket`] using [`AsRawFd`]/[`AsRawSocket`] and + /// setting the option with a crate like [`socket2`]. 
+ /// + /// [`RawFd`]: std::os::fd::RawFd + /// [`RawSocket`]: https://doc.rust-lang.org/std/os/windows/io/type.RawSocket.html + /// [`AsRawFd`]: std::os::fd::AsRawFd + /// [`AsRawSocket`]: https://doc.rust-lang.org/std/os/windows/io/trait.AsRawSocket.html + /// [`socket2`]: https://docs.rs/socket2/ + #[derive(Debug)] + pub struct UnixSocket { + inner: socket2::Socket, + } +} + +impl UnixSocket { + fn ty(&self) -> socket2::Type { + self.inner.r#type().unwrap() + } + + /// Creates a new Unix datagram socket. + /// + /// Calls `socket(2)` with `AF_UNIX` and `SOCK_DGRAM`. + /// + /// # Returns + /// + /// On success, the newly created [`UnixSocket`] is returned. If an error is + /// encountered, it is returned instead. + pub fn new_datagram() -> io::Result { + UnixSocket::new(socket2::Type::DGRAM) + } + + /// Creates a new Unix stream socket. + /// + /// Calls `socket(2)` with `AF_UNIX` and `SOCK_STREAM`. + /// + /// # Returns + /// + /// On success, the newly created [`UnixSocket`] is returned. If an error is + /// encountered, it is returned instead. + pub fn new_stream() -> io::Result { + UnixSocket::new(socket2::Type::STREAM) + } + + fn new(ty: socket2::Type) -> io::Result { + #[cfg(any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "linux", + target_os = "netbsd", + target_os = "openbsd" + ))] + let ty = ty.nonblocking(); + let inner = socket2::Socket::new(socket2::Domain::UNIX, ty, None)?; + #[cfg(not(any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "linux", + target_os = "netbsd", + target_os = "openbsd" + )))] + inner.set_nonblocking(true)?; + Ok(UnixSocket { inner }) + } + + /// Binds the socket to the given address. + /// + /// This calls the `bind(2)` operating-system function. 
+ pub fn bind(&self, path: impl AsRef) -> io::Result<()> { + let addr = socket2::SockAddr::unix(path)?; + self.inner.bind(&addr) + } + + /// Converts the socket into a `UnixListener`. + /// + /// `backlog` defines the maximum number of pending connections are queued + /// by the operating system at any given time. Connection are removed from + /// the queue with [`UnixListener::accept`]. When the queue is full, the + /// operating-system will start rejecting connections. + /// + /// Calling this function on a socket created by [`new_datagram`] will return an error. + /// + /// This calls the `listen(2)` operating-system function, marking the socket + /// as a passive socket. + /// + /// [`new_datagram`]: `UnixSocket::new_datagram` + pub fn listen(self, backlog: u32) -> io::Result { + if self.ty() == socket2::Type::DGRAM { + return Err(io::Error::new( + io::ErrorKind::Other, + "listen cannot be called on a datagram socket", + )); + } + + self.inner.listen(backlog as i32)?; + let mio = { + use std::os::unix::io::{FromRawFd, IntoRawFd}; + + let raw_fd = self.inner.into_raw_fd(); + unsafe { mio::net::UnixListener::from_raw_fd(raw_fd) } + }; + + UnixListener::new(mio) + } + + /// Establishes a Unix connection with a peer at the specified socket address. + /// + /// The `UnixSocket` is consumed. Once the connection is established, a + /// connected [`UnixStream`] is returned. If the connection fails, the + /// encountered error is returned. + /// + /// Calling this function on a socket created by [`new_datagram`] will return an error. + /// + /// This calls the `connect(2)` operating-system function. 
+ /// + /// [`new_datagram`]: `UnixSocket::new_datagram` + pub async fn connect(self, path: impl AsRef) -> io::Result { + if self.ty() == socket2::Type::DGRAM { + return Err(io::Error::new( + io::ErrorKind::Other, + "connect cannot be called on a datagram socket", + )); + } + + let addr = socket2::SockAddr::unix(path)?; + if let Err(err) = self.inner.connect(&addr) { + if err.raw_os_error() != Some(libc::EINPROGRESS) { + return Err(err); + } + } + let mio = { + use std::os::unix::io::{FromRawFd, IntoRawFd}; + + let raw_fd = self.inner.into_raw_fd(); + unsafe { mio::net::UnixStream::from_raw_fd(raw_fd) } + }; + + UnixStream::connect_mio(mio).await + } + + /// Converts the socket into a [`UnixDatagram`]. + /// + /// Calling this function on a socket created by [`new_stream`] will return an error. + /// + /// [`new_stream`]: `UnixSocket::new_stream` + pub fn datagram(self) -> io::Result { + if self.ty() == socket2::Type::STREAM { + return Err(io::Error::new( + io::ErrorKind::Other, + "datagram cannot be called on a stream socket", + )); + } + let mio = { + use std::os::unix::io::{FromRawFd, IntoRawFd}; + + let raw_fd = self.inner.into_raw_fd(); + unsafe { mio::net::UnixDatagram::from_raw_fd(raw_fd) } + }; + + UnixDatagram::from_mio(mio) + } +} + +impl AsRawFd for UnixSocket { + fn as_raw_fd(&self) -> RawFd { + self.inner.as_raw_fd() + } +} + +impl AsFd for UnixSocket { + fn as_fd(&self) -> BorrowedFd<'_> { + unsafe { BorrowedFd::borrow_raw(self.as_raw_fd()) } + } +} + +impl FromRawFd for UnixSocket { + unsafe fn from_raw_fd(fd: RawFd) -> UnixSocket { + let inner = socket2::Socket::from_raw_fd(fd); + UnixSocket { inner } + } +} + +impl IntoRawFd for UnixSocket { + fn into_raw_fd(self) -> RawFd { + self.inner.into_raw_fd() + } +} diff --git a/tokio/src/net/unix/stream.rs b/tokio/src/net/unix/stream.rs index 4821260ff6a..e1a4ff437f7 100644 --- a/tokio/src/net/unix/stream.rs +++ b/tokio/src/net/unix/stream.rs @@ -39,6 +39,24 @@ cfg_net_unix! 
{ } impl UnixStream { + pub(crate) async fn connect_mio(sys: mio::net::UnixStream) -> io::Result { + let stream = UnixStream::new(sys)?; + + // Once we've connected, wait for the stream to be writable as + // that's when the actual connection has been initiated. Once we're + // writable we check for `take_socket_error` to see if the connect + // actually hit an error or not. + // + // If all that succeeded then we ship everything on up. + poll_fn(|cx| stream.io.registration().poll_write_ready(cx)).await?; + + if let Some(e) = stream.io.take_error()? { + return Err(e); + } + + Ok(stream) + } + /// Connects to the socket named by `path`. /// /// This function will create a new Unix socket and connect to the path diff --git a/tokio/tests/uds_socket.rs b/tokio/tests/uds_socket.rs new file mode 100644 index 00000000000..5261ffe5da3 --- /dev/null +++ b/tokio/tests/uds_socket.rs @@ -0,0 +1,118 @@ +#![warn(rust_2018_idioms)] +#![cfg(feature = "full")] +#![cfg(unix)] + +use futures::future::try_join; +use std::io; +use tokio::{ + io::{AsyncReadExt, AsyncWriteExt}, + net::UnixSocket, +}; + +#[tokio::test] +async fn datagram_echo_server() -> io::Result<()> { + let dir = tempfile::tempdir().unwrap(); + let server_path = dir.path().join("server.sock"); + let client_path = dir.path().join("client.sock"); + + let server_socket = { + let socket = UnixSocket::new_datagram()?; + socket.bind(&server_path)?; + socket.datagram()? 
+ }; + + tokio::spawn(async move { + let mut recv_buf = vec![0u8; 1024]; + loop { + let (len, peer_addr) = server_socket.recv_from(&mut recv_buf[..]).await?; + if let Some(path) = peer_addr.as_pathname() { + server_socket.send_to(&recv_buf[..len], path).await?; + } + } + + #[allow(unreachable_code)] + Ok::<(), io::Error>(()) + }); + + { + let socket = UnixSocket::new_datagram()?; + socket.bind(&client_path).unwrap(); + let socket = socket.datagram()?; + + socket.connect(server_path)?; + socket.send(b"ECHO").await?; + + let mut recv_buf = [0u8; 16]; + let len = socket.recv(&mut recv_buf[..]).await?; + assert_eq!(&recv_buf[..len], b"ECHO"); + } + + Ok(()) +} + +#[tokio::test] +async fn listen_and_stream() -> std::io::Result<()> { + let dir = tempfile::Builder::new().tempdir().unwrap(); + let sock_path = dir.path().join("connect.sock"); + let peer_path = dir.path().join("peer.sock"); + + let listener = { + let sock = UnixSocket::new_stream()?; + sock.bind(&sock_path)?; + sock.listen(1024)? + }; + + let accept = listener.accept(); + let connect = { + let sock = UnixSocket::new_stream()?; + sock.bind(&peer_path)?; + sock.connect(&sock_path) + }; + + let ((mut server, _), mut client) = try_join(accept, connect).await?; + + assert_eq!( + server.peer_addr().unwrap().as_pathname().unwrap(), + &peer_path + ); + + // Write to the client. + client.write_all(b"hello").await?; + drop(client); + + // Read from the server. 
+ let mut buf = vec![]; + server.read_to_end(&mut buf).await?; + assert_eq!(&buf, b"hello"); + let len = server.read(&mut buf).await?; + assert_eq!(len, 0); + Ok(()) +} + +#[tokio::test] +async fn assert_usage() -> std::io::Result<()> { + let datagram_socket = UnixSocket::new_datagram()?; + let result = datagram_socket + .connect(std::path::PathBuf::new().join("invalid.sock")) + .await; + assert_eq!( + result.unwrap_err().to_string(), + "connect cannot be called on a datagram socket" + ); + + let datagram_socket = UnixSocket::new_datagram()?; + let result = datagram_socket.listen(1024); + assert_eq!( + result.unwrap_err().to_string(), + "listen cannot be called on a datagram socket" + ); + + let stream_socket = UnixSocket::new_stream()?; + let result = stream_socket.datagram(); + assert_eq!( + result.unwrap_err().to_string(), + "datagram cannot be called on a stream socket" + ); + + Ok(()) +} From 4846959e8a534cf2aff63f8613e5f69a7d2ce5f8 Mon Sep 17 00:00:00 2001 From: kim / Motoyuki Kimura Date: Thu, 25 Jan 2024 00:22:28 +0900 Subject: [PATCH 033/162] runtime: remove obsolete comment (#6303) --- tokio/src/runtime/park.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/tokio/src/runtime/park.rs b/tokio/src/runtime/park.rs index 98a8fe5a31d..2fa0f501c6d 100644 --- a/tokio/src/runtime/park.rs +++ b/tokio/src/runtime/park.rs @@ -272,7 +272,6 @@ impl CachedParkThread { use std::task::Context; use std::task::Poll::Ready; - // `get_unpark()` should not return a Result let waker = self.waker()?; let mut cx = Context::from_waker(&waker); From b6d0c9091d4f3c6a90ad8764d3c65f8480a48676 Mon Sep 17 00:00:00 2001 From: JungChul Shin <3996272+ik1ne@users.noreply.github.com> Date: Fri, 26 Jan 2024 22:51:44 +0900 Subject: [PATCH 034/162] macros: fix trait_method breaking change detection (#6308) --- tokio/tests/macros_test.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tokio/tests/macros_test.rs b/tokio/tests/macros_test.rs index 
b5095e36e4b..69ee30b36ce 100644 --- a/tokio/tests/macros_test.rs +++ b/tokio/tests/macros_test.rs @@ -25,10 +25,16 @@ async fn unused_braces_test() { assert_eq!(1 + 1, 2) } fn trait_method() { trait A { fn f(self); + + fn g(self); } impl A for () { #[tokio::main] - async fn f(self) {} + async fn f(self) { + self.g() + } + + fn g(self) {} } ().f() } From 753613206566d3e818b8a6e2466121f0f18c70f4 Mon Sep 17 00:00:00 2001 From: Sergei Fomin Date: Sat, 27 Jan 2024 22:52:55 +0400 Subject: [PATCH 035/162] sync: use AtomicBool in broadcast channel future (#6298) --- benches/Cargo.toml | 5 +++ benches/sync_broadcast.rs | 82 ++++++++++++++++++++++++++++++++++ tokio/src/sync/broadcast.rs | 89 ++++++++++++++++++++++++------------- 3 files changed, 145 insertions(+), 31 deletions(-) create mode 100644 benches/sync_broadcast.rs diff --git a/benches/Cargo.toml b/benches/Cargo.toml index 1eea2e04489..c581055cf65 100644 --- a/benches/Cargo.toml +++ b/benches/Cargo.toml @@ -26,6 +26,11 @@ name = "spawn" path = "spawn.rs" harness = false +[[bench]] +name = "sync_broadcast" +path = "sync_broadcast.rs" +harness = false + [[bench]] name = "sync_mpsc" path = "sync_mpsc.rs" diff --git a/benches/sync_broadcast.rs b/benches/sync_broadcast.rs new file mode 100644 index 00000000000..38a2141387b --- /dev/null +++ b/benches/sync_broadcast.rs @@ -0,0 +1,82 @@ +use rand::{Rng, RngCore, SeedableRng}; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::Arc; +use tokio::sync::{broadcast, Notify}; + +use criterion::measurement::WallTime; +use criterion::{black_box, criterion_group, criterion_main, BenchmarkGroup, Criterion}; + +fn rt() -> tokio::runtime::Runtime { + tokio::runtime::Builder::new_multi_thread() + .worker_threads(6) + .build() + .unwrap() +} + +fn do_work(rng: &mut impl RngCore) -> u32 { + use std::fmt::Write; + let mut message = String::new(); + for i in 1..=10 { + let _ = write!(&mut message, " {i}={}", rng.gen::()); + } + message + .as_bytes() + .iter() + .map(|&c| c as 
u32) + .fold(0, u32::wrapping_add) +} + +fn contention_impl(g: &mut BenchmarkGroup) { + let rt = rt(); + + let (tx, _rx) = broadcast::channel::(1000); + let wg = Arc::new((AtomicUsize::new(0), Notify::new())); + + for n in 0..N_TASKS { + let wg = wg.clone(); + let mut rx = tx.subscribe(); + let mut rng = rand::rngs::StdRng::seed_from_u64(n as u64); + rt.spawn(async move { + while let Ok(_) = rx.recv().await { + let r = do_work(&mut rng); + let _ = black_box(r); + if wg.0.fetch_sub(1, Ordering::Relaxed) == 1 { + wg.1.notify_one(); + } + } + }); + } + + const N_ITERS: usize = 100; + + g.bench_function(N_TASKS.to_string(), |b| { + b.iter(|| { + rt.block_on({ + let wg = wg.clone(); + let tx = tx.clone(); + async move { + for i in 0..N_ITERS { + assert_eq!(wg.0.fetch_add(N_TASKS, Ordering::Relaxed), 0); + tx.send(i).unwrap(); + while wg.0.load(Ordering::Relaxed) > 0 { + wg.1.notified().await; + } + } + } + }) + }) + }); +} + +fn bench_contention(c: &mut Criterion) { + let mut group = c.benchmark_group("contention"); + contention_impl::<10>(&mut group); + contention_impl::<100>(&mut group); + contention_impl::<500>(&mut group); + contention_impl::<1000>(&mut group); + group.finish(); +} + +criterion_group!(contention, bench_contention); + +criterion_main!(contention); diff --git a/tokio/src/sync/broadcast.rs b/tokio/src/sync/broadcast.rs index 568a50bd59b..499e5296da4 100644 --- a/tokio/src/sync/broadcast.rs +++ b/tokio/src/sync/broadcast.rs @@ -117,7 +117,7 @@ //! 
``` use crate::loom::cell::UnsafeCell; -use crate::loom::sync::atomic::AtomicUsize; +use crate::loom::sync::atomic::{AtomicBool, AtomicUsize}; use crate::loom::sync::{Arc, Mutex, MutexGuard, RwLock, RwLockReadGuard}; use crate::util::linked_list::{self, GuardedLinkedList, LinkedList}; use crate::util::WakeList; @@ -127,7 +127,7 @@ use std::future::Future; use std::marker::PhantomPinned; use std::pin::Pin; use std::ptr::NonNull; -use std::sync::atomic::Ordering::SeqCst; +use std::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst}; use std::task::{Context, Poll, Waker}; use std::usize; @@ -354,7 +354,7 @@ struct Slot { /// An entry in the wait queue. struct Waiter { /// True if queued. - queued: bool, + queued: AtomicBool, /// Task waiting on the broadcast channel. waker: Option, @@ -369,7 +369,7 @@ struct Waiter { impl Waiter { fn new() -> Self { Self { - queued: false, + queued: AtomicBool::new(false), waker: None, pointers: linked_list::Pointers::new(), _p: PhantomPinned, @@ -897,15 +897,22 @@ impl Shared { 'outer: loop { while wakers.can_push() { match list.pop_back_locked(&mut tail) { - Some(mut waiter) => { - // Safety: `tail` lock is still held. - let waiter = unsafe { waiter.as_mut() }; - - assert!(waiter.queued); - waiter.queued = false; - - if let Some(waker) = waiter.waker.take() { - wakers.push(waker); + Some(waiter) => { + unsafe { + // Safety: accessing `waker` is safe because + // the tail lock is held. + if let Some(waker) = (*waiter.as_ptr()).waker.take() { + wakers.push(waker); + } + + // Safety: `queued` is atomic. + let queued = &(*waiter.as_ptr()).queued; + // `Relaxed` suffices because the tail lock is held. + assert!(queued.load(Relaxed)); + // `Release` is needed to synchronize with `Recv::drop`. + // It is critical to set this variable **after** waker + // is extracted, otherwise we may data race with `Recv::drop`. 
+ queued.store(false, Release); } } None => { @@ -1104,8 +1111,13 @@ impl Receiver { } } - if !(*ptr).queued { - (*ptr).queued = true; + // If the waiter is not already queued, enqueue it. + // `Relaxed` order suffices: we have synchronized with + // all writers through the tail lock that we hold. + if !(*ptr).queued.load(Relaxed) { + // `Relaxed` order suffices: all the readers will + // synchronize with this write through the tail lock. + (*ptr).queued.store(true, Relaxed); tail.waiters.push_front(NonNull::new_unchecked(&mut *ptr)); } }); @@ -1357,7 +1369,7 @@ impl<'a, T> Recv<'a, T> { Recv { receiver, waiter: UnsafeCell::new(Waiter { - queued: false, + queued: AtomicBool::new(false), waker: None, pointers: linked_list::Pointers::new(), _p: PhantomPinned, @@ -1402,22 +1414,37 @@ where impl<'a, T> Drop for Recv<'a, T> { fn drop(&mut self) { - // Acquire the tail lock. This is required for safety before accessing - // the waiter node. - let mut tail = self.receiver.shared.tail.lock(); - - // safety: tail lock is held - let queued = self.waiter.with(|ptr| unsafe { (*ptr).queued }); - + // Safety: `waiter.queued` is atomic. + // Acquire ordering is required to synchronize with + // `Shared::notify_rx` before we drop the object. + let queued = self + .waiter + .with(|ptr| unsafe { (*ptr).queued.load(Acquire) }); + + // If the waiter is queued, we need to unlink it from the waiters list. + // If not, no further synchronization is required, since the waiter + // is not in the list and, as such, is not shared with any other threads. if queued { - // Remove the node - // - // safety: tail lock is held and the wait node is verified to be in - // the list. - unsafe { - self.waiter.with_mut(|ptr| { - tail.waiters.remove((&mut *ptr).into()); - }); + // Acquire the tail lock. This is required for safety before accessing + // the waiter node. + let mut tail = self.receiver.shared.tail.lock(); + + // Safety: tail lock is held. 
+ // `Relaxed` order suffices because we hold the tail lock. + let queued = self + .waiter + .with_mut(|ptr| unsafe { (*ptr).queued.load(Relaxed) }); + + if queued { + // Remove the node + // + // safety: tail lock is held and the wait node is verified to be in + // the list. + unsafe { + self.waiter.with_mut(|ptr| { + tail.waiters.remove((&mut *ptr).into()); + }); + } } } } From e53b92a9939565edb33575fff296804279e5e419 Mon Sep 17 00:00:00 2001 From: "M.Amin Rayej" Date: Sun, 28 Jan 2024 00:00:15 +0330 Subject: [PATCH 036/162] io: clarify `clear_ready` docs (#6304) --- tokio/src/io/async_fd.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/tokio/src/io/async_fd.rs b/tokio/src/io/async_fd.rs index b27c60bf6c7..aaf17584198 100644 --- a/tokio/src/io/async_fd.rs +++ b/tokio/src/io/async_fd.rs @@ -826,6 +826,10 @@ impl<'a, Inner: AsRawFd> AsyncFdReadyGuard<'a, Inner> { /// _actually observes_ that the file descriptor is _not_ ready. Do not call /// it simply because, for example, a read succeeded; it should be called /// when a read is observed to block. + /// + /// This method only clears readiness events that happened before the creation of this guard. + /// In other words, if the IO resource becomes ready between the creation of the guard and + /// this call to `clear_ready`, then the readiness is not actually cleared. pub fn clear_ready(&mut self) { if let Some(event) = self.event.take() { self.async_fd.registration.clear_readiness(event); @@ -846,6 +850,10 @@ impl<'a, Inner: AsRawFd> AsyncFdReadyGuard<'a, Inner> { /// block. For example when a read blocks when using a combined interest, /// only clear `Ready::READABLE`. /// + /// This method only clears readiness events that happened before the creation of this guard. + /// In other words, if the IO resource becomes ready between the creation of the guard and + /// this call to `clear_ready`, then the readiness is not actually cleared. 
+ /// /// # Examples /// /// Concurrently read and write to a [`std::net::TcpStream`] on the same task without @@ -1042,6 +1050,10 @@ impl<'a, Inner: AsRawFd> AsyncFdReadyMutGuard<'a, Inner> { /// _actually observes_ that the file descriptor is _not_ ready. Do not call /// it simply because, for example, a read succeeded; it should be called /// when a read is observed to block. + /// + /// This method only clears readiness events that happened before the creation of this guard. + /// In other words, if the IO resource becomes ready between the creation of the guard and + /// this call to `clear_ready`, then the readiness is not actually cleared. pub fn clear_ready(&mut self) { if let Some(event) = self.event.take() { self.async_fd.registration.clear_readiness(event); @@ -1062,6 +1074,10 @@ impl<'a, Inner: AsRawFd> AsyncFdReadyMutGuard<'a, Inner> { /// block. For example when a read blocks when using a combined interest, /// only clear `Ready::READABLE`. /// + /// This method only clears readiness events that happened before the creation of this guard. + /// In other words, if the IO resource becomes ready between the creation of the guard and + /// this call to `clear_ready`, then the readiness is not actually cleared. 
+ /// /// # Examples /// /// Concurrently read and write to a [`std::net::TcpStream`] on the same task without From 131e7b4e49c8849298ba54b4e0c99f4b81d869e3 Mon Sep 17 00:00:00 2001 From: Owen Leung Date: Mon, 29 Jan 2024 17:53:43 +0800 Subject: [PATCH 037/162] ci: add spellchecking (#6297) --- .github/workflows/ci.yml | 20 ++ CONTRIBUTING.md | 18 ++ Cargo.toml | 4 + spellcheck.dic | 279 ++++++++++++++++++ spellcheck.toml | 13 + tokio-stream/src/lib.rs | 2 +- tokio-stream/src/stream_map.rs | 4 +- tokio-stream/src/wrappers/watch.rs | 2 +- tokio-util/src/codec/framed.rs | 2 +- tokio-util/src/codec/mod.rs | 2 +- tokio-util/src/io/inspect.rs | 4 +- .../src/sync/cancellation_token/tree_node.rs | 14 +- tokio-util/src/task/join_map.rs | 2 +- tokio/src/doc/os.rs | 44 +-- .../src/fs/open_options/mock_open_options.rs | 2 +- tokio/src/io/interest.rs | 4 +- tokio/src/io/mod.rs | 4 +- tokio/src/io/poll_evented.rs | 2 +- tokio/src/io/stdio_common.rs | 2 +- tokio/src/io/util/async_buf_read_ext.rs | 2 +- tokio/src/io/util/buf_reader.rs | 8 +- tokio/src/io/util/buf_writer.rs | 6 +- tokio/src/io/util/read_exact.rs | 2 +- tokio/src/io/util/read_line.rs | 2 +- tokio/src/io/util/shutdown.rs | 2 +- tokio/src/lib.rs | 30 +- tokio/src/loom/std/atomic_u64.rs | 2 +- tokio/src/macros/cfg.rs | 2 +- tokio/src/macros/try_join.rs | 2 +- tokio/src/net/tcp/listener.rs | 2 +- tokio/src/net/tcp/stream.rs | 8 +- tokio/src/net/udp.rs | 32 +- tokio/src/net/unix/datagram/socket.rs | 10 +- tokio/src/net/unix/listener.rs | 2 +- tokio/src/net/unix/stream.rs | 2 +- tokio/src/net/windows/named_pipe.rs | 4 +- tokio/src/process/mod.rs | 12 +- tokio/src/runtime/blocking/pool.rs | 6 +- tokio/src/runtime/builder.rs | 4 +- tokio/src/runtime/context.rs | 2 +- tokio/src/runtime/handle.rs | 6 +- tokio/src/runtime/io/driver.rs | 2 +- tokio/src/runtime/io/scheduled_io.rs | 6 +- .../runtime/scheduler/multi_thread/park.rs | 4 +- .../runtime/scheduler/multi_thread/queue.rs | 4 +- 
.../runtime/scheduler/multi_thread/stats.rs | 2 +- .../runtime/scheduler/multi_thread/worker.rs | 26 +- .../scheduler/multi_thread_alt/queue.rs | 4 +- .../scheduler/multi_thread_alt/stats.rs | 2 +- .../scheduler/multi_thread_alt/worker.rs | 26 +- tokio/src/runtime/task/core.rs | 4 +- tokio/src/runtime/task/harness.rs | 2 +- tokio/src/runtime/task/join.rs | 2 +- tokio/src/runtime/task/list.rs | 12 +- tokio/src/runtime/task/mod.rs | 82 ++--- tokio/src/runtime/task/state.rs | 4 +- tokio/src/runtime/task/trace/mod.rs | 4 +- tokio/src/runtime/tests/loom_local.rs | 2 +- tokio/src/runtime/time/entry.rs | 32 +- tokio/src/runtime/time/source.rs | 2 +- tokio/src/runtime/time/wheel/level.rs | 2 +- tokio/src/signal/ctrl_c.rs | 4 +- tokio/src/signal/mod.rs | 2 +- tokio/src/signal/registry.rs | 4 +- tokio/src/signal/unix.rs | 34 +-- tokio/src/signal/windows.rs | 14 +- tokio/src/sync/mpsc/error.rs | 2 +- tokio/src/sync/oneshot.rs | 10 +- tokio/src/task/local.rs | 8 +- tokio/src/task/mod.rs | 10 +- tokio/src/util/idle_notified_set.rs | 4 +- tokio/src/util/rand.rs | 4 +- tokio/src/util/sharded_list.rs | 6 +- 73 files changed, 616 insertions(+), 282 deletions(-) create mode 100644 spellcheck.dic create mode 100644 spellcheck.toml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 60a962f8b25..c0e728e63b0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -71,6 +71,7 @@ jobs: - check-external-types - check-fuzzing - check-unstable-mt-counters + - check-spelling steps: - run: exit 0 @@ -994,3 +995,22 @@ jobs: - name: Check /tokio-stream/ run: cargo fuzz check --all-features working-directory: tokio-stream + + check-spelling: + name: check-spelling + needs: basics + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Install Rust ${{ env.rust_stable }} + uses: dtolnay/rust-toolchain@stable + with: + toolchain: ${{ env.rust_stable }} + - name: Install cargo-spellcheck + uses: taiki-e/install-action@v2 + with: + tool: 
cargo-spellcheck + - uses: actions/checkout@v4 + - name: Run cargo-spellcheck + run: cargo spellcheck --code 1 + diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index a1fd3bf0d28..cd6f651a2ae 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -194,6 +194,24 @@ MIRIFLAGS="-Zmiri-disable-isolation -Zmiri-tag-raw-pointers" \ cargo +nightly miri test --features full --lib ``` +### Performing spellcheck on tokio codebase + +You can perform spell-check on tokio codebase. For details of how to use the spellcheck tool, feel free to visit +https://github.com/drahnr/cargo-spellcheck +``` +# First install the spell-check plugin +cargo install --locked cargo-spellcheck + +# Then run the cargo spell check command +cargo spellcheck check +``` + +if the command rejects a word, you should backtick the rejected word if it's code related. If not, the +rejected word should be put into `spellcheck.dic` file. + +Note that when you add a word into the file, you should also update the first line which tells the spellcheck tool +the total number of words included in the file + ### Tests If the change being proposed alters code (as opposed to only documentation for diff --git a/Cargo.toml b/Cargo.toml index f3e19312e8b..d8ac248189d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,3 +14,7 @@ members = [ "tests-build", "tests-integration", ] + +[workspace.metadata.spellcheck] +config = "spellcheck.toml" + diff --git a/spellcheck.dic b/spellcheck.dic new file mode 100644 index 00000000000..ddedb90730d --- /dev/null +++ b/spellcheck.dic @@ -0,0 +1,279 @@ +279 +\ +~ +~4 +~12 +±1m +±1ms +— +& ++ +0o777 +0s +0xA +0xD +100ms +10ms +1ms +1s +250ms +2x +443 +450ms +50ms +8MB +< += +> +adaptor +adaptors +Adaptors +AIO +ambiant +amongst +api +APIs +async +awaitable +backend +backpressure +backtrace +backtraces +backtracing +binded +bitfield +bitfields +Blockingly +boolean +broadcasted +cancelled +cancelling +Cancelling +CLI +cloneable +codebase +codec +codecs +combinator +combinators +Config +config 
+connectionless +cpu +cpus +Customizable +Datagram +datagram +datagrams +deallocate +deallocated +Deallocates +decrementing +dequeued +deregister +deregistered +deregistering +Deregisters +deregisters +deregistration +descriptor's +destructor +destructors +destructure +Destructures +Dev +dns +DNS +DoS +dwOpenMode +endian +enqueue +enqueued +EntryInner +enum +eof +errored +EWMA +expirations +fcntl +fd's +FIFOs +filename +filesystem +filesystems +fn +fns +FreeBSD +frontend +fs +functionalities +getters +GID +Growable +gzip +hashmaps +HashMaps +hashsets +ie +Illumos +impl +implementers +implementor +implementors +incrementing +interoperate +Invariants +invariants +io +IOCP +iOS +IOs +IP +IPv4 +IPv6 +iteratively +latencies +Lauck +libc +lifecycle +lifo +lookups +macOS +MacOS +Marsaglia's +metadata +mio +Mio +mio's +misconfigured +mock's +mpmc +mpsc +Multi +multi +multicast +Multithreaded +mut +mutex +Mutex +Nagle +nonblocking +nondecreasing +noop +ntasks +ok +oneshot +ORed +os +overweighing +parker +parsers +peekable +PGID +PID +plaintext +poller +POSIX +proxied +qos +RAII +reallocations +recv's +refactors +refcount +refcounting +repo +repr +representable +reqwest +reregistering +resize +resized +RMW +runtime +runtimes +runtime's +rwlock +rx +scalability +scheduler's +semver +setpgid +sharded +signalled +signalling +SmallCrush +Solaris +spawner +Splitter +spmc +spsc +src +stabilised +startup +stateful +stderr +stdin +stdout +stealable +stealer +stealers +struct +structs +subfield +suboptimal +subprocess +superset +symlink +symlinks +sys +syscall +syscalls +TCP +tcp +TestU01 +threadpool +timestamp +timestamps +TLS +TOCTOU +TODO +Tokio +tokio +tokio's +Tokio's +Tuple +tuple +tx +UDP +udp +UID +unhandled +unix +unlink +unpark +Unpark +unparked +unparking +Unparks +unparks +unreceived +unsafety +Unsets +unsynchronized +untrusted +usecases +Valgrind +Varghese +vec +versa +versioned +versioning +vtable +waker +wakers +Wakers +wakeup +wakeups +workstealing + diff --git 
a/spellcheck.toml b/spellcheck.toml new file mode 100644 index 00000000000..fb14364be0c --- /dev/null +++ b/spellcheck.toml @@ -0,0 +1,13 @@ +dev_comments = false +skip_readme = false + +[Hunspell] +lang = "en_US" +search_dirs = ["."] +extra_dictionaries = ["spellcheck.dic"] +skip_os_lookups = true +use_builtin = true + +[Hunspell.quirks] +allow_concatenation = true + diff --git a/tokio-stream/src/lib.rs b/tokio-stream/src/lib.rs index 28ca9963ce1..b6e651c7b8b 100644 --- a/tokio-stream/src/lib.rs +++ b/tokio-stream/src/lib.rs @@ -54,7 +54,7 @@ //! //! [async-stream]: https://docs.rs/async-stream //! -//! # Conversion to and from AsyncRead/AsyncWrite +//! # Conversion to and from `AsyncRead`/`AsyncWrite` //! //! It is often desirable to convert a `Stream` into an [`AsyncRead`], //! especially when dealing with plaintext formats streamed over the network. diff --git a/tokio-stream/src/stream_map.rs b/tokio-stream/src/stream_map.rs index 041e477aa51..3f424eca221 100644 --- a/tokio-stream/src/stream_map.rs +++ b/tokio-stream/src/stream_map.rs @@ -658,9 +658,9 @@ mod rand { /// Fast random number generate /// - /// Implement xorshift64+: 2 32-bit xorshift sequences added together. + /// Implement `xorshift64+`: 2 32-bit `xorshift` sequences added together. /// Shift triplet `[17,7,16]` was calculated as indicated in Marsaglia's - /// Xorshift paper: + /// `Xorshift` paper: /// This generator passes the SmallCrush suite, part of TestU01 framework: /// #[derive(Debug)] diff --git a/tokio-stream/src/wrappers/watch.rs b/tokio-stream/src/wrappers/watch.rs index ec8ead06da0..a1ea646035a 100644 --- a/tokio-stream/src/wrappers/watch.rs +++ b/tokio-stream/src/wrappers/watch.rs @@ -10,7 +10,7 @@ use tokio::sync::watch::error::RecvError; /// A wrapper around [`tokio::sync::watch::Receiver`] that implements [`Stream`]. 
/// -/// This stream will start by yielding the current value when the WatchStream is polled, +/// This stream will start by yielding the current value when the `WatchStream` is polled, /// regardless of whether it was the initial value or sent afterwards, /// unless you use [`WatchStream::from_changes`]. /// diff --git a/tokio-util/src/codec/framed.rs b/tokio-util/src/codec/framed.rs index 8a344f90db2..e988da0a734 100644 --- a/tokio-util/src/codec/framed.rs +++ b/tokio-util/src/codec/framed.rs @@ -130,7 +130,7 @@ impl Framed { /// things like gzip or TLS, which require both read and write access to the /// underlying object. /// - /// This objects takes a stream and a readbuffer and a writebuffer. These field + /// This objects takes a stream and a `readbuffer` and a `writebuffer`. These field /// can be obtained from an existing `Framed` with the [`into_parts`] method. /// /// If you want to work more directly with the streams and sink, consider diff --git a/tokio-util/src/codec/mod.rs b/tokio-util/src/codec/mod.rs index 2295176bdce..98a2f724425 100644 --- a/tokio-util/src/codec/mod.rs +++ b/tokio-util/src/codec/mod.rs @@ -1,4 +1,4 @@ -//! Adaptors from AsyncRead/AsyncWrite to Stream/Sink +//! Adaptors from `AsyncRead`/`AsyncWrite` to Stream/Sink //! //! Raw I/O objects work with byte sequences, but higher-level code usually //! wants to batch these into meaningful chunks, called "frames". diff --git a/tokio-util/src/io/inspect.rs b/tokio-util/src/io/inspect.rs index c860b803571..7604d9a3de1 100644 --- a/tokio-util/src/io/inspect.rs +++ b/tokio-util/src/io/inspect.rs @@ -18,7 +18,7 @@ pin_project! { } impl InspectReader { - /// Create a new InspectReader, wrapping `reader` and calling `f` for the + /// Create a new `InspectReader`, wrapping `reader` and calling `f` for the /// new data supplied by each read call. /// /// The closure will only be called with an empty slice if the inner reader @@ -100,7 +100,7 @@ pin_project! 
{ } impl InspectWriter { - /// Create a new InspectWriter, wrapping `write` and calling `f` for the + /// Create a new `InspectWriter`, wrapping `write` and calling `f` for the /// data successfully written by each write call. /// /// The closure `f` will never be called with an empty slice. A vectored diff --git a/tokio-util/src/sync/cancellation_token/tree_node.rs b/tokio-util/src/sync/cancellation_token/tree_node.rs index 0263f311164..f042e4e79e1 100644 --- a/tokio-util/src/sync/cancellation_token/tree_node.rs +++ b/tokio-util/src/sync/cancellation_token/tree_node.rs @@ -1,16 +1,16 @@ -//! This mod provides the logic for the inner tree structure of the CancellationToken. +//! This mod provides the logic for the inner tree structure of the `CancellationToken`. //! -//! CancellationTokens are only light handles with references to [`TreeNode`]. +//! `CancellationTokens` are only light handles with references to [`TreeNode`]. //! All the logic is actually implemented in the [`TreeNode`]. //! //! A [`TreeNode`] is part of the cancellation tree and may have one parent and an arbitrary number of //! children. //! -//! A [`TreeNode`] can receive the request to perform a cancellation through a CancellationToken. +//! A [`TreeNode`] can receive the request to perform a cancellation through a `CancellationToken`. //! This cancellation request will cancel the node and all of its descendants. //! //! As soon as a node cannot get cancelled any more (because it was already cancelled or it has no -//! more CancellationTokens pointing to it any more), it gets removed from the tree, to keep the +//! more `CancellationTokens` pointing to it any more), it gets removed from the tree, to keep the //! tree as small as possible. //! //! # Invariants @@ -66,7 +66,7 @@ impl TreeNode { } } -/// The data contained inside a TreeNode. +/// The data contained inside a `TreeNode`. /// /// This struct exists so that the data of the node can be wrapped /// in a Mutex. 
@@ -198,7 +198,7 @@ where /// `parent` MUST have been a parent of the node when they both got locked, /// otherwise there is a potential for a deadlock as invariant #2 would be violated. /// -/// To acquire the locks for node and parent, use [with_locked_node_and_parent]. +/// To acquire the locks for node and parent, use [`with_locked_node_and_parent`]. fn move_children_to_parent(node: &mut Inner, parent: &mut Inner) { // Pre-allocate in the parent, for performance parent.children.reserve(node.children.len()); @@ -216,7 +216,7 @@ fn move_children_to_parent(node: &mut Inner, parent: &mut Inner) { /// Removes a child from the parent. /// /// `parent` MUST be the parent of `node`. -/// To acquire the locks for node and parent, use [with_locked_node_and_parent]. +/// To acquire the locks for node and parent, use [`with_locked_node_and_parent`]. fn remove_child(parent: &mut Inner, mut node: MutexGuard<'_, Inner>) { // Query the position from where to remove a node let pos = node.parent_idx; diff --git a/tokio-util/src/task/join_map.rs b/tokio-util/src/task/join_map.rs index 412aa96c10b..13e27bb670b 100644 --- a/tokio-util/src/task/join_map.rs +++ b/tokio-util/src/task/join_map.rs @@ -878,7 +878,7 @@ impl Eq for Key {} #[derive(Debug, Clone)] pub struct JoinMapKeys<'a, K, V> { iter: hashbrown::hash_map::Keys<'a, Key, AbortHandle>, - /// To make it easier to change JoinMap in the future, keep V as a generic + /// To make it easier to change `JoinMap` in the future, keep V as a generic /// parameter. _value: PhantomData<&'a V>, } diff --git a/tokio/src/doc/os.rs b/tokio/src/doc/os.rs index cded8b97cc0..337f8969afa 100644 --- a/tokio/src/doc/os.rs +++ b/tokio/src/doc/os.rs @@ -1,67 +1,67 @@ -//! See [std::os](https://doc.rust-lang.org/std/os/index.html). +//! See [`std::os`](https://doc.rust-lang.org/std/os/index.html). /// Platform-specific extensions to `std` for Windows. /// -/// See [std::os::windows](https://doc.rust-lang.org/std/os/windows/index.html). 
+/// See [`std::os::windows`](https://doc.rust-lang.org/std/os/windows/index.html). pub mod windows { /// Windows-specific extensions to general I/O primitives. /// - /// See [std::os::windows::io](https://doc.rust-lang.org/std/os/windows/io/index.html). + /// See [`std::os::windows::io`](https://doc.rust-lang.org/std/os/windows/io/index.html). pub mod io { - /// See [std::os::windows::io::RawHandle](https://doc.rust-lang.org/std/os/windows/io/type.RawHandle.html) + /// See [`std::os::windows::io::RawHandle`](https://doc.rust-lang.org/std/os/windows/io/type.RawHandle.html) pub type RawHandle = crate::doc::NotDefinedHere; - /// See [std::os::windows::io::OwnedHandle](https://doc.rust-lang.org/std/os/windows/io/struct.OwnedHandle.html) + /// See [`std::os::windows::io::OwnedHandle`](https://doc.rust-lang.org/std/os/windows/io/struct.OwnedHandle.html) pub type OwnedHandle = crate::doc::NotDefinedHere; - /// See [std::os::windows::io::AsRawHandle](https://doc.rust-lang.org/std/os/windows/io/trait.AsRawHandle.html) + /// See [`std::os::windows::io::AsRawHandle`](https://doc.rust-lang.org/std/os/windows/io/trait.AsRawHandle.html) pub trait AsRawHandle { - /// See [std::os::windows::io::AsRawHandle::as_raw_handle](https://doc.rust-lang.org/std/os/windows/io/trait.AsRawHandle.html#tymethod.as_raw_handle) + /// See [`std::os::windows::io::AsRawHandle::as_raw_handle`](https://doc.rust-lang.org/std/os/windows/io/trait.AsRawHandle.html#tymethod.as_raw_handle) fn as_raw_handle(&self) -> RawHandle; } - /// See [std::os::windows::io::FromRawHandle](https://doc.rust-lang.org/std/os/windows/io/trait.FromRawHandle.html) + /// See [`std::os::windows::io::FromRawHandle`](https://doc.rust-lang.org/std/os/windows/io/trait.FromRawHandle.html) pub trait FromRawHandle { - /// See [std::os::windows::io::FromRawHandle::from_raw_handle](https://doc.rust-lang.org/std/os/windows/io/trait.FromRawHandle.html#tymethod.from_raw_handle) + /// See 
[`std::os::windows::io::FromRawHandle::from_raw_handle`](https://doc.rust-lang.org/std/os/windows/io/trait.FromRawHandle.html#tymethod.from_raw_handle) unsafe fn from_raw_handle(handle: RawHandle) -> Self; } - /// See [std::os::windows::io::RawSocket](https://doc.rust-lang.org/std/os/windows/io/type.RawSocket.html) + /// See [`std::os::windows::io::RawSocket`](https://doc.rust-lang.org/std/os/windows/io/type.RawSocket.html) pub type RawSocket = crate::doc::NotDefinedHere; - /// See [std::os::windows::io::AsRawSocket](https://doc.rust-lang.org/std/os/windows/io/trait.AsRawSocket.html) + /// See [`std::os::windows::io::AsRawSocket`](https://doc.rust-lang.org/std/os/windows/io/trait.AsRawSocket.html) pub trait AsRawSocket { - /// See [std::os::windows::io::AsRawSocket::as_raw_socket](https://doc.rust-lang.org/std/os/windows/io/trait.AsRawSocket.html#tymethod.as_raw_socket) + /// See [`std::os::windows::io::AsRawSocket::as_raw_socket`](https://doc.rust-lang.org/std/os/windows/io/trait.AsRawSocket.html#tymethod.as_raw_socket) fn as_raw_socket(&self) -> RawSocket; } - /// See [std::os::windows::io::FromRawSocket](https://doc.rust-lang.org/std/os/windows/io/trait.FromRawSocket.html) + /// See [`std::os::windows::io::FromRawSocket`](https://doc.rust-lang.org/std/os/windows/io/trait.FromRawSocket.html) pub trait FromRawSocket { - /// See [std::os::windows::io::FromRawSocket::from_raw_socket](https://doc.rust-lang.org/std/os/windows/io/trait.FromRawSocket.html#tymethod.from_raw_socket) + /// See [`std::os::windows::io::FromRawSocket::from_raw_socket`](https://doc.rust-lang.org/std/os/windows/io/trait.FromRawSocket.html#tymethod.from_raw_socket) unsafe fn from_raw_socket(sock: RawSocket) -> Self; } - /// See [std::os::windows::io::IntoRawSocket](https://doc.rust-lang.org/std/os/windows/io/trait.IntoRawSocket.html) + /// See [`std::os::windows::io::IntoRawSocket`](https://doc.rust-lang.org/std/os/windows/io/trait.IntoRawSocket.html) pub trait IntoRawSocket { - /// See 
[std::os::windows::io::IntoRawSocket::into_raw_socket](https://doc.rust-lang.org/std/os/windows/io/trait.IntoRawSocket.html#tymethod.into_raw_socket) + /// See [`std::os::windows::io::IntoRawSocket::into_raw_socket`](https://doc.rust-lang.org/std/os/windows/io/trait.IntoRawSocket.html#tymethod.into_raw_socket) fn into_raw_socket(self) -> RawSocket; } - /// See [std::os::windows::io::BorrowedHandle](https://doc.rust-lang.org/std/os/windows/io/struct.BorrowedHandle.html) + /// See [`std::os::windows::io::BorrowedHandle`](https://doc.rust-lang.org/std/os/windows/io/struct.BorrowedHandle.html) pub type BorrowedHandle<'handle> = crate::doc::NotDefinedHere; - /// See [std::os::windows::io::AsHandle](https://doc.rust-lang.org/std/os/windows/io/trait.AsHandle.html) + /// See [`std::os::windows::io::AsHandle`](https://doc.rust-lang.org/std/os/windows/io/trait.AsHandle.html) pub trait AsHandle { - /// See [std::os::windows::io::AsHandle::as_handle](https://doc.rust-lang.org/std/os/windows/io/trait.AsHandle.html#tymethod.as_handle) + /// See [`std::os::windows::io::AsHandle::as_handle`](https://doc.rust-lang.org/std/os/windows/io/trait.AsHandle.html#tymethod.as_handle) fn as_handle(&self) -> BorrowedHandle<'_>; } - /// See [std::os::windows::io::BorrowedSocket](https://doc.rust-lang.org/std/os/windows/io/struct.BorrowedSocket.html) + /// See [`std::os::windows::io::BorrowedSocket`](https://doc.rust-lang.org/std/os/windows/io/struct.BorrowedSocket.html) pub type BorrowedSocket<'socket> = crate::doc::NotDefinedHere; - /// See [std::os::windows::io::AsSocket](https://doc.rust-lang.org/std/os/windows/io/trait.AsSocket.html) + /// See [`std::os::windows::io::AsSocket`](https://doc.rust-lang.org/std/os/windows/io/trait.AsSocket.html) pub trait AsSocket { - /// See [std::os::windows::io::AsSocket::as_socket](https://doc.rust-lang.org/std/os/windows/io/trait.AsSocket.html#tymethod.as_socket) + /// See 
[`std::os::windows::io::AsSocket::as_socket`](https://doc.rust-lang.org/std/os/windows/io/trait.AsSocket.html#tymethod.as_socket) fn as_socket(&self) -> BorrowedSocket<'_>; } } diff --git a/tokio/src/fs/open_options/mock_open_options.rs b/tokio/src/fs/open_options/mock_open_options.rs index 17b4a48640a..2fbdd282a3a 100644 --- a/tokio/src/fs/open_options/mock_open_options.rs +++ b/tokio/src/fs/open_options/mock_open_options.rs @@ -1,5 +1,5 @@ #![allow(unreachable_pub)] -//! Mock version of std::fs::OpenOptions; +//! Mock version of `std::fs::OpenOptions`; use mockall::mock; use crate::fs::mocks::MockFile; diff --git a/tokio/src/io/interest.rs b/tokio/src/io/interest.rs index e8cc0c42d84..a15b28cf79a 100644 --- a/tokio/src/io/interest.rs +++ b/tokio/src/io/interest.rs @@ -40,11 +40,11 @@ impl Interest { #[cfg(not(target_os = "freebsd"))] pub const AIO: Interest = Interest(READABLE); - /// Interest for POSIX AIO lio_listio events. + /// Interest for POSIX AIO `lio_listio` events. #[cfg(target_os = "freebsd")] pub const LIO: Interest = Interest(LIO); - /// Interest for POSIX AIO lio_listio events. + /// Interest for POSIX AIO `lio_listio` events. #[cfg(not(target_os = "freebsd"))] pub const LIO: Interest = Interest(READABLE); } diff --git a/tokio/src/io/mod.rs b/tokio/src/io/mod.rs index ff35a0e0f7e..5e903c04842 100644 --- a/tokio/src/io/mod.rs +++ b/tokio/src/io/mod.rs @@ -4,7 +4,7 @@ //! defines two traits, [`AsyncRead`] and [`AsyncWrite`], which are asynchronous //! versions of the [`Read`] and [`Write`] traits in the standard library. //! -//! # AsyncRead and AsyncWrite +//! # `AsyncRead` and `AsyncWrite` //! //! Like the standard library's [`Read`] and [`Write`] traits, [`AsyncRead`] and //! [`AsyncWrite`] provide the most general interface for reading and writing @@ -122,7 +122,7 @@ //! [`BufReader`]: crate::io::BufReader //! [`BufWriter`]: crate::io::BufWriter //! -//! ## Implementing AsyncRead and AsyncWrite +//! ## Implementing `AsyncRead` and `AsyncWrite` //! 
//! Because they are traits, we can implement [`AsyncRead`] and [`AsyncWrite`] for //! our own types, as well. Note that these traits must only be implemented for diff --git a/tokio/src/io/poll_evented.rs b/tokio/src/io/poll_evented.rs index 67beb5b1551..3952a31e783 100644 --- a/tokio/src/io/poll_evented.rs +++ b/tokio/src/io/poll_evented.rs @@ -47,7 +47,7 @@ cfg_io_driver! { /// This clears the readiness state until a new readiness event is received. /// /// This allows the caller to implement additional functions. For example, - /// [`TcpListener`] implements poll_accept by using [`poll_read_ready`] and + /// [`TcpListener`] implements `poll_accept` by using [`poll_read_ready`] and /// [`clear_readiness`]. /// /// ## Platform-specific events diff --git a/tokio/src/io/stdio_common.rs b/tokio/src/io/stdio_common.rs index 792b3a40002..c32b889e582 100644 --- a/tokio/src/io/stdio_common.rs +++ b/tokio/src/io/stdio_common.rs @@ -4,7 +4,7 @@ use std::pin::Pin; use std::task::{Context, Poll}; /// # Windows /// [`AsyncWrite`] adapter that finds last char boundary in given buffer and does not write the rest, -/// if buffer contents seems to be utf8. Otherwise it only trims buffer down to `MAX_BUF`. +/// if buffer contents seems to be `utf8`. Otherwise it only trims buffer down to `MAX_BUF`. /// That's why, wrapped writer will always receive well-formed utf-8 bytes. /// # Other platforms /// Passes data to `inner` as is. diff --git a/tokio/src/io/util/async_buf_read_ext.rs b/tokio/src/io/util/async_buf_read_ext.rs index 2aee3925843..92500f7f869 100644 --- a/tokio/src/io/util/async_buf_read_ext.rs +++ b/tokio/src/io/util/async_buf_read_ext.rs @@ -302,7 +302,7 @@ cfg_io_util! { /// /// The stream returned from this function will yield instances of /// [`io::Result`]`<`[`Option`]`<`[`String`]`>>`. Each string returned will *not* have a newline - /// byte (the 0xA byte) or CRLF (0xD, 0xA bytes) at the end. + /// byte (the 0xA byte) or `CRLF` (0xD, 0xA bytes) at the end. 
/// /// [`io::Result`]: std::io::Result /// [`Option`]: core::option::Option diff --git a/tokio/src/io/util/buf_reader.rs b/tokio/src/io/util/buf_reader.rs index 60879c0fdc2..d9307202c13 100644 --- a/tokio/src/io/util/buf_reader.rs +++ b/tokio/src/io/util/buf_reader.rs @@ -145,13 +145,13 @@ impl AsyncBufRead for BufReader { #[derive(Debug, Clone, Copy)] pub(super) enum SeekState { - /// start_seek has not been called. + /// `start_seek` has not been called. Init, - /// start_seek has been called, but poll_complete has not yet been called. + /// `start_seek` has been called, but `poll_complete` has not yet been called. Start(SeekFrom), - /// Waiting for completion of the first poll_complete in the `n.checked_sub(remainder).is_none()` branch. + /// Waiting for completion of the first `poll_complete` in the `n.checked_sub(remainder).is_none()` branch. PendingOverflowed(i64), - /// Waiting for completion of poll_complete. + /// Waiting for completion of `poll_complete`. Pending, } diff --git a/tokio/src/io/util/buf_writer.rs b/tokio/src/io/util/buf_writer.rs index 8f398fecdf9..2971a8e057a 100644 --- a/tokio/src/io/util/buf_writer.rs +++ b/tokio/src/io/util/buf_writer.rs @@ -212,11 +212,11 @@ impl AsyncWrite for BufWriter { #[derive(Debug, Clone, Copy)] pub(super) enum SeekState { - /// start_seek has not been called. + /// `start_seek` has not been called. Init, - /// start_seek has been called, but poll_complete has not yet been called. + /// `start_seek` has been called, but `poll_complete` has not yet been called. Start(SeekFrom), - /// Waiting for completion of poll_complete. + /// Waiting for completion of `poll_complete`. Pending, } diff --git a/tokio/src/io/util/read_exact.rs b/tokio/src/io/util/read_exact.rs index dbdd58bae99..217315dcb91 100644 --- a/tokio/src/io/util/read_exact.rs +++ b/tokio/src/io/util/read_exact.rs @@ -12,7 +12,7 @@ use std::task::{Context, Poll}; /// a buffer. /// /// Created by the [`AsyncReadExt::read_exact`][read_exact]. 
-/// [read_exact]: [crate::io::AsyncReadExt::read_exact]
+/// [read_exact]: crate::io::AsyncReadExt::read_exact
 pub(crate) fn read_exact<'a, A>(reader: &'a mut A, buf: &'a mut [u8]) -> ReadExact<'a, A>
 where
     A: AsyncRead + Unpin + ?Sized,
diff --git a/tokio/src/io/util/read_line.rs b/tokio/src/io/util/read_line.rs
index e641f515324..eacc7d59602 100644
--- a/tokio/src/io/util/read_line.rs
+++ b/tokio/src/io/util/read_line.rs
@@ -51,7 +51,7 @@ fn put_back_original_data(output: &mut String, mut vector: Vec, num_bytes_re
 /// This handles the various failure cases and puts the string back into `output`.
 ///
-/// The `truncate_on_io_error` bool is necessary because `read_to_string` and `read_line`
+/// The `truncate_on_io_error` `bool` is necessary because `read_to_string` and `read_line`
 /// disagree on what should happen when an IO error occurs.
 pub(super) fn finish_string_read(
     io_res: io::Result,
diff --git a/tokio/src/io/util/shutdown.rs b/tokio/src/io/util/shutdown.rs
index 6d30b004b1c..fcd20dc402d 100644
--- a/tokio/src/io/util/shutdown.rs
+++ b/tokio/src/io/util/shutdown.rs
@@ -11,7 +11,8 @@ pin_project! {
     /// A future used to shutdown an I/O object.
     ///
     /// Created by the [`AsyncWriteExt::shutdown`][shutdown] function.
-    /// [shutdown]: crate::io::AsyncWriteExt::shutdown
+    ///
+    /// [shutdown]: crate::io::AsyncWriteExt::shutdown
     #[must_use = "futures do nothing unless you `.await` or poll them"]
     #[derive(Debug)]
     pub struct Shutdown<'a, A: ?Sized> {
diff --git a/tokio/src/lib.rs b/tokio/src/lib.rs
index 3a979396831..3f035098832 100644
--- a/tokio/src/lib.rs
+++ b/tokio/src/lib.rs
@@ -31,8 +31,8 @@
 //! * APIs for [performing asynchronous I/O][io], including [TCP and UDP][net] sockets,
 //! [filesystem][fs] operations, and [process] and [signal] management.
 //! * A [runtime] for executing asynchronous code, including a task scheduler,
-//! an I/O driver backed by the operating system's event queue (epoll, kqueue,
-//! IOCP, etc...), and a high performance timer.
+//! an I/O driver backed by the operating system's event queue (`epoll`, `kqueue`,
+//! `IOCP`, etc...), and a high performance timer.
 //!
 //! Guide level documentation is found on the [website].
 //!
@@ -330,11 +330,11 @@
 //! - `signal`: Enables all `tokio::signal` types.
 //! - `fs`: Enables `tokio::fs` types.
 //! - `test-util`: Enables testing based infrastructure for the Tokio runtime.
-//! - `parking_lot`: As a potential optimization, use the _parking_lot_ crate's
+//! - `parking_lot`: As a potential optimization, use the `parking_lot` crate's
 //! synchronization primitives internally. Also, this
 //! dependency is necessary to construct some of our primitives
-//! in a const context. MSRV may increase according to the
-//! _parking_lot_ release in use.
+//! in a `const` context. `MSRV` may increase according to the
+//! `parking_lot` release in use.
 //!
 //! _Note: `AsyncRead` and `AsyncWrite` traits do not require any features and are
 //! always available._
@@ -409,9 +409,9 @@
 //!
 //! [mio-supported]: https://crates.io/crates/mio#platforms
 //!
-//! ### WASM support
+//! ### `WASM` support
 //!
-//! Tokio has some limited support for the WASM platform. Without the
+//! Tokio has some limited support for the `WASM` platform. Without the
 //! `tokio_unstable` flag, the following features are supported:
 //!
 //! * `sync`
@@ -423,22 +423,22 @@
 //! Enabling any other feature (including `full`) will cause a compilation
 //! failure.
 //!
-//! The `time` module will only work on WASM platforms that have support for
-//! timers (e.g. wasm32-wasi). The timing functions will panic if used on a WASM
+//! The `time` module will only work on `WASM` platforms that have support for
+//! timers (e.g. wasm32-wasi). The timing functions will panic if used on a `WASM`
 //! platform that does not support timers.
 //!
 //! Note also that if the runtime becomes indefinitely idle, it will panic
 //! immediately instead of blocking forever. On platforms that don't support
 //!
time, this means that the runtime can never be idle in any way. //! -//! ### Unstable WASM support +//! ### Unstable `WASM` support //! -//! Tokio also has unstable support for some additional WASM features. This +//! Tokio also has unstable support for some additional `WASM` features. This //! requires the use of the `tokio_unstable` flag. //! //! Using this flag enables the use of `tokio::net` on the wasm32-wasi target. -//! However, not all methods are available on the networking types as WASI -//! currently does not support the creation of new sockets from within WASM. +//! However, not all methods are available on the networking types as `WASI` +//! currently does not support the creation of new sockets from within `WASM`. //! Because of this, sockets must currently be created via the `FromRawFd` //! trait. @@ -596,7 +596,7 @@ mod util; /// reach `std` on a stable compiler in time for the 1.0 release of Tokio. For /// this reason, the team has decided to move all `Stream` based utilities to /// the [`tokio-stream`] crate. While this is not ideal, once `Stream` has made -/// it into the standard library and the MSRV period has passed, we will implement +/// it into the standard library and the `MSRV` period has passed, we will implement /// stream for our different types. /// /// While this may seem unfortunate, not all is lost as you can get much of the @@ -689,6 +689,6 @@ cfg_macros! { #[cfg(test)] fn is_unpin() {} -/// fuzz test (fuzz_linked_list) +/// fuzz test (`fuzz_linked_list`) #[cfg(fuzzing)] pub mod fuzz; diff --git a/tokio/src/loom/std/atomic_u64.rs b/tokio/src/loom/std/atomic_u64.rs index ce391be3e11..ff6002114cd 100644 --- a/tokio/src/loom/std/atomic_u64.rs +++ b/tokio/src/loom/std/atomic_u64.rs @@ -1,4 +1,4 @@ -//! Implementation of an atomic u64 cell. On 64 bit platforms, this is a +//! Implementation of an atomic `u64` cell. On 64 bit platforms, this is a //! re-export of `AtomicU64`. On 32 bit platforms, this is implemented using a //! `Mutex`. 
diff --git a/tokio/src/macros/cfg.rs b/tokio/src/macros/cfg.rs index 5f0c6638857..d2f7b42bf60 100644 --- a/tokio/src/macros/cfg.rs +++ b/tokio/src/macros/cfg.rs @@ -49,7 +49,7 @@ macro_rules! cfg_unstable_windows { } } -/// Enables enter::block_on. +/// Enables `enter::block_on`. macro_rules! cfg_block_on { ($($item:item)*) => { $( diff --git a/tokio/src/macros/try_join.rs b/tokio/src/macros/try_join.rs index 7b123709231..a72cdb4a87a 100644 --- a/tokio/src/macros/try_join.rs +++ b/tokio/src/macros/try_join.rs @@ -30,7 +30,7 @@ /// /// # Examples /// -/// Basic try_join with two branches. +/// Basic `try_join` with two branches. /// /// ``` /// async fn do_stuff_async() -> Result<(), &'static str> { diff --git a/tokio/src/net/tcp/listener.rs b/tokio/src/net/tcp/listener.rs index f1befac26dc..3f6592abe19 100644 --- a/tokio/src/net/tcp/listener.rs +++ b/tokio/src/net/tcp/listener.rs @@ -58,7 +58,7 @@ cfg_net! { impl TcpListener { cfg_not_wasi! { - /// Creates a new TcpListener, which will be bound to the specified address. + /// Creates a new `TcpListener`, which will be bound to the specified address. /// /// The returned listener is ready for accepting connections. /// diff --git a/tokio/src/net/tcp/stream.rs b/tokio/src/net/tcp/stream.rs index 9b604b339f3..e20473e5cc3 100644 --- a/tokio/src/net/tcp/stream.rs +++ b/tokio/src/net/tcp/stream.rs @@ -1060,7 +1060,7 @@ impl TcpStream { /// returns the number of bytes peeked. /// /// Successive calls return the same data. This is accomplished by passing - /// `MSG_PEEK` as a flag to the underlying recv system call. + /// `MSG_PEEK` as a flag to the underlying `recv` system call. /// /// # Examples /// @@ -1178,13 +1178,13 @@ impl TcpStream { socket2::SockRef::from(self).linger() } - /// Sets the linger duration of this socket by setting the SO_LINGER option. + /// Sets the linger duration of this socket by setting the `SO_LINGER` option. 
/// /// This option controls the action taken when a stream has unsent messages and the stream is - /// closed. If SO_LINGER is set, the system shall block the process until it can transmit the + /// closed. If `SO_LINGER` is set, the system shall block the process until it can transmit the /// data or until the time expires. /// - /// If SO_LINGER is not specified, and the stream is closed, the system handles the call in a + /// If `SO_LINGER` is not specified, and the stream is closed, the system handles the call in a /// way that allows the process to continue as quickly as possible. /// /// # Examples diff --git a/tokio/src/net/udp.rs b/tokio/src/net/udp.rs index 74ea41d8378..4e2c140a856 100644 --- a/tokio/src/net/udp.rs +++ b/tokio/src/net/udp.rs @@ -180,7 +180,7 @@ impl UdpSocket { /// This function is intended to be used to wrap a UDP socket from the /// standard library in the Tokio equivalent. /// - /// This can be used in conjunction with socket2's `Socket` interface to + /// This can be used in conjunction with `socket2`'s `Socket` interface to /// configure a socket before it's handed off, such as setting options like /// `reuse_address` or binding to multiple addresses. /// @@ -313,7 +313,7 @@ impl UdpSocket { } /// Connects the UDP socket setting the default destination for send() and - /// limiting packets that are read via recv from the address specified in + /// limiting packets that are read via `recv` from the address specified in /// `addr`. /// /// # Example @@ -358,7 +358,7 @@ impl UdpSocket { /// Waits for any of the requested ready states. /// /// This function is usually paired with `try_recv()` or `try_send()`. It - /// can be used to concurrently recv / send to the same socket on a single + /// can be used to concurrently `recv` / `send` to the same socket on a single /// task without splitting the socket. /// /// The function may complete without the socket being ready. 
This is a @@ -786,7 +786,7 @@ impl UdpSocket { /// The [`connect`] method will connect this socket to a remote address. This method /// resolves to an error if the socket is not connected. /// - /// Note that on multiple calls to a `poll_*` method in the recv direction, only the + /// Note that on multiple calls to a `poll_*` method in the `recv` direction, only the /// `Waker` from the `Context` passed to the most recent call will be scheduled to /// receive a wakeup. /// @@ -825,7 +825,7 @@ impl UdpSocket { /// address to which it is connected. On success, returns the number of /// bytes read. /// - /// This method must be called with valid byte array buf of sufficient size + /// This method must be called with valid byte array `buf` of sufficient size /// to hold the message bytes. If a message is too long to fit in the /// supplied buffer, excess bytes may be discarded. /// @@ -881,7 +881,7 @@ impl UdpSocket { /// Tries to receive data from the stream into the provided buffer, advancing the /// buffer's internal cursor, returning how many bytes were read. /// - /// This method must be called with valid byte array buf of sufficient size + /// This method must be called with valid byte array `buf` of sufficient size /// to hold the message bytes. If a message is too long to fit in the /// supplied buffer, excess bytes may be discarded. /// @@ -949,7 +949,7 @@ impl UdpSocket { /// to which it is connected, advancing the buffer's internal cursor, /// returning how many bytes were read. /// - /// This method must be called with valid byte array buf of sufficient size + /// This method must be called with valid byte array `buf` of sufficient size /// to hold the message bytes. If a message is too long to fit in the /// supplied buffer, excess bytes may be discarded. /// @@ -996,7 +996,7 @@ impl UdpSocket { /// Tries to receive a single datagram message on the socket. On success, /// returns the number of bytes read and the origin. 
/// - /// This method must be called with valid byte array buf of sufficient size + /// This method must be called with valid byte array `buf` of sufficient size /// to hold the message bytes. If a message is too long to fit in the /// supplied buffer, excess bytes may be discarded. /// @@ -1071,7 +1071,7 @@ impl UdpSocket { /// Receives a single datagram message on the socket, advancing the /// buffer's internal cursor, returning how many bytes were read and the origin. /// - /// This method must be called with valid byte array buf of sufficient size + /// This method must be called with valid byte array `buf` of sufficient size /// to hold the message bytes. If a message is too long to fit in the /// supplied buffer, excess bytes may be discarded. /// @@ -1311,7 +1311,7 @@ impl UdpSocket { /// Attempts to receive a single datagram on the socket. /// - /// Note that on multiple calls to a `poll_*` method in the recv direction, only the + /// Note that on multiple calls to a `poll_*` method in the `recv` direction, only the /// `Waker` from the `Context` passed to the most recent call will be scheduled to /// receive a wakeup. /// @@ -1360,7 +1360,7 @@ impl UdpSocket { /// Tries to receive a single datagram message on the socket. On success, /// returns the number of bytes read and the origin. /// - /// This method must be called with valid byte array buf of sufficient size + /// This method must be called with valid byte array `buf` of sufficient size /// to hold the message bytes. If a message is too long to fit in the /// supplied buffer, excess bytes may be discarded. /// @@ -1507,7 +1507,7 @@ impl UdpSocket { /// /// On Windows, if the data is larger than the buffer specified, the buffer /// is filled with the first part of the data, and `peek_from` returns the error - /// WSAEMSGSIZE(10040). The excess data is lost. + /// `WSAEMSGSIZE(10040)`. The excess data is lost. 
/// Make sure to always use a sufficiently large buffer to hold the /// maximum UDP packet size, which can be up to 65536 bytes in size. /// @@ -1555,13 +1555,13 @@ impl UdpSocket { /// /// # Notes /// - /// Note that on multiple calls to a `poll_*` method in the recv direction, only the + /// Note that on multiple calls to a `poll_*` method in the `recv` direction, only the /// `Waker` from the `Context` passed to the most recent call will be scheduled to /// receive a wakeup /// /// On Windows, if the data is larger than the buffer specified, the buffer /// is filled with the first part of the data, and peek returns the error - /// WSAEMSGSIZE(10040). The excess data is lost. + /// `WSAEMSGSIZE(10040)`. The excess data is lost. /// Make sure to always use a sufficiently large buffer to hold the /// maximum UDP packet size, which can be up to 65536 bytes in size. /// @@ -1623,7 +1623,7 @@ impl UdpSocket { /// /// On Windows, if the data is larger than the buffer specified, the buffer /// is filled with the first part of the data, and peek returns the error - /// WSAEMSGSIZE(10040). The excess data is lost. + /// `WSAEMSGSIZE(10040)`. The excess data is lost. /// Make sure to always use a sufficiently large buffer to hold the /// maximum UDP packet size, which can be up to 65536 bytes in size. /// @@ -1674,7 +1674,7 @@ impl UdpSocket { /// /// # Notes /// - /// Note that on multiple calls to a `poll_*` method in the recv direction, only the + /// Note that on multiple calls to a `poll_*` method in the `recv` direction, only the /// `Waker` from the `Context` passed to the most recent call will be scheduled to /// receive a wakeup. /// diff --git a/tokio/src/net/unix/datagram/socket.rs b/tokio/src/net/unix/datagram/socket.rs index bec4bf983d5..0da20f81f53 100644 --- a/tokio/src/net/unix/datagram/socket.rs +++ b/tokio/src/net/unix/datagram/socket.rs @@ -109,7 +109,7 @@ impl UnixDatagram { /// Waits for any of the requested ready states. 
/// /// This function is usually paired with `try_recv()` or `try_send()`. It - /// can be used to concurrently recv / send to the same socket on a single + /// can be used to concurrently `recv` / `send` to the same socket on a single /// task without splitting the socket. /// /// The function may complete without the socket being ready. This is a @@ -435,12 +435,12 @@ impl UnixDatagram { /// Creates new [`UnixDatagram`] from a [`std::os::unix::net::UnixDatagram`]. /// - /// This function is intended to be used to wrap a UnixDatagram from the + /// This function is intended to be used to wrap a `UnixDatagram` from the /// standard library in the Tokio equivalent. /// /// # Notes /// - /// The caller is responsible for ensuring that the socker is in + /// The caller is responsible for ensuring that the socket is in /// non-blocking mode. Otherwise all I/O operations on the socket /// will block the thread, which will cause unexpected behavior. /// Non-blocking mode can be set using [`set_nonblocking`]. @@ -1141,7 +1141,7 @@ impl UnixDatagram { /// Attempts to receive a single datagram on the specified address. /// - /// Note that on multiple calls to a `poll_*` method in the recv direction, only the + /// Note that on multiple calls to a `poll_*` method in the `recv` direction, only the /// `Waker` from the `Context` passed to the most recent call will be scheduled to /// receive a wakeup. /// @@ -1244,7 +1244,7 @@ impl UnixDatagram { /// The [`connect`] method will connect this socket to a remote address. This method /// resolves to an error if the socket is not connected. /// - /// Note that on multiple calls to a `poll_*` method in the recv direction, only the + /// Note that on multiple calls to a `poll_*` method in the `recv` direction, only the /// `Waker` from the `Context` passed to the most recent call will be scheduled to /// receive a wakeup. 
/// diff --git a/tokio/src/net/unix/listener.rs b/tokio/src/net/unix/listener.rs index bc7b53b3b53..79b554ee1ab 100644 --- a/tokio/src/net/unix/listener.rs +++ b/tokio/src/net/unix/listener.rs @@ -77,7 +77,7 @@ impl UnixListener { /// Creates new [`UnixListener`] from a [`std::os::unix::net::UnixListener`]. /// - /// This function is intended to be used to wrap a UnixListener from the + /// This function is intended to be used to wrap a `UnixListener` from the /// standard library in the Tokio equivalent. /// /// # Notes diff --git a/tokio/src/net/unix/stream.rs b/tokio/src/net/unix/stream.rs index e1a4ff437f7..60d58139699 100644 --- a/tokio/src/net/unix/stream.rs +++ b/tokio/src/net/unix/stream.rs @@ -762,7 +762,7 @@ impl UnixStream { /// Creates new [`UnixStream`] from a [`std::os::unix::net::UnixStream`]. /// - /// This function is intended to be used to wrap a UnixStream from the + /// This function is intended to be used to wrap a `UnixStream` from the /// standard library in the Tokio equivalent. /// /// # Notes diff --git a/tokio/src/net/windows/named_pipe.rs b/tokio/src/net/windows/named_pipe.rs index a03e1d0acd1..98e63f0c450 100644 --- a/tokio/src/net/windows/named_pipe.rs +++ b/tokio/src/net/windows/named_pipe.rs @@ -2059,7 +2059,7 @@ impl ServerOptions { /// /// ``` /// use std::{io, os::windows::prelude::AsRawHandle, ptr}; - // + /// /// use tokio::net::windows::named_pipe::ServerOptions; /// use windows_sys::{ /// Win32::Foundation::ERROR_SUCCESS, @@ -2094,7 +2094,7 @@ impl ServerOptions { /// /// ``` /// use std::{io, os::windows::prelude::AsRawHandle, ptr}; - // + /// /// use tokio::net::windows::named_pipe::ServerOptions; /// use windows_sys::{ /// Win32::Foundation::ERROR_ACCESS_DENIED, diff --git a/tokio/src/process/mod.rs b/tokio/src/process/mod.rs index a688f63f213..0fad67cd01a 100644 --- a/tokio/src/process/mod.rs +++ b/tokio/src/process/mod.rs @@ -739,12 +739,12 @@ impl Command { } /// Sets the process group ID (PGID) of the child process. 
Equivalent to a - /// setpgid call in the child process, but may be more efficient. + /// `setpgid` call in the child process, but may be more efficient. /// /// Process groups determine which processes receive signals. /// /// **Note**: This is an [unstable API][unstable] but will be stabilised once - /// tokio's MSRV is sufficiently new. See [the documentation on + /// tokio's `MSRV` is sufficiently new. See [the documentation on /// unstable features][unstable] for details about using unstable features. /// /// If you want similar behavior without using this unstable feature you can @@ -1109,7 +1109,7 @@ impl Child { /// Attempts to force the child to exit, but does not wait for the request /// to take effect. /// - /// On Unix platforms, this is the equivalent to sending a SIGKILL. Note + /// On Unix platforms, this is the equivalent to sending a `SIGKILL`. Note /// that on Unix platforms it is possible for a zombie process to remain /// after a kill is sent; to avoid this, the caller should ensure that either /// `child.wait().await` or `child.try_wait()` is invoked successfully. @@ -1125,12 +1125,12 @@ impl Child { /// Forces the child to exit. /// - /// This is equivalent to sending a SIGKILL on unix platforms. + /// This is equivalent to sending a `SIGKILL` on unix platforms. /// /// If the child has to be killed remotely, it is possible to do it using - /// a combination of the select! macro and a oneshot channel. In the following + /// a combination of the select! macro and a `oneshot` channel. In the following /// example, the child will run until completion unless a message is sent on - /// the oneshot channel. If that happens, the child is killed immediately + /// the `oneshot` channel. If that happens, the child is killed immediately /// using the `.kill()` method. 
/// /// ```no_run diff --git a/tokio/src/runtime/blocking/pool.rs b/tokio/src/runtime/blocking/pool.rs index 3b6de8d7917..c74aea76568 100644 --- a/tokio/src/runtime/blocking/pool.rs +++ b/tokio/src/runtime/blocking/pool.rs @@ -105,16 +105,16 @@ struct Shared { num_notify: u32, shutdown: bool, shutdown_tx: Option, - /// Prior to shutdown, we clean up JoinHandles by having each timed-out + /// Prior to shutdown, we clean up `JoinHandles` by having each timed-out /// thread join on the previous timed-out thread. This is not strictly /// necessary but helps avoid Valgrind false positives, see /// /// for more information. last_exiting_thread: Option>, - /// This holds the JoinHandles for all running threads; on shutdown, the thread + /// This holds the `JoinHandles` for all running threads; on shutdown, the thread /// calling shutdown handles joining on these. worker_threads: HashMap>, - /// This is a counter used to iterate worker_threads in a consistent order (for loom's + /// This is a counter used to iterate `worker_threads` in a consistent order (for loom's /// benefit). worker_thread_index: usize, } diff --git a/tokio/src/runtime/builder.rs b/tokio/src/runtime/builder.rs index 78e6bf50d62..e20a3c4955b 100644 --- a/tokio/src/runtime/builder.rs +++ b/tokio/src/runtime/builder.rs @@ -78,7 +78,7 @@ pub struct Builder { /// To run after each thread is unparked. pub(super) after_unpark: Option, - /// Customizable keep alive timeout for BlockingPool + /// Customizable keep alive timeout for `BlockingPool` pub(super) keep_alive: Option, /// How many ticks before pulling a task from the global/remote queue? @@ -723,7 +723,7 @@ impl Builder { /// Sets a custom timeout for a thread in the blocking pool. /// /// By default, the timeout for a thread is set to 10 seconds. This can - /// be overridden using .thread_keep_alive(). + /// be overridden using `.thread_keep_alive()`. 
/// /// # Example /// diff --git a/tokio/src/runtime/context.rs b/tokio/src/runtime/context.rs index 07875a0723f..62e4fc9474c 100644 --- a/tokio/src/runtime/context.rs +++ b/tokio/src/runtime/context.rs @@ -61,7 +61,7 @@ struct Context { rng: Cell>, /// Tracks the amount of "work" a task may still do before yielding back to - /// the sheduler + /// the scheduler budget: Cell, #[cfg(all( diff --git a/tokio/src/runtime/handle.rs b/tokio/src/runtime/handle.rs index 4a833539f38..7e7e5636c80 100644 --- a/tokio/src/runtime/handle.rs +++ b/tokio/src/runtime/handle.rs @@ -463,7 +463,7 @@ cfg_taskdump! { /// ## Debug Info Must Be Available /// /// To produce task traces, the application must **not** be compiled - /// with split debuginfo. On Linux, including debuginfo within the + /// with `split debuginfo`. On Linux, including `debuginfo` within the /// application binary is the (correct) default. You can further ensure /// this behavior with the following directive in your `Cargo.toml`: /// @@ -475,7 +475,7 @@ cfg_taskdump! { /// ## Unstable Features /// /// This functionality is **unstable**, and requires both the - /// `tokio_unstable` and `tokio_taskdump` cfg flags to be set. + /// `tokio_unstable` and `tokio_taskdump` `cfg` flags to be set. /// /// You can do this by setting the `RUSTFLAGS` environment variable /// before invoking `cargo`; e.g.: @@ -495,7 +495,7 @@ cfg_taskdump! { /// /// ## Platform Requirements /// - /// Task dumps are supported on Linux atop aarch64, x86 and x86_64. + /// Task dumps are supported on Linux atop `aarch64`, `x86` and `x86_64`. /// /// ## Current Thread Runtime Requirements /// diff --git a/tokio/src/runtime/io/driver.rs b/tokio/src/runtime/io/driver.rs index c34a2ac060a..bece3560b72 100644 --- a/tokio/src/runtime/io/driver.rs +++ b/tokio/src/runtime/io/driver.rs @@ -40,7 +40,7 @@ pub(crate) struct Handle { synced: Mutex, /// Used to wake up the reactor from a call to `turn`. 
- /// Not supported on Wasi due to lack of threading support. + /// Not supported on `Wasi` due to lack of threading support. #[cfg(not(target_os = "wasi"))] waker: mio::Waker, diff --git a/tokio/src/runtime/io/scheduled_io.rs b/tokio/src/runtime/io/scheduled_io.rs index c214beff039..527bb9808de 100644 --- a/tokio/src/runtime/io/scheduled_io.rs +++ b/tokio/src/runtime/io/scheduled_io.rs @@ -114,10 +114,10 @@ struct Waiters { /// List of all current waiters. list: WaitList, - /// Waker used for AsyncRead. + /// Waker used for `AsyncRead`. reader: Option, - /// Waker used for AsyncWrite. + /// Waker used for `AsyncWrite`. writer: Option, } @@ -191,7 +191,7 @@ impl ScheduledIo { mio::Token(self as *const _ as usize) } - /// Invoked when the IO driver is shut down; forces this ScheduledIo into a + /// Invoked when the IO driver is shut down; forces this `ScheduledIo` into a /// permanently shutdown state. pub(super) fn shutdown(&self) { let mask = SHUTDOWN.pack(1, 0); diff --git a/tokio/src/runtime/scheduler/multi_thread/park.rs b/tokio/src/runtime/scheduler/multi_thread/park.rs index 87be200a12d..bc369387395 100644 --- a/tokio/src/runtime/scheduler/multi_thread/park.rs +++ b/tokio/src/runtime/scheduler/multi_thread/park.rs @@ -22,10 +22,10 @@ struct Inner { /// Avoids entering the park if possible state: AtomicUsize, - /// Used to coordinate access to the driver / condvar + /// Used to coordinate access to the driver / `condvar` mutex: Mutex<()>, - /// Condvar to block on if the driver is unavailable. + /// `Condvar` to block on if the driver is unavailable. condvar: Condvar, /// Resource (I/O, time, ...) 
driver diff --git a/tokio/src/runtime/scheduler/multi_thread/queue.rs b/tokio/src/runtime/scheduler/multi_thread/queue.rs index e3a3105bbb6..35223289870 100644 --- a/tokio/src/runtime/scheduler/multi_thread/queue.rs +++ b/tokio/src/runtime/scheduler/multi_thread/queue.rs @@ -36,8 +36,8 @@ pub(crate) struct Steal(Arc>); pub(crate) struct Inner { /// Concurrently updated by many threads. /// - /// Contains two `UnsignedShort` values. The LSB byte is the "real" head of - /// the queue. The `UnsignedShort` in the MSB is set by a stealer in process + /// Contains two `UnsignedShort` values. The `LSB` byte is the "real" head of + /// the queue. The `UnsignedShort` in the `MSB` is set by a stealer in process /// of stealing values. It represents the first value being stolen in the /// batch. The `UnsignedShort` indices are intentionally wider than strictly /// required for buffer indexing in order to provide ABA mitigation and make diff --git a/tokio/src/runtime/scheduler/multi_thread/stats.rs b/tokio/src/runtime/scheduler/multi_thread/stats.rs index 3b8c5020e49..30c108c9dd6 100644 --- a/tokio/src/runtime/scheduler/multi_thread/stats.rs +++ b/tokio/src/runtime/scheduler/multi_thread/stats.rs @@ -22,7 +22,7 @@ pub(crate) struct Stats { /// Exponentially-weighted moving average of time spent polling scheduled a /// task. /// - /// Tracked in nanoseconds, stored as a f64 since that is what we use with + /// Tracked in nanoseconds, stored as a `f64` since that is what we use with /// the EWMA calculations task_poll_time_ewma: f64, } diff --git a/tokio/src/runtime/scheduler/multi_thread/worker.rs b/tokio/src/runtime/scheduler/multi_thread/worker.rs index 313e2ea68f7..9998870ab4d 100644 --- a/tokio/src/runtime/scheduler/multi_thread/worker.rs +++ b/tokio/src/runtime/scheduler/multi_thread/worker.rs @@ -9,19 +9,19 @@ //! Shutting down the runtime involves the following steps: //! //! 1. The Shared::close method is called. This closes the inject queue and -//! 
OwnedTasks instance and wakes up all worker threads. +//! `OwnedTasks` instance and wakes up all worker threads. //! //! 2. Each worker thread observes the close signal next time it runs //! Core::maintenance by checking whether the inject queue is closed. -//! The Core::is_shutdown flag is set to true. +//! The `Core::is_shutdown` flag is set to true. //! //! 3. The worker thread calls `pre_shutdown` in parallel. Here, the worker -//! will keep removing tasks from OwnedTasks until it is empty. No new -//! tasks can be pushed to the OwnedTasks during or after this step as it +//! will keep removing tasks from `OwnedTasks` until it is empty. No new +//! tasks can be pushed to the `OwnedTasks` during or after this step as it //! was closed in step 1. //! //! 5. The workers call Shared::shutdown to enter the single-threaded phase of -//! shutdown. These calls will push their core to Shared::shutdown_cores, +//! shutdown. These calls will push their core to `Shared::shutdown_cores`, //! and the last thread to push its core will finish the shutdown procedure. //! //! 6. The local run queue of each core is emptied, then the inject queue is @@ -35,22 +35,22 @@ //! //! When spawning tasks during shutdown, there are two cases: //! -//! * The spawner observes the OwnedTasks being open, and the inject queue is +//! * The spawner observes the `OwnedTasks` being open, and the inject queue is //! closed. -//! * The spawner observes the OwnedTasks being closed and doesn't check the +//! * The spawner observes the `OwnedTasks` being closed and doesn't check the //! inject queue. //! -//! The first case can only happen if the OwnedTasks::bind call happens before +//! The first case can only happen if the `OwnedTasks::bind` call happens before //! or during step 1 of shutdown. In this case, the runtime will clean up the //! task in step 3 of shutdown. //! //! In the latter case, the task was not spawned and the task is immediately //! cancelled by the spawner. //! -//! 
The correctness of shutdown requires both the inject queue and OwnedTasks +//! The correctness of shutdown requires both the inject queue and `OwnedTasks` //! collection to have a closed bit. With a close bit on only the inject queue, //! spawning could run in to a situation where a task is successfully bound long -//! after the runtime has shut down. With a close bit on only the OwnedTasks, +//! after the runtime has shut down. With a close bit on only the `OwnedTasks`, //! the first spawning situation could result in the notification being pushed //! to the inject queue after step 6 of shutdown, which would leave a task in //! the inject queue indefinitely. This would be a ref-count cycle and a memory @@ -184,7 +184,7 @@ pub(crate) struct Shared { /// Only held to trigger some code on drop. This is used to get internal /// runtime metrics that can be useful when doing performance /// investigations. This does nothing (empty struct, no drop impl) unless - /// the `tokio_internal_mt_counters` cfg flag is set. + /// the `tokio_internal_mt_counters` `cfg` flag is set. _counters: Counters, } @@ -234,7 +234,7 @@ type Task = task::Task>; type Notified = task::Notified>; /// Value picked out of thin-air. Running the LIFO slot a handful of times -/// seemms sufficient to benefit from locality. More than 3 times probably is +/// seems sufficient to benefit from locality. More than 3 times probably is /// overweighing. The value can be tuned in the future with data that shows /// improvements. const MAX_LIFO_POLLS_PER_TICK: usize = 3; @@ -677,7 +677,7 @@ impl Context { /// Also important to notice that, before parking, the worker thread will try to take /// ownership of the Driver (IO/Time) and dispatch any events that might have fired. /// Whenever a worker thread executes the Driver loop, all waken tasks are scheduled - /// in its own local queue until the queue saturates (ntasks > LOCAL_QUEUE_CAPACITY). 
+ /// in its own local queue until the queue saturates (ntasks > `LOCAL_QUEUE_CAPACITY`). /// When the local queue is saturated, the overflow tasks are added to the injection queue /// from where other workers can pick them up. /// Also, we rely on the workstealing algorithm to spread the tasks amongst workers diff --git a/tokio/src/runtime/scheduler/multi_thread_alt/queue.rs b/tokio/src/runtime/scheduler/multi_thread_alt/queue.rs index 004715daec8..2694d27cbdf 100644 --- a/tokio/src/runtime/scheduler/multi_thread_alt/queue.rs +++ b/tokio/src/runtime/scheduler/multi_thread_alt/queue.rs @@ -37,8 +37,8 @@ pub(crate) struct Steal(Arc>); pub(crate) struct Inner { /// Concurrently updated by many threads. /// - /// Contains two `UnsignedShort` values. The LSB byte is the "real" head of - /// the queue. The `UnsignedShort` in the MSB is set by a stealer in process + /// Contains two `UnsignedShort` values. The `LSB` byte is the "real" head of + /// the queue. The `UnsignedShort` in the `MSB` is set by a stealer in process /// of stealing values. It represents the first value being stolen in the /// batch. The `UnsignedShort` indices are intentionally wider than strictly /// required for buffer indexing in order to provide ABA mitigation and make diff --git a/tokio/src/runtime/scheduler/multi_thread_alt/stats.rs b/tokio/src/runtime/scheduler/multi_thread_alt/stats.rs index 228e797714b..7118e4915a0 100644 --- a/tokio/src/runtime/scheduler/multi_thread_alt/stats.rs +++ b/tokio/src/runtime/scheduler/multi_thread_alt/stats.rs @@ -13,7 +13,7 @@ pub(crate) struct Stats { /// Exponentially-weighted moving average of time spent polling scheduled a /// task. 
/// - /// Tracked in nanoseconds, stored as a f64 since that is what we use with + /// Tracked in nanoseconds, stored as a `f64` since that is what we use with /// the EWMA calculations task_poll_time_ewma: f64, } diff --git a/tokio/src/runtime/scheduler/multi_thread_alt/worker.rs b/tokio/src/runtime/scheduler/multi_thread_alt/worker.rs index 8d16418a80b..54c6b0ed7ba 100644 --- a/tokio/src/runtime/scheduler/multi_thread_alt/worker.rs +++ b/tokio/src/runtime/scheduler/multi_thread_alt/worker.rs @@ -9,19 +9,19 @@ //! Shutting down the runtime involves the following steps: //! //! 1. The Shared::close method is called. This closes the inject queue and -//! OwnedTasks instance and wakes up all worker threads. +//! `OwnedTasks` instance and wakes up all worker threads. //! //! 2. Each worker thread observes the close signal next time it runs //! Core::maintenance by checking whether the inject queue is closed. -//! The Core::is_shutdown flag is set to true. +//! The `Core::is_shutdown` flag is set to true. //! //! 3. The worker thread calls `pre_shutdown` in parallel. Here, the worker -//! will keep removing tasks from OwnedTasks until it is empty. No new -//! tasks can be pushed to the OwnedTasks during or after this step as it +//! will keep removing tasks from `OwnedTasks` until it is empty. No new +//! tasks can be pushed to the `OwnedTasks` during or after this step as it //! was closed in step 1. //! //! 5. The workers call Shared::shutdown to enter the single-threaded phase of -//! shutdown. These calls will push their core to Shared::shutdown_cores, +//! shutdown. These calls will push their core to `Shared::shutdown_cores`, //! and the last thread to push its core will finish the shutdown procedure. //! //! 6. The local run queue of each core is emptied, then the inject queue is @@ -35,22 +35,22 @@ //! //! When spawning tasks during shutdown, there are two cases: //! -//! * The spawner observes the OwnedTasks being open, and the inject queue is +//! 
* The spawner observes the `OwnedTasks` being open, and the inject queue is //! closed. -//! * The spawner observes the OwnedTasks being closed and doesn't check the +//! * The spawner observes the `OwnedTasks` being closed and doesn't check the //! inject queue. //! -//! The first case can only happen if the OwnedTasks::bind call happens before +//! The first case can only happen if the `OwnedTasks::bind` call happens before //! or during step 1 of shutdown. In this case, the runtime will clean up the //! task in step 3 of shutdown. //! //! In the latter case, the task was not spawned and the task is immediately //! cancelled by the spawner. //! -//! The correctness of shutdown requires both the inject queue and OwnedTasks +//! The correctness of shutdown requires both the inject queue and `OwnedTasks` //! collection to have a closed bit. With a close bit on only the inject queue, //! spawning could run in to a situation where a task is successfully bound long -//! after the runtime has shut down. With a close bit on only the OwnedTasks, +//! after the runtime has shut down. With a close bit on only the `OwnedTasks`, //! the first spawning situation could result in the notification being pushed //! to the inject queue after step 6 of shutdown, which would leave a task in //! the inject queue indefinitely. This would be a ref-count cycle and a memory @@ -157,7 +157,7 @@ pub(crate) struct Shared { driver: AtomicCell, /// Condition variables used to unblock worker threads. Each worker thread - /// has its own condvar it waits on. + /// has its own `condvar` it waits on. pub(super) condvars: Vec, /// The number of cores that have observed the trace signal. @@ -174,7 +174,7 @@ pub(crate) struct Shared { /// Only held to trigger some code on drop. This is used to get internal /// runtime metrics that can be useful when doing performance /// investigations. This does nothing (empty struct, no drop impl) unless - /// the `tokio_internal_mt_counters` cfg flag is set. 
+ /// the `tokio_internal_mt_counters` `cfg` flag is set. _counters: Counters, } @@ -248,7 +248,7 @@ type Task = task::Task>; type Notified = task::Notified>; /// Value picked out of thin-air. Running the LIFO slot a handful of times -/// seemms sufficient to benefit from locality. More than 3 times probably is +/// seems sufficient to benefit from locality. More than 3 times probably is /// overweighing. The value can be tuned in the future with data that shows /// improvements. const MAX_LIFO_POLLS_PER_TICK: usize = 3; diff --git a/tokio/src/runtime/task/core.rs b/tokio/src/runtime/task/core.rs index e61bbe5061d..108b06bc8b6 100644 --- a/tokio/src/runtime/task/core.rs +++ b/tokio/src/runtime/task/core.rs @@ -28,7 +28,7 @@ use std::task::{Context, Poll, Waker}; /// be referenced by both *mut Cell and *mut Header. /// /// Any changes to the layout of this struct _must_ also be reflected in the -/// const fns in raw.rs. +/// `const` fns in raw.rs. /// // # This struct should be cache padded to avoid false sharing. The cache padding rules are copied // from crossbeam-utils/src/cache_padded.rs @@ -132,7 +132,7 @@ pub(super) struct CoreStage { /// Holds the future or output, depending on the stage of execution. /// /// Any changes to the layout of this struct _must_ also be reflected in the -/// const fns in raw.rs. +/// `const` fns in raw.rs. #[repr(C)] pub(super) struct Core { /// Scheduler used to drive this future. diff --git a/tokio/src/runtime/task/harness.rs b/tokio/src/runtime/task/harness.rs index 8bfd57e6fbf..cf19eea83bb 100644 --- a/tokio/src/runtime/task/harness.rs +++ b/tokio/src/runtime/task/harness.rs @@ -183,7 +183,7 @@ where /// If the return value is Complete, the caller is given ownership of a /// single ref-count, which should be passed on to `complete`. 
/// - /// If the return value is Dealloc, then this call consumed the last + /// If the return value is `Dealloc`, then this call consumed the last /// ref-count and the caller should call `dealloc`. /// /// Otherwise the ref-count is consumed and the caller should not access diff --git a/tokio/src/runtime/task/join.rs b/tokio/src/runtime/task/join.rs index 818d3c21dd5..19289cf5826 100644 --- a/tokio/src/runtime/task/join.rs +++ b/tokio/src/runtime/task/join.rs @@ -58,7 +58,7 @@ cfg_rt! { /// ``` /// /// The generic parameter `T` in `JoinHandle` is the return type of the spawned task. - /// If the return value is an i32, the join handle has type `JoinHandle`: + /// If the return value is an `i32`, the join handle has type `JoinHandle`: /// /// ``` /// use tokio::task; diff --git a/tokio/src/runtime/task/list.rs b/tokio/src/runtime/task/list.rs index 3d2a121cf1d..41a5fb439c9 100644 --- a/tokio/src/runtime/task/list.rs +++ b/tokio/src/runtime/task/list.rs @@ -84,8 +84,8 @@ impl OwnedTasks { } } - /// Binds the provided task to this OwnedTasks instance. This fails if the - /// OwnedTasks has been closed. + /// Binds the provided task to this `OwnedTasks` instance. This fails if the + /// `OwnedTasks` has been closed. pub(crate) fn bind( &self, task: T, @@ -125,8 +125,8 @@ impl OwnedTasks { Some(notified) } - /// Asserts that the given task is owned by this OwnedTasks and convert it to - /// a LocalNotified, giving the thread permission to poll this task. + /// Asserts that the given task is owned by this `OwnedTasks` and convert it to + /// a `LocalNotified`, giving the thread permission to poll this task. 
#[inline] pub(crate) fn assert_owner(&self, task: Notified) -> LocalNotified { debug_assert_eq!(task.header().get_owner_id(), Some(self.id)); @@ -284,8 +284,8 @@ impl LocalOwnedTasks { unsafe { inner.list.remove(task.header_ptr()) }) } - /// Asserts that the given task is owned by this LocalOwnedTasks and convert - /// it to a LocalNotified, giving the thread permission to poll this task. + /// Asserts that the given task is owned by this `LocalOwnedTasks` and convert + /// it to a `LocalNotified`, giving the thread permission to poll this task. #[inline] pub(crate) fn assert_owner(&self, task: Notified) -> LocalNotified { assert_eq!(task.header().get_owner_id(), Some(self.id)); diff --git a/tokio/src/runtime/task/mod.rs b/tokio/src/runtime/task/mod.rs index 0bd40cd875c..6b05f4d7d5c 100644 --- a/tokio/src/runtime/task/mod.rs +++ b/tokio/src/runtime/task/mod.rs @@ -2,52 +2,52 @@ //! //! The task module contains the code that manages spawned tasks and provides a //! safe API for the rest of the runtime to use. Each task in a runtime is -//! stored in an OwnedTasks or LocalOwnedTasks object. +//! stored in an `OwnedTasks` or `LocalOwnedTasks` object. //! //! # Task reference types //! //! A task is usually referenced by multiple handles, and there are several //! types of handles. //! -//! * OwnedTask - tasks stored in an OwnedTasks or LocalOwnedTasks are of this +//! * `OwnedTask` - tasks stored in an `OwnedTasks` or `LocalOwnedTasks` are of this //! reference type. //! -//! * JoinHandle - each task has a JoinHandle that allows access to the output +//! * `JoinHandle` - each task has a `JoinHandle` that allows access to the output //! of the task. //! -//! * Waker - every waker for a task has this reference type. There can be any +//! * `Waker` - every waker for a task has this reference type. There can be any //! number of waker references. //! -//! * Notified - tracks whether the task is notified. +//! * `Notified` - tracks whether the task is notified. //! -//! 
* Unowned - this task reference type is used for tasks not stored in any +//! * `Unowned` - this task reference type is used for tasks not stored in any //! runtime. Mainly used for blocking tasks, but also in tests. //! //! The task uses a reference count to keep track of how many active references -//! exist. The Unowned reference type takes up two ref-counts. All other +//! exist. The `Unowned` reference type takes up two ref-counts. All other //! reference types take up a single ref-count. //! //! Besides the waker type, each task has at most one of each reference type. //! //! # State //! -//! The task stores its state in an atomic usize with various bitfields for the +//! The task stores its state in an atomic `usize` with various bitfields for the //! necessary information. The state has the following bitfields: //! -//! * RUNNING - Tracks whether the task is currently being polled or cancelled. +//! * `RUNNING` - Tracks whether the task is currently being polled or cancelled. //! This bit functions as a lock around the task. //! -//! * COMPLETE - Is one once the future has fully completed and has been +//! * `COMPLETE` - Is one once the future has fully completed and has been //! dropped. Never unset once set. Never set together with RUNNING. //! -//! * NOTIFIED - Tracks whether a Notified object currently exists. +//! * `NOTIFIED` - Tracks whether a Notified object currently exists. //! -//! * CANCELLED - Is set to one for tasks that should be cancelled as soon as +//! * `CANCELLED` - Is set to one for tasks that should be cancelled as soon as //! possible. May take any value for completed tasks. //! -//! * JOIN_INTEREST - Is set to one if there exists a JoinHandle. +//! * `JOIN_INTEREST` - Is set to one if there exists a `JoinHandle`. //! -//! * JOIN_WAKER - Acts as an access control bit for the join handle waker. The +//! * `JOIN_WAKER` - Acts as an access control bit for the join handle waker. The //! protocol for its usage is described below. //! //! 
The rest of the bits are used for the ref-count. @@ -59,7 +59,7 @@ //! //! * The state field is accessed with atomic instructions. //! -//! * The OwnedTask reference has exclusive access to the `owned` field. +//! * The `OwnedTask` reference has exclusive access to the `owned` field. //! //! * The Notified reference has exclusive access to the `queue_next` field. //! @@ -67,42 +67,42 @@ //! is otherwise immutable and anyone can access the field immutably without //! synchronization. //! -//! * If COMPLETE is one, then the JoinHandle has exclusive access to the +//! * If COMPLETE is one, then the `JoinHandle` has exclusive access to the //! stage field. If COMPLETE is zero, then the RUNNING bitfield functions as //! a lock for the stage field, and it can be accessed only by the thread //! that set RUNNING to one. //! //! * The waker field may be concurrently accessed by different threads: in one //! thread the runtime may complete a task and *read* the waker field to -//! invoke the waker, and in another thread the task's JoinHandle may be -//! polled, and if the task hasn't yet completed, the JoinHandle may *write* -//! a waker to the waker field. The JOIN_WAKER bit ensures safe access by +//! invoke the waker, and in another thread the task's `JoinHandle` may be +//! polled, and if the task hasn't yet completed, the `JoinHandle` may *write* +//! a waker to the waker field. The `JOIN_WAKER` bit ensures safe access by //! multiple threads to the waker field using the following rules: //! -//! 1. JOIN_WAKER is initialized to zero. +//! 1. `JOIN_WAKER` is initialized to zero. //! -//! 2. If JOIN_WAKER is zero, then the JoinHandle has exclusive (mutable) +//! 2. If `JOIN_WAKER` is zero, then the `JoinHandle` has exclusive (mutable) //! access to the waker field. //! -//! 3. If JOIN_WAKER is one, then the JoinHandle has shared (read-only) +//! 3. If `JOIN_WAKER` is one, then the `JoinHandle` has shared (read-only) //! access to the waker field. //! -//! 4. 
If JOIN_WAKER is one and COMPLETE is one, then the runtime has shared +//! 4. If `JOIN_WAKER` is one and COMPLETE is one, then the runtime has shared //! (read-only) access to the waker field. //! -//! 5. If the JoinHandle needs to write to the waker field, then the -//! JoinHandle needs to (i) successfully set JOIN_WAKER to zero if it is +//! 5. If the `JoinHandle` needs to write to the waker field, then the +//! `JoinHandle` needs to (i) successfully set `JOIN_WAKER` to zero if it is //! not already zero to gain exclusive access to the waker field per rule -//! 2, (ii) write a waker, and (iii) successfully set JOIN_WAKER to one. +//! 2, (ii) write a waker, and (iii) successfully set `JOIN_WAKER` to one. //! -//! 6. The JoinHandle can change JOIN_WAKER only if COMPLETE is zero (i.e. +//! 6. The `JoinHandle` can change `JOIN_WAKER` only if COMPLETE is zero (i.e. //! the task hasn't yet completed). //! //! Rule 6 implies that the steps (i) or (iii) of rule 5 may fail due to a //! race. If step (i) fails, then the attempt to write a waker is aborted. If //! step (iii) fails because COMPLETE is set to one by another thread after //! step (i), then the waker field is cleared. Once COMPLETE is one (i.e. -//! task has completed), the JoinHandle will not modify JOIN_WAKER. After the +//! task has completed), the `JoinHandle` will not modify `JOIN_WAKER`. After the //! runtime sets COMPLETE to one, it invokes the waker if there is one. //! //! All other fields are immutable and can be accessed immutably without @@ -119,18 +119,18 @@ //! the RUNNING field, so exclusive access is ensured. //! //! When the task completes, exclusive access to the output is transferred to -//! the JoinHandle. If the JoinHandle is already dropped when the transition to +//! the `JoinHandle`. If the `JoinHandle` is already dropped when the transition to //! complete happens, the thread performing that transition retains exclusive //! access to the output and should immediately drop it. //! //! 
## Non-Send futures //! -//! If a future is not Send, then it is bound to a LocalOwnedTasks. The future -//! will only ever be polled or dropped given a LocalNotified or inside a call -//! to LocalOwnedTasks::shutdown_all. In either case, it is guaranteed that the +//! If a future is not Send, then it is bound to a `LocalOwnedTasks`. The future +//! will only ever be polled or dropped given a `LocalNotified` or inside a call +//! to `LocalOwnedTasks::shutdown_all`. In either case, it is guaranteed that the //! future is on the right thread. //! -//! If the task is never removed from the LocalOwnedTasks, then it is leaked, so +//! If the task is never removed from the `LocalOwnedTasks`, then it is leaked, so //! there is no risk that the task is dropped on some other thread when the last //! ref-count drops. //! @@ -138,21 +138,21 @@ //! //! When a task completes, the output is placed in the stage of the task. Then, //! a transition that sets COMPLETE to true is performed, and the value of -//! JOIN_INTEREST when this transition happens is read. +//! `JOIN_INTEREST` when this transition happens is read. //! -//! If JOIN_INTEREST is zero when the transition to COMPLETE happens, then the +//! If `JOIN_INTEREST` is zero when the transition to COMPLETE happens, then the //! output is immediately dropped. //! -//! If JOIN_INTEREST is one when the transition to COMPLETE happens, then the -//! JoinHandle is responsible for cleaning up the output. If the output is not +//! If `JOIN_INTEREST` is one when the transition to COMPLETE happens, then the +//! `JoinHandle` is responsible for cleaning up the output. If the output is not //! Send, then this happens: //! //! 1. The output is created on the thread that the future was polled on. Since //! only non-Send futures can have non-Send output, the future was polled on //! the thread that the future was spawned from. //! 2. Since `JoinHandle` is not Send if Output is not Send, the -//! 
JoinHandle is also on the thread that the future was spawned from. -//! 3. Thus, the JoinHandle will not move the output across threads when it +//! `JoinHandle` is also on the thread that the future was spawned from. +//! 3. Thus, the `JoinHandle` will not move the output across threads when it //! takes or drops the output. //! //! ## Recursive poll/shutdown @@ -241,7 +241,7 @@ pub(crate) struct LocalNotified { _not_send: PhantomData<*const ()>, } -/// A task that is not owned by any OwnedTasks. Used for blocking tasks. +/// A task that is not owned by any `OwnedTasks`. Used for blocking tasks. /// This type holds two ref-counts. pub(crate) struct UnownedTask { raw: RawTask, @@ -280,7 +280,7 @@ pub(crate) trait Schedule: Sync + Sized + 'static { cfg_rt! { /// This is the constructor for a new task. Three references to the task are - /// created. The first task reference is usually put into an OwnedTasks + /// created. The first task reference is usually put into an `OwnedTasks` /// immediately. The Notified is sent to the scheduler as an ordinary /// notification. fn new_task( diff --git a/tokio/src/runtime/task/state.rs b/tokio/src/runtime/task/state.rs index 24cb4338b96..9f1a8ec5a14 100644 --- a/tokio/src/runtime/task/state.rs +++ b/tokio/src/runtime/task/state.rs @@ -53,9 +53,9 @@ const REF_ONE: usize = 1 << REF_COUNT_SHIFT; /// /// A task is initialized with three references: /// -/// * A reference that will be stored in an OwnedTasks or LocalOwnedTasks. +/// * A reference that will be stored in an `OwnedTasks` or `LocalOwnedTasks`. /// * A reference that will be sent to the scheduler as an ordinary notification. -/// * A reference for the JoinHandle. +/// * A reference for the `JoinHandle`. /// /// As the task starts with a `JoinHandle`, `JOIN_INTEREST` is set. /// As the task starts with a `Notified`, `NOTIFIED` is set. 
diff --git a/tokio/src/runtime/task/trace/mod.rs b/tokio/src/runtime/task/trace/mod.rs index 34d40a5a253..7c9acc035af 100644 --- a/tokio/src/runtime/task/trace/mod.rs +++ b/tokio/src/runtime/task/trace/mod.rs @@ -265,7 +265,7 @@ impl Future for Root { } } -/// Trace and poll all tasks of the current_thread runtime. +/// Trace and poll all tasks of the `current_thread` runtime. pub(in crate::runtime) fn trace_current_thread( owned: &OwnedTasks>, local: &mut VecDeque>>, @@ -293,7 +293,7 @@ cfg_rt_multi_thread! { use crate::runtime::scheduler::multi_thread::Synced; use crate::runtime::scheduler::inject::Shared; - /// Trace and poll all tasks of the current_thread runtime. + /// Trace and poll all tasks of the `current_thread` runtime. /// /// ## Safety /// diff --git a/tokio/src/runtime/tests/loom_local.rs b/tokio/src/runtime/tests/loom_local.rs index d9a07a45f05..89d025b811c 100644 --- a/tokio/src/runtime/tests/loom_local.rs +++ b/tokio/src/runtime/tests/loom_local.rs @@ -9,7 +9,7 @@ use std::task::Poll; /// to the runtime itself. This means that if they are not properly removed at /// runtime shutdown, this will cause a memory leak. /// -/// This test verifies that waking something during shutdown of a LocalSet does +/// This test verifies that waking something during shutdown of a `LocalSet` does /// not result in tasks lingering in the queue once shutdown is complete. This /// is verified using loom's leak finder. #[test] diff --git a/tokio/src/runtime/time/entry.rs b/tokio/src/runtime/time/entry.rs index 634ed2031ac..0998b53011d 100644 --- a/tokio/src/runtime/time/entry.rs +++ b/tokio/src/runtime/time/entry.rs @@ -11,10 +11,10 @@ //! 2) a held driver lock. //! //! It follows from this that any changes made while holding BOTH 1 and 2 will -//! be reliably visible, regardless of ordering. This is because of the acq/rel +//! be reliably visible, regardless of ordering. This is because of the `acq/rel` //! 
fences on the driver lock ensuring ordering with 2, and rust mutable //! reference rules for 1 (a mutable reference to an object can't be passed -//! between threads without an acq/rel barrier, and same-thread we have local +//! between threads without an `acq/rel` barrier, and same-thread we have local //! happens-before ordering). //! //! # State field @@ -81,12 +81,12 @@ pub(super) const MAX_SAFE_MILLIS_DURATION: u64 = u64::MAX - 2; /// time (if registered), or otherwise the result of the timer completing, as /// well as the registered waker. /// -/// Generally, the StateCell is only permitted to be accessed from two contexts: -/// Either a thread holding the corresponding &mut TimerEntry, or a thread -/// holding the timer driver lock. The write actions on the StateCell amount to -/// passing "ownership" of the StateCell between these contexts; moving a timer -/// from the TimerEntry to the driver requires _both_ holding the &mut -/// TimerEntry and the driver lock, while moving it back (firing the timer) +/// Generally, the `StateCell` is only permitted to be accessed from two contexts: +/// Either a thread holding the corresponding `&mut TimerEntry`, or a thread +/// holding the timer driver lock. The write actions on the `StateCell` amount to +/// passing "ownership" of the `StateCell` between these contexts; moving a timer +/// from the `TimerEntry` to the driver requires _both_ holding the `&mut +/// TimerEntry` and the driver lock, while moving it back (firing the timer) /// requires only the driver lock. pub(super) struct StateCell { /// Holds either the scheduled expiration time for this timer, or (if the @@ -164,7 +164,7 @@ impl StateCell { /// Marks this timer as being moved to the pending list, if its scheduled /// time is not after `not_after`. /// - /// If the timer is scheduled for a time after not_after, returns an Err + /// If the timer is scheduled for a time after `not_after`, returns an Err /// containing the current scheduled time. 
/// /// SAFETY: Must hold the driver lock. @@ -314,15 +314,15 @@ pub(crate) struct TimerEntry { unsafe impl Send for TimerEntry {} unsafe impl Sync for TimerEntry {} -/// An TimerHandle is the (non-enforced) "unique" pointer from the driver to the -/// timer entry. Generally, at most one TimerHandle exists for a timer at a time +/// An `TimerHandle` is the (non-enforced) "unique" pointer from the driver to the +/// timer entry. Generally, at most one `TimerHandle` exists for a timer at a time /// (enforced by the timer state machine). /// -/// SAFETY: An TimerHandle is essentially a raw pointer, and the usual caveats -/// of pointer safety apply. In particular, TimerHandle does not itself enforce -/// that the timer does still exist; however, normally an TimerHandle is created +/// SAFETY: An `TimerHandle` is essentially a raw pointer, and the usual caveats +/// of pointer safety apply. In particular, `TimerHandle` does not itself enforce +/// that the timer does still exist; however, normally an `TimerHandle` is created /// immediately before registering the timer, and is consumed when firing the -/// timer, to help minimize mistakes. Still, because TimerHandle cannot enforce +/// timer, to help minimize mistakes. Still, because `TimerHandle` cannot enforce /// memory safety, all operations are unsafe. #[derive(Debug)] pub(crate) struct TimerHandle { @@ -437,7 +437,7 @@ impl TimerShared { self.state.extend_expiration(t) } - /// Returns a TimerHandle for this timer. + /// Returns a `TimerHandle` for this timer. pub(super) fn handle(&self) -> TimerHandle { TimerHandle { inner: NonNull::from(self), diff --git a/tokio/src/runtime/time/source.rs b/tokio/src/runtime/time/source.rs index 4647bc41223..c709dc5380f 100644 --- a/tokio/src/runtime/time/source.rs +++ b/tokio/src/runtime/time/source.rs @@ -1,7 +1,7 @@ use super::MAX_SAFE_MILLIS_DURATION; use crate::time::{Clock, Duration, Instant}; -/// A structure which handles conversion from Instants to u64 timestamps. 
+/// A structure which handles conversion from Instants to `u64` timestamps. #[derive(Debug)] pub(crate) struct TimeSource { start_time: Instant, diff --git a/tokio/src/runtime/time/wheel/level.rs b/tokio/src/runtime/time/wheel/level.rs index 4c9ba18cd89..a828c0067ef 100644 --- a/tokio/src/runtime/time/wheel/level.rs +++ b/tokio/src/runtime/time/wheel/level.rs @@ -15,7 +15,7 @@ pub(crate) struct Level { /// The least-significant bit represents slot zero. occupied: u64, - /// Slots. We access these via the EntryInner `current_list` as well, so this needs to be an UnsafeCell. + /// Slots. We access these via the EntryInner `current_list` as well, so this needs to be an `UnsafeCell`. slot: [EntryList; LEVEL_MULT], } diff --git a/tokio/src/signal/ctrl_c.rs b/tokio/src/signal/ctrl_c.rs index b26ab7ead64..e1e92fa9977 100644 --- a/tokio/src/signal/ctrl_c.rs +++ b/tokio/src/signal/ctrl_c.rs @@ -23,10 +23,10 @@ use std::io; /// the entire process**. /// /// For example, Unix systems will terminate a process by default when it -/// receives a signal generated by "CTRL+C" on the terminal. But, when a +/// receives a signal generated by `"CTRL+C"` on the terminal. But, when a /// `ctrl_c` stream is created to listen for this signal, the time it arrives, /// it will be translated to a stream event, and the process will continue to -/// execute. **Even if this `Signal` instance is dropped, subsequent SIGINT +/// execute. **Even if this `Signal` instance is dropped, subsequent `SIGINT` /// deliveries will end up captured by Tokio, and the default platform behavior /// will NOT be reset**. /// diff --git a/tokio/src/signal/mod.rs b/tokio/src/signal/mod.rs index ab47e8af27b..59f71db0e46 100644 --- a/tokio/src/signal/mod.rs +++ b/tokio/src/signal/mod.rs @@ -23,7 +23,7 @@ //! } //! ``` //! -//! Wait for SIGHUP on Unix +//! Wait for `SIGHUP` on Unix //! //! ```rust,no_run //! 
# #[cfg(unix)] { diff --git a/tokio/src/signal/registry.rs b/tokio/src/signal/registry.rs index 74973293a2d..3fff8df9303 100644 --- a/tokio/src/signal/registry.rs +++ b/tokio/src/signal/registry.rs @@ -26,7 +26,7 @@ impl Default for EventInfo { } } -/// An interface for retrieving the `EventInfo` for a particular eventId. +/// An interface for retrieving the `EventInfo` for a particular `eventId`. pub(crate) trait Storage { /// Gets the `EventInfo` for `id` if it exists. fn event_info(&self, id: EventId) -> Option<&EventInfo>; @@ -59,7 +59,7 @@ pub(crate) trait Init { /// Manages and distributes event notifications to any registered listeners. /// /// Generic over the underlying storage to allow for domain specific -/// optimizations (e.g. eventIds may or may not be contiguous). +/// optimizations (e.g. `eventIds` may or may not be contiguous). #[derive(Debug)] pub(crate) struct Registry { storage: S, diff --git a/tokio/src/signal/unix.rs b/tokio/src/signal/unix.rs index d3d7fd4ab76..52a9cbaac40 100644 --- a/tokio/src/signal/unix.rs +++ b/tokio/src/signal/unix.rs @@ -97,7 +97,7 @@ impl SignalKind { self.0 } - /// Represents the SIGALRM signal. + /// Represents the `SIGALRM` signal. /// /// On Unix systems this signal is sent when a real-time timer has expired. /// By default, the process is terminated by this signal. @@ -105,7 +105,7 @@ impl SignalKind { Self(libc::SIGALRM) } - /// Represents the SIGCHLD signal. + /// Represents the `SIGCHLD` signal. /// /// On Unix systems this signal is sent when the status of a child process /// has changed. By default, this signal is ignored. @@ -113,7 +113,7 @@ impl SignalKind { Self(libc::SIGCHLD) } - /// Represents the SIGHUP signal. + /// Represents the `SIGHUP` signal. /// /// On Unix systems this signal is sent when the terminal is disconnected. /// By default, the process is terminated by this signal. @@ -121,7 +121,7 @@ impl SignalKind { Self(libc::SIGHUP) } - /// Represents the SIGINFO signal. 
+ /// Represents the `SIGINFO` signal. /// /// On Unix systems this signal is sent to request a status update from the /// process. By default, this signal is ignored. @@ -136,7 +136,7 @@ impl SignalKind { Self(libc::SIGINFO) } - /// Represents the SIGINT signal. + /// Represents the `SIGINT` signal. /// /// On Unix systems this signal is sent to interrupt a program. /// By default, the process is terminated by this signal. @@ -144,7 +144,7 @@ impl SignalKind { Self(libc::SIGINT) } - /// Represents the SIGIO signal. + /// Represents the `SIGIO` signal. /// /// On Unix systems this signal is sent when I/O operations are possible /// on some file descriptor. By default, this signal is ignored. @@ -152,7 +152,7 @@ impl SignalKind { Self(libc::SIGIO) } - /// Represents the SIGPIPE signal. + /// Represents the `SIGPIPE` signal. /// /// On Unix systems this signal is sent when the process attempts to write /// to a pipe which has no reader. By default, the process is terminated by @@ -161,7 +161,7 @@ impl SignalKind { Self(libc::SIGPIPE) } - /// Represents the SIGQUIT signal. + /// Represents the `SIGQUIT` signal. /// /// On Unix systems this signal is sent to issue a shutdown of the /// process, after which the OS will dump the process core. @@ -170,7 +170,7 @@ impl SignalKind { Self(libc::SIGQUIT) } - /// Represents the SIGTERM signal. + /// Represents the `SIGTERM` signal. /// /// On Unix systems this signal is sent to issue a shutdown of the /// process. By default, the process is terminated by this signal. @@ -178,7 +178,7 @@ impl SignalKind { Self(libc::SIGTERM) } - /// Represents the SIGUSR1 signal. + /// Represents the `SIGUSR1` signal. /// /// On Unix systems this is a user defined signal. /// By default, the process is terminated by this signal. @@ -186,7 +186,7 @@ impl SignalKind { Self(libc::SIGUSR1) } - /// Represents the SIGUSR2 signal. + /// Represents the `SIGUSR2` signal. /// /// On Unix systems this is a user defined signal. 
/// By default, the process is terminated by this signal. @@ -194,7 +194,7 @@ impl SignalKind { Self(libc::SIGUSR2) } - /// Represents the SIGWINCH signal. + /// Represents the `SIGWINCH` signal. /// /// On Unix systems this signal is sent when the terminal window is resized. /// By default, this signal is ignored. @@ -330,10 +330,10 @@ fn signal_enable(signal: SignalKind, handle: &Handle) -> io::Result<()> { /// entire process**. /// /// For example, Unix systems will terminate a process by default when it -/// receives SIGINT. But, when a `Signal` instance is created to listen for -/// this signal, the next SIGINT that arrives will be translated to a stream +/// receives `SIGINT`. But, when a `Signal` instance is created to listen for +/// this signal, the next `SIGINT` that arrives will be translated to a stream /// event, and the process will continue to execute. **Even if this `Signal` -/// instance is dropped, subsequent SIGINT deliveries will end up captured by +/// instance is dropped, subsequent `SIGINT` deliveries will end up captured by /// Tokio, and the default platform behavior will NOT be reset**. /// /// Thus, applications should take care to ensure the expected signal behavior @@ -341,7 +341,7 @@ fn signal_enable(signal: SignalKind, handle: &Handle) -> io::Result<()> { /// /// # Examples /// -/// Wait for SIGHUP +/// Wait for `SIGHUP` /// /// ```rust,no_run /// use tokio::signal::unix::{signal, SignalKind}; @@ -424,7 +424,7 @@ impl Signal { /// /// # Examples /// - /// Wait for SIGHUP + /// Wait for `SIGHUP` /// /// ```rust,no_run /// use tokio::signal::unix::{signal, SignalKind}; diff --git a/tokio/src/signal/windows.rs b/tokio/src/signal/windows.rs index 2f70f98b15a..d8af9b4c9d9 100644 --- a/tokio/src/signal/windows.rs +++ b/tokio/src/signal/windows.rs @@ -3,7 +3,7 @@ //! This module is only defined on Windows and allows receiving "ctrl-c", //! "ctrl-break", "ctrl-logoff", "ctrl-shutdown", and "ctrl-close" //! notifications. 
These events are listened for via the `SetConsoleCtrlHandler` -//! function which receives the corresponding windows_sys event type. +//! function which receives the corresponding `windows_sys` event type. #![cfg(any(windows, docsrs))] #![cfg_attr(docsrs, doc(cfg(all(windows, feature = "signal"))))] @@ -257,8 +257,8 @@ pub fn ctrl_close() -> io::Result { }) } -/// Represents a listener which receives "ctrl-close" notitifications sent to the process -/// via 'SetConsoleCtrlHandler'. +/// Represents a listener which receives "ctrl-close" notifications sent to the process +/// via `SetConsoleCtrlHandler`. /// /// A notification to this process notifies *all* listeners listening for /// this event. Moreover, the notifications **are coalesced** if they aren't processed @@ -354,8 +354,8 @@ pub fn ctrl_shutdown() -> io::Result { }) } -/// Represents a listener which receives "ctrl-shutdown" notitifications sent to the process -/// via 'SetConsoleCtrlHandler'. +/// Represents a listener which receives "ctrl-shutdown" notifications sent to the process +/// via `SetConsoleCtrlHandler`. /// /// A notification to this process notifies *all* listeners listening for /// this event. Moreover, the notifications **are coalesced** if they aren't processed @@ -451,8 +451,8 @@ pub fn ctrl_logoff() -> io::Result { }) } -/// Represents a listener which receives "ctrl-logoff" notitifications sent to the process -/// via 'SetConsoleCtrlHandler'. +/// Represents a listener which receives "ctrl-logoff" notifications sent to the process +/// via `SetConsoleCtrlHandler`. /// /// A notification to this process notifies *all* listeners listening for /// this event. 
Moreover, the notifications **are coalesced** if they aren't processed diff --git a/tokio/src/sync/mpsc/error.rs b/tokio/src/sync/mpsc/error.rs index e41885664da..2fc24b38d83 100644 --- a/tokio/src/sync/mpsc/error.rs +++ b/tokio/src/sync/mpsc/error.rs @@ -24,7 +24,7 @@ impl Error for SendError {} // ===== TrySendError ===== /// This enumeration is the list of the possible error outcomes for the -/// [try_send](super::Sender::try_send) method. +/// [`try_send`](super::Sender::try_send) method. #[derive(PartialEq, Eq, Clone, Copy)] pub enum TrySendError { /// The data could not be sent on the channel because the channel is diff --git a/tokio/src/sync/oneshot.rs b/tokio/src/sync/oneshot.rs index 3fa77031e51..9e8c3fcb7f7 100644 --- a/tokio/src/sync/oneshot.rs +++ b/tokio/src/sync/oneshot.rs @@ -59,7 +59,7 @@ //! } //! ``` //! -//! To use a oneshot channel in a `tokio::select!` loop, add `&mut` in front of +//! To use a `oneshot` channel in a `tokio::select!` loop, add `&mut` in front of //! the channel. //! //! ``` @@ -330,7 +330,7 @@ pub struct Receiver { } pub mod error { - //! Oneshot error types. + //! `Oneshot` error types. use std::fmt; @@ -555,8 +555,8 @@ impl Sender { /// Attempts to send a value on this channel, returning it back if it could /// not be sent. /// - /// This method consumes `self` as only one value may ever be sent on a oneshot - /// channel. It is not marked async because sending a message to an oneshot + /// This method consumes `self` as only one value may ever be sent on a `oneshot` + /// channel. It is not marked async because sending a message to an `oneshot` /// channel never requires any form of waiting. Because of this, the `send` /// method can be used in both synchronous and asynchronous code without /// problems. 
@@ -750,7 +750,7 @@ impl Sender { state.is_closed() } - /// Checks whether the oneshot channel has been closed, and if not, schedules the + /// Checks whether the `oneshot` channel has been closed, and if not, schedules the /// `Waker` in the provided `Context` to receive a notification when the channel is /// closed. /// diff --git a/tokio/src/task/local.rs b/tokio/src/task/local.rs index fb49d1f2768..a40708d08c2 100644 --- a/tokio/src/task/local.rs +++ b/tokio/src/task/local.rs @@ -237,7 +237,7 @@ struct Context { unhandled_panic: Cell, } -/// LocalSet state shared between threads. +/// `LocalSet` state shared between threads. struct Shared { /// # Safety /// @@ -290,7 +290,7 @@ struct LocalData { impl LocalData { /// Should be called except when we call `LocalSet::enter`. - /// Especially when we poll a LocalSet. + /// Especially when we poll a `LocalSet`. #[must_use = "dropping this guard will reset the entered state"] fn enter(&self, ctx: Rc) -> LocalDataEnterGuard<'_> { let ctx = self.ctx.replace(Some(ctx)); @@ -392,7 +392,7 @@ const MAX_TASKS_PER_TICK: usize = 61; /// How often it check the remote queue first. const REMOTE_FIRST_INTERVAL: u8 = 31; -/// Context guard for LocalSet +/// Context guard for `LocalSet` pub struct LocalEnterGuard { ctx: Option>, @@ -526,7 +526,7 @@ impl LocalSet { /// This runs the given future on the runtime, blocking until it is /// complete, and yielding its resolved result. Any tasks or timers which /// the future spawns internally will be executed on the runtime. The future - /// may also call [`spawn_local`] to spawn_local additional local futures on the + /// may also call [`spawn_local`] to `spawn_local` additional local futures on the /// current thread. /// /// This method should not be called from an asynchronous context. diff --git a/tokio/src/task/mod.rs b/tokio/src/task/mod.rs index aefa395c044..806b9aebe76 100644 --- a/tokio/src/task/mod.rs +++ b/tokio/src/task/mod.rs @@ -5,8 +5,8 @@ //! 
A _task_ is a light weight, non-blocking unit of execution. A task is similar //! to an OS thread, but rather than being managed by the OS scheduler, they are //! managed by the [Tokio runtime][rt]. Another name for this general pattern is -//! [green threads]. If you are familiar with [Go's goroutines], [Kotlin's -//! coroutines], or [Erlang's processes], you can think of Tokio's tasks as +//! [green threads]. If you are familiar with [`Go's goroutines`], [`Kotlin's +//! coroutines`], or [`Erlang's processes`], you can think of Tokio's tasks as //! something similar. //! //! Key points about tasks include: @@ -167,7 +167,7 @@ //! blocking operations there. This includes destructors of objects destroyed in //! async code. //! -//! #### spawn_blocking +//! #### `spawn_blocking` //! //! The `task::spawn_blocking` function is similar to the `task::spawn` function //! discussed in the previous section, but rather than spawning an @@ -202,7 +202,7 @@ //! # } //! ``` //! -//! #### block_in_place +//! #### `block_in_place` //! //! When using the [multi-threaded runtime][rt-multi-thread], the [`task::block_in_place`] //! function is also available. Like `task::spawn_blocking`, this function @@ -227,7 +227,7 @@ //! # } //! ``` //! -//! #### yield_now +//! #### `yield_now` //! //! In addition, this module provides a [`task::yield_now`] async function //! that is analogous to the standard library's [`thread::yield_now`]. 
Calling diff --git a/tokio/src/util/idle_notified_set.rs b/tokio/src/util/idle_notified_set.rs index bd9c2ef1bbc..8fe13095a2c 100644 --- a/tokio/src/util/idle_notified_set.rs +++ b/tokio/src/util/idle_notified_set.rs @@ -42,8 +42,8 @@ pub(crate) struct EntryInOneOfTheLists<'a, T> { type Lists = Mutex>; -/// The linked lists hold strong references to the ListEntry items, and the -/// ListEntry items also hold a strong reference back to the Lists object, but +/// The linked lists hold strong references to the `ListEntry` items, and the +/// `ListEntry` items also hold a strong reference back to the Lists object, but /// the destructor of the `IdleNotifiedSet` will clear the two lists, so once /// that object is destroyed, no ref-cycles will remain. struct ListsInner { diff --git a/tokio/src/util/rand.rs b/tokio/src/util/rand.rs index d96c8d37e0a..67c45693c9c 100644 --- a/tokio/src/util/rand.rs +++ b/tokio/src/util/rand.rs @@ -20,9 +20,9 @@ pub struct RngSeed { /// Fast random number generate. /// -/// Implement xorshift64+: 2 32-bit xorshift sequences added together. +/// Implement `xorshift64+`: 2 32-bit `xorshift` sequences added together. /// Shift triplet `[17,7,16]` was calculated as indicated in Marsaglia's -/// Xorshift paper: +/// `Xorshift` paper: /// This generator passes the SmallCrush suite, part of TestU01 framework: /// #[derive(Clone, Copy, Debug)] diff --git a/tokio/src/util/sharded_list.rs b/tokio/src/util/sharded_list.rs index c1009db94c9..4da0bcdf7f1 100644 --- a/tokio/src/util/sharded_list.rs +++ b/tokio/src/util/sharded_list.rs @@ -56,7 +56,7 @@ pub(crate) struct ShardGuard<'a, L, T> { } impl ShardedList { - /// Removes the last element from a list specified by shard_id and returns it, or None if it is + /// Removes the last element from a list specified by `shard_id` and returns it, or None if it is /// empty. 
pub(crate) fn pop_back(&self, shard_id: usize) -> Option { let mut lock = self.shard_inner(shard_id); @@ -87,7 +87,7 @@ impl ShardedList { node } - /// Gets the lock of ShardedList, makes us have the write permission. + /// Gets the lock of `ShardedList`, makes us have the write permission. pub(crate) fn lock_shard(&self, val: &L::Handle) -> ShardGuard<'_, L, L::Target> { let id = unsafe { L::get_shard_id(L::as_raw(val)) }; ShardGuard { @@ -107,7 +107,7 @@ impl ShardedList { self.len() == 0 } - /// Gets the shard size of this SharedList. + /// Gets the shard size of this `SharedList`. /// /// Used to help us to decide the parameter `shard_id` of the `pop_back` method. pub(crate) fn shard_size(&self) -> usize { From 9077762545da312c3527ba45b0bba37ba4b4fb7f Mon Sep 17 00:00:00 2001 From: "M.Amin Rayej" Date: Tue, 30 Jan 2024 21:46:37 +0330 Subject: [PATCH 038/162] net: expose keepalive option on `TcpSocket` (#6311) --- tokio/src/net/tcp/socket.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tokio/src/net/tcp/socket.rs b/tokio/src/net/tcp/socket.rs index 679e95866c5..c528a14fe8e 100644 --- a/tokio/src/net/tcp/socket.rs +++ b/tokio/src/net/tcp/socket.rs @@ -185,6 +185,16 @@ impl TcpSocket { Ok(TcpSocket { inner }) } + /// Sets value for the `SO_KEEPALIVE` option on this socket. + pub fn set_keepalive(&self, keepalive: bool) -> io::Result<()> { + self.inner.set_keepalive(keepalive) + } + + /// Gets the value of the `SO_KEEPALIVE` option on this socket. + pub fn keepalive(&self) -> io::Result { + self.inner.keepalive() + } + /// Allows the socket to bind to an in-use address. /// /// Behavior is platform specific. 
Refer to the target platform's From 53f9e5a357c7bc30b01c738d325b4f4a9acc32a6 Mon Sep 17 00:00:00 2001 From: "M.Amin Rayej" Date: Wed, 31 Jan 2024 16:11:54 +0330 Subject: [PATCH 039/162] ci: make sure dictionary words are sorted and unique (#6316) --- .github/workflows/ci.yml | 13 +++++++++++++ spellcheck.dic | 40 ++++++++++++++++++++-------------------- 2 files changed, 33 insertions(+), 20 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c0e728e63b0..44cf3f6cd52 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1011,6 +1011,19 @@ jobs: with: tool: cargo-spellcheck - uses: actions/checkout@v4 + - name: Make sure dictionary words are sorted and unique + run: | + # `sed` removes the first line (number of words) and + # the last line (new line). + # + # `sort` makes sure everything in between is sorted + # and contains no duplicates. + # + # Since `sort` is sensitive to locale, we set it + # using LC_ALL to en_US.UTF8 to be consistent in different + # environments. 
+ + sed '1d; $d' spellcheck.dic | LC_ALL=en_US.UTF8 sort -uc - name: Run cargo-spellcheck run: cargo spellcheck --code 1 diff --git a/spellcheck.dic b/spellcheck.dic index ddedb90730d..ea73532afc1 100644 --- a/spellcheck.dic +++ b/spellcheck.dic @@ -1,30 +1,30 @@ 279 +& ++ +< += +> \ ~ -~4 -~12 -±1m -±1ms — -& -+ 0o777 0s 0xA 0xD 100ms 10ms +~12 +±1m +±1ms 1ms 1s 250ms 2x +~4 443 450ms 50ms 8MB -< -= -> adaptor adaptors Adaptors @@ -56,14 +56,14 @@ codec codecs combinator combinators -Config config +Config connectionless cpu cpus Customizable -Datagram datagram +Datagram datagrams deallocate deallocated @@ -73,8 +73,8 @@ dequeued deregister deregistered deregistering -Deregisters deregisters +Deregisters deregistration descriptor's destructor @@ -122,8 +122,8 @@ implementor implementors incrementing interoperate -Invariants invariants +Invariants io IOCP iOS @@ -149,8 +149,8 @@ misconfigured mock's mpmc mpsc -Multi multi +Multi multicast Multithreaded mut @@ -191,8 +191,8 @@ resize resized RMW runtime -runtimes runtime's +runtimes rwlock rx scalability @@ -229,8 +229,8 @@ symlinks sys syscall syscalls -TCP tcp +TCP TestU01 threadpool timestamp @@ -238,15 +238,15 @@ timestamps TLS TOCTOU TODO -Tokio tokio +Tokio tokio's Tokio's -Tuple tuple +Tuple tx -UDP udp +UDP UID unhandled unix @@ -255,8 +255,8 @@ unpark Unpark unparked unparking -Unparks unparks +Unparks unreceived unsafety Unsets From eaf81ed324e7cca0fa9b497a6747746da37eea93 Mon Sep 17 00:00:00 2001 From: "M.Amin Rayej" Date: Fri, 2 Feb 2024 14:43:46 +0330 Subject: [PATCH 040/162] chore: prepare Tokio v1.36.0 (#6312) --- README.md | 2 +- tokio/CHANGELOG.md | 59 ++++++++++++++++++++++++++++++++++++++++++++++ tokio/Cargo.toml | 2 +- tokio/README.md | 2 +- 4 files changed, 62 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 00979b4dfa3..ec947925d59 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,7 @@ Make sure you activated the full features of the tokio crate on Cargo.toml: ```toml 
[dependencies] -tokio = { version = "1.35.1", features = ["full"] } +tokio = { version = "1.36.0", features = ["full"] } ``` Then, on your main.rs: diff --git a/tokio/CHANGELOG.md b/tokio/CHANGELOG.md index 1b04986af8b..9d122cdb71e 100644 --- a/tokio/CHANGELOG.md +++ b/tokio/CHANGELOG.md @@ -1,3 +1,62 @@ +# 1.36.0 (February 2nd, 2024) + +### Added + +- io: add `tokio::io::Join` ([#6220]) +- io: implement `AsyncWrite` for `Empty` ([#6235]) +- net: add support for anonymous unix pipes ([#6127]) +- net: add `UnixSocket` ([#6290]) +- net: expose keepalive option on `TcpSocket` ([#6311]) +- sync: add `{Receiver,UnboundedReceiver}::poll_recv_many` ([#6236]) +- sync: add `Sender::{try_,}reserve_many` ([#6205]) +- sync: add `watch::Receiver::mark_unchanged` ([#6252]) +- task: add `JoinSet::try_join_next` ([#6280]) +- time: add `FutureExt::timeout` ([#6276]) + +### Changed + +- io: make `copy` cooperative ([#6265]) +- io: make `repeat` and `sink` cooperative ([#6254]) +- io: simplify check for empty slice ([#6293]) +- rt: improve robustness of `wake_in_drop_after_panic` test ([#6238]) +- process: use pidfd on Linux when available ([#6152]) +- sync: use AtomicBool in broadcast channel future ([#6298]) + +### Fixed + +- chore: typographic improvements ([#6262]) +- runtime: remove obsolete comment ([#6303]) +- task: fix typo ([#6261]) + +### Documented + +- io: clarify `clear_ready` docs ([#6304]) +- net: document that `*Fd` traits on `TcpSocket` are unix-only ([#6294]) +- sync: document FIFO behavior of `tokio::sync::Mutex` ([#6279]) + +[#6220]: https://github.com/tokio-rs/tokio/pull/6220 +[#6235]: https://github.com/tokio-rs/tokio/pull/6235 +[#6127]: https://github.com/tokio-rs/tokio/pull/6127 +[#6290]: https://github.com/tokio-rs/tokio/pull/6290 +[#6311]: https://github.com/tokio-rs/tokio/pull/6311 +[#6236]: https://github.com/tokio-rs/tokio/pull/6236 +[#6205]: https://github.com/tokio-rs/tokio/pull/6205 +[#6252]: https://github.com/tokio-rs/tokio/pull/6252 +[#6280]: 
https://github.com/tokio-rs/tokio/pull/6280 +[#6276]: https://github.com/tokio-rs/tokio/pull/6276 +[#6265]: https://github.com/tokio-rs/tokio/pull/6265 +[#6254]: https://github.com/tokio-rs/tokio/pull/6254 +[#6293]: https://github.com/tokio-rs/tokio/pull/6293 +[#6238]: https://github.com/tokio-rs/tokio/pull/6238 +[#6152]: https://github.com/tokio-rs/tokio/pull/6152 +[#6298]: https://github.com/tokio-rs/tokio/pull/6298 +[#6262]: https://github.com/tokio-rs/tokio/pull/6262 +[#6303]: https://github.com/tokio-rs/tokio/pull/6303 +[#6261]: https://github.com/tokio-rs/tokio/pull/6261 +[#6304]: https://github.com/tokio-rs/tokio/pull/6304 +[#6294]: https://github.com/tokio-rs/tokio/pull/6294 +[#6279]: https://github.com/tokio-rs/tokio/pull/6279 + # 1.35.1 (December 19, 2023) This is a forward part of a change that was backported to 1.25.3. diff --git a/tokio/Cargo.toml b/tokio/Cargo.toml index 2efbca02dbc..194494dabf7 100644 --- a/tokio/Cargo.toml +++ b/tokio/Cargo.toml @@ -6,7 +6,7 @@ name = "tokio" # - README.md # - Update CHANGELOG.md. # - Create "v1.x.y" git tag. 
-version = "1.35.1" +version = "1.36.0" edition = "2021" rust-version = "1.63" authors = ["Tokio Contributors "] diff --git a/tokio/README.md b/tokio/README.md index 00979b4dfa3..ec947925d59 100644 --- a/tokio/README.md +++ b/tokio/README.md @@ -56,7 +56,7 @@ Make sure you activated the full features of the tokio crate on Cargo.toml: ```toml [dependencies] -tokio = { version = "1.35.1", features = ["full"] } +tokio = { version = "1.36.0", features = ["full"] } ``` Then, on your main.rs: From cb197def683277cb7ae1e00f13398b49ac225f75 Mon Sep 17 00:00:00 2001 From: Augusto Hack Date: Fri, 2 Feb 2024 13:29:35 +0100 Subject: [PATCH 041/162] docs: transition_to_idle doesn't return boolean (#6320) --- tokio/src/runtime/task/state.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/tokio/src/runtime/task/state.rs b/tokio/src/runtime/task/state.rs index 9f1a8ec5a14..42b239e05bb 100644 --- a/tokio/src/runtime/task/state.rs +++ b/tokio/src/runtime/task/state.rs @@ -141,7 +141,6 @@ impl State { /// Transitions the task from `Running` -> `Idle`. /// - /// Returns `true` if the transition to `Idle` is successful, `false` otherwise. /// The transition to `Idle` fails if the task has been flagged to be /// cancelled. pub(super) fn transition_to_idle(&self) -> TransitionToIdle { From 0b31b2a19531f969a172bca77f2fd80d0d3b54ed Mon Sep 17 00:00:00 2001 From: erikdesjardins Date: Fri, 2 Feb 2024 07:30:49 -0500 Subject: [PATCH 042/162] task: implement `FromIterator` for `JoinSet` (#6300) --- tokio/src/task/join_set.rs | 43 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/tokio/src/task/join_set.rs b/tokio/src/task/join_set.rs index 7aace14d850..c9251db86d7 100644 --- a/tokio/src/task/join_set.rs +++ b/tokio/src/task/join_set.rs @@ -526,6 +526,49 @@ impl Default for JoinSet { } } +/// Collect an iterator of futures into a [`JoinSet`]. +/// +/// This is equivalent to calling [`JoinSet::spawn`] on each element of the iterator. 
+/// +/// # Examples +/// +/// The main example from [`JoinSet`]'s documentation can also be written using [`collect`]: +/// +/// ``` +/// use tokio::task::JoinSet; +/// +/// #[tokio::main] +/// async fn main() { +/// let mut set: JoinSet<_> = (0..10).map(|i| async move { i }).collect(); +/// +/// let mut seen = [false; 10]; +/// while let Some(res) = set.join_next().await { +/// let idx = res.unwrap(); +/// seen[idx] = true; +/// } +/// +/// for i in 0..10 { +/// assert!(seen[i]); +/// } +/// } +/// ``` +/// +/// [`collect`]: std::iter::Iterator::collect +impl std::iter::FromIterator for JoinSet +where + F: Future, + F: Send + 'static, + T: Send + 'static, +{ + fn from_iter>(iter: I) -> Self { + let mut set = Self::new(); + iter.into_iter().for_each(|task| { + set.spawn(task); + }); + set + } +} + // === impl Builder === #[cfg(all(tokio_unstable, feature = "tracing"))] From 63caced26f07240fa2751cefccee86cc342d3581 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Sat, 3 Feb 2024 21:57:58 +0100 Subject: [PATCH 043/162] tokio: update CHANGELOG.md (#6324) --- tokio/CHANGELOG.md | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/tokio/CHANGELOG.md b/tokio/CHANGELOG.md index 9d122cdb71e..d9b17bfbf8a 100644 --- a/tokio/CHANGELOG.md +++ b/tokio/CHANGELOG.md @@ -11,28 +11,23 @@ - sync: add `Sender::{try_,}reserve_many` ([#6205]) - sync: add `watch::Receiver::mark_unchanged` ([#6252]) - task: add `JoinSet::try_join_next` ([#6280]) -- time: add `FutureExt::timeout` ([#6276]) ### Changed - io: make `copy` cooperative ([#6265]) - io: make `repeat` and `sink` cooperative ([#6254]) - io: simplify check for empty slice ([#6293]) -- rt: improve robustness of `wake_in_drop_after_panic` test ([#6238]) - process: use pidfd on Linux when available ([#6152]) - sync: use AtomicBool in broadcast channel future ([#6298]) -### Fixed - -- chore: typographic improvements ([#6262]) -- runtime: remove obsolete comment ([#6303]) -- task: fix typo ([#6261]) - ### 
Documented - io: clarify `clear_ready` docs ([#6304]) - net: document that `*Fd` traits on `TcpSocket` are unix-only ([#6294]) - sync: document FIFO behavior of `tokio::sync::Mutex` ([#6279]) +- chore: typographic improvements ([#6262]) +- runtime: remove obsolete comment ([#6303]) +- task: fix typo ([#6261]) [#6220]: https://github.com/tokio-rs/tokio/pull/6220 [#6235]: https://github.com/tokio-rs/tokio/pull/6235 @@ -43,7 +38,6 @@ [#6205]: https://github.com/tokio-rs/tokio/pull/6205 [#6252]: https://github.com/tokio-rs/tokio/pull/6252 [#6280]: https://github.com/tokio-rs/tokio/pull/6280 -[#6276]: https://github.com/tokio-rs/tokio/pull/6276 [#6265]: https://github.com/tokio-rs/tokio/pull/6265 [#6254]: https://github.com/tokio-rs/tokio/pull/6254 [#6293]: https://github.com/tokio-rs/tokio/pull/6293 From fbdf539ac22563c6b8e3fb3f1844055baba12470 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Mon, 5 Feb 2024 08:46:04 +0100 Subject: [PATCH 044/162] sync: make downgrade must_use (#6326) --- tokio/src/sync/mpsc/bounded.rs | 1 + tokio/src/sync/mpsc/unbounded.rs | 1 + 2 files changed, 2 insertions(+) diff --git a/tokio/src/sync/mpsc/bounded.rs b/tokio/src/sync/mpsc/bounded.rs index a1e0a82d9e2..3cdba3dc237 100644 --- a/tokio/src/sync/mpsc/bounded.rs +++ b/tokio/src/sync/mpsc/bounded.rs @@ -1367,6 +1367,7 @@ impl Sender { /// towards RAII semantics, i.e. if all `Sender` instances of the /// channel were dropped and only `WeakSender` instances remain, /// the channel is closed. + #[must_use = "Downgrade creates a WeakSender without destroying the original non-weak sender."] pub fn downgrade(&self) -> WeakSender { WeakSender { chan: self.chan.downgrade(), diff --git a/tokio/src/sync/mpsc/unbounded.rs b/tokio/src/sync/mpsc/unbounded.rs index 7dff942ee70..b87b07ba653 100644 --- a/tokio/src/sync/mpsc/unbounded.rs +++ b/tokio/src/sync/mpsc/unbounded.rs @@ -572,6 +572,7 @@ impl UnboundedSender { /// towards RAII semantics, i.e. 
if all `UnboundedSender` instances of the /// channel were dropped and only `WeakUnboundedSender` instances remain, /// the channel is closed. + #[must_use = "Downgrade creates a WeakUnboundedSender without destroying the original non-weak sender."] pub fn downgrade(&self) -> WeakUnboundedSender<T> { WeakUnboundedSender { chan: self.chan.downgrade(), From 47a5fe3a12cb8f92a776fe2469899a8cdc338a46 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Mon, 5 Feb 2024 16:59:04 +0100 Subject: [PATCH 045/162] metrics: fix worker_steal_count test hanging (#6327) --- tokio/tests/rt_metrics.rs | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/tokio/tests/rt_metrics.rs b/tokio/tests/rt_metrics.rs index 38bc18f7b6f..ec4856cb5c2 100644 --- a/tokio/tests/rt_metrics.rs +++ b/tokio/tests/rt_metrics.rs @@ -181,7 +181,7 @@ fn worker_steal_count() { // We use a blocking channel to backup one worker thread. use std::sync::mpsc::channel; - let rt = threaded(); + let rt = threaded_no_lifo(); let metrics = rt.metrics(); rt.block_on(async { @@ -190,14 +190,12 @@ fn worker_steal_count() { // Move to the runtime. tokio::spawn(async move { // Spawn the task that sends to the channel + // + // Since the lifo slot is disabled, this task is stealable. tokio::spawn(async move { tx.send(()).unwrap(); }); - // Spawn a task that bumps the previous task out of the "next - // scheduled" slot. - tokio::spawn(async {}); - // Blocking receive on the channel. 
rx.recv().unwrap(); }) @@ -729,6 +727,15 @@ fn threaded() -> Runtime { .unwrap() } +fn threaded_no_lifo() -> Runtime { + tokio::runtime::Builder::new_multi_thread() + .worker_threads(2) + .disable_lifo_slot() + .enable_all() + .build() + .unwrap() +} + fn us(n: u64) -> Duration { Duration::from_micros(n) } From 10c9eeb6c2af85961044b7cbb16a5a2d2e97287d Mon Sep 17 00:00:00 2001 From: Jack Wrenn Date: Mon, 5 Feb 2024 16:53:20 -0500 Subject: [PATCH 046/162] runtime: include task `Id` in taskdumps (#6328) Task `Id`s provide a semi-stable identifier for monitoring task state across task dumps. Fixes #6313 --- examples/dump.rs | 5 +++-- tokio/src/runtime/dump.rs | 20 ++++++++++++++++++- .../runtime/scheduler/current_thread/mod.rs | 2 +- .../scheduler/multi_thread/worker/taskdump.rs | 2 +- tokio/src/runtime/task/mod.rs | 11 ++++++++++ tokio/src/runtime/task/trace/mod.rs | 10 ++++++---- tokio/tests/dump.rs | 6 ++++-- 7 files changed, 45 insertions(+), 11 deletions(-) diff --git a/examples/dump.rs b/examples/dump.rs index 4d8ff19c065..c7ece458ff8 100644 --- a/examples/dump.rs +++ b/examples/dump.rs @@ -47,9 +47,10 @@ async fn main() -> Result<(), Box> { // capture a dump, and print each trace println!("{:-<80}", ""); if let Ok(dump) = timeout(Duration::from_secs(2), handle.dump()).await { - for (i, task) in dump.tasks().iter().enumerate() { + for task in dump.tasks().iter() { + let id = task.id(); let trace = task.trace(); - println!("TASK {i}:"); + println!("TASK {id}:"); println!("{trace}\n"); } } else { diff --git a/tokio/src/runtime/dump.rs b/tokio/src/runtime/dump.rs index 994b7f9c015..aea2381127b 100644 --- a/tokio/src/runtime/dump.rs +++ b/tokio/src/runtime/dump.rs @@ -2,6 +2,7 @@ //! //! See [Handle::dump][crate::runtime::Handle::dump]. +use crate::task::Id; use std::fmt; /// A snapshot of a runtime's state. @@ -25,6 +26,7 @@ pub struct Tasks { /// See [Handle::dump][crate::runtime::Handle::dump]. 
#[derive(Debug)] pub struct Task { + id: Id, trace: Trace, } @@ -57,12 +59,28 @@ impl Tasks { } impl Task { - pub(crate) fn new(trace: super::task::trace::Trace) -> Self { + pub(crate) fn new(id: Id, trace: super::task::trace::Trace) -> Self { Self { + id, trace: Trace { inner: trace }, } } + /// Returns a [task ID] that uniquely identifies this task relative to other + /// tasks spawned at the time of the dump. + /// + /// **Note**: This is an [unstable API][unstable]. The public API of this type + /// may break in 1.x releases. See [the documentation on unstable + /// features][unstable] for details. + /// + /// [task ID]: crate::task::Id + /// [unstable]: crate#unstable-features + #[cfg(tokio_unstable)] + #[cfg_attr(docsrs, doc(cfg(tokio_unstable)))] + pub fn id(&self) -> Id { + self.id + } + /// A trace of this task's state. pub fn trace(&self) -> &Trace { &self.trace diff --git a/tokio/src/runtime/scheduler/current_thread/mod.rs b/tokio/src/runtime/scheduler/current_thread/mod.rs index 55a43970195..36bcefc4406 100644 --- a/tokio/src/runtime/scheduler/current_thread/mod.rs +++ b/tokio/src/runtime/scheduler/current_thread/mod.rs @@ -470,7 +470,7 @@ impl Handle { traces = trace_current_thread(&self.shared.owned, local, &self.shared.inject) .into_iter() - .map(dump::Task::new) + .map(|(id, trace)| dump::Task::new(id, trace)) .collect(); // Avoid double borrow panic diff --git a/tokio/src/runtime/scheduler/multi_thread/worker/taskdump.rs b/tokio/src/runtime/scheduler/multi_thread/worker/taskdump.rs index d310d9f6d35..312673034d3 100644 --- a/tokio/src/runtime/scheduler/multi_thread/worker/taskdump.rs +++ b/tokio/src/runtime/scheduler/multi_thread/worker/taskdump.rs @@ -42,7 +42,7 @@ impl Handle { // was created with. 
let traces = unsafe { trace_multi_thread(owned, &mut local, synced, injection) } .into_iter() - .map(dump::Task::new) + .map(|(id, trace)| dump::Task::new(id, trace)) .collect(); let result = dump::Dump::new(traces); diff --git a/tokio/src/runtime/task/mod.rs b/tokio/src/runtime/task/mod.rs index 6b05f4d7d5c..aa799bf2be1 100644 --- a/tokio/src/runtime/task/mod.rs +++ b/tokio/src/runtime/task/mod.rs @@ -376,6 +376,17 @@ impl Task { None } } + + /// Returns a [task ID] that uniquely identifies this task relative to other + /// currently spawned tasks. + /// + /// [task ID]: crate::task::Id + #[cfg(tokio_unstable)] + #[cfg_attr(docsrs, doc(cfg(tokio_unstable)))] + pub(crate) fn id(&self) -> crate::task::Id { + // Safety: The header pointer is valid. + unsafe { Header::get_id(self.raw.header_ptr()) } + } } } diff --git a/tokio/src/runtime/task/trace/mod.rs b/tokio/src/runtime/task/trace/mod.rs index 7c9acc035af..ec2e8432216 100644 --- a/tokio/src/runtime/task/trace/mod.rs +++ b/tokio/src/runtime/task/trace/mod.rs @@ -1,6 +1,7 @@ use crate::loom::sync::Arc; use crate::runtime::context; use crate::runtime::scheduler::{self, current_thread, Inject}; +use crate::task::Id; use backtrace::BacktraceFrame; use std::cell::Cell; @@ -270,7 +271,7 @@ pub(in crate::runtime) fn trace_current_thread( owned: &OwnedTasks>, local: &mut VecDeque>>, injection: &Inject>, -) -> Vec { +) -> Vec<(Id, Trace)> { // clear the local and injection queues let mut dequeued = Vec::new(); @@ -303,7 +304,7 @@ cfg_rt_multi_thread! { local: &mut multi_thread::queue::Local>, synced: &Mutex, injection: &Shared>, - ) -> Vec { + ) -> Vec<(Id, Trace)> { let mut dequeued = Vec::new(); // clear the local queue @@ -331,7 +332,7 @@ cfg_rt_multi_thread! { /// /// This helper presumes exclusive access to each task. The tasks must not exist /// in any other queue. 
-fn trace_owned(owned: &OwnedTasks, dequeued: Vec>) -> Vec { +fn trace_owned(owned: &OwnedTasks, dequeued: Vec>) -> Vec<(Id, Trace)> { let mut tasks = dequeued; // Notify and trace all un-notified tasks. The dequeued tasks are already // notified and so do not need to be re-notified. @@ -351,8 +352,9 @@ fn trace_owned(owned: &OwnedTasks, dequeued: Vec>) - .into_iter() .map(|task| { let local_notified = owned.assert_owner(task); + let id = local_notified.task.id(); let ((), trace) = Trace::capture(|| local_notified.run()); - trace + (id, trace) }) .collect() } diff --git a/tokio/tests/dump.rs b/tokio/tests/dump.rs index ecb4495b33e..c946f38436c 100644 --- a/tokio/tests/dump.rs +++ b/tokio/tests/dump.rs @@ -41,8 +41,9 @@ fn current_thread() { assert_eq!(tasks.len(), 3); for task in tasks { + let id = task.id(); let trace = task.trace().to_string(); - eprintln!("\n\n{trace}\n\n"); + eprintln!("\n\n{id}:\n{trace}\n\n"); assert!(trace.contains("dump::a")); assert!(trace.contains("dump::b")); assert!(trace.contains("dump::c")); @@ -78,8 +79,9 @@ fn multi_thread() { assert_eq!(tasks.len(), 3); for task in tasks { + let id = task.id(); let trace = task.trace().to_string(); - eprintln!("\n\n{trace}\n\n"); + eprintln!("\n\n{id}:\n{trace}\n\n"); assert!(trace.contains("dump::a")); assert!(trace.contains("dump::b")); assert!(trace.contains("dump::c")); From 0fbde0e94b06536917b6686e996856a33aeb29ee Mon Sep 17 00:00:00 2001 From: kim / Motoyuki Kimura Date: Thu, 8 Feb 2024 23:10:55 +0900 Subject: [PATCH 047/162] sync: add `forget_permits` method to semaphore (#6331) --- tokio/src/sync/batch_semaphore.rs | 25 +++++++++++++++++ tokio/src/sync/semaphore.rs | 8 ++++++ tokio/src/sync/tests/loom_semaphore_batch.rs | 28 ++++++++++++++++++++ tokio/src/sync/tests/semaphore_batch.rs | 21 +++++++++++++++ 4 files changed, 82 insertions(+) diff --git a/tokio/src/sync/batch_semaphore.rs b/tokio/src/sync/batch_semaphore.rs index aa23dea7d3c..d7eb1d6b77e 100644 --- 
a/tokio/src/sync/batch_semaphore.rs +++ b/tokio/src/sync/batch_semaphore.rs @@ -368,6 +368,31 @@ impl Semaphore { assert_eq!(rem, 0); } + /// Decrease a semaphore's permits by a maximum of `n`. + /// + /// If there are insufficient permits and it's not possible to reduce by `n`, + /// return the number of permits that were actually reduced. + pub(crate) fn forget_permits(&self, n: usize) -> usize { + if n == 0 { + return 0; + } + + let mut curr_bits = self.permits.load(Acquire); + loop { + let curr = curr_bits >> Self::PERMIT_SHIFT; + let new = curr.saturating_sub(n); + match self.permits.compare_exchange_weak( + curr_bits, + new << Self::PERMIT_SHIFT, + AcqRel, + Acquire, + ) { + Ok(_) => return std::cmp::min(curr, n), + Err(actual) => curr_bits = actual, + }; + } + } + fn poll_acquire( &self, cx: &mut Context<'_>, diff --git a/tokio/src/sync/semaphore.rs b/tokio/src/sync/semaphore.rs index 25e4134373c..d0ee12591ee 100644 --- a/tokio/src/sync/semaphore.rs +++ b/tokio/src/sync/semaphore.rs @@ -481,6 +481,14 @@ impl Semaphore { self.ll_sem.release(n); } + /// Decrease a semaphore's permits by a maximum of `n`. + /// + /// If there are insufficient permits and it's not possible to reduce by `n`, + /// return the number of permits that were actually reduced. + pub fn forget_permits(&self, n: usize) -> usize { + self.ll_sem.forget_permits(n) + } + /// Acquires a permit from the semaphore. /// /// If the semaphore has been closed, this returns an [`AcquireError`]. 
diff --git a/tokio/src/sync/tests/loom_semaphore_batch.rs b/tokio/src/sync/tests/loom_semaphore_batch.rs index 76a1bc00626..85cd584d2d4 100644 --- a/tokio/src/sync/tests/loom_semaphore_batch.rs +++ b/tokio/src/sync/tests/loom_semaphore_batch.rs @@ -213,3 +213,31 @@ fn release_during_acquire() { assert_eq!(10, semaphore.available_permits()); }) } + +#[test] +fn concurrent_permit_updates() { + loom::model(move || { + let semaphore = Arc::new(Semaphore::new(5)); + let t1 = { + let semaphore = semaphore.clone(); + thread::spawn(move || semaphore.release(3)) + }; + let t2 = { + let semaphore = semaphore.clone(); + thread::spawn(move || { + semaphore + .try_acquire(1) + .expect("try_acquire should succeed") + }) + }; + let t3 = { + let semaphore = semaphore.clone(); + thread::spawn(move || semaphore.forget_permits(2)) + }; + + t1.join().unwrap(); + t2.join().unwrap(); + t3.join().unwrap(); + assert_eq!(semaphore.available_permits(), 5); + }) +} diff --git a/tokio/src/sync/tests/semaphore_batch.rs b/tokio/src/sync/tests/semaphore_batch.rs index 391797b3f66..09610ce71f2 100644 --- a/tokio/src/sync/tests/semaphore_batch.rs +++ b/tokio/src/sync/tests/semaphore_batch.rs @@ -287,3 +287,24 @@ fn release_permits_at_drop() { assert!(fut.as_mut().poll(&mut cx).is_pending()); } } + +#[test] +fn forget_permits_basic() { + let s = Semaphore::new(10); + assert_eq!(s.forget_permits(4), 4); + assert_eq!(s.available_permits(), 6); + assert_eq!(s.forget_permits(10), 6); + assert_eq!(s.available_permits(), 0); +} + +#[test] +fn update_permits_many_times() { + let s = Semaphore::new(5); + let mut acquire = task::spawn(s.acquire(7)); + assert_pending!(acquire.poll()); + s.release(5); + assert_ready_ok!(acquire.poll()); + assert_eq!(s.available_permits(), 3); + assert_eq!(s.forget_permits(3), 3); + assert_eq!(s.available_permits(), 0); +} From e392c4ff1effb7b35459a0d915831aaf41184d78 Mon Sep 17 00:00:00 2001 From: Patrick McGleenon Date: Sat, 10 Feb 2024 09:45:40 +0000 Subject: [PATCH 
048/162] chore: update CI to clippy 1.76 (#6334) Co-authored-by: Rafael Bachmann --- .github/workflows/ci.yml | 2 +- CONTRIBUTING.md | 2 +- benches/copy.rs | 4 ++-- benches/rt_multi_threaded.rs | 4 ++-- benches/sync_broadcast.rs | 2 +- examples/custom-executor-tokio-context.rs | 2 +- stress-test/examples/simple_echo_tcp.rs | 2 +- tokio-util/src/codec/framed_impl.rs | 1 + tokio/src/net/udp.rs | 3 +++ tokio/src/net/unix/datagram/socket.rs | 2 ++ 10 files changed, 15 insertions(+), 9 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 44cf3f6cd52..b2934b822b9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -16,7 +16,7 @@ env: # Change to specific Rust release to pin rust_stable: stable rust_nightly: nightly-2023-10-21 - rust_clippy: '1.75' + rust_clippy: '1.76' # When updating this, also update: # - README.md # - tokio/README.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index cd6f651a2ae..7e9bb2c998e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -149,7 +149,7 @@ When updating this, also update: --> ``` -cargo +1.75 clippy --all --tests --all-features +cargo +1.76 clippy --all --tests --all-features ``` When building documentation normally, the markers that list the features diff --git a/benches/copy.rs b/benches/copy.rs index 478cd6e8a5a..5c4eab48943 100644 --- a/benches/copy.rs +++ b/benches/copy.rs @@ -64,7 +64,7 @@ impl SlowHddWriter { ) -> std::task::Poll> { // If we hit a service interval, the buffer can be cleared let res = self.service_intervals.poll_tick(cx).map(|_| Ok(())); - if let Poll::Ready(_) = res { + if res.is_ready() { self.buffer_used = 0; } res @@ -123,7 +123,7 @@ impl AsyncWrite for SlowHddWriter { cx: &mut std::task::Context<'_>, bufs: &[std::io::IoSlice<'_>], ) -> std::task::Poll> { - let writeable = bufs.into_iter().fold(0, |acc, buf| acc + buf.len()); + let writeable = bufs.iter().fold(0, |acc, buf| acc + buf.len()); self.write_bytes(cx, writeable) } diff --git 
a/benches/rt_multi_threaded.rs b/benches/rt_multi_threaded.rs index 2502b619c2d..d333ebdaaae 100644 --- a/benches/rt_multi_threaded.rs +++ b/benches/rt_multi_threaded.rs @@ -38,7 +38,7 @@ fn rt_multi_spawn_many_local(c: &mut Criterion) { }); } - let _ = rx.recv().unwrap(); + rx.recv().unwrap(); }); }) }); @@ -165,7 +165,7 @@ fn rt_multi_yield_many(c: &mut Criterion) { } for _ in 0..TASKS { - let _ = rx.recv().unwrap(); + rx.recv().unwrap(); } }) }); diff --git a/benches/sync_broadcast.rs b/benches/sync_broadcast.rs index 38a2141387b..a7dc2e37259 100644 --- a/benches/sync_broadcast.rs +++ b/benches/sync_broadcast.rs @@ -37,7 +37,7 @@ fn contention_impl(g: &mut BenchmarkGroup) { let mut rx = tx.subscribe(); let mut rng = rand::rngs::StdRng::seed_from_u64(n as u64); rt.spawn(async move { - while let Ok(_) = rx.recv().await { + while (rx.recv().await).is_ok() { let r = do_work(&mut rng); let _ = black_box(r); if wg.0.fetch_sub(1, Ordering::Relaxed) == 1 { diff --git a/examples/custom-executor-tokio-context.rs b/examples/custom-executor-tokio-context.rs index ae1cd2df2d5..25e54234b3c 100644 --- a/examples/custom-executor-tokio-context.rs +++ b/examples/custom-executor-tokio-context.rs @@ -23,7 +23,7 @@ fn main() { // Without the `HandleExt.wrap()` there would be a panic because there is // no timer running, since it would be referencing runtime r1. 
- let _ = rt1.block_on(rt2.wrap(async move { + rt1.block_on(rt2.wrap(async move { let listener = TcpListener::bind("0.0.0.0:0").await.unwrap(); println!("addr: {:?}", listener.local_addr()); tx.send(()).unwrap(); diff --git a/stress-test/examples/simple_echo_tcp.rs b/stress-test/examples/simple_echo_tcp.rs index 01e545026c5..3db32ff0898 100644 --- a/stress-test/examples/simple_echo_tcp.rs +++ b/stress-test/examples/simple_echo_tcp.rs @@ -42,7 +42,7 @@ fn main() { .write_all(one_mega_random_bytes.as_slice()) .await .unwrap(); - stream.read(&mut buff).await.unwrap(); + let _ = stream.read(&mut buff).await.unwrap(); } tx.send(()).unwrap(); }); diff --git a/tokio-util/src/codec/framed_impl.rs b/tokio-util/src/codec/framed_impl.rs index 8f3fa49b0d9..9a4e2a8f6b9 100644 --- a/tokio-util/src/codec/framed_impl.rs +++ b/tokio-util/src/codec/framed_impl.rs @@ -218,6 +218,7 @@ where // Make sure we've got room for at least one byte to read to ensure // that we don't get a spurious 0 that looks like EOF. state.buffer.reserve(1); + #[allow(clippy::blocks_in_conditions)] let bytect = match poll_read_buf(pinned.inner.as_mut(), cx, &mut state.buffer).map_err( |err| { trace!("Got an error, going to errored state"); diff --git a/tokio/src/net/udp.rs b/tokio/src/net/udp.rs index 4e2c140a856..03a66585047 100644 --- a/tokio/src/net/udp.rs +++ b/tokio/src/net/udp.rs @@ -804,6 +804,7 @@ impl UdpSocket { /// /// [`connect`]: method@Self::connect pub fn poll_recv(&self, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>) -> Poll> { + #[allow(clippy::blocks_in_conditions)] let n = ready!(self.io.registration().poll_read_io(cx, || { // Safety: will not read the maybe uninitialized bytes. let b = unsafe { @@ -1340,6 +1341,7 @@ impl UdpSocket { cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll> { + #[allow(clippy::blocks_in_conditions)] let (n, addr) = ready!(self.io.registration().poll_read_io(cx, || { // Safety: will not read the maybe uninitialized bytes. 
let b = unsafe { @@ -1595,6 +1597,7 @@ impl UdpSocket { cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll> { + #[allow(clippy::blocks_in_conditions)] let (n, addr) = ready!(self.io.registration().poll_read_io(cx, || { // Safety: will not read the maybe uninitialized bytes. let b = unsafe { diff --git a/tokio/src/net/unix/datagram/socket.rs b/tokio/src/net/unix/datagram/socket.rs index 0da20f81f53..d7786ca82d7 100644 --- a/tokio/src/net/unix/datagram/socket.rs +++ b/tokio/src/net/unix/datagram/socket.rs @@ -1161,6 +1161,7 @@ impl UnixDatagram { cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll> { + #[allow(clippy::blocks_in_conditions)] let (n, addr) = ready!(self.io.registration().poll_read_io(cx, || { // Safety: will not read the maybe uninitialized bytes. let b = unsafe { @@ -1262,6 +1263,7 @@ impl UnixDatagram { /// /// [`connect`]: method@Self::connect pub fn poll_recv(&self, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>) -> Poll> { + #[allow(clippy::blocks_in_conditions)] let n = ready!(self.io.registration().poll_read_io(cx, || { // Safety: will not read the maybe uninitialized bytes. let b = unsafe { From e25d661e08342ee14ea285e691ab2cf517ac72de Mon Sep 17 00:00:00 2001 From: "M.Amin Rayej" Date: Sun, 11 Feb 2024 02:38:44 +0330 Subject: [PATCH 049/162] util: add examples for FramedRead and FramedWrite (#6310) --- tokio-util/src/codec/framed_read.rs | 4 ++ tokio-util/src/codec/framed_write.rs | 4 ++ tokio-util/src/codec/mod.rs | 66 ++++++++++++++++++++++++++++ 3 files changed, 74 insertions(+) diff --git a/tokio-util/src/codec/framed_read.rs b/tokio-util/src/codec/framed_read.rs index 184c567b498..90ba5e7c9d0 100644 --- a/tokio-util/src/codec/framed_read.rs +++ b/tokio-util/src/codec/framed_read.rs @@ -14,8 +14,12 @@ use std::task::{Context, Poll}; pin_project! { /// A [`Stream`] of messages decoded from an [`AsyncRead`]. /// + /// For examples of how to use `FramedRead` with a codec, see the + /// examples on the [`codec`] module. 
+ /// /// [`Stream`]: futures_core::Stream /// [`AsyncRead`]: tokio::io::AsyncRead + /// [`codec`]: crate::codec pub struct FramedRead { #[pin] inner: FramedImpl, diff --git a/tokio-util/src/codec/framed_write.rs b/tokio-util/src/codec/framed_write.rs index 3f0a3408157..a7efaadd2b9 100644 --- a/tokio-util/src/codec/framed_write.rs +++ b/tokio-util/src/codec/framed_write.rs @@ -15,7 +15,11 @@ use std::task::{Context, Poll}; pin_project! { /// A [`Sink`] of frames encoded to an `AsyncWrite`. /// + /// For examples of how to use `FramedWrite` with a codec, see the + /// examples on the [`codec`] module. + /// /// [`Sink`]: futures_sink::Sink + /// [`codec`]: crate::codec pub struct FramedWrite { #[pin] inner: FramedImpl, diff --git a/tokio-util/src/codec/mod.rs b/tokio-util/src/codec/mod.rs index 98a2f724425..50f01c28fa7 100644 --- a/tokio-util/src/codec/mod.rs +++ b/tokio-util/src/codec/mod.rs @@ -7,6 +7,71 @@ //! [`AsyncWrite`], to framed streams implementing [`Sink`] and [`Stream`]. //! Framed streams are also known as transports. //! +//! # Example encoding using `LinesCodec` +//! +//! The following example demonstrates how to use a codec such as [`LinesCodec`] to +//! write framed data. [`FramedWrite`] can be used to achieve this. Data sent to +//! [`FramedWrite`] are first framed according to a specific codec, and then sent to +//! an implementor of [`AsyncWrite`]. +//! +//! ``` +//! use futures::sink::SinkExt; +//! use tokio_util::codec::LinesCodec; +//! use tokio_util::codec::FramedWrite; +//! +//! #[tokio::main] +//! async fn main() { +//! let buffer = Vec::new(); +//! let messages = vec!["Hello", "World"]; +//! let encoder = LinesCodec::new(); +//! +//! // FramedWrite is a sink which means you can send values into it +//! // asynchronously. +//! let mut writer = FramedWrite::new(buffer, encoder); +//! +//! // To be able to send values into a FramedWrite, you need to bring the +//! // `SinkExt` trait into scope. +//! 
writer.send(messages[0]).await.unwrap(); +//! writer.send(messages[1]).await.unwrap(); +//! +//! let buffer = writer.get_ref(); +//! +//! assert_eq!(buffer.as_slice(), "Hello\nWorld\n".as_bytes()); +//! } +//!``` +//! +//! # Example decoding using `LinesCodec` +//! The following example demonstrates how to use a codec such as [`LinesCodec`] to +//! read a stream of framed data. [`FramedRead`] can be used to achieve this. [`FramedRead`] +//! will keep reading from an [`AsyncRead`] implementor until a whole frame, according to a codec, +//! can be parsed. +//! +//!``` +//! use tokio_stream::StreamExt; +//! use tokio_util::codec::LinesCodec; +//! use tokio_util::codec::FramedRead; +//! +//! #[tokio::main] +//! async fn main() { +//! let message = "Hello\nWorld".as_bytes(); +//! let decoder = LinesCodec::new(); +//! +//! // FramedRead can be used to read a stream of values that are framed according to +//! // a codec. FramedRead will read from its input (here `buffer`) until a whole frame +//! // can be parsed. +//! let mut reader = FramedRead::new(message, decoder); +//! +//! // To read values from a FramedRead, you need to bring the +//! // `StreamExt` trait into scope. +//! let frame1 = reader.next().await.unwrap().unwrap(); +//! let frame2 = reader.next().await.unwrap().unwrap(); +//! +//! assert!(reader.next().await.is_none()); +//! assert_eq!(frame1, "Hello"); +//! assert_eq!(frame2, "World"); +//! } +//! ``` +//! //! # The Decoder trait //! //! A [`Decoder`] is used together with [`FramedRead`] or [`Framed`] to turn an @@ -248,6 +313,7 @@ //! [`AsyncWrite`]: tokio::io::AsyncWrite //! [`Stream`]: futures_core::Stream //! [`Sink`]: futures_sink::Sink +//! [`SinkExt`]: futures::sink::SinkExt //! [`SinkExt::close`]: https://docs.rs/futures/0.3/futures/sink/trait.SinkExt.html#method.close //! [`FramedRead`]: struct@crate::codec::FramedRead //! 
[`FramedWrite`]: struct@crate::codec::FramedWrite From db6929ad62a1cebb088d7b5c5c16d689c49449b9 Mon Sep 17 00:00:00 2001 From: Val Lorentz Date: Sun, 11 Feb 2024 15:08:01 +0100 Subject: [PATCH 050/162] task: fix documentation links (#6336) --- spellcheck.dic | 6 +++++- tokio/src/task/mod.rs | 4 ++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/spellcheck.dic b/spellcheck.dic index ea73532afc1..5a0745df32d 100644 --- a/spellcheck.dic +++ b/spellcheck.dic @@ -1,4 +1,4 @@ -279 +283 & + < @@ -59,6 +59,7 @@ combinators config Config connectionless +coroutines cpu cpus Customizable @@ -92,6 +93,7 @@ enqueued EntryInner enum eof +Erlang's errored EWMA expirations @@ -109,6 +111,7 @@ fs functionalities getters GID +goroutines Growable gzip hashmaps @@ -132,6 +135,7 @@ IP IPv4 IPv6 iteratively +Kotlin's latencies Lauck libc diff --git a/tokio/src/task/mod.rs b/tokio/src/task/mod.rs index 806b9aebe76..f45df10a982 100644 --- a/tokio/src/task/mod.rs +++ b/tokio/src/task/mod.rs @@ -5,8 +5,8 @@ //! A _task_ is a light weight, non-blocking unit of execution. A task is similar //! to an OS thread, but rather than being managed by the OS scheduler, they are //! managed by the [Tokio runtime][rt]. Another name for this general pattern is -//! [green threads]. If you are familiar with [`Go's goroutines`], [`Kotlin's -//! coroutines`], or [`Erlang's processes`], you can think of Tokio's tasks as +//! [green threads]. If you are familiar with [Go's goroutines], [Kotlin's +//! coroutines], or [Erlang's processes], you can think of Tokio's tasks as //! something similar. //! //! 
Key points about tasks include: From 84e41d4affe2f94d892c5ab2320db6d695fca536 Mon Sep 17 00:00:00 2001 From: "M.Amin Rayej" Date: Sun, 11 Feb 2024 20:45:13 +0330 Subject: [PATCH 051/162] io: document cancel safety of `AsyncReadExt`'s primitive read functions (#6337) --- tokio/src/io/util/async_read_ext.rs | 132 ++++++++++++++++++++++++++++ 1 file changed, 132 insertions(+) diff --git a/tokio/src/io/util/async_read_ext.rs b/tokio/src/io/util/async_read_ext.rs index 11bd42448ab..4007e4993ee 100644 --- a/tokio/src/io/util/async_read_ext.rs +++ b/tokio/src/io/util/async_read_ext.rs @@ -330,6 +330,12 @@ cfg_io_util! { /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// + /// # Cancel safety + /// + /// This method is cancel safe. If this method is used as an event in a + /// [`tokio::select!`](crate::select) statement and some other branch + /// completes first, it is guaranteed that no data were read. + /// /// # Examples /// /// Read unsigned 8 bit integers from an `AsyncRead`: @@ -368,6 +374,12 @@ cfg_io_util! { /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// + /// # Cancel safety + /// + /// This method is cancel safe. If this method is used as an event in a + /// [`tokio::select!`](crate::select) statement and some other branch + /// completes first, it is guaranteed that no data were read. + /// /// # Examples /// /// Read unsigned 8 bit integers from an `AsyncRead`: @@ -407,6 +419,12 @@ cfg_io_util! { /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// + /// # Cancel safety + /// + /// This method is not cancellation safe. If the method is used as the + /// event in a [`tokio::select!`](crate::select) statement and some + /// other branch completes first, then some data may be lost. + /// /// # Examples /// /// Read unsigned 16 bit big-endian integers from a `AsyncRead`: @@ -445,6 +463,12 @@ cfg_io_util! 
{ /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// + /// # Cancel safety + /// + /// This method is not cancellation safe. If the method is used as the + /// event in a [`tokio::select!`](crate::select) statement and some + /// other branch completes first, then some data may be lost. + /// /// # Examples /// /// Read signed 16 bit big-endian integers from a `AsyncRead`: @@ -483,6 +507,12 @@ cfg_io_util! { /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// + /// # Cancel safety + /// + /// This method is not cancellation safe. If the method is used as the + /// event in a [`tokio::select!`](crate::select) statement and some + /// other branch completes first, then some data may be lost. + /// /// # Examples /// /// Read unsigned 32-bit big-endian integers from a `AsyncRead`: @@ -521,6 +551,12 @@ cfg_io_util! { /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// + /// # Cancel safety + /// + /// This method is not cancellation safe. If the method is used as the + /// event in a [`tokio::select!`](crate::select) statement and some + /// other branch completes first, then some data may be lost. + /// /// # Examples /// /// Read signed 32-bit big-endian integers from a `AsyncRead`: @@ -558,6 +594,12 @@ cfg_io_util! { /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// + /// # Cancel safety + /// + /// This method is not cancellation safe. If the method is used as the + /// event in a [`tokio::select!`](crate::select) statement and some + /// other branch completes first, then some data may be lost. + /// /// # Examples /// /// Read unsigned 64-bit big-endian integers from a `AsyncRead`: @@ -597,6 +639,12 @@ cfg_io_util! { /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// + /// # Cancel safety + /// + /// This method is not cancellation safe. 
If the method is used as the + /// event in a [`tokio::select!`](crate::select) statement and some + /// other branch completes first, then some data may be lost. + /// /// # Examples /// /// Read signed 64-bit big-endian integers from a `AsyncRead`: @@ -634,6 +682,12 @@ cfg_io_util! { /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// + /// # Cancel safety + /// + /// This method is not cancellation safe. If the method is used as the + /// event in a [`tokio::select!`](crate::select) statement and some + /// other branch completes first, then some data may be lost. + /// /// # Examples /// /// Read unsigned 128-bit big-endian integers from a `AsyncRead`: @@ -674,6 +728,12 @@ cfg_io_util! { /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// + /// # Cancel safety + /// + /// This method is not cancellation safe. If the method is used as the + /// event in a [`tokio::select!`](crate::select) statement and some + /// other branch completes first, then some data may be lost. + /// /// # Examples /// /// Read signed 128-bit big-endian integers from a `AsyncRead`: @@ -714,6 +774,12 @@ cfg_io_util! { /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// + /// # Cancel safety + /// + /// This method is not cancellation safe. If the method is used as the + /// event in a [`tokio::select!`](crate::select) statement and some + /// other branch completes first, then some data may be lost. + /// /// # Examples /// /// Read 32-bit floating point type from a `AsyncRead`: @@ -751,6 +817,12 @@ cfg_io_util! { /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// + /// # Cancel safety + /// + /// This method is not cancellation safe. If the method is used as the + /// event in a [`tokio::select!`](crate::select) statement and some + /// other branch completes first, then some data may be lost. + /// /// # Examples /// /// Read 64-bit floating point type from a `AsyncRead`: @@ -790,6 +862,12 @@ cfg_io_util! 
{ /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// + /// # Cancel safety + /// + /// This method is not cancellation safe. If the method is used as the + /// event in a [`tokio::select!`](crate::select) statement and some + /// other branch completes first, then some data may be lost. + /// /// # Examples /// /// Read unsigned 16 bit little-endian integers from a `AsyncRead`: @@ -828,6 +906,12 @@ cfg_io_util! { /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// + /// # Cancel safety + /// + /// This method is not cancellation safe. If the method is used as the + /// event in a [`tokio::select!`](crate::select) statement and some + /// other branch completes first, then some data may be lost. + /// /// # Examples /// /// Read signed 16 bit little-endian integers from a `AsyncRead`: @@ -866,6 +950,12 @@ cfg_io_util! { /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// + /// # Cancel safety + /// + /// This method is not cancellation safe. If the method is used as the + /// event in a [`tokio::select!`](crate::select) statement and some + /// other branch completes first, then some data may be lost. + /// /// # Examples /// /// Read unsigned 32-bit little-endian integers from a `AsyncRead`: @@ -904,6 +994,12 @@ cfg_io_util! { /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// + /// # Cancel safety + /// + /// This method is not cancellation safe. If the method is used as the + /// event in a [`tokio::select!`](crate::select) statement and some + /// other branch completes first, then some data may be lost. + /// /// # Examples /// /// Read signed 32-bit little-endian integers from a `AsyncRead`: @@ -941,6 +1037,12 @@ cfg_io_util! { /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// + /// # Cancel safety + /// + /// This method is not cancellation safe. 
If the method is used as the + /// event in a [`tokio::select!`](crate::select) statement and some + /// other branch completes first, then some data may be lost. + /// /// # Examples /// /// Read unsigned 64-bit little-endian integers from a `AsyncRead`: @@ -980,6 +1082,12 @@ cfg_io_util! { /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// + /// # Cancel safety + /// + /// This method is not cancellation safe. If the method is used as the + /// event in a [`tokio::select!`](crate::select) statement and some + /// other branch completes first, then some data may be lost. + /// /// # Examples /// /// Read signed 64-bit little-endian integers from a `AsyncRead`: @@ -1017,6 +1125,12 @@ cfg_io_util! { /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// + /// # Cancel safety + /// + /// This method is not cancellation safe. If the method is used as the + /// event in a [`tokio::select!`](crate::select) statement and some + /// other branch completes first, then some data may be lost. + /// /// # Examples /// /// Read unsigned 128-bit little-endian integers from a `AsyncRead`: @@ -1057,6 +1171,12 @@ cfg_io_util! { /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// + /// # Cancel safety + /// + /// This method is not cancellation safe. If the method is used as the + /// event in a [`tokio::select!`](crate::select) statement and some + /// other branch completes first, then some data may be lost. + /// /// # Examples /// /// Read signed 128-bit little-endian integers from a `AsyncRead`: @@ -1097,6 +1217,12 @@ cfg_io_util! { /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// + /// # Cancel safety + /// + /// This method is not cancellation safe. If the method is used as the + /// event in a [`tokio::select!`](crate::select) statement and some + /// other branch completes first, then some data may be lost. 
+ /// /// # Examples /// /// Read 32-bit floating point type from a `AsyncRead`: @@ -1134,6 +1260,12 @@ cfg_io_util! { /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// + /// # Cancel safety + /// + /// This method is not cancellation safe. If the method is used as the + /// event in a [`tokio::select!`](crate::select) statement and some + /// other branch completes first, then some data may be lost. + /// /// # Examples /// /// Read 64-bit floating point type from a `AsyncRead`: From 2ce1cee0f9386bc808a32010339464ebbbd38e2d Mon Sep 17 00:00:00 2001 From: Gil Shoshan Date: Tue, 13 Feb 2024 10:28:16 +0200 Subject: [PATCH 052/162] macros: allow select with only else branch (#6339) --- tokio/src/macros/select.rs | 5 +++++ tokio/tests/macros_select.rs | 19 +++++++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/tokio/src/macros/select.rs b/tokio/src/macros/select.rs index 31c9b3ac2e5..8cf28405e7f 100644 --- a/tokio/src/macros/select.rs +++ b/tokio/src/macros/select.rs @@ -608,6 +608,10 @@ macro_rules! select { // ===== Entry point ===== + ($(biased;)? else => $else:expr $(,)? ) => {{ + $else + }}; + (biased; $p:pat = $($t:tt)* ) => { $crate::select!(@{ start=0; () } $p = $($t)*) }; @@ -617,6 +621,7 @@ macro_rules! select { // fair and avoids always polling the first future. $crate::select!(@{ start={ $crate::macros::support::thread_rng_n(BRANCHES) }; () } $p = $($t)*) }; + () => { compile_error!("select! requires at least one branch.") }; diff --git a/tokio/tests/macros_select.rs b/tokio/tests/macros_select.rs index 68a607b27f4..f65cbdf2267 100644 --- a/tokio/tests/macros_select.rs +++ b/tokio/tests/macros_select.rs @@ -22,6 +22,25 @@ async fn sync_one_lit_expr_comma() { assert_eq!(foo, 1); } +#[maybe_tokio_test] +async fn no_branch_else_only() { + let foo = tokio::select! { + else => 1, + }; + + assert_eq!(foo, 1); +} + +#[maybe_tokio_test] +async fn no_branch_else_only_biased() { + let foo = tokio::select! 
{ + biased; + else => 1, + }; + + assert_eq!(foo, 1); +} + #[maybe_tokio_test] async fn nested_one() { let foo = tokio::select! { From b32826bc937a34e4d871c89bb2c3711ed3e20cdc Mon Sep 17 00:00:00 2001 From: "M.Amin Rayej" Date: Thu, 15 Feb 2024 03:13:08 +0330 Subject: [PATCH 053/162] runtime: fix leaking registration entries when os registration fails (#6329) --- tokio/src/runtime/io/driver.rs | 13 +++++++++++-- tokio/src/runtime/io/registration_set.rs | 12 ++++++++++-- 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/tokio/src/runtime/io/driver.rs b/tokio/src/runtime/io/driver.rs index bece3560b72..0f7b1e57acb 100644 --- a/tokio/src/runtime/io/driver.rs +++ b/tokio/src/runtime/io/driver.rs @@ -220,8 +220,17 @@ impl Handle { let scheduled_io = self.registrations.allocate(&mut self.synced.lock())?; let token = scheduled_io.token(); - // TODO: if this returns an err, the `ScheduledIo` leaks... - self.registry.register(source, token, interest.to_mio())?; + // we should remove the `scheduled_io` from the `registrations` set if registering + // the `source` with the OS fails. Otherwise it will leak the `scheduled_io`. + if let Err(e) = self.registry.register(source, token, interest.to_mio()) { + // safety: `scheduled_io` is part of the `registrations` set. + unsafe { + self.registrations + .remove(&mut self.synced.lock(), &scheduled_io) + }; + + return Err(e); + } // TODO: move this logic to `RegistrationSet` and use a `CountedLinkedList` self.metrics.incr_fd_count(); diff --git a/tokio/src/runtime/io/registration_set.rs b/tokio/src/runtime/io/registration_set.rs index 028eb2ecdbe..1a8bd09c310 100644 --- a/tokio/src/runtime/io/registration_set.rs +++ b/tokio/src/runtime/io/registration_set.rs @@ -102,13 +102,21 @@ impl RegistrationSet { } pub(super) fn release(&self, synced: &mut Synced) { - for io in synced.pending_release.drain(..) 
{ + let pending = std::mem::take(&mut synced.pending_release); + + for io in pending { // safety: the registration is part of our list - let _ = unsafe { synced.registrations.remove(io.as_ref().into()) }; + unsafe { self.remove(synced, io.as_ref()) } } self.num_pending_release.store(0, Release); } + + // This function is marked as unsafe, because the caller must make sure that + // `io` is part of the registration set. + pub(super) unsafe fn remove(&self, synced: &mut Synced, io: &ScheduledIo) { + let _ = synced.registrations.remove(io.into()); + } } // Safety: `Arc` pins the inner data From 099ee23b65a2755589fa0543ebd7b30eb9fb823e Mon Sep 17 00:00:00 2001 From: qj <60492138+qrnch-jan@users.noreply.github.com> Date: Mon, 19 Feb 2024 12:37:09 +0100 Subject: [PATCH 054/162] docs: fix docsrs build without net feature (#6360) --- tokio/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/tokio/src/lib.rs b/tokio/src/lib.rs index 3f035098832..57b6560bf0d 100644 --- a/tokio/src/lib.rs +++ b/tokio/src/lib.rs @@ -630,6 +630,7 @@ pub mod stream {} #[cfg(docsrs)] pub mod doc; +#[cfg(feature = "net")] #[cfg(docsrs)] #[allow(unused)] pub(crate) use self::doc::os; From 94db07b379092ac49527d98166dab43fc1197f27 Mon Sep 17 00:00:00 2001 From: kim / Motoyuki Kimura Date: Wed, 21 Feb 2024 19:23:54 +0900 Subject: [PATCH 055/162] task: add `TaskLocalFuture::take_value` (#6340) --- tokio/src/task/task_local.rs | 44 ++++++++++++++++++++++++++++++++++++ tokio/tests/task_local.rs | 28 +++++++++++++++++++++++ 2 files changed, 72 insertions(+) diff --git a/tokio/src/task/task_local.rs b/tokio/src/task/task_local.rs index 4abeeb37e10..ba58ea6ae8b 100644 --- a/tokio/src/task/task_local.rs +++ b/tokio/src/task/task_local.rs @@ -332,6 +332,50 @@ pin_project! { } } +impl TaskLocalFuture +where + T: 'static, +{ + /// Returns the value stored in the task local by this `TaskLocalFuture`. + /// + /// The function returns: + /// + /// * `Some(T)` if the task local value exists. 
+ /// * `None` if the task local value has already been taken. + /// + /// Note that this function attempts to take the task local value even if + /// the future has not yet completed. In that case, the value will no longer + /// be available via the task local after the call to `take_value`. + /// + /// # Examples + /// + /// ``` + /// # async fn dox() { + /// tokio::task_local! { + /// static KEY: u32; + /// } + /// + /// let fut = KEY.scope(42, async { + /// // Do some async work + /// }); + /// + /// let mut pinned = Box::pin(fut); + /// + /// // Complete the TaskLocalFuture + /// let _ = pinned.as_mut().await; + /// + /// // And here, we can take task local value + /// let value = pinned.as_mut().take_value(); + /// + /// assert_eq!(value, Some(42)); + /// # } + /// ``` + pub fn take_value(self: Pin<&mut Self>) -> Option { + let this = self.project(); + this.slot.take() + } +} + impl Future for TaskLocalFuture { type Output = F::Output; diff --git a/tokio/tests/task_local.rs b/tokio/tests/task_local.rs index fbc885c3599..a4718dc45bb 100644 --- a/tokio/tests/task_local.rs +++ b/tokio/tests/task_local.rs @@ -117,3 +117,31 @@ async fn task_local_available_on_completion_drop() { assert_eq!(rx.await.unwrap(), 42); h.await.unwrap(); } + +#[tokio::test] +async fn take_value() { + tokio::task_local! { + static KEY: u32 + } + let fut = KEY.scope(1, async {}); + let mut pinned = Box::pin(fut); + assert_eq!(pinned.as_mut().take_value(), Some(1)); + assert_eq!(pinned.as_mut().take_value(), None); +} + +#[tokio::test] +async fn poll_after_take_value_should_fail() { + tokio::task_local! { + static KEY: u32 + } + let fut = KEY.scope(1, async { + let result = KEY.try_with(|_| {}); + // The task local value no longer exists. 
+ assert!(result.is_err()); + }); + let mut fut = Box::pin(fut); + fut.as_mut().take_value(); + + // Poll the future after `take_value` has been called + fut.await; +} From a2096049ee2b8e30b420ba7ecff6b81e609428b3 Mon Sep 17 00:00:00 2001 From: Patrick McGleenon Date: Fri, 23 Feb 2024 03:08:57 +0000 Subject: [PATCH 056/162] ci: downgrade to QEMU 7.2 for cross-compile builds (#6361) (#6363) --- .github/workflows/ci.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b2934b822b9..4374d99ff63 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -537,6 +537,7 @@ jobs: - uses: taiki-e/setup-cross-toolchain-action@v1 with: target: ${{ matrix.target }} + qemu: '7.2' - uses: Swatinem/rust-cache@v2 - name: Tests run with all features (including parking_lot) @@ -576,6 +577,7 @@ jobs: - uses: taiki-e/setup-cross-toolchain-action@v1 with: target: ${{ matrix.target }} + qemu: '7.2' - name: Remove `parking_lot` from `full` feature run: sed -i '0,/parking_lot/{/parking_lot/d;}' tokio/Cargo.toml @@ -612,6 +614,7 @@ jobs: - uses: taiki-e/setup-cross-toolchain-action@v1 with: target: i686-unknown-linux-gnu + qemu: '7.2' - uses: Swatinem/rust-cache@v2 - name: test tokio --all-features From 5658d7c5032ade1bd0b18e3ec5b2b788eeac630e Mon Sep 17 00:00:00 2001 From: Kevin Reid Date: Sun, 25 Feb 2024 13:17:26 -0800 Subject: [PATCH 057/162] runtime: add doc link from `Runtime` to `#[tokio::main]` (#6366) --- tokio/src/runtime/runtime.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tokio/src/runtime/runtime.rs b/tokio/src/runtime/runtime.rs index a8a95428095..2bf1636d502 100644 --- a/tokio/src/runtime/runtime.rs +++ b/tokio/src/runtime/runtime.rs @@ -21,8 +21,8 @@ cfg_rt_multi_thread! { /// blocking pool, necessary for running asynchronous tasks. /// /// Instances of `Runtime` can be created using [`new`], or [`Builder`]. 
-/// However, most users will use the `#[tokio::main]` annotation on their -/// entry point instead. +/// However, most users will use the [`#[tokio::main]`][main] annotation on +/// their entry point instead. /// /// See [module level][mod] documentation for more details. /// @@ -86,6 +86,7 @@ cfg_rt_multi_thread! { /// [`new`]: method@Self::new /// [`Builder`]: struct@Builder /// [`Handle`]: struct@Handle +/// [main]: macro@crate::main /// [`tokio::spawn`]: crate::spawn /// [`Arc::try_unwrap`]: std::sync::Arc::try_unwrap /// [Arc]: std::sync::Arc From 31252796978aee69d0c859dacceee92ff6a7ccf5 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Sun, 3 Mar 2024 21:12:58 +0100 Subject: [PATCH 058/162] ci: update FreeBSD image to 14 (#6376) --- .cirrus.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.cirrus.yml b/.cirrus.yml index b61a6a83f11..8aea3efa74b 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -1,7 +1,7 @@ only_if: $CIRRUS_TAG == '' && ($CIRRUS_PR != '' || $CIRRUS_BRANCH == 'master' || $CIRRUS_BRANCH =~ 'tokio-.*') auto_cancellation: $CIRRUS_BRANCH != 'master' && $CIRRUS_BRANCH !=~ 'tokio-.*' freebsd_instance: - image_family: freebsd-13-2 + image_family: freebsd-14-0 env: RUST_STABLE: stable RUST_NIGHTLY: nightly-2023-10-21 From e0d33c4a202896127cb134aebbb8cf535b0abb9d Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Sun, 3 Mar 2024 21:44:38 +0100 Subject: [PATCH 059/162] tokio: mark 1.36 as an LTS release (#6375) --- README.md | 7 ++++--- tokio/README.md | 7 ++++--- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index ec947925d59..fcfc1b92bc2 100644 --- a/README.md +++ b/README.md @@ -215,18 +215,18 @@ warrants a patch release with a fix for the bug, it will be backported and released as a new patch release for each LTS minor version. Our current LTS releases are: - * `1.25.x` - LTS release until March 2024. (MSRV 1.49) * `1.32.x` - LTS release until September 2024. 
(MSRV 1.63) + * `1.36.x` - LTS release until March 2025. (MSRV 1.63) Each LTS release will continue to receive backported fixes for at least a year. If you wish to use a fixed minor release in your project, we recommend that you use an LTS release. To use a fixed minor version, you can specify the version with a tilde. For -example, to specify that you wish to use the newest `1.25.x` patch release, you +example, to specify that you wish to use the newest `1.32.x` patch release, you can use the following dependency specification: ```text -tokio = { version = "~1.25", features = [...] } +tokio = { version = "~1.32", features = [...] } ``` ### Previous LTS releases @@ -235,6 +235,7 @@ tokio = { version = "~1.25", features = [...] } * `1.14.x` - LTS release until June 2022. * `1.18.x` - LTS release until June 2023. * `1.20.x` - LTS release until September 2023. + * `1.25.x` - LTS release until March 2024. ## License diff --git a/tokio/README.md b/tokio/README.md index ec947925d59..fcfc1b92bc2 100644 --- a/tokio/README.md +++ b/tokio/README.md @@ -215,18 +215,18 @@ warrants a patch release with a fix for the bug, it will be backported and released as a new patch release for each LTS minor version. Our current LTS releases are: - * `1.25.x` - LTS release until March 2024. (MSRV 1.49) * `1.32.x` - LTS release until September 2024. (MSRV 1.63) + * `1.36.x` - LTS release until March 2025. (MSRV 1.63) Each LTS release will continue to receive backported fixes for at least a year. If you wish to use a fixed minor release in your project, we recommend that you use an LTS release. To use a fixed minor version, you can specify the version with a tilde. For -example, to specify that you wish to use the newest `1.25.x` patch release, you +example, to specify that you wish to use the newest `1.32.x` patch release, you can use the following dependency specification: ```text -tokio = { version = "~1.25", features = [...] } +tokio = { version = "~1.32", features = [...] 
} ``` ### Previous LTS releases @@ -235,6 +235,7 @@ tokio = { version = "~1.25", features = [...] } * `1.14.x` - LTS release until June 2022. * `1.18.x` - LTS release until June 2023. * `1.20.x` - LTS release until September 2023. + * `1.25.x` - LTS release until March 2024. ## License From f5ca423bf1587a17f9c0f02b75d6ad5860b9c029 Mon Sep 17 00:00:00 2001 From: Lev Kokotov Date: Mon, 4 Mar 2024 00:40:03 -0800 Subject: [PATCH 060/162] sync: fix missing period in broadcast docs (#6377) --- tokio/src/sync/broadcast.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio/src/sync/broadcast.rs b/tokio/src/sync/broadcast.rs index 499e5296da4..326b81b4d5d 100644 --- a/tokio/src/sync/broadcast.rs +++ b/tokio/src/sync/broadcast.rs @@ -741,7 +741,7 @@ impl Sender { self.shared.buffer[idx].read().unwrap().rem.load(SeqCst) == 0 } - /// Returns the number of active receivers + /// Returns the number of active receivers. /// /// An active receiver is a [`Receiver`] handle returned from [`channel`] or /// [`subscribe`]. These are the handles that will receive values sent on From f6d061919f7a56eebf78edb1d1d379fadad33b6a Mon Sep 17 00:00:00 2001 From: kim / Motoyuki Kimura Date: Tue, 5 Mar 2024 00:56:40 +0900 Subject: [PATCH 061/162] test: mark `Spawn` as `#[must_use]` (#6371) --- tokio-test/src/task.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/tokio-test/src/task.rs b/tokio-test/src/task.rs index a9cf50f52f9..812f908aca8 100644 --- a/tokio-test/src/task.rs +++ b/tokio-test/src/task.rs @@ -49,6 +49,7 @@ pub fn spawn(task: T) -> Spawn { /// Future spawned on a mock task that can be used to poll the future or stream /// without needing pinning or context types. 
#[derive(Debug)] +#[must_use = "futures do nothing unless you `.await` or poll them"] pub struct Spawn { task: MockTask, future: Pin>, From 1f924f95f12c362f5116c560acfa005149f110a1 Mon Sep 17 00:00:00 2001 From: Patrick McGleenon Date: Tue, 5 Mar 2024 15:23:38 +0000 Subject: [PATCH 062/162] chore: fix deprecated circleci image (#6379) --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 6f901a95e93..6e30b9822fe 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -2,7 +2,7 @@ version: 2.1 jobs: test-arm: machine: - image: ubuntu-2004:202101-01 + image: default resource_class: arm.medium environment: # Change to pin rust version From 3133af42e123b9469dad292ae3a090da915d23c5 Mon Sep 17 00:00:00 2001 From: "M.Amin Rayej" Date: Sun, 10 Mar 2024 14:07:18 +0330 Subject: [PATCH 063/162] runtime: make the `enter` example deterministic (#6351) --- tokio/src/runtime/runtime.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/tokio/src/runtime/runtime.rs b/tokio/src/runtime/runtime.rs index 2bf1636d502..917c3f8ce91 100644 --- a/tokio/src/runtime/runtime.rs +++ b/tokio/src/runtime/runtime.rs @@ -368,12 +368,13 @@ impl Runtime { /// /// ``` /// use tokio::runtime::Runtime; + /// use tokio::task::JoinHandle; /// - /// fn function_that_spawns(msg: String) { + /// fn function_that_spawns(msg: String) -> JoinHandle<()> { /// // Had we not used `rt.enter` below, this would panic. /// tokio::spawn(async move { /// println!("{}", msg); - /// }); + /// }) /// } /// /// fn main() { @@ -383,7 +384,10 @@ impl Runtime { /// /// // By entering the context, we tie `tokio::spawn` to this executor. /// let _guard = rt.enter(); - /// function_that_spawns(s); + /// let handle = function_that_spawns(s); + /// + /// // Wait for the task before we end the test. 
+ /// rt.block_on(handle).unwrap(); /// } /// ``` pub fn enter(&self) -> EnterGuard<'_> { From b4ab6472de9a0708873c5f14f3b8de03b149a1c5 Mon Sep 17 00:00:00 2001 From: Yotam Ofek Date: Sun, 10 Mar 2024 17:24:07 +0200 Subject: [PATCH 064/162] signal: fix typo in argument name (#6389) --- tokio-stream/src/wrappers/signal_unix.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tokio-stream/src/wrappers/signal_unix.rs b/tokio-stream/src/wrappers/signal_unix.rs index 2f74e7d1527..6dcdff7fc55 100644 --- a/tokio-stream/src/wrappers/signal_unix.rs +++ b/tokio-stream/src/wrappers/signal_unix.rs @@ -15,8 +15,8 @@ pub struct SignalStream { impl SignalStream { /// Create a new `SignalStream`. - pub fn new(interval: Signal) -> Self { - Self { inner: interval } + pub fn new(signal: Signal) -> Self { + Self { inner: signal } } /// Get back the inner `Signal`. From a3d2548789d22d4bf193a4614f229944270e912c Mon Sep 17 00:00:00 2001 From: kim / Motoyuki Kimura Date: Tue, 12 Mar 2024 18:25:43 +0900 Subject: [PATCH 065/162] sync: implement `Clone` for `watch::Sender` (#6388) --- tokio/src/sync/tests/loom_watch.rs | 20 ++++++++++++++++ tokio/src/sync/watch.rs | 22 ++++++++++++++--- tokio/tests/sync_watch.rs | 38 +++++++++++++++++++++++++++++- 3 files changed, 76 insertions(+), 4 deletions(-) diff --git a/tokio/src/sync/tests/loom_watch.rs b/tokio/src/sync/tests/loom_watch.rs index 51589cd8042..0f69ae7e938 100644 --- a/tokio/src/sync/tests/loom_watch.rs +++ b/tokio/src/sync/tests/loom_watch.rs @@ -88,3 +88,23 @@ fn wait_for_returns_correct_value() { jh.join().unwrap(); }); } + +#[test] +fn multiple_sender_drop_concurrently() { + loom::model(move || { + let (tx1, rx) = watch::channel(0); + let tx2 = tx1.clone(); + + let jh = thread::spawn(move || { + drop(tx2); + }); + assert!(rx.has_changed().is_ok()); + + drop(tx1); + + jh.join().unwrap(); + + // Check if all sender are dropped and closed flag is set. 
+ assert!(rx.has_changed().is_err()); + }); +} diff --git a/tokio/src/sync/watch.rs b/tokio/src/sync/watch.rs index 3979b07202f..a813a829b40 100644 --- a/tokio/src/sync/watch.rs +++ b/tokio/src/sync/watch.rs @@ -114,7 +114,7 @@ use crate::sync::notify::Notify; use crate::loom::sync::atomic::AtomicUsize; -use crate::loom::sync::atomic::Ordering::Relaxed; +use crate::loom::sync::atomic::Ordering::{AcqRel, Relaxed}; use crate::loom::sync::{Arc, RwLock, RwLockReadGuard}; use std::fmt; use std::mem; @@ -146,6 +146,16 @@ pub struct Sender { shared: Arc>, } +impl Clone for Sender { + fn clone(&self) -> Self { + self.shared.ref_count_tx.fetch_add(1, Relaxed); + + Self { + shared: self.shared.clone(), + } + } +} + /// Returns a reference to the inner value. /// /// Outstanding borrows hold a read lock on the inner value. This means that @@ -238,6 +248,9 @@ struct Shared { /// Tracks the number of `Receiver` instances. ref_count_rx: AtomicUsize, + /// Tracks the number of `Sender` instances. + ref_count_tx: AtomicUsize, + /// Notifies waiting receivers that the value changed. 
notify_rx: big_notify::BigNotify, @@ -485,6 +498,7 @@ pub fn channel(init: T) -> (Sender, Receiver) { value: RwLock::new(init), state: AtomicState::new(), ref_count_rx: AtomicUsize::new(1), + ref_count_tx: AtomicUsize::new(1), notify_rx: big_notify::BigNotify::new(), notify_tx: Notify::new(), }); @@ -1302,8 +1316,10 @@ impl Sender { impl Drop for Sender { fn drop(&mut self) { - self.shared.state.set_closed(); - self.shared.notify_rx.notify_waiters(); + if self.shared.ref_count_tx.fetch_sub(1, AcqRel) == 1 { + self.shared.state.set_closed(); + self.shared.notify_rx.notify_waiters(); + } } } diff --git a/tokio/tests/sync_watch.rs b/tokio/tests/sync_watch.rs index a5b229f7ddc..7957c7da23f 100644 --- a/tokio/tests/sync_watch.rs +++ b/tokio/tests/sync_watch.rs @@ -7,7 +7,9 @@ use wasm_bindgen_test::wasm_bindgen_test as test; use tokio::sync::watch; use tokio_test::task::spawn; -use tokio_test::{assert_pending, assert_ready, assert_ready_err, assert_ready_ok}; +use tokio_test::{ + assert_pending, assert_ready, assert_ready_eq, assert_ready_err, assert_ready_ok, +}; #[test] fn single_rx_recv() { @@ -332,3 +334,37 @@ fn send_modify_panic() { assert_ready_ok!(task.poll()); assert_eq!(*rx.borrow_and_update(), "three"); } + +#[tokio::test] +async fn multiple_sender() { + let (tx1, mut rx) = watch::channel(0); + let tx2 = tx1.clone(); + + let mut t = spawn(async { + rx.changed().await.unwrap(); + let v1 = *rx.borrow_and_update(); + rx.changed().await.unwrap(); + let v2 = *rx.borrow_and_update(); + (v1, v2) + }); + + tx1.send(1).unwrap(); + assert_pending!(t.poll()); + tx2.send(2).unwrap(); + assert_ready_eq!(t.poll(), (1, 2)); +} + +#[tokio::test] +async fn reciever_is_notified_when_last_sender_is_dropped() { + let (tx1, mut rx) = watch::channel(0); + let tx2 = tx1.clone(); + + let mut t = spawn(rx.changed()); + assert_pending!(t.poll()); + + drop(tx1); + assert!(!t.is_woken()); + drop(tx2); + + assert!(t.is_woken()); +} From fb2dc97468fead3c1d318f209a65648e11ade55d Mon Sep 17 
00:00:00 2001 From: Alice Ryhl Date: Tue, 12 Mar 2024 15:21:08 +0100 Subject: [PATCH 066/162] readme: update commit message guidelines (#6393) --- CONTRIBUTING.md | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7e9bb2c998e..623efd1337e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -368,14 +368,16 @@ A good commit message should describe what changed and why. and no more than 72 characters) * be entirely in lowercase with the exception of proper nouns, acronyms, and the words that refer to code, like function/variable names - * be prefixed with the name of the sub crate being changed (without the `tokio-` - prefix) and start with an imperative verb. If modifying `tokio` proper, - omit the crate prefix. + * start with an imperative verb + * not have a period at the end + * be prefixed with the name of the module being changed; usually this is the + same as the M-* label on the PR Examples: - * timer: introduce `Timeout` and deprecate `Deadline` - * export `Encoder`, `Decoder`, `Framed*` from tokio_codec + * time: introduce `Timeout` and deprecate `Deadline` + * codec: export `Encoder`, `Decoder`, `Framed*` + * ci: fix the FreeBSD ci configuration 2. Keep the second line blank. 3. Wrap all other lines at 72 columns (except for long URLs). @@ -392,7 +394,7 @@ A good commit message should describe what changed and why. 
Sample complete commit message: ```txt -subcrate: explain the commit in one line +module: explain the commit in one line Body of commit message is a few lines of text, explaining things in more detail, possibly giving some background about the issue From ea1cfbdb97f524a1ae465c0f5454dd6690dd5e6e Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Tue, 12 Mar 2024 15:22:25 +0100 Subject: [PATCH 067/162] sync: reorder const_new before new_with (#6392) --- tokio/src/sync/once_cell.rs | 80 ++++++++++++++++++------------------- 1 file changed, 40 insertions(+), 40 deletions(-) diff --git a/tokio/src/sync/once_cell.rs b/tokio/src/sync/once_cell.rs index 1a2303e8e6a..82b8259c667 100644 --- a/tokio/src/sync/once_cell.rs +++ b/tokio/src/sync/once_cell.rs @@ -132,36 +132,22 @@ impl OnceCell { } } - /// Creates a new `OnceCell` that contains the provided value, if any. + /// Creates a new empty `OnceCell` instance. /// - /// If the `Option` is `None`, this is equivalent to `OnceCell::new`. + /// Equivalent to `OnceCell::new`, except that it can be used in static + /// variables. /// - /// [`OnceCell::new`]: crate::sync::OnceCell::new - // Once https://github.com/rust-lang/rust/issues/73255 lands - // and tokio MSRV is bumped to the rustc version with it stablised, - // we can made this function available in const context, - // by creating `Semaphore::const_new_closed`. - pub fn new_with(value: Option) -> Self { - if let Some(v) = value { - OnceCell::from(v) - } else { - OnceCell::new() - } - } - - /// Creates a new `OnceCell` that contains the provided value. + /// When using the `tracing` [unstable feature], a `OnceCell` created with + /// `const_new` will not be instrumented. As such, it will not be visible + /// in [`tokio-console`]. Instead, [`OnceCell::new`] should be used to + /// create an instrumented object if that is needed. /// /// # Example /// - /// When using the `tracing` [unstable feature], a `OnceCell` created with - /// `const_new_with` will not be instrumented. 
As such, it will not be - /// visible in [`tokio-console`]. Instead, [`OnceCell::new_with`] should be - /// used to create an instrumented object if that is needed. - /// /// ``` /// use tokio::sync::OnceCell; /// - /// static ONCE: OnceCell = OnceCell::const_new_with(1); + /// static ONCE: OnceCell = OnceCell::const_new(); /// /// async fn get_global_integer() -> &'static u32 { /// ONCE.get_or_init(|| async { @@ -172,37 +158,51 @@ impl OnceCell { /// #[tokio::main] /// async fn main() { /// let result = get_global_integer().await; - /// assert_eq!(*result, 1); + /// assert_eq!(*result, 2); /// } /// ``` /// /// [`tokio-console`]: https://github.com/tokio-rs/console /// [unstable feature]: crate#unstable-features #[cfg(not(all(loom, test)))] - pub const fn const_new_with(value: T) -> Self { + pub const fn const_new() -> Self { OnceCell { - value_set: AtomicBool::new(true), - value: UnsafeCell::new(MaybeUninit::new(value)), - semaphore: Semaphore::const_new_closed(), + value_set: AtomicBool::new(false), + value: UnsafeCell::new(MaybeUninit::uninit()), + semaphore: Semaphore::const_new(1), } } - /// Creates a new empty `OnceCell` instance. + /// Creates a new `OnceCell` that contains the provided value, if any. /// - /// Equivalent to `OnceCell::new`, except that it can be used in static - /// variables. + /// If the `Option` is `None`, this is equivalent to `OnceCell::new`. /// - /// When using the `tracing` [unstable feature], a `OnceCell` created with - /// `const_new` will not be instrumented. As such, it will not be visible - /// in [`tokio-console`]. Instead, [`OnceCell::new`] should be used to - /// create an instrumented object if that is needed. + /// [`OnceCell::new`]: crate::sync::OnceCell::new + // Once https://github.com/rust-lang/rust/issues/73255 lands + // and tokio MSRV is bumped to the rustc version with it stablised, + // we can made this function available in const context, + // by creating `Semaphore::const_new_closed`. 
+ pub fn new_with(value: Option) -> Self { + if let Some(v) = value { + OnceCell::from(v) + } else { + OnceCell::new() + } + } + + /// Creates a new `OnceCell` that contains the provided value. /// /// # Example /// + /// When using the `tracing` [unstable feature], a `OnceCell` created with + /// `const_new_with` will not be instrumented. As such, it will not be + /// visible in [`tokio-console`]. Instead, [`OnceCell::new_with`] should be + /// used to create an instrumented object if that is needed. + /// /// ``` /// use tokio::sync::OnceCell; /// - /// static ONCE: OnceCell = OnceCell::const_new(); + /// static ONCE: OnceCell = OnceCell::const_new_with(1); /// /// async fn get_global_integer() -> &'static u32 { /// ONCE.get_or_init(|| async { @@ -213,18 +213,18 @@ impl OnceCell { /// #[tokio::main] /// async fn main() { /// let result = get_global_integer().await; - /// assert_eq!(*result, 2); + /// assert_eq!(*result, 1); /// } /// ``` /// /// [`tokio-console`]: https://github.com/tokio-rs/console /// [unstable feature]: crate#unstable-features #[cfg(not(all(loom, test)))] - pub const fn const_new() -> Self { + pub const fn const_new_with(value: T) -> Self { OnceCell { - value_set: AtomicBool::new(false), - value: UnsafeCell::new(MaybeUninit::uninit()), - semaphore: Semaphore::const_new(1), + value_set: AtomicBool::new(true), + value: UnsafeCell::new(MaybeUninit::new(value)), + semaphore: Semaphore::const_new_closed(), } } From 3141ed62287043b5400e24ac930b002dc7d11d92 Mon Sep 17 00:00:00 2001 From: kim / Motoyuki Kimura Date: Thu, 14 Mar 2024 01:24:19 +0900 Subject: [PATCH 068/162] sync: update watch channel docs (#6395) --- tokio/src/sync/mod.rs | 2 +- tokio/src/sync/watch.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tokio/src/sync/mod.rs b/tokio/src/sync/mod.rs index 33f3a35f128..ef7a09a89b6 100644 --- a/tokio/src/sync/mod.rs +++ b/tokio/src/sync/mod.rs @@ -278,7 +278,7 @@ //! //! ## `watch` channel //! -//! 
The [`watch` channel] supports sending **many** values from a **single** +//! The [`watch` channel] supports sending **many** values from a **many** //! producer to **many** consumers. However, only the **most recent** value is //! stored in the channel. Consumers are notified when a new value is sent, but //! there is no guarantee that consumers will see **all** values. diff --git a/tokio/src/sync/watch.rs b/tokio/src/sync/watch.rs index a813a829b40..c3ab0bdc695 100644 --- a/tokio/src/sync/watch.rs +++ b/tokio/src/sync/watch.rs @@ -1,6 +1,6 @@ #![cfg_attr(not(feature = "sync"), allow(dead_code, unreachable_pub))] -//! A single-producer, multi-consumer channel that only retains the *last* sent +//! A multi-producer, multi-consumer channel that only retains the *last* sent //! value. //! //! This channel is useful for watching for changes to a value from multiple From c9e75785c84a441199992ed38e49aeba2f061a24 Mon Sep 17 00:00:00 2001 From: Vincent Palancher Date: Thu, 14 Mar 2024 10:23:59 +0100 Subject: [PATCH 069/162] sync: remove `'static` bound on `impl Sink for PollSender` (#6397) In PR #5665, the `'static` bound has been removed on values sent into `PollSender`. One of this bound was remaining on the `PollSender` implementation of `Sink`. This patch removes it and adds some tests on the `Sink` interface for `PollSender`. 
--- tokio-util/src/sync/mpsc.rs | 2 +- tokio-util/tests/mpsc.rs | 89 ++++++++++++++++++++++++++++++++++++- 2 files changed, 89 insertions(+), 2 deletions(-) diff --git a/tokio-util/src/sync/mpsc.rs b/tokio-util/src/sync/mpsc.rs index fd48c72582b..8e2ff814622 100644 --- a/tokio-util/src/sync/mpsc.rs +++ b/tokio-util/src/sync/mpsc.rs @@ -303,7 +303,7 @@ impl Clone for PollSender { } } -impl Sink for PollSender { +impl Sink for PollSender { type Error = PollSendError; fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { diff --git a/tokio-util/tests/mpsc.rs b/tokio-util/tests/mpsc.rs index 545a580318d..cf4dcd55f63 100644 --- a/tokio-util/tests/mpsc.rs +++ b/tokio-util/tests/mpsc.rs @@ -1,7 +1,10 @@ use futures::future::poll_fn; +use futures::sink::SinkExt; use tokio::sync::mpsc::channel; use tokio_test::task::spawn; -use tokio_test::{assert_pending, assert_ready, assert_ready_err, assert_ready_ok}; +use tokio_test::{ + assert_ok, assert_pending, assert_ready, assert_ready_eq, assert_ready_err, assert_ready_ok, +}; use tokio_util::sync::PollSender; #[tokio::test] @@ -260,3 +263,87 @@ fn start_send_panics_when_acquiring() { assert_pending!(reserve.poll()); send.send_item(2).unwrap(); } + +#[test] +fn sink_send_then_flush() { + let (send, mut recv) = channel(1); + let mut send = PollSender::new(send); + + let mut recv_task = spawn(recv.recv()); + assert_pending!(recv_task.poll()); + + let mut ready = spawn(poll_fn(|cx| send.poll_ready_unpin(cx))); + assert_ready_ok!(ready.poll()); + assert_ok!(send.start_send_unpin(())); + + let mut ready = spawn(poll_fn(|cx| send.poll_ready_unpin(cx))); + assert_pending!(ready.poll()); + + let mut flush = spawn(poll_fn(|cx| send.poll_flush_unpin(cx))); + assert_ready_ok!(flush.poll()); + + // Flushing does not mean that the sender becomes ready. 
+ let mut ready = spawn(poll_fn(|cx| send.poll_ready_unpin(cx))); + assert_pending!(ready.poll()); + + assert_ready_eq!(recv_task.poll(), Some(())); + assert!(ready.is_woken()); + assert_ready_ok!(ready.poll()); +} + +#[test] +fn sink_send_then_close() { + let (send, mut recv) = channel(1); + let mut send = PollSender::new(send); + + let mut recv_task = spawn(recv.recv()); + assert_pending!(recv_task.poll()); + + let mut ready = spawn(poll_fn(|cx| send.poll_ready_unpin(cx))); + assert_ready_ok!(ready.poll()); + assert_ok!(send.start_send_unpin(1)); + + let mut ready = spawn(poll_fn(|cx| send.poll_ready_unpin(cx))); + assert_pending!(ready.poll()); + + assert!(recv_task.is_woken()); + assert_ready_eq!(recv_task.poll(), Some(1)); + + assert!(ready.is_woken()); + assert_ready_ok!(ready.poll()); + + drop(recv_task); + let mut recv_task = spawn(recv.recv()); + assert_pending!(recv_task.poll()); + assert_ok!(send.start_send_unpin(2)); + + let mut close = spawn(poll_fn(|cx| send.poll_close_unpin(cx))); + assert_ready_ok!(close.poll()); + + assert!(recv_task.is_woken()); + assert_ready_eq!(recv_task.poll(), Some(2)); + + drop(recv_task); + let mut recv_task = spawn(recv.recv()); + assert_ready_eq!(recv_task.poll(), None); +} + +#[test] +fn sink_send_ref() { + let data = "data".to_owned(); + let (send, mut recv) = channel(1); + let mut send = PollSender::new(send); + + let mut recv_task = spawn(recv.recv()); + assert_pending!(recv_task.poll()); + + let mut ready = spawn(poll_fn(|cx| send.poll_ready_unpin(cx))); + assert_ready_ok!(ready.poll()); + + assert_ok!(send.start_send_unpin(data.as_str())); + + let mut flush = spawn(poll_fn(|cx| send.poll_flush_unpin(cx))); + assert_ready_ok!(flush.poll()); + + assert_ready_eq!(recv_task.poll(), Some("data")); +} From e37bd6385430620f850a644d58945ace541afb6e Mon Sep 17 00:00:00 2001 From: "M.Amin Rayej" Date: Thu, 14 Mar 2024 17:18:26 +0330 Subject: [PATCH 070/162] io: implement `try_new` and `try_with_interest` for `AsyncFd` (#6345) 
--- tokio/src/io/async_fd.rs | 114 ++++++++++++++++++++++++++++++++++--- tokio/src/io/mod.rs | 2 +- tokio/tests/io_async_fd.rs | 30 ++++++++++ tokio/tests/io_panic.rs | 45 +++++++++++++++ 4 files changed, 183 insertions(+), 8 deletions(-) diff --git a/tokio/src/io/async_fd.rs b/tokio/src/io/async_fd.rs index aaf17584198..96d0518a6e5 100644 --- a/tokio/src/io/async_fd.rs +++ b/tokio/src/io/async_fd.rs @@ -3,6 +3,8 @@ use crate::runtime::io::{ReadyEvent, Registration}; use crate::runtime::scheduler; use mio::unix::SourceFd; +use std::error::Error; +use std::fmt; use std::io; use std::os::unix::io::{AsRawFd, RawFd}; use std::{task::Context, task::Poll}; @@ -249,15 +251,69 @@ impl AsyncFd { handle: scheduler::Handle, interest: Interest, ) -> io::Result { - let fd = inner.as_raw_fd(); + Self::try_new_with_handle_and_interest(inner, handle, interest).map_err(Into::into) + } - let registration = - Registration::new_with_interest_and_handle(&mut SourceFd(&fd), interest, handle)?; + /// Creates an [`AsyncFd`] backed by (and taking ownership of) an object + /// implementing [`AsRawFd`]. The backing file descriptor is cached at the + /// time of creation. + /// + /// Only configures the [`Interest::READABLE`] and [`Interest::WRITABLE`] interests. For more + /// control, use [`AsyncFd::try_with_interest`]. + /// + /// This method must be called in the context of a tokio runtime. + /// + /// In the case of failure, it returns [`AsyncFdTryNewError`] that contains the original object + /// passed to this function. + /// + /// # Panics + /// + /// This function panics if there is no current reactor set, or if the `rt` + /// feature flag is not enabled. 
+ #[inline] + #[track_caller] + pub fn try_new(inner: T) -> Result> + where + T: AsRawFd, + { + Self::try_with_interest(inner, Interest::READABLE | Interest::WRITABLE) + } - Ok(AsyncFd { - registration, - inner: Some(inner), - }) + /// Creates an [`AsyncFd`] backed by (and taking ownership of) an object + /// implementing [`AsRawFd`], with a specific [`Interest`]. The backing + /// file descriptor is cached at the time of creation. + /// + /// In the case of failure, it returns [`AsyncFdTryNewError`] that contains the original object + /// passed to this function. + /// + /// # Panics + /// + /// This function panics if there is no current reactor set, or if the `rt` + /// feature flag is not enabled. + #[inline] + #[track_caller] + pub fn try_with_interest(inner: T, interest: Interest) -> Result> + where + T: AsRawFd, + { + Self::try_new_with_handle_and_interest(inner, scheduler::Handle::current(), interest) + } + + #[track_caller] + pub(crate) fn try_new_with_handle_and_interest( + inner: T, + handle: scheduler::Handle, + interest: Interest, + ) -> Result> { + let fd = inner.as_raw_fd(); + + match Registration::new_with_interest_and_handle(&mut SourceFd(&fd), interest, handle) { + Ok(registration) => Ok(AsyncFd { + registration, + inner: Some(inner), + }), + Err(cause) => Err(AsyncFdTryNewError { inner, cause }), + } } /// Returns a shared reference to the backing object of this [`AsyncFd`]. @@ -1257,3 +1313,47 @@ impl<'a, T: std::fmt::Debug + AsRawFd> std::fmt::Debug for AsyncFdReadyMutGuard< /// [`try_io`]: method@AsyncFdReadyGuard::try_io #[derive(Debug)] pub struct TryIoError(()); + +/// Error returned by [`try_new`] or [`try_with_interest`]. 
+/// +/// [`try_new`]: AsyncFd::try_new +/// [`try_with_interest`]: AsyncFd::try_with_interest +pub struct AsyncFdTryNewError { + inner: T, + cause: io::Error, +} + +impl AsyncFdTryNewError { + /// Returns the original object passed to [`try_new`] or [`try_with_interest`] + /// alongside the error that caused these functions to fail. + /// + /// [`try_new`]: AsyncFd::try_new + /// [`try_with_interest`]: AsyncFd::try_with_interest + pub fn into_parts(self) -> (T, io::Error) { + (self.inner, self.cause) + } +} + +impl fmt::Display for AsyncFdTryNewError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(&self.cause, f) + } +} + +impl fmt::Debug for AsyncFdTryNewError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(&self.cause, f) + } +} + +impl Error for AsyncFdTryNewError { + fn source(&self) -> Option<&(dyn Error + 'static)> { + Some(&self.cause) + } +} + +impl From> for io::Error { + fn from(value: AsyncFdTryNewError) -> Self { + value.cause + } +} diff --git a/tokio/src/io/mod.rs b/tokio/src/io/mod.rs index 5e903c04842..7dab413ceb6 100644 --- a/tokio/src/io/mod.rs +++ b/tokio/src/io/mod.rs @@ -245,7 +245,7 @@ cfg_net_unix! { pub mod unix { //! Asynchronous IO structures specific to Unix-like operating systems. 
- pub use super::async_fd::{AsyncFd, AsyncFdReadyGuard, AsyncFdReadyMutGuard, TryIoError}; + pub use super::async_fd::{AsyncFd, AsyncFdTryNewError, AsyncFdReadyGuard, AsyncFdReadyMutGuard, TryIoError}; } } diff --git a/tokio/tests/io_async_fd.rs b/tokio/tests/io_async_fd.rs index 1fb203a6524..6f8a10aefbc 100644 --- a/tokio/tests/io_async_fd.rs +++ b/tokio/tests/io_async_fd.rs @@ -18,6 +18,7 @@ use nix::unistd::{close, read, write}; use futures::poll; use tokio::io::unix::{AsyncFd, AsyncFdReadyGuard}; +use tokio::io::Interest; use tokio_test::{assert_err, assert_pending}; struct TestWaker { @@ -834,3 +835,32 @@ async fn await_error_readiness_invalid_address() { let guard = fd.ready(Interest::ERROR).await.unwrap(); assert_eq!(guard.ready(), Ready::ERROR); } + +#[derive(Debug, PartialEq, Eq)] +struct InvalidSource; + +impl AsRawFd for InvalidSource { + fn as_raw_fd(&self) -> RawFd { + -1 + } +} + +#[tokio::test] +async fn try_new() { + let original = Arc::new(InvalidSource); + + let error = AsyncFd::try_new(original.clone()).unwrap_err(); + let (returned, _cause) = error.into_parts(); + + assert!(Arc::ptr_eq(&original, &returned)); +} + +#[tokio::test] +async fn try_with_interest() { + let original = Arc::new(InvalidSource); + + let error = AsyncFd::try_with_interest(original.clone(), Interest::READABLE).unwrap_err(); + let (returned, _cause) = error.into_parts(); + + assert!(Arc::ptr_eq(&original, &returned)); +} diff --git a/tokio/tests/io_panic.rs b/tokio/tests/io_panic.rs index b2cbad2751d..9e4cda21f3b 100644 --- a/tokio/tests/io_panic.rs +++ b/tokio/tests/io_panic.rs @@ -175,3 +175,48 @@ fn async_fd_with_interest_panic_caller() -> Result<(), Box> { Ok(()) } + +#[test] +#[cfg(unix)] +fn async_fd_try_new_panic_caller() -> Result<(), Box> { + use tokio::io::unix::AsyncFd; + use tokio::runtime::Builder; + + let panic_location_file = test_panic(|| { + // Runtime without `enable_io` so it has no IO driver set. 
+ let rt = Builder::new_current_thread().build().unwrap(); + rt.block_on(async { + let fd = unix::MockFd; + + let _ = AsyncFd::try_new(fd); + }); + }); + + // The panic location should be in this file + assert_eq!(&panic_location_file.unwrap(), file!()); + + Ok(()) +} + +#[test] +#[cfg(unix)] +fn async_fd_try_with_interest_panic_caller() -> Result<(), Box> { + use tokio::io::unix::AsyncFd; + use tokio::io::Interest; + use tokio::runtime::Builder; + + let panic_location_file = test_panic(|| { + // Runtime without `enable_io` so it has no IO driver set. + let rt = Builder::new_current_thread().build().unwrap(); + rt.block_on(async { + let fd = unix::MockFd; + + let _ = AsyncFd::try_with_interest(fd, Interest::READABLE); + }); + }); + + // The panic location should be in this file + assert_eq!(&panic_location_file.unwrap(), file!()); + + Ok(()) +} From 7cfb1007969e3fcb28b03854f3126caeca93932e Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Thu, 14 Mar 2024 22:10:05 +0100 Subject: [PATCH 071/162] chore: prepare tokio-stream v0.1.15 (#6401) --- tokio-stream/CHANGELOG.md | 16 ++++++++++++++++ tokio-stream/Cargo.toml | 2 +- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/tokio-stream/CHANGELOG.md b/tokio-stream/CHANGELOG.md index c14ad075d90..7f4ed6c32e3 100644 --- a/tokio-stream/CHANGELOG.md +++ b/tokio-stream/CHANGELOG.md @@ -1,3 +1,19 @@ +# 0.1.15 (March 14th, 2024) + +This release bumps the MSRV of tokio-stream to 1.63. 
+ +- docs: fix typo in argument name ([#6389]) +- docs: fix typo in peekable docs ([#6130]) +- docs: link to latest version of tokio-util docs ([#5694]) +- docs: typographic improvements ([#6262]) +- stream: add `StreamExt::peekable` ([#6095]) + +[#5694]: https://github.com/tokio-rs/tokio/pull/5694 +[#6095]: https://github.com/tokio-rs/tokio/pull/6095 +[#6130]: https://github.com/tokio-rs/tokio/pull/6130 +[#6262]: https://github.com/tokio-rs/tokio/pull/6262 +[#6389]: https://github.com/tokio-rs/tokio/pull/6389 + # 0.1.14 (April 26th, 2023) This bugfix release bumps the minimum version of Tokio to 1.15, which is diff --git a/tokio-stream/Cargo.toml b/tokio-stream/Cargo.toml index b71119eda2e..d3ea3076930 100644 --- a/tokio-stream/Cargo.toml +++ b/tokio-stream/Cargo.toml @@ -4,7 +4,7 @@ name = "tokio-stream" # - Remove path dependencies # - Update CHANGELOG.md. # - Create "tokio-stream-0.1.x" git tag. -version = "0.1.14" +version = "0.1.15" edition = "2021" rust-version = "1.63" authors = ["Tokio Contributors "] From 3d0d0fd2af9192ca5cf2836451e96dffab68216a Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Thu, 14 Mar 2024 22:10:15 +0100 Subject: [PATCH 072/162] chore: prepare tokio-test v0.4.4 (#6400) --- tokio-test/CHANGELOG.md | 10 ++++++++++ tokio-test/Cargo.toml | 2 +- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/tokio-test/CHANGELOG.md b/tokio-test/CHANGELOG.md index d7ca989e100..d506bfc40fb 100644 --- a/tokio-test/CHANGELOG.md +++ b/tokio-test/CHANGELOG.md @@ -1,3 +1,13 @@ +# 0.4.4 (March 14, 2024) + +- task: mark `Spawn` as `#[must_use]` ([#6371]) +- test: increase MSRV to 1.63 ([#6126]) +- test: update category slug ([#5953]) + +[#5953]: https://github.com/tokio-rs/tokio/pull/5953 +[#6126]: https://github.com/tokio-rs/tokio/pull/6126 +[#6371]: https://github.com/tokio-rs/tokio/pull/6371 + # 0.4.3 (August 23, 2023) - deps: fix minimum required version of `async-stream` ([#5347]) diff --git a/tokio-test/Cargo.toml b/tokio-test/Cargo.toml index 
29a35054679..076f79bf5e8 100644 --- a/tokio-test/Cargo.toml +++ b/tokio-test/Cargo.toml @@ -4,7 +4,7 @@ name = "tokio-test" # - Remove path dependencies # - Update CHANGELOG.md. # - Create "tokio-test-0.4.x" git tag. -version = "0.4.3" +version = "0.4.4" edition = "2021" rust-version = "1.63" authors = ["Tokio Contributors "] From b2896feb5bf4c123c0c5b92f97f948ae11c7cdd2 Mon Sep 17 00:00:00 2001 From: "M.Amin Rayej" Date: Sat, 16 Mar 2024 12:36:20 +0330 Subject: [PATCH 073/162] io: use Mutex instead of spinlock (#6403) --- tokio/src/io/split.rs | 72 ++++++++++++------------------------------- 1 file changed, 19 insertions(+), 53 deletions(-) diff --git a/tokio/src/io/split.rs b/tokio/src/io/split.rs index 63f0960e4f3..2602929cdd1 100644 --- a/tokio/src/io/split.rs +++ b/tokio/src/io/split.rs @@ -6,13 +6,11 @@ use crate::io::{AsyncRead, AsyncWrite, ReadBuf}; -use std::cell::UnsafeCell; use std::fmt; use std::io; use std::pin::Pin; -use std::sync::atomic::AtomicBool; -use std::sync::atomic::Ordering::{Acquire, Release}; use std::sync::Arc; +use std::sync::Mutex; use std::task::{Context, Poll}; cfg_io_util! { @@ -38,8 +36,7 @@ cfg_io_util! { let is_write_vectored = stream.is_write_vectored(); let inner = Arc::new(Inner { - locked: AtomicBool::new(false), - stream: UnsafeCell::new(stream), + stream: Mutex::new(stream), is_write_vectored, }); @@ -54,13 +51,19 @@ cfg_io_util! { } struct Inner { - locked: AtomicBool, - stream: UnsafeCell, + stream: Mutex, is_write_vectored: bool, } -struct Guard<'a, T> { - inner: &'a Inner, +impl Inner { + fn with_lock(&self, f: impl FnOnce(Pin<&mut T>) -> R) -> R { + let mut guard = self.stream.lock().unwrap(); + + // safety: we do not move the stream. 
+ let stream = unsafe { Pin::new_unchecked(&mut *guard) }; + + f(stream) + } } impl ReadHalf { @@ -90,7 +93,7 @@ impl ReadHalf { .ok() .expect("`Arc::try_unwrap` failed"); - inner.stream.into_inner() + inner.stream.into_inner().unwrap() } else { panic!("Unrelated `split::Write` passed to `split::Read::unsplit`.") } @@ -111,8 +114,7 @@ impl AsyncRead for ReadHalf { cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll> { - let mut inner = ready!(self.inner.poll_lock(cx)); - inner.stream_pin().poll_read(cx, buf) + self.inner.with_lock(|stream| stream.poll_read(cx, buf)) } } @@ -122,18 +124,15 @@ impl AsyncWrite for WriteHalf { cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { - let mut inner = ready!(self.inner.poll_lock(cx)); - inner.stream_pin().poll_write(cx, buf) + self.inner.with_lock(|stream| stream.poll_write(cx, buf)) } fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut inner = ready!(self.inner.poll_lock(cx)); - inner.stream_pin().poll_flush(cx) + self.inner.with_lock(|stream| stream.poll_flush(cx)) } fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut inner = ready!(self.inner.poll_lock(cx)); - inner.stream_pin().poll_shutdown(cx) + self.inner.with_lock(|stream| stream.poll_shutdown(cx)) } fn poll_write_vectored( @@ -141,8 +140,8 @@ impl AsyncWrite for WriteHalf { cx: &mut Context<'_>, bufs: &[io::IoSlice<'_>], ) -> Poll> { - let mut inner = ready!(self.inner.poll_lock(cx)); - inner.stream_pin().poll_write_vectored(cx, bufs) + self.inner + .with_lock(|stream| stream.poll_write_vectored(cx, bufs)) } fn is_write_vectored(&self) -> bool { @@ -150,39 +149,6 @@ impl AsyncWrite for WriteHalf { } } -impl Inner { - fn poll_lock(&self, cx: &mut Context<'_>) -> Poll> { - if self - .locked - .compare_exchange(false, true, Acquire, Acquire) - .is_ok() - { - Poll::Ready(Guard { inner: self }) - } else { - // Spin... 
but investigate a better strategy - - std::thread::yield_now(); - cx.waker().wake_by_ref(); - - Poll::Pending - } - } -} - -impl Guard<'_, T> { - fn stream_pin(&mut self) -> Pin<&mut T> { - // safety: the stream is pinned in `Arc` and the `Guard` ensures mutual - // exclusion. - unsafe { Pin::new_unchecked(&mut *self.inner.stream.get()) } - } -} - -impl Drop for Guard<'_, T> { - fn drop(&mut self) { - self.inner.locked.store(false, Release); - } -} - unsafe impl Send for ReadHalf {} unsafe impl Send for WriteHalf {} unsafe impl Sync for ReadHalf {} From bd51feac474c3d08868a70dd7f262d37780a5370 Mon Sep 17 00:00:00 2001 From: "M.Amin Rayej" Date: Sun, 17 Mar 2024 14:57:31 +0330 Subject: [PATCH 074/162] readme: fix running loom tests guide (#6408) --- CONTRIBUTING.md | 4 +++- tokio/src/runtime/tests/loom_multi_thread_alt.rs | 2 ++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 623efd1337e..240b08be616 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -184,9 +184,11 @@ it, `rustfmt` will update your files locally instead. You can run loom tests with ``` cd tokio # tokio crate in workspace -LOOM_MAX_PREEMPTIONS=1 RUSTFLAGS="--cfg loom" \ +LOOM_MAX_PREEMPTIONS=1 LOOM_MAX_BRANCHES=10000 RUSTFLAGS="--cfg loom -C debug_assertions" \ cargo test --lib --release --features full -- --test-threads=1 --nocapture ``` +Additionally, you can also add `--cfg tokio_unstable` to the `RUSTFLAGS` environment variable to +run loom tests that test unstable features. 
You can run miri tests with ``` diff --git a/tokio/src/runtime/tests/loom_multi_thread_alt.rs b/tokio/src/runtime/tests/loom_multi_thread_alt.rs index 1b9c3b477c6..c8d140e09e3 100644 --- a/tokio/src/runtime/tests/loom_multi_thread_alt.rs +++ b/tokio/src/runtime/tests/loom_multi_thread_alt.rs @@ -1,3 +1,5 @@ +#![cfg(tokio_unstable)] + mod queue; mod shutdown; mod yield_now; From 5baa8d58ecca5f4b5c4fafe8ec030b8d0baa3ced Mon Sep 17 00:00:00 2001 From: "M.Amin Rayej" Date: Sun, 17 Mar 2024 19:29:10 +0330 Subject: [PATCH 075/162] sync: remove stale comment (#6406) --- tokio/src/sync/mpsc/block.rs | 7 ------- 1 file changed, 7 deletions(-) diff --git a/tokio/src/sync/mpsc/block.rs b/tokio/src/sync/mpsc/block.rs index befcfd29efa..e81db44726b 100644 --- a/tokio/src/sync/mpsc/block.rs +++ b/tokio/src/sync/mpsc/block.rs @@ -243,13 +243,6 @@ impl Block { /// /// This indicates that the block is in its final state and will no longer /// be mutated. - /// - /// # Implementation - /// - /// The implementation walks each slot checking the `ready` flag. It might - /// be that it would make more sense to coalesce ready flags as bits in a - /// single atomic cell. However, this could have negative impact on cache - /// behavior as there would be many more mutations to a single slot. 
pub(crate) fn is_final(&self) -> bool { self.header.ready_slots.load(Acquire) & READY_MASK == READY_MASK } From d51f16855bce90c3c73ae199c7d6bdb340297e99 Mon Sep 17 00:00:00 2001 From: "M.Amin Rayej" Date: Wed, 20 Mar 2024 12:18:09 +0330 Subject: [PATCH 076/162] runtime: panic if `unhandled_panic` is enabled when not supported (#6410) --- tokio/src/runtime/builder.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tokio/src/runtime/builder.rs b/tokio/src/runtime/builder.rs index e20a3c4955b..82d3596915e 100644 --- a/tokio/src/runtime/builder.rs +++ b/tokio/src/runtime/builder.rs @@ -824,6 +824,10 @@ impl Builder { /// will immediately terminate and further calls to /// [`Runtime::block_on`] will panic. /// + /// # Panics + /// This method panics if called with [`UnhandledPanic::ShutdownRuntime`] + /// on a runtime other than the current thread runtime. + /// /// # Unstable /// /// This option is currently unstable and its implementation is @@ -861,6 +865,10 @@ impl Builder { /// /// [`JoinHandle`]: struct@crate::task::JoinHandle pub fn unhandled_panic(&mut self, behavior: UnhandledPanic) -> &mut Self { + if !matches!(self.kind, Kind::CurrentThread) && matches!(behavior, UnhandledPanic::ShutdownRuntime) { + panic!("UnhandledPanic::ShutdownRuntime is only supported in current thread runtime"); + } + self.unhandled_panic = behavior; self } From bb25a06f348c8069308e6145623dd3743d87564d Mon Sep 17 00:00:00 2001 From: "M.Amin Rayej" Date: Fri, 22 Mar 2024 13:44:18 +0330 Subject: [PATCH 077/162] chore: fix dead code warnings (#6423) --- benches/sync_mpsc.rs | 4 ++-- tokio/src/sync/tests/notify.rs | 2 +- tokio/src/sync/tests/semaphore_batch.rs | 2 +- tokio/src/util/markers.rs | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/benches/sync_mpsc.rs b/benches/sync_mpsc.rs index 117b3babdde..1a3f37cab81 100644 --- a/benches/sync_mpsc.rs +++ b/benches/sync_mpsc.rs @@ -4,7 +4,7 @@ use criterion::measurement::WallTime; use criterion::{black_box, 
criterion_group, criterion_main, BenchmarkGroup, Criterion}; #[derive(Debug, Copy, Clone)] -struct Medium([usize; 64]); +struct Medium(#[allow(dead_code)] [usize; 64]); impl Default for Medium { fn default() -> Self { Medium([0; 64]) @@ -12,7 +12,7 @@ impl Default for Medium { } #[derive(Debug, Copy, Clone)] -struct Large([Medium; 64]); +struct Large(#[allow(dead_code)] [Medium; 64]); impl Default for Large { fn default() -> Self { Large([Medium::default(); 64]) diff --git a/tokio/src/sync/tests/notify.rs b/tokio/src/sync/tests/notify.rs index 323b5c65ae3..83182464c50 100644 --- a/tokio/src/sync/tests/notify.rs +++ b/tokio/src/sync/tests/notify.rs @@ -52,7 +52,7 @@ fn notify_waiters_handles_panicking_waker() { let notify = Arc::new(Notify::new()); - struct PanickingWaker(Arc); + struct PanickingWaker(#[allow(dead_code)] Arc); impl ArcWake for PanickingWaker { fn wake_by_ref(_arc_self: &Arc) { diff --git a/tokio/src/sync/tests/semaphore_batch.rs b/tokio/src/sync/tests/semaphore_batch.rs index 09610ce71f2..fb5e8fdd6f7 100644 --- a/tokio/src/sync/tests/semaphore_batch.rs +++ b/tokio/src/sync/tests/semaphore_batch.rs @@ -268,7 +268,7 @@ fn release_permits_at_drop() { let sem = Arc::new(Semaphore::new(1)); - struct ReleaseOnDrop(Option); + struct ReleaseOnDrop(#[allow(dead_code)] Option); impl ArcWake for ReleaseOnDrop { fn wake_by_ref(_arc_self: &Arc) {} diff --git a/tokio/src/util/markers.rs b/tokio/src/util/markers.rs index 7031fb6bcd1..c16ebdf0bc6 100644 --- a/tokio/src/util/markers.rs +++ b/tokio/src/util/markers.rs @@ -1,8 +1,8 @@ /// Marker for types that are `Sync` but not `Send` -pub(crate) struct SyncNotSend(*mut ()); +pub(crate) struct SyncNotSend(#[allow(dead_code)] *mut ()); unsafe impl Sync for SyncNotSend {} cfg_rt! 
{ - pub(crate) struct NotSendOrSync(*mut ()); + pub(crate) struct NotSendOrSync(#[allow(dead_code)] *mut ()); } From f9d78fbe924f2255cbc8a8fc256767813b6451dc Mon Sep 17 00:00:00 2001 From: Motoyuki Kimura Date: Fri, 22 Mar 2024 22:58:29 +0900 Subject: [PATCH 078/162] fs: add `set_max_buf_size` to `tokio::fs::File` (#6411) --- tokio/src/fs/file.rs | 38 ++++++++++++++++++++++++++++++++---- tokio/src/fs/file/tests.rs | 4 ++-- tokio/src/fs/mocks.rs | 1 + tokio/src/io/blocking.rs | 20 +++++++++---------- tokio/src/io/stdio_common.rs | 19 +++++++++--------- tokio/tests/fs_file.rs | 22 +++++++++++++++++++++ 6 files changed, 79 insertions(+), 25 deletions(-) diff --git a/tokio/src/fs/file.rs b/tokio/src/fs/file.rs index 033b2982c7d..efce9fda990 100644 --- a/tokio/src/fs/file.rs +++ b/tokio/src/fs/file.rs @@ -3,7 +3,7 @@ //! [`File`]: File use crate::fs::{asyncify, OpenOptions}; -use crate::io::blocking::Buf; +use crate::io::blocking::{Buf, DEFAULT_MAX_BUF_SIZE}; use crate::io::{AsyncRead, AsyncSeek, AsyncWrite, ReadBuf}; use crate::sync::Mutex; @@ -90,6 +90,7 @@ use std::fs::File as StdFile; pub struct File { std: Arc, inner: Mutex, + max_buf_size: usize, } struct Inner { @@ -241,6 +242,7 @@ impl File { last_write_err: None, pos: 0, }), + max_buf_size: DEFAULT_MAX_BUF_SIZE, } } @@ -508,6 +510,34 @@ impl File { let std = self.std.clone(); asyncify(move || std.set_permissions(perm)).await } + + /// Set the maximum buffer size for the underlying [`AsyncRead`] / [`AsyncWrite`] operation. + /// + /// Although Tokio uses a sensible default value for this buffer size, this function would be + /// useful for changing that default depending on the situation. 
+ /// + /// # Examples + /// + /// ```no_run + /// use tokio::fs::File; + /// use tokio::io::AsyncWriteExt; + /// + /// # async fn dox() -> std::io::Result<()> { + /// let mut file = File::open("foo.txt").await?; + /// + /// // Set maximum buffer size to 8 MiB + /// file.set_max_buf_size(8 * 1024 * 1024); + /// + /// let mut buf = vec![1; 1024 * 1024 * 1024]; + /// + /// // Write the 1 GiB buffer in chunks up to 8 MiB each. + /// file.write_all(&mut buf).await?; + /// # Ok(()) + /// # } + /// ``` + pub fn set_max_buf_size(&mut self, max_buf_size: usize) { + self.max_buf_size = max_buf_size; + } } impl AsyncRead for File { @@ -531,7 +561,7 @@ impl AsyncRead for File { return Poll::Ready(Ok(())); } - buf.ensure_capacity_for(dst); + buf.ensure_capacity_for(dst, me.max_buf_size); let std = me.std.clone(); inner.state = State::Busy(spawn_blocking(move || { @@ -668,7 +698,7 @@ impl AsyncWrite for File { None }; - let n = buf.copy_from(src); + let n = buf.copy_from(src, me.max_buf_size); let std = me.std.clone(); let blocking_task_join_handle = spawn_mandatory_blocking(move || { @@ -739,7 +769,7 @@ impl AsyncWrite for File { None }; - let n = buf.copy_from_bufs(bufs); + let n = buf.copy_from_bufs(bufs, me.max_buf_size); let std = me.std.clone(); let blocking_task_join_handle = spawn_mandatory_blocking(move || { diff --git a/tokio/src/fs/file/tests.rs b/tokio/src/fs/file/tests.rs index 7c61b3c4b31..e824876c131 100644 --- a/tokio/src/fs/file/tests.rs +++ b/tokio/src/fs/file/tests.rs @@ -231,7 +231,7 @@ fn flush_while_idle() { #[cfg_attr(miri, ignore)] // takes a really long time with miri fn read_with_buffer_larger_than_max() { // Chunks - let chunk_a = crate::io::blocking::MAX_BUF; + let chunk_a = crate::io::blocking::DEFAULT_MAX_BUF_SIZE; let chunk_b = chunk_a * 2; let chunk_c = chunk_a * 3; let chunk_d = chunk_a * 4; @@ -303,7 +303,7 @@ fn read_with_buffer_larger_than_max() { #[cfg_attr(miri, ignore)] // takes a really long time with miri fn 
write_with_buffer_larger_than_max() { // Chunks - let chunk_a = crate::io::blocking::MAX_BUF; + let chunk_a = crate::io::blocking::DEFAULT_MAX_BUF_SIZE; let chunk_b = chunk_a * 2; let chunk_c = chunk_a * 3; let chunk_d = chunk_a * 4; diff --git a/tokio/src/fs/mocks.rs b/tokio/src/fs/mocks.rs index b718ed54f95..a2ce1cd6ca3 100644 --- a/tokio/src/fs/mocks.rs +++ b/tokio/src/fs/mocks.rs @@ -30,6 +30,7 @@ mock! { pub fn open(pb: PathBuf) -> io::Result; pub fn set_len(&self, size: u64) -> io::Result<()>; pub fn set_permissions(&self, _perm: Permissions) -> io::Result<()>; + pub fn set_max_buf_size(&self, max_buf_size: usize); pub fn sync_all(&self) -> io::Result<()>; pub fn sync_data(&self) -> io::Result<()>; pub fn try_clone(&self) -> io::Result; diff --git a/tokio/src/io/blocking.rs b/tokio/src/io/blocking.rs index b5d7dca2b5c..52aa798c4fe 100644 --- a/tokio/src/io/blocking.rs +++ b/tokio/src/io/blocking.rs @@ -23,7 +23,7 @@ pub(crate) struct Buf { pos: usize, } -pub(crate) const MAX_BUF: usize = 2 * 1024 * 1024; +pub(crate) const DEFAULT_MAX_BUF_SIZE: usize = 2 * 1024 * 1024; #[derive(Debug)] enum State { @@ -64,7 +64,7 @@ where return Poll::Ready(Ok(())); } - buf.ensure_capacity_for(dst); + buf.ensure_capacity_for(dst, DEFAULT_MAX_BUF_SIZE); let mut inner = self.inner.take().unwrap(); self.state = State::Busy(sys::run(move || { @@ -111,7 +111,7 @@ where assert!(buf.is_empty()); - let n = buf.copy_from(src); + let n = buf.copy_from(src, DEFAULT_MAX_BUF_SIZE); let mut inner = self.inner.take().unwrap(); self.state = State::Busy(sys::run(move || { @@ -214,10 +214,10 @@ impl Buf { n } - pub(crate) fn copy_from(&mut self, src: &[u8]) -> usize { + pub(crate) fn copy_from(&mut self, src: &[u8], max_buf_size: usize) -> usize { assert!(self.is_empty()); - let n = cmp::min(src.len(), MAX_BUF); + let n = cmp::min(src.len(), max_buf_size); self.buf.extend_from_slice(&src[..n]); n @@ -227,10 +227,10 @@ impl Buf { &self.buf[self.pos..] 
} - pub(crate) fn ensure_capacity_for(&mut self, bytes: &ReadBuf<'_>) { + pub(crate) fn ensure_capacity_for(&mut self, bytes: &ReadBuf<'_>, max_buf_size: usize) { assert!(self.is_empty()); - let len = cmp::min(bytes.remaining(), MAX_BUF); + let len = cmp::min(bytes.remaining(), max_buf_size); if self.buf.len() < len { self.buf.reserve(len - self.buf.len()); @@ -274,10 +274,10 @@ cfg_fs! { ret } - pub(crate) fn copy_from_bufs(&mut self, bufs: &[io::IoSlice<'_>]) -> usize { + pub(crate) fn copy_from_bufs(&mut self, bufs: &[io::IoSlice<'_>], max_buf_size: usize) -> usize { assert!(self.is_empty()); - let mut rem = MAX_BUF; + let mut rem = max_buf_size; for buf in bufs { if rem == 0 { break @@ -288,7 +288,7 @@ cfg_fs! { rem -= len; } - MAX_BUF - rem + max_buf_size - rem } } } diff --git a/tokio/src/io/stdio_common.rs b/tokio/src/io/stdio_common.rs index c32b889e582..4adbfe23606 100644 --- a/tokio/src/io/stdio_common.rs +++ b/tokio/src/io/stdio_common.rs @@ -4,7 +4,7 @@ use std::pin::Pin; use std::task::{Context, Poll}; /// # Windows /// [`AsyncWrite`] adapter that finds last char boundary in given buffer and does not write the rest, -/// if buffer contents seems to be `utf8`. Otherwise it only trims buffer down to `MAX_BUF`. +/// if buffer contents seems to be `utf8`. Otherwise it only trims buffer down to `DEFAULT_MAX_BUF_SIZE`. /// That's why, wrapped writer will always receive well-formed utf-8 bytes. /// # Other platforms /// Passes data to `inner` as is. @@ -45,12 +45,13 @@ where // 2. If buffer is small, it will not be shrunk. // That's why, it's "textness" will not change, so we don't have // to fixup it. 
- if cfg!(not(any(target_os = "windows", test))) || buf.len() <= crate::io::blocking::MAX_BUF + if cfg!(not(any(target_os = "windows", test))) + || buf.len() <= crate::io::blocking::DEFAULT_MAX_BUF_SIZE { return call_inner(buf); } - buf = &buf[..crate::io::blocking::MAX_BUF]; + buf = &buf[..crate::io::blocking::DEFAULT_MAX_BUF_SIZE]; // Now there are two possibilities. // If caller gave is binary buffer, we **should not** shrink it @@ -108,7 +109,7 @@ where #[cfg(test)] #[cfg(not(loom))] mod tests { - use crate::io::blocking::MAX_BUF; + use crate::io::blocking::DEFAULT_MAX_BUF_SIZE; use crate::io::AsyncWriteExt; use std::io; use std::pin::Pin; @@ -123,7 +124,7 @@ mod tests { _cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { - assert!(buf.len() <= MAX_BUF); + assert!(buf.len() <= DEFAULT_MAX_BUF_SIZE); assert!(std::str::from_utf8(buf).is_ok()); Poll::Ready(Ok(buf.len())) } @@ -158,7 +159,7 @@ mod tests { _cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { - assert!(buf.len() <= MAX_BUF); + assert!(buf.len() <= DEFAULT_MAX_BUF_SIZE); self.write_history.push(buf.len()); Poll::Ready(Ok(buf.len())) } @@ -178,7 +179,7 @@ mod tests { #[test] #[cfg_attr(miri, ignore)] fn test_splitter() { - let data = str::repeat("â–ˆ", MAX_BUF); + let data = str::repeat("â–ˆ", DEFAULT_MAX_BUF_SIZE); let mut wr = super::SplitByUtf8BoundaryIfWindows::new(TextMockWriter); let fut = async move { wr.write_all(data.as_bytes()).await.unwrap(); @@ -197,7 +198,7 @@ mod tests { // was not shrunk too much. 
let checked_count = super::MAGIC_CONST * super::MAX_BYTES_PER_CHAR; let mut data: Vec = str::repeat("a", checked_count).into(); - data.extend(std::iter::repeat(0b1010_1010).take(MAX_BUF - checked_count + 1)); + data.extend(std::iter::repeat(0b1010_1010).take(DEFAULT_MAX_BUF_SIZE - checked_count + 1)); let mut writer = LoggingMockWriter::new(); let mut splitter = super::SplitByUtf8BoundaryIfWindows::new(&mut writer); crate::runtime::Builder::new_current_thread() @@ -214,7 +215,7 @@ mod tests { data.len() ); // Check that at most MAX_BYTES_PER_CHAR + 1 (i.e. 5) bytes were shrunk - // from the buffer: one because it was outside of MAX_BUF boundary, and + // from the buffer: one because it was outside of DEFAULT_MAX_BUF_SIZE boundary, and // up to one "utf8 code point". assert!(data.len() - writer.write_history[0] <= super::MAX_BYTES_PER_CHAR + 1); } diff --git a/tokio/tests/fs_file.rs b/tokio/tests/fs_file.rs index 6a8b07a7ffe..520c4ec8438 100644 --- a/tokio/tests/fs_file.rs +++ b/tokio/tests/fs_file.rs @@ -180,6 +180,28 @@ fn tempfile() -> NamedTempFile { NamedTempFile::new().unwrap() } +#[tokio::test] +async fn set_max_buf_size_read() { + let mut tempfile = tempfile(); + tempfile.write_all(HELLO).unwrap(); + let mut file = File::open(tempfile.path()).await.unwrap(); + let mut buf = [0; 1024]; + file.set_max_buf_size(1); + + // A single read operation reads a maximum of 1 byte. + assert_eq!(file.read(&mut buf).await.unwrap(), 1); +} + +#[tokio::test] +async fn set_max_buf_size_write() { + let tempfile = tempfile(); + let mut file = File::create(tempfile.path()).await.unwrap(); + file.set_max_buf_size(1); + + // A single write operation writes a maximum of 1 byte. 
+ assert_eq!(file.write(HELLO).await.unwrap(), 1); +} + #[tokio::test] #[cfg(unix)] async fn file_debug_fmt() { From baad270b98acbc735f9e8baddc93ae8a18a652ce Mon Sep 17 00:00:00 2001 From: Christopher Acosta Date: Fri, 22 Mar 2024 19:03:28 +0100 Subject: [PATCH 079/162] sync: add Semaphore example for limiting the number of outgoing requests (#6419) --- tokio/src/sync/semaphore.rs | 53 +++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) diff --git a/tokio/src/sync/semaphore.rs b/tokio/src/sync/semaphore.rs index d0ee12591ee..a2b4074590b 100644 --- a/tokio/src/sync/semaphore.rs +++ b/tokio/src/sync/semaphore.rs @@ -76,6 +76,59 @@ use std::sync::Arc; /// } /// ``` /// +/// ## Limit the number of outgoing requests being sent at the same time +/// +/// In some scenarios, it might be required to limit the number of outgoing +/// requests being sent in parallel. This could be due to limits of a consumed +/// API or the network resources of the system the application is running on. +/// +/// This example uses an `Arc` with 10 permits. Each task spawned is +/// given a reference to the semaphore by cloning the `Arc`. Before +/// a task sends a request, it must acquire a permit from the semaphore by +/// calling [`Semaphore::acquire`]. This ensures that at most 10 requests are +/// sent in parallel at any given time. After a task has sent a request, it +/// drops the permit to allow other tasks to send requests. +/// +/// ``` +/// use std::sync::Arc; +/// use tokio::sync::Semaphore; +/// +/// #[tokio::main] +/// async fn main() { +/// // Define maximum number of parallel requests. +/// let semaphore = Arc::new(Semaphore::new(10)); +/// // Spawn many tasks that will send requests. +/// let mut jhs = Vec::new(); +/// for task_id in 0..100 { +/// let semaphore = semaphore.clone(); +/// let jh = tokio::spawn(async move { +/// // Acquire permit before sending request. +/// let _permit = semaphore.acquire().await.unwrap(); +/// // Send the request. 
+/// let response = send_request(task_id).await; +/// // Drop the permit after the request has been sent. +/// drop(_permit); +/// // Handle response. +/// // ... +/// +/// response +/// }); +/// jhs.push(jh); +/// } +/// // Collect responses from tasks. +/// let mut responses = Vec::new(); +/// for jh in jhs { +/// let response = jh.await.unwrap(); +/// responses.push(response); +/// } +/// // Process responses. +/// // ... +/// } +/// # async fn send_request(task_id: usize) { +/// # // Send request. +/// # } +/// ``` +/// /// ## Limit the number of incoming requests being handled at the same time /// /// Similar to limiting the number of simultaneously opened files, network handles From 1846483f1953f6ac4dd89f434e78ff99eb0c92f9 Mon Sep 17 00:00:00 2001 From: "M.Amin Rayej" Date: Fri, 22 Mar 2024 23:25:33 +0330 Subject: [PATCH 080/162] sync: expose strong and weak counts of mpsc sender handles (#6405) --- tokio/src/sync/mpsc/bounded.rs | 28 ++++++ tokio/src/sync/mpsc/chan.rs | 30 +++++++ tokio/src/sync/mpsc/unbounded.rs | 28 ++++++ tokio/tests/sync_mpsc_weak.rs | 142 +++++++++++++++++++++++++++++++ 4 files changed, 228 insertions(+) diff --git a/tokio/src/sync/mpsc/bounded.rs b/tokio/src/sync/mpsc/bounded.rs index 3cdba3dc237..b7b1ce7f623 100644 --- a/tokio/src/sync/mpsc/bounded.rs +++ b/tokio/src/sync/mpsc/bounded.rs @@ -1409,6 +1409,16 @@ impl Sender { pub fn max_capacity(&self) -> usize { self.chan.semaphore().bound } + + /// Returns the number of [`Sender`] handles. + pub fn strong_count(&self) -> usize { + self.chan.strong_count() + } + + /// Returns the number of [`WeakSender`] handles. 
+ pub fn weak_count(&self) -> usize { + self.chan.weak_count() + } } impl Clone for Sender { @@ -1429,12 +1439,20 @@ impl fmt::Debug for Sender { impl Clone for WeakSender { fn clone(&self) -> Self { + self.chan.increment_weak_count(); + WeakSender { chan: self.chan.clone(), } } } +impl Drop for WeakSender { + fn drop(&mut self) { + self.chan.decrement_weak_count(); + } +} + impl WeakSender { /// Tries to convert a `WeakSender` into a [`Sender`]. This will return `Some` /// if there are other `Sender` instances alive and the channel wasn't @@ -1442,6 +1460,16 @@ impl WeakSender { pub fn upgrade(&self) -> Option> { chan::Tx::upgrade(self.chan.clone()).map(Sender::new) } + + /// Returns the number of [`Sender`] handles. + pub fn strong_count(&self) -> usize { + self.chan.strong_count() + } + + /// Returns the number of [`WeakSender`] handles. + pub fn weak_count(&self) -> usize { + self.chan.weak_count() + } } impl fmt::Debug for WeakSender { diff --git a/tokio/src/sync/mpsc/chan.rs b/tokio/src/sync/mpsc/chan.rs index c05a4abb7c0..179a69f5700 100644 --- a/tokio/src/sync/mpsc/chan.rs +++ b/tokio/src/sync/mpsc/chan.rs @@ -66,6 +66,9 @@ pub(super) struct Chan { /// When this drops to zero, the send half of the channel is closed. tx_count: AtomicUsize, + /// Tracks the number of outstanding weak sender handles. + tx_weak_count: AtomicUsize, + /// Only accessed by `Rx` handle. 
rx_fields: UnsafeCell>, } @@ -115,6 +118,7 @@ pub(crate) fn channel(semaphore: S) -> (Tx, Rx) { semaphore, rx_waker: CachePadded::new(AtomicWaker::new()), tx_count: AtomicUsize::new(1), + tx_weak_count: AtomicUsize::new(0), rx_fields: UnsafeCell::new(RxFields { list: rx, rx_closed: false, @@ -131,7 +135,17 @@ impl Tx { Tx { inner: chan } } + pub(super) fn strong_count(&self) -> usize { + self.inner.tx_count.load(Acquire) + } + + pub(super) fn weak_count(&self) -> usize { + self.inner.tx_weak_count.load(Relaxed) + } + pub(super) fn downgrade(&self) -> Arc> { + self.inner.increment_weak_count(); + self.inner.clone() } @@ -452,6 +466,22 @@ impl Chan { // Notify the rx task self.rx_waker.wake(); } + + pub(super) fn decrement_weak_count(&self) { + self.tx_weak_count.fetch_sub(1, Relaxed); + } + + pub(super) fn increment_weak_count(&self) { + self.tx_weak_count.fetch_add(1, Relaxed); + } + + pub(super) fn strong_count(&self) -> usize { + self.tx_count.load(Acquire) + } + + pub(super) fn weak_count(&self) -> usize { + self.tx_weak_count.load(Relaxed) + } } impl Drop for Chan { diff --git a/tokio/src/sync/mpsc/unbounded.rs b/tokio/src/sync/mpsc/unbounded.rs index b87b07ba653..e5ef0adef38 100644 --- a/tokio/src/sync/mpsc/unbounded.rs +++ b/tokio/src/sync/mpsc/unbounded.rs @@ -578,16 +578,34 @@ impl UnboundedSender { chan: self.chan.downgrade(), } } + + /// Returns the number of [`UnboundedSender`] handles. + pub fn strong_count(&self) -> usize { + self.chan.strong_count() + } + + /// Returns the number of [`WeakUnboundedSender`] handles. + pub fn weak_count(&self) -> usize { + self.chan.weak_count() + } } impl Clone for WeakUnboundedSender { fn clone(&self) -> Self { + self.chan.increment_weak_count(); + WeakUnboundedSender { chan: self.chan.clone(), } } } +impl Drop for WeakUnboundedSender { + fn drop(&mut self) { + self.chan.decrement_weak_count(); + } +} + impl WeakUnboundedSender { /// Tries to convert a `WeakUnboundedSender` into an [`UnboundedSender`]. 
/// This will return `Some` if there are other `Sender` instances alive and @@ -595,6 +613,16 @@ impl WeakUnboundedSender { pub fn upgrade(&self) -> Option> { chan::Tx::upgrade(self.chan.clone()).map(UnboundedSender::new) } + + /// Returns the number of [`UnboundedSender`] handles. + pub fn strong_count(&self) -> usize { + self.chan.strong_count() + } + + /// Returns the number of [`WeakUnboundedSender`] handles. + pub fn weak_count(&self) -> usize { + self.chan.weak_count() + } } impl fmt::Debug for WeakUnboundedSender { diff --git a/tokio/tests/sync_mpsc_weak.rs b/tokio/tests/sync_mpsc_weak.rs index fad4c72f799..7716902f959 100644 --- a/tokio/tests/sync_mpsc_weak.rs +++ b/tokio/tests/sync_mpsc_weak.rs @@ -511,3 +511,145 @@ fn test_tx_count_weak_unbounded_sender() { assert!(tx_weak.upgrade().is_none() && tx_weak2.upgrade().is_none()); } + +#[tokio::test] +async fn sender_strong_count_when_cloned() { + let (tx, _rx) = mpsc::channel::<()>(1); + + let tx2 = tx.clone(); + + assert_eq!(tx.strong_count(), 2); + assert_eq!(tx2.strong_count(), 2); +} + +#[tokio::test] +async fn sender_weak_count_when_downgraded() { + let (tx, _rx) = mpsc::channel::<()>(1); + + let weak = tx.downgrade(); + + assert_eq!(tx.weak_count(), 1); + assert_eq!(weak.weak_count(), 1); +} + +#[tokio::test] +async fn sender_strong_count_when_dropped() { + let (tx, _rx) = mpsc::channel::<()>(1); + + let tx2 = tx.clone(); + + drop(tx2); + + assert_eq!(tx.strong_count(), 1); +} + +#[tokio::test] +async fn sender_weak_count_when_dropped() { + let (tx, _rx) = mpsc::channel::<()>(1); + + let weak = tx.downgrade(); + + drop(weak); + + assert_eq!(tx.weak_count(), 0); +} + +#[tokio::test] +async fn sender_strong_and_weak_conut() { + let (tx, _rx) = mpsc::channel::<()>(1); + + let tx2 = tx.clone(); + + let weak = tx.downgrade(); + let weak2 = tx2.downgrade(); + + assert_eq!(tx.strong_count(), 2); + assert_eq!(tx2.strong_count(), 2); + assert_eq!(weak.strong_count(), 2); + assert_eq!(weak2.strong_count(), 2); + 
+ assert_eq!(tx.weak_count(), 2); + assert_eq!(tx2.weak_count(), 2); + assert_eq!(weak.weak_count(), 2); + assert_eq!(weak2.weak_count(), 2); + + drop(tx2); + drop(weak2); + + assert_eq!(tx.strong_count(), 1); + assert_eq!(weak.strong_count(), 1); + + assert_eq!(tx.weak_count(), 1); + assert_eq!(weak.weak_count(), 1); +} + +#[tokio::test] +async fn unbounded_sender_strong_count_when_cloned() { + let (tx, _rx) = mpsc::unbounded_channel::<()>(); + + let tx2 = tx.clone(); + + assert_eq!(tx.strong_count(), 2); + assert_eq!(tx2.strong_count(), 2); +} + +#[tokio::test] +async fn unbounded_sender_weak_count_when_downgraded() { + let (tx, _rx) = mpsc::unbounded_channel::<()>(); + + let weak = tx.downgrade(); + + assert_eq!(tx.weak_count(), 1); + assert_eq!(weak.weak_count(), 1); +} + +#[tokio::test] +async fn unbounded_sender_strong_count_when_dropped() { + let (tx, _rx) = mpsc::unbounded_channel::<()>(); + + let tx2 = tx.clone(); + + drop(tx2); + + assert_eq!(tx.strong_count(), 1); +} + +#[tokio::test] +async fn unbounded_sender_weak_count_when_dropped() { + let (tx, _rx) = mpsc::unbounded_channel::<()>(); + + let weak = tx.downgrade(); + + drop(weak); + + assert_eq!(tx.weak_count(), 0); +} + +#[tokio::test] +async fn unbounded_sender_strong_and_weak_conut() { + let (tx, _rx) = mpsc::unbounded_channel::<()>(); + + let tx2 = tx.clone(); + + let weak = tx.downgrade(); + let weak2 = tx2.downgrade(); + + assert_eq!(tx.strong_count(), 2); + assert_eq!(tx2.strong_count(), 2); + assert_eq!(weak.strong_count(), 2); + assert_eq!(weak2.strong_count(), 2); + + assert_eq!(tx.weak_count(), 2); + assert_eq!(tx2.weak_count(), 2); + assert_eq!(weak.weak_count(), 2); + assert_eq!(weak2.weak_count(), 2); + + drop(tx2); + drop(weak2); + + assert_eq!(tx.strong_count(), 1); + assert_eq!(weak.strong_count(), 1); + + assert_eq!(tx.weak_count(), 1); + assert_eq!(weak.weak_count(), 1); +} From 4c453e9790d1cc5a2fd5c13fc2f63a145a3fd8da Mon Sep 17 00:00:00 2001 From: Motoyuki Kimura Date: Sat, 23 
Mar 2024 23:41:41 +0900 Subject: [PATCH 081/162] readme: add description about benchmarks (#6425) --- .github/workflows/ci.yml | 3 +-- CONTRIBUTING.md | 23 +++++++++++++++++++++++ 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4374d99ff63..e9738caebfc 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -210,11 +210,10 @@ jobs: run: cargo hack test --each-feature working-directory: tests-build - # Check benchmarks. Run of benchmarks is done by bench.yml workflow. + # Check benchmarks. - name: Check benches run: cargo check --benches working-directory: benches - # bench.yml workflow runs benchmarks only on linux. if: startsWith(matrix.os, 'ubuntu') test-parking_lot: diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 240b08be616..369a898fd98 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -347,6 +347,29 @@ example would explicitly use `Timeout::new`. For example: /// # } ``` +### Benchmarks + +You can run benchmarks locally for the changes you've made to the tokio codebase. +Tokio currently uses [Criterion](https://github.com/bheisler/criterion.rs) as its benchmarking tool. To run a benchmark +against the changes you have made, for example, you can run; + +```bash +cd benches + +# Run all benchmarks. +cargo bench + +# Run all tests in the `benches/fs.rs` file +cargo bench --bench fs + +# Run the `async_read_buf` benchmark in `benches/fs.rs` specifically. +cargo bench async_read_buf + +# After running benches, you can check the statistics under `tokio/target/criterion/` +``` + +You can also refer to Criterion docs for additional options and details. 
+ ### Commits It is a recommended best practice to keep your changes as logically grouped as From 8342e4b524984d5e80168da89760799aa1a2bfba Mon Sep 17 00:00:00 2001 From: "M.Amin Rayej" Date: Sun, 24 Mar 2024 01:12:24 +0330 Subject: [PATCH 082/162] util: assert compatibility between `LengthDelimitedCodec` options (#6414) --- tokio-util/src/codec/length_delimited.rs | 39 ++++++++++++++- tokio-util/tests/length_delimited.rs | 60 ++++++++++++++++++++++++ 2 files changed, 98 insertions(+), 1 deletion(-) diff --git a/tokio-util/src/codec/length_delimited.rs b/tokio-util/src/codec/length_delimited.rs index a182dcaec0c..92d76b2cd28 100644 --- a/tokio-util/src/codec/length_delimited.rs +++ b/tokio-util/src/codec/length_delimited.rs @@ -386,6 +386,10 @@ use std::{cmp, fmt, mem}; /// `Builder` enables constructing configured length delimited codecs. Note /// that not all configuration settings apply to both encoding and decoding. See /// the documentation for specific methods for more detail. +/// +/// Note that the if the value of [`Builder::max_frame_length`] becomes larger than +/// what can actually fit in [`Builder::length_field_length`], it will be clipped to +/// the maximum value that can fit. #[derive(Debug, Clone, Copy)] pub struct Builder { // Maximum frame length @@ -935,8 +939,12 @@ impl Builder { /// # } /// ``` pub fn new_codec(&self) -> LengthDelimitedCodec { + let mut builder = *self; + + builder.adjust_max_frame_len(); + LengthDelimitedCodec { - builder: *self, + builder, state: DecodeState::Head, } } @@ -1018,6 +1026,35 @@ impl Builder { self.num_skip .unwrap_or(self.length_field_offset + self.length_field_len) } + + fn adjust_max_frame_len(&mut self) { + // This function is basically `std::u64::saturating_add_signed`. Since it + // requires MSRV 1.66, its implementation is copied here. 
+ // + // TODO: use the method from std when MSRV becomes >= 1.66 + fn saturating_add_signed(num: u64, rhs: i64) -> u64 { + let (res, overflow) = num.overflowing_add(rhs as u64); + if overflow == (rhs < 0) { + res + } else if overflow { + u64::MAX + } else { + 0 + } + } + + // Calculate the maximum number that can be represented using `length_field_len` bytes. + let max_number = match 1u64.checked_shl((8 * self.length_field_len) as u32) { + Some(shl) => shl - 1, + None => u64::MAX, + }; + + let max_allowed_len = saturating_add_signed(max_number, self.length_adjustment as i64); + + if self.max_frame_len as u64 > max_allowed_len { + self.max_frame_len = usize::try_from(max_allowed_len).unwrap_or(usize::MAX); + } + } } impl Default for Builder { diff --git a/tokio-util/tests/length_delimited.rs b/tokio-util/tests/length_delimited.rs index ed5590f9644..091a5b449e4 100644 --- a/tokio-util/tests/length_delimited.rs +++ b/tokio-util/tests/length_delimited.rs @@ -689,6 +689,66 @@ fn encode_overflow() { codec.encode(Bytes::from("hello"), &mut buf).unwrap(); } +#[test] +fn frame_does_not_fit() { + let codec = LengthDelimitedCodec::builder() + .length_field_length(1) + .max_frame_length(256) + .new_codec(); + + assert_eq!(codec.max_frame_length(), 255); +} + +#[test] +fn neg_adjusted_frame_does_not_fit() { + let codec = LengthDelimitedCodec::builder() + .length_field_length(1) + .length_adjustment(-1) + .new_codec(); + + assert_eq!(codec.max_frame_length(), 254); +} + +#[test] +fn pos_adjusted_frame_does_not_fit() { + let codec = LengthDelimitedCodec::builder() + .length_field_length(1) + .length_adjustment(1) + .new_codec(); + + assert_eq!(codec.max_frame_length(), 256); +} + +#[test] +fn max_allowed_frame_fits() { + let codec = LengthDelimitedCodec::builder() + .length_field_length(std::mem::size_of::()) + .max_frame_length(usize::MAX) + .new_codec(); + + assert_eq!(codec.max_frame_length(), usize::MAX); +} + +#[test] +fn smaller_frame_len_not_adjusted() { + let codec = 
LengthDelimitedCodec::builder() + .max_frame_length(10) + .length_field_length(std::mem::size_of::()) + .new_codec(); + + assert_eq!(codec.max_frame_length(), 10); +} + +#[test] +fn max_allowed_length_field() { + let codec = LengthDelimitedCodec::builder() + .length_field_length(8) + .max_frame_length(usize::MAX) + .new_codec(); + + assert_eq!(codec.max_frame_length(), usize::MAX); +} + // ===== Test utils ===== struct Mock { From 3ce4720a4532e40c78f7d851b1cfb8ea26542177 Mon Sep 17 00:00:00 2001 From: Ilson Balliego Date: Sun, 24 Mar 2024 14:46:02 +0100 Subject: [PATCH 083/162] sync: add `is_closed`, `is_empty`, and `len` to mpsc receivers (#6348) Fixes: #4638 --- tokio/src/sync/mpsc/block.rs | 18 ++ tokio/src/sync/mpsc/bounded.rs | 67 +++++ tokio/src/sync/mpsc/chan.rs | 27 ++ tokio/src/sync/mpsc/list.rs | 27 ++ tokio/src/sync/mpsc/unbounded.rs | 67 +++++ tokio/src/sync/tests/loom_mpsc.rs | 34 +++ tokio/tests/sync_mpsc.rs | 403 ++++++++++++++++++++++++++++++ tokio/tests/sync_mpsc_weak.rs | 18 ++ 8 files changed, 661 insertions(+) diff --git a/tokio/src/sync/mpsc/block.rs b/tokio/src/sync/mpsc/block.rs index e81db44726b..e7798592531 100644 --- a/tokio/src/sync/mpsc/block.rs +++ b/tokio/src/sync/mpsc/block.rs @@ -168,6 +168,19 @@ impl Block { Some(Read::Value(value.assume_init())) } + /// Returns true if there is a value in the slot to be consumed + /// + /// # Safety + /// + /// To maintain safety, the caller must ensure: + /// + /// * No concurrent access to the slot. + pub(crate) fn has_value(&self, slot_index: usize) -> bool { + let offset = offset(slot_index); + let ready_bits = self.header.ready_slots.load(Acquire); + is_ready(ready_bits, offset) + } + /// Writes a value to the block at the given offset. 
/// /// # Safety @@ -195,6 +208,11 @@ impl Block { self.header.ready_slots.fetch_or(TX_CLOSED, Release); } + pub(crate) unsafe fn is_closed(&self) -> bool { + let ready_bits = self.header.ready_slots.load(Acquire); + is_tx_closed(ready_bits) + } + /// Resets the block to a blank state. This enables reusing blocks in the /// channel. /// diff --git a/tokio/src/sync/mpsc/bounded.rs b/tokio/src/sync/mpsc/bounded.rs index b7b1ce7f623..6ac97591fea 100644 --- a/tokio/src/sync/mpsc/bounded.rs +++ b/tokio/src/sync/mpsc/bounded.rs @@ -463,6 +463,73 @@ impl Receiver { self.chan.close(); } + /// Checks if a channel is closed. + /// + /// This method returns `true` if the channel has been closed. The channel is closed + /// when all [`Sender`] have been dropped, or when [`Receiver::close`] is called. + /// + /// [`Sender`]: crate::sync::mpsc::Sender + /// [`Receiver::close`]: crate::sync::mpsc::Receiver::close + /// + /// # Examples + /// ``` + /// use tokio::sync::mpsc; + /// + /// #[tokio::main] + /// async fn main() { + /// let (_tx, mut rx) = mpsc::channel::<()>(10); + /// assert!(!rx.is_closed()); + /// + /// rx.close(); + /// + /// assert!(rx.is_closed()); + /// } + /// ``` + pub fn is_closed(&self) -> bool { + self.chan.is_closed() + } + + /// Checks if a channel is empty. + /// + /// This method returns `true` if the channel has no messages. + /// + /// # Examples + /// ``` + /// use tokio::sync::mpsc; + /// + /// #[tokio::main] + /// async fn main() { + /// let (tx, rx) = mpsc::channel(10); + /// assert!(rx.is_empty()); + /// + /// tx.send(0).await.unwrap(); + /// assert!(!rx.is_empty()); + /// } + /// + /// ``` + pub fn is_empty(&self) -> bool { + self.chan.is_empty() + } + + /// Returns the number of messages in the channel. 
+ /// + /// # Examples + /// ``` + /// use tokio::sync::mpsc; + /// + /// #[tokio::main] + /// async fn main() { + /// let (tx, rx) = mpsc::channel(10); + /// assert_eq!(0, rx.len()); + /// + /// tx.send(0).await.unwrap(); + /// assert_eq!(1, rx.len()); + /// } + /// ``` + pub fn len(&self) -> usize { + self.chan.len() + } + /// Polls to receive the next message on this channel. /// /// This method returns: diff --git a/tokio/src/sync/mpsc/chan.rs b/tokio/src/sync/mpsc/chan.rs index 179a69f5700..ae378d7ecb2 100644 --- a/tokio/src/sync/mpsc/chan.rs +++ b/tokio/src/sync/mpsc/chan.rs @@ -255,6 +255,33 @@ impl Rx { self.inner.notify_rx_closed.notify_waiters(); } + pub(crate) fn is_closed(&self) -> bool { + // There two internal states that can represent a closed channel + // + // 1. When `close` is called. + // In this case, the inner semaphore will be closed. + // + // 2. When all senders are dropped. + // In this case, the semaphore remains unclosed, and the `index` in the list won't + // reach the tail position. It is necessary to check the list if the last block is + // `closed`. 
+ self.inner.semaphore.is_closed() || self.inner.tx_count.load(Acquire) == 0 + } + + pub(crate) fn is_empty(&self) -> bool { + self.inner.rx_fields.with(|rx_fields_ptr| { + let rx_fields = unsafe { &*rx_fields_ptr }; + rx_fields.list.is_empty(&self.inner.tx) + }) + } + + pub(crate) fn len(&self) -> usize { + self.inner.rx_fields.with(|rx_fields_ptr| { + let rx_fields = unsafe { &*rx_fields_ptr }; + rx_fields.list.len(&self.inner.tx) + }) + } + /// Receive the next value pub(crate) fn recv(&mut self, cx: &mut Context<'_>) -> Poll> { use super::block::Read; diff --git a/tokio/src/sync/mpsc/list.rs b/tokio/src/sync/mpsc/list.rs index a8b48a87574..90d9b828c8e 100644 --- a/tokio/src/sync/mpsc/list.rs +++ b/tokio/src/sync/mpsc/list.rs @@ -218,6 +218,15 @@ impl Tx { let _ = Box::from_raw(block.as_ptr()); } } + + pub(crate) fn is_closed(&self) -> bool { + let tail = self.block_tail.load(Acquire); + + unsafe { + let tail_block = &*tail; + tail_block.is_closed() + } + } } impl fmt::Debug for Tx { @@ -230,6 +239,24 @@ impl fmt::Debug for Tx { } impl Rx { + pub(crate) fn is_empty(&self, tx: &Tx) -> bool { + let block = unsafe { self.head.as_ref() }; + if block.has_value(self.index) { + return false; + } + + // It is possible that a block has no value "now" but the list is still not empty. + // To be sure, it is necessary to check the length of the list. + self.len(tx) == 0 + } + + pub(crate) fn len(&self, tx: &Tx) -> usize { + // When all the senders are dropped, there will be a last block in the tail position, + // but it will be closed + let tail_position = tx.tail_position.load(Acquire); + tail_position - self.index - (tx.is_closed() as usize) + } + /// Pops the next value off the queue. 
pub(crate) fn pop(&mut self, tx: &Tx) -> Option> { // Advance `head`, if needed diff --git a/tokio/src/sync/mpsc/unbounded.rs b/tokio/src/sync/mpsc/unbounded.rs index e5ef0adef38..a3398c4bf54 100644 --- a/tokio/src/sync/mpsc/unbounded.rs +++ b/tokio/src/sync/mpsc/unbounded.rs @@ -330,6 +330,73 @@ impl UnboundedReceiver { self.chan.close(); } + /// Checks if a channel is closed. + /// + /// This method returns `true` if the channel has been closed. The channel is closed + /// when all [`UnboundedSender`] have been dropped, or when [`UnboundedReceiver::close`] is called. + /// + /// [`UnboundedSender`]: crate::sync::mpsc::UnboundedSender + /// [`UnboundedReceiver::close`]: crate::sync::mpsc::UnboundedReceiver::close + /// + /// # Examples + /// ``` + /// use tokio::sync::mpsc; + /// + /// #[tokio::main] + /// async fn main() { + /// let (_tx, mut rx) = mpsc::unbounded_channel::<()>(); + /// assert!(!rx.is_closed()); + /// + /// rx.close(); + /// + /// assert!(rx.is_closed()); + /// } + /// ``` + pub fn is_closed(&self) -> bool { + self.chan.is_closed() + } + + /// Checks if a channel is empty. + /// + /// This method returns `true` if the channel has no messages. + /// + /// # Examples + /// ``` + /// use tokio::sync::mpsc; + /// + /// #[tokio::main] + /// async fn main() { + /// let (tx, rx) = mpsc::unbounded_channel(); + /// assert!(rx.is_empty()); + /// + /// tx.send(0).unwrap(); + /// assert!(!rx.is_empty()); + /// } + /// + /// ``` + pub fn is_empty(&self) -> bool { + self.chan.is_empty() + } + + /// Returns the number of messages in the channel. + /// + /// # Examples + /// ``` + /// use tokio::sync::mpsc; + /// + /// #[tokio::main] + /// async fn main() { + /// let (tx, rx) = mpsc::unbounded_channel(); + /// assert_eq!(0, rx.len()); + /// + /// tx.send(0).unwrap(); + /// assert_eq!(1, rx.len()); + /// } + /// ``` + pub fn len(&self) -> usize { + self.chan.len() + } + /// Polls to receive the next message on this channel. 
/// /// This method returns: diff --git a/tokio/src/sync/tests/loom_mpsc.rs b/tokio/src/sync/tests/loom_mpsc.rs index f165e7076e7..1dbe5ea419c 100644 --- a/tokio/src/sync/tests/loom_mpsc.rs +++ b/tokio/src/sync/tests/loom_mpsc.rs @@ -188,3 +188,37 @@ fn try_recv() { } }); } + +#[test] +fn len_nonzero_after_send() { + loom::model(|| { + let (send, recv) = mpsc::channel(10); + let send2 = send.clone(); + + let join = thread::spawn(move || { + block_on(send2.send("message2")).unwrap(); + }); + + block_on(send.send("message1")).unwrap(); + assert!(recv.len() != 0); + + join.join().unwrap(); + }); +} + +#[test] +fn nonempty_after_send() { + loom::model(|| { + let (send, recv) = mpsc::channel(10); + let send2 = send.clone(); + + let join = thread::spawn(move || { + block_on(send2.send("message2")).unwrap(); + }); + + block_on(send.send("message1")).unwrap(); + assert!(!recv.is_empty()); + + join.join().unwrap(); + }); +} diff --git a/tokio/tests/sync_mpsc.rs b/tokio/tests/sync_mpsc.rs index 1b581ce98c1..4a7eced13ee 100644 --- a/tokio/tests/sync_mpsc.rs +++ b/tokio/tests/sync_mpsc.rs @@ -1017,4 +1017,407 @@ async fn test_tx_capacity() { assert_eq!(tx.max_capacity(), 10); } +#[tokio::test] +async fn test_rx_is_closed_when_calling_close_with_sender() { + // is_closed should return true after calling close but still has a sender + let (_tx, mut rx) = mpsc::channel::<()>(10); + rx.close(); + + assert!(rx.is_closed()); +} + +#[tokio::test] +async fn test_rx_is_closed_when_dropping_all_senders() { + // is_closed should return true after dropping all senders + let (tx, rx) = mpsc::channel::<()>(10); + let another_tx = tx.clone(); + let task = tokio::spawn(async move { + drop(another_tx); + }); + + drop(tx); + let _ = task.await; + + assert!(rx.is_closed()); +} + +#[tokio::test] +async fn test_rx_is_not_closed_when_there_are_senders() { + // is_closed should return false when there is a sender + let (_tx, rx) = mpsc::channel::<()>(10); + assert!(!rx.is_closed()); +} + 
+#[tokio::test] +async fn test_rx_is_not_closed_when_there_are_senders_and_buffer_filled() { + // is_closed should return false when there is a sender, even if enough messages have been sent to fill the channel + let (tx, rx) = mpsc::channel(10); + for i in 0..10 { + assert!(tx.send(i).await.is_ok()); + } + assert!(!rx.is_closed()); +} + +#[tokio::test] +async fn test_rx_is_closed_when_there_are_no_senders_and_there_are_messages() { + // is_closed should return true when there are messages in the buffer, but no senders + let (tx, rx) = mpsc::channel(10); + for i in 0..10 { + assert!(tx.send(i).await.is_ok()); + } + drop(tx); + assert!(rx.is_closed()); +} + +#[tokio::test] +async fn test_rx_is_closed_when_there_are_messages_and_close_is_called() { + // is_closed should return true when there are messages in the buffer, and close is called + let (tx, mut rx) = mpsc::channel(10); + for i in 0..10 { + assert!(tx.send(i).await.is_ok()); + } + rx.close(); + assert!(rx.is_closed()); +} + +#[tokio::test] +async fn test_rx_is_not_closed_when_there_are_permits_but_not_senders() { + // is_closed should return false when there is a permit (but no senders) + let (tx, rx) = mpsc::channel::<()>(10); + let _permit = tx.reserve_owned().await.expect("Failed to reserve permit"); + assert!(!rx.is_closed()); +} + +#[tokio::test] +async fn test_rx_is_empty_when_no_messages_were_sent() { + let (_tx, rx) = mpsc::channel::<()>(10); + assert!(rx.is_empty()) +} + +#[tokio::test] +async fn test_rx_is_not_empty_when_there_are_messages_in_the_buffer() { + let (tx, rx) = mpsc::channel::<()>(10); + assert!(tx.send(()).await.is_ok()); + assert!(!rx.is_empty()) +} + +#[tokio::test] +async fn test_rx_is_not_empty_when_the_buffer_is_full() { + let (tx, rx) = mpsc::channel(10); + for i in 0..10 { + assert!(tx.send(i).await.is_ok()); + } + assert!(!rx.is_empty()) +} + +#[tokio::test] +async fn test_rx_is_not_empty_when_all_but_one_messages_are_consumed() { + let (tx, mut rx) = mpsc::channel(10); + for 
i in 0..10 { + assert!(tx.send(i).await.is_ok()); + } + + for _ in 0..9 { + assert!(rx.recv().await.is_some()); + } + + assert!(!rx.is_empty()) +} + +#[tokio::test] +async fn test_rx_is_empty_when_all_messages_are_consumed() { + let (tx, mut rx) = mpsc::channel(10); + for i in 0..10 { + assert!(tx.send(i).await.is_ok()); + } + while rx.try_recv().is_ok() {} + assert!(rx.is_empty()) +} + +#[tokio::test] +async fn test_rx_is_empty_all_senders_are_dropped_and_messages_consumed() { + let (tx, mut rx) = mpsc::channel(10); + for i in 0..10 { + assert!(tx.send(i).await.is_ok()); + } + drop(tx); + + for _ in 0..10 { + assert!(rx.recv().await.is_some()); + } + + assert!(rx.is_empty()) +} + +#[tokio::test] +async fn test_rx_len_on_empty_channel() { + let (_tx, rx) = mpsc::channel::<()>(100); + assert_eq!(rx.len(), 0); +} + +#[tokio::test] +async fn test_rx_len_on_empty_channel_without_senders() { + // when all senders are dropped, a "closed" value is added to the end of the linked list. + // here we test that the "closed" value does not change the len of the channel. 
+ + let (tx, rx) = mpsc::channel::<()>(100); + drop(tx); + assert_eq!(rx.len(), 0); +} + +#[tokio::test] +async fn test_rx_len_on_filled_channel() { + let (tx, rx) = mpsc::channel(100); + + for i in 0..100 { + assert!(tx.send(i).await.is_ok()); + } + assert_eq!(rx.len(), 100); +} + +#[tokio::test] +async fn test_rx_len_on_filled_channel_without_senders() { + let (tx, rx) = mpsc::channel(100); + + for i in 0..100 { + assert!(tx.send(i).await.is_ok()); + } + drop(tx); + assert_eq!(rx.len(), 100); +} + +#[tokio::test] +async fn test_rx_len_when_consuming_all_messages() { + let (tx, mut rx) = mpsc::channel(100); + + for i in 0..100 { + assert!(tx.send(i).await.is_ok()); + assert_eq!(rx.len(), i + 1); + } + + drop(tx); + + for i in (0..100).rev() { + assert!(rx.recv().await.is_some()); + assert_eq!(rx.len(), i); + } +} + +#[tokio::test] +async fn test_rx_len_when_close_is_called() { + let (tx, mut rx) = mpsc::channel(100); + tx.send(()).await.unwrap(); + rx.close(); + + assert_eq!(rx.len(), 1); +} + +#[tokio::test] +async fn test_rx_len_when_close_is_called_before_dropping_sender() { + let (tx, mut rx) = mpsc::channel(100); + tx.send(()).await.unwrap(); + rx.close(); + drop(tx); + + assert_eq!(rx.len(), 1); +} + +#[tokio::test] +async fn test_rx_len_when_close_is_called_after_dropping_sender() { + let (tx, mut rx) = mpsc::channel(100); + tx.send(()).await.unwrap(); + drop(tx); + rx.close(); + + assert_eq!(rx.len(), 1); +} + +#[tokio::test] +async fn test_rx_unbounded_is_closed_when_calling_close_with_sender() { + // is_closed should return true after calling close but still has a sender + let (_tx, mut rx) = mpsc::unbounded_channel::<()>(); + rx.close(); + + assert!(rx.is_closed()); +} + +#[tokio::test] +async fn test_rx_unbounded_is_closed_when_dropping_all_senders() { + // is_closed should return true after dropping all senders + let (tx, rx) = mpsc::unbounded_channel::<()>(); + let another_tx = tx.clone(); + let task = tokio::spawn(async move { + drop(another_tx); + 
}); + + drop(tx); + let _ = task.await; + + assert!(rx.is_closed()); +} + +#[tokio::test] +async fn test_rx_unbounded_is_not_closed_when_there_are_senders() { + // is_closed should return false when there is a sender + let (_tx, rx) = mpsc::unbounded_channel::<()>(); + assert!(!rx.is_closed()); +} + +#[tokio::test] +async fn test_rx_unbounded_is_closed_when_there_are_no_senders_and_there_are_messages() { + // is_closed should return true when there are messages in the buffer, but no senders + let (tx, rx) = mpsc::unbounded_channel(); + for i in 0..10 { + assert!(tx.send(i).is_ok()); + } + drop(tx); + assert!(rx.is_closed()); +} + +#[tokio::test] +async fn test_rx_unbounded_is_closed_when_there_are_messages_and_close_is_called() { + // is_closed should return true when there are messages in the buffer, and close is called + let (tx, mut rx) = mpsc::unbounded_channel(); + for i in 0..10 { + assert!(tx.send(i).is_ok()); + } + rx.close(); + assert!(rx.is_closed()); +} + +#[tokio::test] +async fn test_rx_unbounded_is_empty_when_no_messages_were_sent() { + let (_tx, rx) = mpsc::unbounded_channel::<()>(); + assert!(rx.is_empty()) +} + +#[tokio::test] +async fn test_rx_unbounded_is_not_empty_when_there_are_messages_in_the_buffer() { + let (tx, rx) = mpsc::unbounded_channel(); + assert!(tx.send(()).is_ok()); + assert!(!rx.is_empty()) +} + +#[tokio::test] +async fn test_rx_unbounded_is_not_empty_when_all_but_one_messages_are_consumed() { + let (tx, mut rx) = mpsc::unbounded_channel(); + for i in 0..10 { + assert!(tx.send(i).is_ok()); + } + + for _ in 0..9 { + assert!(rx.recv().await.is_some()); + } + + assert!(!rx.is_empty()) +} + +#[tokio::test] +async fn test_rx_unbounded_is_empty_when_all_messages_are_consumed() { + let (tx, mut rx) = mpsc::unbounded_channel(); + for i in 0..10 { + assert!(tx.send(i).is_ok()); + } + while rx.try_recv().is_ok() {} + assert!(rx.is_empty()) +} + +#[tokio::test] +async fn 
test_rx_unbounded_is_empty_all_senders_are_dropped_and_messages_consumed() { + let (tx, mut rx) = mpsc::unbounded_channel(); + for i in 0..10 { + assert!(tx.send(i).is_ok()); + } + drop(tx); + + for _ in 0..10 { + assert!(rx.recv().await.is_some()); + } + + assert!(rx.is_empty()) +} + +#[tokio::test] +async fn test_rx_unbounded_len_on_empty_channel() { + let (_tx, rx) = mpsc::unbounded_channel::<()>(); + assert_eq!(rx.len(), 0); +} + +#[tokio::test] +async fn test_rx_unbounded_len_on_empty_channel_without_senders() { + // when all senders are dropped, a "closed" value is added to the end of the linked list. + // here we test that the "closed" value does not change the len of the channel. + + let (tx, rx) = mpsc::unbounded_channel::<()>(); + drop(tx); + assert_eq!(rx.len(), 0); +} + +#[tokio::test] +async fn test_rx_unbounded_len_with_multiple_messages() { + let (tx, rx) = mpsc::unbounded_channel(); + + for i in 0..100 { + assert!(tx.send(i).is_ok()); + } + assert_eq!(rx.len(), 100); +} + +#[tokio::test] +async fn test_rx_unbounded_len_with_multiple_messages_and_dropped_senders() { + let (tx, rx) = mpsc::unbounded_channel(); + + for i in 0..100 { + assert!(tx.send(i).is_ok()); + } + drop(tx); + assert_eq!(rx.len(), 100); +} + +#[tokio::test] +async fn test_rx_unbounded_len_when_consuming_all_messages() { + let (tx, mut rx) = mpsc::unbounded_channel(); + + for i in 0..100 { + assert!(tx.send(i).is_ok()); + assert_eq!(rx.len(), i + 1); + } + + drop(tx); + + for i in (0..100).rev() { + assert!(rx.recv().await.is_some()); + assert_eq!(rx.len(), i); + } +} + +#[tokio::test] +async fn test_rx_unbounded_len_when_close_is_called() { + let (tx, mut rx) = mpsc::unbounded_channel(); + tx.send(()).unwrap(); + rx.close(); + + assert_eq!(rx.len(), 1); +} + +#[tokio::test] +async fn test_rx_unbounded_len_when_close_is_called_before_dropping_sender() { + let (tx, mut rx) = mpsc::unbounded_channel(); + tx.send(()).unwrap(); + rx.close(); + drop(tx); + + assert_eq!(rx.len(), 1); +} + 
+#[tokio::test] +async fn test_rx_unbounded_len_when_close_is_called_after_dropping_sender() { + let (tx, mut rx) = mpsc::unbounded_channel(); + tx.send(()).unwrap(); + drop(tx); + rx.close(); + + assert_eq!(rx.len(), 1); +} + fn is_debug(_: &T) {} diff --git a/tokio/tests/sync_mpsc_weak.rs b/tokio/tests/sync_mpsc_weak.rs index 7716902f959..6b7555a5cdd 100644 --- a/tokio/tests/sync_mpsc_weak.rs +++ b/tokio/tests/sync_mpsc_weak.rs @@ -512,6 +512,24 @@ fn test_tx_count_weak_unbounded_sender() { assert!(tx_weak.upgrade().is_none() && tx_weak2.upgrade().is_none()); } +#[tokio::test] +async fn test_rx_is_closed_when_dropping_all_senders_except_weak_senders() { + // is_closed should return true after dropping all senders except for a weak sender + let (tx, rx) = mpsc::channel::<()>(10); + let _weak_sender = tx.clone().downgrade(); + drop(tx); + assert!(rx.is_closed()); +} + +#[tokio::test] +async fn test_rx_unbounded_is_closed_when_dropping_all_senders_except_weak_senders() { + // is_closed should return true after dropping all senders except for a weak sender + let (tx, rx) = mpsc::unbounded_channel::<()>(); + let _weak_sender = tx.clone().downgrade(); + drop(tx); + assert!(rx.is_closed()); +} + #[tokio::test] async fn sender_strong_count_when_cloned() { let (tx, _rx) = mpsc::channel::<()>(1); From 4565b81097e8938761431592c0ad36df3bd20cd2 Mon Sep 17 00:00:00 2001 From: "Matthieu Le brazidec (r3v2d0g)" Date: Sun, 24 Mar 2024 16:37:05 +0100 Subject: [PATCH 084/162] sync: add a `rwlock()` method to owned `RwLock` guards (#6418) --- tokio/src/sync/rwlock/owned_read_guard.rs | 26 +++++++++++++++++++ tokio/src/sync/rwlock/owned_write_guard.rs | 20 ++++++++++++++ .../sync/rwlock/owned_write_guard_mapped.rs | 25 ++++++++++++++++++ 3 files changed, 71 insertions(+) diff --git a/tokio/src/sync/rwlock/owned_read_guard.rs b/tokio/src/sync/rwlock/owned_read_guard.rs index 273e7b86f2f..f50b2abcaf5 100644 --- a/tokio/src/sync/rwlock/owned_read_guard.rs +++ 
b/tokio/src/sync/rwlock/owned_read_guard.rs @@ -138,6 +138,32 @@ impl OwnedRwLockReadGuard { resource_span: this.resource_span, }) } + + /// Returns a reference to the original `Arc`. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// use tokio::sync::{RwLock, OwnedRwLockReadGuard}; + /// + /// #[derive(Debug, Clone, Copy, PartialEq, Eq)] + /// struct Foo(u32); + /// + /// # #[tokio::main] + /// # async fn main() { + /// let lock = Arc::new(RwLock::new(Foo(1))); + /// + /// let guard = lock.clone().read_owned().await; + /// assert!(Arc::ptr_eq(&lock, OwnedRwLockReadGuard::rwlock(&guard))); + /// + /// let guard = OwnedRwLockReadGuard::map(guard, |f| &f.0); + /// assert!(Arc::ptr_eq(&lock, OwnedRwLockReadGuard::rwlock(&guard))); + /// # } + /// ``` + pub fn rwlock(this: &Self) -> &Arc> { + &this.lock + } } impl ops::Deref for OwnedRwLockReadGuard { diff --git a/tokio/src/sync/rwlock/owned_write_guard.rs b/tokio/src/sync/rwlock/owned_write_guard.rs index a8ce4a1603f..11be26a9bad 100644 --- a/tokio/src/sync/rwlock/owned_write_guard.rs +++ b/tokio/src/sync/rwlock/owned_write_guard.rs @@ -390,6 +390,26 @@ impl OwnedRwLockWriteGuard { guard } + + /// Returns a reference to the original `Arc`. 
+ /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// use tokio::sync::{RwLock, OwnedRwLockWriteGuard}; + /// + /// # #[tokio::main] + /// # async fn main() { + /// let lock = Arc::new(RwLock::new(1)); + /// + /// let guard = lock.clone().write_owned().await; + /// assert!(Arc::ptr_eq(&lock, OwnedRwLockWriteGuard::rwlock(&guard))); + /// # } + /// ``` + pub fn rwlock(this: &Self) -> &Arc> { + &this.lock + } } impl ops::Deref for OwnedRwLockWriteGuard { diff --git a/tokio/src/sync/rwlock/owned_write_guard_mapped.rs b/tokio/src/sync/rwlock/owned_write_guard_mapped.rs index 9f4952100a5..e0699d09794 100644 --- a/tokio/src/sync/rwlock/owned_write_guard_mapped.rs +++ b/tokio/src/sync/rwlock/owned_write_guard_mapped.rs @@ -155,6 +155,31 @@ impl OwnedRwLockMappedWriteGuard { resource_span: this.resource_span, }) } + + /// Returns a reference to the original `Arc`. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// use tokio::sync::{ + /// RwLock, + /// OwnedRwLockWriteGuard, + /// OwnedRwLockMappedWriteGuard, + /// }; + /// + /// # #[tokio::main] + /// # async fn main() { + /// let lock = Arc::new(RwLock::new(1)); + /// + /// let guard = lock.clone().write_owned().await; + /// let guard = OwnedRwLockWriteGuard::map(guard, |x| x); + /// assert!(Arc::ptr_eq(&lock, OwnedRwLockMappedWriteGuard::rwlock(&guard))); + /// # } + /// ``` + pub fn rwlock(this: &Self) -> &Arc> { + &this.lock + } } impl ops::Deref for OwnedRwLockMappedWriteGuard { From deff2524c354d3d3038e1c3813032701946a5c68 Mon Sep 17 00:00:00 2001 From: "M.Amin Rayej" Date: Mon, 25 Mar 2024 01:53:01 +0330 Subject: [PATCH 085/162] util: document cancel safety of `SinkExt::send` and `StreamExt::next` (#6417) --- tokio-util/src/codec/framed.rs | 11 +++++++++++ tokio-util/src/codec/framed_read.rs | 6 ++++++ tokio-util/src/codec/framed_write.rs | 7 +++++++ 3 files changed, 24 insertions(+) diff --git a/tokio-util/src/codec/framed.rs b/tokio-util/src/codec/framed.rs index 
e988da0a734..09a5b30b925 100644 --- a/tokio-util/src/codec/framed.rs +++ b/tokio-util/src/codec/framed.rs @@ -20,10 +20,21 @@ pin_project! { /// You can create a `Framed` instance by using the [`Decoder::framed`] adapter, or /// by using the `new` function seen below. /// + /// # Cancellation safety + /// + /// * [`futures_util::sink::SinkExt::send`]: if send is used as the event in a + /// `tokio::select!` statement and some other branch completes first, then it is + /// guaranteed that the message was not sent, but the message itself is lost. + /// * [`tokio_stream::StreamExt::next`]: This method is cancel safe. The returned + /// future only holds onto a reference to the underlying stream, so dropping it will + /// never lose a value. + /// /// [`Stream`]: futures_core::Stream /// [`Sink`]: futures_sink::Sink /// [`AsyncRead`]: tokio::io::AsyncRead /// [`Decoder::framed`]: crate::codec::Decoder::framed() + /// [`futures_util::sink::SinkExt::send`]: futures_util::sink::SinkExt::send + /// [`tokio_stream::StreamExt::next`]: https://docs.rs/tokio-stream/latest/tokio_stream/trait.StreamExt.html#method.next pub struct Framed { #[pin] inner: FramedImpl diff --git a/tokio-util/src/codec/framed_read.rs b/tokio-util/src/codec/framed_read.rs index 90ba5e7c9d0..3ede5876b57 100644 --- a/tokio-util/src/codec/framed_read.rs +++ b/tokio-util/src/codec/framed_read.rs @@ -17,9 +17,15 @@ pin_project! { /// For examples of how to use `FramedRead` with a codec, see the /// examples on the [`codec`] module. /// + /// # Cancellation safety + /// * [`tokio_stream::StreamExt::next`]: This method is cancel safe. The returned + /// future only holds onto a reference to the underlying stream, so dropping it will + /// never lose a value. 
+ /// /// [`Stream`]: futures_core::Stream /// [`AsyncRead`]: tokio::io::AsyncRead /// [`codec`]: crate::codec + /// [`tokio_stream::StreamExt::next`]: https://docs.rs/tokio-stream/latest/tokio_stream/trait.StreamExt.html#method.next pub struct FramedRead { #[pin] inner: FramedImpl, diff --git a/tokio-util/src/codec/framed_write.rs b/tokio-util/src/codec/framed_write.rs index a7efaadd2b9..b2cab069c1f 100644 --- a/tokio-util/src/codec/framed_write.rs +++ b/tokio-util/src/codec/framed_write.rs @@ -18,8 +18,15 @@ pin_project! { /// For examples of how to use `FramedWrite` with a codec, see the /// examples on the [`codec`] module. /// + /// # Cancellation safety + /// + /// * [`futures_util::sink::SinkExt::send`]: if send is used as the event in a + /// `tokio::select!` statement and some other branch completes first, then it is + /// guaranteed that the message was not sent, but the message itself is lost. + /// /// [`Sink`]: futures_sink::Sink /// [`codec`]: crate::codec + /// [`futures_util::sink::SinkExt::send`]: futures_util::sink::SinkExt::send pub struct FramedWrite { #[pin] inner: FramedImpl, From 4601c84718aafa9e46fed1c16f31dd500052b368 Mon Sep 17 00:00:00 2001 From: "M.Amin Rayej" Date: Tue, 26 Mar 2024 18:38:53 +0330 Subject: [PATCH 086/162] stream: add `next_many` and `poll_next_many` to `StreamMap` (#6409) --- tokio-stream/src/lib.rs | 3 + tokio-stream/src/poll_fn.rs | 35 ++++ tokio-stream/src/stream_map.rs | 106 ++++++++++- tokio-stream/tests/stream_stream_map.rs | 237 +++++++++++++++++++++++- 4 files changed, 378 insertions(+), 3 deletions(-) create mode 100644 tokio-stream/src/poll_fn.rs diff --git a/tokio-stream/src/lib.rs b/tokio-stream/src/lib.rs index b6e651c7b8b..6ff1085a552 100644 --- a/tokio-stream/src/lib.rs +++ b/tokio-stream/src/lib.rs @@ -73,6 +73,9 @@ #[macro_use] mod macros; +mod poll_fn; +pub(crate) use poll_fn::poll_fn; + pub mod wrappers; mod stream_ext; diff --git a/tokio-stream/src/poll_fn.rs b/tokio-stream/src/poll_fn.rs new file 
mode 100644 index 00000000000..744f22f02b4 --- /dev/null +++ b/tokio-stream/src/poll_fn.rs @@ -0,0 +1,35 @@ +use std::future::Future; +use std::pin::Pin; +use std::task::{Context, Poll}; + +pub(crate) struct PollFn { + f: F, +} + +pub(crate) fn poll_fn(f: F) -> PollFn +where + F: FnMut(&mut Context<'_>) -> Poll, +{ + PollFn { f } +} + +impl Future for PollFn +where + F: FnMut(&mut Context<'_>) -> Poll, +{ + type Output = T; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + // Safety: We never construct a `Pin<&mut F>` anywhere, so accessing `f` + // mutably in an unpinned way is sound. + // + // This use of unsafe cannot be replaced with the pin-project macro + // because: + // * If we put `#[pin]` on the field, then it gives us a `Pin<&mut F>`, + // which we can't use to call the closure. + // * If we don't put `#[pin]` on the field, then it makes `PollFn` be + // unconditionally `Unpin`, which we also don't want. + let me = unsafe { Pin::into_inner_unchecked(self) }; + (me.f)(cx) + } +} diff --git a/tokio-stream/src/stream_map.rs b/tokio-stream/src/stream_map.rs index 3f424eca221..41ab9648cad 100644 --- a/tokio-stream/src/stream_map.rs +++ b/tokio-stream/src/stream_map.rs @@ -1,4 +1,4 @@ -use crate::Stream; +use crate::{poll_fn, Stream}; use std::borrow::Borrow; use std::hash::Hash; @@ -561,6 +561,110 @@ impl Default for StreamMap { } } +impl StreamMap +where + K: Clone + Unpin, + V: Stream + Unpin, +{ + /// Receives multiple items on this [`StreamMap`], extending the provided `buffer`. + /// + /// This method returns the number of items that is appended to the `buffer`. + /// + /// Note that this method does not guarantee that exactly `limit` items + /// are received. Rather, if at least one item is available, it returns + /// as many items as it can up to the given limit. This method returns + /// zero only if the `StreamMap` is empty (or if `limit` is zero). + /// + /// # Cancel safety + /// + /// This method is cancel safe. 
If `next_many` is used as the event in a + /// [`tokio::select!`](tokio::select) statement and some other branch + /// completes first, it is guaranteed that no items were received on any of + /// the underlying streams. + pub async fn next_many(&mut self, buffer: &mut Vec<(K, V::Item)>, limit: usize) -> usize { + poll_fn(|cx| self.poll_next_many(cx, buffer, limit)).await + } + + /// Polls to receive multiple items on this `StreamMap`, extending the provided `buffer`. + /// + /// This method returns: + /// * `Poll::Pending` if no items are available but the `StreamMap` is not empty. + /// * `Poll::Ready(count)` where `count` is the number of items successfully received and + /// stored in `buffer`. This can be less than, or equal to, `limit`. + /// * `Poll::Ready(0)` if `limit` is set to zero or when the `StreamMap` is empty. + /// + /// Note that this method does not guarantee that exactly `limit` items + /// are received. Rather, if at least one item is available, it returns + /// as many items as it can up to the given limit. This method returns + /// zero only if the `StreamMap` is empty (or if `limit` is zero). 
+ pub fn poll_next_many( + &mut self, + cx: &mut Context<'_>, + buffer: &mut Vec<(K, V::Item)>, + limit: usize, + ) -> Poll { + if limit == 0 || self.entries.is_empty() { + return Poll::Ready(0); + } + + let mut added = 0; + + let start = self::rand::thread_rng_n(self.entries.len() as u32) as usize; + let mut idx = start; + + while added < limit { + // Indicates whether at least one stream returned a value when polled or not + let mut should_loop = false; + + for _ in 0..self.entries.len() { + let (_, stream) = &mut self.entries[idx]; + + match Pin::new(stream).poll_next(cx) { + Poll::Ready(Some(val)) => { + added += 1; + + let key = self.entries[idx].0.clone(); + buffer.push((key, val)); + + should_loop = true; + + idx = idx.wrapping_add(1) % self.entries.len(); + } + Poll::Ready(None) => { + // Remove the entry + self.entries.swap_remove(idx); + + // Check if this was the last entry, if so the cursor needs + // to wrap + if idx == self.entries.len() { + idx = 0; + } else if idx < start && start <= self.entries.len() { + // The stream being swapped into the current index has + // already been polled, so skip it. 
+ idx = idx.wrapping_add(1) % self.entries.len(); + } + } + Poll::Pending => { + idx = idx.wrapping_add(1) % self.entries.len(); + } + } + } + + if !should_loop { + break; + } + } + + if added > 0 { + Poll::Ready(added) + } else if self.entries.is_empty() { + Poll::Ready(0) + } else { + Poll::Pending + } + } +} + impl Stream for StreamMap where K: Clone + Unpin, diff --git a/tokio-stream/tests/stream_stream_map.rs b/tokio-stream/tests/stream_stream_map.rs index b6b87e9d0ac..5acceb5c9a6 100644 --- a/tokio-stream/tests/stream_stream_map.rs +++ b/tokio-stream/tests/stream_stream_map.rs @@ -1,14 +1,17 @@ +use futures::stream::iter; use tokio_stream::{self as stream, pending, Stream, StreamExt, StreamMap}; use tokio_test::{assert_ok, assert_pending, assert_ready, task}; +use std::future::{poll_fn, Future}; +use std::pin::{pin, Pin}; +use std::task::Poll; + mod support { pub(crate) mod mpsc; } use support::mpsc; -use std::pin::Pin; - macro_rules! assert_ready_some { ($($t:tt)*) => { match assert_ready!($($t)*) { @@ -328,3 +331,233 @@ fn one_ready_many_none() { fn pin_box + 'static, U>(s: T) -> Pin>> { Box::pin(s) } + +type UsizeStream = Pin + Send>>; + +#[tokio::test] +async fn poll_next_many_zero() { + let mut stream_map: StreamMap = StreamMap::new(); + + stream_map.insert(0, Box::pin(pending()) as UsizeStream); + + let n = poll_fn(|cx| stream_map.poll_next_many(cx, &mut vec![], 0)).await; + + assert_eq!(n, 0); +} + +#[tokio::test] +async fn poll_next_many_empty() { + let mut stream_map: StreamMap = StreamMap::new(); + + let n = poll_fn(|cx| stream_map.poll_next_many(cx, &mut vec![], 1)).await; + + assert_eq!(n, 0); +} + +#[tokio::test] +async fn poll_next_many_pending() { + let mut stream_map: StreamMap = StreamMap::new(); + + stream_map.insert(0, Box::pin(pending()) as UsizeStream); + + let mut is_pending = false; + poll_fn(|cx| { + let poll = stream_map.poll_next_many(cx, &mut vec![], 1); + + is_pending = poll.is_pending(); + + Poll::Ready(()) + }) + .await; + + 
assert!(is_pending); +} + +#[tokio::test] +async fn poll_next_many_not_enough() { + let mut stream_map: StreamMap = StreamMap::new(); + + stream_map.insert(0, Box::pin(iter([0usize].into_iter())) as UsizeStream); + stream_map.insert(1, Box::pin(iter([1usize].into_iter())) as UsizeStream); + + let mut buffer = vec![]; + let n = poll_fn(|cx| stream_map.poll_next_many(cx, &mut buffer, 3)).await; + + assert_eq!(n, 2); + assert_eq!(buffer.len(), 2); + assert!(buffer.contains(&(0, 0))); + assert!(buffer.contains(&(1, 1))); +} + +#[tokio::test] +async fn poll_next_many_enough() { + let mut stream_map: StreamMap = StreamMap::new(); + + stream_map.insert(0, Box::pin(iter([0usize].into_iter())) as UsizeStream); + stream_map.insert(1, Box::pin(iter([1usize].into_iter())) as UsizeStream); + + let mut buffer = vec![]; + let n = poll_fn(|cx| stream_map.poll_next_many(cx, &mut buffer, 2)).await; + + assert_eq!(n, 2); + assert_eq!(buffer.len(), 2); + assert!(buffer.contains(&(0, 0))); + assert!(buffer.contains(&(1, 1))); +} + +#[tokio::test] +async fn poll_next_many_correctly_loops_around() { + for _ in 0..10 { + let mut stream_map: StreamMap = StreamMap::new(); + + stream_map.insert(0, Box::pin(iter([0usize].into_iter())) as UsizeStream); + stream_map.insert(1, Box::pin(iter([0usize, 1].into_iter())) as UsizeStream); + stream_map.insert(2, Box::pin(iter([0usize, 1, 2].into_iter())) as UsizeStream); + + let mut buffer = vec![]; + + let n = poll_fn(|cx| stream_map.poll_next_many(cx, &mut buffer, 3)).await; + assert_eq!(n, 3); + assert_eq!( + std::mem::take(&mut buffer) + .into_iter() + .map(|(_, v)| v) + .collect::>(), + vec![0, 0, 0] + ); + + let n = poll_fn(|cx| stream_map.poll_next_many(cx, &mut buffer, 2)).await; + assert_eq!(n, 2); + assert_eq!( + std::mem::take(&mut buffer) + .into_iter() + .map(|(_, v)| v) + .collect::>(), + vec![1, 1] + ); + + let n = poll_fn(|cx| stream_map.poll_next_many(cx, &mut buffer, 1)).await; + assert_eq!(n, 1); + assert_eq!( + std::mem::take(&mut 
buffer) + .into_iter() + .map(|(_, v)| v) + .collect::>(), + vec![2] + ); + } +} + +#[tokio::test] +async fn next_many_zero() { + let mut stream_map: StreamMap = StreamMap::new(); + + stream_map.insert(0, Box::pin(pending()) as UsizeStream); + + let n = poll_fn(|cx| pin!(stream_map.next_many(&mut vec![], 0)).poll(cx)).await; + + assert_eq!(n, 0); +} + +#[tokio::test] +async fn next_many_empty() { + let mut stream_map: StreamMap = StreamMap::new(); + + let n = stream_map.next_many(&mut vec![], 1).await; + + assert_eq!(n, 0); +} + +#[tokio::test] +async fn next_many_pending() { + let mut stream_map: StreamMap = StreamMap::new(); + + stream_map.insert(0, Box::pin(pending()) as UsizeStream); + + let mut is_pending = false; + poll_fn(|cx| { + let poll = pin!(stream_map.next_many(&mut vec![], 1)).poll(cx); + + is_pending = poll.is_pending(); + + Poll::Ready(()) + }) + .await; + + assert!(is_pending); +} + +#[tokio::test] +async fn next_many_not_enough() { + let mut stream_map: StreamMap = StreamMap::new(); + + stream_map.insert(0, Box::pin(iter([0usize].into_iter())) as UsizeStream); + stream_map.insert(1, Box::pin(iter([1usize].into_iter())) as UsizeStream); + + let mut buffer = vec![]; + let n = poll_fn(|cx| pin!(stream_map.next_many(&mut buffer, 3)).poll(cx)).await; + + assert_eq!(n, 2); + assert_eq!(buffer.len(), 2); + assert!(buffer.contains(&(0, 0))); + assert!(buffer.contains(&(1, 1))); +} + +#[tokio::test] +async fn next_many_enough() { + let mut stream_map: StreamMap = StreamMap::new(); + + stream_map.insert(0, Box::pin(iter([0usize].into_iter())) as UsizeStream); + stream_map.insert(1, Box::pin(iter([1usize].into_iter())) as UsizeStream); + + let mut buffer = vec![]; + let n = poll_fn(|cx| pin!(stream_map.next_many(&mut buffer, 2)).poll(cx)).await; + + assert_eq!(n, 2); + assert_eq!(buffer.len(), 2); + assert!(buffer.contains(&(0, 0))); + assert!(buffer.contains(&(1, 1))); +} + +#[tokio::test] +async fn next_many_correctly_loops_around() { + for _ in 0..10 { + 
let mut stream_map: StreamMap = StreamMap::new(); + + stream_map.insert(0, Box::pin(iter([0usize].into_iter())) as UsizeStream); + stream_map.insert(1, Box::pin(iter([0usize, 1].into_iter())) as UsizeStream); + stream_map.insert(2, Box::pin(iter([0usize, 1, 2].into_iter())) as UsizeStream); + + let mut buffer = vec![]; + + let n = poll_fn(|cx| pin!(stream_map.next_many(&mut buffer, 3)).poll(cx)).await; + assert_eq!(n, 3); + assert_eq!( + std::mem::take(&mut buffer) + .into_iter() + .map(|(_, v)| v) + .collect::>(), + vec![0, 0, 0] + ); + + let n = poll_fn(|cx| pin!(stream_map.next_many(&mut buffer, 2)).poll(cx)).await; + assert_eq!(n, 2); + assert_eq!( + std::mem::take(&mut buffer) + .into_iter() + .map(|(_, v)| v) + .collect::>(), + vec![1, 1] + ); + + let n = poll_fn(|cx| pin!(stream_map.next_many(&mut buffer, 1)).poll(cx)).await; + assert_eq!(n, 1); + assert_eq!( + std::mem::take(&mut buffer) + .into_iter() + .map(|(_, v)| v) + .collect::>(), + vec![2] + ); + } +} From e5425014392de0a44c27fac054472b4c3926ef26 Mon Sep 17 00:00:00 2001 From: "M.Amin Rayej" Date: Tue, 26 Mar 2024 20:45:15 +0330 Subject: [PATCH 087/162] io: document cancel safety of `AsyncBufReadExt::fill_buf` (#6431) --- tokio/src/io/util/async_buf_read_ext.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tokio/src/io/util/async_buf_read_ext.rs b/tokio/src/io/util/async_buf_read_ext.rs index 92500f7f869..1e9da4c8c4d 100644 --- a/tokio/src/io/util/async_buf_read_ext.rs +++ b/tokio/src/io/util/async_buf_read_ext.rs @@ -267,6 +267,12 @@ cfg_io_util! { /// This function will return an I/O error if the underlying reader was /// read, but returned an error. /// + /// # Cancel safety + /// + /// This method is cancel safe. If you use it as the event in a + /// [`tokio::select!`](crate::select) statement and some other branch + /// completes first, then it is guaranteed that no data was read. 
+ /// /// [`consume`]: crate::io::AsyncBufReadExt::consume fn fill_buf(&mut self) -> FillBuf<'_, Self> where From 9c337ca1a306be38e3474082be14bdef4bcb45b5 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Thu, 28 Mar 2024 17:34:42 +0100 Subject: [PATCH 088/162] chore: prepare Tokio v1.37.0 (#6435) --- README.md | 2 +- tokio/CHANGELOG.md | 68 ++++++++++++++++++++++++++++++++++++++++++++++ tokio/Cargo.toml | 2 +- tokio/README.md | 2 +- 4 files changed, 71 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index fcfc1b92bc2..5706d1ab6b0 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,7 @@ Make sure you activated the full features of the tokio crate on Cargo.toml: ```toml [dependencies] -tokio = { version = "1.36.0", features = ["full"] } +tokio = { version = "1.37.0", features = ["full"] } ``` Then, on your main.rs: diff --git a/tokio/CHANGELOG.md b/tokio/CHANGELOG.md index d9b17bfbf8a..54346a78d97 100644 --- a/tokio/CHANGELOG.md +++ b/tokio/CHANGELOG.md @@ -1,3 +1,71 @@ +# 1.37.0 (March 28th, 2024) + +### Added + +- fs: add `set_max_buf_size` to `tokio::fs::File` ([#6411]) +- io: add `try_new` and `try_with_interest` to `AsyncFd` ([#6345]) +- sync: add `forget_permits` method to semaphore ([#6331]) +- sync: add `is_closed`, `is_empty`, and `len` to mpsc receivers ([#6348]) +- sync: add a `rwlock()` method to owned `RwLock` guards ([#6418]) +- sync: expose strong and weak counts of mpsc sender handles ([#6405]) +- sync: implement `Clone` for `watch::Sender` ([#6388]) +- task: add `TaskLocalFuture::take_value` ([#6340]) +- task: implement `FromIterator` for `JoinSet` ([#6300]) + +### Changed + +- io: make `io::split` use a mutex instead of a spinlock ([#6403]) + +### Fixed + +- docs: fix docsrs build without net feature ([#6360]) +- macros: allow select with only else branch ([#6339]) +- runtime: fix leaking registration entries when os registration fails ([#6329]) + +### Documented + +- io: document cancel safety of `AsyncBufReadExt::fill_buf` 
([#6431]) +- io: document cancel safety of `AsyncReadExt`'s primitive read functions ([#6337]) +- runtime: add doc link from `Runtime` to `#[tokio::main]` ([#6366]) +- runtime: make the `enter` example deterministic ([#6351]) +- sync: add Semaphore example for limiting the number of outgoing requests ([#6419]) +- sync: fix missing period in broadcast docs ([#6377]) +- sync: mark `mpsc::Sender::downgrade` with `#[must_use]` ([#6326]) +- sync: reorder `const_new` before `new_with` ([#6392]) +- sync: update watch channel docs ([#6395]) +- task: fix documentation links ([#6336]) + +### Changed (unstable) + +- runtime: include task `Id` in taskdumps ([#6328]) +- runtime: panic if `unhandled_panic` is enabled when not supported ([#6410]) + +[#6300]: https://github.com/tokio-rs/tokio/pull/6300 +[#6326]: https://github.com/tokio-rs/tokio/pull/6326 +[#6328]: https://github.com/tokio-rs/tokio/pull/6328 +[#6329]: https://github.com/tokio-rs/tokio/pull/6329 +[#6331]: https://github.com/tokio-rs/tokio/pull/6331 +[#6336]: https://github.com/tokio-rs/tokio/pull/6336 +[#6337]: https://github.com/tokio-rs/tokio/pull/6337 +[#6339]: https://github.com/tokio-rs/tokio/pull/6339 +[#6340]: https://github.com/tokio-rs/tokio/pull/6340 +[#6345]: https://github.com/tokio-rs/tokio/pull/6345 +[#6348]: https://github.com/tokio-rs/tokio/pull/6348 +[#6351]: https://github.com/tokio-rs/tokio/pull/6351 +[#6360]: https://github.com/tokio-rs/tokio/pull/6360 +[#6366]: https://github.com/tokio-rs/tokio/pull/6366 +[#6377]: https://github.com/tokio-rs/tokio/pull/6377 +[#6388]: https://github.com/tokio-rs/tokio/pull/6388 +[#6392]: https://github.com/tokio-rs/tokio/pull/6392 +[#6395]: https://github.com/tokio-rs/tokio/pull/6395 +[#6403]: https://github.com/tokio-rs/tokio/pull/6403 +[#6405]: https://github.com/tokio-rs/tokio/pull/6405 +[#6410]: https://github.com/tokio-rs/tokio/pull/6410 +[#6411]: https://github.com/tokio-rs/tokio/pull/6411 +[#6418]: https://github.com/tokio-rs/tokio/pull/6418 +[#6419]: 
https://github.com/tokio-rs/tokio/pull/6419 +[#6431]: https://github.com/tokio-rs/tokio/pull/6431 + # 1.36.0 (February 2nd, 2024) ### Added diff --git a/tokio/Cargo.toml b/tokio/Cargo.toml index 194494dabf7..020cc1e4ac2 100644 --- a/tokio/Cargo.toml +++ b/tokio/Cargo.toml @@ -6,7 +6,7 @@ name = "tokio" # - README.md # - Update CHANGELOG.md. # - Create "v1.x.y" git tag. -version = "1.36.0" +version = "1.37.0" edition = "2021" rust-version = "1.63" authors = ["Tokio Contributors "] diff --git a/tokio/README.md b/tokio/README.md index fcfc1b92bc2..5706d1ab6b0 100644 --- a/tokio/README.md +++ b/tokio/README.md @@ -56,7 +56,7 @@ Make sure you activated the full features of the tokio crate on Cargo.toml: ```toml [dependencies] -tokio = { version = "1.36.0", features = ["full"] } +tokio = { version = "1.37.0", features = ["full"] } ``` Then, on your main.rs: From 3936ebdfe4f44eda5630a9b461bbbc9976e5542c Mon Sep 17 00:00:00 2001 From: Siech0 Date: Sat, 30 Mar 2024 02:49:42 -0400 Subject: [PATCH 089/162] chore: update CI to clippy 1.77 (#6443) --- .github/workflows/ci.yml | 2 +- CONTRIBUTING.md | 2 +- tokio-util/tests/compat.rs | 1 + tokio/src/runtime/signal/mod.rs | 1 + tokio/tests/io_async_fd.rs | 2 +- tokio/tests/rt_common.rs | 4 ++-- tokio/tests/task_local_set.rs | 16 ++++++++-------- 7 files changed, 15 insertions(+), 13 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e9738caebfc..1ff7da915bd 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -16,7 +16,7 @@ env: # Change to specific Rust release to pin rust_stable: stable rust_nightly: nightly-2023-10-21 - rust_clippy: '1.76' + rust_clippy: '1.77' # When updating this, also update: # - README.md # - tokio/README.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 369a898fd98..b6b9f4301f8 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -149,7 +149,7 @@ When updating this, also update: --> ``` -cargo +1.76 clippy --all --tests --all-features +cargo +1.77 
clippy --all --tests --all-features ``` When building documentation normally, the markers that list the features diff --git a/tokio-util/tests/compat.rs b/tokio-util/tests/compat.rs index 8a0eab3407d..1c77081f898 100644 --- a/tokio-util/tests/compat.rs +++ b/tokio-util/tests/compat.rs @@ -15,6 +15,7 @@ async fn compat_file_seek() -> futures_util::io::Result<()> { .read(true) .write(true) .create(true) + .truncate(true) .open(temp_file) .await? .compat_write(); diff --git a/tokio/src/runtime/signal/mod.rs b/tokio/src/runtime/signal/mod.rs index 0dea1879488..bc50c6e982c 100644 --- a/tokio/src/runtime/signal/mod.rs +++ b/tokio/src/runtime/signal/mod.rs @@ -112,6 +112,7 @@ impl Driver { // Drain the pipe completely so we can receive a new readiness event // if another signal has come in. let mut buf = [0; 128]; + #[allow(clippy::unused_io_amount)] loop { match self.receiver.read(&mut buf) { Ok(0) => panic!("EOF on self-pipe"), diff --git a/tokio/tests/io_async_fd.rs b/tokio/tests/io_async_fd.rs index 6f8a10aefbc..ea798b3067a 100644 --- a/tokio/tests/io_async_fd.rs +++ b/tokio/tests/io_async_fd.rs @@ -150,7 +150,7 @@ fn socketpair() -> (FileDescriptor, FileDescriptor) { fn drain(mut fd: &FileDescriptor) { let mut buf = [0u8; 512]; - + #[allow(clippy::unused_io_amount)] loop { match fd.read(&mut buf[..]) { Err(e) if e.kind() == ErrorKind::WouldBlock => break, diff --git a/tokio/tests/rt_common.rs b/tokio/tests/rt_common.rs index 11c44a8d1c2..a71fc4a735e 100644 --- a/tokio/tests/rt_common.rs +++ b/tokio/tests/rt_common.rs @@ -1089,7 +1089,7 @@ rt_test! { use std::thread; thread_local!( - static R: RefCell> = RefCell::new(None); + static R: RefCell> = const { RefCell::new(None) }; ); thread::spawn(|| { @@ -1402,7 +1402,7 @@ rt_test! { } std::thread_local! 
{ - static TL_DATA: RefCell> = RefCell::new(None); + static TL_DATA: RefCell> = const { RefCell::new(None) }; }; let (send, recv) = channel(); diff --git a/tokio/tests/task_local_set.rs b/tokio/tests/task_local_set.rs index 168a05808bd..d965eb341eb 100644 --- a/tokio/tests/task_local_set.rs +++ b/tokio/tests/task_local_set.rs @@ -34,7 +34,7 @@ async fn local_current_thread_scheduler() { #[tokio::test(flavor = "multi_thread")] async fn local_threadpool() { thread_local! { - static ON_RT_THREAD: Cell = Cell::new(false); + static ON_RT_THREAD: Cell = const { Cell::new(false) }; } ON_RT_THREAD.with(|cell| cell.set(true)); @@ -55,7 +55,7 @@ async fn local_threadpool() { #[tokio::test(flavor = "multi_thread")] async fn localset_future_threadpool() { thread_local! { - static ON_LOCAL_THREAD: Cell = Cell::new(false); + static ON_LOCAL_THREAD: Cell = const { Cell::new(false) }; } ON_LOCAL_THREAD.with(|cell| cell.set(true)); @@ -118,7 +118,7 @@ async fn local_threadpool_timer() { // This test ensures that runtime services like the timer are properly // set for the local task set. thread_local! { - static ON_RT_THREAD: Cell = Cell::new(false); + static ON_RT_THREAD: Cell = const { Cell::new(false) }; } ON_RT_THREAD.with(|cell| cell.set(true)); @@ -158,7 +158,7 @@ fn enter_guard_spawn() { #[should_panic] fn local_threadpool_blocking_in_place() { thread_local! { - static ON_RT_THREAD: Cell = Cell::new(false); + static ON_RT_THREAD: Cell = const { Cell::new(false) }; } ON_RT_THREAD.with(|cell| cell.set(true)); @@ -182,7 +182,7 @@ fn local_threadpool_blocking_in_place() { #[tokio::test(flavor = "multi_thread")] async fn local_threadpool_blocking_run() { thread_local! { - static ON_RT_THREAD: Cell = Cell::new(false); + static ON_RT_THREAD: Cell = const { Cell::new(false) }; } ON_RT_THREAD.with(|cell| cell.set(true)); @@ -212,7 +212,7 @@ async fn local_threadpool_blocking_run() { async fn all_spawns_are_local() { use futures::future; thread_local! 
{ - static ON_RT_THREAD: Cell = Cell::new(false); + static ON_RT_THREAD: Cell = const { Cell::new(false) }; } ON_RT_THREAD.with(|cell| cell.set(true)); @@ -238,7 +238,7 @@ async fn all_spawns_are_local() { #[tokio::test(flavor = "multi_thread")] async fn nested_spawn_is_local() { thread_local! { - static ON_RT_THREAD: Cell = Cell::new(false); + static ON_RT_THREAD: Cell = const { Cell::new(false) }; } ON_RT_THREAD.with(|cell| cell.set(true)); @@ -274,7 +274,7 @@ async fn nested_spawn_is_local() { #[test] fn join_local_future_elsewhere() { thread_local! { - static ON_RT_THREAD: Cell = Cell::new(false); + static ON_RT_THREAD: Cell = const { Cell::new(false) }; } ON_RT_THREAD.with(|cell| cell.set(true)); From 1fcb77db34e2534267f5dadeba2af12ebf6f2de8 Mon Sep 17 00:00:00 2001 From: Timo <39920115+tglane@users.noreply.github.com> Date: Sat, 30 Mar 2024 13:13:06 +0100 Subject: [PATCH 090/162] io: add `T: ?Sized` to `tokio_util::io::poll_read_buf` (#6441) --- tokio-util/src/util/poll_buf.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tokio-util/src/util/poll_buf.rs b/tokio-util/src/util/poll_buf.rs index 82af1bbfc87..5a72bc4c822 100644 --- a/tokio-util/src/util/poll_buf.rs +++ b/tokio-util/src/util/poll_buf.rs @@ -46,7 +46,7 @@ use std::task::{Context, Poll}; /// # } /// ``` #[cfg_attr(not(feature = "io"), allow(unreachable_pub))] -pub fn poll_read_buf( +pub fn poll_read_buf( io: Pin<&mut T>, cx: &mut Context<'_>, buf: &mut B, @@ -120,7 +120,7 @@ pub fn poll_read_buf( /// [`File`]: tokio::fs::File /// [vectored writes]: tokio::io::AsyncWrite::poll_write_vectored #[cfg_attr(not(feature = "io"), allow(unreachable_pub))] -pub fn poll_write_buf( +pub fn poll_write_buf( io: Pin<&mut T>, cx: &mut Context<'_>, buf: &mut B, From d2980492991f223ad05f94f5d22d380e9cfd71d8 Mon Sep 17 00:00:00 2001 From: Jens Reidel Date: Sat, 30 Mar 2024 18:20:05 +0100 Subject: [PATCH 091/162] codec: make tracing feature optional for codecs (#6434) Signed-off-by: Jens Reidel 
--- tokio-util/Cargo.toml | 2 +- tokio-util/src/codec/framed_impl.rs | 1 - tokio-util/src/lib.rs | 3 +++ tokio-util/src/tracing.rs | 6 ++++++ 4 files changed, 10 insertions(+), 2 deletions(-) create mode 100644 tokio-util/src/tracing.rs diff --git a/tokio-util/Cargo.toml b/tokio-util/Cargo.toml index 12c3c813656..47f443aeee7 100644 --- a/tokio-util/Cargo.toml +++ b/tokio-util/Cargo.toml @@ -25,7 +25,7 @@ full = ["codec", "compat", "io-util", "time", "net", "rt"] net = ["tokio/net"] compat = ["futures-io",] -codec = ["tracing"] +codec = [] time = ["tokio/time","slab"] io = [] io-util = ["io", "tokio/rt", "tokio/io-util"] diff --git a/tokio-util/src/codec/framed_impl.rs b/tokio-util/src/codec/framed_impl.rs index 9a4e2a8f6b9..e7cb691aed5 100644 --- a/tokio-util/src/codec/framed_impl.rs +++ b/tokio-util/src/codec/framed_impl.rs @@ -12,7 +12,6 @@ use std::borrow::{Borrow, BorrowMut}; use std::io; use std::pin::Pin; use std::task::{Context, Poll}; -use tracing::trace; pin_project! { #[derive(Debug)] diff --git a/tokio-util/src/lib.rs b/tokio-util/src/lib.rs index 22ad92b8c4b..1df4de1b459 100644 --- a/tokio-util/src/lib.rs +++ b/tokio-util/src/lib.rs @@ -25,6 +25,9 @@ mod cfg; mod loom; cfg_codec! { + #[macro_use] + mod tracing; + pub mod codec; } diff --git a/tokio-util/src/tracing.rs b/tokio-util/src/tracing.rs new file mode 100644 index 00000000000..e1e9ed0827f --- /dev/null +++ b/tokio-util/src/tracing.rs @@ -0,0 +1,6 @@ +macro_rules! 
trace { + ($($arg:tt)*) => { + #[cfg(feature = "tracing")] + tracing::trace!($($arg)*); + }; +} From e9ae5d4ce993d8c8611b86065f1fe26188fff0ca Mon Sep 17 00:00:00 2001 From: Bharath Vedartham Date: Tue, 2 Apr 2024 16:30:44 +0530 Subject: [PATCH 092/162] io: implement `AsyncBufRead` for `Join` (#6449) Fixes: #6446 --- tokio/src/io/join.rs | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/tokio/src/io/join.rs b/tokio/src/io/join.rs index dbc7043b67e..6e41f89e867 100644 --- a/tokio/src/io/join.rs +++ b/tokio/src/io/join.rs @@ -1,6 +1,6 @@ //! Join two values implementing `AsyncRead` and `AsyncWrite` into a single one. -use crate::io::{AsyncRead, AsyncWrite, ReadBuf}; +use crate::io::{AsyncBufRead, AsyncRead, AsyncWrite, ReadBuf}; use std::io; use std::pin::Pin; @@ -115,3 +115,16 @@ where self.writer.is_write_vectored() } } + +impl AsyncBufRead for Join +where + R: AsyncBufRead, +{ + fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.project().reader.poll_fill_buf(cx) + } + + fn consume(self: Pin<&mut Self>, amt: usize) { + self.project().reader.consume(amt) + } +} From 328a02c1ce08df6e888b19c81cc81d59422af5ef Mon Sep 17 00:00:00 2001 From: Motoyuki Kimura Date: Wed, 3 Apr 2024 14:10:13 +0900 Subject: [PATCH 093/162] runtime: improvements for `global_queue_interval` (#6445) --- tokio/src/runtime/builder.rs | 6 ++++ .../runtime/scheduler/multi_thread/stats.rs | 2 +- .../runtime/scheduler/multi_thread/worker.rs | 2 -- .../scheduler/multi_thread_alt/stats.rs | 2 +- .../scheduler/multi_thread_alt/worker.rs | 3 -- tokio/tests/rt_panic.rs | 12 ++++++++ tokio/tests/rt_threaded.rs | 30 ++++++++++++++++++- tokio/tests/rt_threaded_alt.rs | 27 +++++++++++++++++ 8 files changed, 76 insertions(+), 8 deletions(-) diff --git a/tokio/src/runtime/builder.rs b/tokio/src/runtime/builder.rs index 82d3596915e..499ba97f14a 100644 --- a/tokio/src/runtime/builder.rs +++ b/tokio/src/runtime/builder.rs @@ -758,6 +758,10 @@ impl Builder 
{ /// /// [the module documentation]: crate::runtime#multi-threaded-runtime-behavior-at-the-time-of-writing /// + /// # Panics + /// + /// This function will panic if 0 is passed as an argument. + /// /// # Examples /// /// ``` @@ -768,7 +772,9 @@ impl Builder { /// .build(); /// # } /// ``` + #[track_caller] pub fn global_queue_interval(&mut self, val: u32) -> &mut Self { + assert!(val > 0, "global_queue_interval must be greater than 0"); self.global_queue_interval = Some(val); self } diff --git a/tokio/src/runtime/scheduler/multi_thread/stats.rs b/tokio/src/runtime/scheduler/multi_thread/stats.rs index 30c108c9dd6..03cfc790054 100644 --- a/tokio/src/runtime/scheduler/multi_thread/stats.rs +++ b/tokio/src/runtime/scheduler/multi_thread/stats.rs @@ -63,7 +63,7 @@ impl Stats { let tasks_per_interval = (TARGET_GLOBAL_QUEUE_INTERVAL / self.task_poll_time_ewma) as u32; cmp::max( - // We don't want to return less than 2 as that would result in the + // If we are using self-tuning, we don't want to return less than 2 as that would result in the // global queue always getting checked first. 
2, cmp::min( diff --git a/tokio/src/runtime/scheduler/multi_thread/worker.rs b/tokio/src/runtime/scheduler/multi_thread/worker.rs index 9998870ab4d..f07fb8568cd 100644 --- a/tokio/src/runtime/scheduler/multi_thread/worker.rs +++ b/tokio/src/runtime/scheduler/multi_thread/worker.rs @@ -985,8 +985,6 @@ impl Core { .stats .tuned_global_queue_interval(&worker.handle.shared.config); - debug_assert!(next > 1); - // Smooth out jitter if abs_diff(self.global_queue_interval, next) > 2 { self.global_queue_interval = next; diff --git a/tokio/src/runtime/scheduler/multi_thread_alt/stats.rs b/tokio/src/runtime/scheduler/multi_thread_alt/stats.rs index 7118e4915a0..c2045602797 100644 --- a/tokio/src/runtime/scheduler/multi_thread_alt/stats.rs +++ b/tokio/src/runtime/scheduler/multi_thread_alt/stats.rs @@ -82,7 +82,7 @@ impl Stats { let tasks_per_interval = (TARGET_GLOBAL_QUEUE_INTERVAL / self.task_poll_time_ewma) as u32; cmp::max( - // We don't want to return less than 2 as that would result in the + // If we are using self-tuning, we don't want to return less than 2 as that would result in the // global queue always getting checked first. 2, cmp::min( diff --git a/tokio/src/runtime/scheduler/multi_thread_alt/worker.rs b/tokio/src/runtime/scheduler/multi_thread_alt/worker.rs index 54c6b0ed7ba..c315e382291 100644 --- a/tokio/src/runtime/scheduler/multi_thread_alt/worker.rs +++ b/tokio/src/runtime/scheduler/multi_thread_alt/worker.rs @@ -664,7 +664,6 @@ impl Worker { /// Ensure core's state is set correctly for the worker to start using. fn reset_acquired_core(&mut self, cx: &Context, synced: &mut Synced, core: &mut Core) { self.global_queue_interval = core.stats.tuned_global_queue_interval(&cx.shared().config); - debug_assert!(self.global_queue_interval > 1); // Reset `lifo_enabled` here in case the core was previously stolen from // a task that had the LIFO slot disabled. 
@@ -1288,8 +1287,6 @@ impl Worker { fn tune_global_queue_interval(&mut self, cx: &Context, core: &mut Core) { let next = core.stats.tuned_global_queue_interval(&cx.shared().config); - debug_assert!(next > 1); - // Smooth out jitter if abs_diff(self.global_queue_interval, next) > 2 { self.global_queue_interval = next; diff --git a/tokio/tests/rt_panic.rs b/tokio/tests/rt_panic.rs index ecaf977c881..5c0bd37a79e 100644 --- a/tokio/tests/rt_panic.rs +++ b/tokio/tests/rt_panic.rs @@ -70,6 +70,18 @@ fn builder_max_blocking_threads_panic_caller() -> Result<(), Box> { Ok(()) } +#[test] +fn builder_global_queue_interval_panic_caller() -> Result<(), Box> { + let panic_location_file = test_panic(|| { + let _ = Builder::new_multi_thread().global_queue_interval(0).build(); + }); + + // The panic location should be in this file + assert_eq!(&panic_location_file.unwrap(), file!()); + + Ok(()) +} + fn current_thread() -> Runtime { tokio::runtime::Builder::new_current_thread() .enable_all() diff --git a/tokio/tests/rt_threaded.rs b/tokio/tests/rt_threaded.rs index 8a61c6ad38f..6e769fc831f 100644 --- a/tokio/tests/rt_threaded.rs +++ b/tokio/tests/rt_threaded.rs @@ -10,8 +10,8 @@ use tokio_test::{assert_err, assert_ok}; use futures::future::poll_fn; use std::future::Future; use std::pin::Pin; -use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering::Relaxed; +use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{mpsc, Arc, Mutex}; use std::task::{Context, Poll, Waker}; @@ -486,6 +486,34 @@ fn max_blocking_threads_set_to_zero() { .unwrap(); } +/// Regression test for #6445. +/// +/// After #6445, setting `global_queue_interval` to 1 is now technically valid. +/// This test confirms that there is no regression in `multi_thread_runtime` +/// when global_queue_interval is set to 1. +#[test] +fn global_queue_interval_set_to_one() { + let rt = tokio::runtime::Builder::new_multi_thread() + .global_queue_interval(1) + .build() + .unwrap(); + + // Perform a simple work. 
+ let cnt = Arc::new(AtomicUsize::new(0)); + rt.block_on(async { + let mut set = tokio::task::JoinSet::new(); + for _ in 0..10 { + let cnt = cnt.clone(); + set.spawn(async move { cnt.fetch_add(1, Ordering::Relaxed) }); + } + + while let Some(res) = set.join_next().await { + res.unwrap(); + } + }); + assert_eq!(cnt.load(Relaxed), 10); +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn hang_on_shutdown() { let (sync_tx, sync_rx) = std::sync::mpsc::channel::<()>(); diff --git a/tokio/tests/rt_threaded_alt.rs b/tokio/tests/rt_threaded_alt.rs index 3e3ac076290..8b7143b2f97 100644 --- a/tokio/tests/rt_threaded_alt.rs +++ b/tokio/tests/rt_threaded_alt.rs @@ -487,6 +487,33 @@ fn max_blocking_threads_set_to_zero() { .unwrap(); } +/// Regression test for #6445. +/// +/// After #6445, setting `global_queue_interval` to 1 is now technically valid. +/// This test confirms that there is no regression in `multi_thread_runtime` +/// when global_queue_interval is set to 1. +#[test] +fn global_queue_interval_set_to_one() { + let rt = tokio::runtime::Builder::new_multi_thread_alt() + .global_queue_interval(1) + .build() + .unwrap(); + + // Perform a simple work. 
+ let cnt = Arc::new(AtomicUsize::new(0)); + rt.block_on(async { + let mut set = tokio::task::JoinSet::new(); + for _ in 0..10 { + let cnt = cnt.clone(); + set.spawn(async move { cnt.fetch_add(1, Relaxed) }); + } + while let Some(res) = set.join_next().await { + res.unwrap(); + } + }); + assert_eq!(cnt.load(Relaxed), 10); +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn hang_on_shutdown() { let (sync_tx, sync_rx) = std::sync::mpsc::channel::<()>(); From a1acfd8c20475e8a878b012ac4aa90b361f004ae Mon Sep 17 00:00:00 2001 From: Aoi Kurokawa <62386689+aoikurokawa@users.noreply.github.com> Date: Wed, 3 Apr 2024 15:36:08 +0900 Subject: [PATCH 094/162] readme: add readme for tokio-stream (#6456) --- tokio-stream/README.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 tokio-stream/README.md diff --git a/tokio-stream/README.md b/tokio-stream/README.md new file mode 100644 index 00000000000..cd62f8995fa --- /dev/null +++ b/tokio-stream/README.md @@ -0,0 +1,13 @@ +# tokio-stream + +Utilities to work with `Stream` and `tokio`. + +## License + +This project is licensed under the [MIT license](LICENSE). + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in Tokio by you, shall be licensed as MIT, without any additional +terms or conditions. From 035a968bddcadcbfeff02efa7ccf1f5d9b69f149 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Sat, 6 Apr 2024 00:43:13 +0900 Subject: [PATCH 095/162] chore: fix typo (#6464) --- tokio/src/runtime/metrics/worker.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio/src/runtime/metrics/worker.rs b/tokio/src/runtime/metrics/worker.rs index b53a86bcc87..cefe4d2abc6 100644 --- a/tokio/src/runtime/metrics/worker.rs +++ b/tokio/src/runtime/metrics/worker.rs @@ -44,7 +44,7 @@ pub(crate) struct WorkerMetrics { /// current-thread scheduler. 
pub(crate) queue_depth: AtomicUsize, - /// If `Some`, tracks the the number of polls by duration range. + /// If `Some`, tracks the number of polls by duration range. pub(super) poll_count_histogram: Option, } From 01ed7b55f753b104ec03627e1b6238287cc09c4f Mon Sep 17 00:00:00 2001 From: Quentin Perez Date: Sat, 6 Apr 2024 18:09:43 +0200 Subject: [PATCH 096/162] net: add Apple visionOS support (#6465) --- tokio-util/tests/udp.rs | 6 ++++-- tokio/src/net/unix/ucred.rs | 6 ++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/tokio-util/tests/udp.rs b/tokio-util/tests/udp.rs index eb95a3d18ec..8a70289c263 100644 --- a/tokio-util/tests/udp.rs +++ b/tokio-util/tests/udp.rs @@ -18,7 +18,8 @@ use std::sync::Arc; target_os = "macos", target_os = "ios", target_os = "tvos", - target_os = "watchos" + target_os = "watchos", + target_os = "visionos" ), allow(unused_assignments) )] @@ -53,7 +54,8 @@ async fn send_framed_byte_codec() -> std::io::Result<()> { target_os = "macos", target_os = "ios", target_os = "tvos", - target_os = "watchos" + target_os = "watchos", + target_os = "visionos" )))] // test sending & receiving an empty message { diff --git a/tokio/src/net/unix/ucred.rs b/tokio/src/net/unix/ucred.rs index cb0ee51947f..3390819160a 100644 --- a/tokio/src/net/unix/ucred.rs +++ b/tokio/src/net/unix/ucred.rs @@ -49,7 +49,8 @@ pub(crate) use self::impl_bsd::get_peer_cred; target_os = "macos", target_os = "ios", target_os = "tvos", - target_os = "watchos" + target_os = "watchos", + target_os = "visionos" ))] pub(crate) use self::impl_macos::get_peer_cred; @@ -196,7 +197,8 @@ pub(crate) mod impl_bsd { target_os = "macos", target_os = "ios", target_os = "tvos", - target_os = "watchos" + target_os = "watchos", + target_os = "visionos" ))] pub(crate) mod impl_macos { use crate::net::unix::{self, UnixStream}; From 431b7c52636850820dd1f2b45e02c06cf366cb1c Mon Sep 17 00:00:00 2001 From: David Tolnay Date: Sat, 6 Apr 2024 15:57:29 -0700 Subject: [PATCH 097/162] macros: 
render more comprehensible documentation for `select!` (#6468) --- tokio/src/macros/select.rs | 828 +++++++++++++++++++------------------ 1 file changed, 426 insertions(+), 402 deletions(-) diff --git a/tokio/src/macros/select.rs b/tokio/src/macros/select.rs index 8cf28405e7f..124d7827086 100644 --- a/tokio/src/macros/select.rs +++ b/tokio/src/macros/select.rs @@ -1,404 +1,428 @@ -/// Waits on multiple concurrent branches, returning when the **first** branch -/// completes, cancelling the remaining branches. -/// -/// The `select!` macro must be used inside of async functions, closures, and -/// blocks. -/// -/// The `select!` macro accepts one or more branches with the following pattern: -/// -/// ```text -/// = (, if )? => , -/// ``` -/// -/// Additionally, the `select!` macro may include a single, optional `else` -/// branch, which evaluates if none of the other branches match their patterns: -/// -/// ```text -/// else => -/// ``` -/// -/// The macro aggregates all `` expressions and runs them -/// concurrently on the **current** task. Once the **first** expression -/// completes with a value that matches its ``, the `select!` macro -/// returns the result of evaluating the completed branch's `` -/// expression. -/// -/// Additionally, each branch may include an optional `if` precondition. If the -/// precondition returns `false`, then the branch is disabled. The provided -/// `` is still evaluated but the resulting future is never -/// polled. This capability is useful when using `select!` within a loop. -/// -/// The complete lifecycle of a `select!` expression is as follows: -/// -/// 1. Evaluate all provided `` expressions. If the precondition -/// returns `false`, disable the branch for the remainder of the current call -/// to `select!`. Re-entering `select!` due to a loop clears the "disabled" -/// state. -/// 2. Aggregate the ``s from each branch, including the -/// disabled ones. 
If the branch is disabled, `` is still -/// evaluated, but the resulting future is not polled. -/// 3. Concurrently await on the results for all remaining ``s. -/// 4. Once an `` returns a value, attempt to apply the value -/// to the provided ``, if the pattern matches, evaluate `` -/// and return. If the pattern **does not** match, disable the current branch -/// and for the remainder of the current call to `select!`. Continue from step 3. -/// 5. If **all** branches are disabled, evaluate the `else` expression. If no -/// else branch is provided, panic. -/// -/// # Runtime characteristics -/// -/// By running all async expressions on the current task, the expressions are -/// able to run **concurrently** but not in **parallel**. This means all -/// expressions are run on the same thread and if one branch blocks the thread, -/// all other expressions will be unable to continue. If parallelism is -/// required, spawn each async expression using [`tokio::spawn`] and pass the -/// join handle to `select!`. -/// -/// [`tokio::spawn`]: crate::spawn -/// -/// # Fairness -/// -/// By default, `select!` randomly picks a branch to check first. This provides -/// some level of fairness when calling `select!` in a loop with branches that -/// are always ready. -/// -/// This behavior can be overridden by adding `biased;` to the beginning of the -/// macro usage. See the examples for details. This will cause `select` to poll -/// the futures in the order they appear from top to bottom. There are a few -/// reasons you may want this: -/// -/// - The random number generation of `tokio::select!` has a non-zero CPU cost -/// - Your futures may interact in a way where known polling order is significant -/// -/// But there is an important caveat to this mode. It becomes your responsibility -/// to ensure that the polling order of your futures is fair. 
If for example you -/// are selecting between a stream and a shutdown future, and the stream has a -/// huge volume of messages and zero or nearly zero time between them, you should -/// place the shutdown future earlier in the `select!` list to ensure that it is -/// always polled, and will not be ignored due to the stream being constantly -/// ready. -/// -/// # Panics -/// -/// The `select!` macro panics if all branches are disabled **and** there is no -/// provided `else` branch. A branch is disabled when the provided `if` -/// precondition returns `false` **or** when the pattern does not match the -/// result of ``. -/// -/// # Cancellation safety -/// -/// When using `select!` in a loop to receive messages from multiple sources, -/// you should make sure that the receive call is cancellation safe to avoid -/// losing messages. This section goes through various common methods and -/// describes whether they are cancel safe. The lists in this section are not -/// exhaustive. -/// -/// The following methods are cancellation safe: -/// -/// * [`tokio::sync::mpsc::Receiver::recv`](crate::sync::mpsc::Receiver::recv) -/// * [`tokio::sync::mpsc::UnboundedReceiver::recv`](crate::sync::mpsc::UnboundedReceiver::recv) -/// * [`tokio::sync::broadcast::Receiver::recv`](crate::sync::broadcast::Receiver::recv) -/// * [`tokio::sync::watch::Receiver::changed`](crate::sync::watch::Receiver::changed) -/// * [`tokio::net::TcpListener::accept`](crate::net::TcpListener::accept) -/// * [`tokio::net::UnixListener::accept`](crate::net::UnixListener::accept) -/// * [`tokio::signal::unix::Signal::recv`](crate::signal::unix::Signal::recv) -/// * [`tokio::io::AsyncReadExt::read`](crate::io::AsyncReadExt::read) on any `AsyncRead` -/// * [`tokio::io::AsyncReadExt::read_buf`](crate::io::AsyncReadExt::read_buf) on any `AsyncRead` -/// * [`tokio::io::AsyncWriteExt::write`](crate::io::AsyncWriteExt::write) on any `AsyncWrite` -/// * 
[`tokio::io::AsyncWriteExt::write_buf`](crate::io::AsyncWriteExt::write_buf) on any `AsyncWrite` -/// * [`tokio_stream::StreamExt::next`](https://docs.rs/tokio-stream/0.1/tokio_stream/trait.StreamExt.html#method.next) on any `Stream` -/// * [`futures::stream::StreamExt::next`](https://docs.rs/futures/0.3/futures/stream/trait.StreamExt.html#method.next) on any `Stream` -/// -/// The following methods are not cancellation safe and can lead to loss of data: -/// -/// * [`tokio::io::AsyncReadExt::read_exact`](crate::io::AsyncReadExt::read_exact) -/// * [`tokio::io::AsyncReadExt::read_to_end`](crate::io::AsyncReadExt::read_to_end) -/// * [`tokio::io::AsyncReadExt::read_to_string`](crate::io::AsyncReadExt::read_to_string) -/// * [`tokio::io::AsyncWriteExt::write_all`](crate::io::AsyncWriteExt::write_all) -/// -/// The following methods are not cancellation safe because they use a queue for -/// fairness and cancellation makes you lose your place in the queue: -/// -/// * [`tokio::sync::Mutex::lock`](crate::sync::Mutex::lock) -/// * [`tokio::sync::RwLock::read`](crate::sync::RwLock::read) -/// * [`tokio::sync::RwLock::write`](crate::sync::RwLock::write) -/// * [`tokio::sync::Semaphore::acquire`](crate::sync::Semaphore::acquire) -/// * [`tokio::sync::Notify::notified`](crate::sync::Notify::notified) -/// -/// To determine whether your own methods are cancellation safe, look for the -/// location of uses of `.await`. This is because when an asynchronous method is -/// cancelled, that always happens at an `.await`. If your function behaves -/// correctly even if it is restarted while waiting at an `.await`, then it is -/// cancellation safe. -/// -/// Cancellation safety can be defined in the following way: If you have a -/// future that has not yet completed, then it must be a no-op to drop that -/// future and recreate it. This definition is motivated by the situation where -/// a `select!` is used in a loop. 
Without this guarantee, you would lose your -/// progress when another branch completes and you restart the `select!` by -/// going around the loop. -/// -/// Be aware that cancelling something that is not cancellation safe is not -/// necessarily wrong. For example, if you are cancelling a task because the -/// application is shutting down, then you probably don't care that partially -/// read data is lost. -/// -/// # Examples -/// -/// Basic select with two branches. -/// -/// ``` -/// async fn do_stuff_async() { -/// // async work -/// } -/// -/// async fn more_async_work() { -/// // more here -/// } -/// -/// #[tokio::main] -/// async fn main() { -/// tokio::select! { -/// _ = do_stuff_async() => { -/// println!("do_stuff_async() completed first") -/// } -/// _ = more_async_work() => { -/// println!("more_async_work() completed first") -/// } -/// }; -/// } -/// ``` -/// -/// Basic stream selecting. -/// -/// ``` -/// use tokio_stream::{self as stream, StreamExt}; -/// -/// #[tokio::main] -/// async fn main() { -/// let mut stream1 = stream::iter(vec![1, 2, 3]); -/// let mut stream2 = stream::iter(vec![4, 5, 6]); -/// -/// let next = tokio::select! { -/// v = stream1.next() => v.unwrap(), -/// v = stream2.next() => v.unwrap(), -/// }; -/// -/// assert!(next == 1 || next == 4); -/// } -/// ``` -/// -/// Collect the contents of two streams. In this example, we rely on pattern -/// matching and the fact that `stream::iter` is "fused", i.e. once the stream -/// is complete, all calls to `next()` return `None`. -/// -/// ``` -/// use tokio_stream::{self as stream, StreamExt}; -/// -/// #[tokio::main] -/// async fn main() { -/// let mut stream1 = stream::iter(vec![1, 2, 3]); -/// let mut stream2 = stream::iter(vec![4, 5, 6]); -/// -/// let mut values = vec![]; -/// -/// loop { -/// tokio::select! 
{ -/// Some(v) = stream1.next() => values.push(v), -/// Some(v) = stream2.next() => values.push(v), -/// else => break, -/// } -/// } -/// -/// values.sort(); -/// assert_eq!(&[1, 2, 3, 4, 5, 6], &values[..]); -/// } -/// ``` -/// -/// Using the same future in multiple `select!` expressions can be done by passing -/// a reference to the future. Doing so requires the future to be [`Unpin`]. A -/// future can be made [`Unpin`] by either using [`Box::pin`] or stack pinning. -/// -/// [`Unpin`]: std::marker::Unpin -/// [`Box::pin`]: std::boxed::Box::pin -/// -/// Here, a stream is consumed for at most 1 second. -/// -/// ``` -/// use tokio_stream::{self as stream, StreamExt}; -/// use tokio::time::{self, Duration}; -/// -/// #[tokio::main] -/// async fn main() { -/// let mut stream = stream::iter(vec![1, 2, 3]); -/// let sleep = time::sleep(Duration::from_secs(1)); -/// tokio::pin!(sleep); -/// -/// loop { -/// tokio::select! { -/// maybe_v = stream.next() => { -/// if let Some(v) = maybe_v { -/// println!("got = {}", v); -/// } else { -/// break; -/// } -/// } -/// _ = &mut sleep => { -/// println!("timeout"); -/// break; -/// } -/// } -/// } -/// } -/// ``` -/// -/// Joining two values using `select!`. -/// -/// ``` -/// use tokio::sync::oneshot; -/// -/// #[tokio::main] -/// async fn main() { -/// let (tx1, mut rx1) = oneshot::channel(); -/// let (tx2, mut rx2) = oneshot::channel(); -/// -/// tokio::spawn(async move { -/// tx1.send("first").unwrap(); -/// }); -/// -/// tokio::spawn(async move { -/// tx2.send("second").unwrap(); -/// }); -/// -/// let mut a = None; -/// let mut b = None; -/// -/// while a.is_none() || b.is_none() { -/// tokio::select! 
{ -/// v1 = (&mut rx1), if a.is_none() => a = Some(v1.unwrap()), -/// v2 = (&mut rx2), if b.is_none() => b = Some(v2.unwrap()), -/// } -/// } -/// -/// let res = (a.unwrap(), b.unwrap()); -/// -/// assert_eq!(res.0, "first"); -/// assert_eq!(res.1, "second"); -/// } -/// ``` -/// -/// Using the `biased;` mode to control polling order. -/// -/// ``` -/// #[tokio::main] -/// async fn main() { -/// let mut count = 0u8; -/// -/// loop { -/// tokio::select! { -/// // If you run this example without `biased;`, the polling order is -/// // pseudo-random, and the assertions on the value of count will -/// // (probably) fail. -/// biased; -/// -/// _ = async {}, if count < 1 => { -/// count += 1; -/// assert_eq!(count, 1); -/// } -/// _ = async {}, if count < 2 => { -/// count += 1; -/// assert_eq!(count, 2); -/// } -/// _ = async {}, if count < 3 => { -/// count += 1; -/// assert_eq!(count, 3); -/// } -/// _ = async {}, if count < 4 => { -/// count += 1; -/// assert_eq!(count, 4); -/// } -/// -/// else => { -/// break; -/// } -/// }; -/// } -/// } -/// ``` -/// -/// ## Avoid racy `if` preconditions -/// -/// Given that `if` preconditions are used to disable `select!` branches, some -/// caution must be used to avoid missing values. -/// -/// For example, here is **incorrect** usage of `sleep` with `if`. The objective -/// is to repeatedly run an asynchronous task for up to 50 milliseconds. -/// However, there is a potential for the `sleep` completion to be missed. -/// -/// ```no_run,should_panic -/// use tokio::time::{self, Duration}; -/// -/// async fn some_async_work() { -/// // do work -/// } -/// -/// #[tokio::main] -/// async fn main() { -/// let sleep = time::sleep(Duration::from_millis(50)); -/// tokio::pin!(sleep); -/// -/// while !sleep.is_elapsed() { -/// tokio::select! 
{ -/// _ = &mut sleep, if !sleep.is_elapsed() => { -/// println!("operation timed out"); -/// } -/// _ = some_async_work() => { -/// println!("operation completed"); -/// } -/// } -/// } -/// -/// panic!("This example shows how not to do it!"); -/// } -/// ``` -/// -/// In the above example, `sleep.is_elapsed()` may return `true` even if -/// `sleep.poll()` never returned `Ready`. This opens up a potential race -/// condition where `sleep` expires between the `while !sleep.is_elapsed()` -/// check and the call to `select!` resulting in the `some_async_work()` call to -/// run uninterrupted despite the sleep having elapsed. -/// -/// One way to write the above example without the race would be: -/// -/// ``` -/// use tokio::time::{self, Duration}; -/// -/// async fn some_async_work() { -/// # time::sleep(Duration::from_millis(10)).await; -/// // do work -/// } -/// -/// #[tokio::main] -/// async fn main() { -/// let sleep = time::sleep(Duration::from_millis(50)); -/// tokio::pin!(sleep); -/// -/// loop { -/// tokio::select! { -/// _ = &mut sleep => { -/// println!("operation timed out"); -/// break; -/// } -/// _ = some_async_work() => { -/// println!("operation completed"); -/// } -/// } -/// } -/// } -/// ``` -#[macro_export] -#[cfg_attr(docsrs, doc(cfg(feature = "macros")))] -macro_rules! select { +macro_rules! doc { + ($select:item) => { + /// Waits on multiple concurrent branches, returning when the **first** branch + /// completes, cancelling the remaining branches. + /// + /// The `select!` macro must be used inside of async functions, closures, and + /// blocks. + /// + /// The `select!` macro accepts one or more branches with the following pattern: + /// + /// ```text + /// = (, if )? 
=> <handler>, + /// ``` + /// + /// Additionally, the `select!` macro may include a single, optional `else` + /// branch, which evaluates if none of the other branches match their patterns: + /// + /// ```text + /// else => <expression> + /// ``` + /// + /// The macro aggregates all `<async expression>` expressions and runs them + /// concurrently on the **current** task. Once the **first** expression + /// completes with a value that matches its `<pattern>`, the `select!` macro + /// returns the result of evaluating the completed branch's `<handler>` + /// expression. + /// + /// Additionally, each branch may include an optional `if` precondition. If the + /// precondition returns `false`, then the branch is disabled. The provided + /// `<async expression>` is still evaluated but the resulting future is never + /// polled. This capability is useful when using `select!` within a loop. + /// + /// The complete lifecycle of a `select!` expression is as follows: + /// + /// 1. Evaluate all provided `<precondition>` expressions. If the precondition + /// returns `false`, disable the branch for the remainder of the current call + /// to `select!`. Re-entering `select!` due to a loop clears the "disabled" + /// state. + /// 2. Aggregate the `<async expression>`s from each branch, including the + /// disabled ones. If the branch is disabled, `<async expression>` is still + /// evaluated, but the resulting future is not polled. + /// 3. Concurrently await on the results for all remaining `<async expression>`s. + /// 4. Once an `<async expression>` returns a value, attempt to apply the value + /// to the provided `<pattern>`, if the pattern matches, evaluate `<handler>` + /// and return. If the pattern **does not** match, disable the current branch + /// and for the remainder of the current call to `select!`. Continue from step 3. + /// 5. If **all** branches are disabled, evaluate the `else` expression. If no + /// else branch is provided, panic. + /// + /// # Runtime characteristics + /// + /// By running all async expressions on the current task, the expressions are + /// able to run **concurrently** but not in **parallel**. 
This means all + /// expressions are run on the same thread and if one branch blocks the thread, + /// all other expressions will be unable to continue. If parallelism is + /// required, spawn each async expression using [`tokio::spawn`] and pass the + /// join handle to `select!`. + /// + /// [`tokio::spawn`]: crate::spawn + /// + /// # Fairness + /// + /// By default, `select!` randomly picks a branch to check first. This provides + /// some level of fairness when calling `select!` in a loop with branches that + /// are always ready. + /// + /// This behavior can be overridden by adding `biased;` to the beginning of the + /// macro usage. See the examples for details. This will cause `select` to poll + /// the futures in the order they appear from top to bottom. There are a few + /// reasons you may want this: + /// + /// - The random number generation of `tokio::select!` has a non-zero CPU cost + /// - Your futures may interact in a way where known polling order is significant + /// + /// But there is an important caveat to this mode. It becomes your responsibility + /// to ensure that the polling order of your futures is fair. If for example you + /// are selecting between a stream and a shutdown future, and the stream has a + /// huge volume of messages and zero or nearly zero time between them, you should + /// place the shutdown future earlier in the `select!` list to ensure that it is + /// always polled, and will not be ignored due to the stream being constantly + /// ready. + /// + /// # Panics + /// + /// The `select!` macro panics if all branches are disabled **and** there is no + /// provided `else` branch. A branch is disabled when the provided `if` + /// precondition returns `false` **or** when the pattern does not match the + /// result of `<async expression>`. 
+ /// + /// # Cancellation safety + /// + /// When using `select!` in a loop to receive messages from multiple sources, + /// you should make sure that the receive call is cancellation safe to avoid + /// losing messages. This section goes through various common methods and + /// describes whether they are cancel safe. The lists in this section are not + /// exhaustive. + /// + /// The following methods are cancellation safe: + /// + /// * [`tokio::sync::mpsc::Receiver::recv`](crate::sync::mpsc::Receiver::recv) + /// * [`tokio::sync::mpsc::UnboundedReceiver::recv`](crate::sync::mpsc::UnboundedReceiver::recv) + /// * [`tokio::sync::broadcast::Receiver::recv`](crate::sync::broadcast::Receiver::recv) + /// * [`tokio::sync::watch::Receiver::changed`](crate::sync::watch::Receiver::changed) + /// * [`tokio::net::TcpListener::accept`](crate::net::TcpListener::accept) + /// * [`tokio::net::UnixListener::accept`](crate::net::UnixListener::accept) + /// * [`tokio::signal::unix::Signal::recv`](crate::signal::unix::Signal::recv) + /// * [`tokio::io::AsyncReadExt::read`](crate::io::AsyncReadExt::read) on any `AsyncRead` + /// * [`tokio::io::AsyncReadExt::read_buf`](crate::io::AsyncReadExt::read_buf) on any `AsyncRead` + /// * [`tokio::io::AsyncWriteExt::write`](crate::io::AsyncWriteExt::write) on any `AsyncWrite` + /// * [`tokio::io::AsyncWriteExt::write_buf`](crate::io::AsyncWriteExt::write_buf) on any `AsyncWrite` + /// * [`tokio_stream::StreamExt::next`](https://docs.rs/tokio-stream/0.1/tokio_stream/trait.StreamExt.html#method.next) on any `Stream` + /// * [`futures::stream::StreamExt::next`](https://docs.rs/futures/0.3/futures/stream/trait.StreamExt.html#method.next) on any `Stream` + /// + /// The following methods are not cancellation safe and can lead to loss of data: + /// + /// * [`tokio::io::AsyncReadExt::read_exact`](crate::io::AsyncReadExt::read_exact) + /// * [`tokio::io::AsyncReadExt::read_to_end`](crate::io::AsyncReadExt::read_to_end) + /// * 
[`tokio::io::AsyncReadExt::read_to_string`](crate::io::AsyncReadExt::read_to_string) + /// * [`tokio::io::AsyncWriteExt::write_all`](crate::io::AsyncWriteExt::write_all) + /// + /// The following methods are not cancellation safe because they use a queue for + /// fairness and cancellation makes you lose your place in the queue: + /// + /// * [`tokio::sync::Mutex::lock`](crate::sync::Mutex::lock) + /// * [`tokio::sync::RwLock::read`](crate::sync::RwLock::read) + /// * [`tokio::sync::RwLock::write`](crate::sync::RwLock::write) + /// * [`tokio::sync::Semaphore::acquire`](crate::sync::Semaphore::acquire) + /// * [`tokio::sync::Notify::notified`](crate::sync::Notify::notified) + /// + /// To determine whether your own methods are cancellation safe, look for the + /// location of uses of `.await`. This is because when an asynchronous method is + /// cancelled, that always happens at an `.await`. If your function behaves + /// correctly even if it is restarted while waiting at an `.await`, then it is + /// cancellation safe. + /// + /// Cancellation safety can be defined in the following way: If you have a + /// future that has not yet completed, then it must be a no-op to drop that + /// future and recreate it. This definition is motivated by the situation where + /// a `select!` is used in a loop. Without this guarantee, you would lose your + /// progress when another branch completes and you restart the `select!` by + /// going around the loop. + /// + /// Be aware that cancelling something that is not cancellation safe is not + /// necessarily wrong. For example, if you are cancelling a task because the + /// application is shutting down, then you probably don't care that partially + /// read data is lost. + /// + /// # Examples + /// + /// Basic select with two branches. 
+ /// + /// ``` + /// async fn do_stuff_async() { + /// // async work + /// } + /// + /// async fn more_async_work() { + /// // more here + /// } + /// + /// #[tokio::main] + /// async fn main() { + /// tokio::select! { + /// _ = do_stuff_async() => { + /// println!("do_stuff_async() completed first") + /// } + /// _ = more_async_work() => { + /// println!("more_async_work() completed first") + /// } + /// }; + /// } + /// ``` + /// + /// Basic stream selecting. + /// + /// ``` + /// use tokio_stream::{self as stream, StreamExt}; + /// + /// #[tokio::main] + /// async fn main() { + /// let mut stream1 = stream::iter(vec![1, 2, 3]); + /// let mut stream2 = stream::iter(vec![4, 5, 6]); + /// + /// let next = tokio::select! { + /// v = stream1.next() => v.unwrap(), + /// v = stream2.next() => v.unwrap(), + /// }; + /// + /// assert!(next == 1 || next == 4); + /// } + /// ``` + /// + /// Collect the contents of two streams. In this example, we rely on pattern + /// matching and the fact that `stream::iter` is "fused", i.e. once the stream + /// is complete, all calls to `next()` return `None`. + /// + /// ``` + /// use tokio_stream::{self as stream, StreamExt}; + /// + /// #[tokio::main] + /// async fn main() { + /// let mut stream1 = stream::iter(vec![1, 2, 3]); + /// let mut stream2 = stream::iter(vec![4, 5, 6]); + /// + /// let mut values = vec![]; + /// + /// loop { + /// tokio::select! { + /// Some(v) = stream1.next() => values.push(v), + /// Some(v) = stream2.next() => values.push(v), + /// else => break, + /// } + /// } + /// + /// values.sort(); + /// assert_eq!(&[1, 2, 3, 4, 5, 6], &values[..]); + /// } + /// ``` + /// + /// Using the same future in multiple `select!` expressions can be done by passing + /// a reference to the future. Doing so requires the future to be [`Unpin`]. A + /// future can be made [`Unpin`] by either using [`Box::pin`] or stack pinning. 
+ /// + /// [`Unpin`]: std::marker::Unpin + /// [`Box::pin`]: std::boxed::Box::pin + /// + /// Here, a stream is consumed for at most 1 second. + /// + /// ``` + /// use tokio_stream::{self as stream, StreamExt}; + /// use tokio::time::{self, Duration}; + /// + /// #[tokio::main] + /// async fn main() { + /// let mut stream = stream::iter(vec![1, 2, 3]); + /// let sleep = time::sleep(Duration::from_secs(1)); + /// tokio::pin!(sleep); + /// + /// loop { + /// tokio::select! { + /// maybe_v = stream.next() => { + /// if let Some(v) = maybe_v { + /// println!("got = {}", v); + /// } else { + /// break; + /// } + /// } + /// _ = &mut sleep => { + /// println!("timeout"); + /// break; + /// } + /// } + /// } + /// } + /// ``` + /// + /// Joining two values using `select!`. + /// + /// ``` + /// use tokio::sync::oneshot; + /// + /// #[tokio::main] + /// async fn main() { + /// let (tx1, mut rx1) = oneshot::channel(); + /// let (tx2, mut rx2) = oneshot::channel(); + /// + /// tokio::spawn(async move { + /// tx1.send("first").unwrap(); + /// }); + /// + /// tokio::spawn(async move { + /// tx2.send("second").unwrap(); + /// }); + /// + /// let mut a = None; + /// let mut b = None; + /// + /// while a.is_none() || b.is_none() { + /// tokio::select! { + /// v1 = (&mut rx1), if a.is_none() => a = Some(v1.unwrap()), + /// v2 = (&mut rx2), if b.is_none() => b = Some(v2.unwrap()), + /// } + /// } + /// + /// let res = (a.unwrap(), b.unwrap()); + /// + /// assert_eq!(res.0, "first"); + /// assert_eq!(res.1, "second"); + /// } + /// ``` + /// + /// Using the `biased;` mode to control polling order. + /// + /// ``` + /// #[tokio::main] + /// async fn main() { + /// let mut count = 0u8; + /// + /// loop { + /// tokio::select! { + /// // If you run this example without `biased;`, the polling order is + /// // pseudo-random, and the assertions on the value of count will + /// // (probably) fail. 
+ /// biased; + /// + /// _ = async {}, if count < 1 => { + /// count += 1; + /// assert_eq!(count, 1); + /// } + /// _ = async {}, if count < 2 => { + /// count += 1; + /// assert_eq!(count, 2); + /// } + /// _ = async {}, if count < 3 => { + /// count += 1; + /// assert_eq!(count, 3); + /// } + /// _ = async {}, if count < 4 => { + /// count += 1; + /// assert_eq!(count, 4); + /// } + /// + /// else => { + /// break; + /// } + /// }; + /// } + /// } + /// ``` + /// + /// ## Avoid racy `if` preconditions + /// + /// Given that `if` preconditions are used to disable `select!` branches, some + /// caution must be used to avoid missing values. + /// + /// For example, here is **incorrect** usage of `sleep` with `if`. The objective + /// is to repeatedly run an asynchronous task for up to 50 milliseconds. + /// However, there is a potential for the `sleep` completion to be missed. + /// + /// ```no_run,should_panic + /// use tokio::time::{self, Duration}; + /// + /// async fn some_async_work() { + /// // do work + /// } + /// + /// #[tokio::main] + /// async fn main() { + /// let sleep = time::sleep(Duration::from_millis(50)); + /// tokio::pin!(sleep); + /// + /// while !sleep.is_elapsed() { + /// tokio::select! { + /// _ = &mut sleep, if !sleep.is_elapsed() => { + /// println!("operation timed out"); + /// } + /// _ = some_async_work() => { + /// println!("operation completed"); + /// } + /// } + /// } + /// + /// panic!("This example shows how not to do it!"); + /// } + /// ``` + /// + /// In the above example, `sleep.is_elapsed()` may return `true` even if + /// `sleep.poll()` never returned `Ready`. This opens up a potential race + /// condition where `sleep` expires between the `while !sleep.is_elapsed()` + /// check and the call to `select!` resulting in the `some_async_work()` call to + /// run uninterrupted despite the sleep having elapsed. 
+ /// + /// One way to write the above example without the race would be: + /// + /// ``` + /// use tokio::time::{self, Duration}; + /// + /// async fn some_async_work() { + /// # time::sleep(Duration::from_millis(10)).await; + /// // do work + /// } + /// + /// #[tokio::main] + /// async fn main() { + /// let sleep = time::sleep(Duration::from_millis(50)); + /// tokio::pin!(sleep); + /// + /// loop { + /// tokio::select! { + /// _ = &mut sleep => { + /// println!("operation timed out"); + /// break; + /// } + /// _ = some_async_work() => { + /// println!("operation completed"); + /// } + /// } + /// } + /// } + /// ``` + #[macro_export] + #[cfg_attr(docsrs, doc(cfg(feature = "macros")))] + $select + }; +} + +#[cfg(doc)] +doc! {macro_rules! select { + { + $( + biased; + )? + $( + $bind:pat = $fut:expr $(, if $cond:expr)? => $handler:expr, + )* + $( + else => $els:expr $(,)? + )? + } => { + unimplemented!() + }; +}} + +#[cfg(not(doc))] +doc! {macro_rules! select { // Uses a declarative macro to do **most** of the work. While it is possible // to implement fully with a declarative macro, a procedural macro is used // to enable improved error messages. @@ -625,7 +649,7 @@ macro_rules! select { () => { compile_error!("select! requires at least one branch.") }; -} +}} // And here... we manually list out matches for up to 64 branches... I'm not // happy about it either, but this is how we manage to use a declarative macro! 
From 5712aaf995b244226aff78ed4a13bf6c9105f3ab Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Sun, 7 Apr 2024 16:30:33 +0200 Subject: [PATCH 098/162] metrics: ignore `worker_steal_count` test (#6471) --- tokio/tests/rt_metrics.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/tokio/tests/rt_metrics.rs b/tokio/tests/rt_metrics.rs index ec4856cb5c2..4dfed06fed4 100644 --- a/tokio/tests/rt_metrics.rs +++ b/tokio/tests/rt_metrics.rs @@ -175,6 +175,7 @@ fn worker_noop_count() { } #[test] +#[ignore] // this test is flaky, see https://github.com/tokio-rs/tokio/issues/6470 fn worker_steal_count() { // This metric only applies to the multi-threaded runtime. // From b6d74ac4ebe9cca0a8f0d964bf8f51a2e0807e26 Mon Sep 17 00:00:00 2001 From: Aoi Kurokawa <62386689+aoikurokawa@users.noreply.github.com> Date: Mon, 8 Apr 2024 17:47:57 +0900 Subject: [PATCH 099/162] runtime: add links in docs for issues and crates (#6473) --- tokio/src/runtime/builder.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/tokio/src/runtime/builder.rs b/tokio/src/runtime/builder.rs index 499ba97f14a..27ae4c80167 100644 --- a/tokio/src/runtime/builder.rs +++ b/tokio/src/runtime/builder.rs @@ -838,7 +838,7 @@ impl Builder { /// /// This option is currently unstable and its implementation is /// incomplete. The API may change or be removed in the future. See - /// tokio-rs/tokio#4516 for more details. + /// issue [tokio-rs/tokio#4516] for more details. /// /// # Examples /// @@ -870,6 +870,7 @@ impl Builder { /// ``` /// /// [`JoinHandle`]: struct@crate::task::JoinHandle + /// [tokio-rs/tokio#4516]: https://github.com/tokio-rs/tokio/issues/4516 pub fn unhandled_panic(&mut self, behavior: UnhandledPanic) -> &mut Self { if !matches!(self.kind, Kind::CurrentThread) && matches!(behavior, UnhandledPanic::ShutdownRuntime) { panic!("UnhandledPanic::ShutdownRuntime is only supported in current thread runtime"); @@ -895,7 +896,7 @@ impl Builder { /// is stealable. 
/// /// Consider trying this option when the task "scheduled" time is high - /// but the runtime is underutilized. Use tokio-rs/tokio-metrics to + /// but the runtime is underutilized. Use [tokio-rs/tokio-metrics] to /// collect this data. /// /// # Unstable @@ -903,7 +904,7 @@ impl Builder { /// This configuration option is considered a workaround for the LIFO /// slot not being stealable. When the slot becomes stealable, we will /// revisit whether or not this option is necessary. See - /// tokio-rs/tokio#4941. + /// issue [tokio-rs/tokio#4941]. /// /// # Examples /// @@ -915,6 +916,9 @@ impl Builder { /// .build() /// .unwrap(); /// ``` + /// + /// [tokio-rs/tokio-metrics]: https://github.com/tokio-rs/tokio-metrics + /// [tokio-rs/tokio#4941]: https://github.com/tokio-rs/tokio/issues/4941 pub fn disable_lifo_slot(&mut self) -> &mut Self { self.disable_lifo_slot = true; self From be9328da75c34b14dbbf017c344fee6219985559 Mon Sep 17 00:00:00 2001 From: Rafael Bachmann Date: Mon, 8 Apr 2024 13:44:18 +0200 Subject: [PATCH 100/162] chore: fix clippy warnings (#6466) --- examples/tinyhttp.rs | 4 ++-- tokio-stream/src/stream_map.rs | 8 ++++---- tokio-stream/tests/async_send_sync.rs | 3 +++ tokio-stream/tests/stream_timeout.rs | 2 +- tokio-util/src/sync/cancellation_token/tree_node.rs | 2 +- tokio-util/src/task/spawn_pinned.rs | 2 +- tokio/src/fs/open_options.rs | 1 - tokio/src/runtime/io/scheduled_io.rs | 2 +- tokio/src/sync/tests/atomic_waker.rs | 3 +++ tokio/tests/async_send_sync.rs | 7 +++++++ tokio/tests/sync_broadcast.rs | 1 + tokio/tests/sync_mpsc.rs | 1 + tokio/tests/sync_notify.rs | 1 + tokio/tests/sync_once_cell.rs | 1 - tokio/tests/sync_oneshot.rs | 3 +++ 15 files changed, 29 insertions(+), 12 deletions(-) diff --git a/examples/tinyhttp.rs b/examples/tinyhttp.rs index 8c6184f948e..dceccf47a89 100644 --- a/examples/tinyhttp.rs +++ b/examples/tinyhttp.rs @@ -259,11 +259,11 @@ mod date { unix_date: u64, } - thread_local!(static LAST: RefCell = 
RefCell::new(LastRenderedNow { + thread_local!(static LAST: RefCell = const { RefCell::new(LastRenderedNow { bytes: [0; 128], amt: 0, unix_date: 0, - })); + }) }); impl fmt::Display for Now { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { diff --git a/tokio-stream/src/stream_map.rs b/tokio-stream/src/stream_map.rs index 41ab9648cad..cefedcd7e19 100644 --- a/tokio-stream/src/stream_map.rs +++ b/tokio-stream/src/stream_map.rs @@ -467,10 +467,10 @@ impl StreamMap { /// assert!(map.remove(&1).is_some()); /// assert!(map.remove(&1).is_none()); /// ``` - pub fn remove(&mut self, k: &Q) -> Option + pub fn remove(&mut self, k: &Q) -> Option where K: Borrow, - Q: Hash + Eq, + Q: Hash + Eq + ?Sized, { for i in 0..self.entries.len() { if self.entries[i].0.borrow() == k { @@ -496,10 +496,10 @@ impl StreamMap { /// assert_eq!(map.contains_key(&1), true); /// assert_eq!(map.contains_key(&2), false); /// ``` - pub fn contains_key(&self, k: &Q) -> bool + pub fn contains_key(&self, k: &Q) -> bool where K: Borrow, - Q: Hash + Eq, + Q: Hash + Eq + ?Sized, { for i in 0..self.entries.len() { if self.entries[i].0.borrow() == k { diff --git a/tokio-stream/tests/async_send_sync.rs b/tokio-stream/tests/async_send_sync.rs index f1c8b4efe25..23d50d08160 100644 --- a/tokio-stream/tests/async_send_sync.rs +++ b/tokio-stream/tests/async_send_sync.rs @@ -15,18 +15,21 @@ fn require_unpin(_t: &T) {} #[allow(dead_code)] struct Invalid; +#[allow(unused)] trait AmbiguousIfSend { fn some_item(&self) {} } impl AmbiguousIfSend<()> for T {} impl AmbiguousIfSend for T {} +#[allow(unused)] trait AmbiguousIfSync { fn some_item(&self) {} } impl AmbiguousIfSync<()> for T {} impl AmbiguousIfSync for T {} +#[allow(unused)] trait AmbiguousIfUnpin { fn some_item(&self) {} } diff --git a/tokio-stream/tests/stream_timeout.rs b/tokio-stream/tests/stream_timeout.rs index 2338f833587..19b4c3d533f 100644 --- a/tokio-stream/tests/stream_timeout.rs +++ b/tokio-stream/tests/stream_timeout.rs @@ -1,7 +1,7 @@ 
#![cfg(all(feature = "time", feature = "sync", feature = "io-util"))] use tokio::time::{self, sleep, Duration}; -use tokio_stream::{self, StreamExt}; +use tokio_stream::StreamExt; use tokio_test::*; use futures::stream; diff --git a/tokio-util/src/sync/cancellation_token/tree_node.rs b/tokio-util/src/sync/cancellation_token/tree_node.rs index f042e4e79e1..d2d7e89f606 100644 --- a/tokio-util/src/sync/cancellation_token/tree_node.rs +++ b/tokio-util/src/sync/cancellation_token/tree_node.rs @@ -206,7 +206,7 @@ fn move_children_to_parent(node: &mut Inner, parent: &mut Inner) { for child in std::mem::take(&mut node.children) { { let mut child_locked = child.inner.lock().unwrap(); - child_locked.parent = node.parent.clone(); + child_locked.parent.clone_from(&node.parent); child_locked.parent_idx = parent.children.len(); } parent.children.push(child); diff --git a/tokio-util/src/task/spawn_pinned.rs b/tokio-util/src/task/spawn_pinned.rs index b4102ec7279..5e4d6cda3ef 100644 --- a/tokio-util/src/task/spawn_pinned.rs +++ b/tokio-util/src/task/spawn_pinned.rs @@ -23,7 +23,7 @@ use tokio::task::{spawn_local, JoinHandle, LocalSet}; /// /// ``` /// use std::rc::Rc; -/// use tokio::{self, task }; +/// use tokio::task; /// use tokio_util::task::LocalPoolHandle; /// /// #[tokio::main(flavor = "current_thread")] diff --git a/tokio/src/fs/open_options.rs b/tokio/src/fs/open_options.rs index 6e2aa0cdfc4..12615393867 100644 --- a/tokio/src/fs/open_options.rs +++ b/tokio/src/fs/open_options.rs @@ -442,7 +442,6 @@ feature! 
{ /// # Examples /// /// ```no_run - /// use libc; /// use tokio::fs::OpenOptions; /// use std::io; /// diff --git a/tokio/src/runtime/io/scheduled_io.rs b/tokio/src/runtime/io/scheduled_io.rs index 527bb9808de..cf25b63867c 100644 --- a/tokio/src/runtime/io/scheduled_io.rs +++ b/tokio/src/runtime/io/scheduled_io.rs @@ -346,7 +346,7 @@ impl ScheduledIo { match slot { Some(existing) => { if !existing.will_wake(cx.waker()) { - *existing = cx.waker().clone(); + existing.clone_from(cx.waker()); } } None => { diff --git a/tokio/src/sync/tests/atomic_waker.rs b/tokio/src/sync/tests/atomic_waker.rs index 8a12012873a..d89b44fa493 100644 --- a/tokio/src/sync/tests/atomic_waker.rs +++ b/tokio/src/sync/tests/atomic_waker.rs @@ -3,7 +3,10 @@ use tokio_test::task; use std::task::Waker; +#[allow(unused)] trait AssertSend: Send {} + +#[allow(unused)] trait AssertSync: Sync {} impl AssertSend for AtomicWaker {} diff --git a/tokio/tests/async_send_sync.rs b/tokio/tests/async_send_sync.rs index dfd26f9e9e7..52b3835b753 100644 --- a/tokio/tests/async_send_sync.rs +++ b/tokio/tests/async_send_sync.rs @@ -14,16 +14,19 @@ use tokio::time::{Duration, Instant}; // The names of these structs behaves better when sorted. 
// Send: Yes, Sync: Yes #[derive(Clone)] +#[allow(unused)] struct YY {} // Send: Yes, Sync: No #[derive(Clone)] +#[allow(unused)] struct YN { _value: Cell, } // Send: No, Sync: No #[derive(Clone)] +#[allow(unused)] struct NN { _value: Rc, } @@ -52,18 +55,21 @@ fn require_unpin(_t: &T) {} #[allow(dead_code)] struct Invalid; +#[allow(unused)] trait AmbiguousIfSend { fn some_item(&self) {} } impl AmbiguousIfSend<()> for T {} impl AmbiguousIfSend for T {} +#[allow(unused)] trait AmbiguousIfSync { fn some_item(&self) {} } impl AmbiguousIfSync<()> for T {} impl AmbiguousIfSync for T {} +#[allow(unused)] trait AmbiguousIfUnpin { fn some_item(&self) {} } @@ -712,6 +718,7 @@ mod unix_asyncfd { use super::*; use tokio::io::unix::*; + #[allow(unused)] struct ImplsFd { _t: T, } diff --git a/tokio/tests/sync_broadcast.rs b/tokio/tests/sync_broadcast.rs index 16b9a0abb73..17fe44f3e89 100644 --- a/tokio/tests/sync_broadcast.rs +++ b/tokio/tests/sync_broadcast.rs @@ -52,6 +52,7 @@ macro_rules! assert_closed { }; } +#[allow(unused)] trait AssertSend: Send + Sync {} impl AssertSend for broadcast::Sender {} impl AssertSend for broadcast::Receiver {} diff --git a/tokio/tests/sync_mpsc.rs b/tokio/tests/sync_mpsc.rs index 4a7eced13ee..10a80561537 100644 --- a/tokio/tests/sync_mpsc.rs +++ b/tokio/tests/sync_mpsc.rs @@ -21,6 +21,7 @@ mod support { pub(crate) mod mpsc_stream; } +#[allow(unused)] trait AssertSend: Send {} impl AssertSend for mpsc::Sender {} impl AssertSend for mpsc::Receiver {} diff --git a/tokio/tests/sync_notify.rs b/tokio/tests/sync_notify.rs index e31b9d49cff..01b8ce86537 100644 --- a/tokio/tests/sync_notify.rs +++ b/tokio/tests/sync_notify.rs @@ -8,6 +8,7 @@ use tokio::sync::Notify; use tokio_test::task::spawn; use tokio_test::*; +#[allow(unused)] trait AssertSend: Send + Sync {} impl AssertSend for Notify {} diff --git a/tokio/tests/sync_once_cell.rs b/tokio/tests/sync_once_cell.rs index d5a69478ef2..b662db3add1 100644 --- a/tokio/tests/sync_once_cell.rs +++ 
b/tokio/tests/sync_once_cell.rs @@ -2,7 +2,6 @@ #![cfg(feature = "full")] use std::mem; -use std::ops::Drop; use std::sync::atomic::{AtomicU32, Ordering}; use std::time::Duration; use tokio::runtime; diff --git a/tokio/tests/sync_oneshot.rs b/tokio/tests/sync_oneshot.rs index 163d50de9d2..127f7cb61b8 100644 --- a/tokio/tests/sync_oneshot.rs +++ b/tokio/tests/sync_oneshot.rs @@ -17,13 +17,16 @@ use std::future::Future; use std::pin::Pin; use std::task::{Context, Poll}; +#[allow(unused)] trait AssertSend: Send {} impl AssertSend for oneshot::Sender {} impl AssertSend for oneshot::Receiver {} +#[allow(unused)] trait SenderExt { fn poll_closed(&mut self, cx: &mut Context<'_>) -> Poll<()>; } + impl SenderExt for oneshot::Sender { fn poll_closed(&mut self, cx: &mut Context<'_>) -> Poll<()> { tokio::pin! { From ccee1d44934b3b019e7fd6cbb0d0c96603cd097b Mon Sep 17 00:00:00 2001 From: Pierre Fenoll Date: Wed, 10 Apr 2024 11:41:58 +0200 Subject: [PATCH 101/162] task: make `LocalKey::get` work with Clone types (#6433) Signed-off-by: Pierre Fenoll --- tokio/src/task/task_local.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tokio/src/task/task_local.rs b/tokio/src/task/task_local.rs index ba58ea6ae8b..cb9d22c61e7 100644 --- a/tokio/src/task/task_local.rs +++ b/tokio/src/task/task_local.rs @@ -264,16 +264,16 @@ impl LocalKey { } } -impl LocalKey { +impl LocalKey { /// Returns a copy of the task-local value - /// if the task-local value implements `Copy`. + /// if the task-local value implements `Clone`. /// /// # Panics /// /// This function will panic if the task local doesn't have a value set. 
#[track_caller] pub fn get(&'static self) -> T { - self.with(|v| *v) + self.with(|v| v.clone()) } } From 224fea4f3ca116f1fb416da5acc36faa8e3e8b5d Mon Sep 17 00:00:00 2001 From: vvvviiv Date: Wed, 10 Apr 2024 21:55:44 +0800 Subject: [PATCH 102/162] sync: add `split` method to the semaphore permit (#6472) --- tokio/src/sync/semaphore.rs | 46 +++++++++++++++++++++++++++++ tokio/tests/sync_semaphore.rs | 26 ++++++++++++++++ tokio/tests/sync_semaphore_owned.rs | 26 ++++++++++++++++ 3 files changed, 98 insertions(+) diff --git a/tokio/src/sync/semaphore.rs b/tokio/src/sync/semaphore.rs index a2b4074590b..a952729b563 100644 --- a/tokio/src/sync/semaphore.rs +++ b/tokio/src/sync/semaphore.rs @@ -990,6 +990,27 @@ impl<'a> SemaphorePermit<'a> { self.permits += other.permits; other.permits = 0; } + + /// Splits `n` permits from `self` and returns a new [`SemaphorePermit`] instance that holds `n` permits. + /// + /// If there are insufficient permits and it's not possible to reduce by `n`, returns `None`. + pub fn split(&mut self, n: u32) -> Option { + if n > self.permits { + return None; + } + + self.permits -= n; + + Some(Self { + sem: self.sem, + permits: n, + }) + } + + /// Returns the number of permits held by `self`. + pub fn num_permits(&self) -> u32 { + self.permits + } } impl OwnedSemaphorePermit { @@ -1019,10 +1040,35 @@ impl OwnedSemaphorePermit { other.permits = 0; } + /// Splits `n` permits from `self` and returns a new [`OwnedSemaphorePermit`] instance that holds `n` permits. + /// + /// If there are insufficient permits and it's not possible to reduce by `n`, returns `None`. + /// + /// # Note + /// + /// It will clone the owned `Arc` to construct the new instance. + pub fn split(&mut self, n: u32) -> Option { + if n > self.permits { + return None; + } + + self.permits -= n; + + Some(Self { + sem: self.sem.clone(), + permits: n, + }) + } + /// Returns the [`Semaphore`] from which this permit was acquired. 
pub fn semaphore(&self) -> &Arc { &self.sem } + + /// Returns the number of permits held by `self`. + pub fn num_permits(&self) -> u32 { + self.permits + } } impl Drop for SemaphorePermit<'_> { diff --git a/tokio/tests/sync_semaphore.rs b/tokio/tests/sync_semaphore.rs index 40a5a0802a6..ab4b316cee1 100644 --- a/tokio/tests/sync_semaphore.rs +++ b/tokio/tests/sync_semaphore.rs @@ -88,6 +88,32 @@ fn merge_unrelated_permits() { p1.merge(p2); } +#[test] +fn split() { + let sem = Semaphore::new(5); + let mut p1 = sem.try_acquire_many(3).unwrap(); + assert_eq!(sem.available_permits(), 2); + assert_eq!(p1.num_permits(), 3); + let mut p2 = p1.split(1).unwrap(); + assert_eq!(sem.available_permits(), 2); + assert_eq!(p1.num_permits(), 2); + assert_eq!(p2.num_permits(), 1); + let p3 = p1.split(0).unwrap(); + assert_eq!(p3.num_permits(), 0); + drop(p1); + assert_eq!(sem.available_permits(), 4); + let p4 = p2.split(1).unwrap(); + assert_eq!(p2.num_permits(), 0); + assert_eq!(p4.num_permits(), 1); + assert!(p2.split(1).is_none()); + drop(p2); + assert_eq!(sem.available_permits(), 4); + drop(p3); + assert_eq!(sem.available_permits(), 4); + drop(p4); + assert_eq!(sem.available_permits(), 5); +} + #[tokio::test] #[cfg(feature = "full")] async fn stress_test() { diff --git a/tokio/tests/sync_semaphore_owned.rs b/tokio/tests/sync_semaphore_owned.rs index d4b12d40e45..f9eeee0cfab 100644 --- a/tokio/tests/sync_semaphore_owned.rs +++ b/tokio/tests/sync_semaphore_owned.rs @@ -114,6 +114,32 @@ fn merge_unrelated_permits() { p1.merge(p2) } +#[test] +fn split() { + let sem = Arc::new(Semaphore::new(5)); + let mut p1 = sem.clone().try_acquire_many_owned(3).unwrap(); + assert_eq!(sem.available_permits(), 2); + assert_eq!(p1.num_permits(), 3); + let mut p2 = p1.split(1).unwrap(); + assert_eq!(sem.available_permits(), 2); + assert_eq!(p1.num_permits(), 2); + assert_eq!(p2.num_permits(), 1); + let p3 = p1.split(0).unwrap(); + assert_eq!(p3.num_permits(), 0); + drop(p1); + 
assert_eq!(sem.available_permits(), 4); + let p4 = p2.split(1).unwrap(); + assert_eq!(p2.num_permits(), 0); + assert_eq!(p4.num_permits(), 1); + assert!(p2.split(1).is_none()); + drop(p2); + assert_eq!(sem.available_permits(), 4); + drop(p3); + assert_eq!(sem.available_permits(), 4); + drop(p4); + assert_eq!(sem.available_permits(), 5); +} + #[tokio::test] #[cfg(feature = "full")] async fn stress_test() { From 43de364cd9446a3f378ae7a54a1e26d19858a5fb Mon Sep 17 00:00:00 2001 From: David Tolnay Date: Thu, 11 Apr 2024 01:33:01 -0700 Subject: [PATCH 103/162] readme: mention cargo-docs-rs in CONTRIBUTING.md (#6475) --- CONTRIBUTING.md | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b6b9f4301f8..5d080064773 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -152,20 +152,28 @@ When updating this, also update: cargo +1.77 clippy --all --tests --all-features ``` -When building documentation normally, the markers that list the features -required for various parts of Tokio are missing. To build the documentation -correctly, use this command: +When building documentation, a simple `cargo doc` is not sufficient. To produce +documentation equivalent to what will be produced in docs.rs's builds of Tokio's +docs, please use: ``` -RUSTDOCFLAGS="--cfg docsrs" RUSTFLAGS="--cfg docsrs" cargo +nightly doc --all-features +RUSTDOCFLAGS="--cfg docsrs --cfg tokio_unstable" RUSTFLAGS="--cfg docsrs --cfg tokio_unstable" cargo +nightly doc --all-features [--open] ``` -To build documentation including Tokio's unstable features, it is necessary to -pass `--cfg tokio_unstable` to both RustDoc *and* rustc. To build the -documentation for unstable features, use this command: +This turns on indicators to display the Cargo features required for +conditionally compiled APIs in Tokio, and it enables documentation of unstable +Tokio features. 
Notice that it is necessary to pass cfg flags to both RustDoc +*and* rustc. + +There is a more concise way to build docs.rs-equivalent docs by using [`cargo +docs-rs`], which reads the above documentation flags out of Tokio's Cargo.toml +as docs.rs itself does. + +[`cargo docs-rs`]: https://github.com/dtolnay/cargo-docs-rs ``` -RUSTDOCFLAGS="--cfg docsrs --cfg tokio_unstable" RUSTFLAGS="--cfg docsrs --cfg tokio_unstable" cargo +nightly doc --all-features +cargo install cargo-docs-rs +cargo +nightly docs-rs [--open] ``` The `cargo fmt` command does not work on the Tokio codebase. You can use the From bdf4c142c978320edb7ebeaf18ae60a343e0ffd8 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Thu, 11 Apr 2024 10:33:51 +0200 Subject: [PATCH 104/162] fs: rewrite file system docs (#6467) --- spellcheck.dic | 3 +- tokio/src/fs/mod.rs | 228 ++++++++++++++++++++++++++++++++++++++------ 2 files changed, 202 insertions(+), 29 deletions(-) diff --git a/spellcheck.dic b/spellcheck.dic index 5a0745df32d..4b9288118d2 100644 --- a/spellcheck.dic +++ b/spellcheck.dic @@ -1,4 +1,4 @@ -283 +284 & + < @@ -131,6 +131,7 @@ io IOCP iOS IOs +io_uring IP IPv4 IPv6 diff --git a/tokio/src/fs/mod.rs b/tokio/src/fs/mod.rs index f6d9605fe94..c1855c42aeb 100644 --- a/tokio/src/fs/mod.rs +++ b/tokio/src/fs/mod.rs @@ -1,46 +1,218 @@ #![cfg(not(loom))] -//! Asynchronous file and standard stream adaptation. +//! Asynchronous file utilities. //! -//! This module contains utility methods and adapter types for input/output to -//! files or standard streams (`Stdin`, `Stdout`, `Stderr`), and -//! filesystem manipulation, for use within (and only within) a Tokio runtime. +//! This module contains utility methods for working with the file system +//! asynchronously. This includes reading/writing to files, and working with +//! directories. //! -//! Tasks run by *worker* threads should not block, as this could delay -//! servicing reactor events. Portable filesystem operations are blocking, -//! however. 
This module offers adapters which use a `blocking` annotation -//! to inform the runtime that a blocking operation is required. When -//! necessary, this allows the runtime to convert the current thread from a -//! *worker* to a *backup* thread, where blocking is acceptable. +//! Be aware that most operating systems do not provide asynchronous file system +//! APIs. Because of that, Tokio will use ordinary blocking file operations +//! behind the scenes. This is done using the [`spawn_blocking`] threadpool to +//! run them in the background. //! -//! ## Usage +//! The `tokio::fs` module should only be used for ordinary files. Trying to use +//! it with e.g., a named pipe on Linux can result in surprising behavior, +//! such as hangs during runtime shutdown. For special files, you should use a +//! dedicated type such as [`tokio::net::unix::pipe`] or [`AsyncFd`] instead. //! -//! Where possible, users should prefer the provided asynchronous-specific -//! traits such as [`AsyncRead`], or methods returning a `Future` or `Poll` -//! type. Adaptions also extend to traits like `std::io::Read` where methods -//! return `std::io::Result`. Be warned that these adapted methods may return -//! `std::io::ErrorKind::WouldBlock` if a *worker* thread can not be converted -//! to a *backup* thread immediately. +//! Currently, Tokio will always use [`spawn_blocking`] on all platforms, but it +//! may be changed to use asynchronous file system APIs such as io_uring in the +//! future. //! -//! **Warning**: These adapters may create a large number of temporary tasks, -//! especially when reading large files. When performing a lot of operations -//! in one batch, it may be significantly faster to use [`spawn_blocking`] -//! directly: +//! # Usage +//! +//! The easiest way to use this module is to use the utility functions that +//! operate on entire files: +//! +//! * [`tokio::fs::read`](fn@crate::fs::read) +//! * [`tokio::fs::read_to_string`](fn@crate::fs::read_to_string) +//! 
* [`tokio::fs::write`](fn@crate::fs::write) +//! +//! The two `read` functions reads the entire file and returns its contents. +//! The `write` function takes the contents of the file and writes those +//! contents to the file. It overwrites the existing file, if any. +//! +//! For example, to read the file: //! //! ``` +//! # async fn dox() -> std::io::Result<()> { +//! let contents = tokio::fs::read_to_string("my_file.txt").await?; +//! +//! println!("File has {} lines.", contents.lines().count()); +//! # Ok(()) +//! # } +//! ``` +//! +//! To overwrite the file: +//! +//! ``` +//! # async fn dox() -> std::io::Result<()> { +//! let contents = "First line.\nSecond line.\nThird line.\n"; +//! +//! tokio::fs::write("my_file.txt", contents.as_bytes()).await?; +//! # Ok(()) +//! # } +//! ``` +//! +//! ## Using `File` +//! +//! The main type for interacting with files is [`File`]. It can be used to read +//! from and write to a given file. This is done using the [`AsyncRead`] and +//! [`AsyncWrite`] traits. This type is generally used when you want to do +//! something more complex than just reading or writing the entire contents in +//! one go. +//! +//! **Note:** It is important to use [`flush`] when writing to a Tokio +//! [`File`]. This is because calls to `write` will return before the write has +//! finished, and [`flush`] will wait for the write to finish. (The write will +//! happen even if you don't flush; it will just happen later.) This is +//! different from [`std::fs::File`], and is due to the fact that `File` uses +//! `spawn_blocking` behind the scenes. +//! +//! For example, to count the number of lines in a file without loading the +//! entire file into memory: +//! +//! ```no_run //! use tokio::fs::File; -//! use std::io::{BufReader, BufRead}; -//! async fn count_lines(file: File) -> Result { -//! let file = file.into_std().await; -//! tokio::task::spawn_blocking(move || { -//! let line_count = BufReader::new(file).lines().count(); -//! 
Ok(line_count) -//! }).await? +//! use tokio::io::AsyncReadExt; +//! +//! # async fn dox() -> std::io::Result<()> { +//! let mut file = File::open("my_file.txt").await?; +//! +//! let mut chunk = vec![0; 4096]; +//! let mut number_of_lines = 0; +//! loop { +//! let len = file.read(&mut chunk).await?; +//! if len == 0 { +//! // Length of zero means end of file. +//! break; +//! } +//! for &b in &chunk[..len] { +//! if b == b'\n' { +//! number_of_lines += 1; +//! } +//! } //! } +//! +//! println!("File has {} lines.", number_of_lines); +//! # Ok(()) +//! # } +//! ``` +//! +//! For example, to write a file line-by-line: +//! +//! ```no_run +//! use tokio::fs::File; +//! use tokio::io::AsyncWriteExt; +//! +//! # async fn dox() -> std::io::Result<()> { +//! let mut file = File::create("my_file.txt").await?; +//! +//! file.write_all(b"First line.\n").await?; +//! file.write_all(b"Second line.\n").await?; +//! file.write_all(b"Third line.\n").await?; +//! +//! // Remember to call `flush` after writing! +//! file.flush().await?; +//! # Ok(()) +//! # } //! ``` //! +//! ## Tuning your file IO +//! +//! Tokio's file uses [`spawn_blocking`] behind the scenes, and this has serious +//! performance consequences. To get good performance with file IO on Tokio, it +//! is recommended to batch your operations into as few `spawn_blocking` calls +//! as possible. +//! +//! One example of this difference can be seen by comparing the two reading +//! examples above. The first example uses [`tokio::fs::read`], which reads the +//! entire file in a single `spawn_blocking` call, and then returns it. The +//! second example will read the file in chunks using many `spawn_blocking` +//! calls. This means that the second example will most likely be more expensive +//! for large files. (Of course, using chunks may be necessary for very large +//! files that don't fit in memory.) +//! +//! The following examples will show some strategies for this: +//! +//! 
When creating a file, write the data to a `String` or `Vec` and then +//! write the entire file in a single `spawn_blocking` call with +//! `tokio::fs::write`. +//! +//! ```no_run +//! # async fn dox() -> std::io::Result<()> { +//! let mut contents = String::new(); +//! +//! contents.push_str("First line.\n"); +//! contents.push_str("Second line.\n"); +//! contents.push_str("Third line.\n"); +//! +//! tokio::fs::write("my_file.txt", contents.as_bytes()).await?; +//! # Ok(()) +//! # } +//! ``` +//! +//! Use [`BufReader`] and [`BufWriter`] to buffer many small reads or writes +//! into a few large ones. This example will most likely only perform one +//! `spawn_blocking` call. +//! +//! ```no_run +//! use tokio::fs::File; +//! use tokio::io::{AsyncWriteExt, BufWriter}; +//! +//! # async fn dox() -> std::io::Result<()> { +//! let mut file = BufWriter::new(File::create("my_file.txt").await?); +//! +//! file.write_all(b"First line.\n").await?; +//! file.write_all(b"Second line.\n").await?; +//! file.write_all(b"Third line.\n").await?; +//! +//! // Due to the BufWriter, the actual write and spawn_blocking +//! // call happens when you flush. +//! file.flush().await?; +//! # Ok(()) +//! # } +//! ``` +//! +//! Manually use [`std::fs`] inside [`spawn_blocking`]. +//! +//! ```no_run +//! use std::fs::File; +//! use std::io::{self, Write}; +//! use tokio::task::spawn_blocking; +//! +//! # async fn dox() -> std::io::Result<()> { +//! spawn_blocking(move || { +//! let mut file = File::create("my_file.txt")?; +//! +//! file.write_all(b"First line.\n")?; +//! file.write_all(b"Second line.\n")?; +//! file.write_all(b"Third line.\n")?; +//! +//! // Unlike Tokio's file, the std::fs file does +//! // not need flush. +//! +//! io::Result::Ok(()) +//! }).await.unwrap()?; +//! # Ok(()) +//! # } +//! ``` +//! +//! It's also good to be aware of [`File::set_max_buf_size`], which controls the +//! maximum amount of bytes that Tokio's [`File`] will read or write in a single +//! 
[`spawn_blocking`] call. The default is two megabytes, but this is subject +//! to change. +//! //! [`spawn_blocking`]: fn@crate::task::spawn_blocking //! [`AsyncRead`]: trait@crate::io::AsyncRead +//! [`AsyncWrite`]: trait@crate::io::AsyncWrite +//! [`BufReader`]: struct@crate::io::BufReader +//! [`BufWriter`]: struct@crate::io::BufWriter +//! [`tokio::net::unix::pipe`]: crate::net::unix::pipe +//! [`AsyncFd`]: crate::io::unix::AsyncFd +//! [`flush`]: crate::io::AsyncWriteExt::flush +//! [`tokio::fs::read`]: fn@crate::fs::read mod canonicalize; pub use self::canonicalize::canonicalize; From 90e20bcad372d01e9db9a4357351b19982d66783 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Thu, 11 Apr 2024 19:21:55 +0200 Subject: [PATCH 105/162] sync: use usize instead of u32 for `SemaphorePermit::split` (#6478) Signed-off-by: Alice Ryhl --- tokio/src/sync/semaphore.rs | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/tokio/src/sync/semaphore.rs b/tokio/src/sync/semaphore.rs index a952729b563..0955554d2f3 100644 --- a/tokio/src/sync/semaphore.rs +++ b/tokio/src/sync/semaphore.rs @@ -994,7 +994,9 @@ impl<'a> SemaphorePermit<'a> { /// Splits `n` permits from `self` and returns a new [`SemaphorePermit`] instance that holds `n` permits. /// /// If there are insufficient permits and it's not possible to reduce by `n`, returns `None`. - pub fn split(&mut self, n: u32) -> Option { + pub fn split(&mut self, n: usize) -> Option { + let n = u32::try_from(n).ok()?; + if n > self.permits { return None; } @@ -1008,8 +1010,8 @@ impl<'a> SemaphorePermit<'a> { } /// Returns the number of permits held by `self`. - pub fn num_permits(&self) -> u32 { - self.permits + pub fn num_permits(&self) -> usize { + self.permits as usize } } @@ -1047,7 +1049,9 @@ impl OwnedSemaphorePermit { /// # Note /// /// It will clone the owned `Arc` to construct the new instance. 
- pub fn split(&mut self, n: u32) -> Option { + pub fn split(&mut self, n: usize) -> Option { + let n = u32::try_from(n).ok()?; + if n > self.permits { return None; } @@ -1066,8 +1070,8 @@ impl OwnedSemaphorePermit { } /// Returns the number of permits held by `self`. - pub fn num_permits(&self) -> u32 { - self.permits + pub fn num_permits(&self) -> usize { + self.permits as usize } } From ee68c1a8c211300ee862cbdd34c48292fa47ac3b Mon Sep 17 00:00:00 2001 From: Motoyuki Kimura Date: Fri, 12 Apr 2024 20:14:08 +0900 Subject: [PATCH 106/162] sync: add examples for `SemaphorePermit`, `OwnedSemaphorePermit` (#6477) --- tokio/src/sync/semaphore.rs | 112 ++++++++++++++++++++++++++++++++++++ 1 file changed, 112 insertions(+) diff --git a/tokio/src/sync/semaphore.rs b/tokio/src/sync/semaphore.rs index 0955554d2f3..97963afddc2 100644 --- a/tokio/src/sync/semaphore.rs +++ b/tokio/src/sync/semaphore.rs @@ -968,6 +968,24 @@ impl<'a> SemaphorePermit<'a> { /// Forgets the permit **without** releasing it back to the semaphore. /// This can be used to reduce the amount of permits available from a /// semaphore. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// use tokio::sync::Semaphore; + /// + /// let sem = Arc::new(Semaphore::new(10)); + /// { + /// let permit = sem.try_acquire_many(5).unwrap(); + /// assert_eq!(sem.available_permits(), 5); + /// permit.forget(); + /// } + /// + /// // Since we forgot the permit, available permits won't go back to its initial value + /// // even after the permit is dropped. + /// assert_eq!(sem.available_permits(), 5); + /// ``` pub fn forget(mut self) { self.permits = 0; } @@ -981,6 +999,29 @@ impl<'a> SemaphorePermit<'a> { /// /// This function panics if permits from different [`Semaphore`] instances /// are merged. 
+ /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// use tokio::sync::Semaphore; + /// + /// let sem = Arc::new(Semaphore::new(10)); + /// let mut permit = sem.try_acquire().unwrap(); + /// + /// for _ in 0..9 { + /// let _permit = sem.try_acquire().unwrap(); + /// // Merge individual permits into a single one. + /// permit.merge(_permit) + /// } + /// + /// assert_eq!(sem.available_permits(), 0); + /// + /// // Release all permits in a single batch. + /// drop(permit); + /// + /// assert_eq!(sem.available_permits(), 10); + /// ``` #[track_caller] pub fn merge(&mut self, mut other: Self) { assert!( @@ -994,6 +1035,21 @@ impl<'a> SemaphorePermit<'a> { /// Splits `n` permits from `self` and returns a new [`SemaphorePermit`] instance that holds `n` permits. /// /// If there are insufficient permits and it's not possible to reduce by `n`, returns `None`. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// use tokio::sync::Semaphore; + /// + /// let sem = Arc::new(Semaphore::new(3)); + /// + /// let mut p1 = sem.try_acquire_many(3).unwrap(); + /// let p2 = p1.split(1).unwrap(); + /// + /// assert_eq!(p1.num_permits(), 2); + /// assert_eq!(p2.num_permits(), 1); + /// ``` pub fn split(&mut self, n: usize) -> Option { let n = u32::try_from(n).ok()?; @@ -1019,6 +1075,24 @@ impl OwnedSemaphorePermit { /// Forgets the permit **without** releasing it back to the semaphore. /// This can be used to reduce the amount of permits available from a /// semaphore. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// use tokio::sync::Semaphore; + /// + /// let sem = Arc::new(Semaphore::new(10)); + /// { + /// let permit = sem.clone().try_acquire_many_owned(5).unwrap(); + /// assert_eq!(sem.available_permits(), 5); + /// permit.forget(); + /// } + /// + /// // Since we forgot the permit, available permits won't go back to its initial value + /// // even after the permit is dropped. 
+ /// assert_eq!(sem.available_permits(), 5); + /// ``` pub fn forget(mut self) { self.permits = 0; } @@ -1032,6 +1106,29 @@ impl OwnedSemaphorePermit { /// /// This function panics if permits from different [`Semaphore`] instances /// are merged. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// use tokio::sync::Semaphore; + /// + /// let sem = Arc::new(Semaphore::new(10)); + /// let mut permit = sem.clone().try_acquire_owned().unwrap(); + /// + /// for _ in 0..9 { + /// let _permit = sem.clone().try_acquire_owned().unwrap(); + /// // Merge individual permits into a single one. + /// permit.merge(_permit) + /// } + /// + /// assert_eq!(sem.available_permits(), 0); + /// + /// // Release all permits in a single batch. + /// drop(permit); + /// + /// assert_eq!(sem.available_permits(), 10); + /// ``` #[track_caller] pub fn merge(&mut self, mut other: Self) { assert!( @@ -1049,6 +1146,21 @@ impl OwnedSemaphorePermit { /// # Note /// /// It will clone the owned `Arc` to construct the new instance. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// use tokio::sync::Semaphore; + /// + /// let sem = Arc::new(Semaphore::new(3)); + /// + /// let mut p1 = sem.try_acquire_many_owned(3).unwrap(); + /// let p2 = p1.split(1).unwrap(); + /// + /// assert_eq!(p1.num_permits(), 2); + /// assert_eq!(p2.num_permits(), 1); + /// ``` pub fn split(&mut self, n: usize) -> Option { let n = u32::try_from(n).ok()?; From a5bd0deaa571ec2c3f3069f99b41c7449ed8adec Mon Sep 17 00:00:00 2001 From: Josh McKinney Date: Sat, 13 Apr 2024 02:44:33 -0700 Subject: [PATCH 107/162] docs: add --locked to cargo install commands (#6479) --- CONTRIBUTING.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5d080064773..ad7239f4476 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -172,7 +172,7 @@ as docs.rs itself does. 
[`cargo docs-rs`]: https://github.com/dtolnay/cargo-docs-rs ``` -cargo install cargo-docs-rs +cargo install --locked cargo-docs-rs cargo +nightly docs-rs [--open] ``` @@ -261,7 +261,7 @@ directory `fuzz`. It is a good idea to run fuzz tests after each change. To get started with fuzz testing you'll need to install [cargo-fuzz](https://github.com/rust-fuzz/cargo-fuzz). -`cargo install cargo-fuzz` +`cargo install --locked cargo-fuzz` To list the available fuzzing harnesses you can run; From 1f6fc55917f971791d76dc91cce795e656c0e0d3 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Sat, 13 Apr 2024 17:22:11 +0200 Subject: [PATCH 108/162] net: add missing types to module docs (#6482) Signed-off-by: Alice Ryhl --- tokio/src/net/mod.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tokio/src/net/mod.rs b/tokio/src/net/mod.rs index abc270bd0d8..d5562ac5d47 100644 --- a/tokio/src/net/mod.rs +++ b/tokio/src/net/mod.rs @@ -13,7 +13,10 @@ //! Unix Domain Stream Socket **(available on Unix only)** //! * [`UnixDatagram`] provides functionality for communication //! over Unix Domain Datagram Socket **(available on Unix only)** - +//! * [`tokio::net::unix::pipe`] for FIFO pipes **(available on Unix only)** +//! * [`tokio::net::windows::named_pipe`] for Named Pipes **(available on Windows only)** +//! +//! For IO resources not available in `tokio::net`, you can use [`AsyncFd`]. //! //! [`TcpListener`]: TcpListener //! [`TcpStream`]: TcpStream @@ -21,6 +24,9 @@ //! [`UnixListener`]: UnixListener //! [`UnixStream`]: UnixStream //! [`UnixDatagram`]: UnixDatagram +//! [`tokio::net::unix::pipe`]: unix::pipe +//! [`tokio::net::windows::named_pipe`]: windows::named_pipe +//! [`AsyncFd`]: crate::io::unix::AsyncFd mod addr; cfg_not_wasi! 
{ From 9c6ff3b7e5e86dd2e0104a8ac0a034a3695cfe84 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Wed, 17 Apr 2024 16:16:14 +0200 Subject: [PATCH 109/162] sync: explain relation between `watch::Sender::{subscribe,closed}` (#6490) Signed-off-by: Alice Ryhl --- tokio/src/sync/watch.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/tokio/src/sync/watch.rs b/tokio/src/sync/watch.rs index c3ab0bdc695..80d479300db 100644 --- a/tokio/src/sync/watch.rs +++ b/tokio/src/sync/watch.rs @@ -1185,12 +1185,18 @@ impl Sender { /// Completes when all receivers have dropped. /// /// This allows the producer to get notified when interest in the produced - /// values is canceled and immediately stop doing work. + /// values is canceled and immediately stop doing work. Once a channel is + /// closed, the only way to reopen it is to call [`Sender::subscribe`] to + /// get a new receiver. + /// + /// If the channel becomes closed for a brief amount of time (e.g., the last + /// receiver is dropped and then `subscribe` is called), then this call to + /// `closed` might return, but it is also possible that it does not "notice" + /// that the channel was closed for a brief amount of time. /// /// # Cancel safety /// - /// This method is cancel safe. Once the channel is closed, it stays closed - /// forever and all future calls to `closed` will return immediately. + /// This method is cancel safe. 
/// /// # Examples /// From 5ba12a71451c00e9e220ec87ff869394acac2603 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Thu, 18 Apr 2024 17:21:34 +0200 Subject: [PATCH 110/162] sync: document that `Barrier::wait` is not cancel safe (#6494) Signed-off-by: Alice Ryhl --- tokio/src/sync/barrier.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tokio/src/sync/barrier.rs b/tokio/src/sync/barrier.rs index a1f87c44a3d..aa2f9e0cf75 100644 --- a/tokio/src/sync/barrier.rs +++ b/tokio/src/sync/barrier.rs @@ -118,6 +118,10 @@ impl Barrier { /// A single (arbitrary) future will receive a [`BarrierWaitResult`] that returns `true` from /// [`BarrierWaitResult::is_leader`] when returning from this function, and all other tasks /// will receive a result that will return `false` from `is_leader`. + /// + /// # Cancel safety + /// + /// This method is not cancel safe. pub async fn wait(&self) -> BarrierWaitResult { #[cfg(all(tokio_unstable, feature = "tracing"))] return trace::async_op( From f3bfed30eb66d60c3c28691c1dd040c6531a337b Mon Sep 17 00:00:00 2001 From: Kenny <86278669+kvcache@users.noreply.github.com> Date: Fri, 19 Apr 2024 01:07:55 -0700 Subject: [PATCH 111/162] metrics: document `Runtime::metrics` (#6496) --- tokio/src/runtime/runtime.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tokio/src/runtime/runtime.rs b/tokio/src/runtime/runtime.rs index 917c3f8ce91..ec5b65faac6 100644 --- a/tokio/src/runtime/runtime.rs +++ b/tokio/src/runtime/runtime.rs @@ -491,7 +491,8 @@ impl std::panic::RefUnwindSafe for Runtime {} cfg_metrics! { impl Runtime { - /// TODO + /// Returns a view that lets you get information about how the runtime + /// is performing. 
pub fn metrics(&self) -> crate::runtime::RuntimeMetrics { self.handle.metrics() } From 125a185749533375a52f5be6c16cef42557efa03 Mon Sep 17 00:00:00 2001 From: Kevin Reid Date: Fri, 19 Apr 2024 22:21:31 -0700 Subject: [PATCH 112/162] io: fix obsolete reference in `ReadHalf::unsplit()` documentation (#6498) --- tokio/src/io/split.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tokio/src/io/split.rs b/tokio/src/io/split.rs index 2602929cdd1..453508eda55 100644 --- a/tokio/src/io/split.rs +++ b/tokio/src/io/split.rs @@ -79,8 +79,7 @@ impl ReadHalf { /// /// If this `ReadHalf` and the given `WriteHalf` do not originate from the /// same `split` operation this method will panic. - /// This can be checked ahead of time by comparing the stream ID - /// of the two halves. + /// This can be checked ahead of time by calling [`is_pair_of()`](Self::is_pair_of). #[track_caller] pub fn unsplit(self, wr: WriteHalf) -> T where From 2438b436716ed21432b809a21024f54caebab887 Mon Sep 17 00:00:00 2001 From: Motoyuki Kimura Date: Sat, 20 Apr 2024 19:35:11 +0900 Subject: [PATCH 113/162] sync: instrument `Semaphore` for task dumps (#6499) --- tokio/src/sync/batch_semaphore.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tokio/src/sync/batch_semaphore.rs b/tokio/src/sync/batch_semaphore.rs index d7eb1d6b77e..def5cbc9f51 100644 --- a/tokio/src/sync/batch_semaphore.rs +++ b/tokio/src/sync/batch_semaphore.rs @@ -575,6 +575,8 @@ impl Future for Acquire<'_> { type Output = Result<(), AcquireError>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + ready!(crate::trace::trace_leaf(cx)); + #[cfg(all(tokio_unstable, feature = "tracing"))] let _resource_span = self.node.ctx.resource_span.clone().entered(); #[cfg(all(tokio_unstable, feature = "tracing"))] From 19618905dea0d45a91e30d614b910514673dd01c Mon Sep 17 00:00:00 2001 From: Jainil Patel <95585633+jainl28patel@users.noreply.github.com> Date: Sun, 21 Apr 2024 17:42:36 +0530 Subject: [PATCH 114/162] 
time: fix test-util requirement for pause and resume in docs (#6503) --- tokio/src/time/mod.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tokio/src/time/mod.rs b/tokio/src/time/mod.rs index a1f27b839e9..c0cd7c62856 100644 --- a/tokio/src/time/mod.rs +++ b/tokio/src/time/mod.rs @@ -86,8 +86,9 @@ mod clock; pub(crate) use self::clock::Clock; -#[cfg(feature = "test-util")] -pub use clock::{advance, pause, resume}; +cfg_test_util! { + pub use clock::{advance, pause, resume}; +} pub mod error; From 8093712604e54a434658e693ca80535d906e26a7 Mon Sep 17 00:00:00 2001 From: Alex Yusiuk <55661041+RRRadicalEdward@users.noreply.github.com> Date: Mon, 22 Apr 2024 12:03:33 +0300 Subject: [PATCH 115/162] io: add `copy_bidirectional_with_sizes` (#6500) --- tokio/src/io/mod.rs | 2 +- tokio/src/io/util/copy.rs | 6 +-- tokio/src/io/util/copy_bidirectional.rs | 52 +++++++++++++++++++++++-- tokio/src/io/util/mod.rs | 2 +- 4 files changed, 54 insertions(+), 8 deletions(-) diff --git a/tokio/src/io/mod.rs b/tokio/src/io/mod.rs index 7dab413ceb6..b35a20dd35b 100644 --- a/tokio/src/io/mod.rs +++ b/tokio/src/io/mod.rs @@ -271,7 +271,7 @@ cfg_io_util! 
{ pub(crate) mod seek; pub(crate) mod util; pub use util::{ - copy, copy_bidirectional, copy_buf, duplex, empty, repeat, sink, AsyncBufReadExt, AsyncReadExt, AsyncSeekExt, AsyncWriteExt, + copy, copy_bidirectional, copy_bidirectional_with_sizes, copy_buf, duplex, empty, repeat, sink, AsyncBufReadExt, AsyncReadExt, AsyncSeekExt, AsyncWriteExt, BufReader, BufStream, BufWriter, DuplexStream, Empty, Lines, Repeat, Sink, Split, Take, }; } diff --git a/tokio/src/io/util/copy.rs b/tokio/src/io/util/copy.rs index 56310c86f59..47f8d4ebec6 100644 --- a/tokio/src/io/util/copy.rs +++ b/tokio/src/io/util/copy.rs @@ -16,14 +16,14 @@ pub(super) struct CopyBuffer { } impl CopyBuffer { - pub(super) fn new() -> Self { + pub(super) fn new(buf_size: usize) -> Self { Self { read_done: false, need_flush: false, pos: 0, cap: 0, amt: 0, - buf: vec![0; super::DEFAULT_BUF_SIZE].into_boxed_slice(), + buf: vec![0; buf_size].into_boxed_slice(), } } @@ -269,7 +269,7 @@ cfg_io_util! { Copy { reader, writer, - buf: CopyBuffer::new() + buf: CopyBuffer::new(super::DEFAULT_BUF_SIZE) }.await } } diff --git a/tokio/src/io/util/copy_bidirectional.rs b/tokio/src/io/util/copy_bidirectional.rs index e1a7db127a7..ce90141e5a5 100644 --- a/tokio/src/io/util/copy_bidirectional.rs +++ b/tokio/src/io/util/copy_bidirectional.rs @@ -57,6 +57,9 @@ where /// it will return a tuple of the number of bytes copied from a to b /// and the number of bytes copied from b to a, in that order. /// +/// It uses two 8 KB buffers for transferring bytes between `a` and `b` by default. +/// To set your own buffers sizes use [`copy_bidirectional_with_sizes()`]. +/// /// [`shutdown()`]: crate::io::AsyncWriteExt::shutdown /// /// # Errors @@ -69,13 +72,56 @@ where /// /// Returns a tuple of bytes copied `a` to `b` and bytes copied `b` to `a`. 
#[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] -pub async fn copy_bidirectional(a: &mut A, b: &mut B) -> Result<(u64, u64), std::io::Error> +pub async fn copy_bidirectional(a: &mut A, b: &mut B) -> io::Result<(u64, u64)> +where + A: AsyncRead + AsyncWrite + Unpin + ?Sized, + B: AsyncRead + AsyncWrite + Unpin + ?Sized, +{ + copy_bidirectional_impl( + a, + b, + CopyBuffer::new(super::DEFAULT_BUF_SIZE), + CopyBuffer::new(super::DEFAULT_BUF_SIZE), + ) + .await +} + +/// Copies data in both directions between `a` and `b` using buffers of the specified size. +/// +/// This method is the same as the [`copy_bidirectional()`], except that it allows you to set the +/// size of the internal buffers used when copying data. +#[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] +pub async fn copy_bidirectional_with_sizes( + a: &mut A, + b: &mut B, + a_to_b_buf_size: usize, + b_to_a_buf_size: usize, +) -> io::Result<(u64, u64)> +where + A: AsyncRead + AsyncWrite + Unpin + ?Sized, + B: AsyncRead + AsyncWrite + Unpin + ?Sized, +{ + copy_bidirectional_impl( + a, + b, + CopyBuffer::new(a_to_b_buf_size), + CopyBuffer::new(b_to_a_buf_size), + ) + .await +} + +async fn copy_bidirectional_impl( + a: &mut A, + b: &mut B, + a_to_b_buffer: CopyBuffer, + b_to_a_buffer: CopyBuffer, +) -> io::Result<(u64, u64)> where A: AsyncRead + AsyncWrite + Unpin + ?Sized, B: AsyncRead + AsyncWrite + Unpin + ?Sized, { - let mut a_to_b = TransferState::Running(CopyBuffer::new()); - let mut b_to_a = TransferState::Running(CopyBuffer::new()); + let mut a_to_b = TransferState::Running(a_to_b_buffer); + let mut b_to_a = TransferState::Running(b_to_a_buffer); poll_fn(|cx| { let a_to_b = transfer_one_direction(cx, &mut a_to_b, a, b)?; let b_to_a = transfer_one_direction(cx, &mut b_to_a, b, a)?; diff --git a/tokio/src/io/util/mod.rs b/tokio/src/io/util/mod.rs index 47b951f2b83..5010fc01d29 100644 --- a/tokio/src/io/util/mod.rs +++ b/tokio/src/io/util/mod.rs @@ -28,7 +28,7 @@ cfg_io_util! 
{ pub use copy::copy; mod copy_bidirectional; - pub use copy_bidirectional::copy_bidirectional; + pub use copy_bidirectional::{copy_bidirectional, copy_bidirectional_with_sizes}; mod copy_buf; pub use copy_buf::copy_buf; From a73d6bf33ab72eb23522f04bde869b42a7cb68e0 Mon Sep 17 00:00:00 2001 From: avtrujillo Date: Tue, 23 Apr 2024 05:05:14 -0700 Subject: [PATCH 116/162] macros: `#[cfg(not(test))]` is no longer necessary for main macros (#6508) --- tokio-macros/src/entry.rs | 1 - tokio-macros/src/lib.rs | 2 -- tokio/src/lib.rs | 3 --- 3 files changed, 6 deletions(-) diff --git a/tokio-macros/src/entry.rs b/tokio-macros/src/entry.rs index ed782ad38f6..20cbdb1c92a 100644 --- a/tokio-macros/src/entry.rs +++ b/tokio-macros/src/entry.rs @@ -418,7 +418,6 @@ fn token_stream_with_error(mut tokens: TokenStream, error: syn::Error) -> TokenS tokens } -#[cfg(not(test))] // Work around for rust-lang/rust#62127 pub(crate) fn main(args: TokenStream, item: TokenStream, rt_multi_thread: bool) -> TokenStream { // If any of the steps for this macro fail, we still want to expand to an item that is as close // to the expected output as possible. 
This helps out IDEs such that completions and other diff --git a/tokio-macros/src/lib.rs b/tokio-macros/src/lib.rs index 919c4ac0ba9..ab7b083d381 100644 --- a/tokio-macros/src/lib.rs +++ b/tokio-macros/src/lib.rs @@ -202,7 +202,6 @@ use proc_macro::TokenStream; /// } /// ``` #[proc_macro_attribute] -#[cfg(not(test))] // Work around for rust-lang/rust#62127 pub fn main(args: TokenStream, item: TokenStream) -> TokenStream { entry::main(args.into(), item.into(), true).into() } @@ -267,7 +266,6 @@ pub fn main(args: TokenStream, item: TokenStream) -> TokenStream { /// } /// ``` #[proc_macro_attribute] -#[cfg(not(test))] // Work around for rust-lang/rust#62127 pub fn main_rt(args: TokenStream, item: TokenStream) -> TokenStream { entry::main(args.into(), item.into(), false).into() } diff --git a/tokio/src/lib.rs b/tokio/src/lib.rs index 57b6560bf0d..52b098986fb 100644 --- a/tokio/src/lib.rs +++ b/tokio/src/lib.rs @@ -654,7 +654,6 @@ cfg_macros! { cfg_rt! { #[cfg(feature = "rt-multi-thread")] - #[cfg(not(test))] // Work around for rust-lang/rust#62127 #[cfg_attr(docsrs, doc(cfg(feature = "macros")))] #[doc(inline)] pub use tokio_macros::main; @@ -665,7 +664,6 @@ cfg_macros! { pub use tokio_macros::test; cfg_not_rt_multi_thread! { - #[cfg(not(test))] // Work around for rust-lang/rust#62127 #[doc(inline)] pub use tokio_macros::main_rt as main; @@ -676,7 +674,6 @@ cfg_macros! { // Always fail if rt is not enabled. cfg_not_rt! 
{ - #[cfg(not(test))] #[doc(inline)] pub use tokio_macros::main_fail as main; From 9ed595767d01c400955122d276b34ab52b3a6aab Mon Sep 17 00:00:00 2001 From: LongYinan Date: Wed, 24 Apr 2024 15:45:54 +0800 Subject: [PATCH 117/162] wasm: support rt-multi-thread with wasm32-wasi-preview1-threads (#6510) --- .github/workflows/ci.yml | 33 +++++++-- tests-integration/Cargo.toml | 1 + tests-integration/tests/macros_main.rs | 6 +- tests-integration/tests/macros_select.rs | 5 +- tokio-util/src/task/mod.rs | 2 - tokio/src/macros/cfg.rs | 2 +- tokio/src/runtime/blocking/schedule.rs | 8 +- tokio/src/runtime/builder.rs | 58 +++++++-------- tokio/src/runtime/handle.rs | 8 +- tokio/src/runtime/runtime.rs | 78 ++++++++++---------- tokio/src/runtime/scheduler/inject/shared.rs | 5 +- tokio/src/runtime/scheduler/mod.rs | 32 ++++---- tokio/src/runtime/task/trace/mod.rs | 8 +- tokio/src/task/mod.rs | 6 +- 14 files changed, 127 insertions(+), 125 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1ff7da915bd..e39333ba60d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -909,16 +909,21 @@ jobs: working-directory: tokio wasm32-wasi: - name: wasm32-wasi + name: ${{ matrix.target }} needs: basics runs-on: ubuntu-latest + strategy: + matrix: + target: + - wasm32-wasi + - wasm32-wasi-preview1-threads steps: - uses: actions/checkout@v4 - name: Install Rust ${{ env.rust_stable }} uses: dtolnay/rust-toolchain@stable with: toolchain: ${{ env.rust_stable }} - targets: wasm32-wasi + targets: ${{ matrix.target }} # Install dependencies - name: Install cargo-hack, wasmtime, and cargo-wasi @@ -928,27 +933,39 @@ jobs: - uses: Swatinem/rust-cache@v2 - name: WASI test tokio full - run: cargo test -p tokio --target wasm32-wasi --features full + run: cargo test -p tokio --target ${{ matrix.target }} --features full env: CARGO_TARGET_WASM32_WASI_RUNNER: "wasmtime run --" - RUSTFLAGS: --cfg tokio_unstable -Dwarnings + 
CARGO_TARGET_WASM32_WASI_PREVIEW1_THREADS_RUNNER: "wasmtime run -W bulk-memory=y -W threads=y -S threads=y --" + RUSTFLAGS: --cfg tokio_unstable -Dwarnings -C target-feature=+atomics,+bulk-memory -C link-args=--max-memory=67108864 - name: WASI test tokio-util full - run: cargo test -p tokio-util --target wasm32-wasi --features full + run: cargo test -p tokio-util --target ${{ matrix.target }} --features full env: CARGO_TARGET_WASM32_WASI_RUNNER: "wasmtime run --" - RUSTFLAGS: --cfg tokio_unstable -Dwarnings + CARGO_TARGET_WASM32_WASI_PREVIEW1_THREADS_RUNNER: "wasmtime run -W bulk-memory=y -W threads=y -S threads=y --" + RUSTFLAGS: --cfg tokio_unstable -Dwarnings -C target-feature=+atomics,+bulk-memory -C link-args=--max-memory=67108864 - name: WASI test tokio-stream - run: cargo test -p tokio-stream --target wasm32-wasi --features time,net,io-util,sync + run: cargo test -p tokio-stream --target ${{ matrix.target }} --features time,net,io-util,sync env: CARGO_TARGET_WASM32_WASI_RUNNER: "wasmtime run --" - RUSTFLAGS: --cfg tokio_unstable -Dwarnings + CARGO_TARGET_WASM32_WASI_PREVIEW1_THREADS_RUNNER: "wasmtime run -W bulk-memory=y -W threads=y -S threads=y --" + RUSTFLAGS: --cfg tokio_unstable -Dwarnings -C target-feature=+atomics,+bulk-memory -C link-args=--max-memory=67108864 - name: test tests-integration --features wasi-rt # TODO: this should become: `cargo hack wasi test --each-feature` run: cargo wasi test --test rt_yield --features wasi-rt + if: matrix.target == 'wasm32-wasi' + working-directory: tests-integration + + - name: test tests-integration --features wasi-threads-rt + run: cargo test --target ${{ matrix.target }} --features wasi-threads-rt + if: matrix.target == 'wasm32-wasi-preview1-threads' working-directory: tests-integration + env: + CARGO_TARGET_WASM32_WASI_PREVIEW1_THREADS_RUNNER: "wasmtime run -W bulk-memory=y -W threads=y -S threads=y --" + RUSTFLAGS: --cfg tokio_unstable -Dwarnings -C target-feature=+atomics,+bulk-memory -C 
link-args=--max-memory=67108864 check-external-types: name: check-external-types (${{ matrix.os }}) diff --git a/tests-integration/Cargo.toml b/tests-integration/Cargo.toml index 76b9956b8fd..4852f7d23ae 100644 --- a/tests-integration/Cargo.toml +++ b/tests-integration/Cargo.toml @@ -39,6 +39,7 @@ rt-process-signal = ["rt-net", "tokio/process", "tokio/signal"] # This is an explicit feature so we can use `cargo hack` testing single features # instead of all possible permutations. wasi-rt = ["rt", "macros", "sync"] +wasi-threads-rt = ["wasi-rt", "rt-multi-thread"] full = [ "macros", diff --git a/tests-integration/tests/macros_main.rs b/tests-integration/tests/macros_main.rs index e34387e5ec1..31442805141 100644 --- a/tests-integration/tests/macros_main.rs +++ b/tests-integration/tests/macros_main.rs @@ -1,8 +1,4 @@ -#![cfg(all( - feature = "macros", - feature = "rt-multi-thread", - not(target_os = "wasi") -))] +#![cfg(all(feature = "macros", feature = "rt-multi-thread"))] #[tokio::main] async fn basic_main() -> usize { diff --git a/tests-integration/tests/macros_select.rs b/tests-integration/tests/macros_select.rs index a1a242c0f4e..18338445603 100644 --- a/tests-integration/tests/macros_select.rs +++ b/tests-integration/tests/macros_select.rs @@ -4,7 +4,10 @@ use futures::channel::oneshot; use futures::executor::block_on; use std::thread; -#[cfg_attr(target_os = "wasi", ignore = "WASI: std::thread::spawn not supported")] +#[cfg_attr( + not(feature = "rt-multi-thread"), + ignore = "WASI: std::thread::spawn not supported" +)] #[test] fn join_with_select() { block_on(async { diff --git a/tokio-util/src/task/mod.rs b/tokio-util/src/task/mod.rs index e37015a4e3c..1ab3ff13dbe 100644 --- a/tokio-util/src/task/mod.rs +++ b/tokio-util/src/task/mod.rs @@ -2,9 +2,7 @@ #[cfg(tokio_unstable)] mod join_map; -#[cfg(not(target_os = "wasi"))] mod spawn_pinned; -#[cfg(not(target_os = "wasi"))] pub use spawn_pinned::LocalPoolHandle; #[cfg(tokio_unstable)] diff --git 
a/tokio/src/macros/cfg.rs b/tokio/src/macros/cfg.rs index d2f7b42bf60..c67e0e8379f 100644 --- a/tokio/src/macros/cfg.rs +++ b/tokio/src/macros/cfg.rs @@ -384,7 +384,7 @@ macro_rules! cfg_not_rt { macro_rules! cfg_rt_multi_thread { ($($item:item)*) => { $( - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] + #[cfg(feature = "rt-multi-thread")] #[cfg_attr(docsrs, doc(cfg(feature = "rt-multi-thread")))] $item )* diff --git a/tokio/src/runtime/blocking/schedule.rs b/tokio/src/runtime/blocking/schedule.rs index 8dfe5fd10f2..6c9fdf3f8e3 100644 --- a/tokio/src/runtime/blocking/schedule.rs +++ b/tokio/src/runtime/blocking/schedule.rs @@ -23,9 +23,9 @@ impl BlockingSchedule { scheduler::Handle::CurrentThread(handle) => { handle.driver.clock.inhibit_auto_advance(); } - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] + #[cfg(feature = "rt-multi-thread")] scheduler::Handle::MultiThread(_) => {} - #[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))] + #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))] scheduler::Handle::MultiThreadAlt(_) => {} } } @@ -45,9 +45,9 @@ impl task::Schedule for BlockingSchedule { handle.driver.clock.allow_auto_advance(); handle.driver.unpark(); } - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] + #[cfg(feature = "rt-multi-thread")] scheduler::Handle::MultiThread(_) => {} - #[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))] + #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))] scheduler::Handle::MultiThreadAlt(_) => {} } } diff --git a/tokio/src/runtime/builder.rs b/tokio/src/runtime/builder.rs index 27ae4c80167..3b09c0d4b10 100644 --- a/tokio/src/runtime/builder.rs +++ b/tokio/src/runtime/builder.rs @@ -199,9 +199,9 @@ pub(crate) type ThreadNameFn = std::sync::Arc String + Send + Sync + #[derive(Clone, Copy)] pub(crate) enum Kind { CurrentThread, - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] + 
#[cfg(feature = "rt-multi-thread")] MultiThread, - #[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))] + #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))] MultiThreadAlt, } @@ -224,35 +224,33 @@ impl Builder { Builder::new(Kind::CurrentThread, EVENT_INTERVAL) } - cfg_not_wasi! { - /// Returns a new builder with the multi thread scheduler selected. + /// Returns a new builder with the multi thread scheduler selected. + /// + /// Configuration methods can be chained on the return value. + #[cfg(feature = "rt-multi-thread")] + #[cfg_attr(docsrs, doc(cfg(feature = "rt-multi-thread")))] + pub fn new_multi_thread() -> Builder { + // The number `61` is fairly arbitrary. I believe this value was copied from golang. + Builder::new(Kind::MultiThread, 61) + } + + cfg_unstable! { + /// Returns a new builder with the alternate multi thread scheduler + /// selected. + /// + /// The alternate multi threaded scheduler is an in-progress + /// candidate to replace the existing multi threaded scheduler. It + /// currently does not scale as well to 16+ processors. + /// + /// This runtime flavor is currently **not considered production + /// ready**. /// /// Configuration methods can be chained on the return value. #[cfg(feature = "rt-multi-thread")] #[cfg_attr(docsrs, doc(cfg(feature = "rt-multi-thread")))] - pub fn new_multi_thread() -> Builder { + pub fn new_multi_thread_alt() -> Builder { // The number `61` is fairly arbitrary. I believe this value was copied from golang. - Builder::new(Kind::MultiThread, 61) - } - - cfg_unstable! { - /// Returns a new builder with the alternate multi thread scheduler - /// selected. - /// - /// The alternate multi threaded scheduler is an in-progress - /// candidate to replace the existing multi threaded scheduler. It - /// currently does not scale as well to 16+ processors. - /// - /// This runtime flavor is currently **not considered production - /// ready**. 
- /// - /// Configuration methods can be chained on the return value. - #[cfg(feature = "rt-multi-thread")] - #[cfg_attr(docsrs, doc(cfg(feature = "rt-multi-thread")))] - pub fn new_multi_thread_alt() -> Builder { - // The number `61` is fairly arbitrary. I believe this value was copied from golang. - Builder::new(Kind::MultiThreadAlt, 61) - } + Builder::new(Kind::MultiThreadAlt, 61) } } @@ -697,9 +695,9 @@ impl Builder { pub fn build(&mut self) -> io::Result { match &self.kind { Kind::CurrentThread => self.build_current_thread_runtime(), - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] + #[cfg(feature = "rt-multi-thread")] Kind::MultiThread => self.build_threaded_runtime(), - #[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))] + #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))] Kind::MultiThreadAlt => self.build_alt_threaded_runtime(), } } @@ -708,9 +706,9 @@ impl Builder { driver::Cfg { enable_pause_time: match self.kind { Kind::CurrentThread => true, - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] + #[cfg(feature = "rt-multi-thread")] Kind::MultiThread => false, - #[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))] + #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))] Kind::MultiThreadAlt => false, }, enable_io: self.enable_io, diff --git a/tokio/src/runtime/handle.rs b/tokio/src/runtime/handle.rs index 7e7e5636c80..01d210cd36f 100644 --- a/tokio/src/runtime/handle.rs +++ b/tokio/src/runtime/handle.rs @@ -355,9 +355,9 @@ impl Handle { pub fn runtime_flavor(&self) -> RuntimeFlavor { match self.inner { scheduler::Handle::CurrentThread(_) => RuntimeFlavor::CurrentThread, - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] + #[cfg(feature = "rt-multi-thread")] scheduler::Handle::MultiThread(_) => RuntimeFlavor::MultiThread, - #[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))] + #[cfg(all(tokio_unstable, feature 
= "rt-multi-thread"))] scheduler::Handle::MultiThreadAlt(_) => RuntimeFlavor::MultiThreadAlt, } } @@ -385,9 +385,9 @@ impl Handle { pub fn id(&self) -> runtime::Id { let owned_id = match &self.inner { scheduler::Handle::CurrentThread(handle) => handle.owned_id(), - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] + #[cfg(feature = "rt-multi-thread")] scheduler::Handle::MultiThread(handle) => handle.owned_id(), - #[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))] + #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))] scheduler::Handle::MultiThreadAlt(handle) => handle.owned_id(), }; owned_id.into() diff --git a/tokio/src/runtime/runtime.rs b/tokio/src/runtime/runtime.rs index ec5b65faac6..7cf2cebeffc 100644 --- a/tokio/src/runtime/runtime.rs +++ b/tokio/src/runtime/runtime.rs @@ -126,11 +126,11 @@ pub(super) enum Scheduler { CurrentThread(CurrentThread), /// Execute tasks across multiple threads. - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] + #[cfg(feature = "rt-multi-thread")] MultiThread(MultiThread), /// Execute tasks across multiple threads. - #[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))] + #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))] MultiThreadAlt(MultiThreadAlt), } @@ -147,40 +147,38 @@ impl Runtime { } } - cfg_not_wasi! { - /// Creates a new runtime instance with default configuration values. - /// - /// This results in the multi threaded scheduler, I/O driver, and time driver being - /// initialized. - /// - /// Most applications will not need to call this function directly. Instead, - /// they will use the [`#[tokio::main]` attribute][main]. When a more complex - /// configuration is necessary, the [runtime builder] may be used. - /// - /// See [module level][mod] documentation for more details. - /// - /// # Examples - /// - /// Creating a new `Runtime` with default configuration values. 
- /// - /// ``` - /// use tokio::runtime::Runtime; - /// - /// let rt = Runtime::new() - /// .unwrap(); - /// - /// // Use the runtime... - /// ``` - /// - /// [mod]: index.html - /// [main]: ../attr.main.html - /// [threaded scheduler]: index.html#threaded-scheduler - /// [runtime builder]: crate::runtime::Builder - #[cfg(feature = "rt-multi-thread")] - #[cfg_attr(docsrs, doc(cfg(feature = "rt-multi-thread")))] - pub fn new() -> std::io::Result { - Builder::new_multi_thread().enable_all().build() - } + /// Creates a new runtime instance with default configuration values. + /// + /// This results in the multi threaded scheduler, I/O driver, and time driver being + /// initialized. + /// + /// Most applications will not need to call this function directly. Instead, + /// they will use the [`#[tokio::main]` attribute][main]. When a more complex + /// configuration is necessary, the [runtime builder] may be used. + /// + /// See [module level][mod] documentation for more details. + /// + /// # Examples + /// + /// Creating a new `Runtime` with default configuration values. + /// + /// ``` + /// use tokio::runtime::Runtime; + /// + /// let rt = Runtime::new() + /// .unwrap(); + /// + /// // Use the runtime... + /// ``` + /// + /// [mod]: index.html + /// [main]: ../attr.main.html + /// [threaded scheduler]: index.html#threaded-scheduler + /// [runtime builder]: crate::runtime::Builder + #[cfg(feature = "rt-multi-thread")] + #[cfg_attr(docsrs, doc(cfg(feature = "rt-multi-thread")))] + pub fn new() -> std::io::Result { + Builder::new_multi_thread().enable_all().build() } /// Returns a handle to the runtime's spawner. 
@@ -347,9 +345,9 @@ impl Runtime { match &self.scheduler { Scheduler::CurrentThread(exec) => exec.block_on(&self.handle.inner, future), - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] + #[cfg(feature = "rt-multi-thread")] Scheduler::MultiThread(exec) => exec.block_on(&self.handle.inner, future), - #[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))] + #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))] Scheduler::MultiThreadAlt(exec) => exec.block_on(&self.handle.inner, future), } } @@ -469,13 +467,13 @@ impl Drop for Runtime { let _guard = context::try_set_current(&self.handle.inner); current_thread.shutdown(&self.handle.inner); } - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] + #[cfg(feature = "rt-multi-thread")] Scheduler::MultiThread(multi_thread) => { // The threaded scheduler drops its tasks on its worker threads, which is // already in the runtime's context. multi_thread.shutdown(&self.handle.inner); } - #[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))] + #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))] Scheduler::MultiThreadAlt(multi_thread) => { // The threaded scheduler drops its tasks on its worker threads, which is // already in the runtime's context. 
diff --git a/tokio/src/runtime/scheduler/inject/shared.rs b/tokio/src/runtime/scheduler/inject/shared.rs index 2d29486db73..e32c2e4d719 100644 --- a/tokio/src/runtime/scheduler/inject/shared.rs +++ b/tokio/src/runtime/scheduler/inject/shared.rs @@ -38,10 +38,7 @@ impl Shared { } // Kind of annoying to have to include the cfg here - #[cfg(any( - tokio_taskdump, - all(feature = "rt-multi-thread", not(target_os = "wasi")) - ))] + #[cfg(any(tokio_taskdump, feature = "rt-multi-thread"))] pub(crate) fn is_closed(&self, synced: &Synced) -> bool { synced.is_closed } diff --git a/tokio/src/runtime/scheduler/mod.rs b/tokio/src/runtime/scheduler/mod.rs index 42368e5bed8..04fbff39e47 100644 --- a/tokio/src/runtime/scheduler/mod.rs +++ b/tokio/src/runtime/scheduler/mod.rs @@ -32,10 +32,10 @@ pub(crate) enum Handle { #[cfg(feature = "rt")] CurrentThread(Arc), - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] + #[cfg(feature = "rt-multi-thread")] MultiThread(Arc), - #[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))] + #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))] MultiThreadAlt(Arc), // TODO: This is to avoid triggering "dead code" warnings many other places @@ -49,10 +49,10 @@ pub(crate) enum Handle { pub(super) enum Context { CurrentThread(current_thread::Context), - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] + #[cfg(feature = "rt-multi-thread")] MultiThread(multi_thread::Context), - #[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))] + #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))] MultiThreadAlt(multi_thread_alt::Context), } @@ -63,10 +63,10 @@ impl Handle { #[cfg(feature = "rt")] Handle::CurrentThread(ref h) => &h.driver, - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] + #[cfg(feature = "rt-multi-thread")] Handle::MultiThread(ref h) => &h.driver, - #[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))] + 
#[cfg(all(tokio_unstable, feature = "rt-multi-thread"))] Handle::MultiThreadAlt(ref h) => &h.driver, #[cfg(not(feature = "rt"))] @@ -89,10 +89,10 @@ cfg_rt! { match $self { $ty::CurrentThread($h) => $e, - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] + #[cfg(feature = "rt-multi-thread")] $ty::MultiThread($h) => $e, - #[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))] + #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))] $ty::MultiThreadAlt($h) => $e, } } @@ -119,10 +119,10 @@ cfg_rt! { match self { Handle::CurrentThread(h) => current_thread::Handle::spawn(h, future, id), - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] + #[cfg(feature = "rt-multi-thread")] Handle::MultiThread(h) => multi_thread::Handle::spawn(h, future, id), - #[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))] + #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))] Handle::MultiThreadAlt(h) => multi_thread_alt::Handle::spawn(h, future, id), } } @@ -131,10 +131,10 @@ cfg_rt! { match *self { Handle::CurrentThread(_) => {}, - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] + #[cfg(feature = "rt-multi-thread")] Handle::MultiThread(ref h) => h.shutdown(), - #[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))] + #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))] Handle::MultiThreadAlt(ref h) => h.shutdown(), } } @@ -146,7 +146,7 @@ cfg_rt! { pub(crate) fn as_current_thread(&self) -> &Arc { match self { Handle::CurrentThread(handle) => handle, - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] + #[cfg(feature = "rt-multi-thread")] _ => panic!("not a CurrentThread handle"), } } @@ -170,9 +170,9 @@ cfg_rt! 
{ pub(crate) fn num_workers(&self) -> usize { match self { Handle::CurrentThread(_) => 1, - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] + #[cfg(feature = "rt-multi-thread")] Handle::MultiThread(handle) => handle.num_workers(), - #[cfg(all(tokio_unstable, feature = "rt-multi-thread", not(target_os = "wasi")))] + #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))] Handle::MultiThreadAlt(handle) => handle.num_workers(), } } @@ -216,7 +216,7 @@ cfg_rt! { pub(crate) fn expect_current_thread(&self) -> ¤t_thread::Context { match self { Context::CurrentThread(context) => context, - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] + #[cfg(feature = "rt-multi-thread")] _ => panic!("expected `CurrentThread::Context`") } } diff --git a/tokio/src/runtime/task/trace/mod.rs b/tokio/src/runtime/task/trace/mod.rs index ec2e8432216..bb411f42d72 100644 --- a/tokio/src/runtime/task/trace/mod.rs +++ b/tokio/src/runtime/task/trace/mod.rs @@ -195,13 +195,9 @@ pub(crate) fn trace_leaf(cx: &mut task::Context<'_>) -> Poll<()> { if let Some(scheduler) = scheduler { match scheduler { scheduler::Context::CurrentThread(s) => s.defer.defer(cx.waker()), - #[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] + #[cfg(feature = "rt-multi-thread")] scheduler::Context::MultiThread(s) => s.defer.defer(cx.waker()), - #[cfg(all( - tokio_unstable, - feature = "rt-multi-thread", - not(target_os = "wasi") - ))] + #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))] scheduler::Context::MultiThreadAlt(_) => unimplemented!(), } } diff --git a/tokio/src/task/mod.rs b/tokio/src/task/mod.rs index f45df10a982..32a87c93c55 100644 --- a/tokio/src/task/mod.rs +++ b/tokio/src/task/mod.rs @@ -318,10 +318,8 @@ cfg_rt! { pub use crate::runtime::task::{JoinError, JoinHandle}; - cfg_not_wasi! 
{ - mod blocking; - pub use blocking::spawn_blocking; - } + mod blocking; + pub use blocking::spawn_blocking; mod spawn; pub use spawn::spawn; From 731dde21dc92e9cb9d25cf74a2b29e15cd364dbb Mon Sep 17 00:00:00 2001 From: Valentin Date: Thu, 25 Apr 2024 09:01:15 +0200 Subject: [PATCH 118/162] runtime: clarify misleading use of `UnsafeCell::with_mut` (#6513) The code that we're removing calls UnsafeCell::with_mut with the argument `std::mem::drop`. This is misleading because the use of `drop` has no effect. `with_mut` takes an argument of type `impl FnOnce(*mut T) -> R`. The argument to the argument function is a pointer. Dropping a pointer has no effect. The comment above the first instance of this pattern claims that this releases some resource. This is false because the call has no effect. The intention might have been to drop the value behind the pointer. If this did happen, it would be a bug because the resource (`waker`) would be dropped again at the end of the function when the containing object is dropped. I looked through the history of this code. This code originally called `with_mut` with the argument `|_| ()`. Calling `with_mut` with an argument function that does nothing has a side effect when testing with loom. When testing with loom, the code uses loom's UnsafeCell type instead of std's. The intention of the code was likely to make use of that side effect because we expect to have exclusive access here as we are going to drop the containing object. The side effect is that loom checks that Rust's reference uniqueness properties are upheld. To continue to check this, I have only removed the use of `drop` while keeping `with_mut`. It would be even better to have loom check this implicitly when UnsafeCell is dropped. I created an issue about this in loom [1]. 
Links: https://github.com/tokio-rs/loom/issues/349 [1] --- tokio/src/runtime/task/harness.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tokio/src/runtime/task/harness.rs b/tokio/src/runtime/task/harness.rs index cf19eea83bb..8479becd80a 100644 --- a/tokio/src/runtime/task/harness.rs +++ b/tokio/src/runtime/task/harness.rs @@ -249,11 +249,11 @@ where } pub(super) fn dealloc(self) { - // Release the join waker, if there is one. - self.trailer().waker.with_mut(drop); - - // Check causality - self.core().stage.with_mut(drop); + // Observe that we expect to have mutable access to these objects + // because we are going to drop them. This only matters when running + // under loom. + self.trailer().waker.with_mut(|_| ()); + self.core().stage.with_mut(|_| ()); // Safety: The caller of this method just transitioned our ref-count to // zero, so it is our responsibility to release the allocation. From d33fdd86a3de75500fe554d6547cf5ad43e006bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Thu, 25 Apr 2024 08:02:11 +0100 Subject: [PATCH 119/162] time: check for overflow in `Interval::poll_tick` (#6487) --- tokio/src/time/interval.rs | 4 +++- tokio/tests/time_interval.rs | 8 +++++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/tokio/src/time/interval.rs b/tokio/src/time/interval.rs index dee28793a32..2b5246acfa4 100644 --- a/tokio/src/time/interval.rs +++ b/tokio/src/time/interval.rs @@ -480,7 +480,9 @@ impl Interval { self.missed_tick_behavior .next_timeout(timeout, now, self.period) } else { - timeout + self.period + timeout + .checked_add(self.period) + .unwrap_or_else(Instant::far_future) }; // When we arrive here, the internal delay returned `Poll::Ready`. 
diff --git a/tokio/tests/time_interval.rs b/tokio/tests/time_interval.rs index 4f3e95b0d2a..7472a37123c 100644 --- a/tokio/tests/time_interval.rs +++ b/tokio/tests/time_interval.rs @@ -6,7 +6,7 @@ use std::task::{Context, Poll}; use futures::{Stream, StreamExt}; use tokio::time::{self, Duration, Instant, Interval, MissedTickBehavior}; -use tokio_test::{assert_pending, assert_ready_eq, task}; +use tokio_test::{assert_pending, assert_ready, assert_ready_eq, task}; // Takes the `Interval` task, `start` variable, and optional time deltas // For each time delta, it polls the `Interval` and asserts that the result is @@ -469,3 +469,9 @@ async fn stream_with_interval_poll_tick_no_waking() { // task when returning `Poll::Ready`. assert_eq!(items, vec![]); } + +#[tokio::test(start_paused = true)] +async fn interval_doesnt_panic_max_duration_when_polling() { + let mut timer = task::spawn(time::interval(Duration::MAX)); + assert_ready!(timer.enter(|cx, mut timer| timer.poll_tick(cx))); +} From 28439e2269f2696a5009f2f05ce8f39b7fa13217 Mon Sep 17 00:00:00 2001 From: Paolo Barbolini Date: Wed, 1 May 2024 14:50:40 +0200 Subject: [PATCH 120/162] time: clean up implementation (#6517) --- tokio/src/runtime/time/entry.rs | 24 +++---- tokio/src/runtime/time/mod.rs | 22 +++---- tokio/src/runtime/time/source.rs | 4 +- tokio/src/runtime/time/wheel/level.rs | 90 ++------------------------- tokio/src/runtime/time/wheel/mod.rs | 22 +++---- tokio/src/util/mod.rs | 3 + 6 files changed, 31 insertions(+), 134 deletions(-) diff --git a/tokio/src/runtime/time/entry.rs b/tokio/src/runtime/time/entry.rs index 0998b53011d..f6f56e277b2 100644 --- a/tokio/src/runtime/time/entry.rs +++ b/tokio/src/runtime/time/entry.rs @@ -75,7 +75,7 @@ const STATE_MIN_VALUE: u64 = STATE_PENDING_FIRE; /// The largest safe integer to use for ticks. /// /// This value should be updated if any other signal values are added above. 
-pub(super) const MAX_SAFE_MILLIS_DURATION: u64 = u64::MAX - 2; +pub(super) const MAX_SAFE_MILLIS_DURATION: u64 = STATE_MIN_VALUE - 1; /// This structure holds the current shared state of the timer - its scheduled /// time (if registered), or otherwise the result of the timer completing, as @@ -187,18 +187,14 @@ impl StateCell { break Err(cur_state); } - match self.state.compare_exchange( + match self.state.compare_exchange_weak( cur_state, STATE_PENDING_FIRE, Ordering::AcqRel, Ordering::Acquire, ) { - Ok(_) => { - break Ok(()); - } - Err(actual_state) => { - cur_state = actual_state; - } + Ok(_) => break Ok(()), + Err(actual_state) => cur_state = actual_state, } } } @@ -266,12 +262,8 @@ impl StateCell { Ordering::AcqRel, Ordering::Acquire, ) { - Ok(_) => { - return Ok(()); - } - Err(true_prior) => { - prior = true_prior; - } + Ok(_) => return Ok(()), + Err(true_prior) => prior = true_prior, } } } @@ -564,9 +556,7 @@ impl TimerEntry { self.as_mut().reset(deadline, true); } - let this = unsafe { self.get_unchecked_mut() }; - - this.inner().state.poll(cx.waker()) + self.inner().state.poll(cx.waker()) } pub(crate) fn driver(&self) -> &super::Handle { diff --git a/tokio/src/runtime/time/mod.rs b/tokio/src/runtime/time/mod.rs index a30393a02b2..8cd51c5cb4a 100644 --- a/tokio/src/runtime/time/mod.rs +++ b/tokio/src/runtime/time/mod.rs @@ -23,9 +23,10 @@ use crate::loom::sync::Mutex; use crate::runtime::driver::{self, IoHandle, IoStack}; use crate::time::error::Error; use crate::time::{Clock, Duration}; +use crate::util::WakeList; use std::fmt; -use std::{num::NonZeroU64, ptr::NonNull, task::Waker}; +use std::{num::NonZeroU64, ptr::NonNull}; /// Time implementation that drives [`Sleep`][sleep], [`Interval`][interval], and [`Timeout`][timeout]. 
/// @@ -253,8 +254,7 @@ impl Handle { } pub(self) fn process_at_time(&self, mut now: u64) { - let mut waker_list: [Option; 32] = Default::default(); - let mut waker_idx = 0; + let mut waker_list = WakeList::new(); let mut lock = self.inner.lock(); @@ -273,19 +273,13 @@ impl Handle { // SAFETY: We hold the driver lock, and just removed the entry from any linked lists. if let Some(waker) = unsafe { entry.fire(Ok(())) } { - waker_list[waker_idx] = Some(waker); + waker_list.push(waker); - waker_idx += 1; - - if waker_idx == waker_list.len() { + if !waker_list.can_push() { // Wake a batch of wakers. To avoid deadlock, we must do this with the lock temporarily dropped. drop(lock); - for waker in waker_list.iter_mut() { - waker.take().unwrap().wake(); - } - - waker_idx = 0; + waker_list.wake_all(); lock = self.inner.lock(); } @@ -299,9 +293,7 @@ impl Handle { drop(lock); - for waker in &mut waker_list[0..waker_idx] { - waker.take().unwrap().wake(); - } + waker_list.wake_all(); } /// Removes a registered timer from the driver. 
diff --git a/tokio/src/runtime/time/source.rs b/tokio/src/runtime/time/source.rs index c709dc5380f..e371c207cdb 100644 --- a/tokio/src/runtime/time/source.rs +++ b/tokio/src/runtime/time/source.rs @@ -21,9 +21,7 @@ impl TimeSource { pub(crate) fn instant_to_tick(&self, t: Instant) -> u64 { // round up - let dur: Duration = t - .checked_duration_since(self.start_time) - .unwrap_or_else(|| Duration::from_secs(0)); + let dur: Duration = t.saturating_duration_since(self.start_time); let ms = dur.as_millis(); ms.try_into().unwrap_or(MAX_SAFE_MILLIS_DURATION) diff --git a/tokio/src/runtime/time/wheel/level.rs b/tokio/src/runtime/time/wheel/level.rs index a828c0067ef..d31eaf46879 100644 --- a/tokio/src/runtime/time/wheel/level.rs +++ b/tokio/src/runtime/time/wheel/level.rs @@ -1,6 +1,6 @@ use crate::runtime::time::{EntryList, TimerHandle, TimerShared}; -use std::{fmt, ptr::NonNull}; +use std::{array, fmt, ptr::NonNull}; /// Wheel for a single level in the timer. This wheel contains 64 slots. pub(crate) struct Level { @@ -39,89 +39,10 @@ const LEVEL_MULT: usize = 64; impl Level { pub(crate) fn new(level: usize) -> Level { - // A value has to be Copy in order to use syntax like: - // let stack = Stack::default(); - // ... - // slots: [stack; 64], - // - // Alternatively, since Stack is Default one can - // use syntax like: - // let slots: [Stack; 64] = Default::default(); - // - // However, that is only supported for arrays of size - // 32 or fewer. So in our case we have to explicitly - // invoke the constructor for each array element. 
- let ctor = EntryList::default; - Level { level, occupied: 0, - slot: [ - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ctor(), - ], + slot: array::from_fn(|_| EntryList::default()), } } @@ -130,10 +51,7 @@ impl Level { pub(crate) fn next_expiration(&self, now: u64) -> Option { // Use the `occupied` bit field to get the index of the next slot that // needs to be processed. - let slot = match self.next_occupied_slot(now) { - Some(slot) => slot, - None => return None, - }; + let slot = self.next_occupied_slot(now)?; // From the slot index, calculate the `Instant` at which it needs to be // processed. This value *must* be in the future with respect to `now`. 
@@ -196,7 +114,7 @@ impl Level { let now_slot = (now / slot_range(self.level)) as usize; let occupied = self.occupied.rotate_right(now_slot as u32); let zeros = occupied.trailing_zeros() as usize; - let slot = (zeros + now_slot) % 64; + let slot = (zeros + now_slot) % LEVEL_MULT; Some(slot) } diff --git a/tokio/src/runtime/time/wheel/mod.rs b/tokio/src/runtime/time/wheel/mod.rs index bf13b7b2415..f2b4228514c 100644 --- a/tokio/src/runtime/time/wheel/mod.rs +++ b/tokio/src/runtime/time/wheel/mod.rs @@ -5,7 +5,7 @@ mod level; pub(crate) use self::level::Expiration; use self::level::Level; -use std::ptr::NonNull; +use std::{array, ptr::NonNull}; use super::EntryList; @@ -35,7 +35,7 @@ pub(crate) struct Wheel { /// * ~ 4 min slots / ~ 4 hr range /// * ~ 4 hr slots / ~ 12 day range /// * ~ 12 day slots / ~ 2 yr range - levels: Vec, + levels: Box<[Level; NUM_LEVELS]>, /// Entries queued for firing pending: EntryList, @@ -52,11 +52,9 @@ pub(super) const MAX_DURATION: u64 = (1 << (6 * NUM_LEVELS)) - 1; impl Wheel { /// Creates a new timing wheel. pub(crate) fn new() -> Wheel { - let levels = (0..NUM_LEVELS).map(Level::new).collect(); - Wheel { elapsed: 0, - levels, + levels: Box::new(array::from_fn(Level::new)), pending: EntryList::new(), } } @@ -130,7 +128,6 @@ impl Wheel { ); let level = self.level_for(when); - self.levels[level].remove_entry(item); } } @@ -180,11 +177,11 @@ impl Wheel { } // Check all levels - for level in 0..NUM_LEVELS { - if let Some(expiration) = self.levels[level].next_expiration(self.elapsed) { + for (level_num, level) in self.levels.iter().enumerate() { + if let Some(expiration) = level.next_expiration(self.elapsed) { // There cannot be any expirations at a higher level that happen // before this one. 
- debug_assert!(self.no_expirations_before(level + 1, expiration.deadline)); + debug_assert!(self.no_expirations_before(level_num + 1, expiration.deadline)); return Some(expiration); } @@ -203,8 +200,8 @@ impl Wheel { fn no_expirations_before(&self, start_level: usize, before: u64) -> bool { let mut res = true; - for l2 in start_level..NUM_LEVELS { - if let Some(e2) = self.levels[l2].next_expiration(self.elapsed) { + for level in &self.levels[start_level..] { + if let Some(e2) = level.next_expiration(self.elapsed) { if e2.deadline < before { res = false; } @@ -267,7 +264,6 @@ impl Wheel { } /// Obtains the list of entries that need processing for the given expiration. - /// fn take_entries(&mut self, expiration: &Expiration) -> EntryList { self.levels[expiration.level].take_slot(expiration.slot) } @@ -292,7 +288,7 @@ fn level_for(elapsed: u64, when: u64) -> usize { let leading_zeros = masked.leading_zeros() as usize; let significant = 63 - leading_zeros; - significant / 6 + significant / NUM_LEVELS } #[cfg(all(test, not(loom)))] diff --git a/tokio/src/util/mod.rs b/tokio/src/util/mod.rs index abdb70406d2..d821ec897cf 100644 --- a/tokio/src/util/mod.rs +++ b/tokio/src/util/mod.rs @@ -19,6 +19,8 @@ pub(crate) mod once_cell; // rt and signal use `Notify`, which requires `WakeList`. feature = "rt", feature = "signal", + // time driver uses `WakeList` in `Handle::process_at_time`. 
+ feature = "time", ))] mod wake_list; #[cfg(any( @@ -28,6 +30,7 @@ mod wake_list; feature = "fs", feature = "rt", feature = "signal", + feature = "time", ))] pub(crate) use wake_list::WakeList; From e971a5e7d73757f3631738c8e32c94bbd78f8e26 Mon Sep 17 00:00:00 2001 From: Paolo Barbolini Date: Wed, 1 May 2024 16:19:23 +0200 Subject: [PATCH 121/162] util: use FIFO ordering in `WakeList` (#6521) --- tokio/src/util/wake_list.rs | 37 +++++++++++++++++++++++++++++++------ 1 file changed, 31 insertions(+), 6 deletions(-) diff --git a/tokio/src/util/wake_list.rs b/tokio/src/util/wake_list.rs index c5f432b0bb8..23a559d02be 100644 --- a/tokio/src/util/wake_list.rs +++ b/tokio/src/util/wake_list.rs @@ -37,12 +37,37 @@ impl WakeList { } pub(crate) fn wake_all(&mut self) { - assert!(self.curr <= NUM_WAKERS); - while self.curr > 0 { - self.curr -= 1; - // SAFETY: The first `curr` elements of `WakeList` are initialized, so by decrementing - // `curr`, we can take ownership of the last item. - let waker = unsafe { ptr::read(self.inner[self.curr].as_mut_ptr()) }; + struct DropGuard { + start: *mut Waker, + end: *mut Waker, + } + + impl Drop for DropGuard { + fn drop(&mut self) { + // SAFETY: Both pointers are part of the same object, with `start <= end`. + let len = unsafe { self.end.offset_from(self.start) } as usize; + let slice = ptr::slice_from_raw_parts_mut(self.start, len); + // SAFETY: All elements in `start..len` are initialized, so we can drop them. + unsafe { ptr::drop_in_place(slice) }; + } + } + + debug_assert!(self.curr <= NUM_WAKERS); + + let mut guard = { + let start = self.inner.as_mut_ptr().cast::(); + // SAFETY: The resulting pointer is in bounds or one after the length of the same object. + let end = unsafe { start.add(self.curr) }; + // Transfer ownership of the wakers in `inner` to `DropGuard`. + self.curr = 0; + DropGuard { start, end } + }; + while !ptr::eq(guard.start, guard.end) { + // SAFETY: `start` is always initialized if `start != end`. 
+ let waker = unsafe { ptr::read(guard.start) }; + // SAFETY: The resulting pointer is in bounds or one after the length of the same object. + guard.start = unsafe { guard.start.add(1) }; + // If this panics, then `guard` will clean up the remaining wakers. waker.wake(); } } From 3c8d8e60ca6ffa4a7669f35b098e454893a6a9a6 Mon Sep 17 00:00:00 2001 From: Motoyuki Kimura Date: Fri, 3 May 2024 18:23:25 +0900 Subject: [PATCH 122/162] chore: fix latest rust-1.78.0 warnings (#6528) --- .github/workflows/ci.yml | 26 +++++++++---------- tokio/src/process/unix/orphan.rs | 1 + .../runtime/scheduler/multi_thread/worker.rs | 1 + tokio/src/util/markers.rs | 1 + 4 files changed, 16 insertions(+), 13 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e39333ba60d..4f5e9004959 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -67,7 +67,7 @@ jobs: - x86_64-fortanix-unknown-sgx - check-redox - wasm32-unknown-unknown - - wasm32-wasi + - wasm32-wasip1 - check-external-types - check-fuzzing - check-unstable-mt-counters @@ -908,15 +908,15 @@ jobs: run: wasm-pack test --node -- --features "macros sync" working-directory: tokio - wasm32-wasi: + wasm32-wasip1: name: ${{ matrix.target }} needs: basics runs-on: ubuntu-latest strategy: matrix: target: - - wasm32-wasi - - wasm32-wasi-preview1-threads + - wasm32-wasip1 + - wasm32-wasip1-threads steps: - uses: actions/checkout@v4 - name: Install Rust ${{ env.rust_stable }} @@ -935,36 +935,36 @@ jobs: - name: WASI test tokio full run: cargo test -p tokio --target ${{ matrix.target }} --features full env: - CARGO_TARGET_WASM32_WASI_RUNNER: "wasmtime run --" - CARGO_TARGET_WASM32_WASI_PREVIEW1_THREADS_RUNNER: "wasmtime run -W bulk-memory=y -W threads=y -S threads=y --" + CARGO_TARGET_WASM32_WASIP1_RUNNER: "wasmtime run --" + CARGO_TARGET_WASM32_WASIP1_THREADS_RUNNER: "wasmtime run -W bulk-memory=y -W threads=y -S threads=y --" RUSTFLAGS: --cfg tokio_unstable -Dwarnings -C 
target-feature=+atomics,+bulk-memory -C link-args=--max-memory=67108864 - name: WASI test tokio-util full run: cargo test -p tokio-util --target ${{ matrix.target }} --features full env: - CARGO_TARGET_WASM32_WASI_RUNNER: "wasmtime run --" - CARGO_TARGET_WASM32_WASI_PREVIEW1_THREADS_RUNNER: "wasmtime run -W bulk-memory=y -W threads=y -S threads=y --" + CARGO_TARGET_WASM32_WASIP1_RUNNER: "wasmtime run --" + CARGO_TARGET_WASM32_WASIP1_THREADS_RUNNER: "wasmtime run -W bulk-memory=y -W threads=y -S threads=y --" RUSTFLAGS: --cfg tokio_unstable -Dwarnings -C target-feature=+atomics,+bulk-memory -C link-args=--max-memory=67108864 - name: WASI test tokio-stream run: cargo test -p tokio-stream --target ${{ matrix.target }} --features time,net,io-util,sync env: - CARGO_TARGET_WASM32_WASI_RUNNER: "wasmtime run --" - CARGO_TARGET_WASM32_WASI_PREVIEW1_THREADS_RUNNER: "wasmtime run -W bulk-memory=y -W threads=y -S threads=y --" + CARGO_TARGET_WASM32_WASIP1_RUNNER: "wasmtime run --" + CARGO_TARGET_WASM32_WASIP1_THREADS_RUNNER: "wasmtime run -W bulk-memory=y -W threads=y -S threads=y --" RUSTFLAGS: --cfg tokio_unstable -Dwarnings -C target-feature=+atomics,+bulk-memory -C link-args=--max-memory=67108864 - name: test tests-integration --features wasi-rt # TODO: this should become: `cargo hack wasi test --each-feature` run: cargo wasi test --test rt_yield --features wasi-rt - if: matrix.target == 'wasm32-wasi' + if: matrix.target == 'wasm32-wasip1' working-directory: tests-integration - name: test tests-integration --features wasi-threads-rt run: cargo test --target ${{ matrix.target }} --features wasi-threads-rt - if: matrix.target == 'wasm32-wasi-preview1-threads' + if: matrix.target == 'wasm32-wasip1-threads' working-directory: tests-integration env: - CARGO_TARGET_WASM32_WASI_PREVIEW1_THREADS_RUNNER: "wasmtime run -W bulk-memory=y -W threads=y -S threads=y --" + CARGO_TARGET_WASM32_WASIP1_THREADS_RUNNER: "wasmtime run -W bulk-memory=y -W threads=y -S threads=y --" RUSTFLAGS: 
--cfg tokio_unstable -Dwarnings -C target-feature=+atomics,+bulk-memory -C link-args=--max-memory=67108864 check-external-types: diff --git a/tokio/src/process/unix/orphan.rs b/tokio/src/process/unix/orphan.rs index b6ca7da238b..a89555f5876 100644 --- a/tokio/src/process/unix/orphan.rs +++ b/tokio/src/process/unix/orphan.rs @@ -8,6 +8,7 @@ use std::process::ExitStatus; /// An interface for waiting on a process to exit. pub(crate) trait Wait { /// Get the identifier for this process or diagnostics. + #[allow(dead_code)] fn id(&self) -> u32; /// Try waiting for a process to exit in a non-blocking manner. fn try_wait(&mut self) -> io::Result>; diff --git a/tokio/src/runtime/scheduler/multi_thread/worker.rs b/tokio/src/runtime/scheduler/multi_thread/worker.rs index f07fb8568cd..83e70795f4f 100644 --- a/tokio/src/runtime/scheduler/multi_thread/worker.rs +++ b/tokio/src/runtime/scheduler/multi_thread/worker.rs @@ -450,6 +450,7 @@ impl Launch { } fn run(worker: Arc) { + #[allow(dead_code)] struct AbortOnPanic; impl Drop for AbortOnPanic { diff --git a/tokio/src/util/markers.rs b/tokio/src/util/markers.rs index c16ebdf0bc6..fee1d1ccc78 100644 --- a/tokio/src/util/markers.rs +++ b/tokio/src/util/markers.rs @@ -1,4 +1,5 @@ /// Marker for types that are `Sync` but not `Send` +#[allow(dead_code)] pub(crate) struct SyncNotSend(#[allow(dead_code)] *mut ()); unsafe impl Sync for SyncNotSend {} From b7d4fba70755e2ec682665f22f630b8a725a3705 Mon Sep 17 00:00:00 2001 From: Matthijs Brobbel Date: Fri, 3 May 2024 13:44:01 +0200 Subject: [PATCH 123/162] sync: add `mpsc::Receiver::{capacity,max_capacity}` (#6511) --- tokio/src/sync/mpsc/bounded.rs | 88 ++++++++++++++++++++++++++++++++-- tokio/src/sync/mpsc/chan.rs | 4 ++ 2 files changed, 88 insertions(+), 4 deletions(-) diff --git a/tokio/src/sync/mpsc/bounded.rs b/tokio/src/sync/mpsc/bounded.rs index 6ac97591fea..a4f98060b19 100644 --- a/tokio/src/sync/mpsc/bounded.rs +++ b/tokio/src/sync/mpsc/bounded.rs @@ -481,7 +481,7 @@ impl 
Receiver { /// assert!(!rx.is_closed()); /// /// rx.close(); - /// + /// /// assert!(rx.is_closed()); /// } /// ``` @@ -530,6 +530,86 @@ impl Receiver { self.chan.len() } + /// Returns the current capacity of the channel. + /// + /// The capacity goes down when the sender sends a value by calling [`Sender::send`] or by reserving + /// capacity with [`Sender::reserve`]. The capacity goes up when values are received. + /// This is distinct from [`max_capacity`], which always returns buffer capacity initially + /// specified when calling [`channel`]. + /// + /// # Examples + /// + /// ``` + /// use tokio::sync::mpsc; + /// + /// #[tokio::main] + /// async fn main() { + /// let (tx, mut rx) = mpsc::channel::<()>(5); + /// + /// assert_eq!(rx.capacity(), 5); + /// + /// // Making a reservation drops the capacity by one. + /// let permit = tx.reserve().await.unwrap(); + /// assert_eq!(rx.capacity(), 4); + /// assert_eq!(rx.len(), 0); + /// + /// // Sending and receiving a value increases the capacity by one. + /// permit.send(()); + /// assert_eq!(rx.len(), 1); + /// rx.recv().await.unwrap(); + /// assert_eq!(rx.capacity(), 5); + /// + /// // Directly sending a message drops the capacity by one. + /// tx.send(()).await.unwrap(); + /// assert_eq!(rx.capacity(), 4); + /// assert_eq!(rx.len(), 1); + /// + /// // Receiving the message increases the capacity by one. + /// rx.recv().await.unwrap(); + /// assert_eq!(rx.capacity(), 5); + /// assert_eq!(rx.len(), 0); + /// } + /// ``` + /// [`capacity`]: Receiver::capacity + /// [`max_capacity`]: Receiver::max_capacity + pub fn capacity(&self) -> usize { + self.chan.semaphore().semaphore.available_permits() + } + + /// Returns the maximum buffer capacity of the channel. + /// + /// The maximum capacity is the buffer capacity initially specified when calling + /// [`channel`]. 
This is distinct from [`capacity`], which returns the *current* + /// available buffer capacity: as messages are sent and received, the value + /// returned by [`capacity`] will go up or down, whereas the value + /// returned by [`max_capacity`] will remain constant. + /// + /// # Examples + /// + /// ``` + /// use tokio::sync::mpsc; + /// + /// #[tokio::main] + /// async fn main() { + /// let (tx, rx) = mpsc::channel::<()>(5); + /// + /// // both max capacity and capacity are the same at first + /// assert_eq!(rx.max_capacity(), 5); + /// assert_eq!(rx.capacity(), 5); + /// + /// // Making a reservation doesn't change the max capacity. + /// let permit = tx.reserve().await.unwrap(); + /// assert_eq!(rx.max_capacity(), 5); + /// // but drops the capacity by one + /// assert_eq!(rx.capacity(), 4); + /// } + /// ``` + /// [`capacity`]: Receiver::capacity + /// [`max_capacity`]: Receiver::max_capacity + pub fn max_capacity(&self) -> usize { + self.chan.semaphore().bound + } + /// Polls to receive the next message on this channel. /// /// This method returns: @@ -1059,7 +1139,7 @@ impl Sender { /// /// // The iterator should now be exhausted /// assert!(permit.next().is_none()); - /// + /// /// // The value sent on the permit is received /// assert_eq!(rx.recv().await.unwrap(), 456); /// assert_eq!(rx.recv().await.unwrap(), 457); @@ -1274,7 +1354,7 @@ impl Sender { /// // The value sent on the permit is received /// assert_eq!(rx.recv().await.unwrap(), 456); /// assert_eq!(rx.recv().await.unwrap(), 457); - /// + /// /// // Trying to call try_reserve_many with 0 will return an empty iterator /// let mut permit = tx.try_reserve_many(0).unwrap(); /// assert!(permit.next().is_none()); @@ -1447,7 +1527,7 @@ impl Sender { /// [`channel`]. 
This is distinct from [`capacity`], which returns the *current* /// available buffer capacity: as messages are sent and received, the /// value returned by [`capacity`] will go up or down, whereas the value - /// returned by `max_capacity` will remain constant. + /// returned by [`max_capacity`] will remain constant. /// /// # Examples /// diff --git a/tokio/src/sync/mpsc/chan.rs b/tokio/src/sync/mpsc/chan.rs index ae378d7ecb2..d8838242a39 100644 --- a/tokio/src/sync/mpsc/chan.rs +++ b/tokio/src/sync/mpsc/chan.rs @@ -465,6 +465,10 @@ impl Rx { } }) } + + pub(super) fn semaphore(&self) -> &S { + &self.inner.semaphore + } } impl Drop for Rx { From f6eb1ee19687bfd6cc7bf5c675d946970854013e Mon Sep 17 00:00:00 2001 From: Weijia Jiang Date: Fri, 3 May 2024 21:37:52 +0800 Subject: [PATCH 124/162] time: lazily init timers on first poll (#6512) --- benches/Cargo.toml | 5 ++ benches/time_timeout.rs | 109 ++++++++++++++++++++++++++++ tokio/src/runtime/time/entry.rs | 34 ++++++--- tokio/src/runtime/time/tests/mod.rs | 12 +-- tokio/src/time/sleep.rs | 5 +- 5 files changed, 146 insertions(+), 19 deletions(-) create mode 100644 benches/time_timeout.rs diff --git a/benches/Cargo.toml b/benches/Cargo.toml index c581055cf65..c1d13bac279 100644 --- a/benches/Cargo.toml +++ b/benches/Cargo.toml @@ -90,3 +90,8 @@ harness = false name = "time_now" path = "time_now.rs" harness = false + +[[bench]] +name = "time_timeout" +path = "time_timeout.rs" +harness = false diff --git a/benches/time_timeout.rs b/benches/time_timeout.rs new file mode 100644 index 00000000000..c961477562c --- /dev/null +++ b/benches/time_timeout.rs @@ -0,0 +1,109 @@ +use std::time::{Duration, Instant}; + +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use tokio::{ + runtime::Runtime, + time::{sleep, timeout}, +}; + +// a very quick async task, but might timeout +async fn quick_job() -> usize { + 1 +} + +fn build_run_time(workers: usize) -> Runtime { + if workers == 1 { + 
tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .unwrap() + } else { + tokio::runtime::Builder::new_multi_thread() + .enable_all() + .worker_threads(workers) + .build() + .unwrap() + } +} + +fn single_thread_scheduler_timeout(c: &mut Criterion) { + do_timeout_test(c, 1, "single_thread_timeout"); +} + +fn multi_thread_scheduler_timeout(c: &mut Criterion) { + do_timeout_test(c, 8, "multi_thread_timeout-8"); +} + +fn do_timeout_test(c: &mut Criterion, workers: usize, name: &str) { + let runtime = build_run_time(workers); + c.bench_function(name, |b| { + b.iter_custom(|iters| { + let start = Instant::now(); + runtime.block_on(async { + black_box(spawn_timeout_job(iters as usize, workers).await); + }); + start.elapsed() + }) + }); +} + +async fn spawn_timeout_job(iters: usize, procs: usize) { + let mut handles = Vec::with_capacity(procs); + for _ in 0..procs { + handles.push(tokio::spawn(async move { + for _ in 0..iters / procs { + let h = timeout(Duration::from_secs(1), quick_job()); + assert_eq!(black_box(h.await.unwrap()), 1); + } + })); + } + for handle in handles { + handle.await.unwrap(); + } +} + +fn single_thread_scheduler_sleep(c: &mut Criterion) { + do_sleep_test(c, 1, "single_thread_sleep"); +} + +fn multi_thread_scheduler_sleep(c: &mut Criterion) { + do_sleep_test(c, 8, "multi_thread_sleep-8"); +} + +fn do_sleep_test(c: &mut Criterion, workers: usize, name: &str) { + let runtime = build_run_time(workers); + + c.bench_function(name, |b| { + b.iter_custom(|iters| { + let start = Instant::now(); + runtime.block_on(async { + black_box(spawn_sleep_job(iters as usize, workers).await); + }); + start.elapsed() + }) + }); +} + +async fn spawn_sleep_job(iters: usize, procs: usize) { + let mut handles = Vec::with_capacity(procs); + for _ in 0..procs { + handles.push(tokio::spawn(async move { + for _ in 0..iters / procs { + let _h = black_box(sleep(Duration::from_secs(1))); + } + })); + } + for handle in handles { + handle.await.unwrap(); + } 
+} + +criterion_group!( + timeout_benchmark, + single_thread_scheduler_timeout, + multi_thread_scheduler_timeout, + single_thread_scheduler_sleep, + multi_thread_scheduler_sleep +); + +criterion_main!(timeout_benchmark); diff --git a/tokio/src/runtime/time/entry.rs b/tokio/src/runtime/time/entry.rs index f6f56e277b2..a6be0e62a13 100644 --- a/tokio/src/runtime/time/entry.rs +++ b/tokio/src/runtime/time/entry.rs @@ -293,7 +293,7 @@ pub(crate) struct TimerEntry { /// /// This is manipulated only under the inner mutex. TODO: Can we use loom /// cells for this? - inner: StdUnsafeCell, + inner: StdUnsafeCell>, /// Deadline for the timer. This is used to register on the first /// poll, as we can't register prior to being pinned. deadline: Instant, @@ -469,23 +469,32 @@ unsafe impl linked_list::Link for TimerShared { impl TimerEntry { #[track_caller] - pub(crate) fn new(handle: &scheduler::Handle, deadline: Instant) -> Self { + pub(crate) fn new(handle: scheduler::Handle, deadline: Instant) -> Self { // Panic if the time driver is not enabled let _ = handle.driver().time(); - let driver = handle.clone(); - Self { - driver, - inner: StdUnsafeCell::new(TimerShared::new()), + driver: handle, + inner: StdUnsafeCell::new(None), deadline, registered: false, _m: std::marker::PhantomPinned, } } + fn is_inner_init(&self) -> bool { + unsafe { &*self.inner.get() }.is_some() + } + + // This lazy initialization is for performance purposes. fn inner(&self) -> &TimerShared { - unsafe { &*self.inner.get() } + let inner = unsafe { &*self.inner.get() }; + if inner.is_none() { + unsafe { + *self.inner.get() = Some(TimerShared::new()); + } + } + return inner.as_ref().unwrap(); } pub(crate) fn deadline(&self) -> Instant { @@ -493,11 +502,15 @@ impl TimerEntry { } pub(crate) fn is_elapsed(&self) -> bool { - !self.inner().state.might_be_registered() && self.registered + self.is_inner_init() && !self.inner().state.might_be_registered() && self.registered } /// Cancels and deregisters the timer. 
This operation is irreversible. pub(crate) fn cancel(self: Pin<&mut Self>) { + // Avoid calling the `clear_entry` method, because it has not been initialized yet. + if !self.is_inner_init() { + return; + } // We need to perform an acq/rel fence with the driver thread, and the // simplest way to do so is to grab the driver lock. // @@ -524,8 +537,9 @@ impl TimerEntry { } pub(crate) fn reset(mut self: Pin<&mut Self>, new_time: Instant, reregister: bool) { - unsafe { self.as_mut().get_unchecked_mut() }.deadline = new_time; - unsafe { self.as_mut().get_unchecked_mut() }.registered = reregister; + let this = unsafe { self.as_mut().get_unchecked_mut() }; + this.deadline = new_time; + this.registered = reregister; let tick = self.driver().time_source().deadline_to_tick(new_time); diff --git a/tokio/src/runtime/time/tests/mod.rs b/tokio/src/runtime/time/tests/mod.rs index e7ab222ef63..520dc00a462 100644 --- a/tokio/src/runtime/time/tests/mod.rs +++ b/tokio/src/runtime/time/tests/mod.rs @@ -49,7 +49,7 @@ fn single_timer() { let handle_ = handle.clone(); let jh = thread::spawn(move || { let entry = TimerEntry::new( - &handle_.inner, + handle_.inner.clone(), handle_.inner.driver().clock().now() + Duration::from_secs(1), ); pin!(entry); @@ -83,7 +83,7 @@ fn drop_timer() { let handle_ = handle.clone(); let jh = thread::spawn(move || { let entry = TimerEntry::new( - &handle_.inner, + handle_.inner.clone(), handle_.inner.driver().clock().now() + Duration::from_secs(1), ); pin!(entry); @@ -117,7 +117,7 @@ fn change_waker() { let handle_ = handle.clone(); let jh = thread::spawn(move || { let entry = TimerEntry::new( - &handle_.inner, + handle_.inner.clone(), handle_.inner.driver().clock().now() + Duration::from_secs(1), ); pin!(entry); @@ -157,7 +157,7 @@ fn reset_future() { let start = handle.inner.driver().clock().now(); let jh = thread::spawn(move || { - let entry = TimerEntry::new(&handle_.inner, start + Duration::from_secs(1)); + let entry = 
TimerEntry::new(handle_.inner.clone(), start + Duration::from_secs(1)); pin!(entry); let _ = entry @@ -219,7 +219,7 @@ fn poll_process_levels() { for i in 0..normal_or_miri(1024, 64) { let mut entry = Box::pin(TimerEntry::new( - &handle.inner, + handle.inner.clone(), handle.inner.driver().clock().now() + Duration::from_millis(i), )); @@ -253,7 +253,7 @@ fn poll_process_levels_targeted() { let handle = rt.handle(); let e1 = TimerEntry::new( - &handle.inner, + handle.inner.clone(), handle.inner.driver().clock().now() + Duration::from_millis(193), ); pin!(e1); diff --git a/tokio/src/time/sleep.rs b/tokio/src/time/sleep.rs index 36f6e83c6b1..9223396fe54 100644 --- a/tokio/src/time/sleep.rs +++ b/tokio/src/time/sleep.rs @@ -254,12 +254,11 @@ impl Sleep { location: Option<&'static Location<'static>>, ) -> Sleep { use crate::runtime::scheduler; - let handle = scheduler::Handle::current(); - let entry = TimerEntry::new(&handle, deadline); - + let entry = TimerEntry::new(handle, deadline); #[cfg(all(tokio_unstable, feature = "tracing"))] let inner = { + let handle = scheduler::Handle::current(); let clock = handle.driver().clock(); let handle = &handle.driver().time(); let time_source = handle.time_source(); From cdf9d997dc79ac51fbe376edd04136251e73c403 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Sat, 4 May 2024 22:32:31 +0200 Subject: [PATCH 125/162] chore: prepare tokio-util v0.7.11 (#6535) --- tokio-util/CHANGELOG.md | 33 +++++++++++++++++++++++++++++++++ tokio-util/Cargo.toml | 2 +- 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/tokio-util/CHANGELOG.md b/tokio-util/CHANGELOG.md index b98092c4eb0..729c0352df6 100644 --- a/tokio-util/CHANGELOG.md +++ b/tokio-util/CHANGELOG.md @@ -1,3 +1,36 @@ +# 0.7.11 (May 4th, 2024) + +This release updates the MSRV to 1.63. 
([#6126]) + +### Added + +- either: implement `Sink` for `Either` ([#6239]) +- time: add `DelayQueue::deadline` ([#6163]) +- time: add `FutureExt::timeout` ([#6276]) + +### Changed + +- codec: assert compatibility between `LengthDelimitedCodec` options ([#6414]) +- codec: make tracing feature optional for codecs ([#6434]) +- io: add `T: ?Sized` to `tokio_util::io::poll_read_buf` ([#6441]) +- sync: remove `'static` bound on `impl Sink for PollSender` ([#6397]) + +### Documented + +- codec: add examples for `FramedRead` and `FramedWrite` ([#6310]) +- codec: document cancel safety of `SinkExt::send` and `StreamExt::next` ([#6417]) + +[#6126]: https://github.com/tokio-rs/tokio/pull/6126 +[#6163]: https://github.com/tokio-rs/tokio/pull/6163 +[#6239]: https://github.com/tokio-rs/tokio/pull/6239 +[#6276]: https://github.com/tokio-rs/tokio/pull/6276 +[#6310]: https://github.com/tokio-rs/tokio/pull/6310 +[#6397]: https://github.com/tokio-rs/tokio/pull/6397 +[#6414]: https://github.com/tokio-rs/tokio/pull/6414 +[#6417]: https://github.com/tokio-rs/tokio/pull/6417 +[#6434]: https://github.com/tokio-rs/tokio/pull/6434 +[#6441]: https://github.com/tokio-rs/tokio/pull/6441 + # 0.7.10 (October 24th, 2023) ### Added diff --git a/tokio-util/Cargo.toml b/tokio-util/Cargo.toml index 47f443aeee7..13e56dd8511 100644 --- a/tokio-util/Cargo.toml +++ b/tokio-util/Cargo.toml @@ -4,7 +4,7 @@ name = "tokio-util" # - Remove path dependencies # - Update CHANGELOG.md. # - Create "tokio-util-0.7.x" git tag. 
-version = "0.7.10" +version = "0.7.11" edition = "2021" rust-version = "1.63" authors = ["Tokio Contributors "] From 75e5b3d96d126b52a510ad19e79cf1e4445770e0 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Sun, 5 May 2024 11:06:56 +0200 Subject: [PATCH 126/162] runtime: ignore `yield_calls_park_before_scheduling_again` test (#6537) --- .../tests/loom_current_thread/yield_now.rs | 17 +++++------------ .../tests/loom_multi_thread/yield_now.rs | 1 + .../tests/loom_multi_thread_alt/yield_now.rs | 1 + 3 files changed, 7 insertions(+), 12 deletions(-) diff --git a/tokio/src/runtime/tests/loom_current_thread/yield_now.rs b/tokio/src/runtime/tests/loom_current_thread/yield_now.rs index ba506e5a408..3d454209b24 100644 --- a/tokio/src/runtime/tests/loom_current_thread/yield_now.rs +++ b/tokio/src/runtime/tests/loom_current_thread/yield_now.rs @@ -1,5 +1,4 @@ use crate::runtime::park; -use crate::runtime::tests::loom_oneshot as oneshot; use crate::runtime::{self, Runtime}; #[test] @@ -8,10 +7,9 @@ fn yield_calls_park_before_scheduling_again() { let mut loom = loom::model::Builder::default(); loom.max_permutations = Some(1); loom.check(|| { - let rt = mk_runtime(2); - let (tx, rx) = oneshot::channel::<()>(); + let rt = mk_runtime(); - rt.spawn(async { + let jh = rt.spawn(async { let tid = loom::thread::current().id(); let park_count = park::current_thread_park_count(); @@ -21,17 +19,12 @@ fn yield_calls_park_before_scheduling_again() { let new_park_count = park::current_thread_park_count(); assert_eq!(park_count + 1, new_park_count); } - - tx.send(()); }); - rx.recv(); + rt.block_on(jh).unwrap(); }); } -fn mk_runtime(num_threads: usize) -> Runtime { - runtime::Builder::new_multi_thread() - .worker_threads(num_threads) - .build() - .unwrap() +fn mk_runtime() -> Runtime { + runtime::Builder::new_current_thread().build().unwrap() } diff --git a/tokio/src/runtime/tests/loom_multi_thread/yield_now.rs b/tokio/src/runtime/tests/loom_multi_thread/yield_now.rs index 
ba506e5a408..f078669ddfa 100644 --- a/tokio/src/runtime/tests/loom_multi_thread/yield_now.rs +++ b/tokio/src/runtime/tests/loom_multi_thread/yield_now.rs @@ -3,6 +3,7 @@ use crate::runtime::tests::loom_oneshot as oneshot; use crate::runtime::{self, Runtime}; #[test] +#[ignore] fn yield_calls_park_before_scheduling_again() { // Don't need to check all permutations let mut loom = loom::model::Builder::default(); diff --git a/tokio/src/runtime/tests/loom_multi_thread_alt/yield_now.rs b/tokio/src/runtime/tests/loom_multi_thread_alt/yield_now.rs index ba506e5a408..f078669ddfa 100644 --- a/tokio/src/runtime/tests/loom_multi_thread_alt/yield_now.rs +++ b/tokio/src/runtime/tests/loom_multi_thread_alt/yield_now.rs @@ -3,6 +3,7 @@ use crate::runtime::tests::loom_oneshot as oneshot; use crate::runtime::{self, Runtime}; #[test] +#[ignore] fn yield_calls_park_before_scheduling_again() { // Don't need to check all permutations let mut loom = loom::model::Builder::default(); From 7f59a6ea859141e9da76dfd77605256a6224ff2c Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Sun, 5 May 2024 17:18:37 +0200 Subject: [PATCH 127/162] tests: remove tests module from integration tests (#6540) --- tokio/tests/task_builder.rs | 135 ++++++++++++++++++------------------ 1 file changed, 67 insertions(+), 68 deletions(-) diff --git a/tokio/tests/task_builder.rs b/tokio/tests/task_builder.rs index 78329ff26a4..c700f229f9f 100644 --- a/tokio/tests/task_builder.rs +++ b/tokio/tests/task_builder.rs @@ -1,81 +1,80 @@ -#[cfg(all(tokio_unstable, feature = "tracing"))] -mod tests { - use std::rc::Rc; - use tokio::{ - task::{Builder, LocalSet}, - test, - }; +#![cfg(all(tokio_unstable, feature = "tracing"))] - #[test] - async fn spawn_with_name() { - let result = Builder::new() - .name("name") - .spawn(async { "task executed" }) - .unwrap() - .await; +use std::rc::Rc; +use tokio::{ + task::{Builder, LocalSet}, + test, +}; - assert_eq!(result.unwrap(), "task executed"); - } +#[test] +async fn spawn_with_name() 
{ + let result = Builder::new() + .name("name") + .spawn(async { "task executed" }) + .unwrap() + .await; - #[test] - async fn spawn_blocking_with_name() { - let result = Builder::new() - .name("name") - .spawn_blocking(|| "task executed") - .unwrap() - .await; + assert_eq!(result.unwrap(), "task executed"); +} + +#[test] +async fn spawn_blocking_with_name() { + let result = Builder::new() + .name("name") + .spawn_blocking(|| "task executed") + .unwrap() + .await; - assert_eq!(result.unwrap(), "task executed"); - } + assert_eq!(result.unwrap(), "task executed"); +} - #[test] - async fn spawn_local_with_name() { - let unsend_data = Rc::new("task executed"); - let result = LocalSet::new() - .run_until(async move { - Builder::new() - .name("name") - .spawn_local(async move { unsend_data }) - .unwrap() - .await - }) - .await; +#[test] +async fn spawn_local_with_name() { + let unsend_data = Rc::new("task executed"); + let result = LocalSet::new() + .run_until(async move { + Builder::new() + .name("name") + .spawn_local(async move { unsend_data }) + .unwrap() + .await + }) + .await; - assert_eq!(*result.unwrap(), "task executed"); - } + assert_eq!(*result.unwrap(), "task executed"); +} - #[test] - async fn spawn_without_name() { - let result = Builder::new() - .spawn(async { "task executed" }) - .unwrap() - .await; +#[test] +async fn spawn_without_name() { + let result = Builder::new() + .spawn(async { "task executed" }) + .unwrap() + .await; - assert_eq!(result.unwrap(), "task executed"); - } + assert_eq!(result.unwrap(), "task executed"); +} - #[test] - async fn spawn_blocking_without_name() { - let result = Builder::new() - .spawn_blocking(|| "task executed") - .unwrap() - .await; +#[test] +async fn spawn_blocking_without_name() { + let result = Builder::new() + .spawn_blocking(|| "task executed") + .unwrap() + .await; - assert_eq!(result.unwrap(), "task executed"); - } + assert_eq!(result.unwrap(), "task executed"); +} - #[test] - async fn spawn_local_without_name() 
{ - let unsend_data = Rc::new("task executed"); - let result = LocalSet::new() - .run_until(async move { - Builder::new() - .spawn_local(async move { unsend_data }) - .unwrap() - .await - }) - .await; +#[test] +async fn spawn_local_without_name() { + let unsend_data = Rc::new("task executed"); + let result = LocalSet::new() + .run_until(async move { + Builder::new() + .spawn_local(async move { unsend_data }) + .unwrap() + .await + }) + .await; - assert_eq!(*result.unwrap(), "task executed"); - } + assert_eq!(*result.unwrap(), "task executed"); } From b652a4e64c3547af62c91f55d8fcbc3be0333d6e Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Sun, 5 May 2024 17:18:49 +0200 Subject: [PATCH 128/162] util: no default features for hashbrown (#6541) --- tokio-util/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio-util/Cargo.toml b/tokio-util/Cargo.toml index 13e56dd8511..a33e9c9cff7 100644 --- a/tokio-util/Cargo.toml +++ b/tokio-util/Cargo.toml @@ -45,7 +45,7 @@ slab = { version = "0.4.4", optional = true } # Backs `DelayQueue` tracing = { version = "0.1.25", default-features = false, features = ["std"], optional = true } [target.'cfg(tokio_unstable)'.dependencies] -hashbrown = { version = "0.14.0", optional = true } +hashbrown = { version = "0.14.0", default-features = false, optional = true } [dev-dependencies] tokio = { version = "1.0.0", path = "../tokio", features = ["full"] } From 2a0df5fb05ae1a624fe2f6db756190f41812214b Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Sun, 5 May 2024 17:43:11 +0200 Subject: [PATCH 129/162] ci: bump nightly to nightly-2024-05-05 (#6538) --- .cirrus.yml | 2 +- .github/workflows/ci.yml | 19 ++++++++++++++++++- target-specs/README.md | 9 +++++++++ target-specs/i686-unknown-linux-gnu.json | 22 ++++++++++++++-------- tokio-macros/src/lib.rs | 1 + tokio-stream/src/lib.rs | 1 + tokio-test/src/lib.rs | 1 + tokio-util/src/codec/length_delimited.rs | 1 - tokio-util/src/lib.rs | 1 + 
tokio-util/tests/task_join_map.rs | 1 + tokio/src/lib.rs | 9 +++------ tokio/src/signal/mod.rs | 2 ++ tokio/src/signal/unix.rs | 2 ++ tokio/tests/_require_full.rs | 2 ++ tokio/tests/dump.rs | 1 + tokio/tests/fs_open_options.rs | 7 ++++++- tokio/tests/macros_select.rs | 1 + tokio/tests/rt_basic.rs | 1 + tokio/tests/rt_common.rs | 1 + tokio/tests/rt_handle.rs | 1 + tokio/tests/rt_metrics.rs | 1 + tokio/tests/rt_threaded.rs | 1 + tokio/tests/rt_threaded_alt.rs | 1 + tokio/tests/task_abort.rs | 2 ++ tokio/tests/task_builder.rs | 1 + tokio/tests/task_id.rs | 1 + tokio/tests/task_join_set.rs | 1 + tokio/tests/task_local_set.rs | 1 + tokio/tests/task_yield_now.rs | 1 + 29 files changed, 77 insertions(+), 18 deletions(-) create mode 100644 target-specs/README.md diff --git a/.cirrus.yml b/.cirrus.yml index 8aea3efa74b..bdf3af74098 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -4,7 +4,7 @@ freebsd_instance: image_family: freebsd-14-0 env: RUST_STABLE: stable - RUST_NIGHTLY: nightly-2023-10-21 + RUST_NIGHTLY: nightly-2024-05-05 RUSTFLAGS: -D warnings # Test FreeBSD in a full VM on cirrus-ci.com. 
Test the i686 target too, in the diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4f5e9004959..1d799a54083 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -15,7 +15,7 @@ env: RUST_BACKTRACE: 1 # Change to specific Rust release to pin rust_stable: stable - rust_nightly: nightly-2023-10-21 + rust_nightly: nightly-2024-05-05 rust_clippy: '1.77' # When updating this, also update: # - README.md @@ -995,6 +995,23 @@ jobs: run: cargo check-external-types --all-features working-directory: tokio + check-unexpected-lints-cfgs: + name: check unexpected lints and cfgs + needs: basics + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Install Rust ${{ env.rust_nightly }} + uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{ env.rust_nightly }} + - name: don't allow warnings + run: sed -i '/#!\[allow(unknown_lints, unexpected_cfgs)\]/d' */src/lib.rs */tests/*.rs + - name: check for unknown lints and cfgs + run: cargo check --all-features --tests + env: + RUSTFLAGS: -Dwarnings --check-cfg=cfg(loom,tokio_unstable,tokio_taskdump,fuzzing,mio_unsupported_force_poll_poll,tokio_internal_mt_counters,fs,tokio_no_parking_lot,tokio_no_tuning_tests) -Funexpected_cfgs -Funknown_lints + check-fuzzing: name: check-fuzzing needs: basics diff --git a/target-specs/README.md b/target-specs/README.md new file mode 100644 index 00000000000..f5db16b78c5 --- /dev/null +++ b/target-specs/README.md @@ -0,0 +1,9 @@ +This is used for the `no-atomic-u64-test` ci check that verifies that Tokio +works even if the `AtomicU64` type is missing. 
+ +When increasing the nightly compiler version, you may need to regenerate this +target using the following command: +``` +rustc +nightly -Z unstable-options --print target-spec-json --target i686-unknown-linux-gnu | grep -v 'is-builtin' | sed 's/"max-atomic-width": 64/"max-atomic-width": 32/' > target-specs/i686-unknown-linux-gnu.json +``` + diff --git a/target-specs/i686-unknown-linux-gnu.json b/target-specs/i686-unknown-linux-gnu.json index 4eebe7afb57..7a70e7474f3 100644 --- a/target-specs/i686-unknown-linux-gnu.json +++ b/target-specs/i686-unknown-linux-gnu.json @@ -1,29 +1,35 @@ { "arch": "x86", "cpu": "pentium4", + "crt-objects-fallback": "false", "crt-static-respected": true, - "data-layout": "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-f64:32:64-f80:32-n8:16:32-S128", + "data-layout": "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-i128:128-f64:32:64-f80:32-n8:16:32-S128", "dynamic-linking": true, "env": "gnu", "has-rpath": true, "has-thread-local": true, + "linker-flavor": "gnu-cc", "llvm-target": "i686-unknown-linux-gnu", "max-atomic-width": 32, + "metadata": { + "description": null, + "host_tools": null, + "std": null, + "tier": null + }, "os": "linux", "position-independent-executables": true, "pre-link-args": { - "gcc": [ + "gnu-cc": [ + "-m32" + ], + "gnu-lld-cc": [ "-m32" ] }, "relro-level": "full", "stack-probes": { - "kind": "inline-or-call", - "min-llvm-version-for-inline": [ - 16, - 0, - 0 - ] + "kind": "inline" }, "supported-sanitizers": [ "address" diff --git a/tokio-macros/src/lib.rs b/tokio-macros/src/lib.rs index ab7b083d381..c108d8c46a2 100644 --- a/tokio-macros/src/lib.rs +++ b/tokio-macros/src/lib.rs @@ -1,3 +1,4 @@ +#![allow(unknown_lints, unexpected_cfgs)] #![allow(clippy::needless_doctest_main)] #![warn( missing_debug_implementations, diff --git a/tokio-stream/src/lib.rs b/tokio-stream/src/lib.rs index 6ff1085a552..11ccd8c6aee 100644 --- a/tokio-stream/src/lib.rs +++ b/tokio-stream/src/lib.rs @@ -1,3 +1,4 @@ 
+#![allow(unknown_lints, unexpected_cfgs)] #![allow( clippy::cognitive_complexity, clippy::large_enum_variant, diff --git a/tokio-test/src/lib.rs b/tokio-test/src/lib.rs index 87e63861210..9f60faf7952 100644 --- a/tokio-test/src/lib.rs +++ b/tokio-test/src/lib.rs @@ -1,3 +1,4 @@ +#![allow(unknown_lints, unexpected_cfgs)] #![warn( missing_debug_implementations, missing_docs, diff --git a/tokio-util/src/codec/length_delimited.rs b/tokio-util/src/codec/length_delimited.rs index 92d76b2cd28..c37f5863bd4 100644 --- a/tokio-util/src/codec/length_delimited.rs +++ b/tokio-util/src/codec/length_delimited.rs @@ -639,7 +639,6 @@ mod builder { impl LengthFieldType for u64 {} #[cfg(any( - target_pointer_width = "8", target_pointer_width = "16", target_pointer_width = "32", target_pointer_width = "64", diff --git a/tokio-util/src/lib.rs b/tokio-util/src/lib.rs index 1df4de1b459..34f69fd14e3 100644 --- a/tokio-util/src/lib.rs +++ b/tokio-util/src/lib.rs @@ -1,3 +1,4 @@ +#![allow(unknown_lints, unexpected_cfgs)] #![allow(clippy::needless_doctest_main)] #![warn( missing_debug_implementations, diff --git a/tokio-util/tests/task_join_map.rs b/tokio-util/tests/task_join_map.rs index 1ab5f9ba832..8757f8b5c6e 100644 --- a/tokio-util/tests/task_join_map.rs +++ b/tokio-util/tests/task_join_map.rs @@ -1,3 +1,4 @@ +#![allow(unknown_lints, unexpected_cfgs)] #![warn(rust_2018_idioms)] #![cfg(all(feature = "rt", tokio_unstable))] diff --git a/tokio/src/lib.rs b/tokio/src/lib.rs index 52b098986fb..f15f8763e36 100644 --- a/tokio/src/lib.rs +++ b/tokio/src/lib.rs @@ -1,3 +1,4 @@ +#![allow(unknown_lints, unexpected_cfgs)] #![allow( clippy::cognitive_complexity, clippy::large_enum_variant, @@ -446,13 +447,9 @@ // least 32 bits, which a lot of components in Tokio currently assumes. // // TODO: improve once we have MSRV access to const eval to make more flexible. 
-#[cfg(not(any( - target_pointer_width = "32", - target_pointer_width = "64", - target_pointer_width = "128" -)))] +#[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))] compile_error! { - "Tokio requires the platform pointer width to be 32, 64, or 128 bits" + "Tokio requires the platform pointer width to be at least 32 bits" } #[cfg(all( diff --git a/tokio/src/signal/mod.rs b/tokio/src/signal/mod.rs index 59f71db0e46..5778f22ed12 100644 --- a/tokio/src/signal/mod.rs +++ b/tokio/src/signal/mod.rs @@ -45,7 +45,9 @@ use crate::sync::watch::Receiver; use std::task::{Context, Poll}; +#[cfg(feature = "signal")] mod ctrl_c; +#[cfg(feature = "signal")] pub use ctrl_c::ctrl_c; pub(crate) mod registry; diff --git a/tokio/src/signal/unix.rs b/tokio/src/signal/unix.rs index 52a9cbaac40..c4a196a660f 100644 --- a/tokio/src/signal/unix.rs +++ b/tokio/src/signal/unix.rs @@ -485,10 +485,12 @@ impl Signal { } // Work around for abstracting streams internally +#[cfg(feature = "process")] pub(crate) trait InternalStream { fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll>; } +#[cfg(feature = "process")] impl InternalStream for Signal { fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll> { self.poll_recv(cx) diff --git a/tokio/tests/_require_full.rs b/tokio/tests/_require_full.rs index d33943a960d..81c25179615 100644 --- a/tokio/tests/_require_full.rs +++ b/tokio/tests/_require_full.rs @@ -1,3 +1,5 @@ +#![allow(unknown_lints, unexpected_cfgs)] + #[cfg(not(any(feature = "full", target_family = "wasm")))] compile_error!("run main Tokio tests with `--features full`"); diff --git a/tokio/tests/dump.rs b/tokio/tests/dump.rs index c946f38436c..68b53aaf291 100644 --- a/tokio/tests/dump.rs +++ b/tokio/tests/dump.rs @@ -1,3 +1,4 @@ +#![allow(unknown_lints, unexpected_cfgs)] #![cfg(all( tokio_unstable, tokio_taskdump, diff --git a/tokio/tests/fs_open_options.rs b/tokio/tests/fs_open_options.rs index 41cfb45460c..84b63a504cf 100644 --- a/tokio/tests/fs_open_options.rs 
+++ b/tokio/tests/fs_open_options.rs @@ -55,8 +55,13 @@ async fn open_options_create_new() { #[tokio::test] #[cfg(unix)] async fn open_options_mode() { + let mode = format!("{:?}", OpenOptions::new().mode(0o644)); // TESTING HACK: use Debug output to check the stored data - assert!(format!("{:?}", OpenOptions::new().mode(0o644)).contains("mode: 420 ")); + assert!( + mode.contains("mode: 420 ") || mode.contains("mode: 0o000644 "), + "mode is: {}", + mode + ); } #[tokio::test] diff --git a/tokio/tests/macros_select.rs b/tokio/tests/macros_select.rs index f65cbdf2267..cbad971ab1f 100644 --- a/tokio/tests/macros_select.rs +++ b/tokio/tests/macros_select.rs @@ -1,3 +1,4 @@ +#![allow(unknown_lints, unexpected_cfgs)] #![cfg(feature = "macros")] #![allow(clippy::disallowed_names)] diff --git a/tokio/tests/rt_basic.rs b/tokio/tests/rt_basic.rs index 47bf2dfdc12..a5204bd83f7 100644 --- a/tokio/tests/rt_basic.rs +++ b/tokio/tests/rt_basic.rs @@ -1,3 +1,4 @@ +#![allow(unknown_lints, unexpected_cfgs)] #![warn(rust_2018_idioms)] #![cfg(feature = "full")] diff --git a/tokio/tests/rt_common.rs b/tokio/tests/rt_common.rs index a71fc4a735e..75a20057166 100644 --- a/tokio/tests/rt_common.rs +++ b/tokio/tests/rt_common.rs @@ -1,3 +1,4 @@ +#![allow(unknown_lints, unexpected_cfgs)] #![allow(clippy::needless_range_loop)] #![warn(rust_2018_idioms)] #![cfg(feature = "full")] diff --git a/tokio/tests/rt_handle.rs b/tokio/tests/rt_handle.rs index 92fa777e321..9efe9b4bde9 100644 --- a/tokio/tests/rt_handle.rs +++ b/tokio/tests/rt_handle.rs @@ -1,3 +1,4 @@ +#![allow(unknown_lints, unexpected_cfgs)] #![warn(rust_2018_idioms)] #![cfg(feature = "full")] diff --git a/tokio/tests/rt_metrics.rs b/tokio/tests/rt_metrics.rs index 4dfed06fed4..58869c530ae 100644 --- a/tokio/tests/rt_metrics.rs +++ b/tokio/tests/rt_metrics.rs @@ -1,3 +1,4 @@ +#![allow(unknown_lints, unexpected_cfgs)] #![warn(rust_2018_idioms)] #![cfg(all(feature = "full", tokio_unstable, not(target_os = "wasi")))] diff --git 
a/tokio/tests/rt_threaded.rs b/tokio/tests/rt_threaded.rs index 6e769fc831f..26690550f93 100644 --- a/tokio/tests/rt_threaded.rs +++ b/tokio/tests/rt_threaded.rs @@ -1,3 +1,4 @@ +#![allow(unknown_lints, unexpected_cfgs)] #![warn(rust_2018_idioms)] #![cfg(all(feature = "full", not(target_os = "wasi")))] diff --git a/tokio/tests/rt_threaded_alt.rs b/tokio/tests/rt_threaded_alt.rs index 8b7143b2f97..9eed1fe78b6 100644 --- a/tokio/tests/rt_threaded_alt.rs +++ b/tokio/tests/rt_threaded_alt.rs @@ -1,3 +1,4 @@ +#![allow(unknown_lints, unexpected_cfgs)] #![warn(rust_2018_idioms)] #![cfg(all(feature = "full", not(target_os = "wasi")))] #![cfg(tokio_unstable)] diff --git a/tokio/tests/task_abort.rs b/tokio/tests/task_abort.rs index 481cc96f2ef..09414f094ed 100644 --- a/tokio/tests/task_abort.rs +++ b/tokio/tests/task_abort.rs @@ -7,8 +7,10 @@ use tokio::time::Duration; use tokio::runtime::Builder; +#[cfg(panic = "unwind")] struct PanicOnDrop; +#[cfg(panic = "unwind")] impl Drop for PanicOnDrop { fn drop(&mut self) { panic!("Well what did you expect would happen..."); diff --git a/tokio/tests/task_builder.rs b/tokio/tests/task_builder.rs index c700f229f9f..4d1248500ab 100644 --- a/tokio/tests/task_builder.rs +++ b/tokio/tests/task_builder.rs @@ -1,3 +1,4 @@ +#![allow(unknown_lints, unexpected_cfgs)] #![cfg(all(tokio_unstable, feature = "tracing"))] use std::rc::Rc; diff --git a/tokio/tests/task_id.rs b/tokio/tests/task_id.rs index 95e48f4901d..574a050b0c1 100644 --- a/tokio/tests/task_id.rs +++ b/tokio/tests/task_id.rs @@ -1,3 +1,4 @@ +#![allow(unknown_lints, unexpected_cfgs)] #![warn(rust_2018_idioms)] #![cfg(all(feature = "full", tokio_unstable))] diff --git a/tokio/tests/task_join_set.rs b/tokio/tests/task_join_set.rs index 8a42be17b49..e87135337ba 100644 --- a/tokio/tests/task_join_set.rs +++ b/tokio/tests/task_join_set.rs @@ -1,3 +1,4 @@ +#![allow(unknown_lints, unexpected_cfgs)] #![warn(rust_2018_idioms)] #![cfg(feature = "full")] diff --git 
a/tokio/tests/task_local_set.rs b/tokio/tests/task_local_set.rs index d965eb341eb..ac46291a36c 100644 --- a/tokio/tests/task_local_set.rs +++ b/tokio/tests/task_local_set.rs @@ -1,3 +1,4 @@ +#![allow(unknown_lints, unexpected_cfgs)] #![warn(rust_2018_idioms)] #![cfg(feature = "full")] diff --git a/tokio/tests/task_yield_now.rs b/tokio/tests/task_yield_now.rs index b16bca52819..3cb8cb16e70 100644 --- a/tokio/tests/task_yield_now.rs +++ b/tokio/tests/task_yield_now.rs @@ -1,3 +1,4 @@ +#![allow(unknown_lints, unexpected_cfgs)] #![cfg(all(feature = "full", tokio_unstable))] use tokio::task; From 6fcd9c02176bf3cd570bc7de88edaa3b95ea480a Mon Sep 17 00:00:00 2001 From: Kezhu Wang Date: Mon, 6 May 2024 00:28:59 +0800 Subject: [PATCH 130/162] macros: make #[tokio::test] append #[test] at the end of the attribute list (#6497) --- .../tests/fail/macros_invalid_input.rs | 20 +++++++++ .../tests/fail/macros_invalid_input.stderr | 38 ++++++++++++---- tokio-macros/src/entry.rs | 44 ++++++++++++++++--- 3 files changed, 86 insertions(+), 16 deletions(-) diff --git a/tests-build/tests/fail/macros_invalid_input.rs b/tests-build/tests/fail/macros_invalid_input.rs index 99daf00b4f9..85b4924f225 100644 --- a/tests-build/tests/fail/macros_invalid_input.rs +++ b/tests-build/tests/fail/macros_invalid_input.rs @@ -45,4 +45,24 @@ async fn test_crate_not_path_invalid() {} #[test] async fn test_has_second_test_attr() {} +#[tokio::test] +#[::core::prelude::v1::test] +async fn test_has_second_test_attr_v1() {} + +#[tokio::test] +#[core::prelude::rust_2015::test] +async fn test_has_second_test_attr_rust_2015() {} + +#[tokio::test] +#[::std::prelude::rust_2018::test] +async fn test_has_second_test_attr_rust_2018() {} + +#[tokio::test] +#[std::prelude::rust_2021::test] +async fn test_has_second_test_attr_rust_2021() {} + +#[tokio::test] +#[tokio::test] +async fn test_has_generated_second_test_attr() {} + fn main() {} diff --git a/tests-build/tests/fail/macros_invalid_input.stderr 
b/tests-build/tests/fail/macros_invalid_input.stderr index 0c8d65fc159..11f95315cdf 100644 --- a/tests-build/tests/fail/macros_invalid_input.stderr +++ b/tests-build/tests/fail/macros_invalid_input.stderr @@ -76,20 +76,40 @@ error: Failed to parse value of `crate` as path: "456" 41 | #[tokio::test(crate = "456")] | ^^^^^ -error: second test attribute is supplied +error: second test attribute is supplied, consider removing or changing the order of your test attributes --> $DIR/macros_invalid_input.rs:45:1 | 45 | #[test] | ^^^^^^^ -error: duplicated attribute - --> $DIR/macros_invalid_input.rs:45:1 +error: second test attribute is supplied, consider removing or changing the order of your test attributes + --> $DIR/macros_invalid_input.rs:49:1 | -45 | #[test] - | ^^^^^^^ +49 | #[::core::prelude::v1::test] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error: second test attribute is supplied, consider removing or changing the order of your test attributes + --> $DIR/macros_invalid_input.rs:53:1 + | +53 | #[core::prelude::rust_2015::test] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error: second test attribute is supplied, consider removing or changing the order of your test attributes + --> $DIR/macros_invalid_input.rs:57:1 + | +57 | #[::std::prelude::rust_2018::test] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error: second test attribute is supplied, consider removing or changing the order of your test attributes + --> $DIR/macros_invalid_input.rs:61:1 + | +61 | #[std::prelude::rust_2021::test] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error: second test attribute is supplied, consider removing or changing the order of your test attributes + --> $DIR/macros_invalid_input.rs:64:1 | -note: the lint level is defined here - --> $DIR/macros_invalid_input.rs:1:9 +64 | #[tokio::test] + | ^^^^^^^^^^^^^^ | -1 | #![deny(duplicate_macro_attributes)] - | ^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: this error originates in the attribute macro `tokio::test` (in Nightly builds, run with -Z macro-backtrace 
for more info) diff --git a/tokio-macros/src/entry.rs b/tokio-macros/src/entry.rs index 20cbdb1c92a..8858c8a1674 100644 --- a/tokio-macros/src/entry.rs +++ b/tokio-macros/src/entry.rs @@ -360,7 +360,7 @@ fn parse_knobs(mut input: ItemFn, is_test: bool, config: FinalConfig) -> TokenSt rt = quote_spanned! {last_stmt_start_span=> #rt.start_paused(#v) }; } - let header = if is_test { + let generated_attrs = if is_test { quote! { #[::core::prelude::v1::test] } @@ -410,7 +410,7 @@ fn parse_knobs(mut input: ItemFn, is_test: bool, config: FinalConfig) -> TokenSt } }; - input.into_tokens(header, body, last_block) + input.into_tokens(generated_attrs, body, last_block) } fn token_stream_with_error(mut tokens: TokenStream, error: syn::Error) -> TokenStream { @@ -442,6 +442,35 @@ pub(crate) fn main(args: TokenStream, item: TokenStream, rt_multi_thread: bool) } } +// Check whether given attribute is a test attribute of forms: +// * `#[test]` +// * `#[core::prelude::*::test]` or `#[::core::prelude::*::test]` +// * `#[std::prelude::*::test]` or `#[::std::prelude::*::test]` +fn is_test_attribute(attr: &Attribute) -> bool { + let path = match &attr.meta { + syn::Meta::Path(path) => path, + _ => return false, + }; + let candidates = [ + ["core", "prelude", "*", "test"], + ["std", "prelude", "*", "test"], + ]; + if path.leading_colon.is_none() + && path.segments.len() == 1 + && path.segments[0].arguments.is_none() + && path.segments[0].ident == "test" + { + return true; + } else if path.segments.len() != candidates[0].len() { + return false; + } + candidates.into_iter().any(|segments| { + path.segments.iter().zip(segments).all(|(segment, path)| { + segment.arguments.is_none() && (path == "*" || segment.ident == path) + }) + }) +} + pub(crate) fn test(args: TokenStream, item: TokenStream, rt_multi_thread: bool) -> TokenStream { // If any of the steps for this macro fail, we still want to expand to an item that is as close // to the expected output as possible. 
This helps out IDEs such that completions and other @@ -450,8 +479,8 @@ pub(crate) fn test(args: TokenStream, item: TokenStream, rt_multi_thread: bool) Ok(it) => it, Err(e) => return token_stream_with_error(item, e), }; - let config = if let Some(attr) = input.attrs().find(|attr| attr.meta.path().is_ident("test")) { - let msg = "second test attribute is supplied"; + let config = if let Some(attr) = input.attrs().find(|attr| is_test_attribute(attr)) { + let msg = "second test attribute is supplied, consider removing or changing the order of your test attributes"; Err(syn::Error::new_spanned(attr, msg)) } else { AttributeArgs::parse_terminated @@ -492,13 +521,11 @@ impl ItemFn { /// Convert our local function item into a token stream. fn into_tokens( self, - header: proc_macro2::TokenStream, + generated_attrs: proc_macro2::TokenStream, body: proc_macro2::TokenStream, last_block: proc_macro2::TokenStream, ) -> TokenStream { let mut tokens = proc_macro2::TokenStream::new(); - header.to_tokens(&mut tokens); - // Outer attributes are simply streamed as-is. for attr in self.outer_attrs { attr.to_tokens(&mut tokens); @@ -512,6 +539,9 @@ impl ItemFn { attr.to_tokens(&mut tokens); } + // Add generated macros at the end, so macros processed later are aware of them. 
+ generated_attrs.to_tokens(&mut tokens); + self.vis.to_tokens(&mut tokens); self.sig.to_tokens(&mut tokens); From 227979f0918b2fa8f70ac6040d6281a9e1365641 Mon Sep 17 00:00:00 2001 From: Sebastian Schildt Date: Tue, 14 May 2024 20:11:37 +0900 Subject: [PATCH 131/162] net: support QNX OS (#6421) Co-authored-by: Akhil Thankachan Thomas --- tokio/src/net/unix/ucred.rs | 4 ++-- tokio/src/process/mod.rs | 4 ++++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/tokio/src/net/unix/ucred.rs b/tokio/src/net/unix/ucred.rs index 3390819160a..bcd1c755f6a 100644 --- a/tokio/src/net/unix/ucred.rs +++ b/tokio/src/net/unix/ucred.rs @@ -39,7 +39,7 @@ impl UCred { ))] pub(crate) use self::impl_linux::get_peer_cred; -#[cfg(target_os = "netbsd")] +#[cfg(any(target_os = "netbsd", target_os = "nto"))] pub(crate) use self::impl_netbsd::get_peer_cred; #[cfg(any(target_os = "dragonfly", target_os = "freebsd"))] @@ -120,7 +120,7 @@ pub(crate) mod impl_linux { } } -#[cfg(target_os = "netbsd")] +#[cfg(any(target_os = "netbsd", target_os = "nto"))] pub(crate) mod impl_netbsd { use crate::net::unix::{self, UnixStream}; diff --git a/tokio/src/process/mod.rs b/tokio/src/process/mod.rs index 0fad67cd01a..fc661d89c1f 100644 --- a/tokio/src/process/mod.rs +++ b/tokio/src/process/mod.rs @@ -672,6 +672,8 @@ impl Command { #[cfg(unix)] #[cfg_attr(docsrs, doc(cfg(unix)))] pub fn uid(&mut self, id: u32) -> &mut Command { + #[cfg(target_os = "nto")] + let id = id as i32; self.std.uid(id); self } @@ -681,6 +683,8 @@ impl Command { #[cfg(unix)] #[cfg_attr(docsrs, doc(cfg(unix)))] pub fn gid(&mut self, id: u32) -> &mut Command { + #[cfg(target_os = "nto")] + let id = id as i32; self.std.gid(id); self } From d085260ee02f25c4a56cfcd9d0b57fa15840aebb Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Tue, 14 May 2024 23:01:49 +0200 Subject: [PATCH 132/162] metrics: fix flaky injection_queue_depth test (#6559) --- tokio/tests/rt_metrics.rs | 46 +++++++++++++++------------------------ 1 file changed, 18 
insertions(+), 28 deletions(-) diff --git a/tokio/tests/rt_metrics.rs b/tokio/tests/rt_metrics.rs index 58869c530ae..7f0c9ad8052 100644 --- a/tokio/tests/rt_metrics.rs +++ b/tokio/tests/rt_metrics.rs @@ -3,7 +3,7 @@ #![cfg(all(feature = "full", tokio_unstable, not(target_os = "wasi")))] use std::future::Future; -use std::sync::{Arc, Mutex}; +use std::sync::{Arc, Barrier, Mutex}; use std::task::Poll; use tokio::macros::support::poll_fn; @@ -504,7 +504,7 @@ fn worker_overflow_count() { } #[test] -fn injection_queue_depth() { +fn injection_queue_depth_current_thread() { use std::thread; let rt = current_thread(); @@ -518,44 +518,34 @@ fn injection_queue_depth() { .unwrap(); assert_eq!(1, metrics.injection_queue_depth()); +} +#[test] +fn injection_queue_depth_multi_thread() { let rt = threaded(); - let handle = rt.handle().clone(); let metrics = rt.metrics(); - // First we need to block the runtime workers - let (tx1, rx1) = std::sync::mpsc::channel(); - let (tx2, rx2) = std::sync::mpsc::channel(); - let (tx3, rx3) = std::sync::mpsc::channel(); - let rx3 = Arc::new(Mutex::new(rx3)); + let barrier1 = Arc::new(Barrier::new(3)); + let barrier2 = Arc::new(Barrier::new(3)); - rt.spawn(async move { rx1.recv().unwrap() }); - rt.spawn(async move { rx2.recv().unwrap() }); - - // Spawn some more to make sure there are items - for _ in 0..10 { - let rx = rx3.clone(); + // Spawn a task per runtime worker to block it. 
+ for _ in 0..2 { + let barrier1 = barrier1.clone(); + let barrier2 = barrier2.clone(); rt.spawn(async move { - rx.lock().unwrap().recv().unwrap(); + barrier1.wait(); + barrier2.wait(); }); } - thread::spawn(move || { - handle.spawn(async {}); - }) - .join() - .unwrap(); + barrier1.wait(); - let n = metrics.injection_queue_depth(); - assert!(1 <= n, "{}", n); - assert!(15 >= n, "{}", n); - - for _ in 0..10 { - tx3.send(()).unwrap(); + for i in 0..10 { + assert_eq!(i, metrics.injection_queue_depth()); + rt.spawn(async {}); } - tx1.send(()).unwrap(); - tx2.send(()).unwrap(); + barrier2.wait(); } #[test] From df7706348554a039946925f7492d98e31bd46a85 Mon Sep 17 00:00:00 2001 From: Weijia Jiang Date: Wed, 15 May 2024 15:32:39 +0800 Subject: [PATCH 133/162] time: remove the `true_when` field in `TimerShared` (#6563) --- tokio/src/runtime/time/entry.rs | 5 ----- 1 file changed, 5 deletions(-) diff --git a/tokio/src/runtime/time/entry.rs b/tokio/src/runtime/time/entry.rs index a6be0e62a13..7991ee0dc0a 100644 --- a/tokio/src/runtime/time/entry.rs +++ b/tokio/src/runtime/time/entry.rs @@ -339,9 +339,6 @@ pub(crate) struct TimerShared { /// registered. cached_when: AtomicU64, - /// The true expiration time. Set by the timer future, read by the driver. - true_when: AtomicU64, - /// Current state. This records whether the timer entry is currently under /// the ownership of the driver, and if not, its current state (not /// complete, fired, error, etc). 
@@ -356,7 +353,6 @@ unsafe impl Sync for TimerShared {} impl std::fmt::Debug for TimerShared { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("TimerShared") - .field("when", &self.true_when.load(Ordering::Relaxed)) .field("cached_when", &self.cached_when.load(Ordering::Relaxed)) .field("state", &self.state) .finish() @@ -375,7 +371,6 @@ impl TimerShared { pub(super) fn new() -> Self { Self { cached_when: AtomicU64::new(0), - true_when: AtomicU64::new(0), pointers: linked_list::Pointers::new(), state: StateCell::default(), _p: PhantomPinned, From 18e048d7646e62ce893d606996fc91b983386def Mon Sep 17 00:00:00 2001 From: William Wen <44139337+wenym1@users.noreply.github.com> Date: Wed, 15 May 2024 19:57:35 +0800 Subject: [PATCH 134/162] sync: always drop message in destructor for oneshot receiver (#6558) --- tokio/src/sync/oneshot.rs | 22 +++++++++++-- tokio/src/sync/tests/loom_oneshot.rs | 47 ++++++++++++++++++++++++++++ 2 files changed, 67 insertions(+), 2 deletions(-) diff --git a/tokio/src/sync/oneshot.rs b/tokio/src/sync/oneshot.rs index 9e8c3fcb7f7..ab29b3e3edd 100644 --- a/tokio/src/sync/oneshot.rs +++ b/tokio/src/sync/oneshot.rs @@ -1072,7 +1072,14 @@ impl Receiver { impl Drop for Receiver { fn drop(&mut self) { if let Some(inner) = self.inner.as_ref() { - inner.close(); + let state = inner.close(); + + if state.is_complete() { + // SAFETY: we have ensured that the `VALUE_SENT` bit has been set, + // so only the receiver can access the value. + drop(unsafe { inner.consume_value() }); + } + #[cfg(all(tokio_unstable, feature = "tracing"))] self.resource_span.in_scope(|| { tracing::trace!( @@ -1202,7 +1209,7 @@ impl Inner { } /// Called by `Receiver` to indicate that the value will never be received. 
- fn close(&self) { + fn close(&self) -> State { let prev = State::set_closed(&self.state); if prev.is_tx_task_set() && !prev.is_complete() { @@ -1210,6 +1217,8 @@ impl Inner { self.tx_task.with_task(Waker::wake_by_ref); } } + + prev } /// Consumes the value. This function does not check `state`. @@ -1248,6 +1257,15 @@ impl Drop for Inner { self.tx_task.drop_task(); } } + + // SAFETY: we have `&mut self`, and therefore we have + // exclusive access to the value. + unsafe { + // Note: the assertion holds because if the value has been sent by sender, + // we must ensure that the value must have been consumed by the receiver before + // dropping the `Inner`. + debug_assert!(self.consume_value().is_none()); + } } } diff --git a/tokio/src/sync/tests/loom_oneshot.rs b/tokio/src/sync/tests/loom_oneshot.rs index c5f79720794..717edcfd2a3 100644 --- a/tokio/src/sync/tests/loom_oneshot.rs +++ b/tokio/src/sync/tests/loom_oneshot.rs @@ -138,3 +138,50 @@ fn changing_tx_task() { } }); } + +#[test] +fn checking_tx_send_ok_not_drop() { + use std::borrow::Borrow; + use std::cell::Cell; + + loom::thread_local! { + static IS_RX: Cell = Cell::new(true); + } + + struct Msg; + + impl Drop for Msg { + fn drop(&mut self) { + IS_RX.with(|is_rx: &Cell<_>| { + // On `tx.send(msg)` returning `Err(msg)`, + // we call `std::mem::forget(msg)`, so that + // `drop` is not expected to be called in the + // tx thread. 
+ assert!(is_rx.get()); + }); + } + } + + let mut builder = loom::model::Builder::new(); + builder.preemption_bound = Some(2); + + builder.check(|| { + let (tx, rx) = oneshot::channel(); + + // tx thread + let tx_thread_join_handle = thread::spawn(move || { + // Ensure that `Msg::drop` in this thread will see is_rx == false + IS_RX.with(|is_rx: &Cell<_>| { + is_rx.set(false); + }); + if let Err(msg) = tx.send(Msg) { + std::mem::forget(msg); + } + }); + + // main thread is the rx thread + drop(rx); + + tx_thread_join_handle.join().unwrap(); + }); +} From ced7739f69498a702640211bfbaeddb7e864b3d2 Mon Sep 17 00:00:00 2001 From: John-John Tedro Date: Thu, 16 May 2024 10:30:48 +0200 Subject: [PATCH 135/162] tokio: use `ptr::addr_of` instead of pointer arithmetic in linked_list (#6561) --- tokio/src/util/linked_list.rs | 45 ++++++++++------------------------- 1 file changed, 12 insertions(+), 33 deletions(-) diff --git a/tokio/src/util/linked_list.rs b/tokio/src/util/linked_list.rs index 0ed2b616456..ab20292e21d 100644 --- a/tokio/src/util/linked_list.rs +++ b/tokio/src/util/linked_list.rs @@ -78,26 +78,19 @@ pub(crate) struct Pointers { /// Additionally, we never access the `prev` or `next` fields directly, as any /// such access would implicitly involve the creation of a reference to the /// field, which we want to avoid since the fields are not `!Unpin`, and would -/// hence be given the `noalias` attribute if we were to do such an access. -/// As an alternative to accessing the fields directly, the `Pointers` type +/// hence be given the `noalias` attribute if we were to do such an access. As +/// an alternative to accessing the fields directly, the `Pointers` type /// provides getters and setters for the two fields, and those are implemented -/// using raw pointer casts and offsets, which is valid since the struct is -/// #[repr(C)]. +/// using `ptr`-specific methods which avoids the creation of intermediate +/// references. 
/// /// See this link for more information: /// -#[repr(C)] struct PointersInner { /// The previous node in the list. null if there is no previous node. - /// - /// This field is accessed through pointer manipulation, so it is not dead code. - #[allow(dead_code)] prev: Option>, /// The next node in the list. null if there is no previous node. - /// - /// This field is accessed through pointer manipulation, so it is not dead code. - #[allow(dead_code)] next: Option>, /// This type is !Unpin due to the heuristic from: @@ -418,38 +411,24 @@ impl Pointers { } pub(crate) fn get_prev(&self) -> Option> { - // SAFETY: prev is the first field in PointersInner, which is #[repr(C)]. - unsafe { - let inner = self.inner.get(); - let prev = inner as *const Option>; - ptr::read(prev) - } + // SAFETY: Field is accessed immutably through a reference. + unsafe { ptr::addr_of!((*self.inner.get()).prev).read() } } pub(crate) fn get_next(&self) -> Option> { - // SAFETY: next is the second field in PointersInner, which is #[repr(C)]. - unsafe { - let inner = self.inner.get(); - let prev = inner as *const Option>; - let next = prev.add(1); - ptr::read(next) - } + // SAFETY: Field is accessed immutably through a reference. + unsafe { ptr::addr_of!((*self.inner.get()).next).read() } } fn set_prev(&mut self, value: Option>) { - // SAFETY: prev is the first field in PointersInner, which is #[repr(C)]. + // SAFETY: Field is accessed mutably through a mutable reference. unsafe { - let inner = self.inner.get(); - let prev = inner as *mut Option>; - ptr::write(prev, value); + ptr::addr_of_mut!((*self.inner.get()).prev).write(value); } } fn set_next(&mut self, value: Option>) { - // SAFETY: next is the second field in PointersInner, which is #[repr(C)]. + // SAFETY: Field is accessed mutably through a mutable reference. 
unsafe { - let inner = self.inner.get(); - let prev = inner as *mut Option>; - let next = prev.add(1); - ptr::write(next, value); + ptr::addr_of_mut!((*self.inner.get()).next).write(value); } } } From d221c500faffab409ce84780ea1e40f76581332e Mon Sep 17 00:00:00 2001 From: Russell Cohen Date: Thu, 16 May 2024 06:14:07 -0400 Subject: [PATCH 136/162] docs: gate taskdump compiler_error behind not(doc) (#6564) --- tokio/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/tokio/src/lib.rs b/tokio/src/lib.rs index f15f8763e36..bacc8192742 100644 --- a/tokio/src/lib.rs +++ b/tokio/src/lib.rs @@ -471,6 +471,7 @@ compile_error!("The `tokio_taskdump` feature requires `--cfg tokio_unstable`."); #[cfg(all( tokio_taskdump, + not(doc), not(all( target_os = "linux", any(target_arch = "aarch64", target_arch = "x86", target_arch = "x86_64") From 35c6fd99243616db0a677c6686e37082c7c68a6f Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Fri, 17 May 2024 13:03:22 +0200 Subject: [PATCH 137/162] ci: set RUSTUP_WINDOWS_PATH_ADD_BIN=1 (#6568) --- .github/workflows/ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1d799a54083..76151902576 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -13,6 +13,7 @@ concurrency: env: RUSTFLAGS: -Dwarnings RUST_BACKTRACE: 1 + RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Change to specific Rust release to pin rust_stable: stable rust_nightly: nightly-2024-05-05 From daa89017dad7cecb769d3145c4368ae491a4ac67 Mon Sep 17 00:00:00 2001 From: Michael Scholten <32096160+41Leahcim@users.noreply.github.com> Date: Sat, 18 May 2024 10:09:37 +0200 Subject: [PATCH 138/162] ci: fix new clippy warnings (#6569) --- benches/time_timeout.rs | 4 ++-- tokio-stream/src/wrappers/interval.rs | 2 +- tokio-util/src/codec/any_delimiter_codec.rs | 2 +- tokio-util/src/codec/lines_codec.rs | 2 +- tokio-util/src/time/wheel/mod.rs | 1 - tokio/src/runtime/scheduler/multi_thread/stats.rs | 13 +++---------- 
tokio/src/runtime/task/state.rs | 1 - tokio/src/sync/batch_semaphore.rs | 2 +- tokio/src/sync/broadcast.rs | 1 - tokio/src/sync/rwlock.rs | 2 +- tokio/tests/sync_broadcast.rs | 2 -- 11 files changed, 10 insertions(+), 22 deletions(-) diff --git a/benches/time_timeout.rs b/benches/time_timeout.rs index c961477562c..9dedee9ddcd 100644 --- a/benches/time_timeout.rs +++ b/benches/time_timeout.rs @@ -40,7 +40,7 @@ fn do_timeout_test(c: &mut Criterion, workers: usize, name: &str) { b.iter_custom(|iters| { let start = Instant::now(); runtime.block_on(async { - black_box(spawn_timeout_job(iters as usize, workers).await); + black_box(spawn_timeout_job(iters as usize, workers)).await; }); start.elapsed() }) @@ -77,7 +77,7 @@ fn do_sleep_test(c: &mut Criterion, workers: usize, name: &str) { b.iter_custom(|iters| { let start = Instant::now(); runtime.block_on(async { - black_box(spawn_sleep_job(iters as usize, workers).await); + black_box(spawn_sleep_job(iters as usize, workers)).await; }); start.elapsed() }) diff --git a/tokio-stream/src/wrappers/interval.rs b/tokio-stream/src/wrappers/interval.rs index 2bf0194bd0f..c7a0b1f1e2a 100644 --- a/tokio-stream/src/wrappers/interval.rs +++ b/tokio-stream/src/wrappers/interval.rs @@ -33,7 +33,7 @@ impl Stream for IntervalStream { } fn size_hint(&self) -> (usize, Option) { - (std::usize::MAX, None) + (usize::MAX, None) } } diff --git a/tokio-util/src/codec/any_delimiter_codec.rs b/tokio-util/src/codec/any_delimiter_codec.rs index 3dbfd456b0a..fc5e57582db 100644 --- a/tokio-util/src/codec/any_delimiter_codec.rs +++ b/tokio-util/src/codec/any_delimiter_codec.rs @@ -2,7 +2,7 @@ use crate::codec::decoder::Decoder; use crate::codec::encoder::Encoder; use bytes::{Buf, BufMut, Bytes, BytesMut}; -use std::{cmp, fmt, io, str, usize}; +use std::{cmp, fmt, io, str}; const DEFAULT_SEEK_DELIMITERS: &[u8] = b",;\n\r"; const DEFAULT_SEQUENCE_WRITER: &[u8] = b","; diff --git a/tokio-util/src/codec/lines_codec.rs b/tokio-util/src/codec/lines_codec.rs 
index 5a6035d13b9..0da19238b63 100644 --- a/tokio-util/src/codec/lines_codec.rs +++ b/tokio-util/src/codec/lines_codec.rs @@ -2,7 +2,7 @@ use crate::codec::decoder::Decoder; use crate::codec::encoder::Encoder; use bytes::{Buf, BufMut, BytesMut}; -use std::{cmp, fmt, io, str, usize}; +use std::{cmp, fmt, io, str}; /// A simple [`Decoder`] and [`Encoder`] implementation that splits up data into lines. /// diff --git a/tokio-util/src/time/wheel/mod.rs b/tokio-util/src/time/wheel/mod.rs index 10a9900a666..d81611c92bd 100644 --- a/tokio-util/src/time/wheel/mod.rs +++ b/tokio-util/src/time/wheel/mod.rs @@ -7,7 +7,6 @@ pub(crate) use self::stack::Stack; use std::borrow::Borrow; use std::fmt::Debug; -use std::usize; /// Timing wheel implementation. /// diff --git a/tokio/src/runtime/scheduler/multi_thread/stats.rs b/tokio/src/runtime/scheduler/multi_thread/stats.rs index 03cfc790054..9d495706e8d 100644 --- a/tokio/src/runtime/scheduler/multi_thread/stats.rs +++ b/tokio/src/runtime/scheduler/multi_thread/stats.rs @@ -1,6 +1,5 @@ use crate::runtime::{Config, MetricsBatch, WorkerMetrics}; -use std::cmp; use std::time::{Duration, Instant}; /// Per-worker statistics. This is used for both tuning the scheduler and @@ -62,15 +61,9 @@ impl Stats { // As of Rust 1.45, casts from f64 -> u32 are saturating, which is fine here. let tasks_per_interval = (TARGET_GLOBAL_QUEUE_INTERVAL / self.task_poll_time_ewma) as u32; - cmp::max( - // If we are using self-tuning, we don't want to return less than 2 as that would result in the - // global queue always getting checked first. - 2, - cmp::min( - MAX_TASKS_POLLED_PER_GLOBAL_QUEUE_INTERVAL, - tasks_per_interval, - ), - ) + // If we are using self-tuning, we don't want to return less than 2 as that would result in the + // global queue always getting checked first. 
+ tasks_per_interval.clamp(2, MAX_TASKS_POLLED_PER_GLOBAL_QUEUE_INTERVAL) } pub(crate) fn submit(&mut self, to: &WorkerMetrics) { diff --git a/tokio/src/runtime/task/state.rs b/tokio/src/runtime/task/state.rs index 42b239e05bb..0fc7bb0329b 100644 --- a/tokio/src/runtime/task/state.rs +++ b/tokio/src/runtime/task/state.rs @@ -2,7 +2,6 @@ use crate::loom::sync::atomic::AtomicUsize; use std::fmt; use std::sync::atomic::Ordering::{AcqRel, Acquire, Release}; -use std::usize; pub(super) struct State { val: AtomicUsize, diff --git a/tokio/src/sync/batch_semaphore.rs b/tokio/src/sync/batch_semaphore.rs index def5cbc9f51..a241f7922bf 100644 --- a/tokio/src/sync/batch_semaphore.rs +++ b/tokio/src/sync/batch_semaphore.rs @@ -127,7 +127,7 @@ impl Semaphore { /// implementation used three bits, so we will continue to reserve them to /// avoid a breaking change if additional flags need to be added in the /// future. - pub(crate) const MAX_PERMITS: usize = std::usize::MAX >> 3; + pub(crate) const MAX_PERMITS: usize = usize::MAX >> 3; const CLOSED: usize = 1; // The least-significant bit in the number of permits is reserved to use // as a flag indicating that the semaphore has been closed. Consequently diff --git a/tokio/src/sync/broadcast.rs b/tokio/src/sync/broadcast.rs index 326b81b4d5d..ba0a44fb8b9 100644 --- a/tokio/src/sync/broadcast.rs +++ b/tokio/src/sync/broadcast.rs @@ -129,7 +129,6 @@ use std::pin::Pin; use std::ptr::NonNull; use std::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst}; use std::task::{Context, Poll, Waker}; -use std::usize; /// Sending-half of the [`broadcast`] channel. 
/// diff --git a/tokio/src/sync/rwlock.rs b/tokio/src/sync/rwlock.rs index 37cf73c5905..14983d5cb32 100644 --- a/tokio/src/sync/rwlock.rs +++ b/tokio/src/sync/rwlock.rs @@ -21,7 +21,7 @@ pub(crate) use write_guard::RwLockWriteGuard; pub(crate) use write_guard_mapped::RwLockMappedWriteGuard; #[cfg(not(loom))] -const MAX_READS: u32 = std::u32::MAX >> 3; +const MAX_READS: u32 = u32::MAX >> 3; #[cfg(loom)] const MAX_READS: u32 = 10; diff --git a/tokio/tests/sync_broadcast.rs b/tokio/tests/sync_broadcast.rs index 17fe44f3e89..2638c1f33d4 100644 --- a/tokio/tests/sync_broadcast.rs +++ b/tokio/tests/sync_broadcast.rs @@ -286,8 +286,6 @@ fn zero_capacity() { #[should_panic] #[cfg(not(target_family = "wasm"))] // wasm currently doesn't support unwinding fn capacity_too_big() { - use std::usize; - broadcast::channel::<()>(1 + (usize::MAX >> 1)); } From a02407171a3f1aeb86e7406bcac9dfb415278308 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Sun, 19 May 2024 18:09:22 +0200 Subject: [PATCH 139/162] task: clarify that you can't abort `spawn_blocking` tasks (#6571) --- tokio/src/runtime/task/abort.rs | 14 ++++++++++++++ tokio/src/runtime/task/join.rs | 7 +++++++ tokio/src/task/blocking.rs | 14 ++++++-------- tokio/src/task/mod.rs | 6 ++++++ 4 files changed, 33 insertions(+), 8 deletions(-) diff --git a/tokio/src/runtime/task/abort.rs b/tokio/src/runtime/task/abort.rs index 410f0a4671b..2745b56020c 100644 --- a/tokio/src/runtime/task/abort.rs +++ b/tokio/src/runtime/task/abort.rs @@ -11,7 +11,14 @@ use std::panic::{RefUnwindSafe, UnwindSafe}; /// Dropping an `AbortHandle` releases the permission to terminate the task /// --- it does *not* abort the task. /// +/// Be aware that tasks spawned using [`spawn_blocking`] cannot be aborted +/// because they are not async. If you call `abort` on a `spawn_blocking` task, +/// then this *will not have any effect*, and the task will continue running +/// normally. 
The exception is if the task has not started running yet; in that +/// case, calling `abort` may prevent the task from starting. +/// /// [`JoinHandle`]: crate::task::JoinHandle +/// [`spawn_blocking`]: crate::task::spawn_blocking #[cfg_attr(docsrs, doc(cfg(feature = "rt")))] pub struct AbortHandle { raw: RawTask, @@ -31,11 +38,18 @@ impl AbortHandle { /// If the task was already cancelled, such as by [`JoinHandle::abort`], /// this method will do nothing. /// + /// Be aware that tasks spawned using [`spawn_blocking`] cannot be aborted + /// because they are not async. If you call `abort` on a `spawn_blocking` + /// task, then this *will not have any effect*, and the task will continue + /// running normally. The exception is if the task has not started running + /// yet; in that case, calling `abort` may prevent the task from starting. + /// /// See also [the module level docs] for more information on cancellation. /// /// [cancelled]: method@super::error::JoinError::is_cancelled /// [`JoinHandle::abort`]: method@super::JoinHandle::abort /// [the module level docs]: crate::task#cancellation + /// [`spawn_blocking`]: crate::task::spawn_blocking pub fn abort(&self) { self.raw.remote_abort(); } diff --git a/tokio/src/runtime/task/join.rs b/tokio/src/runtime/task/join.rs index 19289cf5826..e646f6575ed 100644 --- a/tokio/src/runtime/task/join.rs +++ b/tokio/src/runtime/task/join.rs @@ -179,6 +179,12 @@ impl JoinHandle { /// already completed at the time it was cancelled, but most likely it /// will fail with a [cancelled] `JoinError`. /// + /// Be aware that tasks spawned using [`spawn_blocking`] cannot be aborted + /// because they are not async. If you call `abort` on a `spawn_blocking` + /// task, then this *will not have any effect*, and the task will continue + /// running normally. The exception is if the task has not started running + /// yet; in that case, calling `abort` may prevent the task from starting. 
+ /// /// See also [the module level docs] for more information on cancellation. /// /// ```rust @@ -210,6 +216,7 @@ impl JoinHandle { /// /// [cancelled]: method@super::error::JoinError::is_cancelled /// [the module level docs]: crate::task#cancellation + /// [`spawn_blocking`]: crate::task::spawn_blocking pub fn abort(&self) { self.raw.remote_abort(); } diff --git a/tokio/src/task/blocking.rs b/tokio/src/task/blocking.rs index 1cce466394e..193c28dfd0f 100644 --- a/tokio/src/task/blocking.rs +++ b/tokio/src/task/blocking.rs @@ -103,14 +103,11 @@ cfg_rt! { /// their own. If you want to spawn an ordinary thread, you should use /// [`thread::spawn`] instead. /// - /// Closures spawned using `spawn_blocking` cannot be cancelled abruptly; there - /// is no standard low level API to cause a thread to stop running. However, - /// a useful pattern is to pass some form of "cancellation token" into - /// the thread. This could be an [`AtomicBool`] that the task checks periodically. - /// Another approach is to have the thread primarily read or write from a channel, - /// and to exit when the channel closes; assuming the other side of the channel is dropped - /// when cancellation occurs, this will cause the blocking task thread to exit - /// soon after as well. + /// Be aware that tasks spawned using `spawn_blocking` cannot be aborted + /// because they are not async. If you call [`abort`] on a `spawn_blocking` + /// task, then this *will not have any effect*, and the task will continue + /// running normally. The exception is if the task has not started running + /// yet; in that case, calling `abort` may prevent the task from starting. /// /// When you shut down the executor, it will wait indefinitely for all blocking operations to /// finish. You can use [`shutdown_timeout`] to stop waiting for them after a @@ -152,6 +149,7 @@ cfg_rt! 
{ /// [`shutdown_timeout`]: fn@crate::runtime::Runtime::shutdown_timeout /// [bridgesync]: https://tokio.rs/tokio/topics/bridging /// [`AtomicBool`]: struct@std::sync::atomic::AtomicBool + /// [`abort`]: crate::task::JoinHandle::abort /// /// # Examples /// diff --git a/tokio/src/task/mod.rs b/tokio/src/task/mod.rs index 32a87c93c55..0678aa3af7b 100644 --- a/tokio/src/task/mod.rs +++ b/tokio/src/task/mod.rs @@ -133,6 +133,12 @@ //! end of the task, then the [`JoinHandle`] will instead report that the task //! exited normally. //! +//! Be aware that tasks spawned using [`spawn_blocking`] cannot be aborted +//! because they are not async. If you call `abort` on a `spawn_blocking` +//! task, then this *will not have any effect*, and the task will continue +//! running normally. The exception is if the task has not started running +//! yet; in that case, calling `abort` may prevent the task from starting. +//! //! Be aware that calls to [`JoinHandle::abort`] just schedule the task for //! cancellation, and will return before the cancellation has completed. To wait //! for cancellation to complete, wait for the task to finish by awaiting the From 0b651c070f51ff30a2d610e1dff1a94cd3923749 Mon Sep 17 00:00:00 2001 From: "M.Amin Rayej" Date: Wed, 22 May 2024 14:54:48 +0330 Subject: [PATCH 140/162] fs: add `File::create_new` (#6573) --- tokio/src/fs/file.rs | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/tokio/src/fs/file.rs b/tokio/src/fs/file.rs index efce9fda990..27d91debbe8 100644 --- a/tokio/src/fs/file.rs +++ b/tokio/src/fs/file.rs @@ -193,6 +193,46 @@ impl File { Ok(File::from_std(std_file)) } + /// Opens a file in read-write mode. + /// + /// This function will create a file if it does not exist, or return an error + /// if it does. This way, if the call succeeds, the file returned is guaranteed + /// to be new. + /// + /// This option is useful because it is atomic. 
Otherwise between checking + /// whether a file exists and creating a new one, the file may have been + /// created by another process (a TOCTOU race condition / attack). + /// + /// This can also be written using `File::options().read(true).write(true).create_new(true).open(...)`. + /// + /// See [`OpenOptions`] for more details. + /// + /// # Examples + /// + /// ```no_run + /// use tokio::fs::File; + /// use tokio::io::AsyncWriteExt; + /// + /// # async fn dox() -> std::io::Result<()> { + /// let mut file = File::create_new("foo.txt").await?; + /// file.write_all(b"hello, world!").await?; + /// # Ok(()) + /// # } + /// ``` + /// + /// The [`write_all`] method is defined on the [`AsyncWriteExt`] trait. + /// + /// [`write_all`]: fn@crate::io::AsyncWriteExt::write_all + /// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt + pub async fn create_new>(path: P) -> std::io::Result { + Self::options() + .read(true) + .write(true) + .create_new(true) + .open(path) + .await + } + /// Returns a new [`OpenOptions`] object. /// /// This function returns a new `OpenOptions` object that you can use to From e62c3e92692f795c407beadff580fa1380df5a26 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E4=BA=8C=E6=89=8B=E6=8E=89=E5=8C=85=E5=B7=A5=E7=A8=8B?= =?UTF-8?q?=E5=B8=88?= Date: Wed, 22 May 2024 20:35:12 +0900 Subject: [PATCH 141/162] task: add `tokio::task::join_set::Builder::spawn_blocking` (#6578) Signed-off-by: hi-rustin --- tokio/src/task/join_set.rs | 47 +++++++++++++++++++++++++++++++++++++- 1 file changed, 46 insertions(+), 1 deletion(-) diff --git a/tokio/src/task/join_set.rs b/tokio/src/task/join_set.rs index c9251db86d7..267ceb55bcc 100644 --- a/tokio/src/task/join_set.rs +++ b/tokio/src/task/join_set.rs @@ -308,7 +308,7 @@ impl JoinSet { /// Tries to join one of the tasks in the set that has completed and return its output. /// - /// Returns `None` if the set is empty. + /// Returns `None` if the set is empty. 
pub fn try_join_next(&mut self) -> Option> { // Loop over all notified `JoinHandle`s to find one that's ready, or until none are left. loop { @@ -623,6 +623,51 @@ impl<'a, T: 'static> Builder<'a, T> { Ok(self.joinset.insert(self.builder.spawn_on(future, handle)?)) } + /// Spawn the blocking code on the blocking threadpool with this builder's + /// settings, and store it in the [`JoinSet`]. + /// + /// # Returns + /// + /// An [`AbortHandle`] that can be used to remotely cancel the task. + /// + /// # Panics + /// + /// This method panics if called outside of a Tokio runtime. + /// + /// [`JoinSet`]: crate::task::JoinSet + /// [`AbortHandle`]: crate::task::AbortHandle + #[track_caller] + pub fn spawn_blocking(self, f: F) -> std::io::Result + where + F: FnOnce() -> T, + F: Send + 'static, + T: Send, + { + Ok(self.joinset.insert(self.builder.spawn_blocking(f)?)) + } + + /// Spawn the blocking code on the blocking threadpool of the provided + /// runtime handle with this builder's settings, and store it in the + /// [`JoinSet`]. + /// + /// # Returns + /// + /// An [`AbortHandle`] that can be used to remotely cancel the task. + /// + /// [`JoinSet`]: crate::task::JoinSet + /// [`AbortHandle`]: crate::task::AbortHandle + #[track_caller] + pub fn spawn_blocking_on(self, f: F, handle: &Handle) -> std::io::Result + where + F: FnOnce() -> T, + F: Send + 'static, + T: Send, + { + Ok(self + .joinset + .insert(self.builder.spawn_blocking_on(f, handle)?)) + } + /// Spawn the provided task on the current [`LocalSet`] with this builder's /// settings, and store it in the [`JoinSet`]. 
/// From 1914e1e4b9bfe6ea2d61970ec3fcf2b5d7bb0210 Mon Sep 17 00:00:00 2001 From: Weijia Jiang Date: Wed, 22 May 2024 19:54:26 +0800 Subject: [PATCH 142/162] time: use sharding for timer implementation (#6534) --- tokio/src/runtime/builder.rs | 9 +- tokio/src/runtime/context.rs | 12 +- tokio/src/runtime/driver.rs | 8 +- .../runtime/scheduler/multi_thread/worker.rs | 5 + .../scheduler/multi_thread_alt/worker.rs | 5 + tokio/src/runtime/time/entry.rs | 37 +++- tokio/src/runtime/time/mod.rs | 159 ++++++++++++------ tokio/src/runtime/time/tests/mod.rs | 16 +- tokio/src/util/mod.rs | 2 +- tokio/src/util/rand.rs | 1 + 10 files changed, 184 insertions(+), 70 deletions(-) diff --git a/tokio/src/runtime/builder.rs b/tokio/src/runtime/builder.rs index 3b09c0d4b10..05f736d3e50 100644 --- a/tokio/src/runtime/builder.rs +++ b/tokio/src/runtime/builder.rs @@ -702,7 +702,7 @@ impl Builder { } } - fn get_cfg(&self) -> driver::Cfg { + fn get_cfg(&self, workers: usize) -> driver::Cfg { driver::Cfg { enable_pause_time: match self.kind { Kind::CurrentThread => true, @@ -715,6 +715,7 @@ impl Builder { enable_time: self.enable_time, start_paused: self.start_paused, nevents: self.nevents, + workers, } } @@ -1095,7 +1096,7 @@ impl Builder { use crate::runtime::scheduler::{self, CurrentThread}; use crate::runtime::{runtime::Scheduler, Config}; - let (driver, driver_handle) = driver::Driver::new(self.get_cfg())?; + let (driver, driver_handle) = driver::Driver::new(self.get_cfg(1))?; // Blocking pool let blocking_pool = blocking::create_blocking_pool(self, self.max_blocking_threads); @@ -1248,7 +1249,7 @@ cfg_rt_multi_thread! { let core_threads = self.worker_threads.unwrap_or_else(num_cpus); - let (driver, driver_handle) = driver::Driver::new(self.get_cfg())?; + let (driver, driver_handle) = driver::Driver::new(self.get_cfg(core_threads))?; // Create the blocking pool let blocking_pool = @@ -1295,7 +1296,7 @@ cfg_rt_multi_thread! 
{ use crate::runtime::scheduler::MultiThreadAlt; let core_threads = self.worker_threads.unwrap_or_else(num_cpus); - let (driver, driver_handle) = driver::Driver::new(self.get_cfg())?; + let (driver, driver_handle) = driver::Driver::new(self.get_cfg(core_threads))?; // Create the blocking pool let blocking_pool = diff --git a/tokio/src/runtime/context.rs b/tokio/src/runtime/context.rs index 62e4fc9474c..76918114bc3 100644 --- a/tokio/src/runtime/context.rs +++ b/tokio/src/runtime/context.rs @@ -3,7 +3,7 @@ use crate::runtime::coop; use std::cell::Cell; -#[cfg(any(feature = "rt", feature = "macros"))] +#[cfg(any(feature = "rt", feature = "macros", feature = "time"))] use crate::util::rand::FastRand; cfg_rt! { @@ -57,7 +57,7 @@ struct Context { #[cfg(feature = "rt")] runtime: Cell, - #[cfg(any(feature = "rt", feature = "macros"))] + #[cfg(any(feature = "rt", feature = "macros", feature = "time"))] rng: Cell>, /// Tracks the amount of "work" a task may still do before yielding back to @@ -100,7 +100,7 @@ tokio_thread_local! { #[cfg(feature = "rt")] runtime: Cell::new(EnterRuntime::NotEntered), - #[cfg(any(feature = "rt", feature = "macros"))] + #[cfg(any(feature = "rt", feature = "macros", feature = "time"))] rng: Cell::new(None), budget: Cell::new(coop::Budget::unconstrained()), @@ -121,7 +121,11 @@ tokio_thread_local! 
{ } } -#[cfg(any(feature = "macros", all(feature = "sync", feature = "rt")))] +#[cfg(any( + feature = "time", + feature = "macros", + all(feature = "sync", feature = "rt") +))] pub(crate) fn thread_rng_n(n: u32) -> u32 { CONTEXT.with(|ctx| { let mut rng = ctx.rng.get().unwrap_or_else(FastRand::new); diff --git a/tokio/src/runtime/driver.rs b/tokio/src/runtime/driver.rs index 64928228b46..11aa7abb7b7 100644 --- a/tokio/src/runtime/driver.rs +++ b/tokio/src/runtime/driver.rs @@ -40,6 +40,7 @@ pub(crate) struct Cfg { pub(crate) enable_pause_time: bool, pub(crate) start_paused: bool, pub(crate) nevents: usize, + pub(crate) workers: usize, } impl Driver { @@ -48,7 +49,8 @@ impl Driver { let clock = create_clock(cfg.enable_pause_time, cfg.start_paused); - let (time_driver, time_handle) = create_time_driver(cfg.enable_time, io_stack, &clock); + let (time_driver, time_handle) = + create_time_driver(cfg.enable_time, io_stack, &clock, cfg.workers); Ok(( Self { inner: time_driver }, @@ -306,9 +308,10 @@ cfg_time! { enable: bool, io_stack: IoStack, clock: &Clock, + workers: usize, ) -> (TimeDriver, TimeHandle) { if enable { - let (driver, handle) = crate::runtime::time::Driver::new(io_stack, clock); + let (driver, handle) = crate::runtime::time::Driver::new(io_stack, clock, workers as u32); (TimeDriver::Enabled { driver }, Some(handle)) } else { @@ -361,6 +364,7 @@ cfg_not_time! 
{ _enable: bool, io_stack: IoStack, _clock: &Clock, + _workers: usize, ) -> (TimeDriver, TimeHandle) { (io_stack, ()) } diff --git a/tokio/src/runtime/scheduler/multi_thread/worker.rs b/tokio/src/runtime/scheduler/multi_thread/worker.rs index 83e70795f4f..9f0dd98dfdc 100644 --- a/tokio/src/runtime/scheduler/multi_thread/worker.rs +++ b/tokio/src/runtime/scheduler/multi_thread/worker.rs @@ -742,6 +742,11 @@ impl Context { pub(crate) fn defer(&self, waker: &Waker) { self.defer.defer(waker); } + + #[allow(dead_code)] + pub(crate) fn get_worker_index(&self) -> usize { + self.worker.index + } } impl Core { diff --git a/tokio/src/runtime/scheduler/multi_thread_alt/worker.rs b/tokio/src/runtime/scheduler/multi_thread_alt/worker.rs index c315e382291..63ae0a49743 100644 --- a/tokio/src/runtime/scheduler/multi_thread_alt/worker.rs +++ b/tokio/src/runtime/scheduler/multi_thread_alt/worker.rs @@ -1311,6 +1311,11 @@ impl Context { fn shared(&self) -> &Shared { &self.handle.shared } + + #[cfg_attr(not(feature = "time"), allow(dead_code))] + pub(crate) fn get_worker_index(&self) -> usize { + self.index + } } impl Core { diff --git a/tokio/src/runtime/time/entry.rs b/tokio/src/runtime/time/entry.rs index 7991ee0dc0a..834077caa3d 100644 --- a/tokio/src/runtime/time/entry.rs +++ b/tokio/src/runtime/time/entry.rs @@ -58,6 +58,7 @@ use crate::loom::cell::UnsafeCell; use crate::loom::sync::atomic::AtomicU64; use crate::loom::sync::atomic::Ordering; +use crate::runtime::context; use crate::runtime::scheduler; use crate::sync::AtomicWaker; use crate::time::Instant; @@ -328,6 +329,8 @@ pub(super) type EntryList = crate::util::linked_list::LinkedList Self { + pub(super) fn new(shard_id: u32) -> Self { Self { + shard_id, cached_when: AtomicU64::new(0), pointers: linked_list::Pointers::new(), state: StateCell::default(), @@ -438,6 +442,11 @@ impl TimerShared { pub(super) fn might_be_registered(&self) -> bool { self.state.might_be_registered() } + + /// Gets the shard id. 
+ pub(super) fn shard_id(&self) -> u32 { + self.shard_id + } } unsafe impl linked_list::Link for TimerShared { @@ -485,8 +494,10 @@ impl TimerEntry { fn inner(&self) -> &TimerShared { let inner = unsafe { &*self.inner.get() }; if inner.is_none() { + let shard_size = self.driver.driver().time().inner.get_shard_size(); + let shard_id = generate_shard_id(shard_size); unsafe { - *self.inner.get() = Some(TimerShared::new()); + *self.inner.get() = Some(TimerShared::new(shard_id)); } } return inner.as_ref().unwrap(); @@ -643,3 +654,25 @@ impl Drop for TimerEntry { unsafe { Pin::new_unchecked(self) }.as_mut().cancel(); } } + +// Generates a shard id. If current thread is a worker thread, we use its worker index as a shard id. +// Otherwise, we use a random number generator to obtain the shard id. +cfg_rt! { + fn generate_shard_id(shard_size: u32) -> u32 { + let id = context::with_scheduler(|ctx| match ctx { + Some(scheduler::Context::CurrentThread(_ctx)) => 0, + #[cfg(feature = "rt-multi-thread")] + Some(scheduler::Context::MultiThread(ctx)) => ctx.get_worker_index() as u32, + #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))] + Some(scheduler::Context::MultiThreadAlt(ctx)) => ctx.get_worker_index() as u32, + None => context::thread_rng_n(shard_size), + }); + id % shard_size + } +} + +cfg_not_rt! 
{ + fn generate_shard_id(shard_size: u32) -> u32 { + context::thread_rng_n(shard_size) + } +} diff --git a/tokio/src/runtime/time/mod.rs b/tokio/src/runtime/time/mod.rs index 8cd51c5cb4a..37b04ef0000 100644 --- a/tokio/src/runtime/time/mod.rs +++ b/tokio/src/runtime/time/mod.rs @@ -12,6 +12,7 @@ use entry::{EntryList, TimerHandle, TimerShared, MAX_SAFE_MILLIS_DURATION}; mod handle; pub(crate) use self::handle::Handle; +use self::wheel::Wheel; mod source; pub(crate) use source::TimeSource; @@ -25,9 +26,28 @@ use crate::time::error::Error; use crate::time::{Clock, Duration}; use crate::util::WakeList; +use crate::loom::sync::atomic::AtomicU64; use std::fmt; use std::{num::NonZeroU64, ptr::NonNull}; +struct AtomicOptionNonZeroU64(AtomicU64); + +// A helper type to store the `next_wake`. +impl AtomicOptionNonZeroU64 { + fn new(val: Option) -> Self { + Self(AtomicU64::new(val.map_or(0, NonZeroU64::get))) + } + + fn store(&self, val: Option) { + self.0 + .store(val.map_or(0, NonZeroU64::get), Ordering::Relaxed); + } + + fn load(&self) -> Option { + NonZeroU64::new(self.0.load(Ordering::Relaxed)) + } +} + /// Time implementation that drives [`Sleep`][sleep], [`Interval`][interval], and [`Timeout`][timeout]. /// /// A `Driver` instance tracks the state necessary for managing time and @@ -91,8 +111,11 @@ pub(crate) struct Driver { /// Timer state shared between `Driver`, `Handle`, and `Registration`. struct Inner { - // The state is split like this so `Handle` can access `is_shutdown` without locking the mutex - pub(super) state: Mutex, + /// The earliest time at which we promise to wake up without unparking. + next_wake: AtomicOptionNonZeroU64, + + /// Sharded Timer wheels. + wheels: Box<[Mutex]>, /// True if the driver is being shutdown. 
pub(super) is_shutdown: AtomicBool, @@ -107,15 +130,6 @@ struct Inner { did_wake: AtomicBool, } -/// Time state shared which must be protected by a `Mutex` -struct InnerState { - /// The earliest time at which we promise to wake up without unparking. - next_wake: Option, - - /// Timer wheel. - wheel: wheel::Wheel, -} - // ===== impl Driver ===== impl Driver { @@ -123,18 +137,20 @@ impl Driver { /// thread and `time_source` to get the current time and convert to ticks. /// /// Specifying the source of time is useful when testing. - pub(crate) fn new(park: IoStack, clock: &Clock) -> (Driver, Handle) { + pub(crate) fn new(park: IoStack, clock: &Clock, shards: u32) -> (Driver, Handle) { + assert!(shards > 0); + let time_source = TimeSource::new(clock); + let wheels: Vec<_> = (0..shards) + .map(|_| Mutex::new(wheel::Wheel::new())) + .collect(); let handle = Handle { time_source, inner: Inner { - state: Mutex::new(InnerState { - next_wake: None, - wheel: wheel::Wheel::new(), - }), + next_wake: AtomicOptionNonZeroU64::new(None), + wheels: wheels.into_boxed_slice(), is_shutdown: AtomicBool::new(false), - #[cfg(feature = "test-util")] did_wake: AtomicBool::new(false), }, @@ -164,24 +180,30 @@ impl Driver { // Advance time forward to the end of time. - handle.process_at_time(u64::MAX); + handle.process_at_time(0, u64::MAX); self.park.shutdown(rt_handle); } fn park_internal(&mut self, rt_handle: &driver::Handle, limit: Option) { let handle = rt_handle.time(); - let mut lock = handle.inner.state.lock(); - assert!(!handle.is_shutdown()); - let next_wake = lock.wheel.next_expiration_time(); - lock.next_wake = - next_wake.map(|t| NonZeroU64::new(t).unwrap_or_else(|| NonZeroU64::new(1).unwrap())); - - drop(lock); - - match next_wake { + // Finds out the min expiration time to park. 
+ let expiration_time = (0..rt_handle.time().inner.get_shard_size()) + .filter_map(|id| { + let lock = rt_handle.time().inner.lock_sharded_wheel(id); + lock.next_expiration_time() + }) + .min(); + + rt_handle + .time() + .inner + .next_wake + .store(next_wake_time(expiration_time)); + + match expiration_time { Some(when) => { let now = handle.time_source.now(rt_handle.clock()); // Note that we effectively round up to 1ms here - this avoids @@ -245,30 +267,59 @@ impl Driver { } } +// Helper function to turn expiration_time into next_wake_time. +// Since the `park_timeout` will round up to 1ms for avoiding very +// short-duration microsecond-resolution sleeps, we do the same here. +// The conversion is as follows +// None => None +// Some(0) => Some(1) +// Some(i) => Some(i) +fn next_wake_time(expiration_time: Option) -> Option { + expiration_time.and_then(|v| { + if v == 0 { + NonZeroU64::new(1) + } else { + NonZeroU64::new(v) + } + }) +} + impl Handle { /// Runs timer related logic, and returns the next wakeup time pub(self) fn process(&self, clock: &Clock) { let now = self.time_source().now(clock); + // For fairness, randomly select one to start. + let shards = self.inner.get_shard_size(); + let start = crate::runtime::context::thread_rng_n(shards); + self.process_at_time(start, now); + } - self.process_at_time(now); + pub(self) fn process_at_time(&self, start: u32, now: u64) { + let shards = self.inner.get_shard_size(); + + let expiration_time = (start..shards + start) + .filter_map(|i| self.process_at_sharded_time(i, now)) + .min(); + + self.inner.next_wake.store(next_wake_time(expiration_time)); } - pub(self) fn process_at_time(&self, mut now: u64) { + // Returns the next wakeup time of this shard. 
+ pub(self) fn process_at_sharded_time(&self, id: u32, mut now: u64) -> Option { let mut waker_list = WakeList::new(); + let mut lock = self.inner.lock_sharded_wheel(id); - let mut lock = self.inner.lock(); - - if now < lock.wheel.elapsed() { + if now < lock.elapsed() { // Time went backwards! This normally shouldn't happen as the Rust language // guarantees that an Instant is monotonic, but can happen when running // Linux in a VM on a Windows host due to std incorrectly trusting the // hardware clock to be monotonic. // // See for more information. - now = lock.wheel.elapsed(); + now = lock.elapsed(); } - while let Some(entry) = lock.wheel.poll(now) { + while let Some(entry) = lock.poll(now) { debug_assert!(unsafe { entry.is_pending() }); // SAFETY: We hold the driver lock, and just removed the entry from any linked lists. @@ -281,19 +332,15 @@ impl Handle { waker_list.wake_all(); - lock = self.inner.lock(); + lock = self.inner.lock_sharded_wheel(id); } } } - - lock.next_wake = lock - .wheel - .poll_at() - .map(|t| NonZeroU64::new(t).unwrap_or_else(|| NonZeroU64::new(1).unwrap())); - + let next_wake_up = lock.poll_at(); drop(lock); waker_list.wake_all(); + next_wake_up } /// Removes a registered timer from the driver. @@ -308,10 +355,10 @@ impl Handle { /// `add_entry` must not be called concurrently. pub(self) unsafe fn clear_entry(&self, entry: NonNull) { unsafe { - let mut lock = self.inner.lock(); + let mut lock = self.inner.lock_sharded_wheel(entry.as_ref().shard_id()); if entry.as_ref().might_be_registered() { - lock.wheel.remove(entry); + lock.remove(entry); } entry.as_ref().handle().fire(Ok(())); @@ -331,12 +378,12 @@ impl Handle { entry: NonNull, ) { let waker = unsafe { - let mut lock = self.inner.lock(); + let mut lock = self.inner.lock_sharded_wheel(entry.as_ref().shard_id()); // We may have raced with a firing/deregistration, so check before // deregistering. 
if unsafe { entry.as_ref().might_be_registered() } { - lock.wheel.remove(entry); + lock.remove(entry); } // Now that we have exclusive control of this entry, mint a handle to reinsert it. @@ -350,10 +397,12 @@ impl Handle { // Note: We don't have to worry about racing with some other resetting // thread, because add_entry and reregister require exclusive control of // the timer entry. - match unsafe { lock.wheel.insert(entry) } { + match unsafe { lock.insert(entry) } { Ok(when) => { - if lock + if self + .inner .next_wake + .load() .map(|next_wake| when < next_wake.get()) .unwrap_or(true) { @@ -389,15 +438,25 @@ impl Handle { // ===== impl Inner ===== impl Inner { - /// Locks the driver's inner structure - pub(super) fn lock(&self) -> crate::loom::sync::MutexGuard<'_, InnerState> { - self.state.lock() + /// Locks the driver's sharded wheel structure. + pub(super) fn lock_sharded_wheel( + &self, + shard_id: u32, + ) -> crate::loom::sync::MutexGuard<'_, Wheel> { + let index = shard_id % (self.wheels.len() as u32); + // Safety: This modulo operation ensures that the index is not out of bounds. + unsafe { self.wheels.get_unchecked(index as usize).lock() } } // Check whether the driver has been shutdown pub(super) fn is_shutdown(&self) -> bool { self.is_shutdown.load(Ordering::SeqCst) } + + // Gets the number of shards. + fn get_shard_size(&self) -> u32 { + self.wheels.len() as u32 + } } impl fmt::Debug for Inner { diff --git a/tokio/src/runtime/time/tests/mod.rs b/tokio/src/runtime/time/tests/mod.rs index 520dc00a462..676cf55f9c6 100644 --- a/tokio/src/runtime/time/tests/mod.rs +++ b/tokio/src/runtime/time/tests/mod.rs @@ -68,7 +68,7 @@ fn single_timer() { // This may or may not return Some (depending on how it races with the // thread). If it does return None, however, the timer should complete // synchronously. 
- time.process_at_time(time.time_source().now(clock) + 2_000_000_000); + time.process_at_time(0, time.time_source().now(clock) + 2_000_000_000); jh.join().unwrap(); }) @@ -102,7 +102,7 @@ fn drop_timer() { let clock = handle.inner.driver().clock(); // advance 2s in the future. - time.process_at_time(time.time_source().now(clock) + 2_000_000_000); + time.process_at_time(0, time.time_source().now(clock) + 2_000_000_000); jh.join().unwrap(); }) @@ -138,7 +138,7 @@ fn change_waker() { let clock = handle.inner.driver().clock(); // advance 2s - time.process_at_time(time.time_source().now(clock) + 2_000_000_000); + time.process_at_time(0, time.time_source().now(clock) + 2_000_000_000); jh.join().unwrap(); }) @@ -181,6 +181,7 @@ fn reset_future() { // This may or may not return a wakeup time. handle.process_at_time( + 0, handle .time_source() .instant_to_tick(start + Duration::from_millis(1500)), @@ -189,6 +190,7 @@ fn reset_future() { assert!(!finished_early.load(Ordering::Relaxed)); handle.process_at_time( + 0, handle .time_source() .instant_to_tick(start + Duration::from_millis(2500)), @@ -231,7 +233,7 @@ fn poll_process_levels() { } for t in 1..normal_or_miri(1024, 64) { - handle.inner.driver().time().process_at_time(t as u64); + handle.inner.driver().time().process_at_time(0, t as u64); for (deadline, future) in entries.iter_mut().enumerate() { let mut context = Context::from_waker(noop_waker_ref()); @@ -260,8 +262,8 @@ fn poll_process_levels_targeted() { let handle = handle.inner.driver().time(); - handle.process_at_time(62); + handle.process_at_time(0, 62); assert!(e1.as_mut().poll_elapsed(&mut context).is_pending()); - handle.process_at_time(192); - handle.process_at_time(192); + handle.process_at_time(0, 192); + handle.process_at_time(0, 192); } diff --git a/tokio/src/util/mod.rs b/tokio/src/util/mod.rs index d821ec897cf..7cf371195ff 100644 --- a/tokio/src/util/mod.rs +++ b/tokio/src/util/mod.rs @@ -49,7 +49,7 @@ cfg_rt! 
{ pub(crate) mod sharded_list; } -#[cfg(any(feature = "rt", feature = "macros"))] +#[cfg(any(feature = "rt", feature = "macros", feature = "time"))] pub(crate) mod rand; cfg_rt! { diff --git a/tokio/src/util/rand.rs b/tokio/src/util/rand.rs index 67c45693c9c..aad85b973ff 100644 --- a/tokio/src/util/rand.rs +++ b/tokio/src/util/rand.rs @@ -71,6 +71,7 @@ impl FastRand { #[cfg(any( feature = "macros", feature = "rt-multi-thread", + feature = "time", all(feature = "sync", feature = "rt") ))] pub(crate) fn fastrand_n(&mut self, n: u32) -> u32 { From 16ef7b1fd5b370208c5b9604e3fb0a964f24e92f Mon Sep 17 00:00:00 2001 From: Roy Wellington <53838718+roy-work@users.noreply.github.com> Date: Thu, 23 May 2024 00:52:57 -0400 Subject: [PATCH 143/162] docs: fix `stdin` documentation (#6581) The code for `output` indicates that only sets `stdout` and `stderr`, yet the docs for `stdin` indicated that it too would be set. This seems like it was just a simple copy/paste typo, so correct `stdin` to note that it just defaults to `inherit`. Fixes #6577. --- tokio/src/process/mod.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tokio/src/process/mod.rs b/tokio/src/process/mod.rs index fc661d89c1f..585dbdd6af6 100644 --- a/tokio/src/process/mod.rs +++ b/tokio/src/process/mod.rs @@ -544,11 +544,9 @@ impl Command { /// Sets configuration for the child process's standard input (stdin) handle. /// - /// Defaults to [`inherit`] when used with `spawn` or `status`, and - /// defaults to [`piped`] when used with `output`. + /// Defaults to [`inherit`]. 
/// /// [`inherit`]: std::process::Stdio::inherit - /// [`piped`]: std::process::Stdio::piped /// /// # Examples /// From cba86cf1b1edea8cd131f168a99953e6e35739d2 Mon Sep 17 00:00:00 2001 From: Russell Cohen Date: Thu, 23 May 2024 12:41:01 -0400 Subject: [PATCH 144/162] metrics: add MetricAtomicU64 and use in metrics (#6574) --- spellcheck.dic | 3 +- tokio/src/macros/cfg.rs | 28 +- tokio/src/runtime/metrics/histogram.rs | 15 +- tokio/src/runtime/metrics/io.rs | 14 +- tokio/src/runtime/metrics/runtime.rs | 1158 ++++++++++++------------ tokio/src/runtime/metrics/scheduler.rs | 15 +- tokio/src/runtime/metrics/worker.rs | 39 +- tokio/src/runtime/tests/queue.rs | 21 +- tokio/src/util/metric_atomics.rs | 47 + tokio/src/util/mod.rs | 2 + tokio/tests/rt_basic.rs | 2 +- tokio/tests/rt_metrics.rs | 7 +- tokio/tests/rt_threaded.rs | 2 +- tokio/tests/rt_threaded_alt.rs | 2 +- 14 files changed, 722 insertions(+), 633 deletions(-) create mode 100644 tokio/src/util/metric_atomics.rs diff --git a/spellcheck.dic b/spellcheck.dic index 4b9288118d2..238e24f7dc3 100644 --- a/spellcheck.dic +++ b/spellcheck.dic @@ -1,4 +1,4 @@ -284 +285 & + < @@ -34,6 +34,7 @@ amongst api APIs async +atomics awaitable backend backpressure diff --git a/tokio/src/macros/cfg.rs b/tokio/src/macros/cfg.rs index c67e0e8379f..f44599ff47a 100644 --- a/tokio/src/macros/cfg.rs +++ b/tokio/src/macros/cfg.rs @@ -218,19 +218,37 @@ macro_rules! cfg_macros { macro_rules! cfg_metrics { ($($item:item)*) => { $( - // For now, metrics is only disabled in loom tests. - // When stabilized, it might have a dedicated feature flag. - #[cfg(all(tokio_unstable, not(loom)))] + #[cfg(tokio_unstable)] #[cfg_attr(docsrs, doc(cfg(tokio_unstable)))] $item )* } } +/// Some metrics require 64-bit atomics. +macro_rules! cfg_64bit_metrics { + ($($item:item)*) => { + $( + #[cfg(target_has_atomic = "64")] + #[cfg_attr(docsrs, doc(cfg(target_has_atomic = "64")))] + $item + )* + } +} + +macro_rules! 
cfg_no_64bit_metrics { + ($($item:item)*) => { + $( + #[cfg(not(target_has_atomic = "64"))] + $item + )* + } +} + macro_rules! cfg_not_metrics { ($($item:item)*) => { $( - #[cfg(not(all(tokio_unstable, not(loom))))] + #[cfg(not(tokio_unstable))] $item )* } @@ -238,7 +256,7 @@ macro_rules! cfg_not_metrics { macro_rules! cfg_not_rt_and_metrics_and_net { ($($item:item)*) => { - $( #[cfg(not(all(feature = "net", feature = "rt", all(tokio_unstable, not(loom)))))]$item )* + $( #[cfg(not(all(feature = "net", feature = "rt", tokio_unstable)))]$item )* } } diff --git a/tokio/src/runtime/metrics/histogram.rs b/tokio/src/runtime/metrics/histogram.rs index 976f54fe852..f75ffa3b495 100644 --- a/tokio/src/runtime/metrics/histogram.rs +++ b/tokio/src/runtime/metrics/histogram.rs @@ -1,4 +1,5 @@ -use crate::loom::sync::atomic::{AtomicU64, Ordering::Relaxed}; +use crate::loom::sync::atomic::Ordering::Relaxed; +use crate::util::metric_atomics::MetricAtomicU64; use std::cmp; use std::ops::Range; @@ -6,7 +7,7 @@ use std::ops::Range; #[derive(Debug)] pub(crate) struct Histogram { /// The histogram buckets - buckets: Box<[AtomicU64]>, + buckets: Box<[MetricAtomicU64]>, /// Bucket scale, linear or log scale: HistogramScale, @@ -53,8 +54,10 @@ impl Histogram { self.buckets.len() } - pub(crate) fn get(&self, bucket: usize) -> u64 { - self.buckets[bucket].load(Relaxed) + cfg_64bit_metrics! 
{ + pub(crate) fn get(&self, bucket: usize) -> u64 { + self.buckets[bucket].load(Relaxed) + } } pub(crate) fn bucket_range(&self, bucket: usize) -> Range { @@ -150,7 +153,7 @@ impl HistogramBuilder { Histogram { buckets: (0..self.num_buckets) - .map(|_| AtomicU64::new(0)) + .map(|_| MetricAtomicU64::new(0)) .collect::>() .into_boxed_slice(), resolution, @@ -165,7 +168,7 @@ impl Default for HistogramBuilder { } } -#[cfg(test)] +#[cfg(all(test, target_has_atomic = "64"))] mod test { use super::*; diff --git a/tokio/src/runtime/metrics/io.rs b/tokio/src/runtime/metrics/io.rs index 06efdd42d72..674fca5faec 100644 --- a/tokio/src/runtime/metrics/io.rs +++ b/tokio/src/runtime/metrics/io.rs @@ -1,24 +1,24 @@ #![cfg_attr(not(feature = "net"), allow(dead_code))] -use crate::loom::sync::atomic::{AtomicU64, Ordering::Relaxed}; +use crate::{loom::sync::atomic::Ordering::Relaxed, util::metric_atomics::MetricAtomicU64}; #[derive(Default)] pub(crate) struct IoDriverMetrics { - pub(super) fd_registered_count: AtomicU64, - pub(super) fd_deregistered_count: AtomicU64, - pub(super) ready_count: AtomicU64, + pub(super) fd_registered_count: MetricAtomicU64, + pub(super) fd_deregistered_count: MetricAtomicU64, + pub(super) ready_count: MetricAtomicU64, } impl IoDriverMetrics { pub(crate) fn incr_fd_count(&self) { - self.fd_registered_count.fetch_add(1, Relaxed); + self.fd_registered_count.add(1, Relaxed); } pub(crate) fn dec_fd_count(&self) { - self.fd_deregistered_count.fetch_add(1, Relaxed); + self.fd_deregistered_count.add(1, Relaxed); } pub(crate) fn incr_ready_count_by(&self, amt: u64) { - self.ready_count.fetch_add(amt, Relaxed); + self.ready_count.add(amt, Relaxed); } } diff --git a/tokio/src/runtime/metrics/runtime.rs b/tokio/src/runtime/metrics/runtime.rs index 66a3e51bb97..865a6406a6a 100644 --- a/tokio/src/runtime/metrics/runtime.rs +++ b/tokio/src/runtime/metrics/runtime.rs @@ -1,7 +1,9 @@ use crate::runtime::Handle; use std::ops::Range; -use 
std::sync::atomic::Ordering::Relaxed; +cfg_64bit_metrics! { + use std::sync::atomic::Ordering::Relaxed; +} use std::time::Duration; /// Handle to the runtime's metrics. @@ -112,407 +114,409 @@ impl RuntimeMetrics { self.handle.inner.num_idle_blocking_threads() } - /// Returns the number of tasks scheduled from **outside** of the runtime. - /// - /// The remote schedule count starts at zero when the runtime is created and - /// increases by one each time a task is woken from **outside** of the - /// runtime. This usually means that a task is spawned or notified from a - /// non-runtime thread and must be queued using the Runtime's injection - /// queue, which tends to be slower. - /// - /// The counter is monotonically increasing. It is never decremented or - /// reset to zero. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.remote_schedule_count(); - /// println!("{} tasks were scheduled from outside the runtime", n); - /// } - /// ``` - pub fn remote_schedule_count(&self) -> u64 { - self.handle - .inner - .scheduler_metrics() - .remote_schedule_count - .load(Relaxed) - } + cfg_64bit_metrics! { + /// Returns the number of tasks scheduled from **outside** of the runtime. + /// + /// The remote schedule count starts at zero when the runtime is created and + /// increases by one each time a task is woken from **outside** of the + /// runtime. This usually means that a task is spawned or notified from a + /// non-runtime thread and must be queued using the Runtime's injection + /// queue, which tends to be slower. + /// + /// The counter is monotonically increasing. It is never decremented or + /// reset to zero. 
+ /// + /// # Examples + /// + /// ``` + /// use tokio::runtime::Handle; + /// + /// #[tokio::main] + /// async fn main() { + /// let metrics = Handle::current().metrics(); + /// + /// let n = metrics.remote_schedule_count(); + /// println!("{} tasks were scheduled from outside the runtime", n); + /// } + /// ``` + pub fn remote_schedule_count(&self) -> u64 { + self.handle + .inner + .scheduler_metrics() + .remote_schedule_count + .load(Relaxed) + } - /// Returns the number of times that tasks have been forced to yield back to the scheduler - /// after exhausting their task budgets. - /// - /// This count starts at zero when the runtime is created and increases by one each time a task yields due to exhausting its budget. - /// - /// The counter is monotonically increasing. It is never decremented or - /// reset to zero. - pub fn budget_forced_yield_count(&self) -> u64 { - self.handle - .inner - .scheduler_metrics() - .budget_forced_yield_count - .load(Relaxed) - } + /// Returns the number of times that tasks have been forced to yield back to the scheduler + /// after exhausting their task budgets. + /// + /// This count starts at zero when the runtime is created and increases by one each time a task yields due to exhausting its budget. + /// + /// The counter is monotonically increasing. It is never decremented or + /// reset to zero. + pub fn budget_forced_yield_count(&self) -> u64 { + self.handle + .inner + .scheduler_metrics() + .budget_forced_yield_count + .load(Relaxed) + } - /// Returns the total number of times the given worker thread has parked. - /// - /// The worker park count starts at zero when the runtime is created and - /// increases by one each time the worker parks the thread waiting for new - /// inbound events to process. This usually means the worker has processed - /// all pending work and is currently idle. - /// - /// The counter is monotonically increasing. It is never decremented or - /// reset to zero. 
- /// - /// # Arguments - /// - /// `worker` is the index of the worker being queried. The given value must - /// be between 0 and `num_workers()`. The index uniquely identifies a single - /// worker and will continue to identify the worker throughout the lifetime - /// of the runtime instance. - /// - /// # Panics - /// - /// The method panics when `worker` represents an invalid worker, i.e. is - /// greater than or equal to `num_workers()`. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.worker_park_count(0); - /// println!("worker 0 parked {} times", n); - /// } - /// ``` - pub fn worker_park_count(&self, worker: usize) -> u64 { - self.handle - .inner - .worker_metrics(worker) - .park_count - .load(Relaxed) - } + /// Returns the total number of times the given worker thread has parked. + /// + /// The worker park count starts at zero when the runtime is created and + /// increases by one each time the worker parks the thread waiting for new + /// inbound events to process. This usually means the worker has processed + /// all pending work and is currently idle. + /// + /// The counter is monotonically increasing. It is never decremented or + /// reset to zero. + /// + /// # Arguments + /// + /// `worker` is the index of the worker being queried. The given value must + /// be between 0 and `num_workers()`. The index uniquely identifies a single + /// worker and will continue to identify the worker throughout the lifetime + /// of the runtime instance. + /// + /// # Panics + /// + /// The method panics when `worker` represents an invalid worker, i.e. is + /// greater than or equal to `num_workers()`. 
+ /// + /// # Examples + /// + /// ``` + /// use tokio::runtime::Handle; + /// + /// #[tokio::main] + /// async fn main() { + /// let metrics = Handle::current().metrics(); + /// + /// let n = metrics.worker_park_count(0); + /// println!("worker 0 parked {} times", n); + /// } + /// ``` + pub fn worker_park_count(&self, worker: usize) -> u64 { + self.handle + .inner + .worker_metrics(worker) + .park_count + .load(Relaxed) + } - /// Returns the number of times the given worker thread unparked but - /// performed no work before parking again. - /// - /// The worker no-op count starts at zero when the runtime is created and - /// increases by one each time the worker unparks the thread but finds no - /// new work and goes back to sleep. This indicates a false-positive wake up. - /// - /// The counter is monotonically increasing. It is never decremented or - /// reset to zero. - /// - /// # Arguments - /// - /// `worker` is the index of the worker being queried. The given value must - /// be between 0 and `num_workers()`. The index uniquely identifies a single - /// worker and will continue to identify the worker throughout the lifetime - /// of the runtime instance. - /// - /// # Panics - /// - /// The method panics when `worker` represents an invalid worker, i.e. is - /// greater than or equal to `num_workers()`. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.worker_noop_count(0); - /// println!("worker 0 had {} no-op unparks", n); - /// } - /// ``` - pub fn worker_noop_count(&self, worker: usize) -> u64 { - self.handle - .inner - .worker_metrics(worker) - .noop_count - .load(Relaxed) - } + /// Returns the number of times the given worker thread unparked but + /// performed no work before parking again. 
+ /// + /// The worker no-op count starts at zero when the runtime is created and + /// increases by one each time the worker unparks the thread but finds no + /// new work and goes back to sleep. This indicates a false-positive wake up. + /// + /// The counter is monotonically increasing. It is never decremented or + /// reset to zero. + /// + /// # Arguments + /// + /// `worker` is the index of the worker being queried. The given value must + /// be between 0 and `num_workers()`. The index uniquely identifies a single + /// worker and will continue to identify the worker throughout the lifetime + /// of the runtime instance. + /// + /// # Panics + /// + /// The method panics when `worker` represents an invalid worker, i.e. is + /// greater than or equal to `num_workers()`. + /// + /// # Examples + /// + /// ``` + /// use tokio::runtime::Handle; + /// + /// #[tokio::main] + /// async fn main() { + /// let metrics = Handle::current().metrics(); + /// + /// let n = metrics.worker_noop_count(0); + /// println!("worker 0 had {} no-op unparks", n); + /// } + /// ``` + pub fn worker_noop_count(&self, worker: usize) -> u64 { + self.handle + .inner + .worker_metrics(worker) + .noop_count + .load(Relaxed) + } - /// Returns the number of tasks the given worker thread stole from - /// another worker thread. - /// - /// This metric only applies to the **multi-threaded** runtime and will - /// always return `0` when using the current thread runtime. - /// - /// The worker steal count starts at zero when the runtime is created and - /// increases by `N` each time the worker has processed its scheduled queue - /// and successfully steals `N` more pending tasks from another worker. - /// - /// The counter is monotonically increasing. It is never decremented or - /// reset to zero. - /// - /// # Arguments - /// - /// `worker` is the index of the worker being queried. The given value must - /// be between 0 and `num_workers()`. 
The index uniquely identifies a single - /// worker and will continue to identify the worker throughout the lifetime - /// of the runtime instance. - /// - /// # Panics - /// - /// The method panics when `worker` represents an invalid worker, i.e. is - /// greater than or equal to `num_workers()`. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.worker_steal_count(0); - /// println!("worker 0 has stolen {} tasks", n); - /// } - /// ``` - pub fn worker_steal_count(&self, worker: usize) -> u64 { - self.handle - .inner - .worker_metrics(worker) - .steal_count - .load(Relaxed) - } + /// Returns the number of tasks the given worker thread stole from + /// another worker thread. + /// + /// This metric only applies to the **multi-threaded** runtime and will + /// always return `0` when using the current thread runtime. + /// + /// The worker steal count starts at zero when the runtime is created and + /// increases by `N` each time the worker has processed its scheduled queue + /// and successfully steals `N` more pending tasks from another worker. + /// + /// The counter is monotonically increasing. It is never decremented or + /// reset to zero. + /// + /// # Arguments + /// + /// `worker` is the index of the worker being queried. The given value must + /// be between 0 and `num_workers()`. The index uniquely identifies a single + /// worker and will continue to identify the worker throughout the lifetime + /// of the runtime instance. + /// + /// # Panics + /// + /// The method panics when `worker` represents an invalid worker, i.e. is + /// greater than or equal to `num_workers()`. 
+ /// + /// # Examples + /// + /// ``` + /// use tokio::runtime::Handle; + /// + /// #[tokio::main] + /// async fn main() { + /// let metrics = Handle::current().metrics(); + /// + /// let n = metrics.worker_steal_count(0); + /// println!("worker 0 has stolen {} tasks", n); + /// } + /// ``` + pub fn worker_steal_count(&self, worker: usize) -> u64 { + self.handle + .inner + .worker_metrics(worker) + .steal_count + .load(Relaxed) + } - /// Returns the number of times the given worker thread stole tasks from - /// another worker thread. - /// - /// This metric only applies to the **multi-threaded** runtime and will - /// always return `0` when using the current thread runtime. - /// - /// The worker steal count starts at zero when the runtime is created and - /// increases by one each time the worker has processed its scheduled queue - /// and successfully steals more pending tasks from another worker. - /// - /// The counter is monotonically increasing. It is never decremented or - /// reset to zero. - /// - /// # Arguments - /// - /// `worker` is the index of the worker being queried. The given value must - /// be between 0 and `num_workers()`. The index uniquely identifies a single - /// worker and will continue to identify the worker throughout the lifetime - /// of the runtime instance. - /// - /// # Panics - /// - /// The method panics when `worker` represents an invalid worker, i.e. is - /// greater than or equal to `num_workers()`. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.worker_steal_operations(0); - /// println!("worker 0 has stolen tasks {} times", n); - /// } - /// ``` - pub fn worker_steal_operations(&self, worker: usize) -> u64 { - self.handle - .inner - .worker_metrics(worker) - .steal_operations - .load(Relaxed) - } + /// Returns the number of times the given worker thread stole tasks from + /// another worker thread. + /// + /// This metric only applies to the **multi-threaded** runtime and will + /// always return `0` when using the current thread runtime. + /// + /// The worker steal count starts at zero when the runtime is created and + /// increases by one each time the worker has processed its scheduled queue + /// and successfully steals more pending tasks from another worker. + /// + /// The counter is monotonically increasing. It is never decremented or + /// reset to zero. + /// + /// # Arguments + /// + /// `worker` is the index of the worker being queried. The given value must + /// be between 0 and `num_workers()`. The index uniquely identifies a single + /// worker and will continue to identify the worker throughout the lifetime + /// of the runtime instance. + /// + /// # Panics + /// + /// The method panics when `worker` represents an invalid worker, i.e. is + /// greater than or equal to `num_workers()`. 
+ /// + /// # Examples + /// + /// ``` + /// use tokio::runtime::Handle; + /// + /// #[tokio::main] + /// async fn main() { + /// let metrics = Handle::current().metrics(); + /// + /// let n = metrics.worker_steal_operations(0); + /// println!("worker 0 has stolen tasks {} times", n); + /// } + /// ``` + pub fn worker_steal_operations(&self, worker: usize) -> u64 { + self.handle + .inner + .worker_metrics(worker) + .steal_operations + .load(Relaxed) + } - /// Returns the number of tasks the given worker thread has polled. - /// - /// The worker poll count starts at zero when the runtime is created and - /// increases by one each time the worker polls a scheduled task. - /// - /// The counter is monotonically increasing. It is never decremented or - /// reset to zero. - /// - /// # Arguments - /// - /// `worker` is the index of the worker being queried. The given value must - /// be between 0 and `num_workers()`. The index uniquely identifies a single - /// worker and will continue to identify the worker throughout the lifetime - /// of the runtime instance. - /// - /// # Panics - /// - /// The method panics when `worker` represents an invalid worker, i.e. is - /// greater than or equal to `num_workers()`. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.worker_poll_count(0); - /// println!("worker 0 has polled {} tasks", n); - /// } - /// ``` - pub fn worker_poll_count(&self, worker: usize) -> u64 { - self.handle - .inner - .worker_metrics(worker) - .poll_count - .load(Relaxed) - } + /// Returns the number of tasks the given worker thread has polled. + /// + /// The worker poll count starts at zero when the runtime is created and + /// increases by one each time the worker polls a scheduled task. + /// + /// The counter is monotonically increasing. It is never decremented or + /// reset to zero. 
+ /// + /// # Arguments + /// + /// `worker` is the index of the worker being queried. The given value must + /// be between 0 and `num_workers()`. The index uniquely identifies a single + /// worker and will continue to identify the worker throughout the lifetime + /// of the runtime instance. + /// + /// # Panics + /// + /// The method panics when `worker` represents an invalid worker, i.e. is + /// greater than or equal to `num_workers()`. + /// + /// # Examples + /// + /// ``` + /// use tokio::runtime::Handle; + /// + /// #[tokio::main] + /// async fn main() { + /// let metrics = Handle::current().metrics(); + /// + /// let n = metrics.worker_poll_count(0); + /// println!("worker 0 has polled {} tasks", n); + /// } + /// ``` + pub fn worker_poll_count(&self, worker: usize) -> u64 { + self.handle + .inner + .worker_metrics(worker) + .poll_count + .load(Relaxed) + } - /// Returns the amount of time the given worker thread has been busy. - /// - /// The worker busy duration starts at zero when the runtime is created and - /// increases whenever the worker is spending time processing work. Using - /// this value can indicate the load of the given worker. If a lot of time - /// is spent busy, then the worker is under load and will check for inbound - /// events less often. - /// - /// The timer is monotonically increasing. It is never decremented or reset - /// to zero. - /// - /// # Arguments - /// - /// `worker` is the index of the worker being queried. The given value must - /// be between 0 and `num_workers()`. The index uniquely identifies a single - /// worker and will continue to identify the worker throughout the lifetime - /// of the runtime instance. - /// - /// # Panics - /// - /// The method panics when `worker` represents an invalid worker, i.e. is - /// greater than or equal to `num_workers()`. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.worker_total_busy_duration(0); - /// println!("worker 0 was busy for a total of {:?}", n); - /// } - /// ``` - pub fn worker_total_busy_duration(&self, worker: usize) -> Duration { - let nanos = self - .handle - .inner - .worker_metrics(worker) - .busy_duration_total - .load(Relaxed); - Duration::from_nanos(nanos) - } + /// Returns the amount of time the given worker thread has been busy. + /// + /// The worker busy duration starts at zero when the runtime is created and + /// increases whenever the worker is spending time processing work. Using + /// this value can indicate the load of the given worker. If a lot of time + /// is spent busy, then the worker is under load and will check for inbound + /// events less often. + /// + /// The timer is monotonically increasing. It is never decremented or reset + /// to zero. + /// + /// # Arguments + /// + /// `worker` is the index of the worker being queried. The given value must + /// be between 0 and `num_workers()`. The index uniquely identifies a single + /// worker and will continue to identify the worker throughout the lifetime + /// of the runtime instance. + /// + /// # Panics + /// + /// The method panics when `worker` represents an invalid worker, i.e. is + /// greater than or equal to `num_workers()`. 
+ /// + /// # Examples + /// + /// ``` + /// use tokio::runtime::Handle; + /// + /// #[tokio::main] + /// async fn main() { + /// let metrics = Handle::current().metrics(); + /// + /// let n = metrics.worker_total_busy_duration(0); + /// println!("worker 0 was busy for a total of {:?}", n); + /// } + /// ``` + pub fn worker_total_busy_duration(&self, worker: usize) -> Duration { + let nanos = self + .handle + .inner + .worker_metrics(worker) + .busy_duration_total + .load(Relaxed); + Duration::from_nanos(nanos) + } - /// Returns the number of tasks scheduled from **within** the runtime on the - /// given worker's local queue. - /// - /// The local schedule count starts at zero when the runtime is created and - /// increases by one each time a task is woken from **inside** of the - /// runtime on the given worker. This usually means that a task is spawned - /// or notified from within a runtime thread and will be queued on the - /// worker-local queue. - /// - /// The counter is monotonically increasing. It is never decremented or - /// reset to zero. - /// - /// # Arguments - /// - /// `worker` is the index of the worker being queried. The given value must - /// be between 0 and `num_workers()`. The index uniquely identifies a single - /// worker and will continue to identify the worker throughout the lifetime - /// of the runtime instance. - /// - /// # Panics - /// - /// The method panics when `worker` represents an invalid worker, i.e. is - /// greater than or equal to `num_workers()`. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.worker_local_schedule_count(0); - /// println!("{} tasks were scheduled on the worker's local queue", n); - /// } - /// ``` - pub fn worker_local_schedule_count(&self, worker: usize) -> u64 { - self.handle - .inner - .worker_metrics(worker) - .local_schedule_count - .load(Relaxed) - } + /// Returns the number of tasks scheduled from **within** the runtime on the + /// given worker's local queue. + /// + /// The local schedule count starts at zero when the runtime is created and + /// increases by one each time a task is woken from **inside** of the + /// runtime on the given worker. This usually means that a task is spawned + /// or notified from within a runtime thread and will be queued on the + /// worker-local queue. + /// + /// The counter is monotonically increasing. It is never decremented or + /// reset to zero. + /// + /// # Arguments + /// + /// `worker` is the index of the worker being queried. The given value must + /// be between 0 and `num_workers()`. The index uniquely identifies a single + /// worker and will continue to identify the worker throughout the lifetime + /// of the runtime instance. + /// + /// # Panics + /// + /// The method panics when `worker` represents an invalid worker, i.e. is + /// greater than or equal to `num_workers()`. 
+ /// + /// # Examples + /// + /// ``` + /// use tokio::runtime::Handle; + /// + /// #[tokio::main] + /// async fn main() { + /// let metrics = Handle::current().metrics(); + /// + /// let n = metrics.worker_local_schedule_count(0); + /// println!("{} tasks were scheduled on the worker's local queue", n); + /// } + /// ``` + pub fn worker_local_schedule_count(&self, worker: usize) -> u64 { + self.handle + .inner + .worker_metrics(worker) + .local_schedule_count + .load(Relaxed) + } - /// Returns the number of times the given worker thread saturated its local - /// queue. - /// - /// This metric only applies to the **multi-threaded** scheduler. - /// - /// The worker overflow count starts at zero when the runtime is created and - /// increases by one each time the worker attempts to schedule a task - /// locally, but its local queue is full. When this happens, half of the - /// local queue is moved to the injection queue. - /// - /// The counter is monotonically increasing. It is never decremented or - /// reset to zero. - /// - /// # Arguments - /// - /// `worker` is the index of the worker being queried. The given value must - /// be between 0 and `num_workers()`. The index uniquely identifies a single - /// worker and will continue to identify the worker throughout the lifetime - /// of the runtime instance. - /// - /// # Panics - /// - /// The method panics when `worker` represents an invalid worker, i.e. is - /// greater than or equal to `num_workers()`. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.worker_overflow_count(0); - /// println!("worker 0 has overflowed its queue {} times", n); - /// } - /// ``` - pub fn worker_overflow_count(&self, worker: usize) -> u64 { - self.handle - .inner - .worker_metrics(worker) - .overflow_count - .load(Relaxed) + /// Returns the number of times the given worker thread saturated its local + /// queue. + /// + /// This metric only applies to the **multi-threaded** scheduler. + /// + /// The worker overflow count starts at zero when the runtime is created and + /// increases by one each time the worker attempts to schedule a task + /// locally, but its local queue is full. When this happens, half of the + /// local queue is moved to the injection queue. + /// + /// The counter is monotonically increasing. It is never decremented or + /// reset to zero. + /// + /// # Arguments + /// + /// `worker` is the index of the worker being queried. The given value must + /// be between 0 and `num_workers()`. The index uniquely identifies a single + /// worker and will continue to identify the worker throughout the lifetime + /// of the runtime instance. + /// + /// # Panics + /// + /// The method panics when `worker` represents an invalid worker, i.e. is + /// greater than or equal to `num_workers()`. 
+ /// + /// # Examples + /// + /// ``` + /// use tokio::runtime::Handle; + /// + /// #[tokio::main] + /// async fn main() { + /// let metrics = Handle::current().metrics(); + /// + /// let n = metrics.worker_overflow_count(0); + /// println!("worker 0 has overflowed its queue {} times", n); + /// } + /// ``` + pub fn worker_overflow_count(&self, worker: usize) -> u64 { + self.handle + .inner + .worker_metrics(worker) + .overflow_count + .load(Relaxed) + } } /// Returns the number of tasks currently scheduled in the runtime's @@ -704,110 +708,112 @@ impl RuntimeMetrics { .unwrap_or_default() } - /// Returns the number of times the given worker polled tasks with a poll - /// duration within the given bucket's range. - /// - /// Each worker maintains its own histogram and the counts for each bucket - /// starts at zero when the runtime is created. Each time the worker polls a - /// task, it tracks the duration the task poll time took and increments the - /// associated bucket by 1. - /// - /// Each bucket is a monotonically increasing counter. It is never - /// decremented or reset to zero. - /// - /// # Arguments - /// - /// `worker` is the index of the worker being queried. The given value must - /// be between 0 and `num_workers()`. The index uniquely identifies a single - /// worker and will continue to identify the worker throughout the lifetime - /// of the runtime instance. - /// - /// `bucket` is the index of the bucket being queried. The bucket is scoped - /// to the worker. The range represented by the bucket can be queried by - /// calling [`poll_count_histogram_bucket_range()`]. Each worker maintains - /// identical bucket ranges. - /// - /// # Panics - /// - /// The method panics when `worker` represents an invalid worker, i.e. is - /// greater than or equal to `num_workers()` or if `bucket` represents an - /// invalid bucket. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::{self, Handle}; - /// - /// fn main() { - /// runtime::Builder::new_current_thread() - /// .enable_metrics_poll_count_histogram() - /// .build() - /// .unwrap() - /// .block_on(async { - /// let metrics = Handle::current().metrics(); - /// let buckets = metrics.poll_count_histogram_num_buckets(); - /// - /// for worker in 0..metrics.num_workers() { - /// for i in 0..buckets { - /// let count = metrics.poll_count_histogram_bucket_count(worker, i); - /// println!("Poll count {}", count); - /// } - /// } - /// }); - /// } - /// ``` - /// - /// [`poll_count_histogram_bucket_range()`]: crate::runtime::RuntimeMetrics::poll_count_histogram_bucket_range - #[track_caller] - pub fn poll_count_histogram_bucket_count(&self, worker: usize, bucket: usize) -> u64 { - self.handle - .inner - .worker_metrics(worker) - .poll_count_histogram - .as_ref() - .map(|histogram| histogram.get(bucket)) - .unwrap_or_default() - } + cfg_64bit_metrics! { + /// Returns the number of times the given worker polled tasks with a poll + /// duration within the given bucket's range. + /// + /// Each worker maintains its own histogram and the counts for each bucket + /// starts at zero when the runtime is created. Each time the worker polls a + /// task, it tracks the duration the task poll time took and increments the + /// associated bucket by 1. + /// + /// Each bucket is a monotonically increasing counter. It is never + /// decremented or reset to zero. + /// + /// # Arguments + /// + /// `worker` is the index of the worker being queried. The given value must + /// be between 0 and `num_workers()`. The index uniquely identifies a single + /// worker and will continue to identify the worker throughout the lifetime + /// of the runtime instance. + /// + /// `bucket` is the index of the bucket being queried. The bucket is scoped + /// to the worker. 
The range represented by the bucket can be queried by + /// calling [`poll_count_histogram_bucket_range()`]. Each worker maintains + /// identical bucket ranges. + /// + /// # Panics + /// + /// The method panics when `worker` represents an invalid worker, i.e. is + /// greater than or equal to `num_workers()` or if `bucket` represents an + /// invalid bucket. + /// + /// # Examples + /// + /// ``` + /// use tokio::runtime::{self, Handle}; + /// + /// fn main() { + /// runtime::Builder::new_current_thread() + /// .enable_metrics_poll_count_histogram() + /// .build() + /// .unwrap() + /// .block_on(async { + /// let metrics = Handle::current().metrics(); + /// let buckets = metrics.poll_count_histogram_num_buckets(); + /// + /// for worker in 0..metrics.num_workers() { + /// for i in 0..buckets { + /// let count = metrics.poll_count_histogram_bucket_count(worker, i); + /// println!("Poll count {}", count); + /// } + /// } + /// }); + /// } + /// ``` + /// + /// [`poll_count_histogram_bucket_range()`]: crate::runtime::RuntimeMetrics::poll_count_histogram_bucket_range + #[track_caller] + pub fn poll_count_histogram_bucket_count(&self, worker: usize, bucket: usize) -> u64 { + self.handle + .inner + .worker_metrics(worker) + .poll_count_histogram + .as_ref() + .map(|histogram| histogram.get(bucket)) + .unwrap_or_default() + } - /// Returns the mean duration of task polls, in nanoseconds. - /// - /// This is an exponentially weighted moving average. Currently, this metric - /// is only provided by the multi-threaded runtime. - /// - /// # Arguments - /// - /// `worker` is the index of the worker being queried. The given value must - /// be between 0 and `num_workers()`. The index uniquely identifies a single - /// worker and will continue to identify the worker throughout the lifetime - /// of the runtime instance. - /// - /// # Panics - /// - /// The method panics when `worker` represents an invalid worker, i.e. is - /// greater than or equal to `num_workers()`. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.worker_mean_poll_time(0); - /// println!("worker 0 has a mean poll time of {:?}", n); - /// } - /// ``` - #[track_caller] - pub fn worker_mean_poll_time(&self, worker: usize) -> Duration { - let nanos = self - .handle - .inner - .worker_metrics(worker) - .mean_poll_time - .load(Relaxed); - Duration::from_nanos(nanos) + /// Returns the mean duration of task polls, in nanoseconds. + /// + /// This is an exponentially weighted moving average. Currently, this metric + /// is only provided by the multi-threaded runtime. + /// + /// # Arguments + /// + /// `worker` is the index of the worker being queried. The given value must + /// be between 0 and `num_workers()`. The index uniquely identifies a single + /// worker and will continue to identify the worker throughout the lifetime + /// of the runtime instance. + /// + /// # Panics + /// + /// The method panics when `worker` represents an invalid worker, i.e. is + /// greater than or equal to `num_workers()`. + /// + /// # Examples + /// + /// ``` + /// use tokio::runtime::Handle; + /// + /// #[tokio::main] + /// async fn main() { + /// let metrics = Handle::current().metrics(); + /// + /// let n = metrics.worker_mean_poll_time(0); + /// println!("worker 0 has a mean poll time of {:?}", n); + /// } + /// ``` + #[track_caller] + pub fn worker_mean_poll_time(&self, worker: usize) -> Duration { + let nanos = self + .handle + .inner + .worker_metrics(worker) + .mean_poll_time + .load(Relaxed); + Duration::from_nanos(nanos) + } } /// Returns the number of tasks currently scheduled in the blocking @@ -837,88 +843,90 @@ impl RuntimeMetrics { cfg_net! { impl RuntimeMetrics { - /// Returns the number of file descriptors that have been registered with the - /// runtime's I/O driver. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let registered_fds = metrics.io_driver_fd_registered_count(); - /// println!("{} fds have been registered with the runtime's I/O driver.", registered_fds); - /// - /// let deregistered_fds = metrics.io_driver_fd_deregistered_count(); - /// - /// let current_fd_count = registered_fds - deregistered_fds; - /// println!("{} fds are currently registered by the runtime's I/O driver.", current_fd_count); - /// } - /// ``` - pub fn io_driver_fd_registered_count(&self) -> u64 { - self.with_io_driver_metrics(|m| { - m.fd_registered_count.load(Relaxed) - }) - } + cfg_64bit_metrics! { + /// Returns the number of file descriptors that have been registered with the + /// runtime's I/O driver. + /// + /// # Examples + /// + /// ``` + /// use tokio::runtime::Handle; + /// + /// #[tokio::main] + /// async fn main() { + /// let metrics = Handle::current().metrics(); + /// + /// let registered_fds = metrics.io_driver_fd_registered_count(); + /// println!("{} fds have been registered with the runtime's I/O driver.", registered_fds); + /// + /// let deregistered_fds = metrics.io_driver_fd_deregistered_count(); + /// + /// let current_fd_count = registered_fds - deregistered_fds; + /// println!("{} fds are currently registered by the runtime's I/O driver.", current_fd_count); + /// } + /// ``` + pub fn io_driver_fd_registered_count(&self) -> u64 { + self.with_io_driver_metrics(|m| { + m.fd_registered_count.load(Relaxed) + }) + } - /// Returns the number of file descriptors that have been deregistered by the - /// runtime's I/O driver. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.io_driver_fd_deregistered_count(); - /// println!("{} fds have been deregistered by the runtime's I/O driver.", n); - /// } - /// ``` - pub fn io_driver_fd_deregistered_count(&self) -> u64 { - self.with_io_driver_metrics(|m| { - m.fd_deregistered_count.load(Relaxed) - }) - } + /// Returns the number of file descriptors that have been deregistered by the + /// runtime's I/O driver. + /// + /// # Examples + /// + /// ``` + /// use tokio::runtime::Handle; + /// + /// #[tokio::main] + /// async fn main() { + /// let metrics = Handle::current().metrics(); + /// + /// let n = metrics.io_driver_fd_deregistered_count(); + /// println!("{} fds have been deregistered by the runtime's I/O driver.", n); + /// } + /// ``` + pub fn io_driver_fd_deregistered_count(&self) -> u64 { + self.with_io_driver_metrics(|m| { + m.fd_deregistered_count.load(Relaxed) + }) + } - /// Returns the number of ready events processed by the runtime's - /// I/O driver. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.io_driver_ready_count(); - /// println!("{} ready events processed by the runtime's I/O driver.", n); - /// } - /// ``` - pub fn io_driver_ready_count(&self) -> u64 { - self.with_io_driver_metrics(|m| m.ready_count.load(Relaxed)) - } + /// Returns the number of ready events processed by the runtime's + /// I/O driver. 
+ /// + /// # Examples + /// + /// ``` + /// use tokio::runtime::Handle; + /// + /// #[tokio::main] + /// async fn main() { + /// let metrics = Handle::current().metrics(); + /// + /// let n = metrics.io_driver_ready_count(); + /// println!("{} ready events processed by the runtime's I/O driver.", n); + /// } + /// ``` + pub fn io_driver_ready_count(&self) -> u64 { + self.with_io_driver_metrics(|m| m.ready_count.load(Relaxed)) + } - fn with_io_driver_metrics(&self, f: F) -> u64 - where - F: Fn(&super::IoDriverMetrics) -> u64, - { - // TODO: Investigate if this should return 0, most of our metrics always increase - // thus this breaks that guarantee. - self.handle - .inner - .driver() - .io - .as_ref() - .map(|h| f(&h.metrics)) - .unwrap_or(0) + fn with_io_driver_metrics(&self, f: F) -> u64 + where + F: Fn(&super::IoDriverMetrics) -> u64, + { + // TODO: Investigate if this should return 0, most of our metrics always increase + // thus this breaks that guarantee. + self.handle + .inner + .driver() + .io + .as_ref() + .map(|h| f(&h.metrics)) + .unwrap_or(0) + } } } } diff --git a/tokio/src/runtime/metrics/scheduler.rs b/tokio/src/runtime/metrics/scheduler.rs index d9f8edfaabc..8b64eced38c 100644 --- a/tokio/src/runtime/metrics/scheduler.rs +++ b/tokio/src/runtime/metrics/scheduler.rs @@ -1,4 +1,5 @@ -use crate::loom::sync::atomic::{AtomicU64, Ordering::Relaxed}; +use crate::loom::sync::atomic::Ordering::Relaxed; +use crate::util::metric_atomics::MetricAtomicU64; /// Retrieves metrics from the Tokio runtime. /// @@ -10,25 +11,25 @@ use crate::loom::sync::atomic::{AtomicU64, Ordering::Relaxed}; #[derive(Debug)] pub(crate) struct SchedulerMetrics { /// Number of tasks that are scheduled from outside the runtime. 
- pub(super) remote_schedule_count: AtomicU64, - pub(super) budget_forced_yield_count: AtomicU64, + pub(super) remote_schedule_count: MetricAtomicU64, + pub(super) budget_forced_yield_count: MetricAtomicU64, } impl SchedulerMetrics { pub(crate) fn new() -> SchedulerMetrics { SchedulerMetrics { - remote_schedule_count: AtomicU64::new(0), - budget_forced_yield_count: AtomicU64::new(0), + remote_schedule_count: MetricAtomicU64::new(0), + budget_forced_yield_count: MetricAtomicU64::new(0), } } /// Increment the number of tasks scheduled externally pub(crate) fn inc_remote_schedule_count(&self) { - self.remote_schedule_count.fetch_add(1, Relaxed); + self.remote_schedule_count.add(1, Relaxed); } /// Increment the number of tasks forced to yield due to budget exhaustion pub(crate) fn inc_budget_forced_yield_count(&self) { - self.budget_forced_yield_count.fetch_add(1, Relaxed); + self.budget_forced_yield_count.add(1, Relaxed); } } diff --git a/tokio/src/runtime/metrics/worker.rs b/tokio/src/runtime/metrics/worker.rs index cefe4d2abc6..fc7c4e6dfe4 100644 --- a/tokio/src/runtime/metrics/worker.rs +++ b/tokio/src/runtime/metrics/worker.rs @@ -1,7 +1,8 @@ +use crate::loom::sync::atomic::AtomicUsize; use crate::loom::sync::atomic::Ordering::Relaxed; -use crate::loom::sync::atomic::{AtomicU64, AtomicUsize}; use crate::runtime::metrics::Histogram; use crate::runtime::Config; +use crate::util::metric_atomics::MetricAtomicU64; /// Retrieve runtime worker metrics. /// @@ -14,31 +15,31 @@ use crate::runtime::Config; #[repr(align(128))] pub(crate) struct WorkerMetrics { /// Number of times the worker parked. - pub(crate) park_count: AtomicU64, + pub(crate) park_count: MetricAtomicU64, /// Number of times the worker woke then parked again without doing work. - pub(crate) noop_count: AtomicU64, + pub(crate) noop_count: MetricAtomicU64, /// Number of tasks the worker stole. 
- pub(crate) steal_count: AtomicU64, + pub(crate) steal_count: MetricAtomicU64, /// Number of times the worker stole - pub(crate) steal_operations: AtomicU64, + pub(crate) steal_operations: MetricAtomicU64, /// Number of tasks the worker polled. - pub(crate) poll_count: AtomicU64, + pub(crate) poll_count: MetricAtomicU64, /// EWMA task poll time, in nanoseconds. - pub(crate) mean_poll_time: AtomicU64, + pub(crate) mean_poll_time: MetricAtomicU64, /// Amount of time the worker spent doing work vs. parking. - pub(crate) busy_duration_total: AtomicU64, + pub(crate) busy_duration_total: MetricAtomicU64, /// Number of tasks scheduled for execution on the worker's local queue. - pub(crate) local_schedule_count: AtomicU64, + pub(crate) local_schedule_count: MetricAtomicU64, /// Number of tasks moved from the local queue to the global queue to free space. - pub(crate) overflow_count: AtomicU64, + pub(crate) overflow_count: MetricAtomicU64, /// Number of tasks currently in the local queue. Used only by the /// current-thread scheduler. 
@@ -60,15 +61,15 @@ impl WorkerMetrics { pub(crate) fn new() -> WorkerMetrics { WorkerMetrics { - park_count: AtomicU64::new(0), - noop_count: AtomicU64::new(0), - steal_count: AtomicU64::new(0), - steal_operations: AtomicU64::new(0), - poll_count: AtomicU64::new(0), - mean_poll_time: AtomicU64::new(0), - overflow_count: AtomicU64::new(0), - busy_duration_total: AtomicU64::new(0), - local_schedule_count: AtomicU64::new(0), + park_count: MetricAtomicU64::new(0), + noop_count: MetricAtomicU64::new(0), + steal_count: MetricAtomicU64::new(0), + steal_operations: MetricAtomicU64::new(0), + poll_count: MetricAtomicU64::new(0), + mean_poll_time: MetricAtomicU64::new(0), + overflow_count: MetricAtomicU64::new(0), + busy_duration_total: MetricAtomicU64::new(0), + local_schedule_count: MetricAtomicU64::new(0), queue_depth: AtomicUsize::new(0), poll_count_histogram: None, } diff --git a/tokio/src/runtime/tests/queue.rs b/tokio/src/runtime/tests/queue.rs index 5df92b7a291..55429b1b11b 100644 --- a/tokio/src/runtime/tests/queue.rs +++ b/tokio/src/runtime/tests/queue.rs @@ -7,18 +7,21 @@ use std::time::Duration; #[allow(unused)] macro_rules! 
assert_metrics { - ($stats:ident, $field:ident == $v:expr) => {{ - use crate::runtime::WorkerMetrics; - use std::sync::atomic::Ordering::Relaxed; + ($stats:ident, $field:ident == $v:expr) => { + #[cfg(target_has_atomic = "64")] + { + use crate::runtime::WorkerMetrics; + use std::sync::atomic::Ordering::Relaxed; - let worker = WorkerMetrics::new(); - $stats.submit(&worker); + let worker = WorkerMetrics::new(); + $stats.submit(&worker); - let expect = $v; - let actual = worker.$field.load(Relaxed); + let expect = $v; + let actual = worker.$field.load(Relaxed); - assert!(actual == expect, "expect = {}; actual = {}", expect, actual) - }}; + assert!(actual == expect, "expect = {}; actual = {}", expect, actual) + } + }; } fn new_stats() -> Stats { diff --git a/tokio/src/util/metric_atomics.rs b/tokio/src/util/metric_atomics.rs new file mode 100644 index 00000000000..3c080298ecf --- /dev/null +++ b/tokio/src/util/metric_atomics.rs @@ -0,0 +1,47 @@ +use std::sync::atomic::Ordering; + +cfg_64bit_metrics! { + use std::sync::atomic::AtomicU64; +} + +/// `AtomicU64` that is is a no-op on platforms without 64-bit atomics +/// +/// When used on platforms without 64-bit atomics, writes to this are no-ops. +/// The `load` method is only defined when 64-bit atomics are available. +#[derive(Debug, Default)] +pub(crate) struct MetricAtomicU64 { + #[cfg(target_has_atomic = "64")] + value: AtomicU64, +} + +// some of these are currently only used behind cfg_unstable +#[allow(dead_code)] +impl MetricAtomicU64 { + // Load is only defined when supported + cfg_64bit_metrics! { + pub(crate) fn load(&self, ordering: Ordering) -> u64 { + self.value.load(ordering) + } + } + + cfg_64bit_metrics! 
{ + pub(crate) fn store(&self, val: u64, ordering: Ordering) { + self.value.store(val, ordering) + } + + pub(crate) fn new(value: u64) -> Self { + Self { value: AtomicU64::new(value) } + } + + pub(crate) fn add(&self, value: u64, ordering: Ordering) { + self.value.fetch_add(value, ordering); + } + } + + cfg_no_64bit_metrics! { + pub(crate) fn store(&self, _val: u64, _ordering: Ordering) { } + // on platforms without 64-bit atomics, fetch-add returns unit + pub(crate) fn add(&self, _value: u64, _ordering: Ordering) { } + pub(crate) fn new(_value: u64) -> Self { Self { } } + } +} diff --git a/tokio/src/util/mod.rs b/tokio/src/util/mod.rs index 7cf371195ff..3722b0bc2d4 100644 --- a/tokio/src/util/mod.rs +++ b/tokio/src/util/mod.rs @@ -5,6 +5,8 @@ cfg_io_driver! { #[cfg(feature = "rt")] pub(crate) mod atomic_cell; +pub(crate) mod metric_atomics; + #[cfg(any(feature = "rt", feature = "signal", feature = "process"))] pub(crate) mod once_cell; diff --git a/tokio/tests/rt_basic.rs b/tokio/tests/rt_basic.rs index a5204bd83f7..4c558c90e28 100644 --- a/tokio/tests/rt_basic.rs +++ b/tokio/tests/rt_basic.rs @@ -19,7 +19,7 @@ mod support { macro_rules! 
cfg_metrics { ($($t:tt)*) => { - #[cfg(tokio_unstable)] + #[cfg(all(tokio_unstable, target_has_atomic = "64"))] { $( $t )* } diff --git a/tokio/tests/rt_metrics.rs b/tokio/tests/rt_metrics.rs index 7f0c9ad8052..6a710a46ce6 100644 --- a/tokio/tests/rt_metrics.rs +++ b/tokio/tests/rt_metrics.rs @@ -1,6 +1,11 @@ #![allow(unknown_lints, unexpected_cfgs)] #![warn(rust_2018_idioms)] -#![cfg(all(feature = "full", tokio_unstable, not(target_os = "wasi")))] +#![cfg(all( + feature = "full", + tokio_unstable, + not(target_os = "wasi"), + target_has_atomic = "64" +))] use std::future::Future; use std::sync::{Arc, Barrier, Mutex}; diff --git a/tokio/tests/rt_threaded.rs b/tokio/tests/rt_threaded.rs index 26690550f93..a4742dd234e 100644 --- a/tokio/tests/rt_threaded.rs +++ b/tokio/tests/rt_threaded.rs @@ -18,7 +18,7 @@ use std::task::{Context, Poll, Waker}; macro_rules! cfg_metrics { ($($t:tt)*) => { - #[cfg(tokio_unstable)] + #[cfg(all(tokio_unstable, target_has_atomic = "64"))] { $( $t )* } diff --git a/tokio/tests/rt_threaded_alt.rs b/tokio/tests/rt_threaded_alt.rs index 9eed1fe78b6..33af45e68bb 100644 --- a/tokio/tests/rt_threaded_alt.rs +++ b/tokio/tests/rt_threaded_alt.rs @@ -19,7 +19,7 @@ use std::task::{Context, Poll, Waker}; macro_rules! cfg_metrics { ($($t:tt)*) => { - #[cfg(tokio_unstable)] + #[cfg(all(tokio_unstable, target_has_atomic = "64"))] { $( $t )* } From 12920cea45e81bf831eb7174a3337b6d06b0b27d Mon Sep 17 00:00:00 2001 From: Alan Somers Date: Sat, 25 May 2024 09:30:36 -0600 Subject: [PATCH 145/162] tests: update nix and mio-aio dependencies (#6552) nix 0.29.0 and mio-aio 0.9.0 use I/O Safety. 
Co-authored-by: Frederick Mayle --- Cargo.toml | 1 - tokio/Cargo.toml | 4 ++-- tokio/tests/io_async_fd.rs | 31 +++++++++---------------------- tokio/tests/io_poll_aio.rs | 13 +++++++------ tokio/tests/net_unix_pipe.rs | 4 +--- 5 files changed, 19 insertions(+), 34 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index d8ac248189d..2238deac71c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,4 +17,3 @@ members = [ [workspace.metadata.spellcheck] config = "spellcheck.toml" - diff --git a/tokio/Cargo.toml b/tokio/Cargo.toml index 020cc1e4ac2..e46e274c47a 100644 --- a/tokio/Cargo.toml +++ b/tokio/Cargo.toml @@ -118,7 +118,7 @@ signal-hook-registry = { version = "1.1.1", optional = true } [target.'cfg(unix)'.dev-dependencies] libc = { version = "0.2.149" } -nix = { version = "0.27.1", default-features = false, features = ["fs", "socket"] } +nix = { version = "0.29.0", default-features = false, features = ["aio", "fs", "socket"] } [target.'cfg(windows)'.dependencies.windows-sys] version = "0.48" @@ -149,7 +149,7 @@ rand = "0.8.0" wasm-bindgen-test = "0.3.0" [target.'cfg(target_os = "freebsd")'.dev-dependencies] -mio-aio = { version = "0.8.0", features = ["tokio"] } +mio-aio = { version = "0.9.0", features = ["tokio"] } [target.'cfg(loom)'.dev-dependencies] loom = { version = "0.7", features = ["futures", "checkpoint"] } diff --git a/tokio/tests/io_async_fd.rs b/tokio/tests/io_async_fd.rs index ea798b3067a..4e6f630eb93 100644 --- a/tokio/tests/io_async_fd.rs +++ b/tokio/tests/io_async_fd.rs @@ -1,7 +1,7 @@ #![warn(rust_2018_idioms)] #![cfg(all(unix, feature = "full"))] -use std::os::unix::io::{AsRawFd, IntoRawFd, RawFd}; +use std::os::unix::io::{AsRawFd, RawFd}; use std::sync::{ atomic::{AtomicBool, Ordering}, Arc, @@ -13,7 +13,7 @@ use std::{ task::{Context, Waker}, }; -use nix::unistd::{close, read, write}; +use nix::unistd::{read, write}; use futures::poll; @@ -58,18 +58,18 @@ impl TestWaker { #[derive(Debug)] struct FileDescriptor { - fd: RawFd, + fd: 
std::os::fd::OwnedFd, } impl AsRawFd for FileDescriptor { fn as_raw_fd(&self) -> RawFd { - self.fd + self.fd.as_raw_fd() } } impl Read for &FileDescriptor { fn read(&mut self, buf: &mut [u8]) -> io::Result { - read(self.fd, buf).map_err(io::Error::from) + read(self.fd.as_raw_fd(), buf).map_err(io::Error::from) } } @@ -81,7 +81,7 @@ impl Read for FileDescriptor { impl Write for &FileDescriptor { fn write(&mut self, buf: &[u8]) -> io::Result { - write(self.fd, buf).map_err(io::Error::from) + write(&self.fd, buf).map_err(io::Error::from) } fn flush(&mut self) -> io::Result<()> { @@ -99,12 +99,6 @@ impl Write for FileDescriptor { } } -impl Drop for FileDescriptor { - fn drop(&mut self) { - let _ = close(self.fd); - } -} - fn set_nonblocking(fd: RawFd) { use nix::fcntl::{OFlag, F_GETFL, F_SETFL}; @@ -133,17 +127,10 @@ fn socketpair() -> (FileDescriptor, FileDescriptor) { SockFlag::empty(), ) .expect("socketpair"); - let fds = ( - FileDescriptor { - fd: fd_a.into_raw_fd(), - }, - FileDescriptor { - fd: fd_b.into_raw_fd(), - }, - ); + let fds = (FileDescriptor { fd: fd_a }, FileDescriptor { fd: fd_b }); - set_nonblocking(fds.0.fd); - set_nonblocking(fds.1.fd); + set_nonblocking(fds.0.fd.as_raw_fd()); + set_nonblocking(fds.1.fd.as_raw_fd()); fds } diff --git a/tokio/tests/io_poll_aio.rs b/tokio/tests/io_poll_aio.rs index e83859f5c98..242887eb60f 100644 --- a/tokio/tests/io_poll_aio.rs +++ b/tokio/tests/io_poll_aio.rs @@ -5,6 +5,7 @@ use mio_aio::{AioFsyncMode, SourceApi}; use std::{ future::Future, io, mem, + os::fd::AsFd, os::unix::io::{AsRawFd, RawFd}, pin::{pin, Pin}, task::{Context, Poll}, @@ -17,9 +18,9 @@ mod aio { use super::*; #[derive(Debug)] - struct TokioSource(mio_aio::Source); + struct TokioSource<'fd>(mio_aio::Source>); - impl AioSource for TokioSource { + impl<'fd> AioSource for TokioSource<'fd> { fn register(&mut self, kq: RawFd, token: usize) { self.0.register_raw(kq, token) } @@ -29,9 +30,9 @@ mod aio { } /// A very crude implementation of an AIO-based 
future - struct FsyncFut(Aio); + struct FsyncFut<'fd>(Aio>); - impl FsyncFut { + impl<'fd> FsyncFut<'fd> { pub fn submit(self: Pin<&mut Self>) -> io::Result<()> { let p = unsafe { self.map_unchecked_mut(|s| &mut s.0 .0) }; match p.submit() { @@ -41,7 +42,7 @@ mod aio { } } - impl Future for FsyncFut { + impl<'fd> Future for FsyncFut<'fd> { type Output = io::Result<()>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { @@ -134,7 +135,7 @@ mod aio { #[tokio::test] async fn fsync() { let f = tempfile().unwrap(); - let fd = f.as_raw_fd(); + let fd = f.as_fd(); let mode = AioFsyncMode::O_SYNC; let source = TokioSource(mio_aio::Fsync::fsync(fd, mode, 0)); let poll_aio = Aio::new_for_aio(source).unwrap(); diff --git a/tokio/tests/net_unix_pipe.rs b/tokio/tests/net_unix_pipe.rs index 6706880ed1b..37b8b41bd31 100644 --- a/tokio/tests/net_unix_pipe.rs +++ b/tokio/tests/net_unix_pipe.rs @@ -489,12 +489,10 @@ async fn anon_pipe_spawn_echo() -> std::io::Result<()> { #[cfg(target_os = "linux")] async fn anon_pipe_from_owned_fd() -> std::io::Result<()> { use nix::fcntl::OFlag; - use std::os::unix::io::{FromRawFd, OwnedFd}; const DATA: &[u8] = b"this is some data to write to the pipe"; - let fds = nix::unistd::pipe2(OFlag::O_CLOEXEC | OFlag::O_NONBLOCK)?; - let (rx_fd, tx_fd) = unsafe { (OwnedFd::from_raw_fd(fds.0), OwnedFd::from_raw_fd(fds.1)) }; + let (rx_fd, tx_fd) = nix::unistd::pipe2(OFlag::O_CLOEXEC | OFlag::O_NONBLOCK)?; let mut rx = pipe::Receiver::from_owned_fd(rx_fd)?; let mut tx = pipe::Sender::from_owned_fd(tx_fd)?; From 0a85a9662d30139c779ea49a59be30db0f292b5d Mon Sep 17 00:00:00 2001 From: Maxwell Borden Date: Sat, 25 May 2024 22:14:59 +0200 Subject: [PATCH 146/162] net: implement `Clone` for `NamedPipeInfo` (#6586) --- tokio/src/net/windows/named_pipe.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio/src/net/windows/named_pipe.rs b/tokio/src/net/windows/named_pipe.rs index 98e63f0c450..81148482537 100644 --- 
a/tokio/src/net/windows/named_pipe.rs +++ b/tokio/src/net/windows/named_pipe.rs @@ -2626,7 +2626,7 @@ pub enum PipeEnd { /// Information about a named pipe. /// /// Constructed through [`NamedPipeServer::info`] or [`NamedPipeClient::info`]. -#[derive(Debug)] +#[derive(Debug, Clone)] #[non_exhaustive] pub struct PipeInfo { /// Indicates the mode of a named pipe. From 2890d0c3db4f595330d8d223bfbfeb81e205b048 Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Sat, 25 May 2024 22:49:17 +0100 Subject: [PATCH 147/162] metrics: fix blocking_threads count (#6551) --- tokio/src/runtime/scheduler/multi_thread/handle/metrics.rs | 5 ++++- .../src/runtime/scheduler/multi_thread_alt/handle/metrics.rs | 5 ++++- tokio/tests/rt_metrics.rs | 5 +++++ 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/tokio/src/runtime/scheduler/multi_thread/handle/metrics.rs b/tokio/src/runtime/scheduler/multi_thread/handle/metrics.rs index 838694fc89e..3d614b478c5 100644 --- a/tokio/src/runtime/scheduler/multi_thread/handle/metrics.rs +++ b/tokio/src/runtime/scheduler/multi_thread/handle/metrics.rs @@ -8,7 +8,10 @@ impl Handle { } pub(crate) fn num_blocking_threads(&self) -> usize { - self.blocking_spawner.num_threads() + // workers are currently spawned using spawn_blocking + self.blocking_spawner + .num_threads() + .saturating_sub(self.num_workers()) } pub(crate) fn num_idle_blocking_threads(&self) -> usize { diff --git a/tokio/src/runtime/scheduler/multi_thread_alt/handle/metrics.rs b/tokio/src/runtime/scheduler/multi_thread_alt/handle/metrics.rs index 838694fc89e..3d614b478c5 100644 --- a/tokio/src/runtime/scheduler/multi_thread_alt/handle/metrics.rs +++ b/tokio/src/runtime/scheduler/multi_thread_alt/handle/metrics.rs @@ -8,7 +8,10 @@ impl Handle { } pub(crate) fn num_blocking_threads(&self) -> usize { - self.blocking_spawner.num_threads() + // workers are currently spawned using spawn_blocking + self.blocking_spawner + .num_threads() + .saturating_sub(self.num_workers()) } 
pub(crate) fn num_idle_blocking_threads(&self) -> usize { diff --git a/tokio/tests/rt_metrics.rs b/tokio/tests/rt_metrics.rs index 6a710a46ce6..2446deb6b41 100644 --- a/tokio/tests/rt_metrics.rs +++ b/tokio/tests/rt_metrics.rs @@ -31,6 +31,11 @@ fn num_blocking_threads() { assert_eq!(0, rt.metrics().num_blocking_threads()); let _ = rt.block_on(rt.spawn_blocking(move || {})); assert_eq!(1, rt.metrics().num_blocking_threads()); + + let rt = threaded(); + assert_eq!(0, rt.metrics().num_blocking_threads()); + let _ = rt.block_on(rt.spawn_blocking(move || {})); + assert_eq!(1, rt.metrics().num_blocking_threads()); } #[test] From 3a6fdc05681841c30fe4e27b63924c7908ea4634 Mon Sep 17 00:00:00 2001 From: Josh McKinney Date: Sun, 26 May 2024 02:29:27 -0700 Subject: [PATCH 148/162] license: fix formatting and remove year in licenses (#6451) --- LICENSE | 40 ++++++++++++++++++---------------------- tokio-macros/LICENSE | 29 ++--------------------------- tokio-stream/LICENSE | 40 ++++++++++++++++++---------------------- tokio-test/LICENSE | 40 ++++++++++++++++++---------------------- tokio-util/LICENSE | 40 ++++++++++++++++++---------------------- tokio/LICENSE | 40 ++++++++++++++++++---------------------- 6 files changed, 92 insertions(+), 137 deletions(-) diff --git a/LICENSE b/LICENSE index 8bdf6bd60d3..f0dbcf4b45d 100644 --- a/LICENSE +++ b/LICENSE @@ -1,25 +1,21 @@ -Copyright (c) 2023 Tokio Contributors +MIT License -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: +Copyright (c) Tokio Contributors -The above copyright notice and this permission notice -shall be included in all copies or 
substantial portions -of the Software. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/tokio-macros/LICENSE b/tokio-macros/LICENSE index 12d1037fd0c..c4d82b91d6d 100644 --- a/tokio-macros/LICENSE +++ b/tokio-macros/LICENSE @@ -1,32 +1,7 @@ -Copyright (c) 2023 Tokio Contributors - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
- -The MIT License (MIT) +MIT License Copyright (c) 2019 Yoshua Wuyts +Copyright (c) Tokio Contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/tokio-stream/LICENSE b/tokio-stream/LICENSE index 8bdf6bd60d3..f0dbcf4b45d 100644 --- a/tokio-stream/LICENSE +++ b/tokio-stream/LICENSE @@ -1,25 +1,21 @@ -Copyright (c) 2023 Tokio Contributors +MIT License -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: +Copyright (c) Tokio Contributors -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
+The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/tokio-test/LICENSE b/tokio-test/LICENSE index 8bdf6bd60d3..f0dbcf4b45d 100644 --- a/tokio-test/LICENSE +++ b/tokio-test/LICENSE @@ -1,25 +1,21 @@ -Copyright (c) 2023 Tokio Contributors +MIT License -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: +Copyright (c) Tokio Contributors -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. 
+Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/tokio-util/LICENSE b/tokio-util/LICENSE index 8bdf6bd60d3..f0dbcf4b45d 100644 --- a/tokio-util/LICENSE +++ b/tokio-util/LICENSE @@ -1,25 +1,21 @@ -Copyright (c) 2023 Tokio Contributors +MIT License -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: +Copyright (c) Tokio Contributors -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/tokio/LICENSE b/tokio/LICENSE index 8bdf6bd60d3..f0dbcf4b45d 100644 --- a/tokio/LICENSE +++ b/tokio/LICENSE @@ -1,25 +1,21 @@ -Copyright (c) 2023 Tokio Contributors +MIT License -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: +Copyright (c) Tokio Contributors -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. From 6c42d286b343f498ce29de2aab9358a0aedb081c Mon Sep 17 00:00:00 2001 From: Maxwell Borden Date: Sun, 26 May 2024 12:25:19 +0200 Subject: [PATCH 149/162] net: fix misleading `NamedPipeServer` example (#6590) The previous NamedPipeServer doc example seemed to imply that the return type of `NamedPipeServer::connect()` was a `Future>>`, however, `connect()` returns a `Future>>`. The following line of code reopening the pipe would shadow the newly connected pipe immediately, making the following spawned task pointless. Hopefully these changes make it more clear what should be happening in the example. --- tokio/src/net/windows/named_pipe.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tokio/src/net/windows/named_pipe.rs b/tokio/src/net/windows/named_pipe.rs index 81148482537..0b312f896f1 100644 --- a/tokio/src/net/windows/named_pipe.rs +++ b/tokio/src/net/windows/named_pipe.rs @@ -75,7 +75,8 @@ use self::doc::*; /// let server = tokio::spawn(async move { /// loop { /// // Wait for a client to connect. 
-/// let connected = server.connect().await?; +/// server.connect().await?; +/// let connected_client = server; /// /// // Construct the next server to be connected before sending the one /// // we already have of onto a task. This ensures that the server From 9e00b266e08d263c497dc9de57d9acbc049ae69b Mon Sep 17 00:00:00 2001 From: Pau Freixes Date: Mon, 27 May 2024 13:18:34 +0200 Subject: [PATCH 150/162] sync: add `Notify::notify_last` (#6520) --- tokio/src/sync/notify.rs | 82 +++++++++++++++++++++++++++-------- tokio/src/util/linked_list.rs | 20 +++++++++ tokio/tests/sync_notify.rs | 75 ++++++++++++++++++++++++++++++++ 3 files changed, 160 insertions(+), 17 deletions(-) diff --git a/tokio/src/sync/notify.rs b/tokio/src/sync/notify.rs index 879b89b4069..5d344f70411 100644 --- a/tokio/src/sync/notify.rs +++ b/tokio/src/sync/notify.rs @@ -223,7 +223,9 @@ struct Waiter { /// `Notify`, or it is exclusively owned by the enclosing `Waiter`. waker: UnsafeCell>, - /// Notification for this waiter. + /// Notification for this waiter. Uses 2 bits to store if and how was + /// notified, 1 bit for storing if it was woken up using FIFO or LIFO, and + /// the rest of it is unused. /// * if it's `None`, then `waker` is protected by the `waiters` lock. /// * if it's `Some`, then `waker` is exclusively owned by the /// enclosing `Waiter` and can be accessed without locking. @@ -253,13 +255,16 @@ generate_addr_of_methods! { } // No notification. -const NOTIFICATION_NONE: usize = 0; +const NOTIFICATION_NONE: usize = 0b000; // Notification type used by `notify_one`. -const NOTIFICATION_ONE: usize = 1; +const NOTIFICATION_ONE: usize = 0b001; + +// Notification type used by `notify_last`. +const NOTIFICATION_LAST: usize = 0b101; // Notification type used by `notify_waiters`. -const NOTIFICATION_ALL: usize = 2; +const NOTIFICATION_ALL: usize = 0b010; /// Notification for a `Waiter`. 
/// This struct is equivalent to `Option`, but uses @@ -275,13 +280,20 @@ impl AtomicNotification { /// Store-release a notification. /// This method should be called exactly once. fn store_release(&self, notification: Notification) { - self.0.store(notification as usize, Release); + let data: usize = match notification { + Notification::All => NOTIFICATION_ALL, + Notification::One(NotifyOneStrategy::Fifo) => NOTIFICATION_ONE, + Notification::One(NotifyOneStrategy::Lifo) => NOTIFICATION_LAST, + }; + self.0.store(data, Release); } fn load(&self, ordering: Ordering) -> Option { - match self.0.load(ordering) { + let data = self.0.load(ordering); + match data { NOTIFICATION_NONE => None, - NOTIFICATION_ONE => Some(Notification::One), + NOTIFICATION_ONE => Some(Notification::One(NotifyOneStrategy::Fifo)), + NOTIFICATION_LAST => Some(Notification::One(NotifyOneStrategy::Lifo)), NOTIFICATION_ALL => Some(Notification::All), _ => unreachable!(), } @@ -296,11 +308,18 @@ impl AtomicNotification { } } +#[derive(Debug, PartialEq, Eq)] +#[repr(usize)] +enum NotifyOneStrategy { + Fifo, + Lifo, +} + #[derive(Debug, PartialEq, Eq)] #[repr(usize)] enum Notification { - One = NOTIFICATION_ONE, - All = NOTIFICATION_ALL, + One(NotifyOneStrategy), + All, } /// List used in `Notify::notify_waiters`. It wraps a guarded linked list @@ -521,7 +540,7 @@ impl Notify { } } - /// Notifies a waiting task. + /// Notifies the first waiting task. /// /// If a task is currently waiting, that task is notified. Otherwise, a /// permit is stored in this `Notify` value and the **next** call to @@ -558,6 +577,23 @@ impl Notify { // Alias for old name in 0.x #[cfg_attr(docsrs, doc(alias = "notify"))] pub fn notify_one(&self) { + self.notify_with_strategy(NotifyOneStrategy::Fifo); + } + + /// Notifies the last waiting task. + /// + /// This function behaves similar to `notify_one`. The only difference is that it wakes + /// the most recently added waiter instead of the oldest waiter. 
+ /// + /// Check the [`notify_one()`] documentation for more info and + /// examples. + /// + /// [`notify_one()`]: Notify::notify_one + pub fn notify_last(&self) { + self.notify_with_strategy(NotifyOneStrategy::Lifo); + } + + fn notify_with_strategy(&self, strategy: NotifyOneStrategy) { // Load the current state let mut curr = self.state.load(SeqCst); @@ -585,7 +621,7 @@ impl Notify { // transition out of WAITING while the lock is held. curr = self.state.load(SeqCst); - if let Some(waker) = notify_locked(&mut waiters, &self.state, curr) { + if let Some(waker) = notify_locked(&mut waiters, &self.state, curr, strategy) { drop(waiters); waker.wake(); } @@ -708,7 +744,12 @@ impl Default for Notify { impl UnwindSafe for Notify {} impl RefUnwindSafe for Notify {} -fn notify_locked(waiters: &mut WaitList, state: &AtomicUsize, curr: usize) -> Option { +fn notify_locked( + waiters: &mut WaitList, + state: &AtomicUsize, + curr: usize, + strategy: NotifyOneStrategy, +) -> Option { match get_state(curr) { EMPTY | NOTIFIED => { let res = state.compare_exchange(curr, set_state(curr, NOTIFIED), SeqCst, SeqCst); @@ -728,8 +769,11 @@ fn notify_locked(waiters: &mut WaitList, state: &AtomicUsize, curr: usize) -> Op // concurrently change as holding the lock is required to // transition **out** of `WAITING`. // - // Get a pending waiter - let waiter = waiters.pop_back().unwrap(); + // Get a pending waiter using one of the available dequeue strategies. + let waiter = match strategy { + NotifyOneStrategy::Fifo => waiters.pop_back().unwrap(), + NotifyOneStrategy::Lifo => waiters.pop_front().unwrap(), + }; // Safety: we never make mutable references to waiters. let waiter = unsafe { waiter.as_ref() }; @@ -738,7 +782,9 @@ fn notify_locked(waiters: &mut WaitList, state: &AtomicUsize, curr: usize) -> Op let waker = unsafe { waiter.waker.with_mut(|waker| (*waker).take()) }; // This waiter is unlinked and will not be shared ever again, release it. 
- waiter.notification.store_release(Notification::One); + waiter + .notification + .store_release(Notification::One(strategy)); if waiters.is_empty() { // As this the **final** waiter in the list, the state @@ -1137,8 +1183,10 @@ impl Drop for Notified<'_> { // See if the node was notified but not received. In this case, if // the notification was triggered via `notify_one`, it must be sent // to the next waiter. - if notification == Some(Notification::One) { - if let Some(waker) = notify_locked(&mut waiters, ¬ify.state, notify_state) { + if let Some(Notification::One(strategy)) = notification { + if let Some(waker) = + notify_locked(&mut waiters, ¬ify.state, notify_state, strategy) + { drop(waiters); waker.wake(); } diff --git a/tokio/src/util/linked_list.rs b/tokio/src/util/linked_list.rs index ab20292e21d..0274849b0c6 100644 --- a/tokio/src/util/linked_list.rs +++ b/tokio/src/util/linked_list.rs @@ -137,6 +137,26 @@ impl LinkedList { } } + /// Removes the first element from a list and returns it, or None if it is + /// empty. + pub(crate) fn pop_front(&mut self) -> Option { + unsafe { + let head = self.head?; + self.head = L::pointers(head).as_ref().get_next(); + + if let Some(new_head) = L::pointers(head).as_ref().get_next() { + L::pointers(new_head).as_mut().set_prev(None); + } else { + self.tail = None; + } + + L::pointers(head).as_mut().set_prev(None); + L::pointers(head).as_mut().set_next(None); + + Some(L::from_raw(head)) + } + } + /// Removes the last element from a list and returns it, or None if it is /// empty. 
pub(crate) fn pop_back(&mut self) -> Option { diff --git a/tokio/tests/sync_notify.rs b/tokio/tests/sync_notify.rs index 01b8ce86537..13b3f921e98 100644 --- a/tokio/tests/sync_notify.rs +++ b/tokio/tests/sync_notify.rs @@ -21,6 +21,38 @@ fn notify_notified_one() { assert_ready!(notified.poll()); } +#[test] +fn notify_multi_notified_one() { + let notify = Notify::new(); + let mut notified1 = spawn(async { notify.notified().await }); + let mut notified2 = spawn(async { notify.notified().await }); + + // add two waiters into the queue + assert_pending!(notified1.poll()); + assert_pending!(notified2.poll()); + + // should wakeup the first one + notify.notify_one(); + assert_ready!(notified1.poll()); + assert_pending!(notified2.poll()); +} + +#[test] +fn notify_multi_notified_last() { + let notify = Notify::new(); + let mut notified1 = spawn(async { notify.notified().await }); + let mut notified2 = spawn(async { notify.notified().await }); + + // add two waiters into the queue + assert_pending!(notified1.poll()); + assert_pending!(notified2.poll()); + + // should wakeup the last one + notify.notify_last(); + assert_pending!(notified1.poll()); + assert_ready!(notified2.poll()); +} + #[test] fn notified_one_notify() { let notify = Notify::new(); @@ -105,6 +137,49 @@ fn notified_multi_notify_drop_one() { assert_ready!(notified2.poll()); } +#[test] +fn notified_multi_notify_one_drop() { + let notify = Notify::new(); + let mut notified1 = spawn(async { notify.notified().await }); + let mut notified2 = spawn(async { notify.notified().await }); + let mut notified3 = spawn(async { notify.notified().await }); + + // add waiters by order of poll execution + assert_pending!(notified1.poll()); + assert_pending!(notified2.poll()); + assert_pending!(notified3.poll()); + + // by default fifo + notify.notify_one(); + + drop(notified1); + + // next waiter should be the one to be to woken up + assert_ready!(notified2.poll()); + assert_pending!(notified3.poll()); +} + +#[test] +fn 
notified_multi_notify_last_drop() { + let notify = Notify::new(); + let mut notified1 = spawn(async { notify.notified().await }); + let mut notified2 = spawn(async { notify.notified().await }); + let mut notified3 = spawn(async { notify.notified().await }); + + // add waiters by order of poll execution + assert_pending!(notified1.poll()); + assert_pending!(notified2.poll()); + assert_pending!(notified3.poll()); + + notify.notify_last(); + + drop(notified3); + + // latest waiter added should be the one to woken up + assert_ready!(notified2.poll()); + assert_pending!(notified1.poll()); +} + #[test] fn notify_in_drop_after_wake() { use futures::task::ArcWake; From 86658bd87dc470f8e36eb6b893cc403820cfb7ee Mon Sep 17 00:00:00 2001 From: Russell Cohen Date: Tue, 28 May 2024 15:55:20 -0400 Subject: [PATCH 151/162] metrics: stabilize `RuntimeMetrics::worker_count` (#6556) --- tokio/src/macros/cfg.rs | 4 +- tokio/src/runtime/blocking/pool.rs | 4 +- tokio/src/runtime/builder.rs | 2 +- tokio/src/runtime/coop.rs | 4 +- tokio/src/runtime/handle.rs | 16 +- tokio/src/runtime/io/metrics.rs | 2 +- tokio/src/runtime/metrics/histogram.rs | 2 +- tokio/src/runtime/metrics/io.rs | 3 +- tokio/src/runtime/metrics/mod.rs | 10 +- tokio/src/runtime/metrics/runtime.rs | 1309 +++++++++-------- tokio/src/runtime/metrics/worker.rs | 6 +- tokio/src/runtime/mod.rs | 15 +- tokio/src/runtime/runtime.rs | 16 +- .../runtime/scheduler/current_thread/mod.rs | 2 +- tokio/src/runtime/scheduler/inject.rs | 2 +- tokio/src/runtime/scheduler/mod.rs | 24 +- .../runtime/scheduler/multi_thread/handle.rs | 4 +- .../scheduler/multi_thread/handle/metrics.rs | 58 +- .../runtime/scheduler/multi_thread/queue.rs | 2 +- .../runtime/scheduler/multi_thread/worker.rs | 2 +- .../scheduler/multi_thread_alt/handle.rs | 2 +- .../scheduler/multi_thread_alt/queue.rs | 2 +- .../scheduler/multi_thread_alt/worker.rs | 2 +- tokio/src/runtime/tests/queue.rs | 8 +- 24 files changed, 749 insertions(+), 752 deletions(-) diff --git 
a/tokio/src/macros/cfg.rs b/tokio/src/macros/cfg.rs index f44599ff47a..8a72476f7c4 100644 --- a/tokio/src/macros/cfg.rs +++ b/tokio/src/macros/cfg.rs @@ -215,7 +215,7 @@ macro_rules! cfg_macros { } } -macro_rules! cfg_metrics { +macro_rules! cfg_unstable_metrics { ($($item:item)*) => { $( #[cfg(tokio_unstable)] @@ -245,7 +245,7 @@ macro_rules! cfg_no_64bit_metrics { } } -macro_rules! cfg_not_metrics { +macro_rules! cfg_not_unstable_metrics { ($($item:item)*) => { $( #[cfg(not(tokio_unstable))] diff --git a/tokio/src/runtime/blocking/pool.rs b/tokio/src/runtime/blocking/pool.rs index c74aea76568..3757079f329 100644 --- a/tokio/src/runtime/blocking/pool.rs +++ b/tokio/src/runtime/blocking/pool.rs @@ -40,7 +40,7 @@ impl SpawnerMetrics { self.num_idle_threads.load(Ordering::Relaxed) } - cfg_metrics! { + cfg_unstable_metrics! { fn queue_depth(&self) -> usize { self.queue_depth.load(Ordering::Relaxed) } @@ -474,7 +474,7 @@ impl Spawner { } } -cfg_metrics! { +cfg_unstable_metrics! { impl Spawner { pub(crate) fn num_threads(&self) -> usize { self.inner.metrics.num_threads() diff --git a/tokio/src/runtime/builder.rs b/tokio/src/runtime/builder.rs index 05f736d3e50..519c7d01413 100644 --- a/tokio/src/runtime/builder.rs +++ b/tokio/src/runtime/builder.rs @@ -957,7 +957,7 @@ impl Builder { } } - cfg_metrics! { + cfg_unstable_metrics! { /// Enables tracking the distribution of task poll times. /// /// Task poll times are not instrumented by default as doing so requires diff --git a/tokio/src/runtime/coop.rs b/tokio/src/runtime/coop.rs index d9f7ff2af29..f2afa75c9c4 100644 --- a/tokio/src/runtime/coop.rs +++ b/tokio/src/runtime/coop.rs @@ -197,7 +197,7 @@ cfg_coop! { } cfg_rt! { - cfg_metrics! { + cfg_unstable_metrics! { #[inline(always)] fn inc_budget_forced_yield_count() { let _ = context::with_current(|handle| { @@ -206,7 +206,7 @@ cfg_coop! { } } - cfg_not_metrics! { + cfg_not_unstable_metrics! 
{ #[inline(always)] fn inc_budget_forced_yield_count() {} } diff --git a/tokio/src/runtime/handle.rs b/tokio/src/runtime/handle.rs index 01d210cd36f..5691a6e3bd2 100644 --- a/tokio/src/runtime/handle.rs +++ b/tokio/src/runtime/handle.rs @@ -1,6 +1,6 @@ #[cfg(tokio_unstable)] use crate::runtime; -use crate::runtime::{context, scheduler, RuntimeFlavor}; +use crate::runtime::{context, scheduler, RuntimeFlavor, RuntimeMetrics}; /// Handle to the runtime. /// @@ -393,17 +393,11 @@ impl Handle { owned_id.into() } } -} - -cfg_metrics! { - use crate::runtime::RuntimeMetrics; - impl Handle { - /// Returns a view that lets you get information about how the runtime - /// is performing. - pub fn metrics(&self) -> RuntimeMetrics { - RuntimeMetrics::new(self.clone()) - } + /// Returns a view that lets you get information about how the runtime + /// is performing. + pub fn metrics(&self) -> RuntimeMetrics { + RuntimeMetrics::new(self.clone()) } } diff --git a/tokio/src/runtime/io/metrics.rs b/tokio/src/runtime/io/metrics.rs index ec341efe680..e7a01bc2f46 100644 --- a/tokio/src/runtime/io/metrics.rs +++ b/tokio/src/runtime/io/metrics.rs @@ -17,7 +17,7 @@ cfg_not_rt_and_metrics_and_net! { cfg_net! { cfg_rt! { - cfg_metrics! { + cfg_unstable_metrics! 
{ pub(crate) use crate::runtime::IoDriverMetrics; } } diff --git a/tokio/src/runtime/metrics/histogram.rs b/tokio/src/runtime/metrics/histogram.rs index f75ffa3b495..4cfd769a94e 100644 --- a/tokio/src/runtime/metrics/histogram.rs +++ b/tokio/src/runtime/metrics/histogram.rs @@ -1,5 +1,5 @@ -use crate::loom::sync::atomic::Ordering::Relaxed; use crate::util::metric_atomics::MetricAtomicU64; +use std::sync::atomic::Ordering::Relaxed; use std::cmp; use std::ops::Range; diff --git a/tokio/src/runtime/metrics/io.rs b/tokio/src/runtime/metrics/io.rs index 674fca5faec..9fdf3c96694 100644 --- a/tokio/src/runtime/metrics/io.rs +++ b/tokio/src/runtime/metrics/io.rs @@ -1,6 +1,7 @@ #![cfg_attr(not(feature = "net"), allow(dead_code))] -use crate::{loom::sync::atomic::Ordering::Relaxed, util::metric_atomics::MetricAtomicU64}; +use crate::util::metric_atomics::MetricAtomicU64; +use std::sync::atomic::Ordering::Relaxed; #[derive(Default)] pub(crate) struct IoDriverMetrics { diff --git a/tokio/src/runtime/metrics/mod.rs b/tokio/src/runtime/metrics/mod.rs index 88be4a5211f..295c97cce88 100644 --- a/tokio/src/runtime/metrics/mod.rs +++ b/tokio/src/runtime/metrics/mod.rs @@ -8,7 +8,10 @@ //! [unstable]: crate#unstable-features #![allow(clippy::module_inception)] -cfg_metrics! { +mod runtime; +pub use runtime::RuntimeMetrics; + +cfg_unstable_metrics! { mod batch; pub(crate) use batch::MetricsBatch; @@ -17,9 +20,6 @@ cfg_metrics! { #[allow(unreachable_pub)] // rust-lang/rust#57411 pub use histogram::HistogramScale; - mod runtime; - #[allow(unreachable_pub)] // rust-lang/rust#57411 - pub use runtime::RuntimeMetrics; mod scheduler; pub(crate) use scheduler::SchedulerMetrics; @@ -33,7 +33,7 @@ cfg_metrics! { } } -cfg_not_metrics! { +cfg_not_unstable_metrics! 
{ mod mock; pub(crate) use mock::{SchedulerMetrics, WorkerMetrics, MetricsBatch, HistogramBuilder}; diff --git a/tokio/src/runtime/metrics/runtime.rs b/tokio/src/runtime/metrics/runtime.rs index 865a6406a6a..8d30f66f6ff 100644 --- a/tokio/src/runtime/metrics/runtime.rs +++ b/tokio/src/runtime/metrics/runtime.rs @@ -1,10 +1,12 @@ use crate::runtime::Handle; -use std::ops::Range; -cfg_64bit_metrics! { - use std::sync::atomic::Ordering::Relaxed; +cfg_unstable_metrics! { + use std::ops::Range; + cfg_64bit_metrics! { + use std::sync::atomic::Ordering::Relaxed; + } + use std::time::Duration; } -use std::time::Duration; /// Handle to the runtime's metrics. /// @@ -45,221 +47,354 @@ impl RuntimeMetrics { self.handle.inner.num_workers() } - /// Returns the number of additional threads spawned by the runtime. - /// - /// The number of workers is set by configuring `max_blocking_threads` on - /// `runtime::Builder`. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let _ = tokio::task::spawn_blocking(move || { - /// // Stand-in for compute-heavy work or using synchronous APIs - /// 1 + 1 - /// }).await; - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.num_blocking_threads(); - /// println!("Runtime has created {} threads", n); - /// } - /// ``` - pub fn num_blocking_threads(&self) -> usize { - self.handle.inner.num_blocking_threads() - } + cfg_unstable_metrics! { - /// Returns the number of active tasks in the runtime. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.active_tasks_count(); - /// println!("Runtime has {} active tasks", n); - /// } - /// ``` - pub fn active_tasks_count(&self) -> usize { - self.handle.inner.active_tasks_count() - } + /// Returns the number of additional threads spawned by the runtime. 
+ /// + /// The number of workers is set by configuring `max_blocking_threads` on + /// `runtime::Builder`. + /// + /// # Examples + /// + /// ``` + /// use tokio::runtime::Handle; + /// + /// #[tokio::main] + /// async fn main() { + /// let _ = tokio::task::spawn_blocking(move || { + /// // Stand-in for compute-heavy work or using synchronous APIs + /// 1 + 1 + /// }).await; + /// let metrics = Handle::current().metrics(); + /// + /// let n = metrics.num_blocking_threads(); + /// println!("Runtime has created {} threads", n); + /// } + /// ``` + pub fn num_blocking_threads(&self) -> usize { + self.handle.inner.num_blocking_threads() + } - /// Returns the number of idle threads, which have spawned by the runtime - /// for `spawn_blocking` calls. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let _ = tokio::task::spawn_blocking(move || { - /// // Stand-in for compute-heavy work or using synchronous APIs - /// 1 + 1 - /// }).await; - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.num_idle_blocking_threads(); - /// println!("Runtime has {} idle blocking thread pool threads", n); - /// } - /// ``` - pub fn num_idle_blocking_threads(&self) -> usize { - self.handle.inner.num_idle_blocking_threads() - } + /// Returns the number of active tasks in the runtime. + /// + /// # Examples + /// + /// ``` + /// use tokio::runtime::Handle; + /// + /// #[tokio::main] + /// async fn main() { + /// let metrics = Handle::current().metrics(); + /// + /// let n = metrics.active_tasks_count(); + /// println!("Runtime has {} active tasks", n); + /// } + /// ``` + pub fn active_tasks_count(&self) -> usize { + self.handle.inner.active_tasks_count() + } - cfg_64bit_metrics! { - /// Returns the number of tasks scheduled from **outside** of the runtime. 
- /// - /// The remote schedule count starts at zero when the runtime is created and - /// increases by one each time a task is woken from **outside** of the - /// runtime. This usually means that a task is spawned or notified from a - /// non-runtime thread and must be queued using the Runtime's injection - /// queue, which tends to be slower. - /// - /// The counter is monotonically increasing. It is never decremented or - /// reset to zero. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.remote_schedule_count(); - /// println!("{} tasks were scheduled from outside the runtime", n); - /// } - /// ``` - pub fn remote_schedule_count(&self) -> u64 { - self.handle - .inner - .scheduler_metrics() - .remote_schedule_count - .load(Relaxed) - } + /// Returns the number of idle threads, which have spawned by the runtime + /// for `spawn_blocking` calls. + /// + /// # Examples + /// + /// ``` + /// use tokio::runtime::Handle; + /// + /// #[tokio::main] + /// async fn main() { + /// let _ = tokio::task::spawn_blocking(move || { + /// // Stand-in for compute-heavy work or using synchronous APIs + /// 1 + 1 + /// }).await; + /// let metrics = Handle::current().metrics(); + /// + /// let n = metrics.num_idle_blocking_threads(); + /// println!("Runtime has {} idle blocking thread pool threads", n); + /// } + /// ``` + pub fn num_idle_blocking_threads(&self) -> usize { + self.handle.inner.num_idle_blocking_threads() + } - /// Returns the number of times that tasks have been forced to yield back to the scheduler - /// after exhausting their task budgets. - /// - /// This count starts at zero when the runtime is created and increases by one each time a task yields due to exhausting its budget. - /// - /// The counter is monotonically increasing. It is never decremented or - /// reset to zero. 
- pub fn budget_forced_yield_count(&self) -> u64 { - self.handle - .inner - .scheduler_metrics() - .budget_forced_yield_count - .load(Relaxed) - } + cfg_64bit_metrics! { + /// Returns the number of tasks scheduled from **outside** of the runtime. + /// + /// The remote schedule count starts at zero when the runtime is created and + /// increases by one each time a task is woken from **outside** of the + /// runtime. This usually means that a task is spawned or notified from a + /// non-runtime thread and must be queued using the Runtime's injection + /// queue, which tends to be slower. + /// + /// The counter is monotonically increasing. It is never decremented or + /// reset to zero. + /// + /// # Examples + /// + /// ``` + /// use tokio::runtime::Handle; + /// + /// #[tokio::main] + /// async fn main() { + /// let metrics = Handle::current().metrics(); + /// + /// let n = metrics.remote_schedule_count(); + /// println!("{} tasks were scheduled from outside the runtime", n); + /// } + /// ``` + pub fn remote_schedule_count(&self) -> u64 { + self.handle + .inner + .scheduler_metrics() + .remote_schedule_count + .load(Relaxed) + } - /// Returns the total number of times the given worker thread has parked. - /// - /// The worker park count starts at zero when the runtime is created and - /// increases by one each time the worker parks the thread waiting for new - /// inbound events to process. This usually means the worker has processed - /// all pending work and is currently idle. - /// - /// The counter is monotonically increasing. It is never decremented or - /// reset to zero. - /// - /// # Arguments - /// - /// `worker` is the index of the worker being queried. The given value must - /// be between 0 and `num_workers()`. The index uniquely identifies a single - /// worker and will continue to identify the worker throughout the lifetime - /// of the runtime instance. 
- /// - /// # Panics - /// - /// The method panics when `worker` represents an invalid worker, i.e. is - /// greater than or equal to `num_workers()`. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.worker_park_count(0); - /// println!("worker 0 parked {} times", n); - /// } - /// ``` - pub fn worker_park_count(&self, worker: usize) -> u64 { - self.handle - .inner - .worker_metrics(worker) - .park_count - .load(Relaxed) - } + /// Returns the number of times that tasks have been forced to yield back to the scheduler + /// after exhausting their task budgets. + /// + /// This count starts at zero when the runtime is created and increases by one each time a task yields due to exhausting its budget. + /// + /// The counter is monotonically increasing. It is never decremented or + /// reset to zero. + pub fn budget_forced_yield_count(&self) -> u64 { + self.handle + .inner + .scheduler_metrics() + .budget_forced_yield_count + .load(Relaxed) + } - /// Returns the number of times the given worker thread unparked but - /// performed no work before parking again. - /// - /// The worker no-op count starts at zero when the runtime is created and - /// increases by one each time the worker unparks the thread but finds no - /// new work and goes back to sleep. This indicates a false-positive wake up. - /// - /// The counter is monotonically increasing. It is never decremented or - /// reset to zero. - /// - /// # Arguments - /// - /// `worker` is the index of the worker being queried. The given value must - /// be between 0 and `num_workers()`. The index uniquely identifies a single - /// worker and will continue to identify the worker throughout the lifetime - /// of the runtime instance. - /// - /// # Panics - /// - /// The method panics when `worker` represents an invalid worker, i.e. 
is - /// greater than or equal to `num_workers()`. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.worker_noop_count(0); - /// println!("worker 0 had {} no-op unparks", n); - /// } - /// ``` - pub fn worker_noop_count(&self, worker: usize) -> u64 { - self.handle - .inner - .worker_metrics(worker) - .noop_count - .load(Relaxed) - } + /// Returns the total number of times the given worker thread has parked. + /// + /// The worker park count starts at zero when the runtime is created and + /// increases by one each time the worker parks the thread waiting for new + /// inbound events to process. This usually means the worker has processed + /// all pending work and is currently idle. + /// + /// The counter is monotonically increasing. It is never decremented or + /// reset to zero. + /// + /// # Arguments + /// + /// `worker` is the index of the worker being queried. The given value must + /// be between 0 and `num_workers()`. The index uniquely identifies a single + /// worker and will continue to identify the worker throughout the lifetime + /// of the runtime instance. + /// + /// # Panics + /// + /// The method panics when `worker` represents an invalid worker, i.e. is + /// greater than or equal to `num_workers()`. + /// + /// # Examples + /// + /// ``` + /// use tokio::runtime::Handle; + /// + /// #[tokio::main] + /// async fn main() { + /// let metrics = Handle::current().metrics(); + /// + /// let n = metrics.worker_park_count(0); + /// println!("worker 0 parked {} times", n); + /// } + /// ``` + pub fn worker_park_count(&self, worker: usize) -> u64 { + self.handle + .inner + .worker_metrics(worker) + .park_count + .load(Relaxed) + } - /// Returns the number of tasks the given worker thread stole from - /// another worker thread. 
- /// - /// This metric only applies to the **multi-threaded** runtime and will - /// always return `0` when using the current thread runtime. + /// Returns the number of times the given worker thread unparked but + /// performed no work before parking again. + /// + /// The worker no-op count starts at zero when the runtime is created and + /// increases by one each time the worker unparks the thread but finds no + /// new work and goes back to sleep. This indicates a false-positive wake up. + /// + /// The counter is monotonically increasing. It is never decremented or + /// reset to zero. + /// + /// # Arguments + /// + /// `worker` is the index of the worker being queried. The given value must + /// be between 0 and `num_workers()`. The index uniquely identifies a single + /// worker and will continue to identify the worker throughout the lifetime + /// of the runtime instance. + /// + /// # Panics + /// + /// The method panics when `worker` represents an invalid worker, i.e. is + /// greater than or equal to `num_workers()`. + /// + /// # Examples + /// + /// ``` + /// use tokio::runtime::Handle; + /// + /// #[tokio::main] + /// async fn main() { + /// let metrics = Handle::current().metrics(); + /// + /// let n = metrics.worker_noop_count(0); + /// println!("worker 0 had {} no-op unparks", n); + /// } + /// ``` + pub fn worker_noop_count(&self, worker: usize) -> u64 { + self.handle + .inner + .worker_metrics(worker) + .noop_count + .load(Relaxed) + } + + /// Returns the number of tasks the given worker thread stole from + /// another worker thread. + /// + /// This metric only applies to the **multi-threaded** runtime and will + /// always return `0` when using the current thread runtime. + /// + /// The worker steal count starts at zero when the runtime is created and + /// increases by `N` each time the worker has processed its scheduled queue + /// and successfully steals `N` more pending tasks from another worker. 
+ /// + /// The counter is monotonically increasing. It is never decremented or + /// reset to zero. + /// + /// # Arguments + /// + /// `worker` is the index of the worker being queried. The given value must + /// be between 0 and `num_workers()`. The index uniquely identifies a single + /// worker and will continue to identify the worker throughout the lifetime + /// of the runtime instance. + /// + /// # Panics + /// + /// The method panics when `worker` represents an invalid worker, i.e. is + /// greater than or equal to `num_workers()`. + /// + /// # Examples + /// + /// ``` + /// use tokio::runtime::Handle; + /// + /// #[tokio::main] + /// async fn main() { + /// let metrics = Handle::current().metrics(); + /// + /// let n = metrics.worker_steal_count(0); + /// println!("worker 0 has stolen {} tasks", n); + /// } + /// ``` + pub fn worker_steal_count(&self, worker: usize) -> u64 { + self.handle + .inner + .worker_metrics(worker) + .steal_count + .load(Relaxed) + } + + /// Returns the number of times the given worker thread stole tasks from + /// another worker thread. + /// + /// This metric only applies to the **multi-threaded** runtime and will + /// always return `0` when using the current thread runtime. + /// + /// The worker steal count starts at zero when the runtime is created and + /// increases by one each time the worker has processed its scheduled queue + /// and successfully steals more pending tasks from another worker. + /// + /// The counter is monotonically increasing. It is never decremented or + /// reset to zero. + /// + /// # Arguments + /// + /// `worker` is the index of the worker being queried. The given value must + /// be between 0 and `num_workers()`. The index uniquely identifies a single + /// worker and will continue to identify the worker throughout the lifetime + /// of the runtime instance. + /// + /// # Panics + /// + /// The method panics when `worker` represents an invalid worker, i.e. 
is + /// greater than or equal to `num_workers()`. + /// + /// # Examples + /// + /// ``` + /// use tokio::runtime::Handle; + /// + /// #[tokio::main] + /// async fn main() { + /// let metrics = Handle::current().metrics(); + /// + /// let n = metrics.worker_steal_operations(0); + /// println!("worker 0 has stolen tasks {} times", n); + /// } + /// ``` + pub fn worker_steal_operations(&self, worker: usize) -> u64 { + self.handle + .inner + .worker_metrics(worker) + .steal_operations + .load(Relaxed) + } + + /// Returns the number of tasks the given worker thread has polled. + /// + /// The worker poll count starts at zero when the runtime is created and + /// increases by one each time the worker polls a scheduled task. + /// + /// The counter is monotonically increasing. It is never decremented or + /// reset to zero. + /// + /// # Arguments + /// + /// `worker` is the index of the worker being queried. The given value must + /// be between 0 and `num_workers()`. The index uniquely identifies a single + /// worker and will continue to identify the worker throughout the lifetime + /// of the runtime instance. + /// + /// # Panics + /// + /// The method panics when `worker` represents an invalid worker, i.e. is + /// greater than or equal to `num_workers()`. + /// + /// # Examples + /// + /// ``` + /// use tokio::runtime::Handle; + /// + /// #[tokio::main] + /// async fn main() { + /// let metrics = Handle::current().metrics(); + /// + /// let n = metrics.worker_poll_count(0); + /// println!("worker 0 has polled {} tasks", n); + /// } + /// ``` + pub fn worker_poll_count(&self, worker: usize) -> u64 { + self.handle + .inner + .worker_metrics(worker) + .poll_count + .load(Relaxed) + } + + /// Returns the amount of time the given worker thread has been busy. 
/// - /// The worker steal count starts at zero when the runtime is created and - /// increases by `N` each time the worker has processed its scheduled queue - /// and successfully steals `N` more pending tasks from another worker. + /// The worker busy duration starts at zero when the runtime is created and + /// increases whenever the worker is spending time processing work. Using + /// this value can indicate the load of the given worker. If a lot of time + /// is spent busy, then the worker is under load and will check for inbound + /// events less often. /// - /// The counter is monotonically increasing. It is never decremented or - /// reset to zero. + /// The timer is monotonically increasing. It is never decremented or reset + /// to zero. /// /// # Arguments /// @@ -282,27 +417,28 @@ impl RuntimeMetrics { /// async fn main() { /// let metrics = Handle::current().metrics(); /// - /// let n = metrics.worker_steal_count(0); - /// println!("worker 0 has stolen {} tasks", n); + /// let n = metrics.worker_total_busy_duration(0); + /// println!("worker 0 was busy for a total of {:?}", n); /// } /// ``` - pub fn worker_steal_count(&self, worker: usize) -> u64 { - self.handle + pub fn worker_total_busy_duration(&self, worker: usize) -> Duration { + let nanos = self + .handle .inner .worker_metrics(worker) - .steal_count - .load(Relaxed) + .busy_duration_total + .load(Relaxed); + Duration::from_nanos(nanos) } - /// Returns the number of times the given worker thread stole tasks from - /// another worker thread. + /// Returns the number of tasks scheduled from **within** the runtime on the + /// given worker's local queue. /// - /// This metric only applies to the **multi-threaded** runtime and will - /// always return `0` when using the current thread runtime. 
- /// - /// The worker steal count starts at zero when the runtime is created and - /// increases by one each time the worker has processed its scheduled queue - /// and successfully steals more pending tasks from another worker. + /// The local schedule count starts at zero when the runtime is created and + /// increases by one each time a task is woken from **inside** of the + /// runtime on the given worker. This usually means that a task is spawned + /// or notified from within a runtime thread and will be queued on the + /// worker-local queue. /// /// The counter is monotonically increasing. It is never decremented or /// reset to zero. @@ -328,22 +464,27 @@ impl RuntimeMetrics { /// async fn main() { /// let metrics = Handle::current().metrics(); /// - /// let n = metrics.worker_steal_operations(0); - /// println!("worker 0 has stolen tasks {} times", n); + /// let n = metrics.worker_local_schedule_count(0); + /// println!("{} tasks were scheduled on the worker's local queue", n); /// } /// ``` - pub fn worker_steal_operations(&self, worker: usize) -> u64 { + pub fn worker_local_schedule_count(&self, worker: usize) -> u64 { self.handle .inner .worker_metrics(worker) - .steal_operations + .local_schedule_count .load(Relaxed) } - /// Returns the number of tasks the given worker thread has polled. + /// Returns the number of times the given worker thread saturated its local + /// queue. + /// + /// This metric only applies to the **multi-threaded** scheduler. /// - /// The worker poll count starts at zero when the runtime is created and - /// increases by one each time the worker polls a scheduled task. + /// The worker overflow count starts at zero when the runtime is created and + /// increases by one each time the worker attempts to schedule a task + /// locally, but its local queue is full. When this happens, half of the + /// local queue is moved to the injection queue. /// /// The counter is monotonically increasing. 
It is never decremented or /// reset to zero. @@ -369,40 +510,27 @@ impl RuntimeMetrics { /// async fn main() { /// let metrics = Handle::current().metrics(); /// - /// let n = metrics.worker_poll_count(0); - /// println!("worker 0 has polled {} tasks", n); + /// let n = metrics.worker_overflow_count(0); + /// println!("worker 0 has overflowed its queue {} times", n); /// } /// ``` - pub fn worker_poll_count(&self, worker: usize) -> u64 { + pub fn worker_overflow_count(&self, worker: usize) -> u64 { self.handle .inner .worker_metrics(worker) - .poll_count + .overflow_count .load(Relaxed) } + } - /// Returns the amount of time the given worker thread has been busy. - /// - /// The worker busy duration starts at zero when the runtime is created and - /// increases whenever the worker is spending time processing work. Using - /// this value can indicate the load of the given worker. If a lot of time - /// is spent busy, then the worker is under load and will check for inbound - /// events less often. - /// - /// The timer is monotonically increasing. It is never decremented or reset - /// to zero. + /// Returns the number of tasks currently scheduled in the runtime's + /// injection queue. /// - /// # Arguments - /// - /// `worker` is the index of the worker being queried. The given value must - /// be between 0 and `num_workers()`. The index uniquely identifies a single - /// worker and will continue to identify the worker throughout the lifetime - /// of the runtime instance. - /// - /// # Panics - /// - /// The method panics when `worker` represents an invalid worker, i.e. is - /// greater than or equal to `num_workers()`. + /// Tasks that are spawned or notified from a non-runtime thread are + /// scheduled using the runtime's injection queue. This metric returns the + /// **current** number of tasks pending in the injection queue. As such, the + /// returned value may increase or decrease as new tasks are scheduled and + /// processed. 
/// /// # Examples /// @@ -413,31 +541,22 @@ impl RuntimeMetrics { /// async fn main() { /// let metrics = Handle::current().metrics(); /// - /// let n = metrics.worker_total_busy_duration(0); - /// println!("worker 0 was busy for a total of {:?}", n); + /// let n = metrics.injection_queue_depth(); + /// println!("{} tasks currently pending in the runtime's injection queue", n); /// } /// ``` - pub fn worker_total_busy_duration(&self, worker: usize) -> Duration { - let nanos = self - .handle - .inner - .worker_metrics(worker) - .busy_duration_total - .load(Relaxed); - Duration::from_nanos(nanos) + pub fn injection_queue_depth(&self) -> usize { + self.handle.inner.injection_queue_depth() } - /// Returns the number of tasks scheduled from **within** the runtime on the - /// given worker's local queue. - /// - /// The local schedule count starts at zero when the runtime is created and - /// increases by one each time a task is woken from **inside** of the - /// runtime on the given worker. This usually means that a task is spawned - /// or notified from within a runtime thread and will be queued on the - /// worker-local queue. + /// Returns the number of tasks currently scheduled in the given worker's + /// local queue. /// - /// The counter is monotonically increasing. It is never decremented or - /// reset to zero. + /// Tasks that are spawned or notified from within a runtime thread are + /// scheduled using that worker's local queue. This metric returns the + /// **current** number of tasks pending in the worker's local queue. As + /// such, the returned value may increase or decrease as new tasks are + /// scheduled and processed. 
/// /// # Arguments /// @@ -460,283 +579,56 @@ impl RuntimeMetrics { /// async fn main() { /// let metrics = Handle::current().metrics(); /// - /// let n = metrics.worker_local_schedule_count(0); - /// println!("{} tasks were scheduled on the worker's local queue", n); + /// let n = metrics.worker_local_queue_depth(0); + /// println!("{} tasks currently pending in worker 0's local queue", n); /// } /// ``` - pub fn worker_local_schedule_count(&self, worker: usize) -> u64 { - self.handle - .inner - .worker_metrics(worker) - .local_schedule_count - .load(Relaxed) + pub fn worker_local_queue_depth(&self, worker: usize) -> usize { + self.handle.inner.worker_local_queue_depth(worker) } - /// Returns the number of times the given worker thread saturated its local - /// queue. - /// - /// This metric only applies to the **multi-threaded** scheduler. - /// - /// The worker overflow count starts at zero when the runtime is created and - /// increases by one each time the worker attempts to schedule a task - /// locally, but its local queue is full. When this happens, half of the - /// local queue is moved to the injection queue. - /// - /// The counter is monotonically increasing. It is never decremented or - /// reset to zero. - /// - /// # Arguments - /// - /// `worker` is the index of the worker being queried. The given value must - /// be between 0 and `num_workers()`. The index uniquely identifies a single - /// worker and will continue to identify the worker throughout the lifetime - /// of the runtime instance. + /// Returns `true` if the runtime is tracking the distribution of task poll + /// times. /// - /// # Panics - /// - /// The method panics when `worker` represents an invalid worker, i.e. is - /// greater than or equal to `num_workers()`. + /// Task poll times are not instrumented by default as doing so requires + /// calling [`Instant::now()`] twice per task poll. 
The feature is enabled + /// by calling [`enable_metrics_poll_count_histogram()`] when building the + /// runtime. /// /// # Examples /// /// ``` - /// use tokio::runtime::Handle; + /// use tokio::runtime::{self, Handle}; /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); + /// fn main() { + /// runtime::Builder::new_current_thread() + /// .enable_metrics_poll_count_histogram() + /// .build() + /// .unwrap() + /// .block_on(async { + /// let metrics = Handle::current().metrics(); + /// let enabled = metrics.poll_count_histogram_enabled(); /// - /// let n = metrics.worker_overflow_count(0); - /// println!("worker 0 has overflowed its queue {} times", n); + /// println!("Tracking task poll time distribution: {:?}", enabled); + /// }); /// } /// ``` - pub fn worker_overflow_count(&self, worker: usize) -> u64 { + /// + /// [`enable_metrics_poll_count_histogram()`]: crate::runtime::Builder::enable_metrics_poll_count_histogram + /// [`Instant::now()`]: std::time::Instant::now + pub fn poll_count_histogram_enabled(&self) -> bool { self.handle .inner - .worker_metrics(worker) - .overflow_count - .load(Relaxed) + .worker_metrics(0) + .poll_count_histogram + .is_some() } - } - /// Returns the number of tasks currently scheduled in the runtime's - /// injection queue. - /// - /// Tasks that are spawned or notified from a non-runtime thread are - /// scheduled using the runtime's injection queue. This metric returns the - /// **current** number of tasks pending in the injection queue. As such, the - /// returned value may increase or decrease as new tasks are scheduled and - /// processed. 
- /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.injection_queue_depth(); - /// println!("{} tasks currently pending in the runtime's injection queue", n); - /// } - /// ``` - pub fn injection_queue_depth(&self) -> usize { - self.handle.inner.injection_queue_depth() - } - - /// Returns the number of tasks currently scheduled in the given worker's - /// local queue. - /// - /// Tasks that are spawned or notified from within a runtime thread are - /// scheduled using that worker's local queue. This metric returns the - /// **current** number of tasks pending in the worker's local queue. As - /// such, the returned value may increase or decrease as new tasks are - /// scheduled and processed. - /// - /// # Arguments - /// - /// `worker` is the index of the worker being queried. The given value must - /// be between 0 and `num_workers()`. The index uniquely identifies a single - /// worker and will continue to identify the worker throughout the lifetime - /// of the runtime instance. - /// - /// # Panics - /// - /// The method panics when `worker` represents an invalid worker, i.e. is - /// greater than or equal to `num_workers()`. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.worker_local_queue_depth(0); - /// println!("{} tasks currently pending in worker 0's local queue", n); - /// } - /// ``` - pub fn worker_local_queue_depth(&self, worker: usize) -> usize { - self.handle.inner.worker_local_queue_depth(worker) - } - - /// Returns `true` if the runtime is tracking the distribution of task poll - /// times. - /// - /// Task poll times are not instrumented by default as doing so requires - /// calling [`Instant::now()`] twice per task poll. 
The feature is enabled - /// by calling [`enable_metrics_poll_count_histogram()`] when building the - /// runtime. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::{self, Handle}; - /// - /// fn main() { - /// runtime::Builder::new_current_thread() - /// .enable_metrics_poll_count_histogram() - /// .build() - /// .unwrap() - /// .block_on(async { - /// let metrics = Handle::current().metrics(); - /// let enabled = metrics.poll_count_histogram_enabled(); - /// - /// println!("Tracking task poll time distribution: {:?}", enabled); - /// }); - /// } - /// ``` - /// - /// [`enable_metrics_poll_count_histogram()`]: crate::runtime::Builder::enable_metrics_poll_count_histogram - /// [`Instant::now()`]: std::time::Instant::now - pub fn poll_count_histogram_enabled(&self) -> bool { - self.handle - .inner - .worker_metrics(0) - .poll_count_histogram - .is_some() - } - - /// Returns the number of histogram buckets tracking the distribution of - /// task poll times. - /// - /// This value is configured by calling - /// [`metrics_poll_count_histogram_buckets()`] when building the runtime. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::{self, Handle}; - /// - /// fn main() { - /// runtime::Builder::new_current_thread() - /// .enable_metrics_poll_count_histogram() - /// .build() - /// .unwrap() - /// .block_on(async { - /// let metrics = Handle::current().metrics(); - /// let buckets = metrics.poll_count_histogram_num_buckets(); - /// - /// println!("Histogram buckets: {:?}", buckets); - /// }); - /// } - /// ``` - /// - /// [`metrics_poll_count_histogram_buckets()`]: - /// crate::runtime::Builder::metrics_poll_count_histogram_buckets - pub fn poll_count_histogram_num_buckets(&self) -> usize { - self.handle - .inner - .worker_metrics(0) - .poll_count_histogram - .as_ref() - .map(|histogram| histogram.num_buckets()) - .unwrap_or_default() - } - - /// Returns the range of task poll times tracked by the given bucket. 
- /// - /// This value is configured by calling - /// [`metrics_poll_count_histogram_resolution()`] when building the runtime. - /// - /// # Panics - /// - /// The method panics if `bucket` represents an invalid bucket index, i.e. - /// is greater than or equal to `poll_count_histogram_num_buckets()`. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::{self, Handle}; - /// - /// fn main() { - /// runtime::Builder::new_current_thread() - /// .enable_metrics_poll_count_histogram() - /// .build() - /// .unwrap() - /// .block_on(async { - /// let metrics = Handle::current().metrics(); - /// let buckets = metrics.poll_count_histogram_num_buckets(); - /// - /// for i in 0..buckets { - /// let range = metrics.poll_count_histogram_bucket_range(i); - /// println!("Histogram bucket {} range: {:?}", i, range); - /// } - /// }); - /// } - /// ``` - /// - /// [`metrics_poll_count_histogram_resolution()`]: - /// crate::runtime::Builder::metrics_poll_count_histogram_resolution - #[track_caller] - pub fn poll_count_histogram_bucket_range(&self, bucket: usize) -> Range { - self.handle - .inner - .worker_metrics(0) - .poll_count_histogram - .as_ref() - .map(|histogram| { - let range = histogram.bucket_range(bucket); - std::ops::Range { - start: Duration::from_nanos(range.start), - end: Duration::from_nanos(range.end), - } - }) - .unwrap_or_default() - } - - cfg_64bit_metrics! { - /// Returns the number of times the given worker polled tasks with a poll - /// duration within the given bucket's range. - /// - /// Each worker maintains its own histogram and the counts for each bucket - /// starts at zero when the runtime is created. Each time the worker polls a - /// task, it tracks the duration the task poll time took and increments the - /// associated bucket by 1. - /// - /// Each bucket is a monotonically increasing counter. It is never - /// decremented or reset to zero. - /// - /// # Arguments - /// - /// `worker` is the index of the worker being queried. 
The given value must - /// be between 0 and `num_workers()`. The index uniquely identifies a single - /// worker and will continue to identify the worker throughout the lifetime - /// of the runtime instance. - /// - /// `bucket` is the index of the bucket being queried. The bucket is scoped - /// to the worker. The range represented by the bucket can be queried by - /// calling [`poll_count_histogram_bucket_range()`]. Each worker maintains - /// identical bucket ranges. + /// Returns the number of histogram buckets tracking the distribution of + /// task poll times. /// - /// # Panics - /// - /// The method panics when `worker` represents an invalid worker, i.e. is - /// greater than or equal to `num_workers()` or if `bucket` represents an - /// invalid bucket. + /// This value is configured by calling + /// [`metrics_poll_count_histogram_buckets()`] when building the runtime. /// /// # Examples /// @@ -752,149 +644,156 @@ impl RuntimeMetrics { /// let metrics = Handle::current().metrics(); /// let buckets = metrics.poll_count_histogram_num_buckets(); /// - /// for worker in 0..metrics.num_workers() { - /// for i in 0..buckets { - /// let count = metrics.poll_count_histogram_bucket_count(worker, i); - /// println!("Poll count {}", count); - /// } - /// } + /// println!("Histogram buckets: {:?}", buckets); /// }); /// } /// ``` /// - /// [`poll_count_histogram_bucket_range()`]: crate::runtime::RuntimeMetrics::poll_count_histogram_bucket_range - #[track_caller] - pub fn poll_count_histogram_bucket_count(&self, worker: usize, bucket: usize) -> u64 { + /// [`metrics_poll_count_histogram_buckets()`]: + /// crate::runtime::Builder::metrics_poll_count_histogram_buckets + pub fn poll_count_histogram_num_buckets(&self) -> usize { self.handle .inner - .worker_metrics(worker) + .worker_metrics(0) .poll_count_histogram .as_ref() - .map(|histogram| histogram.get(bucket)) + .map(|histogram| histogram.num_buckets()) .unwrap_or_default() } - /// Returns the mean duration of task 
polls, in nanoseconds. + /// Returns the range of task poll times tracked by the given bucket. /// - /// This is an exponentially weighted moving average. Currently, this metric - /// is only provided by the multi-threaded runtime. - /// - /// # Arguments - /// - /// `worker` is the index of the worker being queried. The given value must - /// be between 0 and `num_workers()`. The index uniquely identifies a single - /// worker and will continue to identify the worker throughout the lifetime - /// of the runtime instance. + /// This value is configured by calling + /// [`metrics_poll_count_histogram_resolution()`] when building the runtime. /// /// # Panics /// - /// The method panics when `worker` represents an invalid worker, i.e. is - /// greater than or equal to `num_workers()`. + /// The method panics if `bucket` represents an invalid bucket index, i.e. + /// is greater than or equal to `poll_count_histogram_num_buckets()`. /// /// # Examples /// /// ``` - /// use tokio::runtime::Handle; + /// use tokio::runtime::{self, Handle}; /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); + /// fn main() { + /// runtime::Builder::new_current_thread() + /// .enable_metrics_poll_count_histogram() + /// .build() + /// .unwrap() + /// .block_on(async { + /// let metrics = Handle::current().metrics(); + /// let buckets = metrics.poll_count_histogram_num_buckets(); /// - /// let n = metrics.worker_mean_poll_time(0); - /// println!("worker 0 has a mean poll time of {:?}", n); + /// for i in 0..buckets { + /// let range = metrics.poll_count_histogram_bucket_range(i); + /// println!("Histogram bucket {} range: {:?}", i, range); + /// } + /// }); /// } /// ``` + /// + /// [`metrics_poll_count_histogram_resolution()`]: + /// crate::runtime::Builder::metrics_poll_count_histogram_resolution #[track_caller] - pub fn worker_mean_poll_time(&self, worker: usize) -> Duration { - let nanos = self - .handle + pub fn 
poll_count_histogram_bucket_range(&self, bucket: usize) -> Range { + self.handle .inner - .worker_metrics(worker) - .mean_poll_time - .load(Relaxed); - Duration::from_nanos(nanos) + .worker_metrics(0) + .poll_count_histogram + .as_ref() + .map(|histogram| { + let range = histogram.bucket_range(bucket); + std::ops::Range { + start: Duration::from_nanos(range.start), + end: Duration::from_nanos(range.end), + } + }) + .unwrap_or_default() } - } - - /// Returns the number of tasks currently scheduled in the blocking - /// thread pool, spawned using `spawn_blocking`. - /// - /// This metric returns the **current** number of tasks pending in - /// blocking thread pool. As such, the returned value may increase - /// or decrease as new tasks are scheduled and processed. - /// - /// # Examples - /// - /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.blocking_queue_depth(); - /// println!("{} tasks currently pending in the blocking thread pool", n); - /// } - /// ``` - pub fn blocking_queue_depth(&self) -> usize { - self.handle.inner.blocking_queue_depth() - } -} -cfg_net! { - impl RuntimeMetrics { cfg_64bit_metrics! { - /// Returns the number of file descriptors that have been registered with the - /// runtime's I/O driver. + /// Returns the number of times the given worker polled tasks with a poll + /// duration within the given bucket's range. /// - /// # Examples + /// Each worker maintains its own histogram and the counts for each bucket + /// starts at zero when the runtime is created. Each time the worker polls a + /// task, it tracks the duration the task poll time took and increments the + /// associated bucket by 1. /// - /// ``` - /// use tokio::runtime::Handle; + /// Each bucket is a monotonically increasing counter. It is never + /// decremented or reset to zero. 
/// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); + /// # Arguments /// - /// let registered_fds = metrics.io_driver_fd_registered_count(); - /// println!("{} fds have been registered with the runtime's I/O driver.", registered_fds); + /// `worker` is the index of the worker being queried. The given value must + /// be between 0 and `num_workers()`. The index uniquely identifies a single + /// worker and will continue to identify the worker throughout the lifetime + /// of the runtime instance. /// - /// let deregistered_fds = metrics.io_driver_fd_deregistered_count(); + /// `bucket` is the index of the bucket being queried. The bucket is scoped + /// to the worker. The range represented by the bucket can be queried by + /// calling [`poll_count_histogram_bucket_range()`]. Each worker maintains + /// identical bucket ranges. /// - /// let current_fd_count = registered_fds - deregistered_fds; - /// println!("{} fds are currently registered by the runtime's I/O driver.", current_fd_count); - /// } - /// ``` - pub fn io_driver_fd_registered_count(&self) -> u64 { - self.with_io_driver_metrics(|m| { - m.fd_registered_count.load(Relaxed) - }) - } - - /// Returns the number of file descriptors that have been deregistered by the - /// runtime's I/O driver. + /// # Panics + /// + /// The method panics when `worker` represents an invalid worker, i.e. is + /// greater than or equal to `num_workers()` or if `bucket` represents an + /// invalid bucket. 
/// /// # Examples /// /// ``` - /// use tokio::runtime::Handle; - /// - /// #[tokio::main] - /// async fn main() { - /// let metrics = Handle::current().metrics(); - /// - /// let n = metrics.io_driver_fd_deregistered_count(); - /// println!("{} fds have been deregistered by the runtime's I/O driver.", n); + /// use tokio::runtime::{self, Handle}; + /// + /// fn main() { + /// runtime::Builder::new_current_thread() + /// .enable_metrics_poll_count_histogram() + /// .build() + /// .unwrap() + /// .block_on(async { + /// let metrics = Handle::current().metrics(); + /// let buckets = metrics.poll_count_histogram_num_buckets(); + /// + /// for worker in 0..metrics.num_workers() { + /// for i in 0..buckets { + /// let count = metrics.poll_count_histogram_bucket_count(worker, i); + /// println!("Poll count {}", count); + /// } + /// } + /// }); /// } /// ``` - pub fn io_driver_fd_deregistered_count(&self) -> u64 { - self.with_io_driver_metrics(|m| { - m.fd_deregistered_count.load(Relaxed) - }) + /// + /// [`poll_count_histogram_bucket_range()`]: crate::runtime::RuntimeMetrics::poll_count_histogram_bucket_range + #[track_caller] + pub fn poll_count_histogram_bucket_count(&self, worker: usize, bucket: usize) -> u64 { + self.handle + .inner + .worker_metrics(worker) + .poll_count_histogram + .as_ref() + .map(|histogram| histogram.get(bucket)) + .unwrap_or_default() } - /// Returns the number of ready events processed by the runtime's - /// I/O driver. + /// Returns the mean duration of task polls, in nanoseconds. + /// + /// This is an exponentially weighted moving average. Currently, this metric + /// is only provided by the multi-threaded runtime. + /// + /// # Arguments + /// + /// `worker` is the index of the worker being queried. The given value must + /// be between 0 and `num_workers()`. The index uniquely identifies a single + /// worker and will continue to identify the worker throughout the lifetime + /// of the runtime instance. 
+ /// + /// # Panics + /// + /// The method panics when `worker` represents an invalid worker, i.e. is + /// greater than or equal to `num_workers()`. /// /// # Examples /// @@ -905,27 +804,131 @@ cfg_net! { /// async fn main() { /// let metrics = Handle::current().metrics(); /// - /// let n = metrics.io_driver_ready_count(); - /// println!("{} ready events processed by the runtime's I/O driver.", n); + /// let n = metrics.worker_mean_poll_time(0); + /// println!("worker 0 has a mean poll time of {:?}", n); /// } /// ``` - pub fn io_driver_ready_count(&self) -> u64 { - self.with_io_driver_metrics(|m| m.ready_count.load(Relaxed)) + #[track_caller] + pub fn worker_mean_poll_time(&self, worker: usize) -> Duration { + let nanos = self + .handle + .inner + .worker_metrics(worker) + .mean_poll_time + .load(Relaxed); + Duration::from_nanos(nanos) } + } - fn with_io_driver_metrics(&self, f: F) -> u64 - where - F: Fn(&super::IoDriverMetrics) -> u64, - { - // TODO: Investigate if this should return 0, most of our metrics always increase - // thus this breaks that guarantee. - self.handle - .inner - .driver() - .io - .as_ref() - .map(|h| f(&h.metrics)) - .unwrap_or(0) + /// Returns the number of tasks currently scheduled in the blocking + /// thread pool, spawned using `spawn_blocking`. + /// + /// This metric returns the **current** number of tasks pending in + /// blocking thread pool. As such, the returned value may increase + /// or decrease as new tasks are scheduled and processed. + /// + /// # Examples + /// + /// ``` + /// use tokio::runtime::Handle; + /// + /// #[tokio::main] + /// async fn main() { + /// let metrics = Handle::current().metrics(); + /// + /// let n = metrics.blocking_queue_depth(); + /// println!("{} tasks currently pending in the blocking thread pool", n); + /// } + /// ``` + pub fn blocking_queue_depth(&self) -> usize { + self.handle.inner.blocking_queue_depth() + } + + cfg_net! { + cfg_64bit_metrics! 
{ + /// Returns the number of file descriptors that have been registered with the + /// runtime's I/O driver. + /// + /// # Examples + /// + /// ``` + /// use tokio::runtime::Handle; + /// + /// #[tokio::main] + /// async fn main() { + /// let metrics = Handle::current().metrics(); + /// + /// let registered_fds = metrics.io_driver_fd_registered_count(); + /// println!("{} fds have been registered with the runtime's I/O driver.", registered_fds); + /// + /// let deregistered_fds = metrics.io_driver_fd_deregistered_count(); + /// + /// let current_fd_count = registered_fds - deregistered_fds; + /// println!("{} fds are currently registered by the runtime's I/O driver.", current_fd_count); + /// } + /// ``` + pub fn io_driver_fd_registered_count(&self) -> u64 { + self.with_io_driver_metrics(|m| { + m.fd_registered_count.load(Relaxed) + }) + } + + /// Returns the number of file descriptors that have been deregistered by the + /// runtime's I/O driver. + /// + /// # Examples + /// + /// ``` + /// use tokio::runtime::Handle; + /// + /// #[tokio::main] + /// async fn main() { + /// let metrics = Handle::current().metrics(); + /// + /// let n = metrics.io_driver_fd_deregistered_count(); + /// println!("{} fds have been deregistered by the runtime's I/O driver.", n); + /// } + /// ``` + pub fn io_driver_fd_deregistered_count(&self) -> u64 { + self.with_io_driver_metrics(|m| { + m.fd_deregistered_count.load(Relaxed) + }) + } + + /// Returns the number of ready events processed by the runtime's + /// I/O driver. 
+ /// + /// # Examples + /// + /// ``` + /// use tokio::runtime::Handle; + /// + /// #[tokio::main] + /// async fn main() { + /// let metrics = Handle::current().metrics(); + /// + /// let n = metrics.io_driver_ready_count(); + /// println!("{} ready events processed by the runtime's I/O driver.", n); + /// } + /// ``` + pub fn io_driver_ready_count(&self) -> u64 { + self.with_io_driver_metrics(|m| m.ready_count.load(Relaxed)) + } + + fn with_io_driver_metrics(&self, f: F) -> u64 + where + F: Fn(&super::IoDriverMetrics) -> u64, + { + // TODO: Investigate if this should return 0, most of our metrics always increase + // thus this breaks that guarantee. + self.handle + .inner + .driver() + .io + .as_ref() + .map(|h| f(&h.metrics)) + .unwrap_or(0) + } } } } diff --git a/tokio/src/runtime/metrics/worker.rs b/tokio/src/runtime/metrics/worker.rs index fc7c4e6dfe4..a396bf5a391 100644 --- a/tokio/src/runtime/metrics/worker.rs +++ b/tokio/src/runtime/metrics/worker.rs @@ -1,8 +1,10 @@ -use crate::loom::sync::atomic::AtomicUsize; -use crate::loom::sync::atomic::Ordering::Relaxed; use crate::runtime::metrics::Histogram; use crate::runtime::Config; use crate::util::metric_atomics::MetricAtomicU64; +// This is NOT the Loom atomic. To avoid an unnecessary state explosion in loom, +// all metrics use regular atomics. +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::Relaxed; /// Retrieve runtime worker metrics. /// diff --git a/tokio/src/runtime/mod.rs b/tokio/src/runtime/mod.rs index 3d333960f3d..3fcde75b54e 100644 --- a/tokio/src/runtime/mod.rs +++ b/tokio/src/runtime/mod.rs @@ -388,21 +388,18 @@ cfg_rt! { mod thread_id; pub(crate) use thread_id::ThreadId; - cfg_metrics! { - mod metrics; - pub use metrics::{RuntimeMetrics, HistogramScale}; + pub(crate) mod metrics; + pub use metrics::RuntimeMetrics; - pub(crate) use metrics::{MetricsBatch, SchedulerMetrics, WorkerMetrics, HistogramBuilder}; + cfg_unstable_metrics! 
{ + pub use metrics::HistogramScale; cfg_net! { - pub(crate) use metrics::IoDriverMetrics; + pub(crate) use metrics::IoDriverMetrics; } } - cfg_not_metrics! { - pub(crate) mod metrics; - pub(crate) use metrics::{SchedulerMetrics, WorkerMetrics, MetricsBatch, HistogramBuilder}; - } + pub(crate) use metrics::{MetricsBatch, SchedulerMetrics, WorkerMetrics, HistogramBuilder}; /// After thread starts / before thread stops type Callback = std::sync::Arc; diff --git a/tokio/src/runtime/runtime.rs b/tokio/src/runtime/runtime.rs index 7cf2cebeffc..d904af50458 100644 --- a/tokio/src/runtime/runtime.rs +++ b/tokio/src/runtime/runtime.rs @@ -455,6 +455,12 @@ impl Runtime { pub fn shutdown_background(self) { self.shutdown_timeout(Duration::from_nanos(0)); } + + /// Returns a view that lets you get information about how the runtime + /// is performing. + pub fn metrics(&self) -> crate::runtime::RuntimeMetrics { + self.handle.metrics() + } } #[allow(clippy::single_match)] // there are comments in the error branch, so we don't want if-let @@ -486,13 +492,3 @@ impl Drop for Runtime { impl std::panic::UnwindSafe for Runtime {} impl std::panic::RefUnwindSafe for Runtime {} - -cfg_metrics! { - impl Runtime { - /// Returns a view that lets you get information about how the runtime - /// is performing. - pub fn metrics(&self) -> crate::runtime::RuntimeMetrics { - self.handle.metrics() - } - } -} diff --git a/tokio/src/runtime/scheduler/current_thread/mod.rs b/tokio/src/runtime/scheduler/current_thread/mod.rs index 36bcefc4406..b9c23837a58 100644 --- a/tokio/src/runtime/scheduler/current_thread/mod.rs +++ b/tokio/src/runtime/scheduler/current_thread/mod.rs @@ -502,7 +502,7 @@ impl Handle { } } -cfg_metrics! { +cfg_unstable_metrics! 
{ impl Handle { pub(crate) fn scheduler_metrics(&self) -> &SchedulerMetrics { &self.shared.scheduler_metrics diff --git a/tokio/src/runtime/scheduler/inject.rs b/tokio/src/runtime/scheduler/inject.rs index 39976fcd7a2..811b02c136c 100644 --- a/tokio/src/runtime/scheduler/inject.rs +++ b/tokio/src/runtime/scheduler/inject.rs @@ -16,7 +16,7 @@ cfg_rt_multi_thread! { mod rt_multi_thread; } -cfg_metrics! { +cfg_unstable_metrics! { mod metrics; } diff --git a/tokio/src/runtime/scheduler/mod.rs b/tokio/src/runtime/scheduler/mod.rs index 04fbff39e47..3cbba11b752 100644 --- a/tokio/src/runtime/scheduler/mod.rs +++ b/tokio/src/runtime/scheduler/mod.rs @@ -163,20 +163,22 @@ cfg_rt! { } } - cfg_metrics! { + impl Handle { + pub(crate) fn num_workers(&self) -> usize { + match self { + Handle::CurrentThread(_) => 1, + #[cfg(feature = "rt-multi-thread")] + Handle::MultiThread(handle) => handle.num_workers(), + #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))] + Handle::MultiThreadAlt(handle) => handle.num_workers(), + } + } + } + + cfg_unstable_metrics! { use crate::runtime::{SchedulerMetrics, WorkerMetrics}; impl Handle { - pub(crate) fn num_workers(&self) -> usize { - match self { - Handle::CurrentThread(_) => 1, - #[cfg(feature = "rt-multi-thread")] - Handle::MultiThread(handle) => handle.num_workers(), - #[cfg(all(tokio_unstable, feature = "rt-multi-thread"))] - Handle::MultiThreadAlt(handle) => handle.num_workers(), - } - } - pub(crate) fn num_blocking_threads(&self) -> usize { match_flavor!(self, Handle(handle) => handle.num_blocking_threads()) } diff --git a/tokio/src/runtime/scheduler/multi_thread/handle.rs b/tokio/src/runtime/scheduler/multi_thread/handle.rs index 568eb80af8b..72f776e47fa 100644 --- a/tokio/src/runtime/scheduler/multi_thread/handle.rs +++ b/tokio/src/runtime/scheduler/multi_thread/handle.rs @@ -9,9 +9,7 @@ use crate::util::RngSeedGenerator; use std::fmt; -cfg_metrics! { - mod metrics; -} +mod metrics; cfg_taskdump! 
{ mod taskdump; diff --git a/tokio/src/runtime/scheduler/multi_thread/handle/metrics.rs b/tokio/src/runtime/scheduler/multi_thread/handle/metrics.rs index 3d614b478c5..6ced245ee5b 100644 --- a/tokio/src/runtime/scheduler/multi_thread/handle/metrics.rs +++ b/tokio/src/runtime/scheduler/multi_thread/handle/metrics.rs @@ -1,44 +1,48 @@ use super::Handle; -use crate::runtime::{SchedulerMetrics, WorkerMetrics}; +cfg_unstable_metrics! { + use crate::runtime::{SchedulerMetrics, WorkerMetrics}; +} impl Handle { pub(crate) fn num_workers(&self) -> usize { self.shared.worker_metrics.len() } - pub(crate) fn num_blocking_threads(&self) -> usize { - // workers are currently spawned using spawn_blocking - self.blocking_spawner - .num_threads() - .saturating_sub(self.num_workers()) - } + cfg_unstable_metrics! { + pub(crate) fn num_blocking_threads(&self) -> usize { + // workers are currently spawned using spawn_blocking + self.blocking_spawner + .num_threads() + .saturating_sub(self.num_workers()) + } - pub(crate) fn num_idle_blocking_threads(&self) -> usize { - self.blocking_spawner.num_idle_threads() - } + pub(crate) fn num_idle_blocking_threads(&self) -> usize { + self.blocking_spawner.num_idle_threads() + } - pub(crate) fn active_tasks_count(&self) -> usize { - self.shared.owned.active_tasks_count() - } + pub(crate) fn active_tasks_count(&self) -> usize { + self.shared.owned.active_tasks_count() + } - pub(crate) fn scheduler_metrics(&self) -> &SchedulerMetrics { - &self.shared.scheduler_metrics - } + pub(crate) fn scheduler_metrics(&self) -> &SchedulerMetrics { + &self.shared.scheduler_metrics + } - pub(crate) fn worker_metrics(&self, worker: usize) -> &WorkerMetrics { - &self.shared.worker_metrics[worker] - } + pub(crate) fn worker_metrics(&self, worker: usize) -> &WorkerMetrics { + &self.shared.worker_metrics[worker] + } - pub(crate) fn injection_queue_depth(&self) -> usize { - self.shared.injection_queue_depth() - } + pub(crate) fn injection_queue_depth(&self) -> usize { + 
self.shared.injection_queue_depth() + } - pub(crate) fn worker_local_queue_depth(&self, worker: usize) -> usize { - self.shared.worker_local_queue_depth(worker) - } + pub(crate) fn worker_local_queue_depth(&self, worker: usize) -> usize { + self.shared.worker_local_queue_depth(worker) + } - pub(crate) fn blocking_queue_depth(&self) -> usize { - self.blocking_spawner.queue_depth() + pub(crate) fn blocking_queue_depth(&self) -> usize { + self.blocking_spawner.queue_depth() + } } } diff --git a/tokio/src/runtime/scheduler/multi_thread/queue.rs b/tokio/src/runtime/scheduler/multi_thread/queue.rs index 35223289870..99ee31ba15b 100644 --- a/tokio/src/runtime/scheduler/multi_thread/queue.rs +++ b/tokio/src/runtime/scheduler/multi_thread/queue.rs @@ -546,7 +546,7 @@ impl Steal { } } -cfg_metrics! { +cfg_unstable_metrics! { impl Steal { pub(crate) fn len(&self) -> usize { self.0.len() as _ diff --git a/tokio/src/runtime/scheduler/multi_thread/worker.rs b/tokio/src/runtime/scheduler/multi_thread/worker.rs index 9f0dd98dfdc..65851b21e82 100644 --- a/tokio/src/runtime/scheduler/multi_thread/worker.rs +++ b/tokio/src/runtime/scheduler/multi_thread/worker.rs @@ -74,7 +74,7 @@ use std::cell::RefCell; use std::task::Waker; use std::time::Duration; -cfg_metrics! { +cfg_unstable_metrics! { mod metrics; } diff --git a/tokio/src/runtime/scheduler/multi_thread_alt/handle.rs b/tokio/src/runtime/scheduler/multi_thread_alt/handle.rs index d746bca1a18..1f5b7818521 100644 --- a/tokio/src/runtime/scheduler/multi_thread_alt/handle.rs +++ b/tokio/src/runtime/scheduler/multi_thread_alt/handle.rs @@ -9,7 +9,7 @@ use crate::util::RngSeedGenerator; use std::fmt; -cfg_metrics! { +cfg_unstable_metrics! 
{ mod metrics; } diff --git a/tokio/src/runtime/scheduler/multi_thread_alt/queue.rs b/tokio/src/runtime/scheduler/multi_thread_alt/queue.rs index 2694d27cbdf..c8293fdc845 100644 --- a/tokio/src/runtime/scheduler/multi_thread_alt/queue.rs +++ b/tokio/src/runtime/scheduler/multi_thread_alt/queue.rs @@ -538,7 +538,7 @@ impl Steal { } } -cfg_metrics! { +cfg_unstable_metrics! { impl Steal { pub(crate) fn len(&self) -> usize { self.0.len() as _ diff --git a/tokio/src/runtime/scheduler/multi_thread_alt/worker.rs b/tokio/src/runtime/scheduler/multi_thread_alt/worker.rs index 63ae0a49743..9ceb7815a53 100644 --- a/tokio/src/runtime/scheduler/multi_thread_alt/worker.rs +++ b/tokio/src/runtime/scheduler/multi_thread_alt/worker.rs @@ -74,7 +74,7 @@ use std::cmp; use std::task::Waker; use std::time::Duration; -cfg_metrics! { +cfg_unstable_metrics! { mod metrics; } diff --git a/tokio/src/runtime/tests/queue.rs b/tokio/src/runtime/tests/queue.rs index 55429b1b11b..f463355f0d3 100644 --- a/tokio/src/runtime/tests/queue.rs +++ b/tokio/src/runtime/tests/queue.rs @@ -40,7 +40,7 @@ fn fits_256_one_at_a_time() { local.push_back_or_overflow(task, &inject, &mut stats); } - cfg_metrics! { + cfg_unstable_metrics! { assert_metrics!(stats, overflow_count == 0); } @@ -98,7 +98,7 @@ fn overflow() { local.push_back_or_overflow(task, &inject, &mut stats); } - cfg_metrics! { + cfg_unstable_metrics! { assert_metrics!(stats, overflow_count == 1); } @@ -128,7 +128,7 @@ fn steal_batch() { assert!(steal1.steal_into(&mut local2, &mut stats).is_some()); - cfg_metrics! { + cfg_unstable_metrics! { assert_metrics!(stats, steal_count == 2); } @@ -184,7 +184,7 @@ fn stress1() { thread::yield_now(); } - cfg_metrics! { + cfg_unstable_metrics! 
{ assert_metrics!(stats, steal_count == n as _); } From 97bb47b480c66083397c21d54e7ae33cab6c1b20 Mon Sep 17 00:00:00 2001 From: SteveLauC Date: Thu, 30 May 2024 16:44:53 +0800 Subject: [PATCH 152/162] task: fix a typo in doc of `LocalSet::run_until` (#6599) --- tokio/src/task/local.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tokio/src/task/local.rs b/tokio/src/task/local.rs index a40708d08c2..d98697482cc 100644 --- a/tokio/src/task/local.rs +++ b/tokio/src/task/local.rs @@ -598,7 +598,7 @@ impl LocalSet { /// allowing it to call [`spawn_local`] to spawn additional `!Send` futures. /// Any local futures spawned on the local set will be driven in the /// background until the future passed to `run_until` completes. When the future - /// passed to `run` finishes, any local futures which have not completed + /// passed to `run_until` finishes, any local futures which have not completed /// will remain on the local set, and will be driven on subsequent calls to /// `run_until` or when [awaiting the local set] itself. /// From 873cb8ae2fc291eaffbd71e3c83d17b2f0ed7abf Mon Sep 17 00:00:00 2001 From: Weijia Jiang Date: Thu, 30 May 2024 16:45:45 +0800 Subject: [PATCH 153/162] runtime: move task out of the `lifo_slot` in `block_in_place` (#6596) --- tokio/src/runtime/scheduler/multi_thread/worker.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/tokio/src/runtime/scheduler/multi_thread/worker.rs b/tokio/src/runtime/scheduler/multi_thread/worker.rs index 65851b21e82..8ef487b09fd 100644 --- a/tokio/src/runtime/scheduler/multi_thread/worker.rs +++ b/tokio/src/runtime/scheduler/multi_thread/worker.rs @@ -395,11 +395,19 @@ where let cx = maybe_cx.expect("no .is_some() == false cases above should lead here"); // Get the worker core. If none is set, then blocking is fine! 
- let core = match cx.core.borrow_mut().take() { + let mut core = match cx.core.borrow_mut().take() { Some(core) => core, None => return Ok(()), }; + // If we heavily call `spawn_blocking`, there might be no available thread to + // run this core. Except for the task in the lifo_slot, all tasks can be + // stolen, so we move the task out of the lifo_slot to the run_queue. + if let Some(task) = core.lifo_slot.take() { + core.run_queue + .push_back_or_overflow(task, &*cx.worker.handle, &mut core.stats); + } + // We are taking the core from the context and sending it to another // thread. take_core = true; From dbf93c71844a01574a10f9dee0d4d9655a569f0a Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Thu, 30 May 2024 22:21:16 +0200 Subject: [PATCH 154/162] sync: fix incorrect is_empty on mpsc block boundaries (#6603) --- tokio/src/sync/mpsc/block.rs | 15 +++++++++------ tokio/tests/sync_mpsc.rs | 12 ++++++++++++ 2 files changed, 21 insertions(+), 6 deletions(-) diff --git a/tokio/src/sync/mpsc/block.rs b/tokio/src/sync/mpsc/block.rs index e7798592531..927c4566463 100644 --- a/tokio/src/sync/mpsc/block.rs +++ b/tokio/src/sync/mpsc/block.rs @@ -168,14 +168,17 @@ impl Block { Some(Read::Value(value.assume_init())) } - /// Returns true if there is a value in the slot to be consumed + /// Returns true if *this* block has a value in the given slot. /// - /// # Safety - /// - /// To maintain safety, the caller must ensure: - /// - /// * No concurrent access to the slot. + /// Always returns false when given an index from a different block. 
pub(crate) fn has_value(&self, slot_index: usize) -> bool { + if slot_index < self.header.start_index { + return false; + } + if slot_index >= self.header.start_index + super::BLOCK_CAP { + return false; + } + let offset = offset(slot_index); let ready_bits = self.header.ready_slots.load(Acquire); is_ready(ready_bits, offset) diff --git a/tokio/tests/sync_mpsc.rs b/tokio/tests/sync_mpsc.rs index 10a80561537..cc88fa79972 100644 --- a/tokio/tests/sync_mpsc.rs +++ b/tokio/tests/sync_mpsc.rs @@ -1421,4 +1421,16 @@ async fn test_rx_unbounded_len_when_close_is_called_after_dropping_sender() { assert_eq!(rx.len(), 1); } +// Regression test for https://github.com/tokio-rs/tokio/issues/6602 +#[tokio::test] +async fn test_is_empty_32_msgs() { + let (sender, mut receiver) = mpsc::channel(33); + + for value in 1..257 { + sender.send(value).await.unwrap(); + receiver.recv().await.unwrap(); + assert!(receiver.is_empty(), "{value}. len: {}", receiver.len()); + } +} + fn is_debug(_: &T) {} From 65cbf730de48ef9d3c84959d26ab717a85a5de62 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Thu, 30 May 2024 22:21:51 +0200 Subject: [PATCH 155/162] chore: prepare tokio-macros v2.3.0 (#6600) --- tokio-macros/CHANGELOG.md | 6 ++++++ tokio-macros/Cargo.toml | 2 +- tokio/Cargo.toml | 2 +- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/tokio-macros/CHANGELOG.md b/tokio-macros/CHANGELOG.md index 32961394a09..9d305bf3b74 100644 --- a/tokio-macros/CHANGELOG.md +++ b/tokio-macros/CHANGELOG.md @@ -1,3 +1,9 @@ +# 2.3.0 (May 30th, 2024) + +- macros: make `#[tokio::test]` append `#[test]` at the end of the attribute list ([#6497]) + +[#6497]: https://github.com/tokio-rs/tokio/pull/6497 + # 2.2.0 (November 19th, 2023) ### Changed diff --git a/tokio-macros/Cargo.toml b/tokio-macros/Cargo.toml index ea9839c6d06..c150334880c 100644 --- a/tokio-macros/Cargo.toml +++ b/tokio-macros/Cargo.toml @@ -4,7 +4,7 @@ name = "tokio-macros" # - Remove path dependencies # - Update CHANGELOG.md. 
# - Create "tokio-macros-1.x.y" git tag. -version = "2.2.0" +version = "2.3.0" edition = "2021" rust-version = "1.63" authors = ["Tokio Contributors "] diff --git a/tokio/Cargo.toml b/tokio/Cargo.toml index e46e274c47a..548c5f6e7aa 100644 --- a/tokio/Cargo.toml +++ b/tokio/Cargo.toml @@ -89,7 +89,7 @@ test-util = ["rt", "sync", "time"] time = [] [dependencies] -tokio-macros = { version = "~2.2.0", path = "../tokio-macros", optional = true } +tokio-macros = { version = "~2.3.0", path = "../tokio-macros", optional = true } pin-project-lite = "0.2.11" From 14c17fc09656a30230177b600bacceb9db33e942 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Thu, 30 May 2024 22:24:58 +0200 Subject: [PATCH 156/162] chore: prepare Tokio v1.38.0 (#6601) --- README.md | 2 +- tokio/CHANGELOG.md | 100 +++++++++++++++++++++++++++++++++++++++++++++ tokio/Cargo.toml | 2 +- tokio/README.md | 2 +- 4 files changed, 103 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 5706d1ab6b0..b9d9eb785c3 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,7 @@ Make sure you activated the full features of the tokio crate on Cargo.toml: ```toml [dependencies] -tokio = { version = "1.37.0", features = ["full"] } +tokio = { version = "1.38.0", features = ["full"] } ``` Then, on your main.rs: diff --git a/tokio/CHANGELOG.md b/tokio/CHANGELOG.md index 54346a78d97..acd3bacd5e6 100644 --- a/tokio/CHANGELOG.md +++ b/tokio/CHANGELOG.md @@ -1,3 +1,103 @@ +# 1.38.0 (May 30th, 2024) + +This release marks the beginning of stabilization for runtime metrics. It +stabilizes `RuntimeMetrics::worker_count`. Future releases will continue to +stabilize more metrics. 
+ +### Added + +- fs: add `File::create_new` ([#6573]) +- io: add `copy_bidirectional_with_sizes` ([#6500]) +- io: implement `AsyncBufRead` for `Join` ([#6449]) +- net: add Apple visionOS support ([#6465]) +- net: implement `Clone` for `NamedPipeInfo` ([#6586]) +- net: support QNX OS ([#6421]) +- sync: add `Notify::notify_last` ([#6520]) +- sync: add `mpsc::Receiver::{capacity,max_capacity}` ([#6511]) +- sync: add `split` method to the semaphore permit ([#6472], [#6478]) +- task: add `tokio::task::join_set::Builder::spawn_blocking` ([#6578]) +- wasm: support rt-multi-thread with wasm32-wasi-preview1-threads ([#6510]) + +### Changed + +- macros: make `#[tokio::test]` append `#[test]` at the end of the attribute list ([#6497]) +- metrics: fix `blocking_threads` count ([#6551]) +- metrics: stabilize `RuntimeMetrics::worker_count` ([#6556]) +- runtime: move task out of the `lifo_slot` in `block_in_place` ([#6596]) +- runtime: panic if `global_queue_interval` is zero ([#6445]) +- sync: always drop message in destructor for oneshot receiver ([#6558]) +- sync: instrument `Semaphore` for task dumps ([#6499]) +- sync: use FIFO ordering when waking batches of wakers ([#6521]) +- task: make `LocalKey::get` work with Clone types ([#6433]) +- tests: update nix and mio-aio dev-dependencies ([#6552]) +- time: clean up implementation ([#6517]) +- time: lazily init timers on first poll ([#6512]) +- time: remove the `true_when` field in `TimerShared` ([#6563]) +- time: use sharding for timer implementation ([#6534]) + +### Fixed + +- taskdump: allow building taskdump docs on non-unix machines ([#6564]) +- time: check for overflow in `Interval::poll_tick` ([#6487]) +- sync: fix incorrect `is_empty` on mpsc block boundaries ([#6603]) + +### Documented + +- fs: rewrite file system docs ([#6467]) +- io: fix `stdin` documentation ([#6581]) +- io: fix obsolete reference in `ReadHalf::unsplit()` documentation ([#6498]) +- macros: render more comprehensible documentation for `select!` 
([#6468]) +- net: add missing types to module docs ([#6482]) +- net: fix misleading `NamedPipeServer` example ([#6590]) +- sync: add examples for `SemaphorePermit`, `OwnedSemaphorePermit` ([#6477]) +- sync: document that `Barrier::wait` is not cancel safe ([#6494]) +- sync: explain relation between `watch::Sender::{subscribe,closed}` ([#6490]) +- task: clarify that you can't abort `spawn_blocking` tasks ([#6571]) +- task: fix a typo in doc of `LocalSet::run_until` ([#6599]) +- time: fix test-util requirement for pause and resume in docs ([#6503]) + +[#6421]: https://github.com/tokio-rs/tokio/pull/6421 +[#6433]: https://github.com/tokio-rs/tokio/pull/6433 +[#6445]: https://github.com/tokio-rs/tokio/pull/6445 +[#6449]: https://github.com/tokio-rs/tokio/pull/6449 +[#6465]: https://github.com/tokio-rs/tokio/pull/6465 +[#6467]: https://github.com/tokio-rs/tokio/pull/6467 +[#6468]: https://github.com/tokio-rs/tokio/pull/6468 +[#6472]: https://github.com/tokio-rs/tokio/pull/6472 +[#6477]: https://github.com/tokio-rs/tokio/pull/6477 +[#6478]: https://github.com/tokio-rs/tokio/pull/6478 +[#6482]: https://github.com/tokio-rs/tokio/pull/6482 +[#6487]: https://github.com/tokio-rs/tokio/pull/6487 +[#6490]: https://github.com/tokio-rs/tokio/pull/6490 +[#6494]: https://github.com/tokio-rs/tokio/pull/6494 +[#6497]: https://github.com/tokio-rs/tokio/pull/6497 +[#6498]: https://github.com/tokio-rs/tokio/pull/6498 +[#6499]: https://github.com/tokio-rs/tokio/pull/6499 +[#6500]: https://github.com/tokio-rs/tokio/pull/6500 +[#6503]: https://github.com/tokio-rs/tokio/pull/6503 +[#6510]: https://github.com/tokio-rs/tokio/pull/6510 +[#6511]: https://github.com/tokio-rs/tokio/pull/6511 +[#6512]: https://github.com/tokio-rs/tokio/pull/6512 +[#6517]: https://github.com/tokio-rs/tokio/pull/6517 +[#6520]: https://github.com/tokio-rs/tokio/pull/6520 +[#6521]: https://github.com/tokio-rs/tokio/pull/6521 +[#6534]: https://github.com/tokio-rs/tokio/pull/6534 +[#6551]: 
https://github.com/tokio-rs/tokio/pull/6551 +[#6552]: https://github.com/tokio-rs/tokio/pull/6552 +[#6556]: https://github.com/tokio-rs/tokio/pull/6556 +[#6558]: https://github.com/tokio-rs/tokio/pull/6558 +[#6563]: https://github.com/tokio-rs/tokio/pull/6563 +[#6564]: https://github.com/tokio-rs/tokio/pull/6564 +[#6571]: https://github.com/tokio-rs/tokio/pull/6571 +[#6573]: https://github.com/tokio-rs/tokio/pull/6573 +[#6578]: https://github.com/tokio-rs/tokio/pull/6578 +[#6581]: https://github.com/tokio-rs/tokio/pull/6581 +[#6586]: https://github.com/tokio-rs/tokio/pull/6586 +[#6590]: https://github.com/tokio-rs/tokio/pull/6590 +[#6596]: https://github.com/tokio-rs/tokio/pull/6596 +[#6599]: https://github.com/tokio-rs/tokio/pull/6599 +[#6603]: https://github.com/tokio-rs/tokio/pull/6603 + # 1.37.0 (March 28th, 2024) ### Added diff --git a/tokio/Cargo.toml b/tokio/Cargo.toml index 548c5f6e7aa..ac759515315 100644 --- a/tokio/Cargo.toml +++ b/tokio/Cargo.toml @@ -6,7 +6,7 @@ name = "tokio" # - README.md # - Update CHANGELOG.md. # - Create "v1.x.y" git tag. 
-version = "1.37.0" +version = "1.38.0" edition = "2021" rust-version = "1.63" authors = ["Tokio Contributors "] diff --git a/tokio/README.md b/tokio/README.md index 5706d1ab6b0..b9d9eb785c3 100644 --- a/tokio/README.md +++ b/tokio/README.md @@ -56,7 +56,7 @@ Make sure you activated the full features of the tokio crate on Cargo.toml: ```toml [dependencies] -tokio = { version = "1.37.0", features = ["full"] } +tokio = { version = "1.38.0", features = ["full"] } ``` Then, on your main.rs: From 24344dfe4b69931bfe9fe686d2424c9f626dc75b Mon Sep 17 00:00:00 2001 From: Weijia Jiang Date: Tue, 16 Jul 2024 21:25:59 +0800 Subject: [PATCH 157/162] time: fix race condition leading to lost timers (#6683) --- tokio/src/runtime/time/mod.rs | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/tokio/src/runtime/time/mod.rs b/tokio/src/runtime/time/mod.rs index 37b04ef0000..c01a5f2b25e 100644 --- a/tokio/src/runtime/time/mod.rs +++ b/tokio/src/runtime/time/mod.rs @@ -190,11 +190,13 @@ impl Driver { assert!(!handle.is_shutdown()); // Finds out the min expiration time to park. - let expiration_time = (0..rt_handle.time().inner.get_shard_size()) - .filter_map(|id| { - let lock = rt_handle.time().inner.lock_sharded_wheel(id); - lock.next_expiration_time() - }) + let locks = (0..rt_handle.time().inner.get_shard_size()) + .map(|id| rt_handle.time().inner.lock_sharded_wheel(id)) + .collect::>(); + + let expiration_time = locks + .iter() + .filter_map(|lock| lock.next_expiration_time()) .min(); rt_handle @@ -203,6 +205,9 @@ impl Driver { .next_wake .store(next_wake_time(expiration_time)); + // Safety: After updating the `next_wake`, we drop all the locks. 
+ drop(locks); + match expiration_time { Some(when) => { let now = handle.time_source.now(rt_handle.clock()); From 14b9f7115728b77c82db8d21b6d768d16dc472a6 Mon Sep 17 00:00:00 2001 From: Weijia Jiang Date: Tue, 16 Jul 2024 23:16:29 +0800 Subject: [PATCH 158/162] chore: release Tokio v1.38.1 (#6688) --- README.md | 2 +- tokio/CHANGELOG.md | 12 ++++++++++++ tokio/Cargo.toml | 2 +- tokio/README.md | 2 +- 4 files changed, 15 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index b9d9eb785c3..6a3d0e2be66 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,7 @@ Make sure you activated the full features of the tokio crate on Cargo.toml: ```toml [dependencies] -tokio = { version = "1.38.0", features = ["full"] } +tokio = { version = "1.38.1", features = ["full"] } ``` Then, on your main.rs: diff --git a/tokio/CHANGELOG.md b/tokio/CHANGELOG.md index acd3bacd5e6..cfe70fa4d52 100644 --- a/tokio/CHANGELOG.md +++ b/tokio/CHANGELOG.md @@ -1,3 +1,15 @@ +# 1.38.1 (July 16th, 2024) + +This release fixes the bug identified as ([#6682]), which caused timers not +to fire when they should. + +### Fixed + +- time: update `wake_up` while holding all the locks of sharded time wheels ([#6683]) + +[#6682]: https://github.com/tokio-rs/tokio/pull/6682 +[#6683]: https://github.com/tokio-rs/tokio/pull/6683 + # 1.38.0 (May 30th, 2024) This release marks the beginning of stabilization for runtime metrics. It diff --git a/tokio/Cargo.toml b/tokio/Cargo.toml index ac759515315..13a9a8c0579 100644 --- a/tokio/Cargo.toml +++ b/tokio/Cargo.toml @@ -6,7 +6,7 @@ name = "tokio" # - README.md # - Update CHANGELOG.md. # - Create "v1.x.y" git tag. 
-version = "1.38.0" +version = "1.38.1" edition = "2021" rust-version = "1.63" authors = ["Tokio Contributors "] diff --git a/tokio/README.md b/tokio/README.md index b9d9eb785c3..6a3d0e2be66 100644 --- a/tokio/README.md +++ b/tokio/README.md @@ -56,7 +56,7 @@ Make sure you activated the full features of the tokio crate on Cargo.toml: ```toml [dependencies] -tokio = { version = "1.38.0", features = ["full"] } +tokio = { version = "1.38.1", features = ["full"] } ``` Then, on your main.rs: From 9681ce2b95ae7271c041f69b9fc48912259a7ea8 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Sun, 21 Jul 2024 16:28:12 +0000 Subject: [PATCH 159/162] chore: make 1.38 an LTS (#6706) --- README.md | 1 + tokio/README.md | 1 + 2 files changed, 2 insertions(+) diff --git a/README.md b/README.md index 6a3d0e2be66..b04599b99bd 100644 --- a/README.md +++ b/README.md @@ -217,6 +217,7 @@ releases are: * `1.32.x` - LTS release until September 2024. (MSRV 1.63) * `1.36.x` - LTS release until March 2025. (MSRV 1.63) + * `1.38.x` - LTS release until July 2025. (MSRV 1.63) Each LTS release will continue to receive backported fixes for at least a year. If you wish to use a fixed minor release in your project, we recommend that you diff --git a/tokio/README.md b/tokio/README.md index 6a3d0e2be66..b04599b99bd 100644 --- a/tokio/README.md +++ b/tokio/README.md @@ -217,6 +217,7 @@ releases are: * `1.32.x` - LTS release until September 2024. (MSRV 1.63) * `1.36.x` - LTS release until March 2025. (MSRV 1.63) + * `1.38.x` - LTS release until July 2025. (MSRV 1.63) Each LTS release will continue to receive backported fixes for at least a year. 
If you wish to use a fixed minor release in your project, we recommend that you From 4b174ce2c95fe1d1a217917db93fcc935e17e0da Mon Sep 17 00:00:00 2001 From: Carl Lerche Date: Mon, 31 Mar 2025 09:42:43 -0700 Subject: [PATCH 160/162] sync: fix cloning value when receiving from broadcast channel The broadcast channel does not require values to implement `Sync` yet it calls the `.clone()` method without synchronizing. This is unsound logic. This patch adds per-value synchronization on receive to handle this case. It is unlikely any usage of the broadcast channel is currently at risk of the unsoundness issue as it requires accessing a `!Sync` type during `.clone()`, which would be very unusual when using the broadcast channel. --- tokio/src/sync/broadcast.rs | 55 ++++++++++++++++++------------------- 1 file changed, 27 insertions(+), 28 deletions(-) diff --git a/tokio/src/sync/broadcast.rs b/tokio/src/sync/broadcast.rs index ba0a44fb8b9..10dfd0efb75 100644 --- a/tokio/src/sync/broadcast.rs +++ b/tokio/src/sync/broadcast.rs @@ -118,7 +118,7 @@ use crate::loom::cell::UnsafeCell; use crate::loom::sync::atomic::{AtomicBool, AtomicUsize}; -use crate::loom::sync::{Arc, Mutex, MutexGuard, RwLock, RwLockReadGuard}; +use crate::loom::sync::{Arc, Mutex, MutexGuard}; use crate::util::linked_list::{self, GuardedLinkedList, LinkedList}; use crate::util::WakeList; @@ -303,7 +303,7 @@ use self::error::{RecvError, SendError, TryRecvError}; /// Data shared between senders and receivers. struct Shared { /// slots in the channel. - buffer: Box<[RwLock>]>, + buffer: Box<[Mutex>]>, /// Mask a position -> index. mask: usize, @@ -347,7 +347,7 @@ struct Slot { /// /// The value is set by `send` when the write lock is held. When a reader /// drops, `rem` is decremented. When it hits zero, the value is dropped. - val: UnsafeCell>, + val: Option, } /// An entry in the wait queue. @@ -385,7 +385,7 @@ generate_addr_of_methods! 
{ } struct RecvGuard<'a, T> { - slot: RwLockReadGuard<'a, Slot>, + slot: MutexGuard<'a, Slot>, } /// Receive a value future. @@ -394,11 +394,15 @@ struct Recv<'a, T> { receiver: &'a mut Receiver, /// Entry in the waiter `LinkedList`. - waiter: UnsafeCell, + waiter: WaiterCell, } -unsafe impl<'a, T: Send> Send for Recv<'a, T> {} -unsafe impl<'a, T: Send> Sync for Recv<'a, T> {} +// The wrapper around `UnsafeCell` isolates the unsafe impl `Send` and `Sync` +// from `Recv`. +struct WaiterCell(UnsafeCell); + +unsafe impl Send for WaiterCell {} +unsafe impl Sync for WaiterCell {} /// Max number of receivers. Reserve space to lock. const MAX_RECEIVERS: usize = usize::MAX >> 2; @@ -466,12 +470,6 @@ pub fn channel(capacity: usize) -> (Sender, Receiver) { (tx, rx) } -unsafe impl Send for Sender {} -unsafe impl Sync for Sender {} - -unsafe impl Send for Receiver {} -unsafe impl Sync for Receiver {} - impl Sender { /// Creates the sending-half of the [`broadcast`] channel. /// @@ -510,10 +508,10 @@ impl Sender { let mut buffer = Vec::with_capacity(capacity); for i in 0..capacity { - buffer.push(RwLock::new(Slot { + buffer.push(Mutex::new(Slot { rem: AtomicUsize::new(0), pos: (i as u64).wrapping_sub(capacity as u64), - val: UnsafeCell::new(None), + val: None, })); } @@ -599,7 +597,7 @@ impl Sender { tail.pos = tail.pos.wrapping_add(1); // Get the slot - let mut slot = self.shared.buffer[idx].write().unwrap(); + let mut slot = self.shared.buffer[idx].lock(); // Track the position slot.pos = pos; @@ -608,7 +606,7 @@ impl Sender { slot.rem.with_mut(|v| *v = rem); // Write the value - slot.val = UnsafeCell::new(Some(value)); + slot.val = Some(value); // Release the slot lock before notifying the receivers. 
drop(slot); @@ -695,7 +693,7 @@ impl Sender { while low < high { let mid = low + (high - low) / 2; let idx = base_idx.wrapping_add(mid) & self.shared.mask; - if self.shared.buffer[idx].read().unwrap().rem.load(SeqCst) == 0 { + if self.shared.buffer[idx].lock().rem.load(SeqCst) == 0 { low = mid + 1; } else { high = mid; @@ -737,7 +735,7 @@ impl Sender { let tail = self.shared.tail.lock(); let idx = (tail.pos.wrapping_sub(1) & self.shared.mask as u64) as usize; - self.shared.buffer[idx].read().unwrap().rem.load(SeqCst) == 0 + self.shared.buffer[idx].lock().rem.load(SeqCst) == 0 } /// Returns the number of active receivers. @@ -1057,7 +1055,7 @@ impl Receiver { let idx = (self.next & self.shared.mask as u64) as usize; // The slot holding the next value to read - let mut slot = self.shared.buffer[idx].read().unwrap(); + let mut slot = self.shared.buffer[idx].lock(); if slot.pos != self.next { // Release the `slot` lock before attempting to acquire the `tail` @@ -1074,7 +1072,7 @@ impl Receiver { let mut tail = self.shared.tail.lock(); // Acquire slot lock again - slot = self.shared.buffer[idx].read().unwrap(); + slot = self.shared.buffer[idx].lock(); // Make sure the position did not change. This could happen in the // unlikely event that the buffer is wrapped between dropping the @@ -1367,12 +1365,12 @@ impl<'a, T> Recv<'a, T> { fn new(receiver: &'a mut Receiver) -> Recv<'a, T> { Recv { receiver, - waiter: UnsafeCell::new(Waiter { + waiter: WaiterCell(UnsafeCell::new(Waiter { queued: AtomicBool::new(false), waker: None, pointers: linked_list::Pointers::new(), _p: PhantomPinned, - }), + })), } } @@ -1384,7 +1382,7 @@ impl<'a, T> Recv<'a, T> { is_unpin::<&mut Receiver>(); let me = self.get_unchecked_mut(); - (me.receiver, &me.waiter) + (me.receiver, &me.waiter.0) } } } @@ -1418,6 +1416,7 @@ impl<'a, T> Drop for Recv<'a, T> { // `Shared::notify_rx` before we drop the object. 
let queued = self .waiter + .0 .with(|ptr| unsafe { (*ptr).queued.load(Acquire) }); // If the waiter is queued, we need to unlink it from the waiters list. @@ -1432,6 +1431,7 @@ impl<'a, T> Drop for Recv<'a, T> { // `Relaxed` order suffices because we hold the tail lock. let queued = self .waiter + .0 .with_mut(|ptr| unsafe { (*ptr).queued.load(Relaxed) }); if queued { @@ -1440,7 +1440,7 @@ impl<'a, T> Drop for Recv<'a, T> { // safety: tail lock is held and the wait node is verified to be in // the list. unsafe { - self.waiter.with_mut(|ptr| { + self.waiter.0.with_mut(|ptr| { tail.waiters.remove((&mut *ptr).into()); }); } @@ -1486,7 +1486,7 @@ impl<'a, T> RecvGuard<'a, T> { where T: Clone, { - self.slot.val.with(|ptr| unsafe { (*ptr).clone() }) + self.slot.val.clone() } } @@ -1494,8 +1494,7 @@ impl<'a, T> Drop for RecvGuard<'a, T> { fn drop(&mut self) { // Decrement the remaining counter if 1 == self.slot.rem.fetch_sub(1, SeqCst) { - // Safety: Last receiver, drop the value - self.slot.val.with_mut(|ptr| unsafe { *ptr = None }); + self.slot.val = None; } } } From 7b6ccb515ff067151ed62db835f735e5653f8784 Mon Sep 17 00:00:00 2001 From: Carl Lerche Date: Wed, 2 Apr 2025 14:25:45 -0700 Subject: [PATCH 161/162] chore: backport CI fixes --- .cirrus.yml | 2 +- .github/workflows/ci.yml | 40 +++++++++++++++++++++++--------- Cargo.toml | 13 +++++++++++ examples/Cargo.toml | 3 +++ tokio/Cargo.toml | 4 +++- tokio/src/runtime/tests/queue.rs | 13 ----------- 6 files changed, 49 insertions(+), 26 deletions(-) diff --git a/.cirrus.yml b/.cirrus.yml index bdf3af74098..abf07ca4852 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -1,7 +1,7 @@ only_if: $CIRRUS_TAG == '' && ($CIRRUS_PR != '' || $CIRRUS_BRANCH == 'master' || $CIRRUS_BRANCH =~ 'tokio-.*') auto_cancellation: $CIRRUS_BRANCH != 'master' && $CIRRUS_BRANCH !=~ 'tokio-.*' freebsd_instance: - image_family: freebsd-14-0 + image_family: freebsd-14-2 env: RUST_STABLE: stable RUST_NIGHTLY: nightly-2024-05-05 diff --git 
a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 76151902576..fb96e27c532 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -177,7 +177,7 @@ jobs: run: | set -euxo pipefail RUSTFLAGS="$RUSTFLAGS -C panic=abort -Zpanic-abort-tests" cargo nextest run --workspace --exclude tokio-macros --exclude tests-build --all-features --tests - + test-integration-tests-per-feature: needs: basics name: Run integration tests for each feature @@ -455,10 +455,18 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - name: Check semver + - name: Check `tokio` semver + uses: obi1kenobi/cargo-semver-checks-action@v2 + with: + rust-toolchain: ${{ env.rust_stable }} + package: tokio + release-type: minor + - name: Check semver for rest of the workspace + if: ${{ !startsWith(github.event.pull_request.base.ref, 'tokio-1.') }} uses: obi1kenobi/cargo-semver-checks-action@v2 with: rust-toolchain: ${{ env.rust_stable }} + exclude: tokio release-type: minor cross-check: @@ -689,7 +697,14 @@ jobs: toolchain: ${{ env.rust_min }} - uses: Swatinem/rust-cache@v2 - name: "check --workspace --all-features" - run: cargo check --workspace --all-features + run: | + if [[ "${{ github.event.pull_request.base.ref }}" =~ ^tokio-1\..* ]]; then + # Only check `tokio` crate as the PR is backporting to an earlier tokio release. 
+ cargo check -p tokio --all-features + else + # Check all crates in the workspace + cargo check --workspace --all-features + fi env: RUSTFLAGS: "" # remove -Dwarnings @@ -927,10 +942,10 @@ jobs: targets: ${{ matrix.target }} # Install dependencies - - name: Install cargo-hack, wasmtime, and cargo-wasi + - name: Install cargo-hack, wasmtime uses: taiki-e/install-action@v2 with: - tool: cargo-hack,wasmtime,cargo-wasi + tool: cargo-hack,wasmtime - uses: Swatinem/rust-cache@v2 - name: WASI test tokio full @@ -956,9 +971,12 @@ jobs: - name: test tests-integration --features wasi-rt # TODO: this should become: `cargo hack wasi test --each-feature` - run: cargo wasi test --test rt_yield --features wasi-rt + run: cargo test --target ${{ matrix.target }} --test rt_yield --features wasi-rt if: matrix.target == 'wasm32-wasip1' working-directory: tests-integration + env: + CARGO_TARGET_WASM32_WASIP1_RUNNER: "wasmtime run --" + RUSTFLAGS: -Dwarnings -C target-feature=+atomics,+bulk-memory -C link-args=--max-memory=67108864 - name: test tests-integration --features wasi-threads-rt run: cargo test --target ${{ matrix.target }} --features wasi-threads-rt @@ -980,7 +998,7 @@ jobs: rust: # `check-external-types` requires a specific Rust nightly version. See # the README for details: https://github.com/awslabs/cargo-check-external-types - - nightly-2023-10-21 + - nightly-2024-06-30 steps: - uses: actions/checkout@v4 - name: Install Rust ${{ matrix.rust }} @@ -991,7 +1009,7 @@ jobs: - name: Install cargo-check-external-types uses: taiki-e/cache-cargo-install-action@v1 with: - tool: cargo-check-external-types@0.1.10 + tool: cargo-check-external-types@0.1.13 - name: check-external-types run: cargo check-external-types --all-features working-directory: tokio @@ -1051,11 +1069,11 @@ jobs: - name: Make sure dictionary words are sorted and unique run: | # `sed` removes the first line (number of words) and - # the last line (new line). - # + # the last line (new line). 
+ # # `sort` makes sure everything in between is sorted # and contains no duplicates. - # + # # Since `sort` is sensitive to locale, we set it # using LC_ALL to en_US.UTF8 to be consistent in different # environments. diff --git a/Cargo.toml b/Cargo.toml index 2238deac71c..618b310e32c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,3 +17,16 @@ members = [ [workspace.metadata.spellcheck] config = "spellcheck.toml" + +[workspace.lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = [ + 'cfg(fuzzing)', + 'cfg(loom)', + 'cfg(mio_unsupported_force_poll_poll)', + 'cfg(tokio_allow_from_blocking_fd)', + 'cfg(tokio_internal_mt_counters)', + 'cfg(tokio_no_parking_lot)', + 'cfg(tokio_no_tuning_tests)', + 'cfg(tokio_taskdump)', + 'cfg(tokio_unstable)', +] } diff --git a/examples/Cargo.toml b/examples/Cargo.toml index a244fccaca1..8d42ca3d8fa 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -94,3 +94,6 @@ path = "named-pipe-multi-client.rs" [[example]] name = "dump" path = "dump.rs" + +[lints] +workspace = true diff --git a/tokio/Cargo.toml b/tokio/Cargo.toml index 13a9a8c0579..bfcb55ed5f9 100644 --- a/tokio/Cargo.toml +++ b/tokio/Cargo.toml @@ -171,6 +171,8 @@ features = ["full", "test-util"] allowed_external_types = [ "bytes::buf::buf_impl::Buf", "bytes::buf::buf_mut::BufMut", - "tokio_macros::*", ] + +[lints] +workspace = true diff --git a/tokio/src/runtime/tests/queue.rs b/tokio/src/runtime/tests/queue.rs index f463355f0d3..9047f4ad7af 100644 --- a/tokio/src/runtime/tests/queue.rs +++ b/tokio/src/runtime/tests/queue.rs @@ -1,5 +1,4 @@ use crate::runtime::scheduler::multi_thread::{queue, Stats}; -use crate::runtime::task::{self, Schedule, Task}; use std::cell::RefCell; use std::thread; @@ -272,15 +271,3 @@ fn stress2() { assert_eq!(num_pop, NUM_TASKS); } } - -struct Runtime; - -impl Schedule for Runtime { - fn release(&self, _task: &Task) -> Option> { - None - } - - fn schedule(&self, _task: task::Notified) { - unreachable!(); - } -} From 
aa303bc2051f7c21b48bb7bfcafe8fd4f39afd21 Mon Sep 17 00:00:00 2001 From: Carl Lerche Date: Wed, 2 Apr 2025 17:29:30 -0700 Subject: [PATCH 162/162] chore: prepare Tokio v1.38.2 release --- README.md | 2 +- tokio/CHANGELOG.md | 14 ++++++++++++++ tokio/Cargo.toml | 2 +- tokio/README.md | 2 +- 4 files changed, 17 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index b04599b99bd..b82b6bf345a 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,7 @@ Make sure you activated the full features of the tokio crate on Cargo.toml: ```toml [dependencies] -tokio = { version = "1.38.1", features = ["full"] } +tokio = { version = "1.38.2", features = ["full"] } ``` Then, on your main.rs: diff --git a/tokio/CHANGELOG.md b/tokio/CHANGELOG.md index cfe70fa4d52..2e911098d55 100644 --- a/tokio/CHANGELOG.md +++ b/tokio/CHANGELOG.md @@ -1,3 +1,17 @@ +# 1.38.2 (April 2nd, 2025) + +This release fixes a soundness issue in the broadcast channel. The channel +accepts values that are `Send` but `!Sync`. Previously, the channel called +`clone()` on these values without synchronizing. This release fixes the channel +by synchronizing calls to `.clone()` (Thanks Austin Bonander for finding and +reporting the issue). + +### Fixed + +- sync: synchronize `clone()` call in broadcast channel ([#7232]) + +[#7232]: https://github.com/tokio-rs/tokio/pull/7232 + # 1.38.1 (July 16th, 2024) This release fixes the bug identified as ([#6682]), which caused timers not diff --git a/tokio/Cargo.toml b/tokio/Cargo.toml index bfcb55ed5f9..6a908042596 100644 --- a/tokio/Cargo.toml +++ b/tokio/Cargo.toml @@ -6,7 +6,7 @@ name = "tokio" # - README.md # - Update CHANGELOG.md. # - Create "v1.x.y" git tag. 
-version = "1.38.1" +version = "1.38.2" edition = "2021" rust-version = "1.63" authors = ["Tokio Contributors "] diff --git a/tokio/README.md b/tokio/README.md index b04599b99bd..b82b6bf345a 100644 --- a/tokio/README.md +++ b/tokio/README.md @@ -56,7 +56,7 @@ Make sure you activated the full features of the tokio crate on Cargo.toml: ```toml [dependencies] -tokio = { version = "1.38.1", features = ["full"] } +tokio = { version = "1.38.2", features = ["full"] } ``` Then, on your main.rs: