diff --git a/.circleci/config.yml b/.circleci/config.yml index d8d8ca672ec..6f901a95e93 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -22,4 +22,4 @@ jobs: workflows: ci: jobs: - - test-arm \ No newline at end of file + - test-arm diff --git a/.cirrus.yml b/.cirrus.yml index 1adc492ad6c..fac5b4a34da 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -11,6 +11,7 @@ env: # the system's binaries, so the environment shouldn't matter. task: name: FreeBSD 64-bit + auto_cancellation: $CIRRUS_BRANCH != 'master' && $CIRRUS_BRANCH !=~ 'tokio-.*' setup_script: - pkg install -y bash curl - curl https://sh.rustup.rs -sSf --output rustup.sh @@ -25,6 +26,7 @@ task: task: name: FreeBSD docs + auto_cancellation: $CIRRUS_BRANCH != 'master' && $CIRRUS_BRANCH !=~ 'tokio-.*' env: RUSTFLAGS: --cfg docsrs --cfg tokio_unstable RUSTDOCFLAGS: --cfg docsrs --cfg tokio_unstable -Dwarnings @@ -42,6 +44,7 @@ task: task: name: FreeBSD 32-bit + auto_cancellation: $CIRRUS_BRANCH != 'master' && $CIRRUS_BRANCH !=~ 'tokio-.*' setup_script: - pkg install -y bash curl - curl https://sh.rustup.rs -sSf --output rustup.sh diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3a5b83b7aa5..5aff6c09c66 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -12,7 +12,7 @@ env: # Change to specific Rust release to pin rust_stable: stable rust_nightly: nightly-2022-07-26 - rust_clippy: 1.52.0 + rust_clippy: 1.56.0 # When updating this, also update: # - README.md # - tokio/README.md @@ -49,6 +49,7 @@ jobs: - loom-compile - check-readme - test-hyper + - x86_64-fortanix-unknown-sgx - wasm32-unknown-unknown - wasm32-wasi - check-external-types @@ -75,7 +76,7 @@ jobs: run: rustup update stable - uses: Swatinem/rust-cache@v1 - name: Install cargo-hack - run: cargo install cargo-hack + uses: taiki-e/install-action@cargo-hack # Run `tokio` with `full` features. This excludes testing utilities which # can alter the runtime behavior of Tokio. 
@@ -141,9 +142,7 @@ jobs: - uses: Swatinem/rust-cache@v1 - name: Install Valgrind - run: | - sudo apt-get update -y - sudo apt-get install -y valgrind + uses: taiki-e/install-action@valgrind # Compile tests - name: cargo build test-mem @@ -278,12 +277,12 @@ jobs: override: true - uses: Swatinem/rust-cache@v1 - name: Install cargo-hack - run: cargo install cargo-hack - - name: check --each-feature - run: cargo hack check --all --each-feature -Z avoid-dev-deps + uses: taiki-e/install-action@cargo-hack + - name: check --feature-powerset + run: cargo hack check --all --feature-powerset --depth 2 -Z avoid-dev-deps --keep-going # Try with unstable feature flags - - name: check --each-feature --unstable - run: cargo hack check --all --each-feature -Z avoid-dev-deps + - name: check --feature-powerset --unstable + run: cargo hack check --all --feature-powerset --depth 2 -Z avoid-dev-deps --keep-going env: RUSTFLAGS: --cfg tokio_unstable -Dwarnings @@ -298,8 +297,9 @@ jobs: toolchain: ${{ env.rust_min }} override: true - uses: Swatinem/rust-cache@v1 - - name: "test --workspace --all-features" - run: cargo check --workspace --all-features + - name: "test --all-features" + run: cargo check --all-features + working-directory: tokio minimal-versions: name: minimal-versions @@ -313,7 +313,7 @@ jobs: override: true - uses: Swatinem/rust-cache@v1 - name: Install cargo-hack - run: cargo install cargo-hack + uses: taiki-e/install-action@cargo-hack - name: "check --all-features -Z minimal-versions" run: | # Remove dev-dependencies from Cargo.toml to prevent the next `cargo update` @@ -450,6 +450,23 @@ jobs: git diff cargo test --features full + x86_64-fortanix-unknown-sgx: + name: build tokio for x86_64-fortanix-unknown-sgx + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Install Rust ${{ env.rust_nightly }} + uses: actions-rs/toolchain@v1 + with: + toolchain: ${{ env.rust_nightly }} + target: x86_64-fortanix-unknown-sgx + override: true + - uses: Swatinem/rust-cache@v1 + # NOTE: Currently the only test we can run is to build tokio with rt and sync features. + - name: build tokio + run: cargo build --target x86_64-fortanix-unknown-sgx --features rt,sync + working-directory: tokio + wasm32-unknown-unknown: name: test tokio for wasm32-unknown-unknown runs-on: ubuntu-latest @@ -481,13 +498,13 @@ jobs: # Install dependencies - name: Install cargo-hack - run: cargo install cargo-hack + uses: taiki-e/install-action@cargo-hack - name: Install wasm32-wasi target run: rustup target add wasm32-wasi - name: Install wasmtime - run: cargo install wasmtime-cli + uses: taiki-e/install-action@wasmtime - name: Install cargo-wasi run: cargo install cargo-wasi @@ -537,4 +554,3 @@ jobs: cargo install cargo-check-external-types --locked --version 0.1.3 cargo check-external-types --all-features --config external-types.toml working-directory: tokio - diff --git a/.github/workflows/stress-test.yml b/.github/workflows/stress-test.yml index e34e0d54907..ecce0c407dc 100644 --- a/.github/workflows/stress-test.yml +++ b/.github/workflows/stress-test.yml @@ -28,9 +28,7 @@ jobs: override: true - uses: Swatinem/rust-cache@v1 - name: Install Valgrind - run: | - sudo apt-get update -y - sudo apt-get install -y valgrind + uses: taiki-e/install-action@valgrind # Compiles each of the stress test examples. - name: Compile stress test examples @@ -38,4 +36,4 @@ jobs: # Runs each of the examples using Valgrind. Detects leaks and displays them. 
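The new `x86_64-fortanix-unknown-sgx` job above only builds `tokio` with the `rt` and `sync` features. As a rough, hedged illustration (not from the PR) of the API surface that subset covers, here is a standalone sketch that compiles against exactly those two features:

```rust
// Sketch: code that needs only tokio's `rt` and `sync` features, the subset
// exercised by the SGX build job. No I/O, time, or macros are used.
use std::sync::Arc;
use tokio::sync::Notify;

fn main() {
    // `rt` provides the current-thread runtime and `tokio::spawn`.
    let rt = tokio::runtime::Builder::new_current_thread()
        .build()
        .expect("failed to build runtime");

    rt.block_on(async {
        // `sync` provides Notify, Mutex, channels, etc.
        let notify = Arc::new(Notify::new());
        let waiter = notify.clone();

        let task = tokio::spawn(async move {
            waiter.notified().await;
        });

        // Notifying before the task polls stores a permit, so this is safe
        // regardless of scheduling order.
        notify.notify_one();
        task.await.unwrap();
    });
}
```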
- name: Run valgrind - run: valgrind --leak-check=full --show-leak-kinds=all ./target/release/examples/${{ matrix.stress-test }} + run: valgrind --error-exitcode=1 --leak-check=full --show-leak-kinds=all ./target/release/examples/${{ matrix.stress-test }} diff --git a/README.md b/README.md index 264dde0067f..3ad326e47f1 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,7 @@ Make sure you activated the full features of the tokio crate on Cargo.toml: ```toml [dependencies] -tokio = { version = "1.21.0", features = ["full"] } +tokio = { version = "1.21.2", features = ["full"] } ``` Then, on your main.rs: diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 4b9dcdc5d08..3323376216c 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -21,7 +21,6 @@ serde_derive = "1.0" serde_json = "1.0" httparse = "1.0" httpdate = "1.0" -once_cell = "1.5.2" rand = "0.8.3" [target.'cfg(windows)'.dev-dependencies.winapi] @@ -71,11 +70,6 @@ path = "udp-codec.rs" name = "tinyhttp" path = "tinyhttp.rs" -[[example]] -name = "custom-executor" -path = "custom-executor.rs" - - [[example]] name = "custom-executor-tokio-context" path = "custom-executor-tokio-context.rs" diff --git a/tokio-stream/CHANGELOG.md b/tokio-stream/CHANGELOG.md index ce6d340afe9..1037b80db19 100644 --- a/tokio-stream/CHANGELOG.md +++ b/tokio-stream/CHANGELOG.md @@ -1,3 +1,11 @@ +# 0.1.10 (September 18, 2022) + +- time: add `StreamExt::chunks_timeout` ([#4695]) +- stream: add `track_caller` to public APIs ([#4786]) + +[#4695]: https://github.com/tokio-rs/tokio/pull/4695 +[#4786]: https://github.com/tokio-rs/tokio/pull/4786 + # 0.1.9 (June 4, 2022) - deps: upgrade `tokio-util` dependency to `0.7.x` ([#3762]) diff --git a/tokio-stream/Cargo.toml b/tokio-stream/Cargo.toml index 2d693f86000..a4cfcd6d123 100644 --- a/tokio-stream/Cargo.toml +++ b/tokio-stream/Cargo.toml @@ -4,7 +4,7 @@ name = "tokio-stream" # - Remove path dependencies # - Update CHANGELOG.md. # - Create "tokio-stream-0.1.x" git tag. -version = "0.1.9" +version = "0.1.10" edition = "2018" rust-version = "1.49" authors = ["Tokio Contributors <team@tokio.rs>"] diff --git a/tokio-util/CHANGELOG.md b/tokio-util/CHANGELOG.md index ffa80e4bad6..ca378b1ed20 100644 --- a/tokio-util/CHANGELOG.md +++ b/tokio-util/CHANGELOG.md @@ -1,3 +1,25 @@ +# 0.7.4 (September 8, 2022) + +### Added + +- io: add `SyncIoBridge::shutdown()` ([#4938]) +- task: improve `LocalPoolHandle` ([#4680]) + +### Fixed + +- util: add `track_caller` to public APIs ([#4785]) + +### Unstable + +- task: fix compilation errors in `JoinMap` with Tokio v1.21.0 ([#4755]) +- task: remove the unstable, deprecated `JoinMap::join_one` ([#4920]) + +[#4680]: https://github.com/tokio-rs/tokio/pull/4680 +[#4755]: https://github.com/tokio-rs/tokio/pull/4755 +[#4785]: https://github.com/tokio-rs/tokio/pull/4785 +[#4920]: https://github.com/tokio-rs/tokio/pull/4920 +[#4938]: https://github.com/tokio-rs/tokio/pull/4938 + # 0.7.3 (June 4, 2022) ### Changed diff --git a/tokio-util/Cargo.toml b/tokio-util/Cargo.toml index 6406af635ae..d5a9f748c18 100644 --- a/tokio-util/Cargo.toml +++ b/tokio-util/Cargo.toml @@ -4,7 +4,7 @@ name = "tokio-util" # - Remove path dependencies # - Update CHANGELOG.md. # - Create "tokio-util-0.7.x" git tag.
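The `StreamExt::chunks_timeout` entry in the 0.1.10 changelog above is a new batching combinator. A hedged sketch of how it behaves (assuming `tokio` with the `full` feature and `tokio-stream` with its default `time` feature; the sizes and timeout here are illustrative):

```rust
use std::time::Duration;
use tokio_stream::StreamExt;

#[tokio::main]
async fn main() {
    let stream = tokio_stream::iter(vec![1, 2, 3, 4, 5]);

    // Batch up to 2 items, flushing a partial batch if 10ms pass first.
    let chunks = stream.chunks_timeout(2, Duration::from_millis(10));
    tokio::pin!(chunks); // the combinator holds a Sleep, so it is !Unpin

    assert_eq!(chunks.next().await, Some(vec![1, 2]));
    assert_eq!(chunks.next().await, Some(vec![3, 4]));
    // The source ends before a full batch accumulates, so the remainder
    // is flushed as a short chunk.
    assert_eq!(chunks.next().await, Some(vec![5]));
    assert_eq!(chunks.next().await, None);
}
```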
-version = "0.7.3" +version = "0.7.4" edition = "2018" rust-version = "1.49" authors = ["Tokio Contributors "] @@ -34,7 +34,7 @@ rt = ["tokio/rt", "tokio/sync", "futures-util", "hashbrown"] __docs_rs = ["futures-util"] [dependencies] -tokio = { version = "1.19.0", path = "../tokio", features = ["sync"] } +tokio = { version = "1.21.0", path = "../tokio", features = ["sync"] } bytes = "1.0.0" futures-core = "0.3.0" futures-sink = "0.3.0" diff --git a/tokio-util/src/codec/length_delimited.rs b/tokio-util/src/codec/length_delimited.rs index 93d2f180d0f..a182dcaec0c 100644 --- a/tokio-util/src/codec/length_delimited.rs +++ b/tokio-util/src/codec/length_delimited.rs @@ -522,15 +522,11 @@ impl LengthDelimitedCodec { } }; - let num_skip = self.builder.get_num_skip(); - - if num_skip > 0 { - src.advance(num_skip); - } + src.advance(self.builder.get_num_skip()); // Ensure that the buffer has enough space to read the incoming // payload - src.reserve(n); + src.reserve(n.saturating_sub(src.len())); Ok(Some(n)) } @@ -568,7 +564,7 @@ impl Decoder for LengthDelimitedCodec { self.state = DecodeState::Head; // Make sure the buffer has enough space to read the next head - src.reserve(self.builder.num_head_bytes()); + src.reserve(self.builder.num_head_bytes().saturating_sub(src.len())); Ok(Some(data)) } diff --git a/tokio-util/src/io/stream_reader.rs b/tokio-util/src/io/stream_reader.rs index 6260f9e442c..5ed7c4aed82 100644 --- a/tokio-util/src/io/stream_reader.rs +++ b/tokio-util/src/io/stream_reader.rs @@ -50,9 +50,58 @@ pin_project! { /// # } /// ``` /// + /// If the stream produces errors which are not [std::io::Error], + /// the errors can be converted using [`StreamExt`] to map each + /// element. + /// + /// ``` + /// use bytes::Bytes; + /// use tokio::io::AsyncReadExt; + /// use tokio_util::io::StreamReader; + /// use tokio_stream::StreamExt; + /// # #[tokio::main] + /// # async fn main() -> std::io::Result<()> { + /// + /// // Create a stream from an iterator, including an error. + /// let stream = tokio_stream::iter(vec![ + /// Result::Ok(Bytes::from_static(&[0, 1, 2, 3])), + /// Result::Ok(Bytes::from_static(&[4, 5, 6, 7])), + /// Result::Err("Something bad happened!") + /// ]); + /// + /// // Use StreamExt to map the stream and error to a std::io::Error + /// let stream = stream.map(|result| result.map_err(|err| { + /// std::io::Error::new(std::io::ErrorKind::Other, err) + /// })); + /// + /// // Convert it to an AsyncRead. + /// let mut read = StreamReader::new(stream); + /// + /// // Read five bytes from the stream. + /// let mut buf = [0; 5]; + /// read.read_exact(&mut buf).await?; + /// assert_eq!(buf, [0, 1, 2, 3, 4]); + /// + /// // Read the rest of the current chunk. + /// assert_eq!(read.read(&mut buf).await?, 3); + /// assert_eq!(&buf[..3], [5, 6, 7]); + /// + /// // Reading the next chunk will produce an error + /// let error = read.read(&mut buf).await.unwrap_err(); + /// assert_eq!(error.kind(), std::io::ErrorKind::Other); + /// assert_eq!(error.into_inner().unwrap().to_string(), "Something bad happened!"); + /// + /// // We have now reached the end. 
+ /// assert_eq!(read.read(&mut buf).await?, 0); + /// + /// # Ok(()) + /// # } + /// ``` + /// /// [`AsyncRead`]: tokio::io::AsyncRead /// [`Stream`]: futures_core::Stream /// [`ReaderStream`]: crate::io::ReaderStream + /// [`StreamExt`]: tokio_stream::StreamExt #[derive(Debug)] pub struct StreamReader { #[pin] diff --git a/tokio/CHANGELOG.md b/tokio/CHANGELOG.md index fcb4a740663..b7fd86a2a86 100644 --- a/tokio/CHANGELOG.md +++ b/tokio/CHANGELOG.md @@ -1,3 +1,20 @@ +# 1.21.2 (September 27, 2022) + +This release removes the dependency on the `once_cell` crate to restore the MSRV +of 1.21.x, which is the latest minor version at the time of release. ([#5048]) + +[#5048]: https://github.com/tokio-rs/tokio/pull/5048 + +# 1.21.1 (September 13, 2022) + +### Fixed + +- net: fix dependency resolution for socket2 ([#5000]) +- task: ignore failure to set TLS in `LocalSet` Drop ([#4976]) + +[#4976]: https://github.com/tokio-rs/tokio/pull/4976 +[#5000]: https://github.com/tokio-rs/tokio/pull/5000 + # 1.21.0 (September 2, 2022) This release is the first release of Tokio to intentionally support WASM. The @@ -93,6 +110,13 @@ wasm32-wasi target is given unstable support for the `net` feature. [#4956]: https://github.com/tokio-rs/tokio/pull/4956 [#4959]: https://github.com/tokio-rs/tokio/pull/4959 +# 1.20.2 (September 27, 2022) + +This release removes the dependency on the `once_cell` crate to restore the MSRV +of the 1.20.x LTS release. ([#5048]) + +[#5048]: https://github.com/tokio-rs/tokio/pull/5048 + # 1.20.1 (July 25, 2022) ### Fixed @@ -211,6 +235,13 @@ This release fixes a bug in `Notified::enable`. ([#4747]) [#4729]: https://github.com/tokio-rs/tokio/pull/4729 [#4739]: https://github.com/tokio-rs/tokio/pull/4739 +# 1.18.3 (September 27, 2022) + +This release removes the dependency on the `once_cell` crate to restore the MSRV +of the 1.18.x LTS release. ([#5048]) + +[#5048]: https://github.com/tokio-rs/tokio/pull/5048 + # 1.18.2 (May 5, 2022) Add missing features for the `winapi` dependency. ([#4663]) diff --git a/tokio/Cargo.toml b/tokio/Cargo.toml index 795e8321095..e5f1ad3d65a 100644 --- a/tokio/Cargo.toml +++ b/tokio/Cargo.toml @@ -6,7 +6,7 @@ name = "tokio" # - README.md # - Update CHANGELOG.md. # - Create "v1.0.x" git tag. -version = "1.21.0" +version = "1.21.2" edition = "2018" rust-version = "1.49" authors = ["Tokio Contributors "] @@ -51,6 +51,7 @@ net = [ "mio/os-poll", "mio/os-ext", "mio/net", + "socket2", "winapi/fileapi", "winapi/handleapi", "winapi/namedpipeapi", @@ -58,11 +59,10 @@ net = [ "winapi/winnt", "winapi/minwindef", "winapi/accctrl", - "winapi/aclapi" + "winapi/aclapi", ] process = [ "bytes", - "once_cell", "libc", "mio/os-poll", "mio/os-ext", @@ -76,13 +76,12 @@ process = [ "winapi/winnt", ] # Includes basic task execution capabilities -rt = ["once_cell"] +rt = [] rt-multi-thread = [ "num_cpus", "rt", ] signal = [ - "once_cell", "libc", "mio/os-poll", "mio/net", @@ -111,14 +110,13 @@ pin-project-lite = "0.2.0" # Everything else is optional... 
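An aside on the `length_delimited.rs` hunks further up: `BytesMut::reserve(additional)` guarantees space for `additional` bytes *beyond* the current length, so the old `src.reserve(n)` could over-allocate when part of the frame was already buffered. A hedged, standalone sketch of the arithmetic (uses the `bytes` crate; the sizes are made up):

```rust
use bytes::BytesMut;

fn main() {
    let frame_len = 12; // total payload bytes the decoder still needs

    let mut src = BytesMut::new();
    src.extend_from_slice(&[0u8; 10]); // 10 of those bytes already arrived

    // Old: `src.reserve(frame_len)` asks for 12 bytes *in addition to* the
    // 10 already stored, i.e. capacity for 22.
    // New: only request the shortfall; `saturating_sub` guards the case
    // where the buffer already holds the whole frame.
    src.reserve(frame_len.saturating_sub(src.len()));
    assert!(src.capacity() >= frame_len);
}
```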
bytes = { version = "1.0.0", optional = true } -once_cell = { version = "1.5.2", optional = true } memchr = { version = "2.2", optional = true } mio = {git="https://github.com/WasmEdge/mio.git", optional = true, features = ["wasmedge", "os-poll", "net"] } num_cpus = { version = "1.8.0", optional = true } parking_lot = { version = "0.12.0", optional = true } [target.'cfg(not(any(target_arch = "wasm32", target_arch = "wasm64")))'.dependencies] -socket2 = { version = "0.4.4", features = [ "all" ] } +socket2 = { version = "0.4.4", optional = true, features = [ "all" ] } # Currently unstable. The API exposed by these features may be broken at any time. # Requires `--cfg tokio_unstable` to enable. diff --git a/tokio/README.md b/tokio/README.md index 264dde0067f..3ad326e47f1 100644 --- a/tokio/README.md +++ b/tokio/README.md @@ -56,7 +56,7 @@ Make sure you activated the full features of the tokio crate on Cargo.toml: ```toml [dependencies] -tokio = { version = "1.21.0", features = ["full"] } +tokio = { version = "1.21.2", features = ["full"] } ``` Then, on your main.rs: diff --git a/tokio/src/fs/open_options/mock_open_options.rs b/tokio/src/fs/open_options/mock_open_options.rs index cbbda0ec256..17b4a48640a 100644 --- a/tokio/src/fs/open_options/mock_open_options.rs +++ b/tokio/src/fs/open_options/mock_open_options.rs @@ -1,3 +1,4 @@ +#![allow(unreachable_pub)] //! Mock version of std::fs::OpenOptions; use mockall::mock; diff --git a/tokio/src/lib.rs b/tokio/src/lib.rs index b92e26ccbde..72ab8f05765 100644 --- a/tokio/src/lib.rs +++ b/tokio/src/lib.rs @@ -498,9 +498,14 @@ cfg_rt! { pub mod runtime; } cfg_not_rt! { - cfg_io_driver_impl! { - pub(crate) mod runtime; - } + // The `runtime` module is used when the IO or time driver is needed. + #[cfg(any( + feature = "net", + feature = "time", + all(unix, feature = "process"), + all(unix, feature = "signal"), + ))] + pub(crate) mod runtime; } pub(crate) mod coop; diff --git a/tokio/src/loom/mocked.rs b/tokio/src/loom/mocked.rs index 367d59b43a4..1c4a32dd863 100644 --- a/tokio/src/loom/mocked.rs +++ b/tokio/src/loom/mocked.rs @@ -38,3 +38,8 @@ pub(crate) mod sys { 2 } } + +pub(crate) mod thread { + pub use loom::lazy_static::AccessError; + pub use loom::thread::*; +} diff --git a/tokio/src/loom/std/mod.rs b/tokio/src/loom/std/mod.rs index 0c70bee74eb..1141e4dc07e 100644 --- a/tokio/src/loom/std/mod.rs +++ b/tokio/src/loom/std/mod.rs @@ -102,7 +102,7 @@ pub(crate) mod thread { #[allow(unused_imports)] pub(crate) use std::thread::{ - current, panicking, park, park_timeout, sleep, spawn, Builder, JoinHandle, LocalKey, - Result, Thread, ThreadId, + current, panicking, park, park_timeout, sleep, spawn, AccessError, Builder, JoinHandle, + LocalKey, Result, Thread, ThreadId, }; } diff --git a/tokio/src/loom/std/parking_lot.rs b/tokio/src/loom/std/parking_lot.rs index 034a0ce69a5..e3af258d116 100644 --- a/tokio/src/loom/std/parking_lot.rs +++ b/tokio/src/loom/std/parking_lot.rs @@ -52,7 +52,7 @@ impl Mutex { } #[inline] - #[cfg(all(feature = "parking_lot", not(all(loom, test)),))] + #[cfg(all(feature = "parking_lot", not(all(loom, test))))] #[cfg_attr(docsrs, doc(cfg(all(feature = "parking_lot",))))] pub(crate) const fn const_new(t: T) -> Mutex { Mutex(PhantomData, parking_lot::const_mutex(t)) diff --git a/tokio/src/net/unix/stream.rs b/tokio/src/net/unix/stream.rs index e0a70721630..2ca90f90da2 100644 --- a/tokio/src/net/unix/stream.rs +++ b/tokio/src/net/unix/stream.rs @@ -22,8 +22,8 @@ cfg_io_util! { cfg_net_unix! 
{ /// A structure representing a connected Unix socket. /// - /// This socket can be connected directly with `UnixStream::connect` or accepted - /// from a listener with `UnixListener::incoming`. Additionally, a pair of + /// This socket can be connected directly with [`UnixStream::connect`] or accepted + /// from a listener with [`UnixListener::accept`]. Additionally, a pair of /// anonymous Unix sockets can be created with `UnixStream::pair`. /// /// To shut down the stream in the write direction, you can call the @@ -32,6 +32,7 @@ cfg_net_unix! { /// the stream in one direction. /// /// [`shutdown()`]: fn@crate::io::AsyncWriteExt::shutdown + /// [`UnixListener::accept`]: crate::net::UnixListener::accept pub struct UnixStream { io: PollEvented, } diff --git a/tokio/src/park/either.rs b/tokio/src/park/either.rs deleted file mode 100644 index ee02ec158b0..00000000000 --- a/tokio/src/park/either.rs +++ /dev/null @@ -1,74 +0,0 @@ -#![cfg_attr(not(feature = "full"), allow(dead_code))] - -use crate::park::{Park, Unpark}; - -use std::fmt; -use std::time::Duration; - -pub(crate) enum Either { - A(A), - B(B), -} - -impl Park for Either -where - A: Park, - B: Park, -{ - type Unpark = Either; - type Error = Either; - - fn unpark(&self) -> Self::Unpark { - match self { - Either::A(a) => Either::A(a.unpark()), - Either::B(b) => Either::B(b.unpark()), - } - } - - fn park(&mut self) -> Result<(), Self::Error> { - match self { - Either::A(a) => a.park().map_err(Either::A), - Either::B(b) => b.park().map_err(Either::B), - } - } - - fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> { - match self { - Either::A(a) => a.park_timeout(duration).map_err(Either::A), - Either::B(b) => b.park_timeout(duration).map_err(Either::B), - } - } - - fn shutdown(&mut self) { - match self { - Either::A(a) => a.shutdown(), - Either::B(b) => b.shutdown(), - } - } -} - -impl Unpark for Either -where - A: Unpark, - B: Unpark, -{ - fn unpark(&self) { - match self { - Either::A(a) => a.unpark(), - Either::B(b) => b.unpark(), - } - } -} - -impl fmt::Debug for Either -where - A: fmt::Debug, - B: fmt::Debug, -{ - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Either::A(a) => a.fmt(fmt), - Either::B(b) => b.fmt(fmt), - } - } -} diff --git a/tokio/src/park/mod.rs b/tokio/src/park/mod.rs index 9284702a681..a88b33ac15b 100644 --- a/tokio/src/park/mod.rs +++ b/tokio/src/park/mod.rs @@ -34,84 +34,4 @@ //! * `park_timeout` does the same as `park` but allows specifying a maximum //! time to block the thread for. -cfg_rt! { - pub(crate) mod either; -} - -#[cfg(any(feature = "rt", feature = "sync"))] pub(crate) mod thread; - -use std::fmt::Debug; -use std::sync::Arc; -use std::time::Duration; - -/// Blocks the current thread. -pub(crate) trait Park { - /// Unpark handle type for the `Park` implementation. - type Unpark: Unpark; - - /// Error returned by `park`. - type Error: Debug; - - /// Gets a new `Unpark` handle associated with this `Park` instance. - fn unpark(&self) -> Self::Unpark; - - /// Blocks the current thread unless or until the token is available. - /// - /// A call to `park` does not guarantee that the thread will remain blocked - /// forever, and callers should be prepared for this possibility. This - /// function may wakeup spuriously for any reason. - /// - /// # Panics - /// - /// This function **should** not panic, but ultimately, panics are left as - /// an implementation detail. Refer to the documentation for the specific - /// `Park` implementation. 
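The reworked `UnixStream` docs above mention `connect`, `accept`, and `pair`. A hedged, Unix-only sketch of the `pair` plus `shutdown` flow those docs describe (assumes tokio's `full` feature):

```rust
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::UnixStream;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // Anonymous, connected pair; no listener or filesystem path needed.
    let (mut a, mut b) = UnixStream::pair()?;

    a.write_all(b"ping").await?;
    // Shut down only the write direction, as the docs describe; `a` can
    // still read anything `b` sends back.
    a.shutdown().await?;

    let mut buf = Vec::new();
    b.read_to_end(&mut buf).await?; // EOF arrives once `a` shuts down
    assert_eq!(buf, b"ping");
    Ok(())
}
```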
- fn park(&mut self) -> Result<(), Self::Error>; - - /// Parks the current thread for at most `duration`. - /// - /// This function is the same as `park` but allows specifying a maximum time - /// to block the thread for. - /// - /// Same as `park`, there is no guarantee that the thread will remain - /// blocked for any amount of time. Spurious wakeups are permitted for any - /// reason. - /// - /// # Panics - /// - /// This function **should** not panic, but ultimately, panics are left as - /// an implementation detail. Refer to the documentation for the specific - /// `Park` implementation. - fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error>; - - /// Releases all resources held by the parker for proper leak-free shutdown. - fn shutdown(&mut self); -} - -/// Unblock a thread blocked by the associated `Park` instance. -pub(crate) trait Unpark: Sync + Send + 'static { - /// Unblocks a thread that is blocked by the associated `Park` handle. - /// - /// Calling `unpark` atomically makes available the unpark token, if it is - /// not already available. - /// - /// # Panics - /// - /// This function **should** not panic, but ultimately, panics are left as - /// an implementation detail. Refer to the documentation for the specific - /// `Unpark` implementation. - fn unpark(&self); -} - -impl Unpark for Box { - fn unpark(&self) { - (**self).unpark() - } -} - -impl Unpark for Arc { - fn unpark(&self) { - (**self).unpark() - } -} diff --git a/tokio/src/park/thread.rs b/tokio/src/park/thread.rs index 4db1c1b31b6..abcdcb9c5cd 100644 --- a/tokio/src/park/thread.rs +++ b/tokio/src/park/thread.rs @@ -2,7 +2,6 @@ use crate::loom::sync::atomic::AtomicUsize; use crate::loom::sync::{Arc, Condvar, Mutex}; -use crate::park::{Park, Unpark}; use std::sync::atomic::Ordering::SeqCst; use std::time::Duration; @@ -12,8 +11,6 @@ pub(crate) struct ParkThread { inner: Arc, } -pub(crate) type ParkError = (); - /// Unblocks a thread that was blocked by `ParkThread`. #[derive(Clone, Debug)] pub(crate) struct UnparkThread { @@ -47,32 +44,25 @@ impl ParkThread { }), } } -} - -impl Park for ParkThread { - type Unpark = UnparkThread; - type Error = ParkError; - fn unpark(&self) -> Self::Unpark { + pub(crate) fn unpark(&self) -> UnparkThread { let inner = self.inner.clone(); UnparkThread { inner } } - fn park(&mut self) -> Result<(), Self::Error> { + pub(crate) fn park(&mut self) { self.inner.park(); - Ok(()) } - fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> { + pub(crate) fn park_timeout(&mut self, duration: Duration) { // Wasm doesn't have threads, so just sleep. 
#[cfg(not(tokio_wasm))] self.inner.park_timeout(duration); #[cfg(tokio_wasm)] std::thread::sleep(duration); - Ok(()) } - fn shutdown(&mut self) { + pub(crate) fn shutdown(&mut self) { self.inner.shutdown(); } } @@ -212,12 +202,13 @@ impl Default for ParkThread { // ===== impl UnparkThread ===== -impl Unpark for UnparkThread { - fn unpark(&self) { +impl UnparkThread { + pub(crate) fn unpark(&self) { self.inner.unpark(); } } +use crate::loom::thread::AccessError; use std::future::Future; use std::marker::PhantomData; use std::mem; @@ -241,24 +232,38 @@ impl CachedParkThread { } } - pub(crate) fn get_unpark(&self) -> Result<UnparkThread, ParkError> { + pub(crate) fn waker(&self) -> Result<Waker, AccessError> { + self.unpark().map(|unpark| unpark.into_waker()) + } + + fn unpark(&self) -> Result<UnparkThread, AccessError> { self.with_current(|park_thread| park_thread.unpark()) } + pub(crate) fn park(&mut self) { + self.with_current(|park_thread| park_thread.inner.park()) + .unwrap(); + } + + pub(crate) fn park_timeout(&mut self, duration: Duration) { + self.with_current(|park_thread| park_thread.inner.park_timeout(duration)) + .unwrap(); + } + /// Gets a reference to the `ParkThread` handle for this thread. - fn with_current<F, R>(&self, f: F) -> Result<R, ParkError> + fn with_current<F, R>(&self, f: F) -> Result<R, AccessError> where F: FnOnce(&ParkThread) -> R, { - CURRENT_PARKER.try_with(|inner| f(inner)).map_err(|_| ()) + CURRENT_PARKER.try_with(|inner| f(inner)) } - pub(crate) fn block_on<F: Future>(&mut self, f: F) -> Result<F::Output, ParkError> { + pub(crate) fn block_on<F: Future>(&mut self, f: F) -> Result<F::Output, AccessError> { use std::task::Context; use std::task::Poll::Ready; - // `get_unpark()` should not return a Result - let waker = self.get_unpark()?.into_waker(); + let waker = self.waker()?; let mut cx = Context::from_waker(&waker); pin!(f); @@ -268,34 +273,11 @@ impl CachedParkThread { return Ok(v); } - self.park()?; + self.park(); } } } -impl Park for CachedParkThread { - type Unpark = UnparkThread; - type Error = ParkError; - - fn unpark(&self) -> Self::Unpark { - self.get_unpark().unwrap() - } - - fn park(&mut self) -> Result<(), Self::Error> { - self.with_current(|park_thread| park_thread.inner.park())?; - Ok(()) - } - - fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> { - self.with_current(|park_thread| park_thread.inner.park_timeout(duration))?; - Ok(()) - } - - fn shutdown(&mut self) { - let _ = self.with_current(|park_thread| park_thread.inner.shutdown()); - } -} - impl UnparkThread { pub(crate) fn into_waker(self) -> Waker { unsafe { diff --git a/tokio/src/process/unix/driver.rs index 84dc8fbd027..bb6ae369538 100644 --- a/tokio/src/process/unix/driver.rs +++ b/tokio/src/process/unix/driver.rs @@ -2,11 +2,9 @@ //! Process driver. -use crate::park::Park; use crate::process::unix::GlobalOrphanQueue; use crate::signal::unix::driver::{Driver as SignalDriver, Handle as SignalHandle}; -use std::io; use std::time::Duration; /// Responsible for cleaning up orphaned child processes on Unix platforms.
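For readers following the `CachedParkThread` changes above: `block_on` is just `poll` in a loop with the thread parked between wakeups. A toy, hedged version of that pattern on plain `std` (tokio's real one uses a condvar-backed parker and loom-aware types):

```rust
use std::future::Future;
use std::pin::pin;
use std::sync::Arc;
use std::task::{Context, Poll, Wake, Waker};
use std::thread::{self, Thread};

// Waking simply unparks the thread that is blocked inside `block_on`.
struct ThreadUnparker(Thread);

impl Wake for ThreadUnparker {
    fn wake(self: Arc<Self>) {
        self.0.unpark();
    }
}

fn block_on<F: Future>(f: F) -> F::Output {
    let mut f = pin!(f);
    let waker = Waker::from(Arc::new(ThreadUnparker(thread::current())));
    let mut cx = Context::from_waker(&waker);
    loop {
        match f.as_mut().poll(&mut cx) {
            Poll::Ready(v) => return v,
            // Spurious unparks are fine: we just poll again.
            Poll::Pending => thread::park(),
        }
    }
}

fn main() {
    assert_eq!(block_on(async { 21 * 2 }), 42);
}
```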
@@ -28,31 +26,18 @@ impl Driver { signal_handle, } } -} - -// ===== impl Park for Driver ===== - -impl Park for Driver { - type Unpark = ::Unpark; - type Error = io::Error; - - fn unpark(&self) -> Self::Unpark { - self.park.unpark() - } - fn park(&mut self) -> Result<(), Self::Error> { - self.park.park()?; + pub(crate) fn park(&mut self) { + self.park.park(); GlobalOrphanQueue::reap_orphans(&self.signal_handle); - Ok(()) } - fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> { - self.park.park_timeout(duration)?; + pub(crate) fn park_timeout(&mut self, duration: Duration) { + self.park.park_timeout(duration); GlobalOrphanQueue::reap_orphans(&self.signal_handle); - Ok(()) } - fn shutdown(&mut self) { + pub(crate) fn shutdown(&mut self) { self.park.shutdown() } } diff --git a/tokio/src/process/unix/mod.rs b/tokio/src/process/unix/mod.rs index ba34c852b58..c5a6090c0e6 100644 --- a/tokio/src/process/unix/mod.rs +++ b/tokio/src/process/unix/mod.rs @@ -34,10 +34,10 @@ use crate::process::kill::Kill; use crate::process::SpawnedChild; use crate::signal::unix::driver::Handle as SignalHandle; use crate::signal::unix::{signal, Signal, SignalKind}; +use crate::util::once_cell::OnceCell; use mio::event::Source; use mio::unix::SourceFd; -use once_cell::sync::Lazy; use std::fmt; use std::fs::File; use std::future::Future; @@ -64,25 +64,29 @@ impl Kill for StdChild { } } -static ORPHAN_QUEUE: Lazy> = Lazy::new(OrphanQueueImpl::new); +fn get_orphan_queue() -> &'static OrphanQueueImpl { + static ORPHAN_QUEUE: OnceCell> = OnceCell::new(); + + ORPHAN_QUEUE.get(OrphanQueueImpl::new) +} pub(crate) struct GlobalOrphanQueue; impl fmt::Debug for GlobalOrphanQueue { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - ORPHAN_QUEUE.fmt(fmt) + get_orphan_queue().fmt(fmt) } } impl GlobalOrphanQueue { fn reap_orphans(handle: &SignalHandle) { - ORPHAN_QUEUE.reap_orphans(handle) + get_orphan_queue().reap_orphans(handle) } } impl OrphanQueue for GlobalOrphanQueue { fn push_orphan(&self, orphan: StdChild) { - ORPHAN_QUEUE.push_orphan(orphan) + get_orphan_queue().push_orphan(orphan) } } diff --git a/tokio/src/runtime/blocking/mod.rs b/tokio/src/runtime/blocking/mod.rs index 7633299b302..88bdcfd6421 100644 --- a/tokio/src/runtime/blocking/mod.rs +++ b/tokio/src/runtime/blocking/mod.rs @@ -4,15 +4,20 @@ //! compilation. mod pool; -pub(crate) use pool::{spawn_blocking, BlockingPool, Mandatory, SpawnError, Spawner, Task}; +pub(crate) use pool::{spawn_blocking, BlockingPool, Spawner}; cfg_fs! { pub(crate) use pool::spawn_mandatory_blocking; } +cfg_trace! 
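The `process/unix/mod.rs` hunk above swaps `once_cell::sync::Lazy` for a lazily-initialized static behind an accessor function. The same shape on modern std, as a hedged sketch (`OnceLock` needs Rust 1.70, which is why tokio ships its own `util::once_cell::OnceCell` for its 1.49 MSRV; the empty struct here is a stand-in):

```rust
use std::sync::OnceLock;

struct OrphanQueueImpl {
    // ... queue state lives here in the real type ...
}

impl OrphanQueueImpl {
    fn new() -> Self {
        OrphanQueueImpl {}
    }
}

// Initialized on first call, shared by every subsequent caller; this mirrors
// the `get_orphan_queue()` accessor in the hunk above.
fn get_orphan_queue() -> &'static OrphanQueueImpl {
    static ORPHAN_QUEUE: OnceLock<OrphanQueueImpl> = OnceLock::new();
    ORPHAN_QUEUE.get_or_init(OrphanQueueImpl::new)
}

fn main() {
    let a = get_orphan_queue() as *const _;
    let b = get_orphan_queue() as *const _;
    assert_eq!(a, b); // same instance every time
}
```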
{ + pub(crate) use pool::Mandatory; +} + mod schedule; mod shutdown; mod task; +#[cfg(all(test, not(tokio_wasm)))] pub(crate) use schedule::NoopSchedule; pub(crate) use task::BlockingTask; diff --git a/tokio/src/runtime/blocking/pool.rs b/tokio/src/runtime/blocking/pool.rs index 19315388563..79bc15c6ecd 100644 --- a/tokio/src/runtime/blocking/pool.rs +++ b/tokio/src/runtime/blocking/pool.rs @@ -3,11 +3,12 @@ use crate::loom::sync::{Arc, Condvar, Mutex}; use crate::loom::thread; use crate::runtime::blocking::schedule::NoopSchedule; -use crate::runtime::blocking::shutdown; +use crate::runtime::blocking::{shutdown, BlockingTask}; use crate::runtime::builder::ThreadNameFn; use crate::runtime::context; use crate::runtime::task::{self, JoinHandle}; -use crate::runtime::{Builder, Callback, ToHandle}; +use crate::runtime::{Builder, Callback, Handle}; +use crate::util::{replace_thread_rng, RngSeedGenerator}; use std::collections::{HashMap, VecDeque}; use std::fmt; @@ -48,6 +49,9 @@ struct Inner { // Customizable wait timeout. keep_alive: Duration, + + // Random number seed + seed_generator: RngSeedGenerator, } struct Shared { @@ -150,7 +154,7 @@ cfg_fs! { R: Send + 'static, { let rt = context::current(); - rt.as_inner().spawn_mandatory_blocking(&rt, func) + rt.inner.blocking_spawner().spawn_mandatory_blocking(&rt, func) } } @@ -182,6 +186,7 @@ impl BlockingPool { before_stop: builder.before_stop.clone(), thread_cap, keep_alive, + seed_generator: builder.seed_generator.next_generator(), }), }, shutdown_rx, @@ -241,7 +246,103 @@ impl fmt::Debug for BlockingPool { // ===== impl Spawner ===== impl Spawner { - pub(crate) fn spawn(&self, task: Task, rt: &dyn ToHandle) -> Result<(), SpawnError> { + #[track_caller] + pub(crate) fn spawn_blocking(&self, rt: &Handle, func: F) -> JoinHandle + where + F: FnOnce() -> R + Send + 'static, + R: Send + 'static, + { + let (join_handle, spawn_result) = + if cfg!(debug_assertions) && std::mem::size_of::() > 2048 { + self.spawn_blocking_inner(Box::new(func), Mandatory::NonMandatory, None, rt) + } else { + self.spawn_blocking_inner(func, Mandatory::NonMandatory, None, rt) + }; + + match spawn_result { + Ok(()) => join_handle, + // Compat: do not panic here, return the join_handle even though it will never resolve + Err(SpawnError::ShuttingDown) => join_handle, + Err(SpawnError::NoThreads(e)) => { + panic!("OS can't spawn worker thread: {}", e) + } + } + } + + cfg_fs! 
{ + #[track_caller] + #[cfg_attr(any( + all(loom, not(test)), // the function is covered by loom tests + test + ), allow(dead_code))] + pub(crate) fn spawn_mandatory_blocking(&self, rt: &Handle, func: F) -> Option> + where + F: FnOnce() -> R + Send + 'static, + R: Send + 'static, + { + let (join_handle, spawn_result) = if cfg!(debug_assertions) && std::mem::size_of::() > 2048 { + self.spawn_blocking_inner( + Box::new(func), + Mandatory::Mandatory, + None, + rt, + ) + } else { + self.spawn_blocking_inner( + func, + Mandatory::Mandatory, + None, + rt, + ) + }; + + if spawn_result.is_ok() { + Some(join_handle) + } else { + None + } + } + } + + #[track_caller] + pub(crate) fn spawn_blocking_inner( + &self, + func: F, + is_mandatory: Mandatory, + name: Option<&str>, + rt: &Handle, + ) -> (JoinHandle, Result<(), SpawnError>) + where + F: FnOnce() -> R + Send + 'static, + R: Send + 'static, + { + let fut = BlockingTask::new(func); + let id = task::Id::next(); + #[cfg(all(tokio_unstable, feature = "tracing"))] + let fut = { + use tracing::Instrument; + let location = std::panic::Location::caller(); + let span = tracing::trace_span!( + target: "tokio::task::blocking", + "runtime.spawn", + kind = %"blocking", + task.name = %name.unwrap_or_default(), + task.id = id.as_u64(), + "fn" = %std::any::type_name::(), + spawn.location = %format_args!("{}:{}:{}", location.file(), location.line(), location.column()), + ); + fut.instrument(span) + }; + + #[cfg(not(all(tokio_unstable, feature = "tracing")))] + let _ = name; + + let (task, handle) = task::unowned(fut, NoopSchedule, id); + let spawned = self.spawn_task(Task::new(task, is_mandatory), rt); + (handle, spawned) + } + + fn spawn_task(&self, task: Task, rt: &Handle) -> Result<(), SpawnError> { let mut shared = self.inner.shared.lock(); if shared.shutdown { @@ -304,7 +405,7 @@ impl Spawner { fn spawn_thread( &self, shutdown_tx: shutdown::Sender, - rt: &dyn ToHandle, + rt: &Handle, id: usize, ) -> std::io::Result> { let mut builder = thread::Builder::new().name((self.inner.thread_name)()); @@ -313,12 +414,12 @@ impl Spawner { builder = builder.stack_size(stack_size); } - let rt = rt.to_handle(); + let rt = rt.clone(); builder.spawn(move || { // Only the reference should be moved into the closure let _enter = crate::runtime::context::enter(rt.clone()); - rt.as_inner().blocking_spawner.inner.run(id); + rt.inner.blocking_spawner().inner.run(id); drop(shutdown_tx); }) } @@ -335,6 +436,8 @@ impl Inner { if let Some(f) = &self.after_start { f() } + // We own this thread so there is no need to replace the RngSeed once we're done. + let _ = replace_thread_rng(self.seed_generator.next_seed()); let mut shared = self.shared.lock(); let mut join_on_thread = None; diff --git a/tokio/src/runtime/builder.rs b/tokio/src/runtime/builder.rs index b06fca2ddae..562733b1226 100644 --- a/tokio/src/runtime/builder.rs +++ b/tokio/src/runtime/builder.rs @@ -1,5 +1,6 @@ use crate::runtime::handle::Handle; -use crate::runtime::{blocking, driver, Callback, Runtime, Spawner}; +use crate::runtime::{blocking, driver, Callback, Runtime}; +use crate::util::{RngSeed, RngSeedGenerator}; use std::fmt; use std::io; @@ -90,6 +91,9 @@ pub struct Builder { /// This option should only be exposed as unstable. 
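A note on the `std::mem::size_of::<F>() > 2048` guard that `spawn_blocking` and `spawn_mandatory_blocking` use above: in debug builds, closures capturing more than 2 KiB are boxed before being moved into the task. A hedged, self-contained sketch of the trick (function names here are illustrative):

```rust
// In debug builds, route large closures through a Box so only a pointer is
// moved around; in release builds the guard compiles away.
fn run<F: FnOnce() + Send + 'static>(f: F) {
    if cfg!(debug_assertions) && std::mem::size_of::<F>() > 2048 {
        dispatch(Box::new(f)); // Box<F> is itself FnOnce(), and tiny
    } else {
        dispatch(f);
    }
}

fn dispatch<F: FnOnce() + Send + 'static>(f: F) {
    f();
}

fn main() {
    let big = [0u8; 4096]; // captured by value: size_of::<F>() > 2048
    run(move || {
        let _ = big.len();
    });
    run(|| println!("small closure, passed by value"));
}
```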
pub(super) disable_lifo_slot: bool, + /// Specify a random number generator seed to provide deterministic results + pub(super) seed_generator: RngSeedGenerator, + #[cfg(tokio_unstable)] pub(super) unhandled_panic: UnhandledPanic, } @@ -255,6 +259,8 @@ impl Builder { global_queue_interval, event_interval, + seed_generator: RngSeedGenerator::new(RngSeed::new()), + #[cfg(tokio_unstable)] unhandled_panic: UnhandledPanic::Ignore, @@ -829,24 +835,57 @@ impl Builder { self.disable_lifo_slot = true; self } + + /// Specifies the random number generation seed to use within all threads associated + /// with the runtime being built. + /// + /// This option is intended to make certain parts of the runtime deterministic. + /// Specifically, it affects the [`tokio::select!`] macro and the work stealing + /// algorithm. In the case of [`tokio::select!`] it will ensure that the order that + /// branches are polled is deterministic. + /// + /// In the case of work stealing, it's a little more complicated. Each worker will + /// be given a deterministic seed so that the starting peer for each work stealing + /// search will be deterministic. + /// + /// In addition to the code specifying `rng_seed` and interacting with the runtime, + /// the internals of Tokio and the Rust compiler may affect the sequences of random + /// numbers. In order to ensure repeatable results, the version of Tokio, the versions + /// of all other dependencies that interact with Tokio, and the Rust compiler version + /// should also all remain constant. + /// + /// # Examples + /// + /// ``` + /// # use tokio::runtime::{self, RngSeed}; + /// # pub fn main() { + /// let seed = RngSeed::from_bytes(b"place your seed here"); + /// let rt = runtime::Builder::new_current_thread() + /// .rng_seed(seed) + /// .build(); + /// # } + /// ``` + /// + /// [`tokio::select!`]: crate::select + pub fn rng_seed(&mut self, seed: RngSeed) -> &mut Self { + self.seed_generator = RngSeedGenerator::new(seed); + self + } } fn build_current_thread_runtime(&mut self) -> io::Result { - use crate::runtime::{Config, CurrentThread, HandleInner, Kind}; + use crate::runtime::scheduler::{self, CurrentThread}; + use crate::runtime::{Config, Scheduler}; - let (driver, resources) = driver::Driver::new(self.get_cfg())?; + let (driver, driver_handle) = driver::Driver::new(self.get_cfg())?; // Blocking pool let blocking_pool = blocking::create_blocking_pool(self, self.max_blocking_threads); let blocking_spawner = blocking_pool.spawner().clone(); - let handle_inner = HandleInner { - io_handle: resources.io_handle, - time_handle: resources.time_handle, - signal_handle: resources.signal_handle, - clock: resources.clock, - blocking_spawner, - }; + // Generate a rng seed for this runtime. + let seed_generator_1 = self.seed_generator.next_generator(); + let seed_generator_2 = self.seed_generator.next_generator(); // And now put a single-threaded scheduler on top of the timer. When // there are no futures ready to do something, it'll let the timer or @@ -854,7 +893,9 @@ impl Builder { // in their life. 
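To make the new `rng_seed` docs above concrete: with identical seeds, two runtimes should make identical pseudo-random choices in `tokio::select!`. A hedged sketch (this is an unstable API, so it assumes building with `RUSTFLAGS="--cfg tokio_unstable"` and tokio's `full` feature):

```rust
use tokio::runtime::{self, RngSeed};

// Both branches are immediately ready, so which one `select!` polls first
// is decided by the runtime's RNG, which the seed pins down.
fn winning_branch() -> &'static str {
    let seed = RngSeed::from_bytes(b"make select deterministic");
    let rt = runtime::Builder::new_current_thread()
        .rng_seed(seed)
        .build()
        .unwrap();

    rt.block_on(async {
        tokio::select! {
            _ = async {} => "first",
            _ = async {} => "second",
        }
    })
}

fn main() {
    // Same seed, same polling order, same winner, every run.
    assert_eq!(winning_branch(), winning_branch());
}
```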
let scheduler = CurrentThread::new( driver, - handle_inner, + driver_handle, + blocking_spawner, + seed_generator_2, Config { before_park: self.before_park.clone(), after_unpark: self.after_unpark.clone(), @@ -863,13 +904,15 @@ impl Builder { #[cfg(tokio_unstable)] unhandled_panic: self.unhandled_panic.clone(), disable_lifo_slot: self.disable_lifo_slot, + seed_generator: seed_generator_1, }, ); - let spawner = Spawner::CurrentThread(scheduler.spawner().clone()); + + let handle = scheduler::Handle::CurrentThread(scheduler.handle().clone()); Ok(Runtime { - kind: Kind::CurrentThread(scheduler), - handle: Handle { spawner }, + scheduler: Scheduler::CurrentThread(scheduler), + handle: Handle { inner: handle }, blocking_pool, }) } @@ -951,29 +994,28 @@ cfg_rt_multi_thread! { impl Builder { fn build_threaded_runtime(&mut self) -> io::Result { use crate::loom::sys::num_cpus; - use crate::runtime::{Config, HandleInner, Kind, MultiThread}; + use crate::runtime::{Config, Scheduler}; + use crate::runtime::scheduler::{self, MultiThread}; let core_threads = self.worker_threads.unwrap_or_else(num_cpus); - let (driver, resources) = driver::Driver::new(self.get_cfg())?; + let (driver, driver_handle) = driver::Driver::new(self.get_cfg())?; // Create the blocking pool let blocking_pool = blocking::create_blocking_pool(self, self.max_blocking_threads + core_threads); let blocking_spawner = blocking_pool.spawner().clone(); - let handle_inner = HandleInner { - io_handle: resources.io_handle, - time_handle: resources.time_handle, - signal_handle: resources.signal_handle, - clock: resources.clock, - blocking_spawner, - }; + // Generate a rng seed for this runtime. + let seed_generator_1 = self.seed_generator.next_generator(); + let seed_generator_2 = self.seed_generator.next_generator(); let (scheduler, launch) = MultiThread::new( core_threads, driver, - handle_inner, + driver_handle, + blocking_spawner, + seed_generator_2, Config { before_park: self.before_park.clone(), after_unpark: self.after_unpark.clone(), @@ -982,19 +1024,19 @@ cfg_rt_multi_thread! { #[cfg(tokio_unstable)] unhandled_panic: self.unhandled_panic.clone(), disable_lifo_slot: self.disable_lifo_slot, + seed_generator: seed_generator_1, }, ); - let spawner = Spawner::MultiThread(scheduler.spawner().clone()); - // Create the runtime handle - let handle = Handle { spawner }; + let handle = scheduler::Handle::MultiThread(scheduler.handle().clone()); + let handle = Handle { inner: handle }; // Spawn the thread pool workers let _enter = crate::runtime::context::enter(handle.clone()); launch.launch(); Ok(Runtime { - kind: Kind::MultiThread(scheduler), + scheduler: Scheduler::MultiThread(scheduler), handle, blocking_pool, }) diff --git a/tokio/src/runtime/config.rs b/tokio/src/runtime/config.rs index 59c19988e5e..39eb1cf118b 100644 --- a/tokio/src/runtime/config.rs +++ b/tokio/src/runtime/config.rs @@ -1,5 +1,6 @@ #![cfg_attr(any(not(feature = "full"), tokio_wasm), allow(dead_code))] use crate::runtime::Callback; +use crate::util::RngSeedGenerator; pub(crate) struct Config { /// How many ticks before pulling a task from the global/remote queue? @@ -23,6 +24,10 @@ pub(crate) struct Config { /// stop-gap, this unstable option lets users disable the LIFO task. pub(crate) disable_lifo_slot: bool, + /// Random number generator seed to configure runtimes to act in a + /// deterministic way. + pub(crate) seed_generator: RngSeedGenerator, + #[cfg(tokio_unstable)] /// How to respond to unhandled task panics. 
pub(crate) unhandled_panic: crate::runtime::UnhandledPanic, diff --git a/tokio/src/runtime/context.rs b/tokio/src/runtime/context.rs index 4215124fc83..c35bf806c8c 100644 --- a/tokio/src/runtime/context.rs +++ b/tokio/src/runtime/context.rs @@ -1,5 +1,6 @@ //! Thread local runtime context use crate::runtime::{Handle, TryCurrentError}; +use crate::util::{replace_thread_rng, RngSeed}; use std::cell::RefCell; @@ -28,7 +29,12 @@ cfg_io_driver! { pub(crate) fn io_handle() -> crate::runtime::driver::IoHandle { match CONTEXT.try_with(|ctx| { let ctx = ctx.borrow(); - ctx.as_ref().expect(crate::util::error::CONTEXT_MISSING_ERROR).as_inner().io_handle.clone() + ctx.as_ref() + .expect(crate::util::error::CONTEXT_MISSING_ERROR) + .inner + .driver() + .io + .clone() }) { Ok(io_handle) => io_handle, Err(_) => panic!("{}", crate::util::error::THREAD_LOCAL_DESTROYED_ERROR), @@ -41,7 +47,11 @@ cfg_signal_internal! { pub(crate) fn signal_handle() -> crate::runtime::driver::SignalHandle { match CONTEXT.try_with(|ctx| { let ctx = ctx.borrow(); - ctx.as_ref().expect(crate::util::error::CONTEXT_MISSING_ERROR).as_inner().signal_handle.clone() + ctx.as_ref() + .expect(crate::util::error::CONTEXT_MISSING_ERROR) + .inner + .signal() + .clone() }) { Ok(signal_handle) => signal_handle, Err(_) => panic!("{}", crate::util::error::THREAD_LOCAL_DESTROYED_ERROR), @@ -50,19 +60,14 @@ cfg_signal_internal! { } cfg_time! { - pub(crate) fn time_handle() -> crate::runtime::driver::TimeHandle { - match CONTEXT.try_with(|ctx| { - let ctx = ctx.borrow(); - ctx.as_ref().expect(crate::util::error::CONTEXT_MISSING_ERROR).as_inner().time_handle.clone() - }) { - Ok(time_handle) => time_handle, - Err(_) => panic!("{}", crate::util::error::THREAD_LOCAL_DESTROYED_ERROR), - } - } - cfg_test_util! { pub(crate) fn clock() -> Option { - match CONTEXT.try_with(|ctx| (*ctx.borrow()).as_ref().map(|ctx| ctx.as_inner().clock.clone())) { + match CONTEXT.try_with(|ctx| { + let ctx = ctx.borrow(); + ctx + .as_ref() + .map(|ctx| ctx.inner.clock().clone()) + }) { Ok(clock) => clock, Err(_) => panic!("{}", crate::util::error::THREAD_LOCAL_DESTROYED_ERROR), } @@ -70,15 +75,6 @@ cfg_time! { } } -cfg_rt! { - pub(crate) fn spawn_handle() -> Option { - match CONTEXT.try_with(|ctx| (*ctx.borrow()).as_ref().map(|ctx| ctx.spawner.clone())) { - Ok(spawner) => spawner, - Err(_) => panic!("{}", crate::util::error::THREAD_LOCAL_DESTROYED_ERROR), - } - } -} - /// Sets this [`Handle`] as the current active [`Handle`]. 
/// /// [`Handle`]: Handle @@ -93,21 +89,29 @@ pub(crate) fn enter(new: Handle) -> EnterGuard { /// /// [`Handle`]: Handle pub(crate) fn try_enter(new: Handle) -> Option { - CONTEXT - .try_with(|ctx| { - let old = ctx.borrow_mut().replace(new); - EnterGuard(old) - }) - .ok() + let rng_seed = new.inner.seed_generator().next_seed(); + let old_handle = CONTEXT.try_with(|ctx| ctx.borrow_mut().replace(new)).ok()?; + + let old_seed = replace_thread_rng(rng_seed); + + Some(EnterGuard { + old_handle, + old_seed, + }) } #[derive(Debug)] -pub(crate) struct EnterGuard(Option); +pub(crate) struct EnterGuard { + old_handle: Option, + old_seed: RngSeed, +} impl Drop for EnterGuard { fn drop(&mut self) { CONTEXT.with(|ctx| { - *ctx.borrow_mut() = self.0.take(); + *ctx.borrow_mut() = self.old_handle.take(); }); + // We discard the RngSeed associated with this guard + let _ = replace_thread_rng(self.old_seed.clone()); } } diff --git a/tokio/src/runtime/driver.rs b/tokio/src/runtime/driver.rs index 916e1bfbdc6..74d9009240a 100644 --- a/tokio/src/runtime/driver.rs +++ b/tokio/src/runtime/driver.rs @@ -1,20 +1,105 @@ //! Abstracts out the entire chain of runtime sub-drivers into common types. -use crate::park::thread::ParkThread; -use crate::park::Park; + +// Eventually, this file will see significant refactoring / cleanup. For now, we +// don't need to worry much about dead code with certain feature permutations. +#![cfg_attr(not(feature = "full"), allow(dead_code))] + +use crate::park::thread::{ParkThread, UnparkThread}; use std::io; use std::time::Duration; +#[derive(Debug)] +pub(crate) struct Driver { + inner: TimeDriver, +} + +#[derive(Debug)] +pub(crate) struct Handle { + /// IO driver handle + pub(crate) io: IoHandle, + + /// Signal driver handle + #[cfg_attr(any(not(unix), loom), allow(dead_code))] + pub(crate) signal: SignalHandle, + + /// Time driver handle + pub(crate) time: TimeHandle, + + /// Source of `Instant::now()` + #[cfg_attr(not(all(feature = "time", feature = "test-util")), allow(dead_code))] + pub(crate) clock: Clock, +} + +pub(crate) struct Cfg { + pub(crate) enable_io: bool, + pub(crate) enable_time: bool, + pub(crate) enable_pause_time: bool, + pub(crate) start_paused: bool, +} + +impl Driver { + pub(crate) fn new(cfg: Cfg) -> io::Result<(Self, Handle)> { + let (io_stack, io_handle, signal_handle) = create_io_stack(cfg.enable_io)?; + + let clock = create_clock(cfg.enable_pause_time, cfg.start_paused); + + let (time_driver, time_handle) = + create_time_driver(cfg.enable_time, io_stack, clock.clone()); + + Ok(( + Self { inner: time_driver }, + Handle { + io: io_handle, + signal: signal_handle, + time: time_handle, + clock, + }, + )) + } + + pub(crate) fn park(&mut self) { + self.inner.park() + } + + pub(crate) fn park_timeout(&mut self, duration: Duration) { + self.inner.park_timeout(duration) + } + + pub(crate) fn shutdown(&mut self) { + self.inner.shutdown() + } +} + +impl Handle { + pub(crate) fn unpark(&self) { + #[cfg(feature = "time")] + if let Some(handle) = &self.time { + handle.unpark(); + } + + self.io.unpark(); + } +} + // ===== io driver ===== cfg_io_driver! 
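The `EnterGuard` change just above now saves *two* pieces of thread-local state (the handle and the RNG seed) and restores both on drop. The general save-on-enter, restore-on-drop shape, as a hedged standalone sketch:

```rust
use std::cell::RefCell;

thread_local! {
    static CURRENT: RefCell<Option<&'static str>> = RefCell::new(None);
}

struct EnterGuard {
    old: Option<&'static str>, // whatever was current before we entered
}

fn enter(new: &'static str) -> EnterGuard {
    let old = CURRENT.with(|c| c.borrow_mut().replace(new));
    EnterGuard { old }
}

impl Drop for EnterGuard {
    fn drop(&mut self) {
        // Restore the previous value, even when guards are nested.
        let old = self.old.take();
        CURRENT.with(|c| *c.borrow_mut() = old);
    }
}

fn main() {
    {
        let _outer = enter("outer");
        {
            let _inner = enter("inner");
            CURRENT.with(|c| assert_eq!(*c.borrow(), Some("inner")));
        }
        // Dropping the inner guard restored the outer context.
        CURRENT.with(|c| assert_eq!(*c.borrow(), Some("outer")));
    }
    CURRENT.with(|c| assert_eq!(*c.borrow(), None));
}
```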
{ - type IoDriver = crate::runtime::io::Driver; - type IoStack = crate::park::either::Either<ProcessDriver, ParkThread>; - pub(crate) type IoHandle = Option<crate::runtime::io::Handle>; + pub(crate) type IoDriver = crate::runtime::io::Driver; - fn create_io_stack(enabled: bool) -> io::Result<(IoStack, IoHandle, SignalHandle)> { - use crate::park::either::Either; + #[derive(Debug)] + pub(crate) enum IoStack { + Enabled(ProcessDriver), + Disabled(ParkThread), + } + + #[derive(Debug, Clone)] + pub(crate) enum IoHandle { + Enabled(crate::runtime::io::Handle), + Disabled(UnparkThread), + } + fn create_io_stack(enabled: bool) -> io::Result<(IoStack, IoHandle, SignalHandle)> { #[cfg(loom)] assert!(!enabled); @@ -25,21 +110,83 @@ cfg_io_driver! { let (signal_driver, signal_handle) = create_signal_driver(io_driver)?; let process_driver = create_process_driver(signal_driver); - (Either::A(process_driver), Some(io_handle), signal_handle) + (IoStack::Enabled(process_driver), IoHandle::Enabled(io_handle), signal_handle) } else { - (Either::B(ParkThread::new()), Default::default(), Default::default()) + let park_thread = ParkThread::new(); + let unpark_thread = park_thread.unpark(); + (IoStack::Disabled(park_thread), IoHandle::Disabled(unpark_thread), Default::default()) }; Ok(ret) } + + impl IoStack { + /* + pub(crate) fn handle(&self) -> IoHandle { + match self { + IoStack::Enabled(v) => IoHandle::Enabled(v.handle()), + IoStack::Disabled(v) => IoHandle::Disabled(v.unpark()), + } + } + */ + + pub(crate) fn park(&mut self) { + match self { + IoStack::Enabled(v) => v.park(), + IoStack::Disabled(v) => v.park(), + } + } + + pub(crate) fn park_timeout(&mut self, duration: Duration) { + match self { + IoStack::Enabled(v) => v.park_timeout(duration), + IoStack::Disabled(v) => v.park_timeout(duration), + } + } + + pub(crate) fn shutdown(&mut self) { + match self { + IoStack::Enabled(v) => v.shutdown(), + IoStack::Disabled(v) => v.shutdown(), + } + } + } + + impl IoHandle { + pub(crate) fn unpark(&self) { + match self { + IoHandle::Enabled(handle) => handle.unpark(), + IoHandle::Disabled(handle) => handle.unpark(), + } + } + + #[track_caller] + pub(crate) fn expect(self, msg: &'static str) -> crate::runtime::io::Handle { + match self { + IoHandle::Enabled(v) => v, + IoHandle::Disabled(..) => panic!("{}", msg), + } + } + + cfg_unstable! { + pub(crate) fn as_ref(&self) -> Option<&crate::runtime::io::Handle> { + match self { + IoHandle::Enabled(v) => Some(v), + IoHandle::Disabled(..) => None, + } + } + } + } } cfg_not_io_driver! { - pub(crate) type IoHandle = (); - type IoStack = ParkThread; + pub(crate) type IoHandle = UnparkThread; + pub(crate) type IoStack = ParkThread; fn create_io_stack(_enabled: bool) -> io::Result<(IoStack, IoHandle, SignalHandle)> { - Ok((ParkThread::new(), Default::default(), Default::default())) + let park_thread = ParkThread::new(); + let unpark_thread = park_thread.unpark(); + Ok((park_thread, unpark_thread, Default::default())) } } @@ -98,10 +245,17 @@ cfg_not_process_driver! { // ===== time driver ===== cfg_time!
{ io_stack: IoStack, clock: Clock, ) -> (TimeDriver, TimeHandle) { - use crate::park::either::Either; - if enable { - let driver = crate::time::driver::Driver::new(io_stack, clock); - let handle = driver.handle(); + let (driver, handle) = crate::runtime::time::Driver::new(io_stack, clock); - (Either::A(driver), Some(handle)) + (TimeDriver::Enabled { driver, handle: handle.clone() }, Some(handle)) } else { - (Either::B(io_stack), None) + (TimeDriver::Disabled(io_stack), None) + } + } + + impl TimeDriver { + pub(crate) fn park(&mut self) { + match self { + TimeDriver::Enabled { driver, handle } => driver.park(handle), + TimeDriver::Disabled(v) => v.park(), + } + } + + pub(crate) fn park_timeout(&mut self, duration: Duration) { + match self { + TimeDriver::Enabled { driver, handle } => driver.park_timeout(handle, duration), + TimeDriver::Disabled(v) => v.park_timeout(duration), + } + } + + pub(crate) fn shutdown(&mut self) { + match self { + TimeDriver::Enabled { driver, handle } => driver.shutdown(handle), + TimeDriver::Disabled(v) => v.shutdown(), + } } } } @@ -143,66 +317,3 @@ cfg_not_time! { (io_stack, ()) } } - -// ===== runtime driver ===== - -#[derive(Debug)] -pub(crate) struct Driver { - inner: TimeDriver, -} - -pub(crate) struct Resources { - pub(crate) io_handle: IoHandle, - pub(crate) signal_handle: SignalHandle, - pub(crate) time_handle: TimeHandle, - pub(crate) clock: Clock, -} - -pub(crate) struct Cfg { - pub(crate) enable_io: bool, - pub(crate) enable_time: bool, - pub(crate) enable_pause_time: bool, - pub(crate) start_paused: bool, -} - -impl Driver { - pub(crate) fn new(cfg: Cfg) -> io::Result<(Self, Resources)> { - let (io_stack, io_handle, signal_handle) = create_io_stack(cfg.enable_io)?; - - let clock = create_clock(cfg.enable_pause_time, cfg.start_paused); - - let (time_driver, time_handle) = - create_time_driver(cfg.enable_time, io_stack, clock.clone()); - - Ok(( - Self { inner: time_driver }, - Resources { - io_handle, - signal_handle, - time_handle, - clock, - }, - )) - } -} - -impl Park for Driver { - type Unpark = ::Unpark; - type Error = ::Error; - - fn unpark(&self) -> Self::Unpark { - self.inner.unpark() - } - - fn park(&mut self) -> Result<(), Self::Error> { - self.inner.park() - } - - fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> { - self.inner.park_timeout(duration) - } - - fn shutdown(&mut self) { - self.inner.shutdown() - } -} diff --git a/tokio/src/runtime/enter.rs b/tokio/src/runtime/enter.rs index 6e4e37d70ff..66b17868b95 100644 --- a/tokio/src/runtime/enter.rs +++ b/tokio/src/runtime/enter.rs @@ -25,8 +25,6 @@ pub(crate) struct Enter { } cfg_rt! { - use crate::park::thread::ParkError; - use std::time::Duration; /// Marks the current thread as being within the dynamic extent of an @@ -139,10 +137,12 @@ cfg_rt_multi_thread! { } cfg_rt! { + use crate::loom::thread::AccessError; + impl Enter { /// Blocks the thread on the specified future, returning the value with /// which that future completes. - pub(crate) fn block_on(&mut self, f: F) -> Result + pub(crate) fn block_on(&mut self, f: F) -> Result where F: std::future::Future, { @@ -156,18 +156,17 @@ cfg_rt! { /// /// If the future completes before `timeout`, the result is returned. If /// `timeout` elapses, then `Err` is returned. 
- pub(crate) fn block_on_timeout(&mut self, f: F, timeout: Duration) -> Result + pub(crate) fn block_on_timeout(&mut self, f: F, timeout: Duration) -> Result where F: std::future::Future, { - use crate::park::Park; use crate::park::thread::CachedParkThread; use std::task::Context; use std::task::Poll::Ready; use std::time::Instant; let mut park = CachedParkThread::new(); - let waker = park.get_unpark()?.into_waker(); + let waker = park.waker().map_err(|_| ())?; let mut cx = Context::from_waker(&waker); pin!(f); @@ -184,7 +183,7 @@ cfg_rt! { return Err(()); } - park.park_timeout(when - now)?; + park.park_timeout(when - now); } } } diff --git a/tokio/src/runtime/handle.rs b/tokio/src/runtime/handle.rs index 075792a3077..c7e3ce9ec37 100644 --- a/tokio/src/runtime/handle.rs +++ b/tokio/src/runtime/handle.rs @@ -1,11 +1,4 @@ -use crate::runtime::blocking::{BlockingTask, NoopSchedule}; -use crate::runtime::task::{self, JoinHandle}; -use crate::runtime::{blocking, context, driver, Spawner}; -use crate::util::error::{CONTEXT_MISSING_ERROR, THREAD_LOCAL_DESTROYED_ERROR}; - -use std::future::Future; -use std::marker::PhantomData; -use std::{error, fmt}; +use crate::runtime::scheduler; /// Handle to the runtime. /// @@ -14,51 +7,19 @@ use std::{error, fmt}; /// /// [`Runtime::handle`]: crate::runtime::Runtime::handle() #[derive(Debug, Clone)] +// When the `rt` feature is *not* enabled, this type is still defined, but not +// included in the public API. pub struct Handle { - pub(super) spawner: Spawner, + pub(crate) inner: scheduler::Handle, } -/// All internal handles that are *not* the scheduler's spawner. -#[derive(Debug)] -pub(crate) struct HandleInner { - /// Handles to the I/O drivers - #[cfg_attr( - not(any( - feature = "net", - all(unix, feature = "process"), - all(unix, feature = "signal"), - )), - allow(dead_code) - )] - pub(super) io_handle: driver::IoHandle, - - /// Handles to the signal drivers - #[cfg_attr( - any( - loom, - not(all(unix, feature = "signal")), - not(all(unix, feature = "process")), - ), - allow(dead_code) - )] - pub(super) signal_handle: driver::SignalHandle, - - /// Handles to the time drivers - #[cfg_attr(not(feature = "time"), allow(dead_code))] - pub(super) time_handle: driver::TimeHandle, - - /// Source of `Instant::now()` - #[cfg_attr(not(all(feature = "time", feature = "test-util")), allow(dead_code))] - pub(super) clock: driver::Clock, - - /// Blocking pool spawner - pub(super) blocking_spawner: blocking::Spawner, -} +use crate::runtime::context; +use crate::runtime::task::JoinHandle; +use crate::util::error::{CONTEXT_MISSING_ERROR, THREAD_LOCAL_DESTROYED_ERROR}; -/// Create a new runtime handle. -pub(crate) trait ToHandle { - fn to_handle(&self) -> Handle; -} +use std::future::Future; +use std::marker::PhantomData; +use std::{error, fmt}; /// Runtime context guard. /// @@ -208,11 +169,7 @@ impl Handle { F: FnOnce() -> R + Send + 'static, R: Send + 'static, { - self.as_inner().spawn_blocking(self, func) - } - - pub(crate) fn as_inner(&self) -> &HandleInner { - self.spawner.as_handle_inner() + self.inner.blocking_spawner().spawn_blocking(self, func) } /// Runs a future to completion on this `Handle`'s associated `Runtime`. 
@@ -312,17 +269,7 @@ impl Handle { let id = crate::runtime::task::Id::next(); #[cfg(all(tokio_unstable, feature = "tracing"))] let future = crate::util::trace::task(future, "task", _name, id.as_u64()); - self.spawner.spawn(future, id) - } - - pub(crate) fn shutdown(mut self) { - self.spawner.shutdown(); - } -} - -impl ToHandle for Handle { - fn to_handle(&self) -> Handle { - self.clone() + self.inner.spawn(future, id) } } @@ -338,107 +285,6 @@ cfg_metrics! { } } -impl HandleInner { - #[track_caller] - pub(crate) fn spawn_blocking(&self, rt: &dyn ToHandle, func: F) -> JoinHandle - where - F: FnOnce() -> R + Send + 'static, - R: Send + 'static, - { - let (join_handle, spawn_result) = if cfg!(debug_assertions) - && std::mem::size_of::() > 2048 - { - self.spawn_blocking_inner(Box::new(func), blocking::Mandatory::NonMandatory, None, rt) - } else { - self.spawn_blocking_inner(func, blocking::Mandatory::NonMandatory, None, rt) - }; - - match spawn_result { - Ok(()) => join_handle, - // Compat: do not panic here, return the join_handle even though it will never resolve - Err(blocking::SpawnError::ShuttingDown) => join_handle, - Err(blocking::SpawnError::NoThreads(e)) => { - panic!("OS can't spawn worker thread: {}", e) - } - } - } - - cfg_fs! { - #[track_caller] - #[cfg_attr(any( - all(loom, not(test)), // the function is covered by loom tests - test - ), allow(dead_code))] - pub(crate) fn spawn_mandatory_blocking(&self, rt: &dyn ToHandle, func: F) -> Option> - where - F: FnOnce() -> R + Send + 'static, - R: Send + 'static, - { - let (join_handle, spawn_result) = if cfg!(debug_assertions) && std::mem::size_of::() > 2048 { - self.spawn_blocking_inner( - Box::new(func), - blocking::Mandatory::Mandatory, - None, - rt, - ) - } else { - self.spawn_blocking_inner( - func, - blocking::Mandatory::Mandatory, - None, - rt, - ) - }; - - if spawn_result.is_ok() { - Some(join_handle) - } else { - None - } - } - } - - #[track_caller] - pub(crate) fn spawn_blocking_inner( - &self, - func: F, - is_mandatory: blocking::Mandatory, - name: Option<&str>, - rt: &dyn ToHandle, - ) -> (JoinHandle, Result<(), blocking::SpawnError>) - where - F: FnOnce() -> R + Send + 'static, - R: Send + 'static, - { - let fut = BlockingTask::new(func); - let id = super::task::Id::next(); - #[cfg(all(tokio_unstable, feature = "tracing"))] - let fut = { - use tracing::Instrument; - let location = std::panic::Location::caller(); - let span = tracing::trace_span!( - target: "tokio::task::blocking", - "runtime.spawn", - kind = %"blocking", - task.name = %name.unwrap_or_default(), - task.id = id.as_u64(), - "fn" = %std::any::type_name::(), - spawn.location = %format_args!("{}:{}:{}", location.file(), location.line(), location.column()), - ); - fut.instrument(span) - }; - - #[cfg(not(all(tokio_unstable, feature = "tracing")))] - let _ = name; - - let (task, handle) = task::unowned(fut, NoopSchedule, id); - let spawned = self - .blocking_spawner - .spawn(blocking::Task::new(task, is_mandatory), rt); - (handle, spawned) - } -} - /// Error returned by `try_current` when no Runtime has been started #[derive(Debug)] pub struct TryCurrentError { diff --git a/tokio/src/runtime/io/mod.rs b/tokio/src/runtime/io/mod.rs index d447b87eba4..aa9447322ef 100644 --- a/tokio/src/runtime/io/mod.rs +++ b/tokio/src/runtime/io/mod.rs @@ -10,7 +10,6 @@ mod metrics; use crate::io::interest::Interest; use crate::io::ready::Ready; -use crate::park::{Park, Unpark}; use crate::util::slab::{self, Slab}; use crate::{loom::sync::RwLock, util::bit}; @@ -145,7 +144,26 @@ 
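// For context, the `size_of::<F>() > 2048` check preserved above is a
// debug-only guard: very large closures are boxed before being handed to the
// blocking pool so unoptimized debug frames don't overflow the stack. A sketch
// of the shape, with a hypothetical `spawn_inner` standing in for the pool:

fn spawn_blocking_sketch<F, R>(func: F)
where
    F: FnOnce() -> R + Send + 'static,
    R: Send + 'static,
{
    if cfg!(debug_assertions) && std::mem::size_of::<F>() > 2048 {
        // Box<F> still implements FnOnce() -> R, so both branches can feed the
        // same generic entry point; only debug builds pay for the allocation.
        spawn_inner(Box::new(func));
    } else {
        spawn_inner(func);
    }
}

fn spawn_inner<F, R>(func: F)
where
    F: FnOnce() -> R + Send + 'static,
    R: Send + 'static,
{
    // Stand-in for handing the closure to a pooled worker thread.
    std::thread::spawn(move || {
        let _ = func();
    });
}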
impl Driver { } } - fn turn(&mut self, max_wait: Option) -> io::Result<()> { + pub(crate) fn park(&mut self) { + self.turn(None); + } + + pub(crate) fn park_timeout(&mut self, duration: Duration) { + self.turn(Some(duration)); + } + + pub(crate) fn shutdown(&mut self) { + if self.inner.shutdown() { + self.resources.for_each(|io| { + // If a task is waiting on the I/O resource, notify it. The task + // will then attempt to use the I/O resource and fail due to the + // driver being shutdown. And shutdown will clear all wakers. + io.shutdown(); + }); + } + } + + fn turn(&mut self, max_wait: Option) { // How often to call `compact()` on the resource slab const COMPACT_INTERVAL: u8 = 255; @@ -167,7 +185,7 @@ impl Driver { // In case of wasm32_wasi this error happens, when trying to poll without subscriptions // just return from the park, as there would be nothing, which wakes us up. } - Err(e) => return Err(e), + Err(e) => panic!("unexpected error when polling the I/O driver: {:?}", e), } // Process all the events that came in, dispatching appropriately @@ -184,8 +202,6 @@ impl Driver { self.inner.metrics.incr_ready_count_by(ready_count); self.events = Some(events); - - Ok(()) } fn dispatch(&mut self, token: mio::Token, ready: Ready) { @@ -209,42 +225,6 @@ impl Driver { } } -impl Drop for Driver { - fn drop(&mut self) { - self.shutdown(); - } -} - -impl Park for Driver { - type Unpark = Handle; - type Error = io::Error; - - fn unpark(&self) -> Self::Unpark { - self.handle() - } - - fn park(&mut self) -> io::Result<()> { - self.turn(None)?; - Ok(()) - } - - fn park_timeout(&mut self, duration: Duration) -> io::Result<()> { - self.turn(Some(duration))?; - Ok(()) - } - - fn shutdown(&mut self) { - if self.inner.shutdown() { - self.resources.for_each(|io| { - // If a task is waiting on the I/O resource, notify it. The task - // will then attempt to use the I/O resource and fail due to the - // driver being shutdown. And shutdown will clear all wakers. - io.shutdown(); - }); - } - } -} - impl fmt::Debug for Driver { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "Driver") @@ -303,18 +283,12 @@ impl Handle { /// after this method has been called. If the reactor is not currently /// blocked in `turn`, then the next call to `turn` will not block and /// return immediately. - fn wakeup(&self) { + pub(crate) fn unpark(&self) { #[cfg(not(tokio_wasi))] self.inner.waker.wake().expect("failed to wake I/O driver"); } } -impl Unpark for Handle { - fn unpark(&self) { - self.wakeup(); - } -} - impl fmt::Debug for Handle { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "Handle") diff --git a/tokio/src/runtime/io/registration.rs b/tokio/src/runtime/io/registration.rs index a82eaee46b1..7b823460ad6 100644 --- a/tokio/src/runtime/io/registration.rs +++ b/tokio/src/runtime/io/registration.rs @@ -57,10 +57,8 @@ unsafe impl Sync for Registration {} // ===== impl Registration ===== impl Registration { - /// Registers the I/O resource with the default reactor, for a specific - /// `Interest`. `new_with_interest` should be used over `new` when you need - /// control over the readiness state, such as when a file descriptor only - /// allows reads. This does not add `hup` or `error` so if you are + /// Registers the I/O resource with the reactor for the provided handle, for + /// a specific `Interest`. This does not add `hup` or `error` so if you are /// interested in those states, you will need to add them to the readiness /// state passed to this function. 
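// For context, after this change `turn` no longer returns io::Result: an
// interrupted poll is treated as an empty wakeup and anything unexpected
// panics. A simplified sketch written directly against mio's public API (the
// real driver additionally dispatches readiness through a slab of registered
// resources and batches metrics):

use std::io;
use std::time::Duration;

fn turn(poll: &mut mio::Poll, events: &mut mio::Events, max_wait: Option<Duration>) {
    match poll.poll(events, max_wait) {
        Ok(()) => {}
        // Spurious interruption: fall through and dispatch whatever arrived.
        Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
        // With the Park trait gone there is no Error associated type to bubble
        // this through, hence the panic.
        Err(e) => panic!("unexpected error when polling the I/O driver: {:?}", e),
    }

    for event in events.iter() {
        dispatch(event.token());
    }
}

fn dispatch(token: mio::Token) {
    // Look up the resource registered under `token` and wake pending tasks.
    let _ = token;
}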
/// diff --git a/tokio/src/runtime/metrics/runtime.rs b/tokio/src/runtime/metrics/runtime.rs index 279d70a0e74..6ad5735b0f7 100644 --- a/tokio/src/runtime/metrics/runtime.rs +++ b/tokio/src/runtime/metrics/runtime.rs @@ -39,7 +39,7 @@ impl RuntimeMetrics { /// } /// ``` pub fn num_workers(&self) -> usize { - self.handle.spawner.num_workers() + self.handle.inner.num_workers() } /// Returns the number of tasks scheduled from **outside** of the runtime. @@ -68,7 +68,7 @@ impl RuntimeMetrics { /// ``` pub fn remote_schedule_count(&self) -> u64 { self.handle - .spawner + .inner .scheduler_metrics() .remote_schedule_count .load(Relaxed) @@ -111,7 +111,7 @@ impl RuntimeMetrics { /// ``` pub fn worker_park_count(&self, worker: usize) -> u64 { self.handle - .spawner + .inner .worker_metrics(worker) .park_count .load(Relaxed) @@ -154,7 +154,7 @@ impl RuntimeMetrics { /// ``` pub fn worker_noop_count(&self, worker: usize) -> u64 { self.handle - .spawner + .inner .worker_metrics(worker) .noop_count .load(Relaxed) @@ -199,7 +199,7 @@ impl RuntimeMetrics { /// ``` pub fn worker_steal_count(&self, worker: usize) -> u64 { self.handle - .spawner + .inner .worker_metrics(worker) .steal_count .load(Relaxed) @@ -240,7 +240,7 @@ impl RuntimeMetrics { /// ``` pub fn worker_poll_count(&self, worker: usize) -> u64 { self.handle - .spawner + .inner .worker_metrics(worker) .poll_count .load(Relaxed) @@ -285,7 +285,7 @@ impl RuntimeMetrics { pub fn worker_total_busy_duration(&self, worker: usize) -> Duration { let nanos = self .handle - .spawner + .inner .worker_metrics(worker) .busy_duration_total .load(Relaxed); @@ -331,7 +331,7 @@ impl RuntimeMetrics { /// ``` pub fn worker_local_schedule_count(&self, worker: usize) -> u64 { self.handle - .spawner + .inner .worker_metrics(worker) .local_schedule_count .load(Relaxed) @@ -377,7 +377,7 @@ impl RuntimeMetrics { /// ``` pub fn worker_overflow_count(&self, worker: usize) -> u64 { self.handle - .spawner + .inner .worker_metrics(worker) .overflow_count .load(Relaxed) @@ -406,7 +406,7 @@ impl RuntimeMetrics { /// } /// ``` pub fn injection_queue_depth(&self) -> usize { - self.handle.spawner.injection_queue_depth() + self.handle.inner.injection_queue_depth() } /// Returns the number of tasks currently scheduled in the given worker's @@ -444,7 +444,7 @@ impl RuntimeMetrics { /// } /// ``` pub fn worker_local_queue_depth(&self, worker: usize) -> usize { - self.handle.spawner.worker_local_queue_depth(worker) + self.handle.inner.worker_local_queue_depth(worker) } } @@ -526,8 +526,9 @@ cfg_net! { // TODO: Investigate if this should return 0, most of our metrics always increase // thus this breaks that guarantee. self.handle - .as_inner() - .io_handle + .inner + .driver() + .io .as_ref() .map(|h| f(h.metrics())) .unwrap_or(0) diff --git a/tokio/src/runtime/mod.rs b/tokio/src/runtime/mod.rs index 5b1bc2ac376..1ca9b944152 100644 --- a/tokio/src/runtime/mod.rs +++ b/tokio/src/runtime/mod.rs @@ -177,16 +177,22 @@ #[macro_use] mod tests; +mod driver; +pub(crate) mod scheduler; + cfg_io_driver_impl! { pub(crate) mod io; } +cfg_time! { + pub(crate) mod time; +} + cfg_rt! { pub(crate) mod enter; pub(crate) mod task; - pub(crate) mod scheduler; use scheduler::CurrentThread; mod config; @@ -209,19 +215,15 @@ cfg_rt! { pub use self::builder::Builder; cfg_unstable! 
{ pub use self::builder::UnhandledPanic; + pub use crate::util::RngSeed; } pub(crate) mod context; - mod driver; use self::enter::enter; mod handle; pub use handle::{EnterGuard, Handle, TryCurrentError}; - pub(crate) use handle::{HandleInner, ToHandle}; - - mod spawner; - use self::spawner::Spawner; cfg_metrics! { mod metrics; @@ -292,8 +294,8 @@ cfg_rt! { /// [`Builder`]: struct@Builder #[derive(Debug)] pub struct Runtime { - /// Task executor - kind: Kind, + /// Task scheduler + scheduler: Scheduler, /// Handle to runtime, also contains driver handles handle: Handle, @@ -302,9 +304,9 @@ cfg_rt! { blocking_pool: BlockingPool, } - /// The runtime executor is either a multi-thread or a current-thread executor. + /// The runtime scheduler is either a multi-thread or a current-thread executor. #[derive(Debug)] - enum Kind { + enum Scheduler { /// Execute all tasks on the current-thread. CurrentThread(CurrentThread), @@ -486,10 +488,10 @@ cfg_rt! { let _enter = self.enter(); - match &self.kind { - Kind::CurrentThread(exec) => exec.block_on(future), + match &self.scheduler { + Scheduler::CurrentThread(exec) => exec.block_on(future), #[cfg(all(feature = "rt-multi-thread", not(tokio_wasi)))] - Kind::MultiThread(exec) => exec.block_on(future), + Scheduler::MultiThread(exec) => exec.block_on(future), } } @@ -566,7 +568,7 @@ cfg_rt! { /// ``` pub fn shutdown_timeout(mut self, duration: Duration) { // Wakeup and shutdown all the worker threads - self.handle.clone().shutdown(); + self.handle.inner.shutdown(); self.blocking_pool.shutdown(Some(duration)); } @@ -604,8 +606,8 @@ cfg_rt! { #[allow(clippy::single_match)] // there are comments in the error branch, so we don't want if-let impl Drop for Runtime { fn drop(&mut self) { - match &mut self.kind { - Kind::CurrentThread(current_thread) => { + match &mut self.scheduler { + Scheduler::CurrentThread(current_thread) => { // This ensures that tasks spawned on the current-thread // runtime are dropped inside the runtime's context. match self::context::try_enter(self.handle.clone()) { @@ -619,7 +621,7 @@ cfg_rt! { } }, #[cfg(all(feature = "rt-multi-thread", not(tokio_wasi)))] - Kind::MultiThread(_) => { + Scheduler::MultiThread(_) => { // The threaded scheduler drops its tasks on its worker threads, which is // already in the runtime's context. }, diff --git a/tokio/src/runtime/scheduler/current_thread.rs b/tokio/src/runtime/scheduler/current_thread.rs index 59472cf240e..666be6b13f2 100644 --- a/tokio/src/runtime/scheduler/current_thread.rs +++ b/tokio/src/runtime/scheduler/current_thread.rs @@ -1,15 +1,14 @@ use crate::future::poll_fn; use crate::loom::sync::atomic::AtomicBool; use crate::loom::sync::{Arc, Mutex}; -use crate::park::{Park, Unpark}; use crate::runtime::context::EnterGuard; -use crate::runtime::driver::Driver; +use crate::runtime::driver::{self, Driver}; use crate::runtime::task::{self, JoinHandle, OwnedTasks, Schedule, Task}; -use crate::runtime::{Config, HandleInner}; +use crate::runtime::{blocking, Config}; use crate::runtime::{MetricsBatch, SchedulerMetrics, WorkerMetrics}; use crate::sync::notify::Notify; use crate::util::atomic_cell::AtomicCell; -use crate::util::{waker_ref, Wake, WakerRef}; +use crate::util::{waker_ref, RngSeedGenerator, Wake, WakerRef}; use std::cell::RefCell; use std::collections::VecDeque; @@ -28,8 +27,8 @@ pub(crate) struct CurrentThread { /// driver. 
notify: Notify, - /// Sendable task spawner - spawner: Spawner, + /// Shared handle to the scheduler + handle: Arc, /// This is usually None, but right before dropping the CurrentThread /// scheduler, it is changed to `Some` with the context being the runtime's @@ -38,14 +37,26 @@ pub(crate) struct CurrentThread { context_guard: Option, } +/// Handle to the current thread scheduler +pub(crate) struct Handle { + /// Scheduler state shared across threads + shared: Shared, + + /// Resource driver handles + pub(crate) driver: driver::Handle, + + /// Blocking pool spawner + pub(crate) blocking_spawner: blocking::Spawner, + + /// Current random number generator seed + pub(crate) seed_generator: RngSeedGenerator, +} + /// Data required for executing the scheduler. The struct is passed around to /// a function that will perform the scheduling work and acts as a capability token. struct Core { /// Scheduler run queue - tasks: VecDeque>>, - - /// Sendable task spawner - spawner: Spawner, + tasks: VecDeque>>, /// Current tick tick: u32, @@ -63,28 +74,17 @@ struct Core { unhandled_panic: bool, } -#[derive(Clone)] -pub(crate) struct Spawner { - shared: Arc, -} - /// Scheduler state shared between threads. struct Shared { /// Remote run queue. None if the `Runtime` has been dropped. - queue: Mutex>>>>, + queue: Mutex>>>>, /// Collection of all active tasks spawned onto this executor. - owned: OwnedTasks>, - - /// Unpark the blocked thread. - unpark: ::Unpark, + owned: OwnedTasks>, /// Indicates whether the blocked on thread was woken. woken: AtomicBool, - /// Handle to I/O driver, timer, blocking pool, ... - handle_inner: HandleInner, - /// Scheduler configuration options config: Config, @@ -97,8 +97,8 @@ struct Shared { /// Thread-local context. struct Context { - /// Handle to the spawner - spawner: Spawner, + /// Scheduler handle + handle: Arc, /// Scheduler core, enabling the holder of `Context` to execute the /// scheduler. 
@@ -112,25 +112,29 @@ const INITIAL_CAPACITY: usize = 64; scoped_thread_local!(static CURRENT: Context); impl CurrentThread { - pub(crate) fn new(driver: Driver, handle_inner: HandleInner, config: Config) -> CurrentThread { - let unpark = driver.unpark(); - - let spawner = Spawner { - shared: Arc::new(Shared { + pub(crate) fn new( + driver: Driver, + driver_handle: driver::Handle, + blocking_spawner: blocking::Spawner, + seed_generator: RngSeedGenerator, + config: Config, + ) -> CurrentThread { + let handle = Arc::new(Handle { + shared: Shared { queue: Mutex::new(Some(VecDeque::with_capacity(INITIAL_CAPACITY))), owned: OwnedTasks::new(), - unpark, woken: AtomicBool::new(false), - handle_inner, config, scheduler_metrics: SchedulerMetrics::new(), worker_metrics: WorkerMetrics::new(), - }), - }; + }, + driver: driver_handle, + blocking_spawner, + seed_generator, + }); let core = AtomicCell::new(Some(Box::new(Core { tasks: VecDeque::with_capacity(INITIAL_CAPACITY), - spawner: spawner.clone(), tick: 0, driver: Some(driver), metrics: MetricsBatch::new(), @@ -140,13 +144,13 @@ impl CurrentThread { CurrentThread { core, notify: Notify::new(), - spawner, + handle, context_guard: None, } } - pub(crate) fn spawner(&self) -> &Spawner { - &self.spawner + pub(crate) fn handle(&self) -> &Arc { + &self.handle } #[track_caller] @@ -190,7 +194,7 @@ impl CurrentThread { Some(CoreGuard { context: Context { - spawner: self.spawner.clone(), + handle: self.handle.clone(), core: RefCell::new(Some(core)), }, scheduler: self, @@ -217,16 +221,16 @@ impl Drop for CurrentThread { // Drain the OwnedTasks collection. This call also closes the // collection, ensuring that no tasks are ever pushed after this // call returns. - context.spawner.shared.owned.close_and_shutdown_all(); + context.handle.shared.owned.close_and_shutdown_all(); // Drain local queue // We already shut down every task, so we just need to drop the task. - while let Some(task) = core.pop_task() { + while let Some(task) = core.pop_task(&self.handle) { drop(task); } // Drain remote queue and set it to None - let remote_queue = core.spawner.shared.queue.lock().take(); + let remote_queue = self.handle.shared.queue.lock().take(); // Using `Option::take` to replace the shared queue with `None`. // We already shut down every task, so we just need to drop the task. 
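// For context on the hunks that follow: `pop_task`/`push_task` now take the
// handle as an argument because Core no longer stores a copy of the scheduler
// state, and both keep a queue-depth gauge in sync. A minimal sketch, with
// `Gauge` standing in for Tokio's WorkerMetrics:

use std::collections::VecDeque;
use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};

struct Gauge(AtomicUsize);

impl Gauge {
    fn set(&self, v: usize) {
        self.0.store(v, Relaxed);
    }
}

struct Core<T> {
    tasks: VecDeque<T>,
}

impl<T> Core<T> {
    fn pop_task(&mut self, depth: &Gauge) -> Option<T> {
        let ret = self.tasks.pop_front();
        // Update the gauge on every transition so metric readers never
        // observe a stale depth.
        depth.set(self.tasks.len());
        ret
    }

    fn push_task(&mut self, depth: &Gauge, task: T) {
        self.tasks.push_back(task);
        depth.set(self.tasks.len());
    }
}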
@@ -236,10 +240,15 @@ impl Drop for CurrentThread {
                 }
             }

-            assert!(context.spawner.shared.owned.is_empty());
+            assert!(context.handle.shared.owned.is_empty());

             // Submit metrics
-            core.metrics.submit(&core.spawner.shared.worker_metrics);
+            core.metrics.submit(&self.handle.shared.worker_metrics);
+
+            // Shut down the resource drivers
+            if let Some(driver) = core.driver.as_mut() {
+                driver.shutdown();
+            }

             (core, ())
         });
@@ -255,19 +264,19 @@ impl fmt::Debug for CurrentThread {
 // ===== impl Core =====

 impl Core {
-    fn pop_task(&mut self) -> Option<task::Notified<Arc<Shared>>> {
+    fn pop_task(&mut self, handle: &Handle) -> Option<task::Notified<Arc<Handle>>> {
         let ret = self.tasks.pop_front();
-        self.spawner
+        handle
             .shared
             .worker_metrics
             .set_queue_depth(self.tasks.len());
         ret
     }

-    fn push_task(&mut self, task: task::Notified<Arc<Shared>>) {
+    fn push_task(&mut self, handle: &Handle, task: task::Notified<Arc<Handle>>) {
         self.tasks.push_back(task);
         self.metrics.inc_local_schedule_count();
-        self.spawner
+        handle
             .shared
             .worker_metrics
             .set_queue_depth(self.tasks.len());
@@ -289,7 +298,7 @@ impl Context {
     fn park(&self, mut core: Box<Core>) -> Box<Core> {
         let mut driver = core.driver.take().expect("driver missing");

-        if let Some(f) = &self.spawner.shared.config.before_park {
+        if let Some(f) = &self.handle.shared.config.before_park {
             // Incorrect lint, the closures are actually different types so `f`
             // cannot be passed as an argument to `enter`.
             #[allow(clippy::redundant_closure)]
@@ -302,17 +311,17 @@
         if core.tasks.is_empty() {
             // Park until the thread is signaled
             core.metrics.about_to_park();
-            core.metrics.submit(&core.spawner.shared.worker_metrics);
+            core.metrics.submit(&self.handle.shared.worker_metrics);

             let (c, _) = self.enter(core, || {
-                driver.park().expect("failed to park");
+                driver.park();
             });

             core = c;
             core.metrics.returned_from_park();
         }

-        if let Some(f) = &self.spawner.shared.config.after_unpark {
+        if let Some(f) = &self.handle.shared.config.after_unpark {
             // Incorrect lint, the closures are actually different types so `f`
             // cannot be passed as an argument to `enter`.
#[allow(clippy::redundant_closure)] @@ -328,11 +337,9 @@ impl Context { fn park_yield(&self, mut core: Box) -> Box { let mut driver = core.driver.take().expect("driver missing"); - core.metrics.submit(&core.spawner.shared.worker_metrics); + core.metrics.submit(&self.handle.shared.worker_metrics); let (mut core, _) = self.enter(core, || { - driver - .park_timeout(Duration::from_millis(0)) - .expect("failed to park"); + driver.park_timeout(Duration::from_millis(0)); }); core.driver = Some(driver); @@ -354,50 +361,50 @@ impl Context { } } -// ===== impl Spawner ===== +// ===== impl Handle ===== -impl Spawner { +impl Handle { /// Spawns a future onto the `CurrentThread` scheduler - pub(crate) fn spawn(&self, future: F, id: crate::runtime::task::Id) -> JoinHandle + pub(crate) fn spawn( + me: &Arc, + future: F, + id: crate::runtime::task::Id, + ) -> JoinHandle where F: crate::future::Future + Send + 'static, F::Output: Send + 'static, { - let (handle, notified) = self.shared.owned.bind(future, self.shared.clone(), id); + let (handle, notified) = me.shared.owned.bind(future, me.clone(), id); if let Some(notified) = notified { - self.shared.schedule(notified); + me.schedule(notified); } handle } - fn pop(&self) -> Option>> { + fn pop(&self) -> Option>> { match self.shared.queue.lock().as_mut() { Some(queue) => queue.pop_front(), None => None, } } - fn waker_ref(&self) -> WakerRef<'_> { + fn waker_ref(me: &Arc) -> WakerRef<'_> { // Set woken to true when enter block_on, ensure outer future // be polled for the first time when enter loop - self.shared.woken.store(true, Release); - waker_ref(&self.shared) + me.shared.woken.store(true, Release); + waker_ref(me) } // reset woken to false and return original value pub(crate) fn reset_woken(&self) -> bool { self.shared.woken.swap(false, AcqRel) } - - pub(crate) fn as_handle_inner(&self) -> &HandleInner { - &self.shared.handle_inner - } } cfg_metrics! { - impl Spawner { + impl Handle { pub(crate) fn scheduler_metrics(&self) -> &SchedulerMetrics { &self.shared.scheduler_metrics } @@ -405,7 +412,9 @@ cfg_metrics! { pub(crate) fn injection_queue_depth(&self) -> usize { // TODO: avoid having to lock. The multi-threaded injection queue // could probably be used here. - self.shared.queue.lock() + self.shared + .queue + .lock() .as_ref() .map(|queue| queue.len()) .unwrap_or(0) @@ -418,41 +427,41 @@ cfg_metrics! { } } -impl fmt::Debug for Spawner { +impl fmt::Debug for Handle { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Spawner").finish() + fmt.debug_struct("current_thread::Handle { ... }").finish() } } // ===== impl Shared ===== -impl Schedule for Arc { +impl Schedule for Arc { fn release(&self, task: &Task) -> Option> { - self.owned.remove(task) + self.shared.owned.remove(task) } fn schedule(&self, task: task::Notified) { CURRENT.with(|maybe_cx| match maybe_cx { - Some(cx) if Arc::ptr_eq(self, &cx.spawner.shared) => { + Some(cx) if Arc::ptr_eq(self, &cx.handle) => { let mut core = cx.core.borrow_mut(); // If `None`, the runtime is shutting down, so there is no need // to schedule the task. if let Some(core) = core.as_mut() { - core.push_task(task); + core.push_task(self, task); } } _ => { // Track that a task was scheduled from **outside** of the runtime. - self.scheduler_metrics.inc_remote_schedule_count(); + self.shared.scheduler_metrics.inc_remote_schedule_count(); // If the queue is None, then the runtime has shut down. We // don't need to do anything with the notification in that case. 
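// For context, `reset_woken` above and the `Wake` impl below cooperate on a
// single atomic flag: the waker records that a wake happened and unparks the
// thread, while block_on swaps the flag back to decide whether the outer
// future needs a re-poll. A standalone sketch with stand-in types:

use std::sync::atomic::{AtomicBool, Ordering::{AcqRel, Release}};
use std::sync::Arc;
use std::task::{Wake, Waker};

struct Handle {
    woken: AtomicBool,
}

impl Handle {
    fn unpark_driver(&self) { /* wake the parked runtime thread */ }

    // Reset `woken` to false and report whether a wake arrived in between.
    fn reset_woken(&self) -> bool {
        self.woken.swap(false, AcqRel)
    }
}

impl Wake for Handle {
    fn wake(self: Arc<Self>) {
        Self::wake_by_ref(&self);
    }

    fn wake_by_ref(self: &Arc<Self>) {
        // Release pairs with the AcqRel swap in `reset_woken`, so writes made
        // before the wake are visible to the re-poll.
        self.woken.store(true, Release);
        self.unpark_driver();
    }
}

fn waker(handle: &Arc<Handle>) -> Waker {
    // An `Arc<impl Wake>` converts straight into a std Waker.
    handle.clone().into()
}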
- let mut guard = self.queue.lock(); + let mut guard = self.shared.queue.lock(); if let Some(queue) = guard.as_mut() { queue.push_back(task); drop(guard); - self.unpark.unpark(); + self.driver.unpark(); } } }); @@ -462,7 +471,7 @@ impl Schedule for Arc { fn unhandled_panic(&self) { use crate::runtime::UnhandledPanic; - match self.config.unhandled_panic { + match self.shared.config.unhandled_panic { UnhandledPanic::Ignore => { // Do nothing } @@ -471,13 +480,13 @@ impl Schedule for Arc { // `CURRENT` should match with `&self`, i.e. there is no // opportunity for a nested scheduler to be called. CURRENT.with(|maybe_cx| match maybe_cx { - Some(cx) if Arc::ptr_eq(self, &cx.spawner.shared) => { + Some(cx) if Arc::ptr_eq(self, &cx.handle) => { let mut core = cx.core.borrow_mut(); // If `None`, the runtime is shutting down, so there is no need to signal shutdown if let Some(core) = core.as_mut() { core.unhandled_panic = true; - self.owned.close_and_shutdown_all(); + self.shared.owned.close_and_shutdown_all(); } } _ => unreachable!("runtime core not set in CURRENT thread-local"), @@ -488,15 +497,15 @@ impl Schedule for Arc { } } -impl Wake for Shared { +impl Wake for Handle { fn wake(arc_self: Arc) { Wake::wake_by_ref(&arc_self) } /// Wake by reference fn wake_by_ref(arc_self: &Arc) { - arc_self.woken.store(true, Release); - arc_self.unpark.unpark(); + arc_self.shared.woken.store(true, Release); + arc_self.driver.unpark(); } } @@ -514,13 +523,15 @@ impl CoreGuard<'_> { fn block_on(self, future: F) -> F::Output { let ret = self.enter(|mut core, context| { let _enter = crate::runtime::enter(false); - let waker = context.spawner.waker_ref(); + let waker = Handle::waker_ref(&context.handle); let mut cx = std::task::Context::from_waker(&waker); pin!(future); 'outer: loop { - if core.spawner.reset_woken() { + let handle = &context.handle; + + if handle.reset_woken() { let (c, res) = context.enter(core, || { crate::coop::budget(|| future.as_mut().poll(&mut cx)) }); @@ -532,7 +543,7 @@ impl CoreGuard<'_> { } } - for _ in 0..core.spawner.shared.config.event_interval { + for _ in 0..handle.shared.config.event_interval { // Make sure we didn't hit an unhandled_panic if core.unhandled_panic { return (core, None); @@ -542,10 +553,10 @@ impl CoreGuard<'_> { let tick = core.tick; core.tick = core.tick.wrapping_add(1); - let entry = if tick % core.spawner.shared.config.global_queue_interval == 0 { - core.spawner.pop().or_else(|| core.tasks.pop_front()) + let entry = if tick % handle.shared.config.global_queue_interval == 0 { + handle.pop().or_else(|| core.tasks.pop_front()) } else { - core.tasks.pop_front().or_else(|| core.spawner.pop()) + core.tasks.pop_front().or_else(|| handle.pop()) }; let task = match entry { @@ -558,7 +569,7 @@ impl CoreGuard<'_> { } }; - let task = context.spawner.shared.owned.assert_owner(task); + let task = context.handle.shared.owned.assert_owner(task); let (c, _) = context.run_task(core, || { task.run(); diff --git a/tokio/src/runtime/scheduler/mod.rs b/tokio/src/runtime/scheduler/mod.rs index 6ca18614c49..e214086d4d0 100644 --- a/tokio/src/runtime/scheduler/mod.rs +++ b/tokio/src/runtime/scheduler/mod.rs @@ -1,7 +1,161 @@ -pub(crate) mod current_thread; -pub(crate) use current_thread::CurrentThread; +cfg_rt! { + pub(crate) mod current_thread; + pub(crate) use current_thread::CurrentThread; +} cfg_rt_multi_thread! 
{
     pub(crate) mod multi_thread;
     pub(crate) use multi_thread::MultiThread;
 }
+
+use crate::runtime::driver;
+
+#[derive(Debug, Clone)]
+pub(crate) enum Handle {
+    #[cfg(feature = "rt")]
+    CurrentThread(Arc<current_thread::Handle>),
+
+    #[cfg(all(feature = "rt-multi-thread", not(tokio_wasi)))]
+    MultiThread(Arc<multi_thread::Handle>),
+
+    // TODO: This is to avoid triggering "dead code" warnings many other places
+    // in the codebase. Remove this during a later cleanup
+    #[cfg(not(feature = "rt"))]
+    #[allow(dead_code)]
+    Disabled,
+}
+
+impl Handle {
+    #[cfg_attr(not(feature = "full"), allow(dead_code))]
+    pub(crate) fn driver(&self) -> &driver::Handle {
+        match *self {
+            #[cfg(feature = "rt")]
+            Handle::CurrentThread(ref h) => &h.driver,
+
+            #[cfg(all(feature = "rt-multi-thread", not(tokio_wasi)))]
+            Handle::MultiThread(ref h) => &h.driver,
+
+            #[cfg(not(feature = "rt"))]
+            Handle::Disabled => unreachable!(),
+        }
+    }
+
+    cfg_time! {
+        #[track_caller]
+        pub(crate) fn time(&self) -> &crate::runtime::time::Handle {
+            self.driver()
+                .time
+                .as_ref()
+                .expect("A Tokio 1.x context was found, but timers are disabled. Call `enable_time` on the runtime builder to enable timers.")
+        }
+
+        cfg_test_util! {
+            pub(crate) fn clock(&self) -> &driver::Clock {
+                &self.driver().clock
+            }
+        }
+    }
+}
+
+cfg_rt! {
+    use crate::future::Future;
+    use crate::loom::sync::Arc;
+    use crate::runtime::{blocking, task::Id};
+    use crate::task::JoinHandle;
+    use crate::util::RngSeedGenerator;
+
+    impl Handle {
+        pub(crate) fn blocking_spawner(&self) -> &blocking::Spawner {
+            match self {
+                Handle::CurrentThread(h) => &h.blocking_spawner,
+
+                #[cfg(all(feature = "rt-multi-thread", not(tokio_wasi)))]
+                Handle::MultiThread(h) => &h.blocking_spawner,
+            }
+        }
+
+        pub(crate) fn spawn<F>(&self, future: F, id: Id) -> JoinHandle<F::Output>
+        where
+            F: Future + Send + 'static,
+            F::Output: Send + 'static,
+        {
+            match self {
+                Handle::CurrentThread(h) => current_thread::Handle::spawn(h, future, id),
+
+                #[cfg(all(feature = "rt-multi-thread", not(tokio_wasi)))]
+                Handle::MultiThread(h) => multi_thread::Handle::spawn(h, future, id),
+            }
+        }
+
+        pub(crate) fn shutdown(&self) {
+            match *self {
+                Handle::CurrentThread(_) => {},
+
+                #[cfg(all(feature = "rt-multi-thread", not(tokio_wasi)))]
+                Handle::MultiThread(ref h) => h.shutdown(),
+            }
+        }
+
+        pub(crate) fn seed_generator(&self) -> &RngSeedGenerator {
+            match self {
+                Handle::CurrentThread(h) => &h.seed_generator,
+
+                #[cfg(all(feature = "rt-multi-thread", not(tokio_wasi)))]
+                Handle::MultiThread(h) => &h.seed_generator,
+            }
+        }
+
+        #[cfg(unix)]
+        cfg_signal_internal! {
+            pub(crate) fn signal(&self) -> &driver::SignalHandle {
+                &self.driver().signal
+            }
+        }
+    }
+
+    cfg_metrics!
{ + use crate::runtime::{SchedulerMetrics, WorkerMetrics}; + + impl Handle { + pub(crate) fn num_workers(&self) -> usize { + match self { + Handle::CurrentThread(_) => 1, + #[cfg(all(feature = "rt-multi-thread", not(tokio_wasi)))] + Handle::MultiThread(handle) => handle.num_workers(), + } + } + + pub(crate) fn scheduler_metrics(&self) -> &SchedulerMetrics { + match self { + Handle::CurrentThread(handle) => handle.scheduler_metrics(), + #[cfg(all(feature = "rt-multi-thread", not(tokio_wasi)))] + Handle::MultiThread(handle) => handle.scheduler_metrics(), + } + } + + pub(crate) fn worker_metrics(&self, worker: usize) -> &WorkerMetrics { + match self { + Handle::CurrentThread(handle) => handle.worker_metrics(worker), + #[cfg(all(feature = "rt-multi-thread", not(tokio_wasi)))] + Handle::MultiThread(handle) => handle.worker_metrics(worker), + } + } + + pub(crate) fn injection_queue_depth(&self) -> usize { + match self { + Handle::CurrentThread(handle) => handle.injection_queue_depth(), + #[cfg(all(feature = "rt-multi-thread", not(tokio_wasi)))] + Handle::MultiThread(handle) => handle.injection_queue_depth(), + } + } + + pub(crate) fn worker_local_queue_depth(&self, worker: usize) -> usize { + match self { + Handle::CurrentThread(handle) => handle.worker_metrics(worker).queue_depth(), + #[cfg(all(feature = "rt-multi-thread", not(tokio_wasi)))] + Handle::MultiThread(handle) => handle.worker_local_queue_depth(worker), + } + } + } + } +} diff --git a/tokio/src/runtime/scheduler/multi_thread/handle.rs b/tokio/src/runtime/scheduler/multi_thread/handle.rs new file mode 100644 index 00000000000..884f400bf00 --- /dev/null +++ b/tokio/src/runtime/scheduler/multi_thread/handle.rs @@ -0,0 +1,86 @@ +use crate::future::Future; +use crate::loom::sync::Arc; +use crate::runtime::scheduler::multi_thread::worker; +use crate::runtime::{ + blocking, driver, + task::{self, JoinHandle}, +}; +use crate::util::RngSeedGenerator; + +use std::fmt; + +/// Handle to the multi thread scheduler +pub(crate) struct Handle { + /// Task spawner + pub(super) shared: worker::Shared, + + /// Resource driver handles + pub(crate) driver: driver::Handle, + + /// Blocking pool spawner + pub(crate) blocking_spawner: blocking::Spawner, + + /// Current random number generator seed + pub(crate) seed_generator: RngSeedGenerator, +} + +impl Handle { + /// Spawns a future onto the thread pool + pub(crate) fn spawn(me: &Arc, future: F, id: task::Id) -> JoinHandle + where + F: crate::future::Future + Send + 'static, + F::Output: Send + 'static, + { + Self::bind_new_task(me, future, id) + } + + pub(crate) fn shutdown(&self) { + self.close(); + } + + pub(super) fn bind_new_task(me: &Arc, future: T, id: task::Id) -> JoinHandle + where + T: Future + Send + 'static, + T::Output: Send + 'static, + { + let (handle, notified) = me.shared.owned.bind(future, me.clone(), id); + + if let Some(notified) = notified { + me.schedule_task(notified, false); + } + + handle + } +} + +cfg_metrics! 
{ + use crate::runtime::{SchedulerMetrics, WorkerMetrics}; + + impl Handle { + pub(crate) fn num_workers(&self) -> usize { + self.shared.worker_metrics.len() + } + + pub(crate) fn scheduler_metrics(&self) -> &SchedulerMetrics { + &self.shared.scheduler_metrics + } + + pub(crate) fn worker_metrics(&self, worker: usize) -> &WorkerMetrics { + &self.shared.worker_metrics[worker] + } + + pub(crate) fn injection_queue_depth(&self) -> usize { + self.shared.injection_queue_depth() + } + + pub(crate) fn worker_local_queue_depth(&self, worker: usize) -> usize { + self.shared.worker_local_queue_depth(worker) + } + } +} + +impl fmt::Debug for Handle { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("multi_thread::Handle { ... }").finish() + } +} diff --git a/tokio/src/runtime/scheduler/multi_thread/mod.rs b/tokio/src/runtime/scheduler/multi_thread/mod.rs index e6c452bd1b0..403eabacfbc 100644 --- a/tokio/src/runtime/scheduler/multi_thread/mod.rs +++ b/tokio/src/runtime/scheduler/multi_thread/mod.rs @@ -1,5 +1,8 @@ //! Multi-threaded runtime +mod handle; +pub(crate) use handle::Handle; + mod idle; use self::idle::Idle; @@ -14,32 +17,15 @@ pub(crate) use worker::Launch; pub(crate) use worker::block_in_place; use crate::loom::sync::Arc; -use crate::runtime::task::{self, JoinHandle}; -use crate::runtime::{Config, Driver, HandleInner}; +use crate::runtime::{blocking, driver, Config, Driver}; +use crate::util::RngSeedGenerator; use std::fmt; use std::future::Future; /// Work-stealing based thread pool for executing futures. pub(crate) struct MultiThread { - spawner: Spawner, -} - -/// Submits futures to the associated thread pool for execution. -/// -/// A `Spawner` instance is a handle to a single thread pool that allows the owner -/// of the handle to spawn futures onto the thread pool. -/// -/// The `Spawner` handle is *only* used for spawning new futures. It does not -/// impact the lifecycle of the thread pool in any way. The thread pool may -/// shut down while there are outstanding `Spawner` instances. -/// -/// `Spawner` instances are obtained by calling [`MultiThread::spawner`]. -/// -/// [`MultiThread::spawner`]: method@MultiThread::spawner -#[derive(Clone)] -pub(crate) struct Spawner { - shared: Arc, + handle: Arc, } // ===== impl MultiThread ===== @@ -48,13 +34,21 @@ impl MultiThread { pub(crate) fn new( size: usize, driver: Driver, - handle_inner: HandleInner, + driver_handle: driver::Handle, + blocking_spawner: blocking::Spawner, + seed_generator: RngSeedGenerator, config: Config, ) -> (MultiThread, Launch) { let parker = Parker::new(driver); - let (shared, launch) = worker::create(size, parker, handle_inner, config); - let spawner = Spawner { shared }; - let multi_thread = MultiThread { spawner }; + let (handle, launch) = worker::create( + size, + parker, + driver_handle, + blocking_spawner, + seed_generator, + config, + ); + let multi_thread = MultiThread { handle }; (multi_thread, launch) } @@ -63,8 +57,8 @@ impl MultiThread { /// /// The `Spawner` handle can be cloned and enables spawning tasks from other /// threads. - pub(crate) fn spawner(&self) -> &Spawner { - &self.spawner + pub(crate) fn handle(&self) -> &Arc { + &self.handle } /// Blocks the current thread waiting for the future to complete. 
@@ -88,59 +82,6 @@ impl fmt::Debug for MultiThread { impl Drop for MultiThread { fn drop(&mut self) { - self.spawner.shutdown(); - } -} - -// ==== impl Spawner ===== - -impl Spawner { - /// Spawns a future onto the thread pool - pub(crate) fn spawn(&self, future: F, id: task::Id) -> JoinHandle - where - F: crate::future::Future + Send + 'static, - F::Output: Send + 'static, - { - worker::Shared::bind_new_task(&self.shared, future, id) - } - - pub(crate) fn shutdown(&mut self) { - self.shared.close(); - } - - pub(crate) fn as_handle_inner(&self) -> &HandleInner { - self.shared.as_handle_inner() - } -} - -cfg_metrics! { - use crate::runtime::{SchedulerMetrics, WorkerMetrics}; - - impl Spawner { - pub(crate) fn num_workers(&self) -> usize { - self.shared.worker_metrics.len() - } - - pub(crate) fn scheduler_metrics(&self) -> &SchedulerMetrics { - &self.shared.scheduler_metrics - } - - pub(crate) fn worker_metrics(&self, worker: usize) -> &WorkerMetrics { - &self.shared.worker_metrics[worker] - } - - pub(crate) fn injection_queue_depth(&self) -> usize { - self.shared.injection_queue_depth() - } - - pub(crate) fn worker_local_queue_depth(&self, worker: usize) -> usize { - self.shared.worker_local_queue_depth(worker) - } - } -} - -impl fmt::Debug for Spawner { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Spawner").finish() + self.handle.shutdown(); } } diff --git a/tokio/src/runtime/scheduler/multi_thread/park.rs b/tokio/src/runtime/scheduler/multi_thread/park.rs index 033b9f20bee..46432f4f036 100644 --- a/tokio/src/runtime/scheduler/multi_thread/park.rs +++ b/tokio/src/runtime/scheduler/multi_thread/park.rs @@ -5,8 +5,7 @@ use crate::loom::sync::atomic::AtomicUsize; use crate::loom::sync::{Arc, Condvar, Mutex}; use crate::loom::thread; -use crate::park::{Park, Unpark}; -use crate::runtime::driver::Driver; +use crate::runtime::driver::{self, Driver}; use crate::util::TryLock; use std::sync::atomic::Ordering::SeqCst; @@ -43,15 +42,10 @@ const NOTIFIED: usize = 3; struct Shared { /// Shared driver. Only one thread at a time can use this driver: TryLock, - - /// Unpark handle - handle: ::Unpark, } impl Parker { pub(crate) fn new(driver: Driver) -> Parker { - let handle = driver.unpark(); - Parker { inner: Arc::new(Inner { state: AtomicUsize::new(EMPTY), @@ -59,60 +53,51 @@ impl Parker { condvar: Condvar::new(), shared: Arc::new(Shared { driver: TryLock::new(driver), - handle, }), }), } } -} - -impl Clone for Parker { - fn clone(&self) -> Parker { - Parker { - inner: Arc::new(Inner { - state: AtomicUsize::new(EMPTY), - mutex: Mutex::new(()), - condvar: Condvar::new(), - shared: self.inner.shared.clone(), - }), - } - } -} - -impl Park for Parker { - type Unpark = Unparker; - type Error = (); - fn unpark(&self) -> Unparker { + pub(crate) fn unpark(&self) -> Unparker { Unparker { inner: self.inner.clone(), } } - fn park(&mut self) -> Result<(), Self::Error> { + pub(crate) fn park(&mut self) { self.inner.park(); - Ok(()) } - fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> { + pub(crate) fn park_timeout(&mut self, duration: Duration) { // Only parking with zero is supported... 
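// For context, the Parker clones above all share one `Shared` slot holding the
// driver: whichever worker wins the try-lock parks on the I/O driver, and
// everyone else falls back to a condvar. A rough sketch with a std Mutex
// standing in for Tokio's TryLock:

use std::sync::{Arc, Mutex, TryLockError};

struct Driver;

impl Driver {
    fn park(&mut self) { /* block on I/O and timers */ }
}

struct Shared {
    driver: Mutex<Driver>,
}

fn park(shared: &Arc<Shared>) {
    match shared.driver.try_lock() {
        // This worker becomes the "driver thread" for the duration of the park.
        Ok(mut driver) => driver.park(),
        // Someone else is driving I/O; park on the condvar instead (see the
        // state-machine sketch further down).
        Err(TryLockError::WouldBlock) => park_condvar(),
        Err(TryLockError::Poisoned(e)) => panic!("driver lock poisoned: {}", e),
    }
}

fn park_condvar() { /* EMPTY -> PARKED -> NOTIFIED handshake */ }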
assert_eq!(duration, Duration::from_millis(0)); if let Some(mut driver) = self.inner.shared.driver.try_lock() { - driver.park_timeout(duration).map_err(|_| ()) - } else { - Ok(()) + driver.park_timeout(duration) } } - fn shutdown(&mut self) { + pub(crate) fn shutdown(&mut self) { self.inner.shutdown(); } } -impl Unpark for Unparker { - fn unpark(&self) { - self.inner.unpark(); +impl Clone for Parker { + fn clone(&self) -> Parker { + Parker { + inner: Arc::new(Inner { + state: AtomicUsize::new(EMPTY), + mutex: Mutex::new(()), + condvar: Condvar::new(), + shared: self.inner.shared.clone(), + }), + } + } +} + +impl Unparker { + pub(crate) fn unpark(&self, driver: &driver::Handle) { + self.inner.unpark(driver); } } @@ -201,8 +186,7 @@ impl Inner { Err(actual) => panic!("inconsistent park state; actual = {}", actual), } - // TODO: don't unwrap - driver.park().unwrap(); + driver.park(); match self.state.swap(EMPTY, SeqCst) { NOTIFIED => {} // got a notification, hurray! @@ -211,7 +195,7 @@ impl Inner { } } - fn unpark(&self) { + fn unpark(&self, driver: &driver::Handle) { // To ensure the unparked thread will observe any writes we made before // this call, we must perform a release operation that `park` can // synchronize with. To do that we must write `NOTIFIED` even if `state` @@ -221,7 +205,7 @@ impl Inner { EMPTY => {} // no one was waiting NOTIFIED => {} // already unparked PARKED_CONDVAR => self.unpark_condvar(), - PARKED_DRIVER => self.unpark_driver(), + PARKED_DRIVER => driver.unpark(), actual => panic!("inconsistent state in unpark; actual = {}", actual), } } @@ -243,10 +227,6 @@ impl Inner { self.condvar.notify_one() } - fn unpark_driver(&self) { - self.shared.handle.unpark(); - } - fn shutdown(&self) { if let Some(mut driver) = self.shared.driver.try_lock() { driver.shutdown(); diff --git a/tokio/src/runtime/scheduler/multi_thread/worker.rs b/tokio/src/runtime/scheduler/multi_thread/worker.rs index 6e2f4fed87f..34ef0d9f126 100644 --- a/tokio/src/runtime/scheduler/multi_thread/worker.rs +++ b/tokio/src/runtime/scheduler/multi_thread/worker.rs @@ -57,25 +57,24 @@ //! leak. use crate::coop; -use crate::future::Future; -use crate::loom::rand::seed; use crate::loom::sync::{Arc, Mutex}; -use crate::park::{Park, Unpark}; use crate::runtime; use crate::runtime::enter::EnterContext; -use crate::runtime::scheduler::multi_thread::{queue, Idle, Parker, Unparker}; -use crate::runtime::task::{Inject, JoinHandle, OwnedTasks}; -use crate::runtime::{task, Config, HandleInner, MetricsBatch, SchedulerMetrics, WorkerMetrics}; +use crate::runtime::scheduler::multi_thread::{queue, Handle, Idle, Parker, Unparker}; +use crate::runtime::task::{Inject, OwnedTasks}; +use crate::runtime::{ + blocking, driver, task, Config, MetricsBatch, SchedulerMetrics, WorkerMetrics, +}; use crate::util::atomic_cell::AtomicCell; -use crate::util::FastRand; +use crate::util::{FastRand, RngSeedGenerator}; use std::cell::RefCell; use std::time::Duration; /// A scheduler worker pub(super) struct Worker { - /// Reference to shared state - shared: Arc, + /// Reference to scheduler's handle + handle: Arc, /// Index holding this worker's remote state index: usize, @@ -97,7 +96,7 @@ struct Core { lifo_slot: Option, /// The worker-local run queue. - run_queue: queue::Local>, + run_queue: queue::Local>, /// True if the worker is currently searching for more work. Searching /// involves attempting to steal from other workers. 
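// For context on Inner::park/unpark above: the states form a small handshake.
// A condensed std-only sketch covering the condvar path (the real Parker adds
// a PARKED_DRIVER state for the thread parked on the I/O driver instead):

use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
use std::sync::{Condvar, Mutex};

const EMPTY: usize = 0;
const PARKED: usize = 1;
const NOTIFIED: usize = 2;

struct Inner {
    state: AtomicUsize,
    mutex: Mutex<()>,
    condvar: Condvar,
}

impl Inner {
    fn park(&self) {
        // Fast path: consume a pending notification without blocking.
        if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() {
            return;
        }

        let mut guard = self.mutex.lock().unwrap();
        match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
            Ok(_) => {}
            // A wake raced us between the fast path and taking the lock.
            Err(NOTIFIED) => {
                self.state.store(EMPTY, SeqCst);
                return;
            }
            Err(actual) => panic!("inconsistent park state; actual = {}", actual),
        }

        // The loop also absorbs spurious condvar wakeups.
        while self.state.load(SeqCst) == PARKED {
            guard = self.condvar.wait(guard).unwrap();
        }
        self.state.store(EMPTY, SeqCst);
    }

    fn unpark(&self) {
        // Write NOTIFIED unconditionally so `park` can synchronize with it;
        // only touch the condvar if someone is actually parked on it. Taking
        // the mutex first prevents the notify from racing past the waiter.
        if self.state.swap(NOTIFIED, SeqCst) == PARKED {
            drop(self.mutex.lock().unwrap());
            self.condvar.notify_one();
        }
    }
}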
@@ -121,9 +120,6 @@ struct Core { /// State shared across all workers pub(super) struct Shared { - /// Handle to the I/O driver, timer, blocking spawner, ... - handle_inner: HandleInner, - /// Per-worker remote state. All other workers have access to this and is /// how they communicate between each other. remotes: Box<[Remote]>, @@ -131,13 +127,13 @@ pub(super) struct Shared { /// Global task queue used for: /// 1. Submit work to the scheduler while **not** currently on a worker thread. /// 2. Submit work to the scheduler when a worker run queue is saturated - inject: Inject>, + inject: Inject>, /// Coordinates idle workers idle: Idle, /// Collection of all active tasks spawned onto this executor. - owned: OwnedTasks>, + pub(super) owned: OwnedTasks>, /// Cores that have observed the shutdown signal /// @@ -158,7 +154,7 @@ pub(super) struct Shared { /// Used to communicate with a worker from other threads. struct Remote { /// Steals tasks from this worker. - steal: queue::Steal>, + steal: queue::Steal>, /// Unparks the associated worker thread unpark: Unparker, @@ -182,10 +178,10 @@ pub(crate) struct Launch(Vec>); type RunResult = Result, ()>; /// A task handle -type Task = task::Task>; +type Task = task::Task>; /// A notified task handle -type Notified = task::Notified>; +type Notified = task::Notified>; // Tracks thread-local state scoped_thread_local!(static CURRENT: Context); @@ -193,9 +189,11 @@ scoped_thread_local!(static CURRENT: Context); pub(super) fn create( size: usize, park: Parker, - handle_inner: HandleInner, + driver_handle: driver::Handle, + blocking_spawner: blocking::Spawner, + seed_generator: RngSeedGenerator, config: Config, -) -> (Arc, Launch) { +) -> (Arc, Launch) { let mut cores = Vec::with_capacity(size); let mut remotes = Vec::with_capacity(size); let mut worker_metrics = Vec::with_capacity(size); @@ -215,38 +213,43 @@ pub(super) fn create( is_shutdown: false, park: Some(park), metrics: MetricsBatch::new(), - rand: FastRand::new(seed()), + rand: FastRand::new(config.seed_generator.next_seed()), })); remotes.push(Remote { steal, unpark }); worker_metrics.push(WorkerMetrics::new()); } - let shared = Arc::new(Shared { - handle_inner, - remotes: remotes.into_boxed_slice(), - inject: Inject::new(), - idle: Idle::new(size), - owned: OwnedTasks::new(), - shutdown_cores: Mutex::new(vec![]), - config, - scheduler_metrics: SchedulerMetrics::new(), - worker_metrics: worker_metrics.into_boxed_slice(), + let handle = Arc::new(Handle { + shared: Shared { + remotes: remotes.into_boxed_slice(), + inject: Inject::new(), + idle: Idle::new(size), + owned: OwnedTasks::new(), + shutdown_cores: Mutex::new(vec![]), + config, + scheduler_metrics: SchedulerMetrics::new(), + worker_metrics: worker_metrics.into_boxed_slice(), + }, + driver: driver_handle, + blocking_spawner, + seed_generator, }); let mut launch = Launch(vec![]); for (index, core) in cores.drain(..).enumerate() { launch.0.push(Arc::new(Worker { - shared: shared.clone(), + handle: handle.clone(), index, core: AtomicCell::new(Some(core)), })); } - (shared, launch) + (handle, launch) } +#[track_caller] pub(crate) fn block_in_place(f: F) -> R where F: FnOnce() -> R, @@ -273,7 +276,7 @@ where let mut had_entered = false; - CURRENT.with(|maybe_cx| { + let setup_result = CURRENT.with(|maybe_cx| { match (crate::runtime::enter::context(), maybe_cx.is_some()) { (EnterContext::Entered { .. 
}, true) => { // We are on a thread pool runtime thread, so we just need to @@ -286,22 +289,24 @@ where // method: if allow_blocking { had_entered = true; - return; + return Ok(()); } else { // This probably means we are on the current_thread runtime or in a // LocalSet, where it is _not_ okay to block. - panic!("can call blocking only when running on the multi-threaded runtime"); + return Err( + "can call blocking only when running on the multi-threaded runtime", + ); } } (EnterContext::NotEntered, true) => { // This is a nested call to block_in_place (we already exited). // All the necessary setup has already been done. - return; + return Ok(()); } (EnterContext::NotEntered, false) => { // We are outside of the tokio runtime, so blocking is fine. // We can also skip all of the thread pool blocking setup steps. - return; + return Ok(()); } } @@ -310,7 +315,7 @@ where // Get the worker core. If none is set, then blocking is fine! let core = match cx.core.borrow_mut().take() { Some(core) => core, - None => return, + None => return Ok(()), }; // The parker should be set here @@ -329,8 +334,13 @@ where // steal the core back. let worker = cx.worker.clone(); runtime::spawn_blocking(move || run(worker)); + Ok(()) }); + if let Err(panic_message) = setup_result { + panic!("{}", panic_message); + } + if had_entered { // Unset the current task's budget. Blocking sections are not // constrained by task budgets. @@ -401,12 +411,12 @@ impl Context { core.pre_shutdown(&self.worker); // Signal shutdown - self.worker.shared.shutdown(core); + self.worker.handle.shutdown_core(core); Err(()) } fn run_task(&self, task: Notified, mut core: Box) -> RunResult { - let task = self.worker.shared.owned.assert_owner(task); + let task = self.worker.handle.shared.owned.assert_owner(task); // Make sure the worker is not in the **searching** state. This enables // another idle worker to try to steal work. @@ -440,7 +450,7 @@ impl Context { // Run the LIFO task, then loop core.metrics.incr_poll_count(); *self.core.borrow_mut() = Some(core); - let task = self.worker.shared.owned.assert_owner(task); + let task = self.worker.handle.shared.owned.assert_owner(task); task.run(); } else { // Not enough budget left to run the LIFO task, push it to @@ -454,7 +464,7 @@ impl Context { } fn maintenance(&self, mut core: Box) -> Box { - if core.tick % self.worker.shared.config.event_interval == 0 { + if core.tick % self.worker.handle.shared.config.event_interval == 0 { // Call `park` with a 0 timeout. This enables the I/O driver, timer, ... // to run without actually putting the thread to sleep. 
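// For context, the Result-returning closure introduced in block_in_place above
// exists so the panic fires in the #[track_caller] frame rather than inside
// the thread-local closure, keeping the user's call site in the panic message.
// The pattern in isolation (names are illustrative):

#[track_caller]
fn block_in_place_sketch(allow_blocking: bool) {
    let setup_result: Result<(), &'static str> = with_context(|on_worker| {
        if on_worker && !allow_blocking {
            // Returning the message instead of panicking here defers the
            // panic to a frame whose reported location is the user's code.
            return Err("can call blocking only when running on the multi-threaded runtime");
        }
        Ok(())
    });

    if let Err(panic_message) = setup_result {
        panic!("{}", panic_message);
    }
}

fn with_context<R>(f: impl FnOnce(bool) -> R) -> R {
    // Stand-in for the CURRENT scoped-thread-local lookup.
    f(false)
}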
core = self.park_timeout(core, Some(Duration::from_millis(0))); @@ -478,7 +488,7 @@ impl Context { /// Also, we rely on the workstealing algorithm to spread the tasks amongst workers /// after all the IOs get dispatched fn park(&self, mut core: Box) -> Box { - if let Some(f) = &self.worker.shared.config.before_park { + if let Some(f) = &self.worker.handle.shared.config.before_park { f(); } @@ -497,7 +507,7 @@ impl Context { } } - if let Some(f) = &self.worker.shared.config.after_unpark { + if let Some(f) = &self.worker.handle.shared.config.after_unpark { f(); } core @@ -512,9 +522,9 @@ impl Context { // Park thread if let Some(timeout) = duration { - park.park_timeout(timeout).expect("park failed"); + park.park_timeout(timeout); } else { - park.park().expect("park failed"); + park.park(); } // Remove `core` from context @@ -526,7 +536,7 @@ impl Context { // If there are tasks available to steal, but this worker is not // looking for tasks to steal, notify another worker. if !core.is_searching && core.run_queue.is_stealable() { - self.worker.shared.notify_parked(); + self.worker.handle.notify_parked(); } core @@ -541,7 +551,7 @@ impl Core { /// Return the next notified task available to this worker. fn next_task(&mut self, worker: &Worker) -> Option { - if self.tick % worker.shared.config.global_queue_interval == 0 { + if self.tick % worker.handle.shared.config.global_queue_interval == 0 { worker.inject().pop().or_else(|| self.next_local_task()) } else { self.next_local_task().or_else(|| worker.inject().pop()) @@ -562,7 +572,7 @@ impl Core { return None; } - let num = worker.shared.remotes.len(); + let num = worker.handle.shared.remotes.len(); // Start from a random worker let start = self.rand.fastrand_n(num as u32) as usize; @@ -574,7 +584,7 @@ impl Core { continue; } - let target = &worker.shared.remotes[i]; + let target = &worker.handle.shared.remotes[i]; if let Some(task) = target .steal .steal_into(&mut self.run_queue, &mut self.metrics) @@ -584,12 +594,12 @@ impl Core { } // Fallback on checking the global queue - worker.shared.inject.pop() + worker.handle.shared.inject.pop() } fn transition_to_searching(&mut self, worker: &Worker) -> bool { if !self.is_searching { - self.is_searching = worker.shared.idle.transition_worker_to_searching(); + self.is_searching = worker.handle.shared.idle.transition_worker_to_searching(); } self.is_searching @@ -601,7 +611,7 @@ impl Core { } self.is_searching = false; - worker.shared.transition_worker_from_searching(); + worker.handle.transition_worker_from_searching(); } /// Prepares the worker state for parking. @@ -617,6 +627,7 @@ impl Core { // must check all the queues one last time in case work materialized // between the last work scan and transitioning out of searching. let is_last_searcher = worker + .handle .shared .idle .transition_worker_to_parked(worker.index, self.is_searching); @@ -626,7 +637,7 @@ impl Core { self.is_searching = false; if is_last_searcher { - worker.shared.notify_if_work_pending(); + worker.handle.notify_if_work_pending(); } true @@ -641,11 +652,11 @@ impl Core { // state when the wake originates from another worker *or* a new task // is pushed. We do *not* want the worker to transition to "searching" // when it wakes when the I/O driver receives new events. 
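// For context, the steal scan above starts from a random victim, walks every
// remote exactly once while skipping itself, and lets the caller fall back to
// the global injection queue. The scan in isolation, with a plain Mutex<Vec>
// standing in for the real stealable queue:

use std::sync::Mutex;

struct Remote {
    queue: Mutex<Vec<u32>>,
}

fn steal_work(remotes: &[Remote], me: usize, start: usize) -> Option<u32> {
    let num = remotes.len();
    for offset in 0..num {
        let i = (start + offset) % num;
        if i == me {
            continue; // never steal from ourselves
        }
        if let Some(task) = remotes[i].queue.lock().unwrap().pop() {
            return Some(task);
        }
    }
    // Caller checks the shared injection queue after a full, empty scan.
    None
}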
- self.is_searching = !worker.shared.idle.unpark_worker_by_id(worker.index); + self.is_searching = !worker.handle.shared.idle.unpark_worker_by_id(worker.index); return true; } - if worker.shared.idle.is_parked(worker.index) { + if worker.handle.shared.idle.is_parked(worker.index) { return false; } @@ -657,7 +668,7 @@ impl Core { /// Runs maintenance work such as checking the pool's state. fn maintenance(&mut self, worker: &Worker) { self.metrics - .submit(&worker.shared.worker_metrics[worker.index]); + .submit(&worker.handle.shared.worker_metrics[worker.index]); if !self.is_shutdown { // Check if the scheduler has been shutdown @@ -669,10 +680,10 @@ impl Core { /// before we enter the single-threaded phase of shutdown processing. fn pre_shutdown(&mut self, worker: &Worker) { // Signal to all tasks to shut down. - worker.shared.owned.close_and_shutdown_all(); + worker.handle.shared.owned.close_and_shutdown_all(); self.metrics - .submit(&worker.shared.worker_metrics[worker.index]); + .submit(&worker.handle.shared.worker_metrics[worker.index]); } /// Shuts down the core. @@ -689,53 +700,32 @@ impl Core { impl Worker { /// Returns a reference to the scheduler's injection queue. - fn inject(&self) -> &Inject> { - &self.shared.inject + fn inject(&self) -> &Inject> { + &self.handle.shared.inject } } -impl task::Schedule for Arc { +// TODO: Move `Handle` impls into handle.rs +impl task::Schedule for Arc { fn release(&self, task: &Task) -> Option { - self.owned.remove(task) + self.shared.owned.remove(task) } fn schedule(&self, task: Notified) { - (**self).schedule(task, false); + self.schedule_task(task, false); } fn yield_now(&self, task: Notified) { - (**self).schedule(task, true); + self.schedule_task(task, true); } } -impl Shared { - pub(crate) fn as_handle_inner(&self) -> &HandleInner { - &self.handle_inner - } - - pub(super) fn bind_new_task( - me: &Arc, - future: T, - id: crate::runtime::task::Id, - ) -> JoinHandle - where - T: Future + Send + 'static, - T::Output: Send + 'static, - { - let (handle, notified) = me.owned.bind(future, me.clone(), id); - - if let Some(notified) = notified { - me.schedule(notified, false); - } - - handle - } - - pub(super) fn schedule(&self, task: Notified, is_yield: bool) { +impl Handle { + pub(super) fn schedule_task(&self, task: Notified, is_yield: bool) { CURRENT.with(|maybe_cx| { if let Some(cx) = maybe_cx { // Make sure the task is part of the **current** scheduler. - if self.ptr_eq(&cx.worker.shared) { + if self.ptr_eq(&cx.worker.handle) { // And the current thread still holds a core if let Some(core) = cx.core.borrow_mut().as_mut() { self.schedule_local(core, task, is_yield); @@ -745,8 +735,8 @@ impl Shared { } // Otherwise, use the inject queue. - self.inject.push(task); - self.scheduler_metrics.inc_remote_schedule_count(); + self.shared.inject.push(task); + self.shared.scheduler_metrics.inc_remote_schedule_count(); self.notify_parked(); }) } @@ -758,9 +748,9 @@ impl Shared { // task must always be pushed to the back of the queue, enabling other // tasks to be executed. If **not** a yield, then there is more // flexibility and the task may go to the front of the queue. 
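// For context, the `should_notify` logic that follows implements the LIFO-slot
// policy described in the comment above. A condensed sketch (the return value
// says whether another worker should be notified; types are stand-ins and the
// exact notification rules here are a simplification):

use std::collections::VecDeque;

struct Core {
    lifo_slot: Option<u32>,
    run_queue: VecDeque<u32>,
}

fn schedule_local(core: &mut Core, task: u32, is_yield: bool, disable_lifo_slot: bool) -> bool {
    if is_yield || disable_lifo_slot {
        // Yields always go to the back so sibling tasks get a turn.
        core.run_queue.push_back(task);
        return true;
    }

    // Otherwise the new task takes the LIFO slot, which this worker polls
    // next; any previous occupant is demoted to the run queue.
    if let Some(prev) = core.lifo_slot.replace(task) {
        core.run_queue.push_back(prev);
        return true; // the demoted task is stealable, so notify
    }

    // Nothing became stealable; no wakeup needed.
    false
}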
- let should_notify = if is_yield || self.config.disable_lifo_slot { + let should_notify = if is_yield || self.shared.config.disable_lifo_slot { core.run_queue - .push_back(task, &self.inject, &mut core.metrics); + .push_back(task, &self.shared.inject, &mut core.metrics); true } else { // Push to the LIFO slot @@ -769,7 +759,7 @@ impl Shared { if let Some(prev) = prev { core.run_queue - .push_back(prev, &self.inject, &mut core.metrics); + .push_back(prev, &self.shared.inject, &mut core.metrics); } core.lifo_slot = Some(task); @@ -786,38 +776,38 @@ impl Shared { } pub(super) fn close(&self) { - if self.inject.close() { + if self.shared.inject.close() { self.notify_all(); } } fn notify_parked(&self) { - if let Some(index) = self.idle.worker_to_notify() { - self.remotes[index].unpark.unpark(); + if let Some(index) = self.shared.idle.worker_to_notify() { + self.shared.remotes[index].unpark.unpark(&self.driver); } } fn notify_all(&self) { - for remote in &self.remotes[..] { - remote.unpark.unpark(); + for remote in &self.shared.remotes[..] { + remote.unpark.unpark(&self.driver); } } fn notify_if_work_pending(&self) { - for remote in &self.remotes[..] { + for remote in &self.shared.remotes[..] { if !remote.steal.is_empty() { self.notify_parked(); return; } } - if !self.inject.is_empty() { + if !self.shared.inject.is_empty() { self.notify_parked(); } } fn transition_worker_from_searching(&self) { - if self.idle.transition_worker_from_searching() { + if self.shared.idle.transition_worker_from_searching() { // We are the final searching worker. Because work was found, we // need to notify another worker. self.notify_parked(); @@ -828,15 +818,15 @@ impl Shared { /// its core back into its handle. /// /// If all workers have reached this point, the final cleanup is performed. - fn shutdown(&self, core: Box) { - let mut cores = self.shutdown_cores.lock(); + fn shutdown_core(&self, core: Box) { + let mut cores = self.shared.shutdown_cores.lock(); cores.push(core); - if cores.len() != self.remotes.len() { + if cores.len() != self.shared.remotes.len() { return; } - debug_assert!(self.owned.is_empty()); + debug_assert!(self.shared.owned.is_empty()); for mut core in cores.drain(..) { core.shutdown(); @@ -845,29 +835,16 @@ impl Shared { // Drain the injection queue // // We already shut down every task, so we can simply drop the tasks. - while let Some(task) = self.inject.pop() { + while let Some(task) = self.shared.inject.pop() { drop(task); } } - fn ptr_eq(&self, other: &Shared) -> bool { + fn ptr_eq(&self, other: &Handle) -> bool { std::ptr::eq(self, other) } } -impl crate::runtime::ToHandle for Arc { - fn to_handle(&self) -> crate::runtime::Handle { - use crate::runtime::scheduler::multi_thread::Spawner; - use crate::runtime::{self, Handle}; - - Handle { - spawner: runtime::Spawner::MultiThread(Spawner { - shared: self.clone(), - }), - } - } -} - cfg_metrics! { impl Shared { pub(super) fn injection_queue_depth(&self) -> usize { diff --git a/tokio/src/runtime/spawner.rs b/tokio/src/runtime/spawner.rs deleted file mode 100644 index 87fc3666588..00000000000 --- a/tokio/src/runtime/spawner.rs +++ /dev/null @@ -1,93 +0,0 @@ -use crate::future::Future; -use crate::runtime::scheduler::current_thread; -use crate::runtime::task::Id; -use crate::runtime::HandleInner; -use crate::task::JoinHandle; - -cfg_rt_multi_thread! 
{ - use crate::runtime::scheduler::multi_thread; -} - -#[derive(Debug, Clone)] -pub(crate) enum Spawner { - CurrentThread(current_thread::Spawner), - #[cfg(all(feature = "rt-multi-thread", not(tokio_wasi)))] - MultiThread(multi_thread::Spawner), -} - -impl Spawner { - pub(crate) fn shutdown(&mut self) { - #[cfg(all(feature = "rt-multi-thread", not(tokio_wasi)))] - { - if let Spawner::MultiThread(spawner) = self { - spawner.shutdown(); - } - } - } - - pub(crate) fn spawn(&self, future: F, id: Id) -> JoinHandle - where - F: Future + Send + 'static, - F::Output: Send + 'static, - { - match self { - Spawner::CurrentThread(spawner) => spawner.spawn(future, id), - #[cfg(all(feature = "rt-multi-thread", not(tokio_wasi)))] - Spawner::MultiThread(spawner) => spawner.spawn(future, id), - } - } - - pub(crate) fn as_handle_inner(&self) -> &HandleInner { - match self { - Spawner::CurrentThread(spawner) => spawner.as_handle_inner(), - #[cfg(all(feature = "rt-multi-thread", not(tokio_wasi)))] - Spawner::MultiThread(spawner) => spawner.as_handle_inner(), - } - } -} - -cfg_metrics! { - use crate::runtime::{SchedulerMetrics, WorkerMetrics}; - - impl Spawner { - pub(crate) fn num_workers(&self) -> usize { - match self { - Spawner::CurrentThread(_) => 1, - #[cfg(all(feature = "rt-multi-thread", not(tokio_wasi)))] - Spawner::MultiThread(spawner) => spawner.num_workers(), - } - } - - pub(crate) fn scheduler_metrics(&self) -> &SchedulerMetrics { - match self { - Spawner::CurrentThread(spawner) => spawner.scheduler_metrics(), - #[cfg(all(feature = "rt-multi-thread", not(tokio_wasi)))] - Spawner::MultiThread(spawner) => spawner.scheduler_metrics(), - } - } - - pub(crate) fn worker_metrics(&self, worker: usize) -> &WorkerMetrics { - match self { - Spawner::CurrentThread(spawner) => spawner.worker_metrics(worker), - #[cfg(all(feature = "rt-multi-thread", not(tokio_wasi)))] - Spawner::MultiThread(spawner) => spawner.worker_metrics(worker), - } - } - - pub(crate) fn injection_queue_depth(&self) -> usize { - match self { - Spawner::CurrentThread(spawner) => spawner.injection_queue_depth(), - #[cfg(all(feature = "rt-multi-thread", not(tokio_wasi)))] - Spawner::MultiThread(spawner) => spawner.injection_queue_depth(), - } - } - - pub(crate) fn worker_local_queue_depth(&self, worker: usize) -> usize { - match self { - Spawner::CurrentThread(spawner) => spawner.worker_metrics(worker).queue_depth(), - #[cfg(all(feature = "rt-multi-thread", not(tokio_wasi)))] - Spawner::MultiThread(spawner) => spawner.worker_local_queue_depth(worker), - } - } - } -} diff --git a/tokio/src/runtime/task/mod.rs b/tokio/src/runtime/task/mod.rs index 145f3f2cec5..5a2c11bd3ea 100644 --- a/tokio/src/runtime/task/mod.rs +++ b/tokio/src/runtime/task/mod.rs @@ -508,11 +508,17 @@ impl Id { cfg_not_has_atomic_u64! 
{ pub(crate) fn next() -> Self { - use once_cell::sync::Lazy; + use crate::util::once_cell::OnceCell; use crate::loom::sync::Mutex; - static NEXT_ID: Lazy> = Lazy::new(|| Mutex::new(1)); - let mut lock = NEXT_ID.lock(); + fn init_next_id() -> Mutex { + Mutex::new(1) + } + + static NEXT_ID: OnceCell> = OnceCell::new(); + + let next_id = NEXT_ID.get(init_next_id); + let mut lock = next_id.lock(); let id = *lock; *lock += 1; Self(id) diff --git a/tokio/src/runtime/tests/task_combinations.rs b/tokio/src/runtime/tests/task_combinations.rs index 5c7a0b0109b..73a20d97600 100644 --- a/tokio/src/runtime/tests/task_combinations.rs +++ b/tokio/src/runtime/tests/task_combinations.rs @@ -1,3 +1,4 @@ +use std::fmt; use std::future::Future; use std::panic; use std::pin::Pin; @@ -149,6 +150,8 @@ fn test_combinations() { } } +fn is_debug(_: &T) {} + #[allow(clippy::too_many_arguments)] fn test_combination( rt: CombiRuntime, @@ -184,7 +187,15 @@ fn test_combination( return; } - println!("Runtime {:?}, LocalSet {:?}, Task {:?}, Output {:?}, JoinInterest {:?}, JoinHandle {:?}, AbortHandle {:?}, Abort {:?} ({:?})", rt, ls, task, output, ji, jh, ah, abort, abort_src); + is_debug(&rt); + is_debug(&ls); + is_debug(&task); + is_debug(&output); + is_debug(&ji); + is_debug(&jh); + is_debug(&ah); + is_debug(&abort); + is_debug(&abort_src); // A runtime optionally with a LocalSet struct Rt { diff --git a/tokio/src/time/driver/entry.rs b/tokio/src/runtime/time/entry.rs similarity index 96% rename from tokio/src/time/driver/entry.rs rename to tokio/src/runtime/time/entry.rs index 48856bf3223..3b8907add7d 100644 --- a/tokio/src/time/driver/entry.rs +++ b/tokio/src/runtime/time/entry.rs @@ -58,12 +58,11 @@ use crate::loom::cell::UnsafeCell; use crate::loom::sync::atomic::AtomicU64; use crate::loom::sync::atomic::Ordering; +use crate::runtime::scheduler; use crate::sync::AtomicWaker; use crate::time::Instant; use crate::util::linked_list; -use super::Handle; - use std::cell::UnsafeCell as StdUnsafeCell; use std::task::{Context, Poll, Waker}; use std::{marker::PhantomPinned, pin::Pin, ptr::NonNull}; @@ -283,10 +282,10 @@ impl StateCell { /// timer. As this participates in intrusive data structures, it must be pinned /// before polling. #[derive(Debug)] -pub(super) struct TimerEntry { - /// Arc reference to the driver. We can only free the driver after +pub(crate) struct TimerEntry { + /// Arc reference to the runtime handle. We can only free the driver after /// deregistering everything from their respective timer wheels. - driver: Handle, + driver: scheduler::Handle, /// Shared inner structure; this is part of an intrusive linked list, and /// therefore other references can exist to it while mutable references to /// Entry exist. @@ -490,7 +489,11 @@ unsafe impl linked_list::Link for TimerShared { // ===== impl Entry ===== impl TimerEntry { - pub(crate) fn new(handle: &Handle, deadline: Instant) -> Self { + #[track_caller] + pub(crate) fn new(handle: &scheduler::Handle, deadline: Instant) -> Self { + // Panic if the time driver is not enabled + let _ = handle.time(); + let driver = handle.clone(); Self { @@ -533,20 +536,21 @@ impl TimerEntry { // driver did so far and happens-before everything the driver does in // the future. While we have the lock held, we also go ahead and // deregister the entry if necessary. 
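The `Id::next` fallback shown at the start of this hunk lazily initializes a mutex-guarded counter through Tokio's new internal `util::once_cell::OnceCell`. A standalone sketch of the same pattern, substituting std's `OnceLock` for the internal cell:

```rust
use std::sync::{Mutex, OnceLock};

// Monotonically increasing IDs without `AtomicU64`: lazily initialize a
// mutex-guarded counter, then increment it under the lock.
fn next_id() -> u64 {
    static NEXT_ID: OnceLock<Mutex<u64>> = OnceLock::new();

    let mut lock = NEXT_ID.get_or_init(|| Mutex::new(1)).lock().unwrap();
    let id = *lock;
    *lock += 1;
    id
}
```
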
- unsafe { self.driver.clear_entry(NonNull::from(self.inner())) }; + unsafe { self.driver().clear_entry(NonNull::from(self.inner())) }; } pub(crate) fn reset(mut self: Pin<&mut Self>, new_time: Instant) { unsafe { self.as_mut().get_unchecked_mut() }.initial_deadline = None; - let tick = self.driver.time_source().deadline_to_tick(new_time); + let tick = self.driver().time_source().deadline_to_tick(new_time); if self.inner().extend_expiration(tick).is_ok() { return; } unsafe { - self.driver.reregister(tick, self.inner().into()); + self.driver() + .reregister(&self.driver.driver().io, tick, self.inner().into()); } } @@ -554,7 +558,7 @@ impl TimerEntry { mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { - if self.driver.is_shutdown() { + if self.driver().is_shutdown() { panic!("{}", crate::util::error::RUNTIME_SHUTTING_DOWN_ERROR); } @@ -566,6 +570,10 @@ impl TimerEntry { this.inner().state.poll(cx.waker()) } + + fn driver(&self) -> &super::Handle { + self.driver.time() + } } impl TimerHandle { diff --git a/tokio/src/time/driver/handle.rs b/tokio/src/runtime/time/handle.rs similarity index 58% rename from tokio/src/time/driver/handle.rs rename to tokio/src/runtime/time/handle.rs index 136919d9e78..8338f2b5b12 100644 --- a/tokio/src/time/driver/handle.rs +++ b/tokio/src/runtime/time/handle.rs @@ -1,12 +1,12 @@ use crate::loom::sync::Arc; -use crate::time::driver::ClockTime; +use crate::runtime::time::TimeSource; use std::fmt; /// Handle to time driver instance. #[derive(Clone)] pub(crate) struct Handle { - time_source: ClockTime, - inner: Arc, + time_source: TimeSource, + pub(super) inner: Arc, } impl Handle { @@ -17,7 +17,7 @@ impl Handle { } /// Returns the time source associated with this handle. - pub(super) fn time_source(&self) -> &ClockTime { + pub(crate) fn time_source(&self) -> &TimeSource { &self.time_source } @@ -30,34 +30,13 @@ impl Handle { pub(super) fn is_shutdown(&self) -> bool { self.inner.is_shutdown() } -} -cfg_rt! { - impl Handle { - /// Tries to get a handle to the current timer. - /// - /// # Panics - /// - /// This function panics if there is no current timer set. - /// - /// It can be triggered when [`Builder::enable_time`] or - /// [`Builder::enable_all`] are not included in the builder. - /// - /// It can also panic whenever a timer is created outside of a - /// Tokio runtime. That is why `rt.block_on(sleep(...))` will panic, - /// since the function is executed outside of the runtime. - /// Whereas `rt.block_on(async {sleep(...).await})` doesn't panic. - /// And this is because wrapping the function on an async makes it lazy, - /// and so gets executed inside the runtime successfully without - /// panicking. - /// - /// [`Builder::enable_time`]: crate::runtime::Builder::enable_time - /// [`Builder::enable_all`]: crate::runtime::Builder::enable_all - #[track_caller] - pub(crate) fn current() -> Self { - crate::runtime::context::time_handle() - .expect("A Tokio 1.x context was found, but timers are disabled. 
Call `enable_time` on the runtime builder to enable timers.") - } + /// Track that the driver is being unparked + pub(crate) fn unpark(&self) { + #[cfg(feature = "test-util")] + self.inner + .did_wake + .store(true, std::sync::atomic::Ordering::SeqCst); } } diff --git a/tokio/src/time/driver/mod.rs b/tokio/src/runtime/time/mod.rs similarity index 73% rename from tokio/src/time/driver/mod.rs rename to tokio/src/runtime/time/mod.rs index 99718774793..ec4333bf8df 100644 --- a/tokio/src/time/driver/mod.rs +++ b/tokio/src/runtime/time/mod.rs @@ -7,22 +7,23 @@ //! Time driver. mod entry; -pub(self) use self::entry::{EntryList, TimerEntry, TimerHandle, TimerShared}; +pub(crate) use entry::TimerEntry; +use entry::{EntryList, TimerHandle, TimerShared}; mod handle; pub(crate) use self::handle::Handle; -mod wheel; +mod source; +pub(crate) use source::TimeSource; -pub(super) mod sleep; +mod wheel; use crate::loom::sync::atomic::{AtomicBool, Ordering}; use crate::loom::sync::{Arc, Mutex}; -use crate::park::{Park, Unpark}; +use crate::runtime::driver::{IoHandle, IoStack}; use crate::time::error::Error; -use crate::time::{Clock, Duration, Instant}; +use crate::time::{Clock, Duration}; -use std::convert::TryInto; use std::fmt; use std::{num::NonZeroU64, ptr::NonNull, task::Waker}; @@ -82,15 +83,12 @@ use std::{num::NonZeroU64, ptr::NonNull, task::Waker}; /// [timeout]: crate::time::Timeout /// [interval]: crate::time::Interval #[derive(Debug)] -pub(crate) struct Driver { +pub(crate) struct Driver { /// Timing backend in use. - time_source: ClockTime, - - /// Shared state. - handle: Handle, + time_source: TimeSource, /// Parker to delegate to. - park: P, + park: IoStack, // When `true`, a call to `park_timeout` should immediately return and time // should not advance. One reason for this to be `true` is if the task @@ -102,45 +100,6 @@ pub(crate) struct Driver { did_wake: Arc, } -/// A structure which handles conversion from Instants to u64 timestamps. -#[derive(Debug, Clone)] -pub(self) struct ClockTime { - clock: super::clock::Clock, - start_time: Instant, -} - -impl ClockTime { - pub(self) fn new(clock: Clock) -> Self { - Self { - start_time: clock.now(), - clock, - } - } - - pub(self) fn deadline_to_tick(&self, t: Instant) -> u64 { - // Round up to the end of a ms - self.instant_to_tick(t + Duration::from_nanos(999_999)) - } - - pub(self) fn instant_to_tick(&self, t: Instant) -> u64 { - // round up - let dur: Duration = t - .checked_duration_since(self.start_time) - .unwrap_or_else(|| Duration::from_secs(0)); - let ms = dur.as_millis(); - - ms.try_into().unwrap_or(u64::MAX) - } - - pub(self) fn tick_to_duration(&self, t: u64) -> Duration { - Duration::from_millis(t) - } - - pub(self) fn now(&self) -> u64 { - self.instant_to_tick(self.clock.now()) - } -} - /// Timer state shared between `Driver`, `Handle`, and `Registration`. struct Inner { // The state is split like this so `Handle` can access `is_shutdown` without locking the mutex @@ -148,12 +107,15 @@ struct Inner { /// True if the driver is being shutdown. pub(super) is_shutdown: AtomicBool, + + #[cfg(feature = "test-util")] + did_wake: Arc, } /// Time state shared which must be protected by a `Mutex` struct InnerState { /// Timing backend in use. - time_source: ClockTime, + time_source: TimeSource, /// The last published timer `elapsed` value. elapsed: u64, @@ -163,49 +125,72 @@ struct InnerState { /// Timer wheel. wheel: wheel::Wheel, - - /// Unparker that can be used to wake the time driver. - unpark: Box, } // ===== impl Driver ===== -impl

<P> Driver<P>
-where
-    P: Park + 'static,
-{
+impl Driver {
     /// Creates a new `Driver` instance that uses `park` to block the current
     /// thread and `time_source` to get the current time and convert to ticks.
     ///
     /// Specifying the source of time is useful when testing.
-    pub(crate) fn new(park: P, clock: Clock) -> Driver<P>

{ - let time_source = ClockTime::new(clock); + pub(crate) fn new(park: IoStack, clock: Clock) -> (Driver, Handle) { + let time_source = TimeSource::new(clock); - let inner = Inner::new(time_source.clone(), Box::new(park.unpark())); + #[cfg(feature = "test-util")] + let did_wake = Arc::new(AtomicBool::new(false)); - Driver { + let inner = Arc::new(Inner { + state: Mutex::new(InnerState { + time_source: time_source.clone(), + elapsed: 0, + next_wake: None, + wheel: wheel::Wheel::new(), + }), + is_shutdown: AtomicBool::new(false), + + #[cfg(feature = "test-util")] + did_wake: did_wake.clone(), + }); + + let handle = Handle::new(inner); + + let driver = Driver { time_source, - handle: Handle::new(Arc::new(inner)), park, #[cfg(feature = "test-util")] - did_wake: Arc::new(AtomicBool::new(false)), - } + did_wake, + }; + + (driver, handle) } - /// Returns a handle to the timer. - /// - /// The `Handle` is how `Sleep` instances are created. The `Sleep` instances - /// can either be created directly or the `Handle` instance can be passed to - /// `with_default`, setting the timer as the default timer for the execution - /// context. - pub(crate) fn handle(&self) -> Handle { - self.handle.clone() + pub(crate) fn park(&mut self, handle: &Handle) { + self.park_internal(handle, None) + } + + pub(crate) fn park_timeout(&mut self, handle: &Handle, duration: Duration) { + self.park_internal(handle, Some(duration)) + } + + pub(crate) fn shutdown(&mut self, handle: &Handle) { + if handle.is_shutdown() { + return; + } + + handle.get().is_shutdown.store(true, Ordering::SeqCst); + + // Advance time forward to the end of time. + + handle.process_at_time(u64::MAX); + + self.park.shutdown(); } - fn park_internal(&mut self, limit: Option) -> Result<(), P::Error> { - let mut lock = self.handle.get().state.lock(); + fn park_internal(&mut self, handle: &Handle, limit: Option) { + let mut lock = handle.get().state.lock(); - assert!(!self.handle.is_shutdown()); + assert!(!handle.is_shutdown()); let next_wake = lock.wheel.next_expiration_time(); lock.next_wake = @@ -226,32 +211,30 @@ where duration = std::cmp::min(limit, duration); } - self.park_timeout(duration)?; + self.park_thread_timeout(duration); } else { - self.park.park_timeout(Duration::from_secs(0))?; + self.park.park_timeout(Duration::from_secs(0)); } } None => { if let Some(duration) = limit { - self.park_timeout(duration)?; + self.park_thread_timeout(duration); } else { - self.park.park()?; + self.park.park(); } } } // Process pending timers after waking up - self.handle.process(); - - Ok(()) + handle.process(); } cfg_test_util! { - fn park_timeout(&mut self, duration: Duration) -> Result<(), P::Error> { + fn park_thread_timeout(&mut self, duration: Duration) { let clock = &self.time_source.clock; if clock.is_paused() { - self.park.park_timeout(Duration::from_secs(0))?; + self.park.park_timeout(Duration::from_secs(0)); // If the time driver was woken, then the park completed // before the "duration" elapsed (usually caused by a @@ -262,10 +245,8 @@ where clock.advance(duration); } } else { - self.park.park_timeout(duration)?; + self.park.park_timeout(duration); } - - Ok(()) } fn did_wake(&self) -> bool { @@ -274,8 +255,8 @@ where } cfg_not_test_util! { - fn park_timeout(&mut self, duration: Duration) -> Result<(), P::Error> { - self.park.park_timeout(duration) + fn park_thread_timeout(&mut self, duration: Duration) { + self.park.park_timeout(duration); } } } @@ -370,7 +351,12 @@ impl Handle { /// driver. 
No other threads are allowed to concurrently manipulate the /// timer at all (the current thread should hold an exclusive reference to /// the `TimerEntry`) - pub(self) unsafe fn reregister(&self, new_tick: u64, entry: NonNull) { + pub(self) unsafe fn reregister( + &self, + unpark: &IoHandle, + new_tick: u64, + entry: NonNull, + ) { let waker = unsafe { let mut lock = self.get().lock(); @@ -398,12 +384,12 @@ impl Handle { .map(|next_wake| when < next_wake.get()) .unwrap_or(true) { - lock.unpark.unpark(); + unpark.unpark(); } None } - Err((entry, super::error::InsertError::Elapsed)) => unsafe { + Err((entry, crate::time::error::InsertError::Elapsed)) => unsafe { entry.fire(Ok(())) }, } @@ -421,92 +407,9 @@ impl Handle { } } -impl

<P> Park for Driver<P>
-where
-    P: Park + 'static,
-{
-    type Unpark = TimerUnpark<P>;
-    type Error = P::Error;
-
-    fn unpark(&self) -> Self::Unpark {
-        TimerUnpark::new(self)
-    }
-
-    fn park(&mut self) -> Result<(), Self::Error> {
-        self.park_internal(None)
-    }
-
-    fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> {
-        self.park_internal(Some(duration))
-    }
-
-    fn shutdown(&mut self) {
-        if self.handle.is_shutdown() {
-            return;
-        }
-
-        self.handle.get().is_shutdown.store(true, Ordering::SeqCst);
-
-        // Advance time forward to the end of time.
-
-        self.handle.process_at_time(u64::MAX);
-
-        self.park.shutdown();
-    }
-}
-
-impl<P> Drop for Driver<P>
-where
-    P: Park + 'static,
-{
-    fn drop(&mut self) {
-        self.shutdown();
-    }
-}
-
-pub(crate) struct TimerUnpark<P: Park + 'static> {
-    inner: P::Unpark,
-
-    #[cfg(feature = "test-util")]
-    did_wake: Arc<AtomicBool>,
-}
-
-impl<P: Park + 'static> TimerUnpark<P> {
-    fn new(driver: &Driver<P>) -> TimerUnpark<P> {
-        TimerUnpark {
-            inner: driver.park.unpark(),
-
-            #[cfg(feature = "test-util")]
-            did_wake: driver.did_wake.clone(),
-        }
-    }
-}
-
-impl<P: Park + 'static> Unpark for TimerUnpark<P>
{ - fn unpark(&self) { - #[cfg(feature = "test-util")] - self.did_wake.store(true, Ordering::SeqCst); - - self.inner.unpark(); - } -} - // ===== impl Inner ===== impl Inner { - pub(self) fn new(time_source: ClockTime, unpark: Box) -> Self { - Inner { - state: Mutex::new(InnerState { - time_source, - elapsed: 0, - next_wake: None, - unpark, - wheel: wheel::Wheel::new(), - }), - is_shutdown: AtomicBool::new(false), - } - } - /// Locks the driver's inner structure pub(super) fn lock(&self) -> crate::loom::sync::MutexGuard<'_, InnerState> { self.state.lock() diff --git a/tokio/src/runtime/time/source.rs b/tokio/src/runtime/time/source.rs new file mode 100644 index 00000000000..1cdb86891d6 --- /dev/null +++ b/tokio/src/runtime/time/source.rs @@ -0,0 +1,42 @@ +use crate::time::{Clock, Duration, Instant}; + +use std::convert::TryInto; + +/// A structure which handles conversion from Instants to u64 timestamps. +#[derive(Debug, Clone)] +pub(crate) struct TimeSource { + pub(crate) clock: Clock, + start_time: Instant, +} + +impl TimeSource { + pub(crate) fn new(clock: Clock) -> Self { + Self { + start_time: clock.now(), + clock, + } + } + + pub(crate) fn deadline_to_tick(&self, t: Instant) -> u64 { + // Round up to the end of a ms + self.instant_to_tick(t + Duration::from_nanos(999_999)) + } + + pub(crate) fn instant_to_tick(&self, t: Instant) -> u64 { + // round up + let dur: Duration = t + .checked_duration_since(self.start_time) + .unwrap_or_else(|| Duration::from_secs(0)); + let ms = dur.as_millis(); + + ms.try_into().unwrap_or(u64::MAX) + } + + pub(crate) fn tick_to_duration(&self, t: u64) -> Duration { + Duration::from_millis(t) + } + + pub(crate) fn now(&self) -> u64 { + self.instant_to_tick(self.clock.now()) + } +} diff --git a/tokio/src/time/driver/tests/mod.rs b/tokio/src/runtime/time/tests/mod.rs similarity index 54% rename from tokio/src/time/driver/tests/mod.rs rename to tokio/src/runtime/time/tests/mod.rs index efccd57253a..1ba95f13de5 100644 --- a/tokio/src/time/driver/tests/mod.rs +++ b/tokio/src/runtime/time/tests/mod.rs @@ -5,24 +5,11 @@ use std::{task::Context, time::Duration}; #[cfg(not(loom))] use futures::task::noop_waker_ref; +use crate::loom::sync::atomic::{AtomicBool, Ordering}; use crate::loom::sync::Arc; use crate::loom::thread; -use crate::{ - loom::sync::atomic::{AtomicBool, Ordering}, - park::Unpark, -}; -use super::{Handle, TimerEntry}; - -struct MockUnpark {} -impl Unpark for MockUnpark { - fn unpark(&self) {} -} -impl MockUnpark { - fn mock() -> Box { - Box::new(Self {}) - } -} +use super::TimerEntry; fn block_on(f: impl std::future::Future) -> T { #[cfg(loom)] @@ -45,18 +32,26 @@ fn model(f: impl Fn() + Send + Sync + 'static) { f(); } +fn rt(start_paused: bool) -> crate::runtime::Runtime { + crate::runtime::Builder::new_current_thread() + .enable_time() + .start_paused(start_paused) + .build() + .unwrap() +} + #[test] fn single_timer() { model(|| { - let clock = crate::time::clock::Clock::new(true, false); - let time_source = super::ClockTime::new(clock.clone()); - - let inner = super::Inner::new(time_source.clone(), MockUnpark::mock()); - let handle = Handle::new(Arc::new(inner)); + let rt = rt(false); + let handle = rt.handle(); let handle_ = handle.clone(); let jh = thread::spawn(move || { - let entry = TimerEntry::new(&handle_, clock.now() + Duration::from_secs(1)); + let entry = TimerEntry::new( + &handle_.inner, + handle_.inner.clock().now() + Duration::from_secs(1), + ); pin!(entry); block_on(futures::future::poll_fn(|cx| { @@ -67,10 +62,12 @@ fn 
single_timer() { thread::yield_now(); + let handle = handle.inner.time(); + // This may or may not return Some (depending on how it races with the // thread). If it does return None, however, the timer should complete // synchronously. - handle.process_at_time(time_source.now() + 2_000_000_000); + handle.process_at_time(handle.time_source().now() + 2_000_000_000); jh.join().unwrap(); }) @@ -79,15 +76,15 @@ fn single_timer() { #[test] fn drop_timer() { model(|| { - let clock = crate::time::clock::Clock::new(true, false); - let time_source = super::ClockTime::new(clock.clone()); - - let inner = super::Inner::new(time_source.clone(), MockUnpark::mock()); - let handle = Handle::new(Arc::new(inner)); + let rt = rt(false); + let handle = rt.handle(); let handle_ = handle.clone(); let jh = thread::spawn(move || { - let entry = TimerEntry::new(&handle_, clock.now() + Duration::from_secs(1)); + let entry = TimerEntry::new( + &handle_.inner, + handle_.inner.clock().now() + Duration::from_secs(1), + ); pin!(entry); let _ = entry @@ -100,8 +97,10 @@ fn drop_timer() { thread::yield_now(); + let handle = handle.inner.time(); + // advance 2s in the future. - handle.process_at_time(time_source.now() + 2_000_000_000); + handle.process_at_time(handle.time_source().now() + 2_000_000_000); jh.join().unwrap(); }) @@ -110,15 +109,15 @@ fn drop_timer() { #[test] fn change_waker() { model(|| { - let clock = crate::time::clock::Clock::new(true, false); - let time_source = super::ClockTime::new(clock.clone()); - - let inner = super::Inner::new(time_source.clone(), MockUnpark::mock()); - let handle = Handle::new(Arc::new(inner)); + let rt = rt(false); + let handle = rt.handle(); let handle_ = handle.clone(); let jh = thread::spawn(move || { - let entry = TimerEntry::new(&handle_, clock.now() + Duration::from_secs(1)); + let entry = TimerEntry::new( + &handle_.inner, + handle_.inner.clock().now() + Duration::from_secs(1), + ); pin!(entry); let _ = entry @@ -133,8 +132,10 @@ fn change_waker() { thread::yield_now(); + let handle = handle.inner.time(); + // advance 2s - handle.process_at_time(time_source.now() + 2_000_000_000); + handle.process_at_time(handle.time_source().now() + 2_000_000_000); jh.join().unwrap(); }) @@ -145,18 +146,15 @@ fn reset_future() { model(|| { let finished_early = Arc::new(AtomicBool::new(false)); - let clock = crate::time::clock::Clock::new(true, false); - let time_source = super::ClockTime::new(clock.clone()); - - let inner = super::Inner::new(time_source.clone(), MockUnpark::mock()); - let handle = Handle::new(Arc::new(inner)); + let rt = rt(false); + let handle = rt.handle(); let handle_ = handle.clone(); let finished_early_ = finished_early.clone(); - let start = clock.now(); + let start = handle.inner.clock().now(); let jh = thread::spawn(move || { - let entry = TimerEntry::new(&handle_, start + Duration::from_secs(1)); + let entry = TimerEntry::new(&handle_.inner, start + Duration::from_secs(1)); pin!(entry); let _ = entry @@ -176,12 +174,22 @@ fn reset_future() { thread::yield_now(); + let handle = handle.inner.time(); + // This may or may not return a wakeup time. 
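Before the hunk continues below, a note on the tick arithmetic these tests exercise: `TimeSource` (the renamed `ClockTime`, see the new `source.rs` above) counts whole milliseconds since driver start and rounds deadlines up so a timer never fires early. A self-contained sketch mirroring that conversion:

```rust
use std::convert::TryFrom;
use std::time::{Duration, Instant};

// Ticks are whole milliseconds elapsed since the driver started.
struct TimeSource {
    start_time: Instant,
}

impl TimeSource {
    fn instant_to_tick(&self, t: Instant) -> u64 {
        let dur = t
            .checked_duration_since(self.start_time)
            .unwrap_or(Duration::ZERO);
        u64::try_from(dur.as_millis()).unwrap_or(u64::MAX)
    }

    fn deadline_to_tick(&self, t: Instant) -> u64 {
        // Adding 999_999 ns rounds a deadline up to the end of its
        // millisecond, so the timer cannot fire early.
        self.instant_to_tick(t + Duration::from_nanos(999_999))
    }
}

fn main() {
    let ts = TimeSource { start_time: Instant::now() };
    let deadline = ts.start_time + Duration::from_micros(1500);
    assert_eq!(ts.instant_to_tick(deadline), 1); // truncates: 1.5 ms -> tick 1
    assert_eq!(ts.deadline_to_tick(deadline), 2); // rounds up: 1.5 ms -> tick 2
}
```
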
- handle.process_at_time(time_source.instant_to_tick(start + Duration::from_millis(1500))); + handle.process_at_time( + handle + .time_source() + .instant_to_tick(start + Duration::from_millis(1500)), + ); assert!(!finished_early.load(Ordering::Relaxed)); - handle.process_at_time(time_source.instant_to_tick(start + Duration::from_millis(2500))); + handle.process_at_time( + handle + .time_source() + .instant_to_tick(start + Duration::from_millis(2500)), + ); jh.join().unwrap(); @@ -201,20 +209,15 @@ fn normal_or_miri(normal: T, miri: T) -> T { #[test] #[cfg(not(loom))] fn poll_process_levels() { - let clock = crate::time::clock::Clock::new(true, false); - clock.pause(); - - let time_source = super::ClockTime::new(clock.clone()); - - let inner = super::Inner::new(time_source, MockUnpark::mock()); - let handle = Handle::new(Arc::new(inner)); + let rt = rt(true); + let handle = rt.handle(); let mut entries = vec![]; for i in 0..normal_or_miri(1024, 64) { let mut entry = Box::pin(TimerEntry::new( - &handle, - clock.now() + Duration::from_millis(i), + &handle.inner, + handle.inner.clock().now() + Duration::from_millis(i), )); let _ = entry @@ -225,7 +228,8 @@ fn poll_process_levels() { } for t in 1..normal_or_miri(1024, 64) { - handle.process_at_time(t as u64); + handle.inner.time().process_at_time(t as u64); + for (deadline, future) in entries.iter_mut().enumerate() { let mut context = Context::from_waker(noop_waker_ref()); if deadline <= t { @@ -242,62 +246,19 @@ fn poll_process_levels() { fn poll_process_levels_targeted() { let mut context = Context::from_waker(noop_waker_ref()); - let clock = crate::time::clock::Clock::new(true, false); - clock.pause(); - - let time_source = super::ClockTime::new(clock.clone()); - - let inner = super::Inner::new(time_source, MockUnpark::mock()); - let handle = Handle::new(Arc::new(inner)); + let rt = rt(true); + let handle = rt.handle(); - let e1 = TimerEntry::new(&handle, clock.now() + Duration::from_millis(193)); + let e1 = TimerEntry::new( + &handle.inner, + handle.inner.clock().now() + Duration::from_millis(193), + ); pin!(e1); + let handle = handle.inner.time(); + handle.process_at_time(62); assert!(e1.as_mut().poll_elapsed(&mut context).is_pending()); handle.process_at_time(192); handle.process_at_time(192); } - -/* -#[test] -fn balanced_incr_and_decr() { - const OPS: usize = 5; - - fn incr(inner: Arc) { - for _ in 0..OPS { - inner.increment().expect("increment should not have failed"); - thread::yield_now(); - } - } - - fn decr(inner: Arc) { - let mut ops_performed = 0; - while ops_performed < OPS { - if inner.num(Ordering::Relaxed) > 0 { - ops_performed += 1; - inner.decrement(); - } - thread::yield_now(); - } - } - - loom::model(|| { - let unpark = Box::new(MockUnpark); - let instant = Instant::now(); - - let inner = Arc::new(Inner::new(instant, unpark)); - - let incr_inner = inner.clone(); - let decr_inner = inner.clone(); - - let incr_handle = thread::spawn(move || incr(incr_inner)); - let decr_handle = thread::spawn(move || decr(decr_inner)); - - incr_handle.join().expect("should never fail"); - decr_handle.join().expect("should never fail"); - - assert_eq!(inner.num(Ordering::SeqCst), 0); - }) -} -*/ diff --git a/tokio/src/time/driver/wheel/level.rs b/tokio/src/runtime/time/wheel/level.rs similarity index 98% rename from tokio/src/time/driver/wheel/level.rs rename to tokio/src/runtime/time/wheel/level.rs index 878754177b9..7e48ff5c57d 100644 --- a/tokio/src/time/driver/wheel/level.rs +++ b/tokio/src/runtime/time/wheel/level.rs @@ -1,6 +1,4 @@ 
-use crate::time::driver::TimerHandle; - -use crate::time::driver::{EntryList, TimerShared}; +use crate::runtime::time::{EntryList, TimerHandle, TimerShared}; use std::{fmt, ptr::NonNull}; diff --git a/tokio/src/time/driver/wheel/mod.rs b/tokio/src/runtime/time/wheel/mod.rs similarity index 99% rename from tokio/src/time/driver/wheel/mod.rs rename to tokio/src/runtime/time/wheel/mod.rs index f088f2cfd66..c3ba3643305 100644 --- a/tokio/src/time/driver/wheel/mod.rs +++ b/tokio/src/runtime/time/wheel/mod.rs @@ -1,4 +1,4 @@ -use crate::time::driver::{TimerHandle, TimerShared}; +use crate::runtime::time::{TimerHandle, TimerShared}; use crate::time::error::InsertError; mod level; diff --git a/tokio/src/signal/registry.rs b/tokio/src/signal/registry.rs index 7795ca8dfa8..e1b3d108767 100644 --- a/tokio/src/signal/registry.rs +++ b/tokio/src/signal/registry.rs @@ -1,10 +1,9 @@ #![allow(clippy::unit_arg)] use crate::signal::os::{OsExtraData, OsStorage}; - use crate::sync::watch; +use crate::util::once_cell::OnceCell; -use once_cell::sync::Lazy; use std::ops; use std::pin::Pin; use std::sync::atomic::{AtomicBool, Ordering}; @@ -152,19 +151,25 @@ impl Globals { } } +fn globals_init() -> Globals +where + OsExtraData: 'static + Send + Sync + Init, + OsStorage: 'static + Send + Sync + Init, +{ + Globals { + extra: OsExtraData::init(), + registry: Registry::new(OsStorage::init()), + } +} + pub(crate) fn globals() -> Pin<&'static Globals> where OsExtraData: 'static + Send + Sync + Init, OsStorage: 'static + Send + Sync + Init, { - static GLOBALS: Lazy>> = Lazy::new(|| { - Box::pin(Globals { - extra: OsExtraData::init(), - registry: Registry::new(OsStorage::init()), - }) - }); - - GLOBALS.as_ref() + static GLOBALS: OnceCell = OnceCell::new(); + + Pin::new(GLOBALS.get(globals_init)) } #[cfg(all(test, not(loom)))] diff --git a/tokio/src/signal/unix/driver.rs b/tokio/src/signal/unix/driver.rs index 54959e04df5..327d6425b02 100644 --- a/tokio/src/signal/unix/driver.rs +++ b/tokio/src/signal/unix/driver.rs @@ -4,12 +4,11 @@ use crate::io::interest::Interest; use crate::io::PollEvented; -use crate::park::Park; -use crate::runtime::io::Driver as IoDriver; +use crate::runtime::io; use crate::signal::registry::globals; use mio::net::UnixStream; -use std::io::{self, Read}; +use std::io::{self as std_io, Read}; use std::ptr; use std::sync::{Arc, Weak}; use std::task::{Context, Poll, RawWaker, RawWakerVTable, Waker}; @@ -23,7 +22,7 @@ use std::time::Duration; #[derive(Debug)] pub(crate) struct Driver { /// Thread parker. The `Driver` park implementation delegates to this. - park: IoDriver, + park: io::Driver, /// A pipe for receiving wake events from the signal handler receiver: PollEvented, @@ -44,7 +43,7 @@ pub(super) struct Inner(()); impl Driver { /// Creates a new signal `Driver` instance that delegates wakeups to `park`. 
- pub(crate) fn new(park: IoDriver) -> io::Result { + pub(crate) fn new(park: io::Driver) -> std_io::Result { use std::mem::ManuallyDrop; use std::os::unix::io::{AsRawFd, FromRawFd}; @@ -93,6 +92,20 @@ impl Driver { } } + pub(crate) fn park(&mut self) { + self.park.park(); + self.process(); + } + + pub(crate) fn park_timeout(&mut self, duration: Duration) { + self.park.park_timeout(duration); + self.process(); + } + + pub(crate) fn shutdown(&mut self) { + self.park.shutdown() + } + fn process(&self) { // Check if the pipe is ready to read and therefore has "woken" us up // @@ -114,7 +127,7 @@ impl Driver { match (&*self.receiver).read(&mut buf) { Ok(0) => panic!("EOF on self-pipe"), Ok(_) => continue, // Keep reading - Err(e) if e.kind() == io::ErrorKind::WouldBlock => break, + Err(e) if e.kind() == std_io::ErrorKind::WouldBlock => break, Err(e) => panic!("Bad read on self-pipe: {}", e), } } @@ -134,41 +147,17 @@ unsafe fn noop_clone(_data: *const ()) -> RawWaker { unsafe fn noop(_data: *const ()) {} -// ===== impl Park for Driver ===== - -impl Park for Driver { - type Unpark = ::Unpark; - type Error = io::Error; - - fn unpark(&self) -> Self::Unpark { - self.park.unpark() - } - - fn park(&mut self) -> Result<(), Self::Error> { - self.park.park()?; - self.process(); - Ok(()) - } - - fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> { - self.park.park_timeout(duration)?; - self.process(); - Ok(()) - } - - fn shutdown(&mut self) { - self.park.shutdown() - } -} - // ===== impl Handle ===== impl Handle { - pub(super) fn check_inner(&self) -> io::Result<()> { + pub(super) fn check_inner(&self) -> std_io::Result<()> { if self.inner.strong_count() > 0 { Ok(()) } else { - Err(io::Error::new(io::ErrorKind::Other, "signal driver gone")) + Err(std_io::Error::new( + std_io::ErrorKind::Other, + "signal driver gone", + )) } } } diff --git a/tokio/src/sync/mpsc/chan.rs b/tokio/src/sync/mpsc/chan.rs index a10ffb7d797..f0e9dc27f33 100644 --- a/tokio/src/sync/mpsc/chan.rs +++ b/tokio/src/sync/mpsc/chan.rs @@ -3,7 +3,6 @@ use crate::loom::future::AtomicWaker; use crate::loom::sync::atomic::AtomicUsize; use crate::loom::sync::Arc; use crate::park::thread::CachedParkThread; -use crate::park::Park; use crate::sync::mpsc::error::TryRecvError; use crate::sync::mpsc::list; use crate::sync::notify::Notify; @@ -326,13 +325,13 @@ impl Rx { // Park the thread until the problematic send has completed. let mut park = CachedParkThread::new(); - let waker = park.unpark().into_waker(); + let waker = park.waker().unwrap(); loop { self.inner.rx_waker.register_by_ref(&waker); // It is possible that the problematic send has now completed, // so we have to check for messages again. try_recv!(); - park.park().expect("park failed"); + park.park(); } }) } diff --git a/tokio/src/task/blocking.rs b/tokio/src/task/blocking.rs index bcebbf5edc9..46756a9b302 100644 --- a/tokio/src/task/blocking.rs +++ b/tokio/src/task/blocking.rs @@ -70,6 +70,7 @@ cfg_rt_multi_thread! { /// This function panics if called from a [`current_thread`] runtime. 
/// /// [`current_thread`]: fn@crate::runtime::Builder::new_current_thread + #[track_caller] pub fn block_in_place(f: F) -> R where F: FnOnce() -> R, diff --git a/tokio/src/task/builder.rs b/tokio/src/task/builder.rs index c7a101b294c..007d8a4474a 100644 --- a/tokio/src/task/builder.rs +++ b/tokio/src/task/builder.rs @@ -187,7 +187,7 @@ impl<'a> Builder<'a> { Output: Send + 'static, { use crate::runtime::Mandatory; - let (join_handle, spawn_result) = handle.as_inner().spawn_blocking_inner( + let (join_handle, spawn_result) = handle.inner.blocking_spawner().spawn_blocking_inner( function, Mandatory::NonMandatory, self.name, diff --git a/tokio/src/task/local.rs b/tokio/src/task/local.rs index a5bd1bb8835..0ac28e67fc2 100644 --- a/tokio/src/task/local.rs +++ b/tokio/src/task/local.rs @@ -2,7 +2,7 @@ use crate::loom::sync::{Arc, Mutex}; use crate::runtime::task::{self, JoinHandle, LocalOwnedTasks, Task}; use crate::sync::AtomicWaker; -use crate::util::VecDequeCell; +use crate::util::{RcCell, VecDequeCell}; use std::cell::Cell; use std::collections::VecDeque; @@ -261,7 +261,11 @@ pin_project! { } } -thread_local!(static CURRENT: Cell>> = Cell::new(None)); +#[cfg(any(loom, tokio_no_const_thread_local))] +thread_local!(static CURRENT: RcCell = RcCell::new()); + +#[cfg(not(any(loom, tokio_no_const_thread_local)))] +thread_local!(static CURRENT: RcCell = const { RcCell::new() }); cfg_rt! { /// Spawns a `!Send` future on the local task set. @@ -310,14 +314,10 @@ cfg_rt! { where F: Future + 'static, F::Output: 'static { - CURRENT.with(|maybe_cx| { - let ctx = clone_rc(maybe_cx); - match ctx { - None => panic!("`spawn_local` called from outside of a `task::LocalSet`"), - Some(cx) => cx.spawn(future, name) - } - - }) + match CURRENT.with(|maybe_cx| maybe_cx.get()) { + None => panic!("`spawn_local` called from outside of a `task::LocalSet`"), + Some(cx) => cx.spawn(future, name) + } } } @@ -336,7 +336,7 @@ pub struct LocalEnterGuard(Option>); impl Drop for LocalEnterGuard { fn drop(&mut self) { CURRENT.with(|ctx| { - ctx.replace(self.0.take()); + ctx.set(self.0.take()); }) } } @@ -615,12 +615,12 @@ impl LocalSet { fn with(&self, f: impl FnOnce() -> T) -> T { CURRENT.with(|ctx| { struct Reset<'a> { - ctx_ref: &'a Cell>>, + ctx_ref: &'a RcCell, val: Option>, } impl<'a> Drop for Reset<'a> { fn drop(&mut self) { - self.ctx_ref.replace(self.val.take()); + self.ctx_ref.set(self.val.take()); } } let old = ctx.replace(Some(self.context.clone())); @@ -633,6 +633,37 @@ impl LocalSet { f() }) } + + /// This method is like `with`, but it just calls `f` without setting the thread-local if that + /// fails. + fn with_if_possible(&self, f: impl FnOnce() -> T) -> T { + let mut f = Some(f); + + let res = CURRENT.try_with(|ctx| { + struct Reset<'a> { + ctx_ref: &'a RcCell, + val: Option>, + } + impl<'a> Drop for Reset<'a> { + fn drop(&mut self) { + self.ctx_ref.replace(self.val.take()); + } + } + let old = ctx.replace(Some(self.context.clone())); + + let _reset = Reset { + ctx_ref: ctx, + val: old, + }; + + (f.take().unwrap())() + }); + + match res { + Ok(res) => res, + Err(_access_error) => (f.take().unwrap())(), + } + } } cfg_unstable! { @@ -744,7 +775,7 @@ impl Default for LocalSet { impl Drop for LocalSet { fn drop(&mut self) { - self.with(|| { + self.with_if_possible(|| { // Shut down all tasks in the LocalOwnedTasks and close it to // prevent new tasks from ever being added. 
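The `Drop` impl this hunk rewrites continues below; it now uses `with_if_possible` so that dropping a `LocalSet` from inside another thread-local's destructor still shuts tasks down. A standalone sketch of that `try_with`-with-fallback pattern (the names here are illustrative, and the panic-safety drop guard is elided):

```rust
use std::cell::Cell;

thread_local! {
    // Illustrative context value; Tokio stores an `RcCell<Context>` here.
    static CURRENT: Cell<Option<u32>> = Cell::new(None);
}

fn with_if_possible<T>(ctx: u32, f: impl FnOnce() -> T) -> T {
    let mut f = Some(f);

    let res = CURRENT.try_with(|cell| {
        let old = cell.replace(Some(ctx));
        // NOTE: real code restores `old` via a drop guard so a panic in `f`
        // cannot leave the thread-local in a bad state; elided for brevity.
        let out = (f.take().unwrap())();
        cell.set(old);
        out
    });

    match res {
        Ok(out) => out,
        // The thread-local was already destroyed (e.g. we are running inside
        // another thread-local's destructor): run `f` without the context.
        Err(_access_error) => (f.take().unwrap())(),
    }
}
```
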
self.context.owned.close_and_shutdown_all(); @@ -822,19 +853,11 @@ impl Future for RunUntil<'_, T> { } } -fn clone_rc(rc: &Cell>>) -> Option> { - let value = rc.take(); - let cloned = value.clone(); - rc.set(value); - cloned -} - impl Shared { /// Schedule the provided task on the scheduler. fn schedule(&self, task: task::Notified>) { CURRENT.with(|maybe_cx| { - let ctx = clone_rc(maybe_cx); - match ctx { + match maybe_cx.get() { Some(cx) if cx.shared.ptr_eq(self) => { cx.queue.push_back(task); } @@ -861,14 +884,11 @@ impl Shared { impl task::Schedule for Arc { fn release(&self, task: &Task) -> Option> { - CURRENT.with(|maybe_cx| { - let ctx = clone_rc(maybe_cx); - match ctx { - None => panic!("scheduler context missing"), - Some(cx) => { - assert!(cx.shared.ptr_eq(self)); - cx.owned.remove(task) - } + CURRENT.with(|maybe_cx| match maybe_cx.get() { + None => panic!("scheduler context missing"), + Some(cx) => { + assert!(cx.shared.ptr_eq(self)); + cx.owned.remove(task) } }) } @@ -889,15 +909,13 @@ impl task::Schedule for Arc { // This hook is only called from within the runtime, so // `CURRENT` should match with `&self`, i.e. there is no // opportunity for a nested scheduler to be called. - CURRENT.with(|maybe_cx| { - let ctx = clone_rc(maybe_cx); - match ctx { + CURRENT.with(|maybe_cx| match maybe_cx.get() { Some(cx) if Arc::ptr_eq(self, &cx.shared) => { cx.unhandled_panic.set(true); cx.owned.close_and_shutdown_all(); } _ => unreachable!("runtime core not set in CURRENT thread-local"), - }}) + }) } } } diff --git a/tokio/src/task/spawn.rs b/tokio/src/task/spawn.rs index 5a60f9d66e6..3fdd0357e80 100644 --- a/tokio/src/task/spawn.rs +++ b/tokio/src/task/spawn.rs @@ -1,4 +1,5 @@ -use crate::{task::JoinHandle, util::error::CONTEXT_MISSING_ERROR}; +use crate::runtime::Handle; +use crate::task::JoinHandle; use std::future::Future; @@ -142,10 +143,10 @@ cfg_rt! { T: Future + Send + 'static, T::Output: Send + 'static, { - use crate::runtime::{task, context}; + use crate::runtime::task; let id = task::Id::next(); - let spawn_handle = context::spawn_handle().expect(CONTEXT_MISSING_ERROR); let task = crate::util::trace::task(future, "task", name, id.as_u64()); - spawn_handle.spawn(task, id) + let handle = Handle::current(); + handle.inner.spawn(task, id) } } diff --git a/tokio/src/time/mod.rs b/tokio/src/time/mod.rs index 281990ef9ac..a1f27b839e9 100644 --- a/tokio/src/time/mod.rs +++ b/tokio/src/time/mod.rs @@ -82,17 +82,13 @@ //! ``` //! //! [`interval`]: crate::time::interval() +//! 
[`sleep`]: sleep() mod clock; pub(crate) use self::clock::Clock; #[cfg(feature = "test-util")] pub use clock::{advance, pause, resume}; -pub(crate) mod driver; - -#[doc(inline)] -pub use driver::sleep::{sleep, sleep_until, Sleep}; - pub mod error; mod instant; @@ -101,14 +97,13 @@ pub use self::instant::Instant; mod interval; pub use interval::{interval, interval_at, Interval, MissedTickBehavior}; +mod sleep; +pub use sleep::{sleep, sleep_until, Sleep}; + mod timeout; #[doc(inline)] pub use timeout::{timeout, timeout_at, Timeout}; -#[cfg(test)] -#[cfg(not(loom))] -mod tests; - // Re-export for convenience #[doc(no_inline)] pub use std::time::Duration; diff --git a/tokio/src/time/driver/sleep.rs b/tokio/src/time/sleep.rs similarity index 82% rename from tokio/src/time/driver/sleep.rs rename to tokio/src/time/sleep.rs index 2ff6ad592b2..b2e77515982 100644 --- a/tokio/src/time/driver/sleep.rs +++ b/tokio/src/time/sleep.rs @@ -1,6 +1,6 @@ #[cfg(all(tokio_unstable, feature = "tracing"))] -use crate::time::driver::ClockTime; -use crate::time::driver::{Handle, TimerEntry}; +use crate::runtime::time::TimeSource; +use crate::runtime::time::TimerEntry; use crate::time::{error::Error, Duration, Instant}; use crate::util::trace; @@ -239,7 +239,7 @@ cfg_trace! { struct Inner { deadline: Instant, ctx: trace::AsyncOpTracingCtx, - time_source: ClockTime, + time_source: TimeSource, } } @@ -251,62 +251,77 @@ cfg_not_trace! { } impl Sleep { - #[cfg_attr(not(all(tokio_unstable, feature = "tracing")), allow(unused_variables))] - #[track_caller] - pub(crate) fn new_timeout( - deadline: Instant, - location: Option<&'static Location<'static>>, - ) -> Sleep { - let handle = Handle::current(); - let entry = TimerEntry::new(&handle, deadline); - - #[cfg(all(tokio_unstable, feature = "tracing"))] - let inner = { - let time_source = handle.time_source().clone(); - let deadline_tick = time_source.deadline_to_tick(deadline); - let duration = deadline_tick.saturating_sub(time_source.now()); - - let location = location.expect("should have location if tracing"); - let resource_span = tracing::trace_span!( - "runtime.resource", - concrete_type = "Sleep", - kind = "timer", - loc.file = location.file(), - loc.line = location.line(), - loc.col = location.column(), - ); - - let async_op_span = resource_span.in_scope(|| { - tracing::trace!( - target: "runtime::resource::state_update", - duration = duration, - duration.unit = "ms", - duration.op = "override", + cfg_rt! 
{ + #[cfg_attr(not(all(tokio_unstable, feature = "tracing")), allow(unused_variables))] + #[track_caller] + pub(crate) fn new_timeout( + deadline: Instant, + location: Option<&'static Location<'static>>, + ) -> Sleep { + use crate::runtime::Handle; + + let handle = Handle::current().inner; + let entry = TimerEntry::new(&handle, deadline); + + #[cfg(all(tokio_unstable, feature = "tracing"))] + let inner = { + let handle = &handle.time(); + let time_source = handle.time_source().clone(); + let deadline_tick = time_source.deadline_to_tick(deadline); + let duration = deadline_tick.saturating_sub(time_source.now()); + + let location = location.expect("should have location if tracing"); + let resource_span = tracing::trace_span!( + "runtime.resource", + concrete_type = "Sleep", + kind = "timer", + loc.file = location.file(), + loc.line = location.line(), + loc.col = location.column(), ); - tracing::trace_span!("runtime.resource.async_op", source = "Sleep::new_timeout") - }); - - let async_op_poll_span = - async_op_span.in_scope(|| tracing::trace_span!("runtime.resource.async_op.poll")); - - let ctx = trace::AsyncOpTracingCtx { - async_op_span, - async_op_poll_span, - resource_span, + let async_op_span = resource_span.in_scope(|| { + tracing::trace!( + target: "runtime::resource::state_update", + duration = duration, + duration.unit = "ms", + duration.op = "override", + ); + + tracing::trace_span!("runtime.resource.async_op", source = "Sleep::new_timeout") + }); + + let async_op_poll_span = + async_op_span.in_scope(|| tracing::trace_span!("runtime.resource.async_op.poll")); + + let ctx = trace::AsyncOpTracingCtx { + async_op_span, + async_op_poll_span, + resource_span, + }; + + Inner { + deadline, + ctx, + time_source, + } }; - Inner { - deadline, - ctx, - time_source, - } - }; + #[cfg(not(all(tokio_unstable, feature = "tracing")))] + let inner = Inner { deadline }; - #[cfg(not(all(tokio_unstable, feature = "tracing")))] - let inner = Inner { deadline }; + Sleep { inner, entry } + } + } - Sleep { inner, entry } + cfg_not_rt! { + #[track_caller] + pub(crate) fn new_timeout( + _deadline: Instant, + _location: Option<&'static Location<'static>>, + ) -> Sleep { + panic!("{}", crate::util::error::CONTEXT_MISSING_ERROR) + } } pub(crate) fn far_future(location: Option<&'static Location<'static>>) -> Sleep { diff --git a/tokio/src/time/tests/mod.rs b/tokio/src/time/tests/mod.rs deleted file mode 100644 index 35e1060aca0..00000000000 --- a/tokio/src/time/tests/mod.rs +++ /dev/null @@ -1,22 +0,0 @@ -mod test_sleep; - -use crate::time::{self, Instant}; -use std::time::Duration; - -fn assert_send() {} -fn assert_sync() {} - -#[test] -fn registration_is_send_and_sync() { - use crate::time::Sleep; - - assert_send::(); - assert_sync::(); -} - -#[test] -#[should_panic] -fn sleep_is_eager() { - let when = Instant::now() + Duration::from_millis(100); - let _ = time::sleep_until(when); -} diff --git a/tokio/src/time/tests/test_sleep.rs b/tokio/src/time/tests/test_sleep.rs deleted file mode 100644 index 77ca07e319e..00000000000 --- a/tokio/src/time/tests/test_sleep.rs +++ /dev/null @@ -1,443 +0,0 @@ -//use crate::time::driver::{Driver, Entry, Handle}; - -/* -macro_rules! 
poll { - ($e:expr) => { - $e.enter(|cx, e| e.poll_elapsed(cx)) - }; -} - -#[test] -fn frozen_utility_returns_correct_advanced_duration() { - let clock = Clock::new(); - clock.pause(); - let start = clock.now(); - - clock.advance(ms(10)); - assert_eq!(clock.now() - start, ms(10)); -} - -#[test] -fn immediate_sleep() { - let (mut driver, clock, handle) = setup(); - let start = clock.now(); - - let when = clock.now(); - let mut e = task::spawn(sleep_until(&handle, when)); - - assert_ready_ok!(poll!(e)); - - assert_ok!(driver.park_timeout(Duration::from_millis(1000))); - - // The time has not advanced. The `turn` completed immediately. - assert_eq!(clock.now() - start, ms(1000)); -} - -#[test] -fn delayed_sleep_level_0() { - let (mut driver, clock, handle) = setup(); - let start = clock.now(); - - for &i in &[1, 10, 60] { - // Create a `Sleep` that elapses in the future - let mut e = task::spawn(sleep_until(&handle, start + ms(i))); - - // The sleep instance has not elapsed. - assert_pending!(poll!(e)); - - assert_ok!(driver.park()); - assert_eq!(clock.now() - start, ms(i)); - - assert_ready_ok!(poll!(e)); - } -} - -#[test] -fn sub_ms_delayed_sleep() { - let (mut driver, clock, handle) = setup(); - - for _ in 0..5 { - let deadline = clock.now() + ms(1) + Duration::new(0, 1); - - let mut e = task::spawn(sleep_until(&handle, deadline)); - - assert_pending!(poll!(e)); - - assert_ok!(driver.park()); - assert_ready_ok!(poll!(e)); - - assert!(clock.now() >= deadline); - - clock.advance(Duration::new(0, 1)); - } -} - -#[test] -fn delayed_sleep_wrapping_level_0() { - let (mut driver, clock, handle) = setup(); - let start = clock.now(); - - assert_ok!(driver.park_timeout(ms(5))); - assert_eq!(clock.now() - start, ms(5)); - - let mut e = task::spawn(sleep_until(&handle, clock.now() + ms(60))); - - assert_pending!(poll!(e)); - - assert_ok!(driver.park()); - assert_eq!(clock.now() - start, ms(64)); - assert_pending!(poll!(e)); - - assert_ok!(driver.park()); - assert_eq!(clock.now() - start, ms(65)); - - assert_ready_ok!(poll!(e)); -} - -#[test] -fn timer_wrapping_with_higher_levels() { - let (mut driver, clock, handle) = setup(); - let start = clock.now(); - - // Set sleep to hit level 1 - let mut e1 = task::spawn(sleep_until(&handle, clock.now() + ms(64))); - assert_pending!(poll!(e1)); - - // Turn a bit - assert_ok!(driver.park_timeout(ms(5))); - - // Set timeout such that it will hit level 0, but wrap - let mut e2 = task::spawn(sleep_until(&handle, clock.now() + ms(60))); - assert_pending!(poll!(e2)); - - // This should result in s1 firing - assert_ok!(driver.park()); - assert_eq!(clock.now() - start, ms(64)); - - assert_ready_ok!(poll!(e1)); - assert_pending!(poll!(e2)); - - assert_ok!(driver.park()); - assert_eq!(clock.now() - start, ms(65)); - - assert_ready_ok!(poll!(e1)); -} - -#[test] -fn sleep_with_deadline_in_past() { - let (mut driver, clock, handle) = setup(); - let start = clock.now(); - - // Create `Sleep` that elapsed immediately. - let mut e = task::spawn(sleep_until(&handle, clock.now() - ms(100))); - - // Even though the `Sleep` expires in the past, it is not ready yet - // because the timer must observe it. - assert_ready_ok!(poll!(e)); - - // Turn the timer, it runs for the elapsed time - assert_ok!(driver.park_timeout(ms(1000))); - - // The time has not advanced. The `turn` completed immediately. 
- assert_eq!(clock.now() - start, ms(1000)); -} - -#[test] -fn delayed_sleep_level_1() { - let (mut driver, clock, handle) = setup(); - let start = clock.now(); - - // Create a `Sleep` that elapses in the future - let mut e = task::spawn(sleep_until(&handle, clock.now() + ms(234))); - - // The sleep has not elapsed. - assert_pending!(poll!(e)); - - // Turn the timer, this will wake up to cascade the timer down. - assert_ok!(driver.park_timeout(ms(1000))); - assert_eq!(clock.now() - start, ms(192)); - - // The sleep has not elapsed. - assert_pending!(poll!(e)); - - // Turn the timer again - assert_ok!(driver.park_timeout(ms(1000))); - assert_eq!(clock.now() - start, ms(234)); - - // The sleep has elapsed. - assert_ready_ok!(poll!(e)); - - let (mut driver, clock, handle) = setup(); - let start = clock.now(); - - // Create a `Sleep` that elapses in the future - let mut e = task::spawn(sleep_until(&handle, clock.now() + ms(234))); - - // The sleep has not elapsed. - assert_pending!(poll!(e)); - - // Turn the timer with a smaller timeout than the cascade. - assert_ok!(driver.park_timeout(ms(100))); - assert_eq!(clock.now() - start, ms(100)); - - assert_pending!(poll!(e)); - - // Turn the timer, this will wake up to cascade the timer down. - assert_ok!(driver.park_timeout(ms(1000))); - assert_eq!(clock.now() - start, ms(192)); - - // The sleep has not elapsed. - assert_pending!(poll!(e)); - - // Turn the timer again - assert_ok!(driver.park_timeout(ms(1000))); - assert_eq!(clock.now() - start, ms(234)); - - // The sleep has elapsed. - assert_ready_ok!(poll!(e)); -} - -#[test] -fn concurrently_set_two_timers_second_one_shorter() { - let (mut driver, clock, handle) = setup(); - let start = clock.now(); - - let mut e1 = task::spawn(sleep_until(&handle, clock.now() + ms(500))); - let mut e2 = task::spawn(sleep_until(&handle, clock.now() + ms(200))); - - // The sleep has not elapsed - assert_pending!(poll!(e1)); - assert_pending!(poll!(e2)); - - // Sleep until a cascade - assert_ok!(driver.park()); - assert_eq!(clock.now() - start, ms(192)); - - // Sleep until the second timer. - assert_ok!(driver.park()); - assert_eq!(clock.now() - start, ms(200)); - - // The shorter sleep fires - assert_ready_ok!(poll!(e2)); - assert_pending!(poll!(e1)); - - assert_ok!(driver.park()); - assert_eq!(clock.now() - start, ms(448)); - - assert_pending!(poll!(e1)); - - // Turn again, this time the time will advance to the second sleep - assert_ok!(driver.park()); - assert_eq!(clock.now() - start, ms(500)); - - assert_ready_ok!(poll!(e1)); -} - -#[test] -fn short_sleep() { - let (mut driver, clock, handle) = setup(); - let start = clock.now(); - - // Create a `Sleep` that elapses in the future - let mut e = task::spawn(sleep_until(&handle, clock.now() + ms(1))); - - // The sleep has not elapsed. - assert_pending!(poll!(e)); - - // Turn the timer, but not enough time will go by. - assert_ok!(driver.park()); - - // The sleep has elapsed. - assert_ready_ok!(poll!(e)); - - // The time has advanced to the point of the sleep elapsing. - assert_eq!(clock.now() - start, ms(1)); -} - -#[test] -fn sorta_long_sleep_until() { - const MIN_5: u64 = 5 * 60 * 1000; - - let (mut driver, clock, handle) = setup(); - let start = clock.now(); - - // Create a `Sleep` that elapses in the future - let mut e = task::spawn(sleep_until(&handle, clock.now() + ms(MIN_5))); - - // The sleep has not elapsed. 
- assert_pending!(poll!(e)); - - let cascades = &[262_144, 262_144 + 9 * 4096, 262_144 + 9 * 4096 + 15 * 64]; - - for &elapsed in cascades { - assert_ok!(driver.park()); - assert_eq!(clock.now() - start, ms(elapsed)); - - assert_pending!(poll!(e)); - } - - assert_ok!(driver.park()); - assert_eq!(clock.now() - start, ms(MIN_5)); - - // The sleep has elapsed. - assert_ready_ok!(poll!(e)); -} - -#[test] -fn very_long_sleep() { - const MO_5: u64 = 5 * 30 * 24 * 60 * 60 * 1000; - - let (mut driver, clock, handle) = setup(); - let start = clock.now(); - - // Create a `Sleep` that elapses in the future - let mut e = task::spawn(sleep_until(&handle, clock.now() + ms(MO_5))); - - // The sleep has not elapsed. - assert_pending!(poll!(e)); - - let cascades = &[ - 12_884_901_888, - 12_952_010_752, - 12_959_875_072, - 12_959_997_952, - ]; - - for &elapsed in cascades { - assert_ok!(driver.park()); - assert_eq!(clock.now() - start, ms(elapsed)); - - assert_pending!(poll!(e)); - } - - // Turn the timer, but not enough time will go by. - assert_ok!(driver.park()); - - // The time has advanced to the point of the sleep elapsing. - assert_eq!(clock.now() - start, ms(MO_5)); - - // The sleep has elapsed. - assert_ready_ok!(poll!(e)); -} - -#[test] -fn unpark_is_delayed() { - // A special park that will take much longer than the requested duration - struct MockPark(Clock); - - struct MockUnpark; - - impl Park for MockPark { - type Unpark = MockUnpark; - type Error = (); - - fn unpark(&self) -> Self::Unpark { - MockUnpark - } - - fn park(&mut self) -> Result<(), Self::Error> { - panic!("parking forever"); - } - - fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> { - assert_eq!(duration, ms(0)); - self.0.advance(ms(436)); - Ok(()) - } - - fn shutdown(&mut self) {} - } - - impl Unpark for MockUnpark { - fn unpark(&self) {} - } - - let clock = Clock::new(); - clock.pause(); - let start = clock.now(); - let mut driver = Driver::new(MockPark(clock.clone()), clock.clone()); - let handle = driver.handle(); - - let mut e1 = task::spawn(sleep_until(&handle, clock.now() + ms(100))); - let mut e2 = task::spawn(sleep_until(&handle, clock.now() + ms(101))); - let mut e3 = task::spawn(sleep_until(&handle, clock.now() + ms(200))); - - assert_pending!(poll!(e1)); - assert_pending!(poll!(e2)); - assert_pending!(poll!(e3)); - - assert_ok!(driver.park()); - - assert_eq!(clock.now() - start, ms(500)); - - assert_ready_ok!(poll!(e1)); - assert_ready_ok!(poll!(e2)); - assert_ready_ok!(poll!(e3)); -} - -#[test] -fn set_timeout_at_deadline_greater_than_max_timer() { - const YR_1: u64 = 365 * 24 * 60 * 60 * 1000; - const YR_5: u64 = 5 * YR_1; - - let (mut driver, clock, handle) = setup(); - let start = clock.now(); - - for _ in 0..5 { - assert_ok!(driver.park_timeout(ms(YR_1))); - } - - let mut e = task::spawn(sleep_until(&handle, clock.now() + ms(1))); - assert_pending!(poll!(e)); - - assert_ok!(driver.park_timeout(ms(1000))); - assert_eq!(clock.now() - start, ms(YR_5) + ms(1)); - - assert_ready_ok!(poll!(e)); -} - -fn setup() -> (Driver, Clock, Handle) { - let clock = Clock::new(); - clock.pause(); - let driver = Driver::new(MockPark(clock.clone()), clock.clone()); - let handle = driver.handle(); - - (driver, clock, handle) -} - -fn sleep_until(handle: &Handle, when: Instant) -> Arc { - Entry::new(&handle, when, ms(0)) -} - -struct MockPark(Clock); - -struct MockUnpark; - -impl Park for MockPark { - type Unpark = MockUnpark; - type Error = (); - - fn unpark(&self) -> Self::Unpark { - MockUnpark - } - - fn 
park(&mut self) -> Result<(), Self::Error> { - panic!("parking forever"); - } - - fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> { - self.0.advance(duration); - Ok(()) - } - - fn shutdown(&mut self) {} -} - -impl Unpark for MockUnpark { - fn unpark(&self) {} -} - -fn ms(n: u64) -> Duration { - Duration::from_millis(n) -} -*/ diff --git a/tokio/src/util/error.rs b/tokio/src/util/error.rs index 8f252c0c912..ebb27f6385f 100644 --- a/tokio/src/util/error.rs +++ b/tokio/src/util/error.rs @@ -1,15 +1,14 @@ +// Some combinations of features may not use these constants. +#![cfg_attr(not(feature = "full"), allow(dead_code))] + /// Error string explaining that the Tokio context hasn't been instantiated. pub(crate) const CONTEXT_MISSING_ERROR: &str = "there is no reactor running, must be called from the context of a Tokio 1.x runtime"; -// some combinations of features might not use this -#[allow(dead_code)] /// Error string explaining that the Tokio context is shutting down and cannot drive timers. pub(crate) const RUNTIME_SHUTTING_DOWN_ERROR: &str = "A Tokio 1.x context was found, but it is being shutdown."; -// some combinations of features might not use this -#[allow(dead_code)] /// Error string explaining that the Tokio context is not available because the /// thread-local storing it has been destroyed. This usually only happens during /// destructors of other thread-locals. diff --git a/tokio/src/util/mod.rs b/tokio/src/util/mod.rs index 41a3bce051f..e88c839e431 100644 --- a/tokio/src/util/mod.rs +++ b/tokio/src/util/mod.rs @@ -6,6 +6,15 @@ cfg_io_driver! { #[cfg(feature = "rt")] pub(crate) mod atomic_cell; +cfg_has_atomic_u64! { + #[cfg(any(feature = "signal", all(unix, feature = "process")))] + pub(crate) mod once_cell; +} +cfg_not_has_atomic_u64! { + #[cfg(any(feature = "rt", feature = "signal", all(unix, feature = "process")))] + pub(crate) mod once_cell; +} + #[cfg(any( // io driver uses `WakeList` directly feature = "net", @@ -40,13 +49,15 @@ pub(crate) use wake_list::WakeList; ))] pub(crate) mod linked_list; -#[cfg(any(feature = "rt-multi-thread", feature = "macros"))] +#[cfg(any(feature = "rt", feature = "macros"))] mod rand; cfg_rt! { mod idle_notified_set; pub(crate) use idle_notified_set::IdleNotifiedSet; + pub(crate) use self::rand::{RngSeedGenerator,replace_thread_rng}; + mod wake; pub(crate) use wake::WakerRef; pub(crate) use wake::{waker_ref, Wake}; @@ -56,8 +67,19 @@ cfg_rt! { mod vec_deque_cell; pub(crate) use vec_deque_cell::VecDequeCell; + + mod rc_cell; + pub(crate) use rc_cell::RcCell; } +#[cfg_attr(not(tokio_unstable), allow(unreachable_pub))] +#[cfg(feature = "rt")] +pub use self::rand::RngSeed; + +#[cfg(any(feature = "macros"))] +#[cfg_attr(not(feature = "macros"), allow(unreachable_pub))] +pub use self::rand::thread_rng_n; + cfg_rt_multi_thread! { pub(crate) use self::rand::FastRand; @@ -67,15 +89,4 @@ cfg_rt_multi_thread! 
diff --git a/tokio/src/util/mod.rs b/tokio/src/util/mod.rs
index 41a3bce051f..e88c839e431 100644
--- a/tokio/src/util/mod.rs
+++ b/tokio/src/util/mod.rs
@@ -6,6 +6,15 @@ cfg_io_driver! {
 #[cfg(feature = "rt")]
 pub(crate) mod atomic_cell;
 
+cfg_has_atomic_u64! {
+    #[cfg(any(feature = "signal", all(unix, feature = "process")))]
+    pub(crate) mod once_cell;
+}
+cfg_not_has_atomic_u64! {
+    #[cfg(any(feature = "rt", feature = "signal", all(unix, feature = "process")))]
+    pub(crate) mod once_cell;
+}
+
 #[cfg(any(
     // io driver uses `WakeList` directly
     feature = "net",
@@ -40,13 +49,15 @@ pub(crate) use wake_list::WakeList;
 ))]
 pub(crate) mod linked_list;
 
-#[cfg(any(feature = "rt-multi-thread", feature = "macros"))]
+#[cfg(any(feature = "rt", feature = "macros"))]
 mod rand;
 
 cfg_rt! {
     mod idle_notified_set;
     pub(crate) use idle_notified_set::IdleNotifiedSet;
 
+    pub(crate) use self::rand::{RngSeedGenerator, replace_thread_rng};
+
     mod wake;
     pub(crate) use wake::WakerRef;
     pub(crate) use wake::{waker_ref, Wake};
@@ -56,8 +67,19 @@ cfg_rt! {
 
     mod vec_deque_cell;
     pub(crate) use vec_deque_cell::VecDequeCell;
+
+    mod rc_cell;
+    pub(crate) use rc_cell::RcCell;
 }
 
+#[cfg_attr(not(tokio_unstable), allow(unreachable_pub))]
+#[cfg(feature = "rt")]
+pub use self::rand::RngSeed;
+
+#[cfg(any(feature = "macros"))]
+#[cfg_attr(not(feature = "macros"), allow(unreachable_pub))]
+pub use self::rand::thread_rng_n;
+
 cfg_rt_multi_thread! {
     pub(crate) use self::rand::FastRand;
 
@@ -67,15 +89,4 @@ cfg_rt_multi_thread! {
 
 pub(crate) mod trace;
 
-#[cfg(any(feature = "macros"))]
-#[cfg_attr(not(feature = "macros"), allow(unreachable_pub))]
-pub use self::rand::thread_rng_n;
-
-#[cfg(any(
-    feature = "rt",
-    feature = "time",
-    feature = "net",
-    feature = "process",
-    all(unix, feature = "signal")
-))]
 pub(crate) mod error;
diff --git a/tokio/src/util/once_cell.rs b/tokio/src/util/once_cell.rs
new file mode 100644
index 00000000000..15639e6307f
--- /dev/null
+++ b/tokio/src/util/once_cell.rs
@@ -0,0 +1,70 @@
+#![cfg_attr(loom, allow(dead_code))]
+use std::cell::UnsafeCell;
+use std::mem::MaybeUninit;
+use std::sync::Once;
+
+pub(crate) struct OnceCell<T> {
+    once: Once,
+    value: UnsafeCell<MaybeUninit<T>>,
+}
+
+unsafe impl<T: Send + Sync> Send for OnceCell<T> {}
+unsafe impl<T: Send + Sync> Sync for OnceCell<T> {}
+
+impl<T> OnceCell<T> {
+    pub(crate) const fn new() -> Self {
+        Self {
+            once: Once::new(),
+            value: UnsafeCell::new(MaybeUninit::uninit()),
+        }
+    }
+
+    /// Get the value inside this cell, initializing it using the provided
+    /// function if necessary.
+    ///
+    /// If the `init` closure panics, then the `OnceCell` is poisoned and all
+    /// future calls to `get` will panic.
+    #[inline]
+    pub(crate) fn get(&self, init: fn() -> T) -> &T {
+        if !self.once.is_completed() {
+            self.do_init(init);
+        }
+
+        // Safety: The `std::sync::Once` guarantees that we can only reach this
+        // line if a `call_once` closure has been run exactly once and without
+        // panicking. Thus, the value is not uninitialized.
+        //
+        // There is also no race because the only `&self` method that modifies
+        // `value` is `do_init`, but if the `call_once` closure is still
+        // running, then no thread has gotten past the `call_once`.
+        unsafe { &*(self.value.get() as *const T) }
+    }
+
+    #[cold]
+    fn do_init(&self, init: fn() -> T) {
+        let value_ptr = self.value.get() as *mut T;
+
+        self.once.call_once(|| {
+            let set_to = init();
+
+            // Safety: The `std::sync::Once` guarantees that this initialization
+            // will run at most once, and that no thread can get past the
+            // `call_once` until it has run exactly once. Thus, we have
+            // exclusive access to `value`.
+            unsafe {
+                std::ptr::write(value_ptr, set_to);
+            }
+        });
+    }
+}
+
+impl<T> Drop for OnceCell<T> {
+    fn drop(&mut self) {
+        if self.once.is_completed() {
+            let value_ptr = self.value.get() as *mut T;
+            unsafe {
+                std::ptr::drop_in_place(value_ptr);
+            }
+        }
+    }
+}
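For context, a sketch of how a once-cell like the one above is typically consumed inside the crate. The static name and table contents here are hypothetical, not taken from the patch:

    use std::collections::HashMap;

    // Hypothetical: build an expensive lookup table once; every later
    // caller reuses the same fully-initialized value.
    static TABLE: OnceCell<HashMap<u32, &'static str>> = OnceCell::new();

    fn table() -> &'static HashMap<u32, &'static str> {
        // `get` runs the init function at most once, even if several
        // threads race here; losers block on the `Once` until it is done.
        TABLE.get(|| {
            let mut m = HashMap::new();
            m.insert(1, "one");
            m
        })
    }

Note that `get` takes a plain `fn() -> T`, so only non-capturing closures (which coerce to function pointers) work, which keeps the cell `const`-constructible and small.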
diff --git a/tokio/src/util/rand.rs b/tokio/src/util/rand.rs
index 6b19c8be957..09754cea993 100644
--- a/tokio/src/util/rand.rs
+++ b/tokio/src/util/rand.rs
@@ -1,5 +1,103 @@
 use std::cell::Cell;
 
+cfg_rt! {
+    use std::sync::Mutex;
+
+    /// A deterministic generator for seeds (and other generators).
+    ///
+    /// Given the same initial seed, the generator will output the same sequence of seeds.
+    ///
+    /// Since the seed generator will be kept in a runtime handle, we need to wrap `FastRand`
+    /// in a Mutex to make it thread safe. Unlike the `FastRand` that we keep in a
+    /// thread-local store, the expectation is that seed generation will not need to happen
+    /// very frequently, so the cost of the mutex should be minimal.
+    #[derive(Debug)]
+    pub(crate) struct RngSeedGenerator {
+        /// Internal state for the seed generator. We keep it in a Mutex so that we can safely
+        /// use it across multiple threads.
+        state: Mutex<FastRand>,
+    }
+
+    impl RngSeedGenerator {
+        /// Returns a new generator from the provided seed.
+        pub(crate) fn new(seed: RngSeed) -> Self {
+            Self {
+                state: Mutex::new(FastRand::new(seed)),
+            }
+        }
+
+        /// Returns the next seed in the sequence.
+        pub(crate) fn next_seed(&self) -> RngSeed {
+            let rng = self
+                .state
+                .lock()
+                .expect("RNG seed generator is internally corrupt");
+
+            let s = rng.fastrand();
+            let r = rng.fastrand();
+
+            RngSeed::from_pair(s, r)
+        }
+
+        /// Directly creates a generator using the next seed.
+        pub(crate) fn next_generator(&self) -> Self {
+            RngSeedGenerator::new(self.next_seed())
+        }
+    }
+}
+
+/// A seed for random number generation.
+///
+/// In order to make certain functions within a runtime deterministic, a seed
+/// can be specified at the time of creation.
+#[allow(unreachable_pub)]
+#[derive(Clone, Debug)]
+pub struct RngSeed {
+    s: u32,
+    r: u32,
+}
+
+impl RngSeed {
+    /// Creates a random seed using loom internally.
+    pub(crate) fn new() -> Self {
+        Self::from_u64(crate::loom::rand::seed())
+    }
+
+    cfg_unstable! {
+        /// Generates a seed from the provided byte slice.
+        ///
+        /// # Example
+        ///
+        /// ```
+        /// # use tokio::runtime::RngSeed;
+        /// let seed = RngSeed::from_bytes(b"make me a seed");
+        /// ```
+        #[cfg(feature = "rt")]
+        pub fn from_bytes(bytes: &[u8]) -> Self {
+            use std::{collections::hash_map::DefaultHasher, hash::Hasher};
+
+            let mut hasher = DefaultHasher::default();
+            hasher.write(bytes);
+            Self::from_u64(hasher.finish())
+        }
+    }
+
+    fn from_u64(seed: u64) -> Self {
+        let one = (seed >> 32) as u32;
+        let mut two = seed as u32;
+
+        if two == 0 {
+            // This value cannot be zero
+            two = 1;
+        }
+
+        Self::from_pair(one, two)
+    }
+
+    fn from_pair(s: u32, r: u32) -> Self {
+        Self { s, r }
+    }
+}
+
 /// Fast random number generator.
 ///
 /// Implement xorshift64+: 2 32-bit xorshift sequences added together.
@@ -15,21 +113,29 @@ pub(crate) struct FastRand {
 
 impl FastRand {
     /// Initializes a new, thread-local, fast random number generator.
-    pub(crate) fn new(seed: u64) -> FastRand {
-        let one = (seed >> 32) as u32;
-        let mut two = seed as u32;
-
-        if two == 0 {
-            // This value cannot be zero
-            two = 1;
-        }
-
+    pub(crate) fn new(seed: RngSeed) -> FastRand {
         FastRand {
-            one: Cell::new(one),
-            two: Cell::new(two),
+            one: Cell::new(seed.s),
+            two: Cell::new(seed.r),
         }
     }
 
+    /// Replaces the state of the random number generator with the provided seed, returning
+    /// the seed that represents the previous state of the random number generator.
+    ///
+    /// The random number generator will become equivalent to one created with
+    /// the same seed.
+    #[cfg(feature = "rt")]
+    pub(crate) fn replace_seed(&self, seed: RngSeed) -> RngSeed {
+        let old_seed = RngSeed::from_pair(self.one.get(), self.two.get());
+
+        self.one.replace(seed.s);
+        self.two.replace(seed.r);
+
+        old_seed
+    }
+
+    #[cfg(any(feature = "macros", feature = "rt-multi-thread"))]
     pub(crate) fn fastrand_n(&self, n: u32) -> u32 {
         // This is similar to fastrand() % n, but faster.
         // See https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
@@ -51,14 +157,24 @@ impl FastRand {
     }
 }
 
+thread_local! {
+    static THREAD_RNG: FastRand = FastRand::new(RngSeed::new());
+}
+
+/// Seeds the thread-local random number generator with the provided seed and
+/// returns the previously stored seed.
+///
+/// The returned seed can later be used to return the thread-local random number
+/// generator to its previous state.
+#[cfg(feature = "rt")]
+pub(crate) fn replace_thread_rng(rng_seed: RngSeed) -> RngSeed {
+    THREAD_RNG.with(|rng| rng.replace_seed(rng_seed))
+}
+
 // Used by the select macro and `StreamMap`
 #[cfg(any(feature = "macros"))]
 #[doc(hidden)]
 #[cfg_attr(not(feature = "macros"), allow(unreachable_pub))]
 pub fn thread_rng_n(n: u32) -> u32 {
-    thread_local! {
-        static THREAD_RNG: FastRand = FastRand::new(crate::loom::rand::seed());
-    }
-
     THREAD_RNG.with(|rng| rng.fastrand_n(n))
 }
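The diff leans on `FastRand::fastrand` (unchanged, so not shown above) and on Lemire's modulo-free range reduction referenced in the comment. Below is a self-contained re-implementation for illustration, written from the xorshift64+ description in the doc comment; the shift constants and seed values are an assumption, not copied from the file:

    use std::cell::Cell;

    // Two 32-bit xorshift sequences whose outputs are added together
    // (xorshift64+), mirroring the shape of `FastRand`.
    struct Rng {
        one: Cell<u32>,
        two: Cell<u32>,
    }

    impl Rng {
        fn next(&self) -> u32 {
            let mut s1 = self.one.get();
            let s0 = self.two.get();

            // One common xorshift parameterization (17, 7, 16).
            s1 ^= s1 << 17;
            s1 = s1 ^ s0 ^ (s1 >> 7) ^ (s0 >> 16);

            self.one.set(s0);
            self.two.set(s1);

            s0.wrapping_add(s1)
        }

        // Lemire reduction: instead of `next() % n`, widen to a 64-bit
        // product and keep the high half. This maps onto `0..n` without
        // a division, which is the trick `fastrand_n` uses.
        fn next_n(&self, n: u32) -> u32 {
            let mul = (self.next() as u64).wrapping_mul(n as u64);
            (mul >> 32) as u32
        }
    }

    fn main() {
        // Same (s, r) pair -> same sequence: the property that the new
        // `RngSeedGenerator` and the `rng_seed` builder option rely on.
        let rng = Rng { one: Cell::new(0x9E37_79B9), two: Cell::new(1) };
        println!("{} {}", rng.next_n(100), rng.next_n(100));
    }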
diff --git a/tokio/src/util/rc_cell.rs b/tokio/src/util/rc_cell.rs
new file mode 100644
index 00000000000..97c02053c59
--- /dev/null
+++ b/tokio/src/util/rc_cell.rs
@@ -0,0 +1,57 @@
+use crate::loom::cell::UnsafeCell;
+
+use std::rc::Rc;
+
+/// This is exactly like `Cell<Option<Rc<T>>>`, except that it provides a `get`
+/// method even though `Rc` is not `Copy`.
+pub(crate) struct RcCell<T> {
+    inner: UnsafeCell<Option<Rc<T>>>,
+}
+
+impl<T> RcCell<T> {
+    #[cfg(not(loom))]
+    pub(crate) const fn new() -> Self {
+        Self {
+            inner: UnsafeCell::new(None),
+        }
+    }
+
+    // The UnsafeCell in loom does not have a const `new` fn.
+    #[cfg(loom)]
+    pub(crate) fn new() -> Self {
+        Self {
+            inner: UnsafeCell::new(None),
+        }
+    }
+
+    /// Safety: This method may not be called recursively.
+    #[inline]
+    unsafe fn with_inner<F, R>(&self, f: F) -> R
+    where
+        F: FnOnce(&mut Option<Rc<T>>) -> R,
+    {
+        // safety: This type is not Sync, so concurrent calls of this method
+        // cannot happen. Furthermore, the caller guarantees that the method is
+        // not called recursively. Finally, this is the only place that can
+        // create mutable references to the inner Rc. This ensures that any
+        // mutable references created here are exclusive.
+        self.inner.with_mut(|ptr| f(&mut *ptr))
+    }
+
+    pub(crate) fn get(&self) -> Option<Rc<T>> {
+        // safety: The `Rc::clone` method will not call any unknown user-code,
+        // so it will not result in a recursive call to `with_inner`.
+        unsafe { self.with_inner(|rc| rc.clone()) }
+    }
+
+    pub(crate) fn replace(&self, val: Option<Rc<T>>) -> Option<Rc<T>> {
+        // safety: No destructors or other unknown user-code will run inside the
+        // `with_inner` call, so no recursive call to `with_inner` can happen.
+        unsafe { self.with_inner(|rc| std::mem::replace(rc, val)) }
+    }
+
+    pub(crate) fn set(&self, val: Option<Rc<T>>) {
+        let old = self.replace(val);
+        drop(old);
+    }
+}
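A note on the design: safe code can already approximate `RcCell::get` with a plain `Cell` by momentarily taking the value out, cloning it, and putting it back; the new type avoids the temporary `None` window and the extra moves. A standalone sketch of that safe equivalent, for comparison only (the helper name is hypothetical):

    use std::cell::Cell;
    use std::rc::Rc;

    // Safe-but-clumsier equivalent of `RcCell::get`: `Rc` is not `Copy`,
    // so `Cell::get` is unavailable; instead take, clone, restore.
    fn get_rc<T>(cell: &Cell<Option<Rc<T>>>) -> Option<Rc<T>> {
        let value = cell.take();    // leaves `None` in the cell briefly
        let cloned = value.clone(); // bumps the refcount
        cell.set(value);            // restores the original
        cloned
    }

    fn main() {
        let cell = Cell::new(Some(Rc::new(42)));
        let first = get_rc(&cell);
        let second = get_rc(&cell);
        assert_eq!(first.as_deref(), Some(&42));
        // The cell still holds one handle; we cloned two more.
        assert_eq!(Rc::strong_count(&second.unwrap()), 3);
    }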
diff --git a/tokio/tests/io_copy_bidirectional.rs b/tokio/tests/io_copy_bidirectional.rs
index 600427866ef..c5496759550 100644
--- a/tokio/tests/io_copy_bidirectional.rs
+++ b/tokio/tests/io_copy_bidirectional.rs
@@ -111,18 +111,30 @@ async fn blocking_one_side_does_not_block_other() {
 }
 
 #[tokio::test]
-async fn immediate_exit_on_error() {
-    symmetric(|handle, mut a, mut b| async move {
-        block_write(&mut a).await;
+async fn immediate_exit_on_write_error() {
+    let payload = b"here, take this";
+    let error = || io::Error::new(io::ErrorKind::Other, "no thanks!");
 
-        // Fill up the b->copy->a path. We expect that this will _not_ drain
-        // before we exit the copy task.
-        let _bytes_written = block_write(&mut b).await;
+    let mut a = tokio_test::io::Builder::new()
+        .read(payload)
+        .write_error(error())
+        .build();
 
-        // Drop b. We should not wait for a to consume the data buffered in the
-        // copy loop, since b will be failing writes.
-        drop(b);
-        assert!(handle.await.unwrap().is_err());
-    })
-    .await
+    let mut b = tokio_test::io::Builder::new()
+        .read(payload)
+        .write_error(error())
+        .build();
+
+    assert!(copy_bidirectional(&mut a, &mut b).await.is_err());
+}
+
+#[tokio::test]
+async fn immediate_exit_on_read_error() {
+    let error = || io::Error::new(io::ErrorKind::Other, "got nothing!");
+
+    let mut a = tokio_test::io::Builder::new().read_error(error()).build();
+
+    let mut b = tokio_test::io::Builder::new().read_error(error()).build();
+
+    assert!(copy_bidirectional(&mut a, &mut b).await.is_err());
 }
diff --git a/tokio/tests/macros_select.rs b/tokio/tests/macros_select.rs
index 23162a1a918..e642a5d9ff4 100644
--- a/tokio/tests/macros_select.rs
+++ b/tokio/tests/macros_select.rs
@@ -599,3 +599,62 @@ async fn mut_ref_patterns() {
         },
     };
 }
+
+#[cfg(tokio_unstable)]
+mod unstable {
+    use tokio::runtime::RngSeed;
+
+    #[test]
+    fn deterministic_select_current_thread() {
+        let seed = b"bytes used to generate seed";
+        let rt = tokio::runtime::Builder::new_current_thread()
+            .rng_seed(RngSeed::from_bytes(seed))
+            .build()
+            .unwrap();
+
+        rt.block_on(async {
+            let num = select_0_to_9().await;
+            assert_eq!(num, 5);
+
+            let num = select_0_to_9().await;
+            assert_eq!(num, 1);
+        });
+    }
+
+    #[test]
+    #[cfg(all(feature = "rt-multi-thread", not(tokio_wasi)))]
+    fn deterministic_select_multi_thread() {
+        let seed = b"bytes used to generate seed";
+        let rt = tokio::runtime::Builder::new_multi_thread()
+            .worker_threads(1)
+            .rng_seed(RngSeed::from_bytes(seed))
+            .build()
+            .unwrap();
+
+        rt.block_on(async {
+            let _ = tokio::spawn(async {
+                let num = select_0_to_9().await;
+                assert_eq!(num, 6);
+
+                let num = select_0_to_9().await;
+                assert_eq!(num, 9);
+            })
+            .await;
+        });
+    }
+
+    async fn select_0_to_9() -> u32 {
+        tokio::select!(
+            x = async { 0 } => x,
+            x = async { 1 } => x,
+            x = async { 2 } => x,
+            x = async { 3 } => x,
+            x = async { 4 } => x,
+            x = async { 5 } => x,
+            x = async { 6 } => x,
+            x = async { 7 } => x,
+            x = async { 8 } => x,
+            x = async { 9 } => x,
+        )
+    }
+}
diff --git a/tokio/tests/rt_basic.rs b/tokio/tests/rt_basic.rs
index 0cb92487af3..2f441865941 100644
--- a/tokio/tests/rt_basic.rs
+++ b/tokio/tests/rt_basic.rs
@@ -293,7 +293,7 @@ fn timeout_panics_when_no_time_handle() {
 
 #[cfg(tokio_unstable)]
 mod unstable {
-    use tokio::runtime::{Builder, UnhandledPanic};
+    use tokio::runtime::{Builder, RngSeed, UnhandledPanic};
 
     #[test]
     #[should_panic(
@@ -381,6 +381,48 @@ mod unstable {
             assert!(th.join().is_err());
         }
     }
+
+    #[test]
+    fn rng_seed() {
+        let seed = b"bytes used to generate seed";
+        let rt = tokio::runtime::Builder::new_current_thread()
+            .rng_seed(RngSeed::from_bytes(seed))
+            .build()
+            .unwrap();
+
+        rt.block_on(async {
+            let random = tokio::macros::support::thread_rng_n(100);
+            assert_eq!(random, 59);
+
+            let random = tokio::macros::support::thread_rng_n(100);
+            assert_eq!(random, 10);
+        });
+    }
+
+    #[test]
+    fn rng_seed_multi_enter() {
+        let seed = b"bytes used to generate seed";
+        let rt = tokio::runtime::Builder::new_current_thread()
+            .rng_seed(RngSeed::from_bytes(seed))
+            .build()
+            .unwrap();
+
+        rt.block_on(async {
+            let random = tokio::macros::support::thread_rng_n(100);
+            assert_eq!(random, 59);
+
+            let random = tokio::macros::support::thread_rng_n(100);
+            assert_eq!(random, 10);
+        });
+
+        rt.block_on(async {
+            let random = tokio::macros::support::thread_rng_n(100);
+            assert_eq!(random, 86);
+
+            let random = tokio::macros::support::thread_rng_n(100);
+            assert_eq!(random, 1);
+        });
+    }
 }
 
 fn rt() -> Runtime {
diff --git a/tokio/tests/rt_handle_block_on.rs b/tokio/tests/rt_handle_block_on.rs
index b5d5889d470..5ec783e5588 100644
--- a/tokio/tests/rt_handle_block_on.rs
+++ b/tokio/tests/rt_handle_block_on.rs
@@ -505,39 +505,30 @@ where
     F: Fn(),
 {
     {
-        println!("current thread runtime");
-
         let rt = new_current_thread();
         let _enter = rt.enter();
         f();
 
-        println!("current thread runtime after shutdown");
         rt.shutdown_timeout(Duration::from_secs(1000));
         f();
     }
 
     #[cfg(not(tokio_wasi))]
     {
-        println!("multi thread (1 thread) runtime");
-
         let rt = new_multi_thread(1);
         let _enter = rt.enter();
         f();
 
-        println!("multi thread runtime after shutdown");
         rt.shutdown_timeout(Duration::from_secs(1000));
         f();
     }
 
     #[cfg(not(tokio_wasi))]
     {
-        println!("multi thread (4 threads) runtime");
-
         let rt = new_multi_thread(4);
         let _enter = rt.enter();
         f();
 
-        println!("multi thread runtime after shutdown");
         rt.shutdown_timeout(Duration::from_secs(1000));
         f();
     }
diff --git a/tokio/tests/rt_threaded.rs b/tokio/tests/rt_threaded.rs
index f2fce0800dd..9a8644af6fe 100644
--- a/tokio/tests/rt_threaded.rs
+++ b/tokio/tests/rt_threaded.rs
@@ -480,9 +480,7 @@ fn wake_during_shutdown() {
         fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
             let me = Pin::into_inner(self);
             let mut lock = me.shared.lock().unwrap();
-            println!("poll {}", me.put_waker);
             if me.put_waker {
-                println!("putting");
                 lock.waker = Some(cx.waker().clone());
             }
             Poll::Pending
@@ -491,13 +489,11 @@ fn wake_during_shutdown() {
 
     impl Drop for MyFuture {
         fn drop(&mut self) {
-            println!("drop {} start", self.put_waker);
             let mut lock = self.shared.lock().unwrap();
             if !self.put_waker {
                 lock.waker.take().unwrap().wake();
             }
             drop(lock);
-            println!("drop {} stop", self.put_waker);
         }
     }
 
@@ -546,6 +542,7 @@ fn rt() -> runtime::Runtime {
 #[cfg(tokio_unstable)]
 mod unstable {
     use super::*;
+    use tokio::runtime::RngSeed;
 
     #[test]
     fn test_disable_lifo_slot() {
@@ -565,4 +562,27 @@ mod unstable {
             .unwrap();
         })
     }
+
+    #[test]
+    fn rng_seed() {
+        let seed = b"bytes used to generate seed";
+        let rt = tokio::runtime::Builder::new_multi_thread()
+            .worker_threads(1)
+            .rng_seed(RngSeed::from_bytes(seed))
+            .build()
+            .unwrap();
+
+        rt.block_on(async {
+            let random = tokio::macros::support::thread_rng_n(100);
+            assert_eq!(random, 86);
+
+            let _ = tokio::spawn(async {
+                // Because we only have a single worker thread, the
+                // RNG will be deterministic here as well.
+                let random = tokio::macros::support::thread_rng_n(100);
+                assert_eq!(random, 64);
+            })
+            .await;
+        });
+    }
 }
diff --git a/tokio/tests/sync_mpsc.rs b/tokio/tests/sync_mpsc.rs
index 24f078c62b1..5b15f75f5c3 100644
--- a/tokio/tests/sync_mpsc.rs
+++ b/tokio/tests/sync_mpsc.rs
@@ -15,6 +15,7 @@ use tokio::sync::mpsc::{self, channel};
 use tokio::sync::oneshot;
 use tokio_test::*;
 
+use std::fmt;
 use std::sync::atomic::AtomicUsize;
 use std::sync::atomic::Ordering::{Acquire, Release};
 use std::sync::Arc;
@@ -220,9 +221,9 @@ async fn no_t_bounds_buffer() {
     let (tx, mut rx) = mpsc::channel(100);
 
     // sender should be Debug even though T isn't Debug
-    println!("{:?}", tx);
+    is_debug(&tx);
     // same with Receiver
-    println!("{:?}", rx);
+    is_debug(&rx);
     // and sender should be Clone even though T isn't Clone
     assert!(tx.clone().try_send(NoImpls).is_ok());
 
@@ -236,9 +237,9 @@ async fn no_t_bounds_unbounded() {
     let (tx, mut rx) = mpsc::unbounded_channel();
 
     // sender should be Debug even though T isn't Debug
-    println!("{:?}", tx);
+    is_debug(&tx);
     // same with Receiver
-    println!("{:?}", rx);
+    is_debug(&rx);
     // and sender should be Clone even though T isn't Clone
     assert!(tx.clone().send(NoImpls).is_ok());
 
@@ -940,3 +941,5 @@ async fn test_tx_capacity() {
     assert_eq!(tx.capacity(), 8);
     assert_eq!(tx.max_capacity(), 10);
 }
+
+fn is_debug<T: fmt::Debug>(_: &T) {}
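Swapping `println!` for `is_debug` turns a runtime side effect into a pure compile-time bound check: the test now fails to build, rather than print, if the bound is lost. A standalone sketch of the pattern, with hypothetical types for illustration:

    use std::fmt;
    use std::marker::PhantomData;

    // A wrapper whose type parameter carries no bounds at all.
    struct Wrapper<T> {
        _inner: PhantomData<T>,
    }

    // Debug for every T, regardless of whether T itself is Debug.
    impl<T> fmt::Debug for Wrapper<T> {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            f.write_str("Wrapper { .. }")
        }
    }

    // Compiles only if `T: Debug`; does no work and produces no output.
    fn is_debug<T: fmt::Debug>(_: &T) {}

    struct NotDebug; // deliberately does not derive Debug

    fn main() {
        let w: Wrapper<NotDebug> = Wrapper { _inner: PhantomData };
        // Passes the bound check because `Wrapper<T>: Debug` for every T.
        is_debug(&w);
    }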
diff --git a/tokio/tests/task_abort.rs b/tokio/tests/task_abort.rs
index 9005c3f4d36..492f8b551a3 100644
--- a/tokio/tests/task_abort.rs
+++ b/tokio/tests/task_abort.rs
@@ -26,10 +26,7 @@ fn test_abort_without_panic_3157() {
         .unwrap();
 
     rt.block_on(async move {
-        let handle = tokio::spawn(async move {
-            println!("task started");
-            tokio::time::sleep(Duration::new(100, 0)).await
-        });
+        let handle = tokio::spawn(async move { tokio::time::sleep(Duration::new(100, 0)).await });
 
         // wait for task to sleep.
         tokio::time::sleep(Duration::from_millis(10)).await;
@@ -159,7 +156,6 @@ fn test_abort_wakes_task_3964() {
         let handle = tokio::spawn(async move {
             // Make sure the Arc is moved into the task
             let _notify_dropped = notify_dropped;
-            println!("task started");
             tokio::time::sleep(Duration::new(100, 0)).await
         });
 
@@ -187,7 +183,6 @@ fn test_abort_task_that_panics_on_drop_contained() {
         let handle = tokio::spawn(async move {
             // Make sure the Arc is moved into the task
             let _panic_dropped = PanicOnDrop;
-            println!("task started");
             tokio::time::sleep(Duration::new(100, 0)).await
         });
 
@@ -211,7 +206,6 @@ fn test_abort_task_that_panics_on_drop_returned() {
         let handle = tokio::spawn(async move {
             // Make sure the Arc is moved into the task
             let _panic_dropped = PanicOnDrop;
-            println!("task started");
             tokio::time::sleep(Duration::new(100, 0)).await
         });
diff --git a/tokio/tests/task_local_set.rs b/tokio/tests/task_local_set.rs
index b6245acf77a..a0ade93412e 100644
--- a/tokio/tests/task_local_set.rs
+++ b/tokio/tests/task_local_set.rs
@@ -284,14 +284,12 @@ fn join_local_future_elsewhere() {
     local.block_on(&rt, async move {
         let (tx, rx) = oneshot::channel();
         let join = task::spawn_local(async move {
-            println!("hello world running...");
             assert!(
                 ON_RT_THREAD.with(|cell| cell.get()),
                 "local task must run on local thread, no matter where it is awaited"
             );
             rx.await.unwrap();
 
-            println!("hello world task done");
             "hello world"
         });
         let join2 = task::spawn(async move {
@@ -301,16 +299,34 @@ fn join_local_future_elsewhere() {
             );
 
             tx.send(()).expect("task shouldn't have ended yet");
-            println!("waking up hello world...");
 
             join.await.expect("task should complete successfully");
-
-            println!("hello world task joined");
         });
         join2.await.unwrap()
     });
 }
 
+// Tests for
+#[cfg(not(tokio_wasi))] // Wasi doesn't support threads
+#[tokio::test(flavor = "multi_thread")]
+async fn localset_in_thread_local() {
+    thread_local! {
+        static LOCAL_SET: LocalSet = LocalSet::new();
+    }
+
+    // holds runtime thread until end of main fn.
+    let (_tx, rx) = oneshot::channel::<()>();
+    let handle = tokio::runtime::Handle::current();
+
+    std::thread::spawn(move || {
+        LOCAL_SET.with(|local_set| {
+            handle.block_on(local_set.run_until(async move {
+                let _ = rx.await;
+            }))
+        });
+    });
+}
+
 #[test]
 fn drop_cancels_tasks() {
     use std::rc::Rc;
@@ -374,9 +390,7 @@ fn with_timeout(timeout: Duration, f: impl FnOnce() + Send + 'static) {
         ),
         // Did the test thread panic? We'll find out for sure when we `join`
         // with it.
-        Err(RecvTimeoutError::Disconnected) => {
-            println!("done_rx dropped, did the test thread panic?");
-        }
+        Err(RecvTimeoutError::Disconnected) => {}
        // Test completed successfully!
        Ok(()) => {}
    }
@@ -489,7 +503,6 @@ async fn local_tasks_are_polled_after_tick_inner() {
         time::sleep(Duration::from_millis(20)).await;
         let rx1 = RX1.load(SeqCst);
         let rx2 = RX2.load(SeqCst);
-        println!("EXPECT = {}; RX1 = {}; RX2 = {}", EXPECTED, rx1, rx2);
         assert_eq!(EXPECTED, rx1);
         assert_eq!(EXPECTED, rx2);
     });
diff --git a/tokio/tests/task_panic.rs b/tokio/tests/task_panic.rs
index 451243d2e6b..126195222e5 100644
--- a/tokio/tests/task_panic.rs
+++ b/tokio/tests/task_panic.rs
@@ -3,13 +3,43 @@
 use futures::future;
 use std::error::Error;
-use tokio::{runtime::Builder, spawn, task};
+use tokio::runtime::Builder;
+use tokio::task::{self, block_in_place};
 
 mod support {
     pub mod panic;
 }
 use support::panic::test_panic;
 
+#[test]
+fn block_in_place_panic_caller() -> Result<(), Box<dyn Error>> {
+    let panic_location_file = test_panic(|| {
+        let rt = Builder::new_current_thread().enable_all().build().unwrap();
+        rt.block_on(async {
+            block_in_place(|| {});
+        });
+    });
+
+    // The panic location should be in this file
+    assert_eq!(&panic_location_file.unwrap(), file!());
+
+    Ok(())
+}
+
+#[test]
+fn local_set_spawn_local_panic_caller() -> Result<(), Box<dyn Error>> {
+    let panic_location_file = test_panic(|| {
+        let _local = task::LocalSet::new();
+
+        let _ = task::spawn_local(async {});
+    });
+
+    // The panic location should be in this file
+    assert_eq!(&panic_location_file.unwrap(), file!());
+
+    Ok(())
+}
+
 #[test]
 fn local_set_block_on_panic_caller() -> Result<(), Box<dyn Error>> {
     let panic_location_file = test_panic(|| {
@@ -30,7 +60,7 @@ fn local_set_block_on_panic_caller() -> Result<(), Box<dyn Error>> {
 #[test]
 fn spawn_panic_caller() -> Result<(), Box<dyn Error>> {
     let panic_location_file = test_panic(|| {
-        spawn(future::pending::<()>());
+        tokio::spawn(future::pending::<()>());
     });
 
     // The panic location should be in this file
diff --git a/tokio/tests/time_sleep.rs b/tokio/tests/time_sleep.rs
index 93f63503821..3c628f855a0 100644
--- a/tokio/tests/time_sleep.rs
+++ b/tokio/tests/time_sleep.rs
@@ -189,10 +189,7 @@ async fn greater_than_max() {
 
 #[tokio::test]
 async fn short_sleeps() {
-    for i in 0..10000 {
-        if (i % 10) == 0 {
-            eprintln!("=== {}", i);
-        }
+    for _ in 0..10000 {
         tokio::time::sleep(std::time::Duration::from_millis(0)).await;
     }
 }
diff --git a/tokio/tests/uds_datagram.rs b/tokio/tests/uds_datagram.rs
index 5e5486ba332..701c4d467e6 100644
--- a/tokio/tests/uds_datagram.rs
+++ b/tokio/tests/uds_datagram.rs
@@ -29,9 +29,7 @@ async fn echo() -> io::Result<()> {
     let server_socket = UnixDatagram::bind(server_path.clone())?;
 
     tokio::spawn(async move {
-        if let Err(e) = echo_server(server_socket).await {
-            eprintln!("Error in echo server: {}", e);
-        }
+        let _ = echo_server(server_socket).await;
     });
 
     {
@@ -55,9 +53,7 @@ async fn echo_from() -> io::Result<()> {
     let server_socket = UnixDatagram::bind(server_path.clone())?;
 
     tokio::spawn(async move {
-        if let Err(e) = echo_server(server_socket).await {
-            eprintln!("Error in echo server: {}", e);
-        }
+        let _ = echo_server(server_socket).await;
    });
 
     {
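The new `*_panic_caller` tests assert that a misuse panic is attributed to the user's call site rather than to Tokio's internals. The mechanism they exercise is `#[track_caller]`; a minimal standalone sketch follows, with a hypothetical `capture_panic_file` helper standing in for the test suite's `test_panic` (whose body is not shown in this patch):

    use std::panic::{self, UnwindSafe};
    use std::sync::{Arc, Mutex};

    // With `#[track_caller]`, a panic raised in this function body is
    // reported at the *caller's* file/line, not here.
    #[track_caller]
    fn checked_op(fail: bool) {
        if fail {
            panic!("invalid input");
        }
    }

    // Hypothetical stand-in for `test_panic`: run `f`, record the file
    // the panic hook reports, and return it.
    fn capture_panic_file(f: impl FnOnce() + UnwindSafe) -> Option<String> {
        let file = Arc::new(Mutex::new(None));
        let file2 = file.clone();
        let old_hook = panic::take_hook();
        panic::set_hook(Box::new(move |info| {
            *file2.lock().unwrap() = info.location().map(|l| l.file().to_string());
        }));
        let _ = panic::catch_unwind(f);
        panic::set_hook(old_hook);
        let out = file.lock().unwrap().take();
        out
    }

    fn main() {
        let file = capture_panic_file(|| checked_op(true));
        // The panic is attributed to this file, not to `checked_op`'s body,
        // which is exactly what the `assert_eq!(..., file!())` tests check.
        assert_eq!(file.as_deref(), Some(file!()));
    }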