diff -Nru rust-parking-lot-0.9.0/appveyor.yml rust-parking-lot-0.10.0/appveyor.yml --- rust-parking-lot-0.9.0/appveyor.yml 2019-07-14 12:52:34.000000000 +0000 +++ rust-parking-lot-0.10.0/appveyor.yml 2019-11-09 11:27:25.000000000 +0000 @@ -25,19 +25,19 @@ - TARGET: x86_64-pc-windows-msvc MSYSTEM: MINGW64 CPU: x86_64 - TOOLCHAIN: 1.32.0 + TOOLCHAIN: 1.36.0 - TARGET: i686-pc-windows-msvc MSYSTEM: MINGW32 CPU: i686 - TOOLCHAIN: 1.32.0 + TOOLCHAIN: 1.36.0 - TARGET: x86_64-pc-windows-gnu MSYSTEM: MINGW64 CPU: x86_64 - TOOLCHAIN: 1.32.0 + TOOLCHAIN: 1.36.0 - TARGET: i686-pc-windows-gnu MSYSTEM: MINGW32 CPU: i686 - TOOLCHAIN: 1.32.0 + TOOLCHAIN: 1.36.0 install: - set PATH=C:\msys64\%MSYSTEM%\bin;c:\msys64\usr\bin;%PATH% diff -Nru rust-parking-lot-0.9.0/build.rs rust-parking-lot-0.10.0/build.rs --- rust-parking-lot-0.9.0/build.rs 2019-05-04 09:27:08.000000000 +0000 +++ rust-parking-lot-0.10.0/build.rs 1970-01-01 00:00:00.000000000 +0000 @@ -1,8 +0,0 @@ -use rustc_version::{version, Version}; - -fn main() { - if version().unwrap() >= Version::parse("1.34.0").unwrap() { - println!("cargo:rustc-cfg=has_sized_atomics"); - println!("cargo:rustc-cfg=has_checked_instant"); - } -} diff -Nru rust-parking-lot-0.9.0/Cargo.toml rust-parking-lot-0.10.0/Cargo.toml --- rust-parking-lot-0.9.0/Cargo.toml 1970-01-01 00:00:00.000000000 +0000 +++ rust-parking-lot-0.10.0/Cargo.toml 2019-11-25 21:16:48.000000000 +0000 @@ -13,7 +13,7 @@ [package] edition = "2018" name = "parking_lot" -version = "0.9.0" +version = "0.10.0" authors = ["Amanieu d'Antras "] description = "More compact and efficient implementations of the standard synchronization primitives." readme = "README.md" @@ -25,7 +25,7 @@ version = "0.3.1" [dependencies.parking_lot_core] -version = "0.6" +version = "0.7.0" [dev-dependencies.bincode] version = "1.1.3" @@ -34,8 +34,6 @@ [dev-dependencies.rand] version = "0.7" -[build-dependencies.rustc_version] -version = "0.2" [features] deadlock_detection = ["parking_lot_core/deadlock_detection"] diff -Nru rust-parking-lot-0.9.0/Cargo.toml.orig rust-parking-lot-0.10.0/Cargo.toml.orig --- rust-parking-lot-0.9.0/Cargo.toml.orig 2019-07-14 12:52:34.000000000 +0000 +++ rust-parking-lot-0.10.0/Cargo.toml.orig 2019-11-25 21:09:35.000000000 +0000 @@ -1,6 +1,6 @@ [package] name = "parking_lot" -version = "0.9.0" +version = "0.10.0" authors = ["Amanieu d'Antras "] description = "More compact and efficient implementations of the standard synchronization primitives." license = "Apache-2.0/MIT" @@ -11,7 +11,7 @@ edition = "2018" [dependencies] -parking_lot_core = { path = "core", version = "0.6" } +parking_lot_core = { path = "core", version = "0.7.0" } lock_api = { path = "lock_api", version = "0.3.1" } [dev-dependencies] @@ -21,9 +21,6 @@ # Used when testing out serde support. 
bincode = {version = "1.1.3"} -[build-dependencies] -rustc_version = "0.2" - [features] default = [] owning_ref = ["lock_api/owning_ref"] diff -Nru rust-parking-lot-0.9.0/.cargo_vcs_info.json rust-parking-lot-0.10.0/.cargo_vcs_info.json --- rust-parking-lot-0.9.0/.cargo_vcs_info.json 1970-01-01 00:00:00.000000000 +0000 +++ rust-parking-lot-0.10.0/.cargo_vcs_info.json 2019-11-25 21:16:48.000000000 +0000 @@ -1,5 +1,5 @@ { "git": { - "sha1": "1dbf100fbcc9a2722ac12cc10c153e2eb807a0ce" + "sha1": "bbbb57633f74ae27fc23cf4b6aee24e9d3449a10" } } diff -Nru rust-parking-lot-0.9.0/CHANGELOG.md rust-parking-lot-0.10.0/CHANGELOG.md --- rust-parking-lot-0.9.0/CHANGELOG.md 2019-07-14 12:52:45.000000000 +0000 +++ rust-parking-lot-0.10.0/CHANGELOG.md 2019-11-25 21:09:35.000000000 +0000 @@ -1,8 +1,20 @@ +## parking_lot 0.10.0, parking_lot_core 0.7.0, lock_api 0.3.2 (2019-11-25) + +- Upgrade smallvec dependency to 1.0 in parking_lot_core. +- Replace all usage of `mem::unitialized` with `mem::MaybeUninit`. +- The minimum required Rust version is bumped to 1.36. Because of the above two changes. +- Make methods on `WaitTimeoutResult` and `OnceState` take `self` by value instead of reference. + +## parking_lot_core 0.6.2 (2019-07-22) + +- Fixed compile error on Windows with old cfg_if version. (#164) + +## parking_lot_core 0.6.1 (2019-07-17) + +- Fixed Android build. (#163) + ## parking_lot 0.9.0, parking_lot_core 0.6.0, lock_api 0.3.1 (2019-07-14) -- The minimum supported rust version (MSRV) is now 1.32. This was primarily - increased for testing with the latest _rand_ crate. Rust 1.31 may continue to - work for normal use of these releases. - Re-export lock_api (0.3.1) from parking_lot (#150) - Removed (non-dev) dependency on rand crate for fairness mechanism, by including a simple xorshift PRNG in core (#144) diff -Nru rust-parking-lot-0.9.0/debian/cargo-checksum.json rust-parking-lot-0.10.0/debian/cargo-checksum.json --- rust-parking-lot-0.9.0/debian/cargo-checksum.json 2019-09-05 06:27:36.000000000 +0000 +++ rust-parking-lot-0.10.0/debian/cargo-checksum.json 2020-01-08 22:40:18.000000000 +0000 @@ -1 +1 @@ -{"package":"f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252","files":{}} +{"package":"92e98c49ab0b7ce5b222f2cc9193fc4efe11c6d0bd4f648e374684a6857b1cfc","files":{}} diff -Nru rust-parking-lot-0.9.0/debian/changelog rust-parking-lot-0.10.0/debian/changelog --- rust-parking-lot-0.9.0/debian/changelog 2019-09-05 06:27:36.000000000 +0000 +++ rust-parking-lot-0.10.0/debian/changelog 2020-01-08 22:40:18.000000000 +0000 @@ -1,3 +1,10 @@ +rust-parking-lot (0.10.0-1) unstable; urgency=medium + + * Team upload. + * Package parking_lot 0.10.0 from crates.io using debcargo 2.4.2 + + -- Ximin Luo Wed, 08 Jan 2020 22:40:18 +0000 + rust-parking-lot (0.9.0-1) unstable; urgency=medium * Team upload. 
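The new changelog entries above explain the 1.36 MSRV bump: the smallvec 1.0 upgrade and the switch from the deprecated `mem::uninitialized` to `mem::MaybeUninit` (stabilized in Rust 1.36) both require it. As a rough sketch of what that migration looks like in general, not code taken from this crate (the `demo` function is purely illustrative):

```rust
use std::mem::MaybeUninit;

fn demo() -> u8 {
    // Reserve storage without initializing it; this replaces the old
    // `let slot: u8 = unsafe { mem::uninitialized() };` pattern.
    let mut slot: MaybeUninit<u8> = MaybeUninit::uninit();
    unsafe {
        // Initialize through the raw pointer, then assert that the value is initialized.
        slot.as_mut_ptr().write(42);
        slot.assume_init()
    }
}

fn main() {
    assert_eq!(demo(), 42);
}
```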
diff -Nru rust-parking-lot-0.9.0/debian/control rust-parking-lot-0.10.0/debian/control --- rust-parking-lot-0.9.0/debian/control 2019-09-05 06:27:36.000000000 +0000 +++ rust-parking-lot-0.10.0/debian/control 2020-01-08 22:40:18.000000000 +0000 @@ -7,13 +7,12 @@ rustc:native , libstd-rust-dev , librust-lock-api-0.3+default-dev (>= 0.3.1-~~) , - librust-parking-lot-core-0.6+default-dev , - librust-rustc-version-0.2+default-dev + librust-parking-lot-core-0.7+default-dev Maintainer: Debian Rust Maintainers Uploaders: Sylvestre Ledru , Wolfgang Silbermayr -Standards-Version: 4.2.0 +Standards-Version: 4.4.1 Vcs-Git: https://salsa.debian.org/rust-team/debcargo-conf.git [src/parking-lot] Vcs-Browser: https://salsa.debian.org/rust-team/debcargo-conf/tree/master/src/parking-lot X-Cargo-Crate: parking_lot @@ -24,8 +23,7 @@ Depends: ${misc:Depends}, librust-lock-api-0.3+default-dev (>= 0.3.1-~~), - librust-parking-lot-core-0.6+default-dev, - librust-rustc-version-0.2+default-dev + librust-parking-lot-core-0.7+default-dev Suggests: librust-parking-lot+deadlock-detection-dev (= ${binary:Version}), librust-parking-lot+nightly-dev (= ${binary:Version}), @@ -35,10 +33,10 @@ librust-parking-lot+default-dev (= ${binary:Version}), librust-parking-lot-0-dev (= ${binary:Version}), librust-parking-lot-0+default-dev (= ${binary:Version}), - librust-parking-lot-0.9-dev (= ${binary:Version}), - librust-parking-lot-0.9+default-dev (= ${binary:Version}), - librust-parking-lot-0.9.0-dev (= ${binary:Version}), - librust-parking-lot-0.9.0+default-dev (= ${binary:Version}) + librust-parking-lot-0.10-dev (= ${binary:Version}), + librust-parking-lot-0.10+default-dev (= ${binary:Version}), + librust-parking-lot-0.10.0-dev (= ${binary:Version}), + librust-parking-lot-0.10.0+default-dev (= ${binary:Version}) Description: Compact, efficient std sync primitives - Rust source code This package contains the source for the Rust parking_lot crate, packaged by debcargo for use with cargo and dh-cargo. @@ -49,11 +47,11 @@ Depends: ${misc:Depends}, librust-parking-lot-dev (= ${binary:Version}), - librust-parking-lot-core-0.6+deadlock-detection-dev + librust-parking-lot-core-0.7+deadlock-detection-dev Provides: librust-parking-lot-0+deadlock-detection-dev (= ${binary:Version}), - librust-parking-lot-0.9+deadlock-detection-dev (= ${binary:Version}), - librust-parking-lot-0.9.0+deadlock-detection-dev (= ${binary:Version}) + librust-parking-lot-0.10+deadlock-detection-dev (= ${binary:Version}), + librust-parking-lot-0.10.0+deadlock-detection-dev (= ${binary:Version}) Description: Compact, efficient std sync primitives - feature "deadlock_detection" This metapackage enables feature "deadlock_detection" for the Rust parking_lot crate, by pulling in any additional dependencies needed by that feature. @@ -65,11 +63,11 @@ ${misc:Depends}, librust-parking-lot-dev (= ${binary:Version}), librust-lock-api-0.3+nightly-dev (>= 0.3.1-~~), - librust-parking-lot-core-0.6+nightly-dev + librust-parking-lot-core-0.7+nightly-dev Provides: librust-parking-lot-0+nightly-dev (= ${binary:Version}), - librust-parking-lot-0.9+nightly-dev (= ${binary:Version}), - librust-parking-lot-0.9.0+nightly-dev (= ${binary:Version}) + librust-parking-lot-0.10+nightly-dev (= ${binary:Version}), + librust-parking-lot-0.10.0+nightly-dev (= ${binary:Version}) Description: Compact, efficient std sync primitives - feature "nightly" This metapackage enables feature "nightly" for the Rust parking_lot crate, by pulling in any additional dependencies needed by that feature. 
@@ -83,8 +81,8 @@ librust-lock-api-0.3+owning-ref-dev (>= 0.3.1-~~) Provides: librust-parking-lot-0+owning-ref-dev (= ${binary:Version}), - librust-parking-lot-0.9+owning-ref-dev (= ${binary:Version}), - librust-parking-lot-0.9.0+owning-ref-dev (= ${binary:Version}) + librust-parking-lot-0.10+owning-ref-dev (= ${binary:Version}), + librust-parking-lot-0.10.0+owning-ref-dev (= ${binary:Version}) Description: Compact, efficient std sync primitives - feature "owning_ref" This metapackage enables feature "owning_ref" for the Rust parking_lot crate, by pulling in any additional dependencies needed by that feature. @@ -98,8 +96,8 @@ librust-lock-api-0.3+serde-dev (>= 0.3.1-~~) Provides: librust-parking-lot-0+serde-dev (= ${binary:Version}), - librust-parking-lot-0.9+serde-dev (= ${binary:Version}), - librust-parking-lot-0.9.0+serde-dev (= ${binary:Version}) + librust-parking-lot-0.10+serde-dev (= ${binary:Version}), + librust-parking-lot-0.10.0+serde-dev (= ${binary:Version}) Description: Compact, efficient std sync primitives - feature "serde" This metapackage enables feature "serde" for the Rust parking_lot crate, by pulling in any additional dependencies needed by that feature. diff -Nru rust-parking-lot-0.9.0/debian/copyright.debcargo.hint rust-parking-lot-0.10.0/debian/copyright.debcargo.hint --- rust-parking-lot-0.9.0/debian/copyright.debcargo.hint 2019-09-05 06:27:36.000000000 +0000 +++ rust-parking-lot-0.10.0/debian/copyright.debcargo.hint 2020-01-08 22:40:18.000000000 +0000 @@ -91,9 +91,9 @@ Files: debian/* Copyright: - 2018-2019 Debian Rust Maintainers - 2018-2019 Sylvestre Ledru - 2018-2019 Wolfgang Silbermayr + 2018-2020 Debian Rust Maintainers + 2018-2020 Sylvestre Ledru + 2018-2020 Wolfgang Silbermayr License: Apache-2.0 or MIT License: Apache-2.0 diff -Nru rust-parking-lot-0.9.0/debian/tests/control rust-parking-lot-0.10.0/debian/tests/control --- rust-parking-lot-0.9.0/debian/tests/control 2019-09-05 06:27:36.000000000 +0000 +++ rust-parking-lot-0.10.0/debian/tests/control 2020-01-08 22:40:18.000000000 +0000 @@ -1,23 +1,29 @@ -Test-Command: /usr/share/cargo/bin/cargo-auto-test parking_lot 0.9.0 --all-targets --all-features +Test-Command: /usr/share/cargo/bin/cargo-auto-test parking_lot 0.10.0 --all-targets --all-features +Features: test-name=@ Depends: dh-cargo (>= 18), librust-bincode-1+default-dev (>= 1.1.3-~~), librust-lazy-static-1+default-dev, librust-rand-0.7+default-dev, @ Restrictions: allow-stderr, skip-not-installable -Test-Command: /usr/share/cargo/bin/cargo-auto-test parking_lot 0.9.0 --all-targets --no-default-features -Depends: dh-cargo (>= 18), librust-bincode-1+default-dev (>= 1.1.3-~~), librust-lazy-static-1+default-dev, librust-rand-0.7+default-dev, librust-parking-lot-dev +Test-Command: /usr/share/cargo/bin/cargo-auto-test parking_lot 0.10.0 --all-targets --no-default-features +Features: test-name=librust-parking-lot-dev +Depends: dh-cargo (>= 18), librust-bincode-1+default-dev (>= 1.1.3-~~), librust-lazy-static-1+default-dev, librust-rand-0.7+default-dev, @ Restrictions: allow-stderr, skip-not-installable -Test-Command: /usr/share/cargo/bin/cargo-auto-test parking_lot 0.9.0 --all-targets --features deadlock_detection -Depends: dh-cargo (>= 18), librust-bincode-1+default-dev (>= 1.1.3-~~), librust-lazy-static-1+default-dev, librust-rand-0.7+default-dev, librust-parking-lot+deadlock-detection-dev +Test-Command: /usr/share/cargo/bin/cargo-auto-test parking_lot 0.10.0 --all-targets --features deadlock_detection +Features: 
test-name=librust-parking-lot+deadlock-detection-dev +Depends: dh-cargo (>= 18), librust-bincode-1+default-dev (>= 1.1.3-~~), librust-lazy-static-1+default-dev, librust-rand-0.7+default-dev, @ Restrictions: allow-stderr, skip-not-installable -Test-Command: /usr/share/cargo/bin/cargo-auto-test parking_lot 0.9.0 --all-targets --features nightly -Depends: dh-cargo (>= 18), librust-bincode-1+default-dev (>= 1.1.3-~~), librust-lazy-static-1+default-dev, librust-rand-0.7+default-dev, librust-parking-lot+nightly-dev +Test-Command: /usr/share/cargo/bin/cargo-auto-test parking_lot 0.10.0 --all-targets --features nightly +Features: test-name=librust-parking-lot+nightly-dev +Depends: dh-cargo (>= 18), librust-bincode-1+default-dev (>= 1.1.3-~~), librust-lazy-static-1+default-dev, librust-rand-0.7+default-dev, @ Restrictions: allow-stderr, skip-not-installable -Test-Command: /usr/share/cargo/bin/cargo-auto-test parking_lot 0.9.0 --all-targets --features owning_ref -Depends: dh-cargo (>= 18), librust-bincode-1+default-dev (>= 1.1.3-~~), librust-lazy-static-1+default-dev, librust-rand-0.7+default-dev, librust-parking-lot+owning-ref-dev +Test-Command: /usr/share/cargo/bin/cargo-auto-test parking_lot 0.10.0 --all-targets --features owning_ref +Features: test-name=librust-parking-lot+owning-ref-dev +Depends: dh-cargo (>= 18), librust-bincode-1+default-dev (>= 1.1.3-~~), librust-lazy-static-1+default-dev, librust-rand-0.7+default-dev, @ Restrictions: allow-stderr, skip-not-installable -Test-Command: /usr/share/cargo/bin/cargo-auto-test parking_lot 0.9.0 --all-targets --features serde -Depends: dh-cargo (>= 18), librust-bincode-1+default-dev (>= 1.1.3-~~), librust-lazy-static-1+default-dev, librust-rand-0.7+default-dev, librust-parking-lot+serde-dev +Test-Command: /usr/share/cargo/bin/cargo-auto-test parking_lot 0.10.0 --all-targets --features serde +Features: test-name=librust-parking-lot+serde-dev +Depends: dh-cargo (>= 18), librust-bincode-1+default-dev (>= 1.1.3-~~), librust-lazy-static-1+default-dev, librust-rand-0.7+default-dev, @ Restrictions: allow-stderr, skip-not-installable diff -Nru rust-parking-lot-0.9.0/debian/watch rust-parking-lot-0.10.0/debian/watch --- rust-parking-lot-0.9.0/debian/watch 2019-09-05 06:27:36.000000000 +0000 +++ rust-parking-lot-0.10.0/debian/watch 2020-01-08 22:40:18.000000000 +0000 @@ -2,4 +2,3 @@ opts=filenamemangle=s/.*\/(.*)\/download/parking_lot-$1\.tar\.gz/g,\ uversionmangle=s/(\d)[_\.\-\+]?((RC|rc|pre|dev|beta|alpha)\d*)$/$1~$2/ \ https://qa.debian.org/cgi-bin/fakeupstream.cgi?upstream=crates.io/parking_lot .*/crates/parking_lot/@ANY_VERSION@/download - diff -Nru rust-parking-lot-0.9.0/README.md rust-parking-lot-0.10.0/README.md --- rust-parking-lot-0.9.0/README.md 2019-07-14 12:52:34.000000000 +0000 +++ rust-parking-lot-0.10.0/README.md 2019-11-25 21:09:35.000000000 +0000 @@ -34,7 +34,7 @@ parallelism. 2. Since they consist of just a single atomic variable, have constant initializers and don't need destructors, these primitives can be used as - `static` global variables. The standard library primitives require + `static` global variables. The standard library primitives require dynamic initialization and thus need to be lazily initialized with `lazy_static!`. 3. Uncontended lock acquisition and release is done through fast inline @@ -87,9 +87,8 @@ There are a few restrictions when using this library on stable Rust: -- `Mutex` and `Once` will use 1 word of space instead of 1 byte. 
-- You will have to use `lazy_static!` to statically initialize `Mutex`, - `Condvar` and `RwLock` types instead of `const fn`. +- You will have to use `lazy_static!` or equivalent to statically initialize `Mutex` + and `RwLock` types. They use generics and can't be `const fn`s on stable yet. - `RwLock` will not be able to take advantage of hardware lock elision for readers, which improves performance when there are multiple readers. @@ -102,7 +101,7 @@ ```toml [dependencies] -parking_lot = "0.9" +parking_lot = "0.10" ``` and this to your crate root: @@ -115,7 +114,7 @@ ```toml [dependencies] -parking_lot = {version = "0.9", features = ["nightly"]} +parking_lot = { version = "0.10", features = ["nightly"] } ``` The experimental deadlock detector can be enabled with the @@ -127,7 +126,7 @@ ## Minimum Rust version -The current minimum required Rust version is 1.32. Any change to this is +The current minimum required Rust version is 1.36. Any change to this is considered a breaking change and will require a major version bump. ## License diff -Nru rust-parking-lot-0.9.0/src/condvar.rs rust-parking-lot-0.10.0/src/condvar.rs --- rust-parking-lot-0.9.0/src/condvar.rs 2019-07-02 22:27:45.000000000 +0000 +++ rust-parking-lot-0.10.0/src/condvar.rs 2019-11-25 21:09:35.000000000 +0000 @@ -12,7 +12,7 @@ fmt, ptr, sync::atomic::{AtomicPtr, Ordering}, }; -use lock_api::RawMutex as RawMutexTrait; +use lock_api::RawMutex as RawMutex_; use parking_lot_core::{self, ParkResult, RequeueOp, UnparkResult, DEFAULT_PARK_TOKEN}; use std::time::{Duration, Instant}; @@ -24,7 +24,7 @@ impl WaitTimeoutResult { /// Returns whether the wait was known to have timed out. #[inline] - pub fn timed_out(&self) -> bool { + pub fn timed_out(self) -> bool { self.0 } } @@ -78,9 +78,13 @@ /// // wait for the thread to start up /// let &(ref lock, ref cvar) = &*pair; /// let mut started = lock.lock(); -/// while !*started { +/// if !*started { /// cvar.wait(&mut started); /// } +/// // Note that we used and if instead of a while loop above. This is only +/// // possible because parking_lot's Condvar will never spuriously wake up. +/// // This means that wait() will only return after notify_one or notify_all is +/// // called. /// ``` pub struct Condvar { state: AtomicPtr, @@ -91,7 +95,9 @@ /// notified. #[inline] pub const fn new() -> Condvar { - Condvar { state: AtomicPtr::new(ptr::null_mut()) } + Condvar { + state: AtomicPtr::new(ptr::null_mut()), + } } /// Wakes up one blocked thread on this condvar. @@ -282,7 +288,10 @@ mutex_guard: &mut MutexGuard<'_, T>, timeout: Instant, ) -> WaitTimeoutResult { - self.wait_until_internal(unsafe { MutexGuard::mutex(mutex_guard).raw() }, Some(timeout)) + self.wait_until_internal( + unsafe { MutexGuard::mutex(mutex_guard).raw() }, + Some(timeout), + ) } // This is a non-generic function to reduce the monomorphization cost of @@ -573,8 +582,10 @@ let _g = m2.lock(); c2.notify_one(); }); - let timeout_res = - c.wait_until(&mut g, Instant::now() + Duration::from_millis(u32::max_value() as u64)); + let timeout_res = c.wait_until( + &mut g, + Instant::now() + Duration::from_millis(u32::max_value() as u64), + ); assert!(!timeout_res.timed_out()); drop(g); } @@ -607,7 +618,7 @@ rx.recv().unwrap(); let _g = m.lock(); let _guard = PanicGuard(&*c); - let _ = c.wait(&mut m3.lock()); + c.wait(&mut m3.lock()); } #[test] @@ -681,3 +692,361 @@ } } } + +/// This module contains an integration test that is heavily inspired from WebKit's own integration +/// tests for it's own Condvar. 
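The doc-example change above (from `while` to `if`) relies on a documented property of parking_lot's `Condvar`: it never wakes spuriously, so `wait()` only returns after a `notify_one` or `notify_all`. A small, self-contained sketch along the same lines (the variable names are illustrative, not taken from the crate's tests):

```rust
use parking_lot::{Condvar, Mutex};
use std::{sync::Arc, thread};

fn main() {
    let pair = Arc::new((Mutex::new(false), Condvar::new()));
    let pair2 = Arc::clone(&pair);

    thread::spawn(move || {
        let (lock, cvar) = &*pair2;
        *lock.lock() = true;
        // Wake the waiting thread now that the flag is set.
        cvar.notify_one();
    });

    let (lock, cvar) = &*pair;
    let mut started = lock.lock();
    // With std::sync::Condvar this would need to be a `while` loop to guard
    // against spurious wakeups; parking_lot's Condvar never wakes spuriously,
    // so a single `if` check suffices, as the updated doc example shows.
    if !*started {
        cvar.wait(&mut started);
    }
    assert!(*started);
}
```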
+#[cfg(test)] +mod webkit_queue_test { + use crate::{Condvar, Mutex, MutexGuard}; + use std::{collections::VecDeque, sync::Arc, thread, time::Duration}; + + #[derive(Clone, Copy)] + enum Timeout { + Bounded(Duration), + Forever, + } + + #[derive(Clone, Copy)] + enum NotifyStyle { + One, + All, + } + + struct Queue { + items: VecDeque, + should_continue: bool, + } + + impl Queue { + fn new() -> Self { + Self { + items: VecDeque::new(), + should_continue: true, + } + } + } + + fn wait( + condition: &Condvar, + lock: &mut MutexGuard<'_, T>, + predicate: impl Fn(&mut MutexGuard<'_, T>) -> bool, + timeout: &Timeout, + ) { + while !predicate(lock) { + match timeout { + Timeout::Forever => condition.wait(lock), + Timeout::Bounded(bound) => { + condition.wait_for(lock, *bound); + } + } + } + } + + fn notify(style: NotifyStyle, condition: &Condvar, should_notify: bool) { + match style { + NotifyStyle::One => { + condition.notify_one(); + } + NotifyStyle::All => { + if should_notify { + condition.notify_all(); + } + } + } + } + + fn run_queue_test( + num_producers: usize, + num_consumers: usize, + max_queue_size: usize, + messages_per_producer: usize, + notify_style: NotifyStyle, + timeout: Timeout, + delay: Duration, + ) { + let input_queue = Arc::new(Mutex::new(Queue::new())); + let empty_condition = Arc::new(Condvar::new()); + let full_condition = Arc::new(Condvar::new()); + + let output_vec = Arc::new(Mutex::new(vec![])); + + let consumers = (0..num_consumers) + .map(|_| { + consumer_thread( + input_queue.clone(), + empty_condition.clone(), + full_condition.clone(), + timeout, + notify_style, + output_vec.clone(), + max_queue_size, + ) + }) + .collect::>(); + let producers = (0..num_producers) + .map(|_| { + producer_thread( + messages_per_producer, + input_queue.clone(), + empty_condition.clone(), + full_condition.clone(), + timeout, + notify_style, + max_queue_size, + ) + }) + .collect::>(); + + thread::sleep(delay); + + for producer in producers.into_iter() { + producer.join().expect("Producer thread panicked"); + } + + { + let mut input_queue = input_queue.lock(); + input_queue.should_continue = false; + } + empty_condition.notify_all(); + + for consumer in consumers.into_iter() { + consumer.join().expect("Consumer thread panicked"); + } + + let mut output_vec = output_vec.lock(); + assert_eq!(output_vec.len(), num_producers * messages_per_producer); + output_vec.sort(); + for msg_idx in 0..messages_per_producer { + for producer_idx in 0..num_producers { + assert_eq!(msg_idx, output_vec[msg_idx * num_producers + producer_idx]); + } + } + } + + fn consumer_thread( + input_queue: Arc>, + empty_condition: Arc, + full_condition: Arc, + timeout: Timeout, + notify_style: NotifyStyle, + output_queue: Arc>>, + max_queue_size: usize, + ) -> thread::JoinHandle<()> { + thread::spawn(move || loop { + let (should_notify, result) = { + let mut queue = input_queue.lock(); + wait( + &*empty_condition, + &mut queue, + |state| -> bool { !state.items.is_empty() || !state.should_continue }, + &timeout, + ); + if queue.items.is_empty() && !queue.should_continue { + return; + } + let should_notify = queue.items.len() == max_queue_size; + let result = queue.items.pop_front(); + std::mem::drop(queue); + (should_notify, result) + }; + notify(notify_style, &*full_condition, should_notify); + + if let Some(result) = result { + output_queue.lock().push(result); + } + }) + } + + fn producer_thread( + num_messages: usize, + queue: Arc>, + empty_condition: Arc, + full_condition: Arc, + timeout: Timeout, + notify_style: 
NotifyStyle, + max_queue_size: usize, + ) -> thread::JoinHandle<()> { + thread::spawn(move || { + for message in 0..num_messages { + let should_notify = { + let mut queue = queue.lock(); + wait( + &*full_condition, + &mut queue, + |state| state.items.len() < max_queue_size, + &timeout, + ); + let should_notify = queue.items.is_empty(); + queue.items.push_back(message); + std::mem::drop(queue); + should_notify + }; + notify(notify_style, &*empty_condition, should_notify); + } + }) + } + + macro_rules! run_queue_tests { + ( $( $name:ident( + num_producers: $num_producers:expr, + num_consumers: $num_consumers:expr, + max_queue_size: $max_queue_size:expr, + messages_per_producer: $messages_per_producer:expr, + notification_style: $notification_style:expr, + timeout: $timeout:expr, + delay_seconds: $delay_seconds:expr); + )* ) => { + $(#[test] + fn $name() { + let delay = Duration::from_secs($delay_seconds); + run_queue_test( + $num_producers, + $num_consumers, + $max_queue_size, + $messages_per_producer, + $notification_style, + $timeout, + delay, + ); + })* + }; + } + + run_queue_tests! { + sanity_check_queue( + num_producers: 1, + num_consumers: 1, + max_queue_size: 1, + messages_per_producer: 100_000, + notification_style: NotifyStyle::All, + timeout: Timeout::Bounded(Duration::from_secs(1)), + delay_seconds: 0 + ); + sanity_check_queue_timeout( + num_producers: 1, + num_consumers: 1, + max_queue_size: 1, + messages_per_producer: 100_000, + notification_style: NotifyStyle::All, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + new_test_without_timeout_5( + num_producers: 1, + num_consumers: 5, + max_queue_size: 1, + messages_per_producer: 100_000, + notification_style: NotifyStyle::All, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + one_producer_one_consumer_one_slot( + num_producers: 1, + num_consumers: 1, + max_queue_size: 1, + messages_per_producer: 100_000, + notification_style: NotifyStyle::All, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + one_producer_one_consumer_one_slot_timeout( + num_producers: 1, + num_consumers: 1, + max_queue_size: 1, + messages_per_producer: 100_000, + notification_style: NotifyStyle::All, + timeout: Timeout::Forever, + delay_seconds: 1 + ); + one_producer_one_consumer_hundred_slots( + num_producers: 1, + num_consumers: 1, + max_queue_size: 100, + messages_per_producer: 1_000_000, + notification_style: NotifyStyle::All, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + ten_producers_one_consumer_one_slot( + num_producers: 10, + num_consumers: 1, + max_queue_size: 1, + messages_per_producer: 10000, + notification_style: NotifyStyle::All, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + ten_producers_one_consumer_hundred_slots_notify_all( + num_producers: 10, + num_consumers: 1, + max_queue_size: 100, + messages_per_producer: 10000, + notification_style: NotifyStyle::All, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + ten_producers_one_consumer_hundred_slots_notify_one( + num_producers: 10, + num_consumers: 1, + max_queue_size: 100, + messages_per_producer: 10000, + notification_style: NotifyStyle::One, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + one_producer_ten_consumers_one_slot( + num_producers: 1, + num_consumers: 10, + max_queue_size: 1, + messages_per_producer: 10000, + notification_style: NotifyStyle::All, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + one_producer_ten_consumers_hundred_slots_notify_all( + num_producers: 1, + num_consumers: 10, + max_queue_size: 100, + messages_per_producer: 
100_000, + notification_style: NotifyStyle::All, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + one_producer_ten_consumers_hundred_slots_notify_one( + num_producers: 1, + num_consumers: 10, + max_queue_size: 100, + messages_per_producer: 100_000, + notification_style: NotifyStyle::One, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + ten_producers_ten_consumers_one_slot( + num_producers: 10, + num_consumers: 10, + max_queue_size: 1, + messages_per_producer: 50000, + notification_style: NotifyStyle::All, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + ten_producers_ten_consumers_hundred_slots_notify_all( + num_producers: 10, + num_consumers: 10, + max_queue_size: 100, + messages_per_producer: 50000, + notification_style: NotifyStyle::All, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + ten_producers_ten_consumers_hundred_slots_notify_one( + num_producers: 10, + num_consumers: 10, + max_queue_size: 100, + messages_per_producer: 50000, + notification_style: NotifyStyle::One, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + } +} diff -Nru rust-parking-lot-0.9.0/src/elision.rs rust-parking-lot-0.10.0/src/elision.rs --- rust-parking-lot-0.9.0/src/elision.rs 2019-07-02 22:27:45.000000000 +0000 +++ rust-parking-lot-0.10.0/src/elision.rs 2019-10-04 06:52:57.000000000 +0000 @@ -25,7 +25,10 @@ // Indicates whether the target architecture supports lock elision #[inline] pub fn have_elision() -> bool { - cfg!(all(feature = "nightly", any(target_arch = "x86", target_arch = "x86_64"),)) + cfg!(all( + feature = "nightly", + any(target_arch = "x86", target_arch = "x86_64"), + )) } // This implementation is never actually called because it is guarded by @@ -59,7 +62,11 @@ : "r" (new), "{eax}" (current) : "memory" : "volatile"); - if prev == current { Ok(prev) } else { Err(prev) } + if prev == current { + Ok(prev) + } else { + Err(prev) + } } } #[cfg(target_pointer_width = "64")] @@ -72,7 +79,11 @@ : "r" (new), "{rax}" (current) : "memory" : "volatile"); - if prev == current { Ok(prev) } else { Err(prev) } + if prev == current { + Ok(prev) + } else { + Err(prev) + } } } diff -Nru rust-parking-lot-0.9.0/src/lib.rs rust-parking-lot-0.10.0/src/lib.rs --- rust-parking-lot-0.9.0/src/lib.rs 2019-07-02 22:27:45.000000000 +0000 +++ rust-parking-lot-0.10.0/src/lib.rs 2019-10-04 06:54:21.000000000 +0000 @@ -28,7 +28,6 @@ #[cfg(not(feature = "deadlock_detection"))] mod deadlock; -pub use ::lock_api as lock_api; pub use self::condvar::{Condvar, WaitTimeoutResult}; pub use self::mutex::{MappedMutexGuard, Mutex, MutexGuard}; pub use self::once::{Once, OnceState}; @@ -41,3 +40,4 @@ MappedRwLockReadGuard, MappedRwLockWriteGuard, RwLock, RwLockReadGuard, RwLockUpgradableReadGuard, RwLockWriteGuard, }; +pub use ::lock_api; diff -Nru rust-parking-lot-0.9.0/src/mutex.rs rust-parking-lot-0.10.0/src/mutex.rs --- rust-parking-lot-0.9.0/src/mutex.rs 2019-05-04 09:27:08.000000000 +0000 +++ rust-parking-lot-0.10.0/src/mutex.rs 2019-11-25 21:09:35.000000000 +0000 @@ -245,7 +245,7 @@ fn test_mutex_arc_access_in_unwind() { let arc = Arc::new(Mutex::new(1)); let arc2 = arc.clone(); - let _ = thread::spawn(move || -> () { + let _ = thread::spawn(move || { struct Unwinder { i: Arc>, } diff -Nru rust-parking-lot-0.9.0/src/once.rs rust-parking-lot-0.10.0/src/once.rs --- rust-parking-lot-0.9.0/src/once.rs 2019-07-02 22:27:45.000000000 +0000 +++ rust-parking-lot-0.10.0/src/once.rs 2019-11-25 21:09:35.000000000 +0000 @@ -6,25 +6,16 @@ // copied, modified, or distributed except according to those terms. 
use crate::util::UncheckedOptionExt; -#[cfg(has_sized_atomics)] -use core::sync::atomic::AtomicU8; -#[cfg(not(has_sized_atomics))] -use core::sync::atomic::AtomicUsize as AtomicU8; use core::{ fmt, mem, - sync::atomic::{fence, Ordering}, + sync::atomic::{fence, AtomicU8, Ordering}, }; use parking_lot_core::{self, SpinWait, DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN}; -#[cfg(has_sized_atomics)] -type U8 = u8; -#[cfg(not(has_sized_atomics))] -type U8 = usize; - -const DONE_BIT: U8 = 1; -const POISON_BIT: U8 = 2; -const LOCKED_BIT: U8 = 4; -const PARKED_BIT: U8 = 8; +const DONE_BIT: u8 = 1; +const POISON_BIT: u8 = 2; +const LOCKED_BIT: u8 = 4; +const PARKED_BIT: u8 = 8; /// Current state of a `Once`. #[derive(Copy, Clone, Eq, PartialEq, Debug)] @@ -48,8 +39,8 @@ /// Once an initialization routine for a `Once` has panicked it will forever /// indicate to future forced initialization routines that it is poisoned. #[inline] - pub fn poisoned(&self) -> bool { - match *self { + pub fn poisoned(self) -> bool { + match self { OnceState::Poisoned => true, _ => false, } @@ -58,8 +49,8 @@ /// Returns whether the associated `Once` has successfully executed a /// closure. #[inline] - pub fn done(&self) -> bool { - match *self { + pub fn done(self) -> bool { + match self { OnceState::Done => true, _ => false, } @@ -194,7 +185,9 @@ } let mut f = Some(f); - self.call_once_slow(true, &mut |state| unsafe { f.take().unchecked_unwrap()(state) }); + self.call_once_slow(true, &mut |state| unsafe { + f.take().unchecked_unwrap()(state) + }); } // This is a non-generic function to reduce the monomorphization cost of @@ -303,7 +296,11 @@ // At this point we have the lock, so run the closure. Make sure we // properly clean up if the closure panicks. let guard = PanicGuard(self); - let once_state = if state & POISON_BIT != 0 { OnceState::Poisoned } else { OnceState::New }; + let once_state = if state & POISON_BIT != 0 { + OnceState::Poisoned + } else { + OnceState::New + }; f(once_state); mem::forget(guard); @@ -327,7 +324,9 @@ impl fmt::Debug for Once { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Once").field("state", &self.state()).finish() + f.debug_struct("Once") + .field("state", &self.state()) + .finish() } } diff -Nru rust-parking-lot-0.9.0/src/raw_mutex.rs rust-parking-lot-0.10.0/src/raw_mutex.rs --- rust-parking-lot-0.9.0/src/raw_mutex.rs 2019-07-02 22:27:45.000000000 +0000 +++ rust-parking-lot-0.10.0/src/raw_mutex.rs 2019-11-09 11:27:25.000000000 +0000 @@ -6,20 +6,14 @@ // copied, modified, or distributed except according to those terms. use crate::{deadlock, util}; -#[cfg(has_sized_atomics)] -use core::sync::atomic::AtomicU8; -#[cfg(not(has_sized_atomics))] -use core::sync::atomic::AtomicUsize as AtomicU8; -use core::{sync::atomic::Ordering, time::Duration}; -use lock_api::{GuardNoSend, RawMutex as RawMutexTrait, RawMutexFair, RawMutexTimed}; +use core::{ + sync::atomic::{AtomicU8, Ordering}, + time::Duration, +}; +use lock_api::{GuardNoSend, RawMutex as RawMutex_}; use parking_lot_core::{self, ParkResult, SpinWait, UnparkResult, UnparkToken, DEFAULT_PARK_TOKEN}; use std::time::Instant; -#[cfg(has_sized_atomics)] -type U8 = u8; -#[cfg(not(has_sized_atomics))] -type U8 = usize; - // UnparkToken used to indicate that that the target thread should attempt to // lock the mutex again as soon as it is unparked. pub(crate) const TOKEN_NORMAL: UnparkToken = UnparkToken(0); @@ -28,16 +22,43 @@ // thread directly without unlocking it. 
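The `once.rs` changes above drop the cfg-gated `AtomicUsize` fallback (unneeded now that the MSRV guarantees `AtomicU8`) and make `OnceState::poisoned`/`done` take `self` by value, which costs nothing since `OnceState` is a small `Copy` enum. A minimal usage sketch of the public API (the static name `INIT` is illustrative):

```rust
use parking_lot::Once;

static INIT: Once = Once::new();

fn main() {
    assert!(!INIT.state().done());
    INIT.call_once(|| {
        // This closure runs at most once, even with concurrent callers.
        println!("initialized");
    });
    // `done()` is now called on an `OnceState` value rather than a reference.
    assert!(INIT.state().done());
}
```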
pub(crate) const TOKEN_HANDOFF: UnparkToken = UnparkToken(1); -const LOCKED_BIT: U8 = 1; -const PARKED_BIT: U8 = 2; +/// This bit is set in the `state` of a `RawMutex` when that mutex is locked by some thread. +const LOCKED_BIT: u8 = 0b01; +/// This bit is set in the `state` of a `RawMutex` just before parking a thread. A thread is being +/// parked if it wants to lock the mutex, but it is currently being held by some other thread. +const PARKED_BIT: u8 = 0b10; /// Raw mutex type backed by the parking lot. pub struct RawMutex { + /// This atomic integer holds the current state of the mutex instance. Only the two lowest bits + /// are used. See `LOCKED_BIT` and `PARKED_BIT` for the bitmask for these bits. + /// + /// # State table: + /// + /// PARKED_BIT | LOCKED_BIT | Description + /// 0 | 0 | The mutex is not locked, nor is anyone waiting for it. + /// -----------+------------+------------------------------------------------------------------ + /// 0 | 1 | The mutex is locked by exactly one thread. No other thread is + /// | | waiting for it. + /// -----------+------------+------------------------------------------------------------------ + /// 1 | 0 | The mutex is not locked. One or more thread is parked or about to + /// | | park. At least one of the parked threads are just about to be + /// | | unparked, or a thread heading for parking might abort the park. + /// -----------+------------+------------------------------------------------------------------ + /// 1 | 1 | The mutex is locked by exactly one thread. One or more thread is + /// | | parked or about to park, waiting for the lock to become available. + /// | | In this state, PARKED_BIT is only ever cleared when a bucket lock + /// | | is held (i.e. in a parking_lot_core callback). This ensures that + /// | | we never end up in a situation where there are parked threads but + /// | | PARKED_BIT is not set (which would result in those threads + /// | | potentially never getting woken up). 
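The new doc comment above enumerates the four states encoded in the two low bits of `state`. A standalone sketch of that encoding (the constants mirror the diff; `describe` is only an illustrative helper, not part of the crate):

```rust
const LOCKED_BIT: u8 = 0b01;
const PARKED_BIT: u8 = 0b10;

// Decode the two-bit mutex state into a description that follows the
// state table in the comment above.
fn describe(state: u8) -> &'static str {
    match (state & PARKED_BIT != 0, state & LOCKED_BIT != 0) {
        (false, false) => "unlocked, no waiters",
        (false, true) => "locked by one thread, no waiters",
        (true, false) => "unlocked, threads parked or about to park",
        (true, true) => "locked, threads parked waiting for it",
    }
}

fn main() {
    assert_eq!(describe(0b00), "unlocked, no waiters");
    assert_eq!(describe(LOCKED_BIT | PARKED_BIT), "locked, threads parked waiting for it");
}
```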
state: AtomicU8, } -unsafe impl RawMutexTrait for RawMutex { - const INIT: RawMutex = RawMutex { state: AtomicU8::new(0) }; +unsafe impl lock_api::RawMutex for RawMutex { + const INIT: RawMutex = RawMutex { + state: AtomicU8::new(0), + }; type GuardMarker = GuardNoSend; @@ -78,7 +99,10 @@ #[inline] fn unlock(&self) { unsafe { deadlock::release_resource(self as *const _ as usize) }; - if self.state.compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed).is_ok() + if self + .state + .compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed) + .is_ok() { return; } @@ -86,11 +110,14 @@ } } -unsafe impl RawMutexFair for RawMutex { +unsafe impl lock_api::RawMutexFair for RawMutex { #[inline] fn unlock_fair(&self) { unsafe { deadlock::release_resource(self as *const _ as usize) }; - if self.state.compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed).is_ok() + if self + .state + .compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed) + .is_ok() { return; } @@ -105,7 +132,7 @@ } } -unsafe impl RawMutexTimed for RawMutex { +unsafe impl lock_api::RawMutexTimed for RawMutex { type Duration = Duration; type Instant = Instant; @@ -212,37 +239,41 @@ } // Park our thread until we are woken up by an unlock - unsafe { - let addr = self as *const _ as usize; - let validate = || self.state.load(Ordering::Relaxed) == LOCKED_BIT | PARKED_BIT; - let before_sleep = || {}; - let timed_out = |_, was_last_thread| { - // Clear the parked bit if we were the last parked thread - if was_last_thread { - self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed); - } - }; - match parking_lot_core::park( + let addr = self as *const _ as usize; + let validate = || self.state.load(Ordering::Relaxed) == LOCKED_BIT | PARKED_BIT; + let before_sleep = || {}; + let timed_out = |_, was_last_thread| { + // Clear the parked bit if we were the last parked thread + if was_last_thread { + self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed); + } + }; + // SAFETY: + // * `addr` is an address we control. + // * `validate`/`timed_out` does not panic or call into any function of `parking_lot`. + // * `before_sleep` does not call `park`, nor does it panic. + match unsafe { + parking_lot_core::park( addr, validate, before_sleep, timed_out, DEFAULT_PARK_TOKEN, timeout, - ) { - // The thread that unparked us passed the lock on to us - // directly without unlocking it. - ParkResult::Unparked(TOKEN_HANDOFF) => return true, + ) + } { + // The thread that unparked us passed the lock on to us + // directly without unlocking it. + ParkResult::Unparked(TOKEN_HANDOFF) => return true, - // We were unparked normally, try acquiring the lock again - ParkResult::Unparked(_) => (), + // We were unparked normally, try acquiring the lock again + ParkResult::Unparked(_) => (), - // The validation function failed, try locking again - ParkResult::Invalid => (), + // The validation function failed, try locking again + ParkResult::Invalid => (), - // Timeout expired - ParkResult::TimedOut => return false, - } + // Timeout expired + ParkResult::TimedOut => return false, } // Loop back and try locking again @@ -255,29 +286,32 @@ fn unlock_slow(&self, force_fair: bool) { // Unpark one thread and leave the parked bit set if there might // still be parked threads on this address. - unsafe { - let addr = self as *const _ as usize; - let callback = |result: UnparkResult| { - // If we are using a fair unlock then we should keep the - // mutex locked and hand it off to the unparked thread. 
- if result.unparked_threads != 0 && (force_fair || result.be_fair) { - // Clear the parked bit if there are no more parked - // threads. - if !result.have_more_threads { - self.state.store(LOCKED_BIT, Ordering::Relaxed); - } - return TOKEN_HANDOFF; + let addr = self as *const _ as usize; + let callback = |result: UnparkResult| { + // If we are using a fair unlock then we should keep the + // mutex locked and hand it off to the unparked thread. + if result.unparked_threads != 0 && (force_fair || result.be_fair) { + // Clear the parked bit if there are no more parked + // threads. + if !result.have_more_threads { + self.state.store(LOCKED_BIT, Ordering::Relaxed); } + return TOKEN_HANDOFF; + } - // Clear the locked bit, and the parked bit as well if there - // are no more parked threads. - if result.have_more_threads { - self.state.store(PARKED_BIT, Ordering::Release); - } else { - self.state.store(0, Ordering::Release); - } - TOKEN_NORMAL - }; + // Clear the locked bit, and the parked bit as well if there + // are no more parked threads. + if result.have_more_threads { + self.state.store(PARKED_BIT, Ordering::Release); + } else { + self.state.store(0, Ordering::Release); + } + TOKEN_NORMAL + }; + // SAFETY: + // * `addr` is an address we control. + // * `callback` does not panic or call into any function of `parking_lot`. + unsafe { parking_lot_core::unpark_one(addr, callback); } } diff -Nru rust-parking-lot-0.9.0/src/raw_rwlock.rs rust-parking-lot-0.10.0/src/raw_rwlock.rs --- rust-parking-lot-0.9.0/src/raw_rwlock.rs 2019-07-02 22:27:45.000000000 +0000 +++ rust-parking-lot-0.10.0/src/raw_rwlock.rs 2019-10-04 06:54:21.000000000 +0000 @@ -12,11 +12,7 @@ cell::Cell, sync::atomic::{AtomicUsize, Ordering}, }; -use lock_api::{ - GuardNoSend, RawRwLock as RawRwLockTrait, RawRwLockDowngrade, RawRwLockFair, - RawRwLockRecursive, RawRwLockRecursiveTimed, RawRwLockTimed, RawRwLockUpgrade, - RawRwLockUpgradeDowngrade, RawRwLockUpgradeFair, RawRwLockUpgradeTimed, -}; +use lock_api::{GuardNoSend, RawRwLock as RawRwLock_, RawRwLockUpgrade}; use parking_lot_core::{ self, deadlock, FilterOp, ParkResult, ParkToken, SpinWait, UnparkResult, UnparkToken, }; @@ -60,8 +56,10 @@ state: AtomicUsize, } -unsafe impl RawRwLockTrait for RawRwLock { - const INIT: RawRwLock = RawRwLock { state: AtomicUsize::new(0) }; +unsafe impl lock_api::RawRwLock for RawRwLock { + const INIT: RawRwLock = RawRwLock { + state: AtomicUsize::new(0), + }; type GuardMarker = GuardNoSend; @@ -80,7 +78,10 @@ #[inline] fn try_lock_exclusive(&self) -> bool { - if self.state.compare_exchange(0, WRITER_BIT, Ordering::Acquire, Ordering::Relaxed).is_ok() + if self + .state + .compare_exchange(0, WRITER_BIT, Ordering::Acquire, Ordering::Relaxed) + .is_ok() { self.deadlock_acquire(); true @@ -92,7 +93,10 @@ #[inline] fn unlock_exclusive(&self) { self.deadlock_release(); - if self.state.compare_exchange(WRITER_BIT, 0, Ordering::Release, Ordering::Relaxed).is_ok() + if self + .state + .compare_exchange(WRITER_BIT, 0, Ordering::Release, Ordering::Relaxed) + .is_ok() { return; } @@ -110,8 +114,11 @@ #[inline] fn try_lock_shared(&self) -> bool { - let result = - if self.try_lock_shared_fast(false) { true } else { self.try_lock_shared_slow(false) }; + let result = if self.try_lock_shared_fast(false) { + true + } else { + self.try_lock_shared_slow(false) + }; if result { self.deadlock_acquire(); } @@ -132,7 +139,7 @@ } } -unsafe impl RawRwLockFair for RawRwLock { +unsafe impl lock_api::RawRwLockFair for RawRwLock { #[inline] fn unlock_shared_fair(&self) { 
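The `unlock_slow(force_fair)` path above is what backs fair unlocking at the API level: with a fair unlock the mutex is handed directly to a parked waiter via `TOKEN_HANDOFF` instead of being released for any thread to grab. A sketch of how that surfaces through the public guard API (assuming only the documented `try_lock_for` and `MutexGuard::unlock_fair` methods):

```rust
use parking_lot::{Mutex, MutexGuard};
use std::time::Duration;

fn main() {
    let m = Mutex::new(0u32);

    // Timed acquisition; backed by the RawMutexTimed impl in the diff above.
    if let Some(mut guard) = m.try_lock_for(Duration::from_millis(10)) {
        *guard += 1;
        // A fair unlock hands the lock directly to a parked waiter (if any)
        // rather than letting this thread immediately re-acquire it.
        MutexGuard::unlock_fair(guard);
    }

    assert_eq!(*m.lock(), 1);
}
```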
// Shared unlocking is always fair in this implementation. @@ -142,7 +149,10 @@ #[inline] fn unlock_exclusive_fair(&self) { self.deadlock_release(); - if self.state.compare_exchange(WRITER_BIT, 0, Ordering::Release, Ordering::Relaxed).is_ok() + if self + .state + .compare_exchange(WRITER_BIT, 0, Ordering::Release, Ordering::Relaxed) + .is_ok() { return; } @@ -166,10 +176,12 @@ } } -unsafe impl RawRwLockDowngrade for RawRwLock { +unsafe impl lock_api::RawRwLockDowngrade for RawRwLock { #[inline] fn downgrade(&self) { - let state = self.state.fetch_add(ONE_READER - WRITER_BIT, Ordering::Release); + let state = self + .state + .fetch_add(ONE_READER - WRITER_BIT, Ordering::Release); // Wake up parked shared and upgradable threads if there are any if state & PARKED_BIT != 0 { @@ -178,7 +190,7 @@ } } -unsafe impl RawRwLockTimed for RawRwLock { +unsafe impl lock_api::RawRwLockTimed for RawRwLock { type Duration = Duration; type Instant = Instant; @@ -243,7 +255,7 @@ } } -unsafe impl RawRwLockRecursive for RawRwLock { +unsafe impl lock_api::RawRwLockRecursive for RawRwLock { #[inline] fn lock_shared_recursive(&self) { if !self.try_lock_shared_fast(true) { @@ -255,8 +267,11 @@ #[inline] fn try_lock_shared_recursive(&self) -> bool { - let result = - if self.try_lock_shared_fast(true) { true } else { self.try_lock_shared_slow(true) }; + let result = if self.try_lock_shared_fast(true) { + true + } else { + self.try_lock_shared_slow(true) + }; if result { self.deadlock_acquire(); } @@ -264,7 +279,7 @@ } } -unsafe impl RawRwLockRecursiveTimed for RawRwLock { +unsafe impl lock_api::RawRwLockRecursiveTimed for RawRwLock { #[inline] fn try_lock_shared_recursive_for(&self, timeout: Self::Duration) -> bool { let result = if self.try_lock_shared_fast(true) { @@ -292,7 +307,7 @@ } } -unsafe impl RawRwLockUpgrade for RawRwLock { +unsafe impl lock_api::RawRwLockUpgrade for RawRwLock { #[inline] fn lock_upgradable(&self) { if !self.try_lock_upgradable_fast() { @@ -304,8 +319,11 @@ #[inline] fn try_lock_upgradable(&self) -> bool { - let result = - if self.try_lock_upgradable_fast() { true } else { self.try_lock_upgradable_slow() }; + let result = if self.try_lock_upgradable_fast() { + true + } else { + self.try_lock_upgradable_slow() + }; if result { self.deadlock_acquire(); } @@ -335,8 +353,10 @@ #[inline] fn upgrade(&self) { - let state = - self.state.fetch_sub((ONE_READER | UPGRADABLE_BIT) - WRITER_BIT, Ordering::Relaxed); + let state = self.state.fetch_sub( + (ONE_READER | UPGRADABLE_BIT) - WRITER_BIT, + Ordering::Relaxed, + ); if state & READERS_MASK != ONE_READER { let result = self.upgrade_slow(None); debug_assert!(result); @@ -362,7 +382,7 @@ } } -unsafe impl RawRwLockUpgradeFair for RawRwLock { +unsafe impl lock_api::RawRwLockUpgradeFair for RawRwLock { #[inline] fn unlock_upgradable_fair(&self) { self.deadlock_release(); @@ -392,7 +412,7 @@ } } -unsafe impl RawRwLockUpgradeDowngrade for RawRwLock { +unsafe impl lock_api::RawRwLockUpgradeDowngrade for RawRwLock { #[inline] fn downgrade_upgradable(&self) { let state = self.state.fetch_sub(UPGRADABLE_BIT, Ordering::Relaxed); @@ -405,8 +425,10 @@ #[inline] fn downgrade_to_upgradable(&self) { - let state = - self.state.fetch_add((ONE_READER | UPGRADABLE_BIT) - WRITER_BIT, Ordering::Release); + let state = self.state.fetch_add( + (ONE_READER | UPGRADABLE_BIT) - WRITER_BIT, + Ordering::Release, + ); // Wake up parked shared threads if there are any if state & PARKED_BIT != 0 { @@ -415,7 +437,7 @@ } } -unsafe impl RawRwLockUpgradeTimed for RawRwLock { +unsafe 
impl lock_api::RawRwLockUpgradeTimed for RawRwLock { #[inline] fn try_lock_upgradable_until(&self, timeout: Instant) -> bool { let result = if self.try_lock_upgradable_fast() { @@ -444,15 +466,23 @@ #[inline] fn try_upgrade_until(&self, timeout: Instant) -> bool { - let state = - self.state.fetch_sub((ONE_READER | UPGRADABLE_BIT) - WRITER_BIT, Ordering::Relaxed); - if state & READERS_MASK == ONE_READER { true } else { self.upgrade_slow(Some(timeout)) } + let state = self.state.fetch_sub( + (ONE_READER | UPGRADABLE_BIT) - WRITER_BIT, + Ordering::Relaxed, + ); + if state & READERS_MASK == ONE_READER { + true + } else { + self.upgrade_slow(Some(timeout)) + } } #[inline] fn try_upgrade_for(&self, timeout: Duration) -> bool { - let state = - self.state.fetch_sub((ONE_READER | UPGRADABLE_BIT) - WRITER_BIT, Ordering::Relaxed); + let state = self.state.fetch_sub( + (ONE_READER | UPGRADABLE_BIT) - WRITER_BIT, + Ordering::Relaxed, + ); if state & READERS_MASK == ONE_READER { true } else { @@ -481,7 +511,9 @@ // readers try to acquire the lock. We only do this if the lock is // completely empty since elision handles conflicts poorly. if have_elision() && state == 0 { - self.state.elision_compare_exchange_acquire(0, ONE_READER).is_ok() + self.state + .elision_compare_exchange_acquire(0, ONE_READER) + .is_ok() } else if let Some(new_state) = state.checked_add(ONE_READER) { self.state .compare_exchange_weak(state, new_state, Ordering::Acquire, Ordering::Relaxed) @@ -509,7 +541,9 @@ } else { match self.state.compare_exchange_weak( state, - state.checked_add(ONE_READER).expect("RwLock reader count overflow"), + state + .checked_add(ONE_READER) + .expect("RwLock reader count overflow"), Ordering::Acquire, Ordering::Relaxed, ) { @@ -564,29 +598,31 @@ #[cold] fn lock_exclusive_slow(&self, timeout: Option) -> bool { + let try_lock = |state: &mut usize| { + loop { + if *state & (WRITER_BIT | UPGRADABLE_BIT) != 0 { + return false; + } + + // Grab WRITER_BIT if it isn't set, even if there are parked threads. + match self.state.compare_exchange_weak( + *state, + *state | WRITER_BIT, + Ordering::Acquire, + Ordering::Relaxed, + ) { + Ok(_) => return true, + Err(x) => *state = x, + } + } + }; + // Step 1: grab exclusive ownership of WRITER_BIT let timed_out = !self.lock_common( timeout, TOKEN_EXCLUSIVE, - |state| { - loop { - if *state & (WRITER_BIT | UPGRADABLE_BIT) != 0 { - return false; - } - - // Grab WRITER_BIT if it isn't set, even if there are parked threads. - match self.state.compare_exchange_weak( - *state, - *state | WRITER_BIT, - Ordering::Acquire, - Ordering::Relaxed, - ) { - Ok(_) => return true, - Err(x) => *state = x, - } - } - }, - |state| state & (WRITER_BIT | UPGRADABLE_BIT) != 0, + try_lock, + WRITER_BIT | UPGRADABLE_BIT, ); if timed_out { return false; @@ -618,111 +654,115 @@ TOKEN_NORMAL } }; - self.wake_parked_threads(0, callback); + // SAFETY: `callback` does not panic or call into any function of `parking_lot`. + unsafe { + self.wake_parked_threads(0, callback); + } } #[cold] fn lock_shared_slow(&self, recursive: bool, timeout: Option) -> bool { - self.lock_common( - timeout, - TOKEN_SHARED, - |state| { - let mut spinwait_shared = SpinWait::new(); - loop { - // Use hardware lock elision to avoid cache conflicts when multiple - // readers try to acquire the lock. We only do this if the lock is - // completely empty since elision handles conflicts poorly. 
- if have_elision() && *state == 0 { - match self.state.elision_compare_exchange_acquire(0, ONE_READER) { - Ok(_) => return true, - Err(x) => *state = x, - } - } - - // This is the same condition as try_lock_shared_fast - if *state & WRITER_BIT != 0 { - if !recursive || *state & READERS_MASK == 0 { - return false; - } + let try_lock = |state: &mut usize| { + let mut spinwait_shared = SpinWait::new(); + loop { + // Use hardware lock elision to avoid cache conflicts when multiple + // readers try to acquire the lock. We only do this if the lock is + // completely empty since elision handles conflicts poorly. + if have_elision() && *state == 0 { + match self.state.elision_compare_exchange_acquire(0, ONE_READER) { + Ok(_) => return true, + Err(x) => *state = x, } + } - if self - .state - .compare_exchange_weak( - *state, - state.checked_add(ONE_READER).expect("RwLock reader count overflow"), - Ordering::Acquire, - Ordering::Relaxed, - ) - .is_ok() - { - return true; + // This is the same condition as try_lock_shared_fast + if *state & WRITER_BIT != 0 { + if !recursive || *state & READERS_MASK == 0 { + return false; } + } - // If there is high contention on the reader count then we want - // to leave some time between attempts to acquire the lock to - // let other threads make progress. - spinwait_shared.spin_no_yield(); - *state = self.state.load(Ordering::Relaxed); + if self + .state + .compare_exchange_weak( + *state, + state + .checked_add(ONE_READER) + .expect("RwLock reader count overflow"), + Ordering::Acquire, + Ordering::Relaxed, + ) + .is_ok() + { + return true; } - }, - |state| state & WRITER_BIT != 0, - ) + + // If there is high contention on the reader count then we want + // to leave some time between attempts to acquire the lock to + // let other threads make progress. + spinwait_shared.spin_no_yield(); + *state = self.state.load(Ordering::Relaxed); + } + }; + self.lock_common(timeout, TOKEN_SHARED, try_lock, WRITER_BIT) } #[cold] fn unlock_shared_slow(&self) { // At this point WRITER_PARKED_BIT is set and READER_MASK is empty. We // just need to wake up a potentially sleeping pending writer. + // Using the 2nd key at addr + 1 + let addr = self as *const _ as usize + 1; + let callback = |_result: UnparkResult| { + // Clear the WRITER_PARKED_BIT here since there can only be one + // parked writer thread. + self.state.fetch_and(!WRITER_PARKED_BIT, Ordering::Relaxed); + TOKEN_NORMAL + }; + // SAFETY: + // * `addr` is an address we control. + // * `callback` does not panic or call into any function of `parking_lot`. unsafe { - // Using the 2nd key at addr + 1 - let addr = self as *const _ as usize + 1; - let callback = |result: UnparkResult| { - // Clear the WRITER_PARKED_BIT here since there can only be one - // parked writer thread. 
- debug_assert!(!result.have_more_threads); - self.state.fetch_and(!WRITER_PARKED_BIT, Ordering::Relaxed); - TOKEN_NORMAL - }; parking_lot_core::unpark_one(addr, callback); } } #[cold] fn lock_upgradable_slow(&self, timeout: Option) -> bool { + let try_lock = |state: &mut usize| { + let mut spinwait_shared = SpinWait::new(); + loop { + if *state & (WRITER_BIT | UPGRADABLE_BIT) != 0 { + return false; + } + + if self + .state + .compare_exchange_weak( + *state, + state + .checked_add(ONE_READER | UPGRADABLE_BIT) + .expect("RwLock reader count overflow"), + Ordering::Acquire, + Ordering::Relaxed, + ) + .is_ok() + { + return true; + } + + // If there is high contention on the reader count then we want + // to leave some time between attempts to acquire the lock to + // let other threads make progress. + spinwait_shared.spin_no_yield(); + *state = self.state.load(Ordering::Relaxed); + } + }; self.lock_common( timeout, TOKEN_UPGRADABLE, - |state| { - let mut spinwait_shared = SpinWait::new(); - loop { - if *state & (WRITER_BIT | UPGRADABLE_BIT) != 0 { - return false; - } - - if self - .state - .compare_exchange_weak( - *state, - state - .checked_add(ONE_READER | UPGRADABLE_BIT) - .expect("RwLock reader count overflow"), - Ordering::Acquire, - Ordering::Relaxed, - ) - .is_ok() - { - return true; - } - - // If there is high contention on the reader count then we want - // to leave some time between attempts to acquire the lock to - // let other threads make progress. - spinwait_shared.spin_no_yield(); - *state = self.state.load(Ordering::Relaxed); - } - }, - |state| state & (WRITER_BIT | UPGRADABLE_BIT) != 0, + try_lock, + WRITER_BIT | UPGRADABLE_BIT, ) } @@ -789,7 +829,10 @@ } } }; - self.wake_parked_threads(0, callback); + // SAFETY: `callback` does not panic or call into any function of `parking_lot`. + unsafe { + self.wake_parked_threads(0, callback); + } } #[cold] @@ -826,7 +869,10 @@ } TOKEN_NORMAL }; - self.wake_parked_threads(ONE_READER, callback); + // SAFETY: `callback` does not panic or call into any function of `parking_lot`. + unsafe { + self.wake_parked_threads(ONE_READER, callback); + } } #[cold] @@ -839,7 +885,10 @@ } TOKEN_NORMAL }; - self.wake_parked_threads(ONE_READER | UPGRADABLE_BIT, callback); + // SAFETY: `callback` does not panic or call into any function of `parking_lot`. + unsafe { + self.wake_parked_threads(ONE_READER | UPGRADABLE_BIT, callback); + } } #[cold] @@ -862,41 +911,49 @@ self.lock_upgradable(); } - // Common code for waking up parked threads after releasing WRITER_BIT or - // UPGRADABLE_BIT. + /// Common code for waking up parked threads after releasing WRITER_BIT or + /// UPGRADABLE_BIT. + /// + /// # Safety + /// + /// `callback` must uphold the requirements of the `callback` parameter to + /// `parking_lot_core::unpark_filter`. Meaning no panics or calls into any function in + /// `parking_lot`. #[inline] - fn wake_parked_threads(&self, new_state: usize, callback: C) - where - C: FnOnce(usize, UnparkResult) -> UnparkToken, - { + unsafe fn wake_parked_threads( + &self, + new_state: usize, + callback: impl FnOnce(usize, UnparkResult) -> UnparkToken, + ) { // We must wake up at least one upgrader or writer if there is one, // otherwise they may end up parked indefinitely since unlock_shared // does not call wake_parked_threads. let new_state = Cell::new(new_state); - unsafe { - let addr = self as *const _ as usize; - let filter = |ParkToken(token)| { - let s = new_state.get(); - - // If we are waking up a writer, don't wake anything else. 
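The `lock_upgradable_slow` path and the `RawRwLockUpgrade*` impls above are what power parking_lot's upgradable read locks: at most one upgradable reader coexists with ordinary readers and can later be promoted to a writer without releasing the lock. A usage sketch of the public API (variable names are illustrative):

```rust
use parking_lot::{RwLock, RwLockUpgradableReadGuard};

fn main() {
    let lock = RwLock::new(Vec::<i32>::new());

    // An upgradable read coexists with plain readers but excludes writers and
    // other upgradable readers (the WRITER_BIT | UPGRADABLE_BIT check above).
    let upgradable = lock.upgradable_read();
    if upgradable.is_empty() {
        // Upgrading waits for the remaining readers to drain, then takes WRITER_BIT.
        let mut writer = RwLockUpgradableReadGuard::upgrade(upgradable);
        writer.push(1);
    }

    assert_eq!(*lock.read(), vec![1]);
}
```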
- if s & WRITER_BIT != 0 { - return FilterOp::Stop; - } + let addr = self as *const _ as usize; + let filter = |ParkToken(token)| { + let s = new_state.get(); + + // If we are waking up a writer, don't wake anything else. + if s & WRITER_BIT != 0 { + return FilterOp::Stop; + } - // Otherwise wake *all* readers and one upgrader/writer. - if token & (UPGRADABLE_BIT | WRITER_BIT) != 0 && s & UPGRADABLE_BIT != 0 { - // Skip writers and upgradable readers if we already have - // a writer/upgradable reader. - FilterOp::Skip - } else { - new_state.set(s + token); - FilterOp::Unpark - } - }; - parking_lot_core::unpark_filter(addr, filter, |result| { - callback(new_state.get(), result) - }); - } + // Otherwise wake *all* readers and one upgrader/writer. + if token & (UPGRADABLE_BIT | WRITER_BIT) != 0 && s & UPGRADABLE_BIT != 0 { + // Skip writers and upgradable readers if we already have + // a writer/upgradable reader. + FilterOp::Skip + } else { + new_state.set(s + token); + FilterOp::Unpark + } + }; + let callback = |result| callback(new_state.get(), result); + // SAFETY: + // * `addr` is an address we control. + // * `filter` does not panic or call into any function of `parking_lot`. + // * `callback` safety responsibility is on caller + parking_lot_core::unpark_filter(addr, filter, callback); } // Common code for waiting for readers to exit the lock after acquiring @@ -928,71 +985,75 @@ } // Park our thread until we are woken up by an unlock - unsafe { - // Using the 2nd key at addr + 1 - let addr = self as *const _ as usize + 1; - let validate = || { - let state = self.state.load(Ordering::Relaxed); - state & READERS_MASK != 0 && state & WRITER_PARKED_BIT != 0 - }; - let before_sleep = || {}; - let timed_out = |_, _| {}; - match parking_lot_core::park( + // Using the 2nd key at addr + 1 + let addr = self as *const _ as usize + 1; + let validate = || { + let state = self.state.load(Ordering::Relaxed); + state & READERS_MASK != 0 && state & WRITER_PARKED_BIT != 0 + }; + let before_sleep = || {}; + let timed_out = |_, _| {}; + // SAFETY: + // * `addr` is an address we control. + // * `validate`/`timed_out` does not panic or call into any function of `parking_lot`. + // * `before_sleep` does not call `park`, nor does it panic. + let park_result = unsafe { + parking_lot_core::park( addr, validate, before_sleep, timed_out, TOKEN_EXCLUSIVE, timeout, - ) { - // We still need to re-check the state if we are unparked - // since a previous writer timing-out could have allowed - // another reader to sneak in before we parked. - ParkResult::Unparked(_) | ParkResult::Invalid => { - state = self.state.load(Ordering::Relaxed); - continue; - } + ) + }; + match park_result { + // We still need to re-check the state if we are unparked + // since a previous writer timing-out could have allowed + // another reader to sneak in before we parked. + ParkResult::Unparked(_) | ParkResult::Invalid => { + state = self.state.load(Ordering::Relaxed); + continue; + } - // Timeout expired - ParkResult::TimedOut => { - // We need to release WRITER_BIT and revert back to - // our previous value. We also wake up any threads that - // might be waiting on WRITER_BIT. 
- let state = self.state.fetch_add( - prev_value.wrapping_sub(WRITER_BIT | WRITER_PARKED_BIT), - Ordering::Relaxed, - ); - if state & PARKED_BIT != 0 { - let callback = |_, result: UnparkResult| { - // Clear the parked bit if there no more parked threads - if !result.have_more_threads { - self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed); - } - TOKEN_NORMAL - }; + // Timeout expired + ParkResult::TimedOut => { + // We need to release WRITER_BIT and revert back to + // our previous value. We also wake up any threads that + // might be waiting on WRITER_BIT. + let state = self.state.fetch_add( + prev_value.wrapping_sub(WRITER_BIT | WRITER_PARKED_BIT), + Ordering::Relaxed, + ); + if state & PARKED_BIT != 0 { + let callback = |_, result: UnparkResult| { + // Clear the parked bit if there no more parked threads + if !result.have_more_threads { + self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed); + } + TOKEN_NORMAL + }; + // SAFETY: `callback` does not panic or call any function of `parking_lot`. + unsafe { self.wake_parked_threads(ONE_READER | UPGRADABLE_BIT, callback); } - return false; } + return false; } } } true } - // Common code for acquiring a lock + /// Common code for acquiring a lock #[inline] - fn lock_common( + fn lock_common( &self, timeout: Option, token: ParkToken, - mut try_lock: F, - validate: V, - ) -> bool - where - F: FnMut(&mut usize) -> bool, - V: Fn(usize) -> bool, - { + mut try_lock: impl FnMut(&mut usize) -> bool, + validate_flags: usize, + ) -> bool { let mut spinwait = SpinWait::new(); let mut state = self.state.load(Ordering::Relaxed); loop { @@ -1021,40 +1082,39 @@ } // Park our thread until we are woken up by an unlock - unsafe { - let addr = self as *const _ as usize; - let validate = || { - let state = self.state.load(Ordering::Relaxed); - state & PARKED_BIT != 0 && validate(state) - }; - let before_sleep = || {}; - let timed_out = |_, was_last_thread| { - // Clear the parked bit if we were the last parked thread - if was_last_thread { - self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed); - } - }; - match parking_lot_core::park( - addr, - validate, - before_sleep, - timed_out, - token, - timeout, - ) { - // The thread that unparked us passed the lock on to us - // directly without unlocking it. - ParkResult::Unparked(TOKEN_HANDOFF) => return true, + let addr = self as *const _ as usize; + let validate = || { + let state = self.state.load(Ordering::Relaxed); + state & PARKED_BIT != 0 && (state & validate_flags != 0) + }; + let before_sleep = || {}; + let timed_out = |_, was_last_thread| { + // Clear the parked bit if we were the last parked thread + if was_last_thread { + self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed); + } + }; + + // SAFETY: + // * `addr` is an address we control. + // * `validate`/`timed_out` does not panic or call into any function of `parking_lot`. + // * `before_sleep` does not call `park`, nor does it panic. + let park_result = unsafe { + parking_lot_core::park(addr, validate, before_sleep, timed_out, token, timeout) + }; + match park_result { + // The thread that unparked us passed the lock on to us + // directly without unlocking it. 
+ ParkResult::Unparked(TOKEN_HANDOFF) => return true, - // We were unparked normally, try acquiring the lock again - ParkResult::Unparked(_) => (), + // We were unparked normally, try acquiring the lock again + ParkResult::Unparked(_) => (), - // The validation function failed, try locking again - ParkResult::Invalid => (), + // The validation function failed, try locking again + ParkResult::Invalid => (), - // Timeout expired - ParkResult::TimedOut => return false, - } + // Timeout expired + ParkResult::TimedOut => return false, } // Loop back and try locking again diff -Nru rust-parking-lot-0.9.0/src/remutex.rs rust-parking-lot-0.10.0/src/remutex.rs --- rust-parking-lot-0.9.0/src/remutex.rs 2019-07-02 22:27:45.000000000 +0000 +++ rust-parking-lot-0.10.0/src/remutex.rs 2019-11-25 21:09:35.000000000 +0000 @@ -17,8 +17,9 @@ fn nonzero_thread_id(&self) -> NonZeroUsize { // The address of a thread-local variable is guaranteed to be unique to the - // current thread, and is also guaranteed to be non-zero. - thread_local!(static KEY: u8 = unsafe { ::std::mem::uninitialized() }); + // current thread, and is also guaranteed to be non-zero. The variable has to have a + // non-zero size to guarantee it has a unique address for each thread. + thread_local!(static KEY: u8 = 0); KEY.with(|x| { NonZeroUsize::new(x as *const _ as usize) .expect("thread-local variable address is null") @@ -68,18 +69,18 @@ #[test] fn smoke() { - let m = ReentrantMutex::new(()); + let m = ReentrantMutex::new(2); { let a = m.lock(); { let b = m.lock(); { let c = m.lock(); - assert_eq!(*c, ()); + assert_eq!(*c, 2); } - assert_eq!(*b, ()); + assert_eq!(*b, 2); } - assert_eq!(*a, ()); + assert_eq!(*a, 2); } } diff -Nru rust-parking-lot-0.9.0/src/rwlock.rs rust-parking-lot-0.10.0/src/rwlock.rs --- rust-parking-lot-0.9.0/src/rwlock.rs 2019-07-02 22:27:45.000000000 +0000 +++ rust-parking-lot-0.10.0/src/rwlock.rs 2019-11-25 21:09:35.000000000 +0000 @@ -322,7 +322,7 @@ fn test_rw_arc_access_in_unwind() { let arc = Arc::new(RwLock::new(1)); let arc2 = arc.clone(); - let _ = thread::spawn(move || -> () { + let _ = thread::spawn(move || { struct Unwinder { i: Arc>, } @@ -359,7 +359,10 @@ let read_guard = lock.read(); let read_result = lock.try_read(); - assert!(read_result.is_some(), "try_read should succeed while read_guard is in scope"); + assert!( + read_result.is_some(), + "try_read should succeed while read_guard is in scope" + ); drop(read_guard); } @@ -378,7 +381,10 @@ let write_guard = lock.write(); let read_result = lock.try_read(); - assert!(read_result.is_none(), "try_read should fail while write_guard is in scope"); + assert!( + read_result.is_none(), + "try_read should fail while write_guard is in scope" + ); drop(write_guard); } @@ -391,7 +397,10 @@ let read_guard = lock.read(); let write_result = lock.try_write(); - assert!(write_result.is_none(), "try_write should fail while read_guard is in scope"); + assert!( + write_result.is_none(), + "try_write should fail while read_guard is in scope" + ); drop(read_guard); } @@ -410,7 +419,10 @@ let write_guard = lock.write(); let write_result = lock.try_write(); - assert!(write_result.is_none(), "try_write should fail while write_guard is in scope"); + assert!( + write_result.is_none(), + "try_write should fail while write_guard is in scope" + ); drop(write_guard); } diff -Nru rust-parking-lot-0.9.0/src/util.rs rust-parking-lot-0.10.0/src/util.rs --- rust-parking-lot-0.9.0/src/util.rs 2019-07-02 22:27:45.000000000 +0000 +++ rust-parking-lot-0.10.0/src/util.rs 2019-11-09 
11:27:25.000000000 +0000 @@ -34,10 +34,5 @@ #[inline] pub fn to_deadline(timeout: Duration) -> Option { - #[cfg(has_checked_instant)] - let deadline = Instant::now().checked_add(timeout); - #[cfg(not(has_checked_instant))] - let deadline = Some(Instant::now() + timeout); - - deadline + Instant::now().checked_add(timeout) } diff -Nru rust-parking-lot-0.9.0/.travis.yml rust-parking-lot-0.10.0/.travis.yml --- rust-parking-lot-0.9.0/.travis.yml 2019-07-14 12:52:34.000000000 +0000 +++ rust-parking-lot-0.10.0/.travis.yml 2019-11-09 11:27:25.000000000 +0000 @@ -7,7 +7,16 @@ matrix: include: - - rust: 1.32.0 + - rust: 1.36.0 + os: linux + script: + - cargo build --lib + - cargo build --lib --features serde + # Build on other platforms + - rustup target add wasm32-unknown-unknown + - cargo build --lib --target wasm32-unknown-unknown + + - rust: 1.36.0 os: linux script: &script - cargo build @@ -38,9 +47,11 @@ - rustup target add x86_64-fortanix-unknown-sgx - rustup target add x86_64-unknown-redox - rustup target add x86_64-unknown-cloudabi + - rustup target add x86_64-linux-android - cargo test --all --no-run --target x86_64-fortanix-unknown-sgx --features nightly - cargo build --all --target x86_64-unknown-redox --features nightly - cargo build --all --target x86_64-unknown-cloudabi --features nightly + - cargo build --all --target x86_64-linux-android --features nightly # Test building the docs - cargo doc --all-features --no-deps -p parking_lot -p parking_lot_core -p lock_api # Run the benchmarks @@ -49,7 +60,7 @@ - cargo run --release --bin rwlock -- 1 1 1 0 1 2 - cd .. - - rust: 1.32.0 + - rust: 1.36.0 os: osx script: *script
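
Note on two of the smaller changes above, illustrated with a self-contained Rust sketch. It is only a sketch: the `main()` driver, the variable names, and the assert/print lines are illustrative additions and not part of the crate; the two helper functions mirror the new bodies of `nonzero_thread_id` (src/remutex.rs) and `to_deadline` (src/util.rs) shown in the hunks above. The remutex change swaps `mem::uninitialized()` for a zero-initialized, non-zero-sized thread-local, using its per-thread address as a non-zero thread ID; the util change relies on `Instant::checked_add`, which the 1.36 MSRV makes unconditionally available, so an unrepresentable deadline becomes `None` instead of a cfg-gated panic path.

use std::num::NonZeroUsize;
use std::time::{Duration, Instant};

// Thread-local-address trick from src/remutex.rs: while a thread is alive,
// its copy of a non-zero-sized `thread_local!` has a unique, non-null
// address, so that address can serve as a non-zero thread ID without
// reading uninitialized memory.
fn nonzero_thread_id() -> NonZeroUsize {
    thread_local!(static KEY: u8 = 0);
    KEY.with(|x| {
        NonZeroUsize::new(x as *const _ as usize)
            .expect("thread-local variable address is null")
    })
}

// Simplified deadline helper from src/util.rs: `Instant::checked_add`
// returns `None` on overflow rather than panicking.
fn to_deadline(timeout: Duration) -> Option<Instant> {
    Instant::now().checked_add(timeout)
}

fn main() {
    let main_id = nonzero_thread_id();
    let child_id = std::thread::spawn(nonzero_thread_id).join().unwrap();
    // Each thread observes a different address for KEY while both are alive.
    assert_ne!(main_id, child_id);
    println!("main thread id: {}, child thread id: {}", main_id, child_id);
    println!("deadline in 5s: {:?}", to_deadline(Duration::from_secs(5)));
}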