Vendor dependencies for 0.3.0 release

2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

1270
vendor/parking_lot/src/condvar.rs vendored Normal file

File diff suppressed because it is too large

232
vendor/parking_lot/src/deadlock.rs vendored Normal file

@@ -0,0 +1,232 @@
//! \[Experimental\] Deadlock detection
//!
//! This feature is optional and can be enabled via the `deadlock_detection` feature flag.
//!
//! # Example
//!
//! ```
//! #[cfg(feature = "deadlock_detection")]
//! { // only for #[cfg]
//! use std::thread;
//! use std::time::Duration;
//! use parking_lot::deadlock;
//!
//! // Create a background thread which checks for deadlocks every 10s
//! thread::spawn(move || {
//! loop {
//! thread::sleep(Duration::from_secs(10));
//! let deadlocks = deadlock::check_deadlock();
//! if deadlocks.is_empty() {
//! continue;
//! }
//!
//! println!("{} deadlocks detected", deadlocks.len());
//! for (i, threads) in deadlocks.iter().enumerate() {
//! println!("Deadlock #{}", i);
//! for t in threads {
//! println!("Thread Id {:#?}", t.thread_id());
//! println!("{:#?}", t.backtrace());
//! }
//! }
//! }
//! });
//! } // only for #[cfg]
//! ```
#[cfg(feature = "deadlock_detection")]
pub use parking_lot_core::deadlock::check_deadlock;
pub(crate) use parking_lot_core::deadlock::{acquire_resource, release_resource};
#[cfg(test)]
#[cfg(feature = "deadlock_detection")]
mod tests {
use crate::{Mutex, ReentrantMutex, RwLock};
use std::sync::{Arc, Barrier};
use std::thread::{self, sleep};
use std::time::Duration;
// We need to serialize these tests since deadlock detection uses global state
static DEADLOCK_DETECTION_LOCK: Mutex<()> = crate::const_mutex(());
fn check_deadlock() -> bool {
use parking_lot_core::deadlock::check_deadlock;
!check_deadlock().is_empty()
}
#[test]
fn test_mutex_deadlock() {
let _guard = DEADLOCK_DETECTION_LOCK.lock();
let m1: Arc<Mutex<()>> = Default::default();
let m2: Arc<Mutex<()>> = Default::default();
let m3: Arc<Mutex<()>> = Default::default();
let b = Arc::new(Barrier::new(4));
let m1_ = m1.clone();
let m2_ = m2.clone();
let m3_ = m3.clone();
let b1 = b.clone();
let b2 = b.clone();
let b3 = b.clone();
assert!(!check_deadlock());
let _t1 = thread::spawn(move || {
let _g = m1.lock();
b1.wait();
let _ = m2_.lock();
});
let _t2 = thread::spawn(move || {
let _g = m2.lock();
b2.wait();
let _ = m3_.lock();
});
let _t3 = thread::spawn(move || {
let _g = m3.lock();
b3.wait();
let _ = m1_.lock();
});
assert!(!check_deadlock());
b.wait();
sleep(Duration::from_millis(50));
assert!(check_deadlock());
assert!(!check_deadlock());
}
#[test]
fn test_mutex_deadlock_reentrant() {
let _guard = DEADLOCK_DETECTION_LOCK.lock();
let m1: Arc<Mutex<()>> = Default::default();
assert!(!check_deadlock());
let _t1 = thread::spawn(move || {
let _g = m1.lock();
let _ = m1.lock();
});
sleep(Duration::from_millis(50));
assert!(check_deadlock());
assert!(!check_deadlock());
}
#[test]
fn test_remutex_deadlock() {
let _guard = DEADLOCK_DETECTION_LOCK.lock();
let m1: Arc<ReentrantMutex<()>> = Default::default();
let m2: Arc<ReentrantMutex<()>> = Default::default();
let m3: Arc<ReentrantMutex<()>> = Default::default();
let b = Arc::new(Barrier::new(4));
let m1_ = m1.clone();
let m2_ = m2.clone();
let m3_ = m3.clone();
let b1 = b.clone();
let b2 = b.clone();
let b3 = b.clone();
assert!(!check_deadlock());
let _t1 = thread::spawn(move || {
let _g = m1.lock();
let _g = m1.lock();
b1.wait();
let _ = m2_.lock();
});
let _t2 = thread::spawn(move || {
let _g = m2.lock();
let _g = m2.lock();
b2.wait();
let _ = m3_.lock();
});
let _t3 = thread::spawn(move || {
let _g = m3.lock();
let _g = m3.lock();
b3.wait();
let _ = m1_.lock();
});
assert!(!check_deadlock());
b.wait();
sleep(Duration::from_millis(50));
assert!(check_deadlock());
assert!(!check_deadlock());
}
#[test]
fn test_rwlock_deadlock() {
let _guard = DEADLOCK_DETECTION_LOCK.lock();
let m1: Arc<RwLock<()>> = Default::default();
let m2: Arc<RwLock<()>> = Default::default();
let m3: Arc<RwLock<()>> = Default::default();
let b = Arc::new(Barrier::new(4));
let m1_ = m1.clone();
let m2_ = m2.clone();
let m3_ = m3.clone();
let b1 = b.clone();
let b2 = b.clone();
let b3 = b.clone();
assert!(!check_deadlock());
let _t1 = thread::spawn(move || {
let _g = m1.read();
b1.wait();
let _g = m2_.write();
});
let _t2 = thread::spawn(move || {
let _g = m2.read();
b2.wait();
let _g = m3_.write();
});
let _t3 = thread::spawn(move || {
let _g = m3.read();
b3.wait();
let _ = m1_.write();
});
assert!(!check_deadlock());
b.wait();
sleep(Duration::from_millis(50));
assert!(check_deadlock());
assert!(!check_deadlock());
}
#[cfg(rwlock_deadlock_detection_not_supported)]
#[test]
fn test_rwlock_deadlock_reentrant() {
let _guard = DEADLOCK_DETECTION_LOCK.lock();
let m1: Arc<RwLock<()>> = Default::default();
assert!(!check_deadlock());
let _t1 = thread::spawn(move || {
let _g = m1.read();
let _ = m1.write();
});
sleep(Duration::from_millis(50));
assert!(check_deadlock());
assert!(!check_deadlock());
}
}
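As the tests above rely on, `check_deadlock` reports each deadlock cycle only once per detection pass. A minimal one-shot check, sketched here with a hypothetical function name (the looping watcher thread in the module docs is the usual pattern):

#[cfg(feature = "deadlock_detection")]
fn report_deadlocks_once() -> usize {
    // Each cycle is returned once; calling this again will not report the same cycles.
    let deadlocks = parking_lot::deadlock::check_deadlock();
    for (i, threads) in deadlocks.iter().enumerate() {
        eprintln!("deadlock #{}: {} thread(s) involved", i, threads.len());
    }
    deadlocks.len()
}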

116
vendor/parking_lot/src/elision.rs vendored Normal file

@@ -0,0 +1,116 @@
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use std::sync::atomic::AtomicUsize;
// Extension trait to add lock elision primitives to atomic types
pub trait AtomicElisionExt {
type IntType;
// Perform a compare_exchange and start a transaction
fn elision_compare_exchange_acquire(
&self,
current: Self::IntType,
new: Self::IntType,
) -> Result<Self::IntType, Self::IntType>;
// Perform a fetch_sub and end a transaction
fn elision_fetch_sub_release(&self, val: Self::IntType) -> Self::IntType;
}
// Indicates whether the target architecture supports lock elision
#[inline]
pub fn have_elision() -> bool {
cfg!(all(
feature = "hardware-lock-elision",
any(target_arch = "x86", target_arch = "x86_64"),
))
}
// This implementation is never actually called because it is guarded by
// have_elision().
#[cfg(not(all(
feature = "hardware-lock-elision",
any(target_arch = "x86", target_arch = "x86_64")
)))]
impl AtomicElisionExt for AtomicUsize {
type IntType = usize;
#[inline]
fn elision_compare_exchange_acquire(&self, _: usize, _: usize) -> Result<usize, usize> {
unreachable!();
}
#[inline]
fn elision_fetch_sub_release(&self, _: usize) -> usize {
unreachable!();
}
}
#[cfg(all(
feature = "hardware-lock-elision",
any(target_arch = "x86", target_arch = "x86_64")
))]
impl AtomicElisionExt for AtomicUsize {
type IntType = usize;
#[inline]
fn elision_compare_exchange_acquire(&self, current: usize, new: usize) -> Result<usize, usize> {
unsafe {
use core::arch::asm;
let prev: usize;
#[cfg(target_pointer_width = "32")]
asm!(
"xacquire",
"lock",
"cmpxchg [{:e}], {:e}",
in(reg) self,
in(reg) new,
inout("eax") current => prev,
);
#[cfg(target_pointer_width = "64")]
asm!(
"xacquire",
"lock",
"cmpxchg [{}], {}",
in(reg) self,
in(reg) new,
inout("rax") current => prev,
);
if prev == current {
Ok(prev)
} else {
Err(prev)
}
}
}
#[inline]
fn elision_fetch_sub_release(&self, val: usize) -> usize {
unsafe {
use core::arch::asm;
let prev: usize;
#[cfg(target_pointer_width = "32")]
asm!(
"xrelease",
"lock",
"xadd [{:e}], {:e}",
in(reg) self,
inout(reg) val.wrapping_neg() => prev,
);
#[cfg(target_pointer_width = "64")]
asm!(
"xrelease",
"lock",
"xadd [{}], {}",
in(reg) self,
inout(reg) val.wrapping_neg() => prev,
);
prev
}
}
}
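A hedged sketch of how a lock implementation might combine `have_elision` with the extension trait above: take the elided path only when the target supports it, and fall back to a plain compare-exchange otherwise. This is illustrative only (it assumes `AtomicElisionExt` and `have_elision` from this module are in scope) and is not the actual `raw_rwlock` code:

use std::sync::atomic::{AtomicUsize, Ordering};

// Hypothetical helper: add a reader using hardware lock elision when available.
fn try_add_reader(state: &AtomicUsize, current: usize) -> Result<usize, usize> {
    if have_elision() {
        // Starts a hardware transaction covering the critical section.
        state.elision_compare_exchange_acquire(current, current + 1)
    } else {
        state.compare_exchange(current, current + 1, Ordering::Acquire, Ordering::Relaxed)
    }
}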

274
vendor/parking_lot/src/fair_mutex.rs vendored Normal file

@@ -0,0 +1,274 @@
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use crate::raw_fair_mutex::RawFairMutex;
/// A mutual exclusion primitive that is always fair, useful for protecting shared data
///
/// This mutex will block threads waiting for the lock to become available. The
/// mutex can be statically initialized or created by the `new`
/// constructor. Each mutex has a type parameter which represents the data that
/// it is protecting. The data can only be accessed through the RAII guards
/// returned from `lock` and `try_lock`, which guarantees that the data is only
/// ever accessed when the mutex is locked.
///
/// The regular mutex provided by `parking_lot` uses eventual fairness
/// (after some time it will default to the fair algorithm), but eventual
/// fairness does not provide the same guarantees an always fair method would.
/// Fair mutexes are generally slower, but sometimes needed.
///
/// In a fair mutex the waiters form a queue, and the lock is always granted to
/// the next requester in the queue, in first-in first-out order. This ensures
/// that one thread cannot starve others by quickly re-acquiring the lock after
/// releasing it.
///
/// A fair mutex may be undesirable when threads have different priorities, since strict
/// FIFO ordering can lead to priority inversion.
///
/// # Differences from the standard library `Mutex`
///
/// - No poisoning, the lock is released normally on panic.
/// - Only requires 1 byte of space, whereas the standard library boxes the
/// `FairMutex` due to platform limitations.
/// - Can be statically constructed.
/// - Does not require any drop glue when dropped.
/// - Inline fast path for the uncontended case.
/// - Efficient handling of micro-contention using adaptive spinning.
/// - Allows raw locking & unlocking without a guard.
///
/// # Examples
///
/// ```
/// use parking_lot::FairMutex;
/// use std::sync::{Arc, mpsc::channel};
/// use std::thread;
///
/// const N: usize = 10;
///
/// // Spawn a few threads to increment a shared variable (non-atomically), and
/// // let the main thread know once all increments are done.
/// //
/// // Here we're using an Arc to share memory among threads, and the data inside
/// // the Arc is protected with a mutex.
/// let data = Arc::new(FairMutex::new(0));
///
/// let (tx, rx) = channel();
/// for _ in 0..10 {
/// let (data, tx) = (Arc::clone(&data), tx.clone());
/// thread::spawn(move || {
/// // The shared state can only be accessed once the lock is held.
/// // Our non-atomic increment is safe because we're the only thread
/// // which can access the shared state when the lock is held.
/// let mut data = data.lock();
/// *data += 1;
/// if *data == N {
/// tx.send(()).unwrap();
/// }
/// // the lock is unlocked here when `data` goes out of scope.
/// });
/// }
///
/// rx.recv().unwrap();
/// ```
pub type FairMutex<T> = lock_api::Mutex<RawFairMutex, T>;
/// Creates a new fair mutex in an unlocked state ready for use.
///
/// This allows creating a fair mutex in a constant context on stable Rust.
pub const fn const_fair_mutex<T>(val: T) -> FairMutex<T> {
FairMutex::const_new(<RawFairMutex as lock_api::RawMutex>::INIT, val)
}
/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
/// dropped (falls out of scope), the lock will be unlocked.
///
/// The data protected by the mutex can be accessed through this guard via its
/// `Deref` and `DerefMut` implementations.
pub type FairMutexGuard<'a, T> = lock_api::MutexGuard<'a, RawFairMutex, T>;
/// An RAII mutex guard returned by `FairMutexGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedFairMutexGuard` and `FairMutexGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
pub type MappedFairMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, RawFairMutex, T>;
#[cfg(test)]
mod tests {
use crate::FairMutex;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread;
#[cfg(feature = "serde")]
use bincode::{deserialize, serialize};
#[derive(Eq, PartialEq, Debug)]
struct NonCopy(i32);
#[test]
fn smoke() {
let m = FairMutex::new(());
drop(m.lock());
drop(m.lock());
}
#[test]
fn lots_and_lots() {
const J: u32 = 1000;
const K: u32 = 3;
let m = Arc::new(FairMutex::new(0));
fn inc(m: &FairMutex<u32>) {
for _ in 0..J {
*m.lock() += 1;
}
}
let (tx, rx) = channel();
for _ in 0..K {
let tx2 = tx.clone();
let m2 = m.clone();
thread::spawn(move || {
inc(&m2);
tx2.send(()).unwrap();
});
let tx2 = tx.clone();
let m2 = m.clone();
thread::spawn(move || {
inc(&m2);
tx2.send(()).unwrap();
});
}
drop(tx);
for _ in 0..2 * K {
rx.recv().unwrap();
}
assert_eq!(*m.lock(), J * K * 2);
}
#[test]
fn try_lock() {
let m = FairMutex::new(());
*m.try_lock().unwrap() = ();
}
#[test]
fn test_into_inner() {
let m = FairMutex::new(NonCopy(10));
assert_eq!(m.into_inner(), NonCopy(10));
}
#[test]
fn test_into_inner_drop() {
struct Foo(Arc<AtomicUsize>);
impl Drop for Foo {
fn drop(&mut self) {
self.0.fetch_add(1, Ordering::SeqCst);
}
}
let num_drops = Arc::new(AtomicUsize::new(0));
let m = FairMutex::new(Foo(num_drops.clone()));
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
{
let _inner = m.into_inner();
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
}
assert_eq!(num_drops.load(Ordering::SeqCst), 1);
}
#[test]
fn test_get_mut() {
let mut m = FairMutex::new(NonCopy(10));
*m.get_mut() = NonCopy(20);
assert_eq!(m.into_inner(), NonCopy(20));
}
#[test]
fn test_mutex_arc_nested() {
// Tests nested mutexes and access
// to underlying data.
let arc = Arc::new(FairMutex::new(1));
let arc2 = Arc::new(FairMutex::new(arc));
let (tx, rx) = channel();
let _t = thread::spawn(move || {
let lock = arc2.lock();
let lock2 = lock.lock();
assert_eq!(*lock2, 1);
tx.send(()).unwrap();
});
rx.recv().unwrap();
}
#[test]
fn test_mutex_arc_access_in_unwind() {
let arc = Arc::new(FairMutex::new(1));
let arc2 = arc.clone();
let _ = thread::spawn(move || {
struct Unwinder {
i: Arc<FairMutex<i32>>,
}
impl Drop for Unwinder {
fn drop(&mut self) {
*self.i.lock() += 1;
}
}
let _u = Unwinder { i: arc2 };
panic!();
})
.join();
let lock = arc.lock();
assert_eq!(*lock, 2);
}
#[test]
fn test_mutex_unsized() {
let mutex: &FairMutex<[i32]> = &FairMutex::new([1, 2, 3]);
{
let b = &mut *mutex.lock();
b[0] = 4;
b[2] = 5;
}
let comp: &[i32] = &[4, 2, 5];
assert_eq!(&*mutex.lock(), comp);
}
#[test]
fn test_mutexguard_sync() {
fn sync<T: Sync>(_: T) {}
let mutex = FairMutex::new(());
sync(mutex.lock());
}
#[test]
fn test_mutex_debug() {
let mutex = FairMutex::new(vec![0u8, 10]);
assert_eq!(format!("{:?}", mutex), "Mutex { data: [0, 10] }");
let _lock = mutex.lock();
assert_eq!(format!("{:?}", mutex), "Mutex { data: <locked> }");
}
#[cfg(feature = "serde")]
#[test]
fn test_serde() {
let contents: Vec<u8> = vec![0, 1, 2];
let mutex = FairMutex::new(contents.clone());
let serialized = serialize(&mutex).unwrap();
let deserialized: FairMutex<Vec<u8>> = deserialize(&serialized).unwrap();
assert_eq!(*(mutex.lock()), *(deserialized.lock()));
assert_eq!(contents, *(deserialized.lock()));
}
}
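The `const_fair_mutex` constructor above exists so a `FairMutex` can be placed in a `static`; a small sketch of that pattern (the static and function names are hypothetical):

use parking_lot::{const_fair_mutex, FairMutex};

// A fair mutex constructed in a constant context, usable from any thread.
static EVENT_LOG: FairMutex<Vec<String>> = const_fair_mutex(Vec::new());

fn log_event(msg: &str) {
    // Waiters are served in FIFO order, so no caller can monopolize the log.
    EVENT_LOG.lock().push(msg.to_owned());
}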

59
vendor/parking_lot/src/lib.rs vendored Normal file

@@ -0,0 +1,59 @@
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//! This library provides implementations of `Mutex`, `RwLock`, `Condvar` and
//! `Once` that are smaller, faster and more flexible than those in the Rust
//! standard library. It also provides a `ReentrantMutex` type.
#![warn(missing_docs)]
#![warn(rust_2018_idioms)]
mod condvar;
mod elision;
mod fair_mutex;
mod mutex;
mod once;
mod raw_fair_mutex;
mod raw_mutex;
mod raw_rwlock;
mod remutex;
mod rwlock;
mod util;
#[cfg(feature = "deadlock_detection")]
pub mod deadlock;
#[cfg(not(feature = "deadlock_detection"))]
mod deadlock;
// If deadlock detection is enabled, we cannot allow lock guards to be sent to
// other threads.
#[cfg(all(feature = "send_guard", feature = "deadlock_detection"))]
compile_error!("the `send_guard` and `deadlock_detection` features cannot be used together");
#[cfg(feature = "send_guard")]
type GuardMarker = lock_api::GuardSend;
#[cfg(not(feature = "send_guard"))]
type GuardMarker = lock_api::GuardNoSend;
pub use self::condvar::{Condvar, WaitTimeoutResult};
pub use self::fair_mutex::{const_fair_mutex, FairMutex, FairMutexGuard, MappedFairMutexGuard};
pub use self::mutex::{const_mutex, MappedMutexGuard, Mutex, MutexGuard};
pub use self::once::{Once, OnceState};
pub use self::raw_fair_mutex::RawFairMutex;
pub use self::raw_mutex::RawMutex;
pub use self::raw_rwlock::RawRwLock;
pub use self::remutex::{
const_reentrant_mutex, MappedReentrantMutexGuard, RawThreadId, ReentrantMutex,
ReentrantMutexGuard,
};
pub use self::rwlock::{
const_rwlock, MappedRwLockReadGuard, MappedRwLockWriteGuard, RwLock, RwLockReadGuard,
RwLockUpgradableReadGuard, RwLockWriteGuard,
};
pub use ::lock_api;
#[cfg(feature = "arc_lock")]
pub use self::lock_api::{ArcMutexGuard, ArcReentrantMutexGuard, ArcRwLockReadGuard, ArcRwLockUpgradableReadGuard, ArcRwLockWriteGuard};
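A short sketch of what the `send_guard` feature toggles: with `GuardMarker = GuardSend` a lock guard becomes `Send`, which is exactly why it cannot be combined with `deadlock_detection`. The helper below is illustrative only:

fn assert_send<T: Send>(_: &T) {}

#[cfg(feature = "send_guard")]
fn guard_can_cross_threads(m: &parking_lot::Mutex<u32>) {
    let guard = m.lock();
    // Compiles only when the guard marker is `lock_api::GuardSend`; with the
    // default `GuardNoSend` marker this call is rejected.
    assert_send(&guard);
}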

311
vendor/parking_lot/src/mutex.rs vendored Normal file

@@ -0,0 +1,311 @@
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use crate::raw_mutex::RawMutex;
/// A mutual exclusion primitive useful for protecting shared data
///
/// This mutex will block threads waiting for the lock to become available. The
/// mutex can be statically initialized or created by the `new`
/// constructor. Each mutex has a type parameter which represents the data that
/// it is protecting. The data can only be accessed through the RAII guards
/// returned from `lock` and `try_lock`, which guarantees that the data is only
/// ever accessed when the mutex is locked.
///
/// # Fairness
///
/// A typical unfair lock can often end up in a situation where a single thread
/// quickly acquires and releases the same mutex in succession. While this
/// improves throughput, because it avoids a context switch when a thread tries
/// to re-acquire a mutex it has just released, it can starve other threads
/// waiting to acquire the mutex.
///
/// This mutex uses [eventual fairness](https://trac.webkit.org/changeset/203350)
/// to ensure that the lock will be fair on average without sacrificing
/// throughput. This is done by forcing a fair unlock on average every 0.5ms,
/// which will force the lock to go to the next thread waiting for the mutex.
///
/// Additionally, any critical section longer than 1ms will always use a fair
/// unlock, which has a negligible impact on throughput considering the length
/// of the critical section.
///
/// You can also force a fair unlock by calling `MutexGuard::unlock_fair` when
/// unlocking a mutex instead of simply dropping the `MutexGuard`.
///
/// # Differences from the standard library `Mutex`
///
/// - No poisoning, the lock is released normally on panic.
/// - Only requires 1 byte of space, whereas the standard library boxes the
/// `Mutex` due to platform limitations.
/// - Can be statically constructed.
/// - Does not require any drop glue when dropped.
/// - Inline fast path for the uncontended case.
/// - Efficient handling of micro-contention using adaptive spinning.
/// - Allows raw locking & unlocking without a guard.
/// - Supports eventual fairness so that the mutex is fair on average.
/// - Optionally allows making the mutex fair by calling `MutexGuard::unlock_fair`.
///
/// # Examples
///
/// ```
/// use parking_lot::Mutex;
/// use std::sync::{Arc, mpsc::channel};
/// use std::thread;
///
/// const N: usize = 10;
///
/// // Spawn a few threads to increment a shared variable (non-atomically), and
/// // let the main thread know once all increments are done.
/// //
/// // Here we're using an Arc to share memory among threads, and the data inside
/// // the Arc is protected with a mutex.
/// let data = Arc::new(Mutex::new(0));
///
/// let (tx, rx) = channel();
/// for _ in 0..10 {
/// let (data, tx) = (Arc::clone(&data), tx.clone());
/// thread::spawn(move || {
/// // The shared state can only be accessed once the lock is held.
/// // Our non-atomic increment is safe because we're the only thread
/// // which can access the shared state when the lock is held.
/// let mut data = data.lock();
/// *data += 1;
/// if *data == N {
/// tx.send(()).unwrap();
/// }
/// // the lock is unlocked here when `data` goes out of scope.
/// });
/// }
///
/// rx.recv().unwrap();
/// ```
pub type Mutex<T> = lock_api::Mutex<RawMutex, T>;
/// Creates a new mutex in an unlocked state ready for use.
///
/// This allows creating a mutex in a constant context on stable Rust.
pub const fn const_mutex<T>(val: T) -> Mutex<T> {
Mutex::const_new(<RawMutex as lock_api::RawMutex>::INIT, val)
}
/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
/// dropped (falls out of scope), the lock will be unlocked.
///
/// The data protected by the mutex can be accessed through this guard via its
/// `Deref` and `DerefMut` implementations.
pub type MutexGuard<'a, T> = lock_api::MutexGuard<'a, RawMutex, T>;
/// An RAII mutex guard returned by `MutexGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedMutexGuard` and `MutexGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
pub type MappedMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, RawMutex, T>;
#[cfg(test)]
mod tests {
use crate::{Condvar, Mutex};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread;
#[cfg(feature = "serde")]
use bincode::{deserialize, serialize};
struct Packet<T>(Arc<(Mutex<T>, Condvar)>);
#[derive(Eq, PartialEq, Debug)]
struct NonCopy(i32);
unsafe impl<T: Send> Send for Packet<T> {}
unsafe impl<T> Sync for Packet<T> {}
#[test]
fn smoke() {
let m = Mutex::new(());
drop(m.lock());
drop(m.lock());
}
#[test]
fn lots_and_lots() {
const J: u32 = 1000;
const K: u32 = 3;
let m = Arc::new(Mutex::new(0));
fn inc(m: &Mutex<u32>) {
for _ in 0..J {
*m.lock() += 1;
}
}
let (tx, rx) = channel();
for _ in 0..K {
let tx2 = tx.clone();
let m2 = m.clone();
thread::spawn(move || {
inc(&m2);
tx2.send(()).unwrap();
});
let tx2 = tx.clone();
let m2 = m.clone();
thread::spawn(move || {
inc(&m2);
tx2.send(()).unwrap();
});
}
drop(tx);
for _ in 0..2 * K {
rx.recv().unwrap();
}
assert_eq!(*m.lock(), J * K * 2);
}
#[test]
fn try_lock() {
let m = Mutex::new(());
*m.try_lock().unwrap() = ();
}
#[test]
fn test_into_inner() {
let m = Mutex::new(NonCopy(10));
assert_eq!(m.into_inner(), NonCopy(10));
}
#[test]
fn test_into_inner_drop() {
struct Foo(Arc<AtomicUsize>);
impl Drop for Foo {
fn drop(&mut self) {
self.0.fetch_add(1, Ordering::SeqCst);
}
}
let num_drops = Arc::new(AtomicUsize::new(0));
let m = Mutex::new(Foo(num_drops.clone()));
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
{
let _inner = m.into_inner();
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
}
assert_eq!(num_drops.load(Ordering::SeqCst), 1);
}
#[test]
fn test_get_mut() {
let mut m = Mutex::new(NonCopy(10));
*m.get_mut() = NonCopy(20);
assert_eq!(m.into_inner(), NonCopy(20));
}
#[test]
fn test_mutex_arc_condvar() {
let packet = Packet(Arc::new((Mutex::new(false), Condvar::new())));
let packet2 = Packet(packet.0.clone());
let (tx, rx) = channel();
let _t = thread::spawn(move || {
// wait until parent gets in
rx.recv().unwrap();
let (lock, cvar) = &*packet2.0;
let mut lock = lock.lock();
*lock = true;
cvar.notify_one();
});
let (lock, cvar) = &*packet.0;
let mut lock = lock.lock();
tx.send(()).unwrap();
assert!(!*lock);
while !*lock {
cvar.wait(&mut lock);
}
}
#[test]
fn test_mutex_arc_nested() {
// Tests nested mutexes and access
// to underlying data.
let arc = Arc::new(Mutex::new(1));
let arc2 = Arc::new(Mutex::new(arc));
let (tx, rx) = channel();
let _t = thread::spawn(move || {
let lock = arc2.lock();
let lock2 = lock.lock();
assert_eq!(*lock2, 1);
tx.send(()).unwrap();
});
rx.recv().unwrap();
}
#[test]
fn test_mutex_arc_access_in_unwind() {
let arc = Arc::new(Mutex::new(1));
let arc2 = arc.clone();
let _ = thread::spawn(move || {
struct Unwinder {
i: Arc<Mutex<i32>>,
}
impl Drop for Unwinder {
fn drop(&mut self) {
*self.i.lock() += 1;
}
}
let _u = Unwinder { i: arc2 };
panic!();
})
.join();
let lock = arc.lock();
assert_eq!(*lock, 2);
}
#[test]
fn test_mutex_unsized() {
let mutex: &Mutex<[i32]> = &Mutex::new([1, 2, 3]);
{
let b = &mut *mutex.lock();
b[0] = 4;
b[2] = 5;
}
let comp: &[i32] = &[4, 2, 5];
assert_eq!(&*mutex.lock(), comp);
}
#[test]
fn test_mutexguard_sync() {
fn sync<T: Sync>(_: T) {}
let mutex = Mutex::new(());
sync(mutex.lock());
}
#[test]
fn test_mutex_debug() {
let mutex = Mutex::new(vec![0u8, 10]);
assert_eq!(format!("{:?}", mutex), "Mutex { data: [0, 10] }");
let _lock = mutex.lock();
assert_eq!(format!("{:?}", mutex), "Mutex { data: <locked> }");
}
#[cfg(feature = "serde")]
#[test]
fn test_serde() {
let contents: Vec<u8> = vec![0, 1, 2];
let mutex = Mutex::new(contents.clone());
let serialized = serialize(&mutex).unwrap();
let deserialized: Mutex<Vec<u8>> = deserialize(&serialized).unwrap();
assert_eq!(*(mutex.lock()), *(deserialized.lock()));
assert_eq!(contents, *(deserialized.lock()));
}
}
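A sketch of the optional fair unlock described in the `Mutex` docs above: instead of simply dropping the guard, hand the lock directly to the next waiting thread (the function name is hypothetical):

use parking_lot::{Mutex, MutexGuard};

fn bump_counter_fairly(counter: &Mutex<u64>) {
    let mut guard = counter.lock();
    *guard += 1;
    // Forces a fair handoff rather than the default, eventually fair unlock.
    MutexGuard::unlock_fair(guard);
}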

452
vendor/parking_lot/src/once.rs vendored Normal file

@@ -0,0 +1,452 @@
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use crate::util::UncheckedOptionExt;
use core::{
fmt, mem,
sync::atomic::{fence, AtomicU8, Ordering},
};
use parking_lot_core::{self, SpinWait, DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN};
const DONE_BIT: u8 = 1;
const POISON_BIT: u8 = 2;
const LOCKED_BIT: u8 = 4;
const PARKED_BIT: u8 = 8;
/// Current state of a `Once`.
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
pub enum OnceState {
/// A closure has not been executed yet
New,
/// A closure was executed but panicked.
Poisoned,
/// A thread is currently executing a closure.
InProgress,
/// A closure has completed successfully.
Done,
}
impl OnceState {
/// Returns whether the associated `Once` has been poisoned.
///
/// Once an initialization routine for a `Once` has panicked it will forever
/// indicate to future forced initialization routines that it is poisoned.
#[inline]
pub fn poisoned(self) -> bool {
matches!(self, OnceState::Poisoned)
}
/// Returns whether the associated `Once` has successfully executed a
/// closure.
#[inline]
pub fn done(self) -> bool {
matches!(self, OnceState::Done)
}
}
/// A synchronization primitive which can be used to run a one-time
/// initialization. Useful for one-time initialization for globals, FFI or
/// related functionality.
///
/// # Differences from the standard library `Once`
///
/// - Only requires 1 byte of space, instead of 1 word.
/// - Not required to be `'static`.
/// - Relaxed memory barriers in the fast path, which can significantly improve
/// performance on some architectures.
/// - Efficient handling of micro-contention using adaptive spinning.
///
/// # Examples
///
/// ```
/// use parking_lot::Once;
///
/// static START: Once = Once::new();
///
/// START.call_once(|| {
/// // run initialization here
/// });
/// ```
pub struct Once(AtomicU8);
impl Once {
/// Creates a new `Once` value.
#[inline]
pub const fn new() -> Once {
Once(AtomicU8::new(0))
}
/// Returns the current state of this `Once`.
#[inline]
pub fn state(&self) -> OnceState {
let state = self.0.load(Ordering::Acquire);
if state & DONE_BIT != 0 {
OnceState::Done
} else if state & LOCKED_BIT != 0 {
OnceState::InProgress
} else if state & POISON_BIT != 0 {
OnceState::Poisoned
} else {
OnceState::New
}
}
/// Performs an initialization routine once and only once. The given closure
/// will be executed if this is the first time `call_once` has been called,
/// and otherwise the routine will *not* be invoked.
///
/// This method will block the calling thread if another initialization
/// routine is currently running.
///
/// When this function returns, it is guaranteed that some initialization
/// has run and completed (it may not be the closure specified). It is also
/// guaranteed that any memory writes performed by the executed closure can
/// be reliably observed by other threads at this point (there is a
/// happens-before relation between the closure and code executing after the
/// return).
///
/// # Examples
///
/// ```
/// use parking_lot::Once;
///
/// static mut VAL: usize = 0;
/// static INIT: Once = Once::new();
///
/// // Accessing a `static mut` is unsafe much of the time, but if we do so
/// // in a synchronized fashion (e.g. write once or read all) then we're
/// // good to go!
/// //
/// // This function will only call `expensive_computation` once, and will
/// // otherwise always return the value returned from the first invocation.
/// fn get_cached_val() -> usize {
/// unsafe {
/// INIT.call_once(|| {
/// VAL = expensive_computation();
/// });
/// VAL
/// }
/// }
///
/// fn expensive_computation() -> usize {
/// // ...
/// # 2
/// }
/// ```
///
/// # Panics
///
/// The closure `f` will only be executed once if this is called
/// concurrently amongst many threads. If that closure panics, however, then
/// it will *poison* this `Once` instance, causing all future invocations of
/// `call_once` to also panic.
#[inline]
pub fn call_once<F>(&self, f: F)
where
F: FnOnce(),
{
if self.0.load(Ordering::Acquire) == DONE_BIT {
return;
}
let mut f = Some(f);
self.call_once_slow(false, &mut |_| unsafe { f.take().unchecked_unwrap()() });
}
/// Performs the same function as `call_once` except ignores poisoning.
///
/// If this `Once` has been poisoned (some initialization panicked) then
/// this function will continue to attempt to call initialization functions
/// until one of them doesn't panic.
///
/// The closure `f` is yielded a structure which can be used to query the
/// state of this `Once` (whether initialization has previously panicked or
/// not).
#[inline]
pub fn call_once_force<F>(&self, f: F)
where
F: FnOnce(OnceState),
{
if self.0.load(Ordering::Acquire) == DONE_BIT {
return;
}
let mut f = Some(f);
self.call_once_slow(true, &mut |state| unsafe {
f.take().unchecked_unwrap()(state)
});
}
// This is a non-generic function to reduce the monomorphization cost of
// using `call_once` (this isn't exactly a trivial or small implementation).
//
// Additionally, this is tagged with `#[cold]` as it should indeed be cold
// and it helps let LLVM know that calls to this function should be off the
// fast path. Essentially, this should help generate more straight line code
// in LLVM.
//
// Finally, this takes an `FnMut` instead of a `FnOnce` because there's
// currently no way to take an `FnOnce` and call it via virtual dispatch
// without some allocation overhead.
#[cold]
fn call_once_slow(&self, ignore_poison: bool, f: &mut dyn FnMut(OnceState)) {
let mut spinwait = SpinWait::new();
let mut state = self.0.load(Ordering::Relaxed);
loop {
// If another thread called the closure, we're done
if state & DONE_BIT != 0 {
// An acquire fence is needed here since we didn't load the
// state with Ordering::Acquire.
fence(Ordering::Acquire);
return;
}
// If the state has been poisoned and we aren't forcing, then panic
if state & POISON_BIT != 0 && !ignore_poison {
// Need the fence here as well for the same reason
fence(Ordering::Acquire);
panic!("Once instance has previously been poisoned");
}
// Grab the lock if it isn't locked, even if there is a queue on it.
// We also clear the poison bit since we are going to try running
// the closure again.
if state & LOCKED_BIT == 0 {
match self.0.compare_exchange_weak(
state,
(state | LOCKED_BIT) & !POISON_BIT,
Ordering::Acquire,
Ordering::Relaxed,
) {
Ok(_) => break,
Err(x) => state = x,
}
continue;
}
// If there is no queue, try spinning a few times
if state & PARKED_BIT == 0 && spinwait.spin() {
state = self.0.load(Ordering::Relaxed);
continue;
}
// Set the parked bit
if state & PARKED_BIT == 0 {
if let Err(x) = self.0.compare_exchange_weak(
state,
state | PARKED_BIT,
Ordering::Relaxed,
Ordering::Relaxed,
) {
state = x;
continue;
}
}
// Park our thread until we are woken up by the thread that owns the
// lock.
let addr = self as *const _ as usize;
let validate = || self.0.load(Ordering::Relaxed) == LOCKED_BIT | PARKED_BIT;
let before_sleep = || {};
let timed_out = |_, _| unreachable!();
unsafe {
parking_lot_core::park(
addr,
validate,
before_sleep,
timed_out,
DEFAULT_PARK_TOKEN,
None,
);
}
// Loop back and check if the done bit was set
spinwait.reset();
state = self.0.load(Ordering::Relaxed);
}
struct PanicGuard<'a>(&'a Once);
impl<'a> Drop for PanicGuard<'a> {
fn drop(&mut self) {
// Mark the state as poisoned, unlock it and unpark all threads.
let once = self.0;
let state = once.0.swap(POISON_BIT, Ordering::Release);
if state & PARKED_BIT != 0 {
let addr = once as *const _ as usize;
unsafe {
parking_lot_core::unpark_all(addr, DEFAULT_UNPARK_TOKEN);
}
}
}
}
// At this point we have the lock, so run the closure. Make sure we
// properly clean up if the closure panics.
let guard = PanicGuard(self);
let once_state = if state & POISON_BIT != 0 {
OnceState::Poisoned
} else {
OnceState::New
};
f(once_state);
mem::forget(guard);
// Now unlock the state, set the done bit and unpark all threads
let state = self.0.swap(DONE_BIT, Ordering::Release);
if state & PARKED_BIT != 0 {
let addr = self as *const _ as usize;
unsafe {
parking_lot_core::unpark_all(addr, DEFAULT_UNPARK_TOKEN);
}
}
}
}
impl Default for Once {
#[inline]
fn default() -> Once {
Once::new()
}
}
impl fmt::Debug for Once {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Once")
.field("state", &self.state())
.finish()
}
}
#[cfg(test)]
mod tests {
use crate::Once;
use std::panic;
use std::sync::mpsc::channel;
use std::thread;
#[test]
fn smoke_once() {
static O: Once = Once::new();
let mut a = 0;
O.call_once(|| a += 1);
assert_eq!(a, 1);
O.call_once(|| a += 1);
assert_eq!(a, 1);
}
#[test]
fn stampede_once() {
static O: Once = Once::new();
static mut RUN: bool = false;
let (tx, rx) = channel();
for _ in 0..10 {
let tx = tx.clone();
thread::spawn(move || {
for _ in 0..4 {
thread::yield_now()
}
unsafe {
O.call_once(|| {
assert!(!RUN);
RUN = true;
});
assert!(RUN);
}
tx.send(()).unwrap();
});
}
unsafe {
O.call_once(|| {
assert!(!RUN);
RUN = true;
});
assert!(RUN);
}
for _ in 0..10 {
rx.recv().unwrap();
}
}
#[test]
fn poison_bad() {
static O: Once = Once::new();
// poison the once
let t = panic::catch_unwind(|| {
O.call_once(|| panic!());
});
assert!(t.is_err());
// poisoning propagates
let t = panic::catch_unwind(|| {
O.call_once(|| {});
});
assert!(t.is_err());
// we can subvert poisoning, however
let mut called = false;
O.call_once_force(|p| {
called = true;
assert!(p.poisoned())
});
assert!(called);
// once any success happens, we stop propagating the poison
O.call_once(|| {});
}
#[test]
fn wait_for_force_to_finish() {
static O: Once = Once::new();
// poison the once
let t = panic::catch_unwind(|| {
O.call_once(|| panic!());
});
assert!(t.is_err());
// make sure someone's waiting inside the once via a force
let (tx1, rx1) = channel();
let (tx2, rx2) = channel();
let t1 = thread::spawn(move || {
O.call_once_force(|p| {
assert!(p.poisoned());
tx1.send(()).unwrap();
rx2.recv().unwrap();
});
});
rx1.recv().unwrap();
// put another waiter on the once
let t2 = thread::spawn(|| {
let mut called = false;
O.call_once(|| {
called = true;
});
assert!(!called);
});
tx2.send(()).unwrap();
assert!(t1.join().is_ok());
assert!(t2.join().is_ok());
}
#[test]
fn test_once_debug() {
static O: Once = Once::new();
assert_eq!(format!("{:?}", O), "Once { state: New }");
}
}
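A small sketch of the `call_once_force` behaviour documented above: unlike `call_once`, it keeps retrying initialization after an earlier closure panicked (the wrapper below is hypothetical):

use parking_lot::Once;

fn init_with_retry(once: &Once, setup: impl FnOnce()) {
    once.call_once_force(|state| {
        if state.poisoned() {
            // A previous initializer panicked; running again clears the poison.
            eprintln!("previous initialization panicked; retrying");
        }
        setup();
    });
}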

65
vendor/parking_lot/src/raw_fair_mutex.rs vendored Normal file

@@ -0,0 +1,65 @@
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use crate::raw_mutex::RawMutex;
use lock_api::RawMutexFair;
/// Raw fair mutex type backed by the parking lot.
pub struct RawFairMutex(RawMutex);
unsafe impl lock_api::RawMutex for RawFairMutex {
const INIT: Self = RawFairMutex(<RawMutex as lock_api::RawMutex>::INIT);
type GuardMarker = <RawMutex as lock_api::RawMutex>::GuardMarker;
#[inline]
fn lock(&self) {
self.0.lock()
}
#[inline]
fn try_lock(&self) -> bool {
self.0.try_lock()
}
#[inline]
unsafe fn unlock(&self) {
self.unlock_fair()
}
#[inline]
fn is_locked(&self) -> bool {
self.0.is_locked()
}
}
unsafe impl lock_api::RawMutexFair for RawFairMutex {
#[inline]
unsafe fn unlock_fair(&self) {
self.0.unlock_fair()
}
#[inline]
unsafe fn bump(&self) {
self.0.bump()
}
}
unsafe impl lock_api::RawMutexTimed for RawFairMutex {
type Duration = <RawMutex as lock_api::RawMutexTimed>::Duration;
type Instant = <RawMutex as lock_api::RawMutexTimed>::Instant;
#[inline]
fn try_lock_until(&self, timeout: Self::Instant) -> bool {
self.0.try_lock_until(timeout)
}
#[inline]
fn try_lock_for(&self, timeout: Self::Duration) -> bool {
self.0.try_lock_for(timeout)
}
}
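The mutex docs in this crate list raw locking and unlocking without a guard as a supported pattern; a hedged sketch using the `lock_api` traits implemented above (names hypothetical):

use lock_api::RawMutex as _;
use parking_lot::RawFairMutex;

static RAW: RawFairMutex = <RawFairMutex as lock_api::RawMutex>::INIT;

fn with_raw_fair_lock(f: impl FnOnce()) {
    RAW.lock();
    f();
    // SAFETY: the lock was acquired just above on this thread. Note that if `f`
    // panics the lock is never released; real code would prefer a guard.
    unsafe { RAW.unlock() };
}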

331
vendor/parking_lot/src/raw_mutex.rs vendored Normal file

@@ -0,0 +1,331 @@
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use crate::{deadlock, util};
use core::{
sync::atomic::{AtomicU8, Ordering},
time::Duration,
};
use lock_api::RawMutex as RawMutex_;
use parking_lot_core::{self, ParkResult, SpinWait, UnparkResult, UnparkToken, DEFAULT_PARK_TOKEN};
use std::time::Instant;
// UnparkToken used to indicate that the target thread should attempt to
// lock the mutex again as soon as it is unparked.
pub(crate) const TOKEN_NORMAL: UnparkToken = UnparkToken(0);
// UnparkToken used to indicate that the mutex is being handed off to the target
// thread directly without unlocking it.
pub(crate) const TOKEN_HANDOFF: UnparkToken = UnparkToken(1);
/// This bit is set in the `state` of a `RawMutex` when that mutex is locked by some thread.
const LOCKED_BIT: u8 = 0b01;
/// This bit is set in the `state` of a `RawMutex` just before parking a thread. A thread is being
/// parked if it wants to lock the mutex, but it is currently being held by some other thread.
const PARKED_BIT: u8 = 0b10;
/// Raw mutex type backed by the parking lot.
pub struct RawMutex {
/// This atomic integer holds the current state of the mutex instance. Only the two lowest bits
/// are used. See `LOCKED_BIT` and `PARKED_BIT` for the bitmask for these bits.
///
/// # State table:
///
/// PARKED_BIT | LOCKED_BIT | Description
/// 0 | 0 | The mutex is not locked, nor is anyone waiting for it.
/// -----------+------------+------------------------------------------------------------------
/// 0 | 1 | The mutex is locked by exactly one thread. No other thread is
/// | | waiting for it.
/// -----------+------------+------------------------------------------------------------------
/// 1 | 0 | The mutex is not locked. One or more threads are parked or about to
/// | | park. At least one of the parked threads is just about to be
/// | | unparked, or a thread heading for parking might abort the park.
/// -----------+------------+------------------------------------------------------------------
/// 1 | 1 | The mutex is locked by exactly one thread. One or more threads are
/// | | parked or about to park, waiting for the lock to become available.
/// | | In this state, PARKED_BIT is only ever cleared when a bucket lock
/// | | is held (i.e. in a parking_lot_core callback). This ensures that
/// | | we never end up in a situation where there are parked threads but
/// | | PARKED_BIT is not set (which would result in those threads
/// | | potentially never getting woken up).
state: AtomicU8,
}
unsafe impl lock_api::RawMutex for RawMutex {
const INIT: RawMutex = RawMutex {
state: AtomicU8::new(0),
};
type GuardMarker = crate::GuardMarker;
#[inline]
fn lock(&self) {
if self
.state
.compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
.is_err()
{
self.lock_slow(None);
}
unsafe { deadlock::acquire_resource(self as *const _ as usize) };
}
#[inline]
fn try_lock(&self) -> bool {
let mut state = self.state.load(Ordering::Relaxed);
loop {
if state & LOCKED_BIT != 0 {
return false;
}
match self.state.compare_exchange_weak(
state,
state | LOCKED_BIT,
Ordering::Acquire,
Ordering::Relaxed,
) {
Ok(_) => {
unsafe { deadlock::acquire_resource(self as *const _ as usize) };
return true;
}
Err(x) => state = x,
}
}
}
#[inline]
unsafe fn unlock(&self) {
deadlock::release_resource(self as *const _ as usize);
if self
.state
.compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
.is_ok()
{
return;
}
self.unlock_slow(false);
}
#[inline]
fn is_locked(&self) -> bool {
let state = self.state.load(Ordering::Relaxed);
state & LOCKED_BIT != 0
}
}
unsafe impl lock_api::RawMutexFair for RawMutex {
#[inline]
unsafe fn unlock_fair(&self) {
deadlock::release_resource(self as *const _ as usize);
if self
.state
.compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
.is_ok()
{
return;
}
self.unlock_slow(true);
}
#[inline]
unsafe fn bump(&self) {
if self.state.load(Ordering::Relaxed) & PARKED_BIT != 0 {
self.bump_slow();
}
}
}
unsafe impl lock_api::RawMutexTimed for RawMutex {
type Duration = Duration;
type Instant = Instant;
#[inline]
fn try_lock_until(&self, timeout: Instant) -> bool {
let result = if self
.state
.compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
.is_ok()
{
true
} else {
self.lock_slow(Some(timeout))
};
if result {
unsafe { deadlock::acquire_resource(self as *const _ as usize) };
}
result
}
#[inline]
fn try_lock_for(&self, timeout: Duration) -> bool {
let result = if self
.state
.compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
.is_ok()
{
true
} else {
self.lock_slow(util::to_deadline(timeout))
};
if result {
unsafe { deadlock::acquire_resource(self as *const _ as usize) };
}
result
}
}
impl RawMutex {
// Used by Condvar when requeuing threads to us, must be called while
// holding the queue lock.
#[inline]
pub(crate) fn mark_parked_if_locked(&self) -> bool {
let mut state = self.state.load(Ordering::Relaxed);
loop {
if state & LOCKED_BIT == 0 {
return false;
}
match self.state.compare_exchange_weak(
state,
state | PARKED_BIT,
Ordering::Relaxed,
Ordering::Relaxed,
) {
Ok(_) => return true,
Err(x) => state = x,
}
}
}
// Used by Condvar when requeuing threads to us, must be called while
// holding the queue lock.
#[inline]
pub(crate) fn mark_parked(&self) {
self.state.fetch_or(PARKED_BIT, Ordering::Relaxed);
}
#[cold]
fn lock_slow(&self, timeout: Option<Instant>) -> bool {
let mut spinwait = SpinWait::new();
let mut state = self.state.load(Ordering::Relaxed);
loop {
// Grab the lock if it isn't locked, even if there is a queue on it
if state & LOCKED_BIT == 0 {
match self.state.compare_exchange_weak(
state,
state | LOCKED_BIT,
Ordering::Acquire,
Ordering::Relaxed,
) {
Ok(_) => return true,
Err(x) => state = x,
}
continue;
}
// If there is no queue, try spinning a few times
if state & PARKED_BIT == 0 && spinwait.spin() {
state = self.state.load(Ordering::Relaxed);
continue;
}
// Set the parked bit
if state & PARKED_BIT == 0 {
if let Err(x) = self.state.compare_exchange_weak(
state,
state | PARKED_BIT,
Ordering::Relaxed,
Ordering::Relaxed,
) {
state = x;
continue;
}
}
// Park our thread until we are woken up by an unlock
let addr = self as *const _ as usize;
let validate = || self.state.load(Ordering::Relaxed) == LOCKED_BIT | PARKED_BIT;
let before_sleep = || {};
let timed_out = |_, was_last_thread| {
// Clear the parked bit if we were the last parked thread
if was_last_thread {
self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed);
}
};
// SAFETY:
// * `addr` is an address we control.
// * `validate`/`timed_out` does not panic or call into any function of `parking_lot`.
// * `before_sleep` does not call `park`, nor does it panic.
match unsafe {
parking_lot_core::park(
addr,
validate,
before_sleep,
timed_out,
DEFAULT_PARK_TOKEN,
timeout,
)
} {
// The thread that unparked us passed the lock on to us
// directly without unlocking it.
ParkResult::Unparked(TOKEN_HANDOFF) => return true,
// We were unparked normally, try acquiring the lock again
ParkResult::Unparked(_) => (),
// The validation function failed, try locking again
ParkResult::Invalid => (),
// Timeout expired
ParkResult::TimedOut => return false,
}
// Loop back and try locking again
spinwait.reset();
state = self.state.load(Ordering::Relaxed);
}
}
#[cold]
fn unlock_slow(&self, force_fair: bool) {
// Unpark one thread and leave the parked bit set if there might
// still be parked threads on this address.
let addr = self as *const _ as usize;
let callback = |result: UnparkResult| {
// If we are using a fair unlock then we should keep the
// mutex locked and hand it off to the unparked thread.
if result.unparked_threads != 0 && (force_fair || result.be_fair) {
// Clear the parked bit if there are no more parked
// threads.
if !result.have_more_threads {
self.state.store(LOCKED_BIT, Ordering::Relaxed);
}
return TOKEN_HANDOFF;
}
// Clear the locked bit, and the parked bit as well if there
// are no more parked threads.
if result.have_more_threads {
self.state.store(PARKED_BIT, Ordering::Release);
} else {
self.state.store(0, Ordering::Release);
}
TOKEN_NORMAL
};
// SAFETY:
// * `addr` is an address we control.
// * `callback` does not panic or call into any function of `parking_lot`.
unsafe {
parking_lot_core::unpark_one(addr, callback);
}
}
#[cold]
fn bump_slow(&self) {
unsafe { deadlock::release_resource(self as *const _ as usize) };
self.unlock_slow(true);
self.lock();
}
}
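The state table in the `RawMutex` docs encodes four situations in two bits; a tiny illustration of that encoding (constants copied from above, the function is purely explanatory):

const LOCKED_BIT: u8 = 0b01;
const PARKED_BIT: u8 = 0b10;

fn describe_state(state: u8) -> &'static str {
    match (state & LOCKED_BIT != 0, state & PARKED_BIT != 0) {
        (false, false) => "unlocked, no waiters",
        (true, false) => "locked by one thread, no waiters",
        (false, true) => "unlocked, threads parked or about to park",
        (true, true) => "locked, threads parked or about to park",
    }
}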

1157
vendor/parking_lot/src/raw_rwlock.rs vendored Normal file

File diff suppressed because it is too large

171
vendor/parking_lot/src/remutex.rs vendored Normal file

@@ -0,0 +1,171 @@
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use crate::raw_mutex::RawMutex;
use core::num::NonZeroUsize;
use lock_api::{self, GetThreadId};
/// Implementation of the `GetThreadId` trait for `lock_api::ReentrantMutex`.
pub struct RawThreadId;
unsafe impl GetThreadId for RawThreadId {
const INIT: RawThreadId = RawThreadId;
fn nonzero_thread_id(&self) -> NonZeroUsize {
// The address of a thread-local variable is guaranteed to be unique to the
// current thread, and is also guaranteed to be non-zero. The variable has to have a
// non-zero size to guarantee it has a unique address for each thread.
thread_local!(static KEY: u8 = 0);
KEY.with(|x| {
NonZeroUsize::new(x as *const _ as usize)
.expect("thread-local variable address is null")
})
}
}
/// A mutex which can be recursively locked by a single thread.
///
/// This type is identical to `Mutex` except for the following points:
///
/// - Locking multiple times from the same thread will work correctly instead of
/// deadlocking.
/// - `ReentrantMutexGuard` does not give mutable references to the locked data.
/// Use a `RefCell` if you need this.
///
/// See [`Mutex`](crate::Mutex) for more details about the underlying mutex
/// primitive.
pub type ReentrantMutex<T> = lock_api::ReentrantMutex<RawMutex, RawThreadId, T>;
/// Creates a new reentrant mutex in an unlocked state ready for use.
///
/// This allows creating a reentrant mutex in a constant context on stable Rust.
pub const fn const_reentrant_mutex<T>(val: T) -> ReentrantMutex<T> {
ReentrantMutex::const_new(
<RawMutex as lock_api::RawMutex>::INIT,
<RawThreadId as lock_api::GetThreadId>::INIT,
val,
)
}
/// An RAII implementation of a "scoped lock" of a reentrant mutex. When this structure
/// is dropped (falls out of scope), the lock will be unlocked.
///
/// The data protected by the mutex can be accessed through this guard via its
/// `Deref` implementation.
pub type ReentrantMutexGuard<'a, T> = lock_api::ReentrantMutexGuard<'a, RawMutex, RawThreadId, T>;
/// An RAII mutex guard returned by `ReentrantMutexGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedReentrantMutexGuard` and `ReentrantMutexGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
pub type MappedReentrantMutexGuard<'a, T> =
lock_api::MappedReentrantMutexGuard<'a, RawMutex, RawThreadId, T>;
#[cfg(test)]
mod tests {
use crate::ReentrantMutex;
use crate::ReentrantMutexGuard;
use std::cell::RefCell;
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread;
#[cfg(feature = "serde")]
use bincode::{deserialize, serialize};
#[test]
fn smoke() {
let m = ReentrantMutex::new(2);
{
let a = m.lock();
{
let b = m.lock();
{
let c = m.lock();
assert_eq!(*c, 2);
}
assert_eq!(*b, 2);
}
assert_eq!(*a, 2);
}
}
#[test]
fn is_mutex() {
let m = Arc::new(ReentrantMutex::new(RefCell::new(0)));
let m2 = m.clone();
let lock = m.lock();
let child = thread::spawn(move || {
let lock = m2.lock();
assert_eq!(*lock.borrow(), 4950);
});
for i in 0..100 {
let lock = m.lock();
*lock.borrow_mut() += i;
}
drop(lock);
child.join().unwrap();
}
#[test]
fn trylock_works() {
let m = Arc::new(ReentrantMutex::new(()));
let m2 = m.clone();
let _lock = m.try_lock();
let _lock2 = m.try_lock();
thread::spawn(move || {
let lock = m2.try_lock();
assert!(lock.is_none());
})
.join()
.unwrap();
let _lock3 = m.try_lock();
}
#[test]
fn test_reentrant_mutex_debug() {
let mutex = ReentrantMutex::new(vec![0u8, 10]);
assert_eq!(format!("{:?}", mutex), "ReentrantMutex { data: [0, 10] }");
}
#[test]
fn test_reentrant_mutex_bump() {
let mutex = Arc::new(ReentrantMutex::new(()));
let mutex2 = mutex.clone();
let mut guard = mutex.lock();
let (tx, rx) = channel();
thread::spawn(move || {
let _guard = mutex2.lock();
tx.send(()).unwrap();
});
// `bump()` repeatedly until the thread starts up and requests the lock
while rx.try_recv().is_err() {
ReentrantMutexGuard::bump(&mut guard);
}
}
#[cfg(feature = "serde")]
#[test]
fn test_serde() {
let contents: Vec<u8> = vec![0, 1, 2];
let mutex = ReentrantMutex::new(contents.clone());
let serialized = serialize(&mutex).unwrap();
let deserialized: ReentrantMutex<Vec<u8>> = deserialize(&serialized).unwrap();
assert_eq!(*(mutex.lock()), *(deserialized.lock()));
assert_eq!(contents, *(deserialized.lock()));
}
}
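Because `ReentrantMutexGuard` only hands out `&T`, the docs above suggest pairing the mutex with a `RefCell` for mutation; a short sketch of that pattern combined with recursive locking (function name hypothetical):

use parking_lot::ReentrantMutex;
use std::cell::RefCell;

fn add_recursively(m: &ReentrantMutex<RefCell<u32>>, depth: u32) {
    // Re-locking on the same thread is fine; the RefCell supplies the `&mut` access.
    let guard = m.lock();
    *guard.borrow_mut() += 1;
    if depth > 0 {
        add_recursively(m, depth - 1);
    }
}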

659
vendor/parking_lot/src/rwlock.rs vendored Normal file

@@ -0,0 +1,659 @@
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use crate::raw_rwlock::RawRwLock;
/// A reader-writer lock
///
/// This type of lock allows a number of readers or at most one writer at any
/// point in time. The write portion of this lock typically allows modification
/// of the underlying data (exclusive access) and the read portion of this lock
/// typically allows for read-only access (shared access).
///
/// This lock uses a task-fair locking policy which avoids both reader and
/// writer starvation. This means that readers trying to acquire the lock will
/// block even if the lock is unlocked when there are writers waiting to acquire
/// the lock. Because of this, attempts to recursively acquire a read lock
/// within a single thread may result in a deadlock.
///
/// The type parameter `T` represents the data that this lock protects. It is
/// required that `T` satisfies `Send` to be shared across threads and `Sync` to
/// allow concurrent access through readers. The RAII guards returned from the
/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
/// to allow access to the contents of the lock.
///
/// # Fairness
///
/// A typical unfair lock can often end up in a situation where a single thread
/// quickly acquires and releases the same lock in succession. While this
/// improves throughput, because it avoids a context switch when a thread tries
/// to re-acquire a rwlock it has just released, it can starve other threads
/// waiting to acquire the rwlock.
///
/// This rwlock uses [eventual fairness](https://trac.webkit.org/changeset/203350)
/// to ensure that the lock will be fair on average without sacrificing
/// throughput. This is done by forcing a fair unlock on average every 0.5ms,
/// which will force the lock to go to the next thread waiting for the rwlock.
///
/// Additionally, any critical section longer than 1ms will always use a fair
/// unlock, which has a negligible impact on throughput considering the length
/// of the critical section.
///
/// You can also force a fair unlock by calling `RwLockReadGuard::unlock_fair`
/// or `RwLockWriteGuard::unlock_fair` when unlocking the rwlock instead of simply
/// dropping the guard.
///
/// # Differences from the standard library `RwLock`
///
/// - Supports atomically downgrading a write lock into a read lock.
/// - Task-fair locking policy instead of an unspecified platform default.
/// - No poisoning, the lock is released normally on panic.
/// - Only requires 1 word of space, whereas the standard library boxes the
/// `RwLock` due to platform limitations.
/// - Can be statically constructed.
/// - Does not require any drop glue when dropped.
/// - Inline fast path for the uncontended case.
/// - Efficient handling of micro-contention using adaptive spinning.
/// - Allows raw locking & unlocking without a guard.
/// - Supports eventual fairness so that the rwlock is fair on average.
/// - Optionally allows making the rwlock fair by calling
/// `RwLockReadGuard::unlock_fair` and `RwLockWriteGuard::unlock_fair`.
///
/// # Examples
///
/// ```
/// use parking_lot::RwLock;
///
/// let lock = RwLock::new(5);
///
/// // many reader locks can be held at once
/// {
/// let r1 = lock.read();
/// let r2 = lock.read();
/// assert_eq!(*r1, 5);
/// assert_eq!(*r2, 5);
/// } // read locks are dropped at this point
///
/// // only one write lock may be held, however
/// {
/// let mut w = lock.write();
/// *w += 1;
/// assert_eq!(*w, 6);
/// } // write lock is dropped here
/// ```
pub type RwLock<T> = lock_api::RwLock<RawRwLock, T>;
/// Creates a new instance of an `RwLock<T>` which is unlocked.
///
/// This allows creating a `RwLock<T>` in a constant context on stable Rust.
pub const fn const_rwlock<T>(val: T) -> RwLock<T> {
RwLock::const_new(<RawRwLock as lock_api::RawRwLock>::INIT, val)
}
/// RAII structure used to release the shared read access of a lock when
/// dropped.
pub type RwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, RawRwLock, T>;
/// RAII structure used to release the exclusive write access of a lock when
/// dropped.
pub type RwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, RawRwLock, T>;
/// An RAII read lock guard returned by `RwLockReadGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedRwLockReadGuard` and `RwLockReadGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
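///
/// A minimal sketch of creating one via `RwLockReadGuard::map`:
///
/// ```
/// use parking_lot::{RwLock, RwLockReadGuard};
///
/// let lock = RwLock::new((1, String::from("hello")));
/// let guard = lock.read();
/// // Narrow the guard so that it only exposes the second field of the tuple.
/// let s = RwLockReadGuard::map(guard, |pair| &pair.1);
/// assert_eq!(&*s, "hello");
/// ```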
pub type MappedRwLockReadGuard<'a, T> = lock_api::MappedRwLockReadGuard<'a, RawRwLock, T>;
/// An RAII write lock guard returned by `RwLockWriteGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedRwLockWriteGuard` and `RwLockWriteGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
pub type MappedRwLockWriteGuard<'a, T> = lock_api::MappedRwLockWriteGuard<'a, RawRwLock, T>;
/// RAII structure used to release the upgradable read access of a lock when
/// dropped.
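///
/// A minimal sketch of upgrading shared access to exclusive access:
///
/// ```
/// use parking_lot::{RwLock, RwLockUpgradableReadGuard};
///
/// let lock = RwLock::new(0);
/// let upgradable = lock.upgradable_read();
/// if *upgradable == 0 {
///     // Atomically trade the upgradable read lock for a write lock.
///     let mut writer = RwLockUpgradableReadGuard::upgrade(upgradable);
///     *writer = 1;
/// }
/// assert_eq!(*lock.read(), 1);
/// ```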
pub type RwLockUpgradableReadGuard<'a, T> = lock_api::RwLockUpgradableReadGuard<'a, RawRwLock, T>;
#[cfg(test)]
mod tests {
use crate::{RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard};
use rand::Rng;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread;
use std::time::Duration;
#[cfg(feature = "serde")]
use bincode::{deserialize, serialize};
#[derive(Eq, PartialEq, Debug)]
struct NonCopy(i32);
#[test]
fn smoke() {
let l = RwLock::new(());
drop(l.read());
drop(l.write());
drop(l.upgradable_read());
drop((l.read(), l.read()));
drop((l.read(), l.upgradable_read()));
drop(l.write());
}
#[test]
fn frob() {
const N: u32 = 10;
const M: u32 = 1000;
let r = Arc::new(RwLock::new(()));
let (tx, rx) = channel::<()>();
for _ in 0..N {
let tx = tx.clone();
let r = r.clone();
thread::spawn(move || {
let mut rng = rand::thread_rng();
for _ in 0..M {
if rng.gen_bool(1.0 / N as f64) {
drop(r.write());
} else {
drop(r.read());
}
}
drop(tx);
});
}
drop(tx);
let _ = rx.recv();
}
#[test]
fn test_rw_arc_no_poison_wr() {
let arc = Arc::new(RwLock::new(1));
let arc2 = arc.clone();
let _: Result<(), _> = thread::spawn(move || {
let _lock = arc2.write();
panic!();
})
.join();
let lock = arc.read();
assert_eq!(*lock, 1);
}
#[test]
fn test_rw_arc_no_poison_ww() {
let arc = Arc::new(RwLock::new(1));
let arc2 = arc.clone();
let _: Result<(), _> = thread::spawn(move || {
let _lock = arc2.write();
panic!();
})
.join();
let lock = arc.write();
assert_eq!(*lock, 1);
}
#[test]
fn test_rw_arc_no_poison_rr() {
let arc = Arc::new(RwLock::new(1));
let arc2 = arc.clone();
let _: Result<(), _> = thread::spawn(move || {
let _lock = arc2.read();
panic!();
})
.join();
let lock = arc.read();
assert_eq!(*lock, 1);
}
#[test]
fn test_rw_arc_no_poison_rw() {
let arc = Arc::new(RwLock::new(1));
let arc2 = arc.clone();
let _: Result<(), _> = thread::spawn(move || {
let _lock = arc2.read();
panic!()
})
.join();
let lock = arc.write();
assert_eq!(*lock, 1);
}
#[test]
fn test_ruw_arc() {
let arc = Arc::new(RwLock::new(0));
let arc2 = arc.clone();
let (tx, rx) = channel();
thread::spawn(move || {
for _ in 0..10 {
let mut lock = arc2.write();
let tmp = *lock;
*lock = -1;
thread::yield_now();
*lock = tmp + 1;
}
tx.send(()).unwrap();
});
let mut children = Vec::new();
// Upgradable readers try to catch the writer in the act and also
// try to touch the value
for _ in 0..5 {
let arc3 = arc.clone();
children.push(thread::spawn(move || {
let lock = arc3.upgradable_read();
let tmp = *lock;
assert!(tmp >= 0);
thread::yield_now();
let mut lock = RwLockUpgradableReadGuard::upgrade(lock);
assert_eq!(tmp, *lock);
*lock = -1;
thread::yield_now();
*lock = tmp + 1;
}));
}
// Readers try to catch the writers in the act
for _ in 0..5 {
let arc4 = arc.clone();
children.push(thread::spawn(move || {
let lock = arc4.read();
assert!(*lock >= 0);
}));
}
// Wait for children to pass their asserts
for r in children {
assert!(r.join().is_ok());
}
// Wait for writer to finish
rx.recv().unwrap();
let lock = arc.read();
assert_eq!(*lock, 15);
}
#[test]
fn test_rw_arc() {
let arc = Arc::new(RwLock::new(0));
let arc2 = arc.clone();
let (tx, rx) = channel();
thread::spawn(move || {
let mut lock = arc2.write();
for _ in 0..10 {
let tmp = *lock;
*lock = -1;
thread::yield_now();
*lock = tmp + 1;
}
tx.send(()).unwrap();
});
// Readers try to catch the writer in the act
let mut children = Vec::new();
for _ in 0..5 {
let arc3 = arc.clone();
children.push(thread::spawn(move || {
let lock = arc3.read();
assert!(*lock >= 0);
}));
}
// Wait for children to pass their asserts
for r in children {
assert!(r.join().is_ok());
}
// Wait for writer to finish
rx.recv().unwrap();
let lock = arc.read();
assert_eq!(*lock, 10);
}
#[test]
fn test_rw_arc_access_in_unwind() {
let arc = Arc::new(RwLock::new(1));
let arc2 = arc.clone();
let _ = thread::spawn(move || {
struct Unwinder {
i: Arc<RwLock<isize>>,
}
impl Drop for Unwinder {
fn drop(&mut self) {
let mut lock = self.i.write();
*lock += 1;
}
}
let _u = Unwinder { i: arc2 };
panic!();
})
.join();
let lock = arc.read();
assert_eq!(*lock, 2);
}
#[test]
fn test_rwlock_unsized() {
let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
{
let b = &mut *rw.write();
b[0] = 4;
b[2] = 5;
}
let comp: &[i32] = &[4, 2, 5];
assert_eq!(&*rw.read(), comp);
}
#[test]
fn test_rwlock_try_read() {
let lock = RwLock::new(0isize);
{
let read_guard = lock.read();
let read_result = lock.try_read();
assert!(
read_result.is_some(),
"try_read should succeed while read_guard is in scope"
);
drop(read_guard);
}
{
let upgrade_guard = lock.upgradable_read();
let read_result = lock.try_read();
assert!(
read_result.is_some(),
"try_read should succeed while upgrade_guard is in scope"
);
drop(upgrade_guard);
}
{
let write_guard = lock.write();
let read_result = lock.try_read();
assert!(
read_result.is_none(),
"try_read should fail while write_guard is in scope"
);
drop(write_guard);
}
}
#[test]
fn test_rwlock_try_write() {
let lock = RwLock::new(0isize);
{
let read_guard = lock.read();
let write_result = lock.try_write();
assert!(
write_result.is_none(),
"try_write should fail while read_guard is in scope"
);
assert!(lock.is_locked());
assert!(!lock.is_locked_exclusive());
drop(read_guard);
}
{
let upgrade_guard = lock.upgradable_read();
let write_result = lock.try_write();
assert!(
write_result.is_none(),
"try_write should fail while upgrade_guard is in scope"
);
assert!(lock.is_locked());
assert!(!lock.is_locked_exclusive());
drop(upgrade_guard);
}
{
let write_guard = lock.write();
let write_result = lock.try_write();
assert!(
write_result.is_none(),
"try_write should fail while write_guard is in scope"
);
assert!(lock.is_locked());
assert!(lock.is_locked_exclusive());
drop(write_guard);
}
}
#[test]
fn test_rwlock_try_upgrade() {
let lock = RwLock::new(0isize);
{
let read_guard = lock.read();
let upgrade_result = lock.try_upgradable_read();
assert!(
upgrade_result.is_some(),
"try_upgradable_read should succeed while read_guard is in scope"
);
drop(read_guard);
}
{
let upgrade_guard = lock.upgradable_read();
let upgrade_result = lock.try_upgradable_read();
assert!(
upgrade_result.is_none(),
"try_upgradable_read should fail while upgrade_guard is in scope"
);
drop(upgrade_guard);
}
{
let write_guard = lock.write();
let upgrade_result = lock.try_upgradable_read();
assert!(
upgrade_result.is_none(),
"try_upgradable should fail while write_guard is in scope"
);
drop(write_guard);
}
}
#[test]
fn test_into_inner() {
let m = RwLock::new(NonCopy(10));
assert_eq!(m.into_inner(), NonCopy(10));
}
#[test]
fn test_into_inner_drop() {
struct Foo(Arc<AtomicUsize>);
impl Drop for Foo {
fn drop(&mut self) {
self.0.fetch_add(1, Ordering::SeqCst);
}
}
let num_drops = Arc::new(AtomicUsize::new(0));
let m = RwLock::new(Foo(num_drops.clone()));
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
{
let _inner = m.into_inner();
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
}
assert_eq!(num_drops.load(Ordering::SeqCst), 1);
}
#[test]
fn test_get_mut() {
let mut m = RwLock::new(NonCopy(10));
*m.get_mut() = NonCopy(20);
assert_eq!(m.into_inner(), NonCopy(20));
}
#[test]
fn test_rwlockguard_sync() {
fn sync<T: Sync>(_: T) {}
let rwlock = RwLock::new(());
sync(rwlock.read());
sync(rwlock.write());
}
#[test]
fn test_rwlock_downgrade() {
let x = Arc::new(RwLock::new(0));
let mut handles = Vec::new();
for _ in 0..8 {
let x = x.clone();
handles.push(thread::spawn(move || {
for _ in 0..100 {
let mut writer = x.write();
*writer += 1;
let cur_val = *writer;
let reader = RwLockWriteGuard::downgrade(writer);
assert_eq!(cur_val, *reader);
}
}));
}
for handle in handles {
handle.join().unwrap()
}
assert_eq!(*x.read(), 800);
}
#[test]
fn test_rwlock_recursive() {
let arc = Arc::new(RwLock::new(1));
let arc2 = arc.clone();
let lock1 = arc.read();
let t = thread::spawn(move || {
let _lock = arc2.write();
});
if cfg!(not(all(target_env = "sgx", target_vendor = "fortanix"))) {
thread::sleep(Duration::from_millis(100));
} else {
// FIXME: https://github.com/fortanix/rust-sgx/issues/31
for _ in 0..100 {
thread::yield_now();
}
}
// A normal read would block here since there is a pending writer
let lock2 = arc.read_recursive();
// Unblock the thread and join it.
drop(lock1);
drop(lock2);
t.join().unwrap();
}
#[test]
fn test_rwlock_debug() {
let x = RwLock::new(vec![0u8, 10]);
assert_eq!(format!("{:?}", x), "RwLock { data: [0, 10] }");
let _lock = x.write();
assert_eq!(format!("{:?}", x), "RwLock { data: <locked> }");
}
#[test]
fn test_clone() {
let rwlock = RwLock::new(Arc::new(1));
let a = rwlock.read_recursive();
let b = a.clone();
assert_eq!(Arc::strong_count(&b), 2);
}
#[cfg(feature = "serde")]
#[test]
fn test_serde() {
let contents: Vec<u8> = vec![0, 1, 2];
let mutex = RwLock::new(contents.clone());
let serialized = serialize(&mutex).unwrap();
let deserialized: RwLock<Vec<u8>> = deserialize(&serialized).unwrap();
assert_eq!(*(mutex.read()), *(deserialized.read()));
assert_eq!(contents, *(deserialized.read()));
}
#[test]
fn test_issue_203() {
struct Bar(RwLock<()>);
impl Drop for Bar {
fn drop(&mut self) {
let _n = self.0.write();
}
}
thread_local! {
static B: Bar = Bar(RwLock::new(()));
}
thread::spawn(|| {
B.with(|_| ());
let a = RwLock::new(());
let _a = a.read();
})
.join()
.unwrap();
}
#[test]
fn test_rw_write_is_locked() {
let lock = RwLock::new(0isize);
{
let _read_guard = lock.read();
assert!(lock.is_locked());
assert!(!lock.is_locked_exclusive());
}
{
let _write_guard = lock.write();
assert!(lock.is_locked());
assert!(lock.is_locked_exclusive());
}
}
#[test]
#[cfg(feature = "arc_lock")]
fn test_issue_430() {
let lock = std::sync::Arc::new(RwLock::new(0));
let mut rl = lock.upgradable_read_arc();
rl.with_upgraded(|_| {
println!("lock upgrade");
});
rl.with_upgraded(|_| {
println!("lock upgrade");
});
drop(lock);
}
}

38
vendor/parking_lot/src/util.rs vendored Normal file
View File

@@ -0,0 +1,38 @@
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use std::time::{Duration, Instant};
// Extension trait providing an unchecked unwrap for `Option`, equivalent to
// the standard library's `Option::unwrap_unchecked`.
pub trait UncheckedOptionExt<T> {
unsafe fn unchecked_unwrap(self) -> T;
}
impl<T> UncheckedOptionExt<T> for Option<T> {
#[inline]
unsafe fn unchecked_unwrap(self) -> T {
match self {
Some(x) => x,
None => unreachable(),
}
}
}
// hint::unreachable_unchecked() in release mode
#[inline]
unsafe fn unreachable() -> ! {
if cfg!(debug_assertions) {
unreachable!();
} else {
core::hint::unreachable_unchecked()
}
}
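// Converts a relative timeout into an absolute deadline, returning `None` if
// adding the timeout to the current time would overflow `Instant`.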
#[inline]
pub fn to_deadline(timeout: Duration) -> Option<Instant> {
Instant::now().checked_add(timeout)
}