Vendor dependencies for 0.3.0 release

2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

vendor/spin/src/barrier.rs vendored Normal file

@@ -0,0 +1,239 @@
//! Synchronization primitive allowing multiple threads to synchronize the
//! beginning of some computation.
//!
//! Implementation adapted from the 'Barrier' type of the standard library. See:
//! <https://doc.rust-lang.org/std/sync/struct.Barrier.html>
//!
//! Copyright 2014 The Rust Project Developers. See the COPYRIGHT
//! file at the top-level directory of this distribution and at
//! <http://rust-lang.org/COPYRIGHT>.
//!
//! Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
//! <http://www.apache.org/licenses/LICENSE-2.0>> or the MIT license
//! <LICENSE-MIT or <http://opensource.org/licenses/MIT>>, at your
//! option. This file may not be copied, modified, or distributed
//! except according to those terms.
use crate::{mutex::Mutex, RelaxStrategy, Spin};
/// A primitive that synchronizes the execution of multiple threads.
///
/// # Example
///
/// ```
/// use spin;
/// use std::sync::Arc;
/// use std::thread;
///
/// let mut handles = Vec::with_capacity(10);
/// let barrier = Arc::new(spin::Barrier::new(10));
/// for _ in 0..10 {
/// let c = barrier.clone();
/// // The same messages will be printed together.
/// // You will NOT see any interleaving.
/// handles.push(thread::spawn(move|| {
/// println!("before wait");
/// c.wait();
/// println!("after wait");
/// }));
/// }
/// // Wait for other threads to finish.
/// for handle in handles {
/// handle.join().unwrap();
/// }
/// ```
pub struct Barrier<R = Spin> {
lock: Mutex<BarrierState, R>,
num_threads: usize,
}
// The inner state of the barrier
struct BarrierState {
count: usize,
generation_id: usize,
}
/// A `BarrierWaitResult` is returned by [`wait`] when all threads in the [`Barrier`]
/// have rendezvoused.
///
/// [`wait`]: struct.Barrier.html#method.wait
/// [`Barrier`]: struct.Barrier.html
///
/// # Examples
///
/// ```
/// use spin;
///
/// let barrier = spin::Barrier::new(1);
/// let barrier_wait_result = barrier.wait();
/// ```
pub struct BarrierWaitResult(bool);
impl<R: RelaxStrategy> Barrier<R> {
/// Blocks the current thread until all threads have rendezvoused here.
///
/// Barriers are re-usable after all threads have rendezvoused once, and can
/// be used continuously.
///
/// A single (arbitrary) thread will receive a [`BarrierWaitResult`] that
/// returns `true` from [`is_leader`] when returning from this function, and
/// all other threads will receive a result that will return `false` from
/// [`is_leader`].
///
/// [`BarrierWaitResult`]: struct.BarrierWaitResult.html
/// [`is_leader`]: struct.BarrierWaitResult.html#method.is_leader
///
/// # Examples
///
/// ```
/// use spin;
/// use std::sync::Arc;
/// use std::thread;
///
/// let mut handles = Vec::with_capacity(10);
/// let barrier = Arc::new(spin::Barrier::new(10));
/// for _ in 0..10 {
/// let c = barrier.clone();
/// // The same messages will be printed together.
/// // You will NOT see any interleaving.
/// handles.push(thread::spawn(move|| {
/// println!("before wait");
/// c.wait();
/// println!("after wait");
/// }));
/// }
/// // Wait for other threads to finish.
/// for handle in handles {
/// handle.join().unwrap();
/// }
/// ```
pub fn wait(&self) -> BarrierWaitResult {
let mut lock = self.lock.lock();
lock.count += 1;
if lock.count < self.num_threads {
// not the leader
let local_gen = lock.generation_id;
while local_gen == lock.generation_id && lock.count < self.num_threads {
drop(lock);
R::relax();
lock = self.lock.lock();
}
BarrierWaitResult(false)
} else {
// this thread is the leader,
// and is responsible for incrementing the generation
lock.count = 0;
lock.generation_id = lock.generation_id.wrapping_add(1);
BarrierWaitResult(true)
}
}
}
impl<R> Barrier<R> {
/// Creates a new barrier that can block a given number of threads.
///
/// A barrier will block `n`-1 threads which call [`wait`] and then wake up
/// all threads at once when the `n`th thread calls [`wait`]. A Barrier created
/// with n = 0 will behave identically to one created with n = 1.
///
/// [`wait`]: #method.wait
///
/// # Examples
///
/// ```
/// use spin;
///
/// let barrier = spin::Barrier::new(10);
/// ```
pub const fn new(n: usize) -> Self {
Self {
lock: Mutex::new(BarrierState {
count: 0,
generation_id: 0,
}),
num_threads: n,
}
}
}
impl BarrierWaitResult {
/// Returns `true` if this thread is the "leader thread" for the call to [`wait`].
///
/// Only one thread will return `true` from its result; all other
/// threads will return `false`.
///
/// [`wait`]: struct.Barrier.html#method.wait
///
/// # Examples
///
/// ```
/// use spin;
///
/// let barrier = spin::Barrier::new(1);
/// let barrier_wait_result = barrier.wait();
/// println!("{:?}", barrier_wait_result.is_leader());
/// ```
pub fn is_leader(&self) -> bool {
self.0
}
}
#[cfg(test)]
mod tests {
use std::prelude::v1::*;
use std::sync::mpsc::{channel, TryRecvError};
use std::sync::Arc;
use std::thread;
type Barrier = super::Barrier;
fn use_barrier(n: usize, barrier: Arc<Barrier>) {
let (tx, rx) = channel();
let mut ts = Vec::new();
for _ in 0..n - 1 {
let c = barrier.clone();
let tx = tx.clone();
ts.push(thread::spawn(move || {
tx.send(c.wait().is_leader()).unwrap();
}));
}
// At this point, all spawned threads should be blocked,
// so we shouldn't get anything from the port
assert!(match rx.try_recv() {
Err(TryRecvError::Empty) => true,
_ => false,
});
let mut leader_found = barrier.wait().is_leader();
// Now, the barrier is cleared and we should get data.
for _ in 0..n - 1 {
if rx.recv().unwrap() {
assert!(!leader_found);
leader_found = true;
}
}
assert!(leader_found);
for t in ts {
t.join().unwrap();
}
}
#[test]
fn test_barrier() {
const N: usize = 10;
let barrier = Arc::new(Barrier::new(N));
use_barrier(N, barrier.clone());
// use barrier twice to ensure it is reusable
use_barrier(N, barrier.clone());
}
}

vendor/spin/src/lazy.rs vendored Normal file

@@ -0,0 +1,118 @@
//! Synchronization primitives for lazy evaluation.
//!
//! Implementation adapted from the `SyncLazy` type of the standard library. See:
//! <https://doc.rust-lang.org/std/lazy/struct.SyncLazy.html>
use crate::{once::Once, RelaxStrategy, Spin};
use core::{cell::Cell, fmt, ops::Deref};
/// A value which is initialized on the first access.
///
/// This type is a thread-safe `Lazy`, and can be used in statics.
///
/// # Examples
///
/// ```
/// use std::collections::HashMap;
/// use spin::Lazy;
///
/// static HASHMAP: Lazy<HashMap<i32, String>> = Lazy::new(|| {
/// println!("initializing");
/// let mut m = HashMap::new();
/// m.insert(13, "Spica".to_string());
/// m.insert(74, "Hoyten".to_string());
/// m
/// });
///
/// fn main() {
/// println!("ready");
/// std::thread::spawn(|| {
/// println!("{:?}", HASHMAP.get(&13));
/// }).join().unwrap();
/// println!("{:?}", HASHMAP.get(&74));
///
/// // Prints:
/// // ready
/// // initializing
/// // Some("Spica")
/// // Some("Hoyten")
/// }
/// ```
pub struct Lazy<T, F = fn() -> T, R = Spin> {
cell: Once<T, R>,
init: Cell<Option<F>>,
}
impl<T: fmt::Debug, F, R> fmt::Debug for Lazy<T, F, R> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Lazy")
.field("cell", &self.cell)
.field("init", &"..")
.finish()
}
}
// We never create a `&F` from a `&Lazy<T, F>`, so it is fine
// to not impl `Sync` for `F`.
// We do create a `&mut Option<F>` in `force`, but this is
// properly synchronized, so it only happens once and therefore
// also does not contribute to this impl.
unsafe impl<T, F: Send> Sync for Lazy<T, F> where Once<T>: Sync {}
// auto-derived `Send` impl is OK.
impl<T, F, R> Lazy<T, F, R> {
/// Creates a new lazy value with the given initializing
/// function.
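///
/// # Example
///
/// A minimal usage sketch (mirroring the `force` example below): the initializer does not run
/// until the value is first accessed.
///
/// ```
/// use spin::Lazy;
///
/// let lazy = Lazy::new(|| 1 + 1);
/// // The closure runs on this first dereference.
/// assert_eq!(*lazy, 2);
/// ```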
pub const fn new(f: F) -> Self {
Self {
cell: Once::new(),
init: Cell::new(Some(f)),
}
}
/// Retrieves a mutable pointer to the inner data.
///
/// This is especially useful when interfacing with low-level code or FFI where the caller
/// explicitly knows that it has exclusive access to the inner data. Note that reading from
/// this pointer is UB until the value has been initialized or directly written to.
pub fn as_mut_ptr(&self) -> *mut T {
self.cell.as_mut_ptr()
}
}
impl<T, F: FnOnce() -> T, R: RelaxStrategy> Lazy<T, F, R> {
/// Forces the evaluation of this lazy value and
/// returns a reference to the result. This is equivalent
/// to the `Deref` impl, but is explicit.
///
/// # Examples
///
/// ```
/// use spin::Lazy;
///
/// let lazy = Lazy::new(|| 92);
///
/// assert_eq!(Lazy::force(&lazy), &92);
/// assert_eq!(&*lazy, &92);
/// ```
pub fn force(this: &Self) -> &T {
this.cell.call_once(|| match this.init.take() {
Some(f) => f(),
None => panic!("Lazy instance has previously been poisoned"),
})
}
}
impl<T, F: FnOnce() -> T, R: RelaxStrategy> Deref for Lazy<T, F, R> {
type Target = T;
fn deref(&self) -> &T {
Self::force(self)
}
}
impl<T: Default, R> Default for Lazy<T, fn() -> T, R> {
/// Creates a new lazy value using `Default` as the initializing function.
fn default() -> Self {
Self::new(T::default)
}
}

vendor/spin/src/lib.rs vendored Normal file

@@ -0,0 +1,221 @@
#![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![deny(missing_docs)]
//! This crate provides [spin-based](https://en.wikipedia.org/wiki/Spinlock) versions of the
//! primitives in `std::sync` and `std::lazy`. Because synchronization is done through spinning,
//! the primitives are suitable for use in `no_std` environments.
//!
//! # Features
//!
//! - `Mutex`, `RwLock`, `Once`/`SyncOnceCell`, and `SyncLazy` equivalents
//!
//! - Support for `no_std` environments
//!
//! - [`lock_api`](https://crates.io/crates/lock_api) compatibility
//!
//! - Upgradeable `RwLock` guards
//!
//! - Guards can be sent and shared between threads
//!
//! - Guard leaking
//!
//! - Ticket locks
//!
//! - Different strategies for dealing with contention
//!
//! # Relationship with `std::sync`
//!
//! While `spin` is not a drop-in replacement for `std::sync` (and
//! [should not be considered as such](https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html)),
//! an effort is made to keep this crate reasonably consistent with `std::sync`.
//!
//! Many of the types defined in this crate have 'additional capabilities' when compared to `std::sync`:
//!
//! - Because spinning does not depend on the thread-driven model of `std::sync`, guards ([`MutexGuard`],
//! [`RwLockReadGuard`], [`RwLockWriteGuard`], etc.) may be sent and shared between threads.
//!
//! - [`RwLockUpgradableGuard`] supports being upgraded into a [`RwLockWriteGuard`].
//!
//! - Guards support [leaking](https://doc.rust-lang.org/nomicon/leaking.html) (see the example below).
//!
//! - [`Once`] owns the value returned by its `call_once` initializer.
//!
//! - [`RwLock`] supports counting readers and writers.
//!
//! Conversely, the types in this crate do not have some of the features `std::sync` has:
//!
//! - Locks do not track [panic poisoning](https://doc.rust-lang.org/nomicon/poisoning.html).
//!
//! ## Feature flags
//!
//! The crate comes with a few feature flags that you may wish to use.
//!
//! - `lock_api` enables support for [`lock_api`](https://crates.io/crates/lock_api)
//!
//! - `ticket_mutex` uses a ticket lock for the implementation of `Mutex`
//!
//! - `fair_mutex` enables a fairer implementation of `Mutex` that uses eventual fairness to avoid
//! starvation
//!
//! - `std` enables support for thread yielding instead of spinning
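//!
//! # Example: leaking a guard
//!
//! As a brief illustration of one of the capabilities listed above, a lock guard can be leaked to
//! obtain a mutable reference to the protected data (a minimal sketch, assuming the default
//! `mutex` feature is enabled):
//!
//! ```
//! let lock = spin::Mutex::new(0);
//!
//! // Leaking the guard permanently locks `lock`, but yields a mutable reference to the data.
//! let data: &mut i32 = spin::MutexGuard::leak(lock.lock());
//! *data = 1;
//! assert_eq!(*data, 1);
//! ```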
#[cfg(any(test, feature = "std"))]
extern crate core;
#[cfg(feature = "portable_atomic")]
extern crate portable_atomic;
#[cfg(not(feature = "portable_atomic"))]
use core::sync::atomic;
#[cfg(feature = "portable_atomic")]
use portable_atomic as atomic;
#[cfg(feature = "barrier")]
#[cfg_attr(docsrs, doc(cfg(feature = "barrier")))]
pub mod barrier;
#[cfg(feature = "lazy")]
#[cfg_attr(docsrs, doc(cfg(feature = "lazy")))]
pub mod lazy;
#[cfg(feature = "mutex")]
#[cfg_attr(docsrs, doc(cfg(feature = "mutex")))]
pub mod mutex;
#[cfg(feature = "once")]
#[cfg_attr(docsrs, doc(cfg(feature = "once")))]
pub mod once;
pub mod relax;
#[cfg(feature = "rwlock")]
#[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
pub mod rwlock;
#[cfg(feature = "mutex")]
#[cfg_attr(docsrs, doc(cfg(feature = "mutex")))]
pub use mutex::MutexGuard;
#[cfg(feature = "std")]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
pub use relax::Yield;
pub use relax::{RelaxStrategy, Spin};
#[cfg(feature = "rwlock")]
#[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
pub use rwlock::RwLockReadGuard;
// Avoid confusing inference errors by aliasing away the relax strategy parameter. Users that need to use a different
// relax strategy can do so by accessing the types through their fully-qualified path. This is a little bit horrible
// but sadly adding a default type parameter is *still* a breaking change in Rust (for understandable reasons).
/// A primitive that synchronizes the execution of multiple threads. See [`barrier::Barrier`] for documentation.
///
/// A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax
/// strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path.
#[cfg(feature = "barrier")]
#[cfg_attr(docsrs, doc(cfg(feature = "barrier")))]
pub type Barrier = crate::barrier::Barrier;
/// A value which is initialized on the first access. See [`lazy::Lazy`] for documentation.
///
/// A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax
/// strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path.
#[cfg(feature = "lazy")]
#[cfg_attr(docsrs, doc(cfg(feature = "lazy")))]
pub type Lazy<T, F = fn() -> T> = crate::lazy::Lazy<T, F>;
/// A spin-based lock providing mutually exclusive access to data. See [`mutex::Mutex`] for documentation.
///
/// A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax
/// strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path.
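///
/// # Example
///
/// A minimal sketch of the note above: spelling out the relax strategy (here just the default
/// [`Spin`] strategy) via the fully-qualified path.
///
/// ```
/// // Equivalent to `spin::Mutex::new(42)`, but with the relax strategy named explicitly.
/// let lock = spin::mutex::Mutex::<_, spin::relax::Spin>::new(42);
/// assert_eq!(*lock.lock(), 42);
/// ```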
#[cfg(feature = "mutex")]
#[cfg_attr(docsrs, doc(cfg(feature = "mutex")))]
pub type Mutex<T> = crate::mutex::Mutex<T>;
/// A primitive that provides lazy one-time initialization. See [`once::Once`] for documentation.
///
/// A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax
/// strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path.
#[cfg(feature = "once")]
#[cfg_attr(docsrs, doc(cfg(feature = "once")))]
pub type Once<T = ()> = crate::once::Once<T>;
/// A lock that provides data access to either one writer or many readers. See [`rwlock::RwLock`] for documentation.
///
/// A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax
/// strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path.
#[cfg(feature = "rwlock")]
#[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
pub type RwLock<T> = crate::rwlock::RwLock<T>;
/// A guard that provides immutable data access but can be upgraded to [`RwLockWriteGuard`]. See
/// [`rwlock::RwLockUpgradableGuard`] for documentation.
///
/// A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax
/// strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path.
#[cfg(feature = "rwlock")]
#[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
pub type RwLockUpgradableGuard<'a, T> = crate::rwlock::RwLockUpgradableGuard<'a, T>;
/// A guard that provides mutable data access. See [`rwlock::RwLockWriteGuard`] for documentation.
///
/// A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax
/// strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path.
#[cfg(feature = "rwlock")]
#[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
pub type RwLockWriteGuard<'a, T> = crate::rwlock::RwLockWriteGuard<'a, T>;
/// Spin synchronisation primitives, but compatible with [`lock_api`](https://crates.io/crates/lock_api).
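///
/// # Example
///
/// A minimal sketch (assuming the `lock_api` and `mutex` features are enabled): the wrapper types
/// are used through the `lock_api` interface rather than this crate's own guard types.
///
/// ```
/// // `spin::lock_api::Mutex` is a `lock_api::Mutex` backed by this crate's raw spin mutex.
/// let lock = spin::lock_api::Mutex::new(1);
/// *lock.lock() += 1;
/// assert_eq!(*lock.lock(), 2);
/// ```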
#[cfg(feature = "lock_api")]
#[cfg_attr(docsrs, doc(cfg(feature = "lock_api")))]
pub mod lock_api {
/// A lock that provides mutually exclusive data access (compatible with [`lock_api`](https://crates.io/crates/lock_api)).
#[cfg(feature = "mutex")]
#[cfg_attr(docsrs, doc(cfg(feature = "mutex")))]
pub type Mutex<T> = lock_api_crate::Mutex<crate::Mutex<()>, T>;
/// A guard that provides mutable data access (compatible with [`lock_api`](https://crates.io/crates/lock_api)).
#[cfg(feature = "mutex")]
#[cfg_attr(docsrs, doc(cfg(feature = "mutex")))]
pub type MutexGuard<'a, T> = lock_api_crate::MutexGuard<'a, crate::Mutex<()>, T>;
/// A lock that provides data access to either one writer or many readers (compatible with [`lock_api`](https://crates.io/crates/lock_api)).
#[cfg(feature = "rwlock")]
#[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
pub type RwLock<T> = lock_api_crate::RwLock<crate::RwLock<()>, T>;
/// A guard that provides immutable data access (compatible with [`lock_api`](https://crates.io/crates/lock_api)).
#[cfg(feature = "rwlock")]
#[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
pub type RwLockReadGuard<'a, T> = lock_api_crate::RwLockReadGuard<'a, crate::RwLock<()>, T>;
/// A guard that provides mutable data access (compatible with [`lock_api`](https://crates.io/crates/lock_api)).
#[cfg(feature = "rwlock")]
#[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
pub type RwLockWriteGuard<'a, T> = lock_api_crate::RwLockWriteGuard<'a, crate::RwLock<()>, T>;
/// A guard that provides immutable data access but can be upgraded to [`RwLockWriteGuard`] (compatible with [`lock_api`](https://crates.io/crates/lock_api)).
#[cfg(feature = "rwlock")]
#[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
pub type RwLockUpgradableReadGuard<'a, T> =
lock_api_crate::RwLockUpgradableReadGuard<'a, crate::RwLock<()>, T>;
}
/// In the event of an invalid operation, it's best to abort the current process.
#[cfg(feature = "fair_mutex")]
fn abort() -> ! {
#[cfg(not(feature = "std"))]
{
// Panicking while panicking is defined by Rust to result in an abort.
struct Panic;
impl Drop for Panic {
fn drop(&mut self) {
panic!("aborting due to invalid operation");
}
}
let _panic = Panic;
panic!("aborting due to invalid operation");
}
#[cfg(feature = "std")]
{
std::process::abort();
}
}

vendor/spin/src/mutex.rs vendored Normal file

@@ -0,0 +1,340 @@
//! Locks that have the same behaviour as a mutex.
//!
//! The [`Mutex`] in the root of the crate can be configured using the `ticket_mutex` feature.
//! If it's enabled, [`TicketMutex`] and [`TicketMutexGuard`] will be re-exported as [`Mutex`]
//! and [`MutexGuard`]; otherwise, [`SpinMutex`] and its guard will be re-exported.
//!
//! `ticket_mutex` is disabled by default.
//!
//! [`Mutex`]: ../struct.Mutex.html
//! [`MutexGuard`]: ../struct.MutexGuard.html
//! [`TicketMutex`]: ./struct.TicketMutex.html
//! [`TicketMutexGuard`]: ./struct.TicketMutexGuard.html
//! [`SpinMutex`]: ./struct.SpinMutex.html
//! [`SpinMutexGuard`]: ./struct.SpinMutexGuard.html
#[cfg(feature = "spin_mutex")]
#[cfg_attr(docsrs, doc(cfg(feature = "spin_mutex")))]
pub mod spin;
#[cfg(feature = "spin_mutex")]
#[cfg_attr(docsrs, doc(cfg(feature = "spin_mutex")))]
pub use self::spin::{SpinMutex, SpinMutexGuard};
#[cfg(feature = "ticket_mutex")]
#[cfg_attr(docsrs, doc(cfg(feature = "ticket_mutex")))]
pub mod ticket;
#[cfg(feature = "ticket_mutex")]
#[cfg_attr(docsrs, doc(cfg(feature = "ticket_mutex")))]
pub use self::ticket::{TicketMutex, TicketMutexGuard};
#[cfg(feature = "fair_mutex")]
#[cfg_attr(docsrs, doc(cfg(feature = "fair_mutex")))]
pub mod fair;
#[cfg(feature = "fair_mutex")]
#[cfg_attr(docsrs, doc(cfg(feature = "fair_mutex")))]
pub use self::fair::{FairMutex, FairMutexGuard, Starvation};
use crate::{RelaxStrategy, Spin};
use core::{
fmt,
ops::{Deref, DerefMut},
};
#[cfg(all(not(feature = "spin_mutex"), not(feature = "use_ticket_mutex")))]
compile_error!("The `mutex` feature flag was used (perhaps through another feature?) without either `spin_mutex` or `use_ticket_mutex`. One of these is required.");
#[cfg(all(not(feature = "use_ticket_mutex"), feature = "spin_mutex"))]
type InnerMutex<T, R> = self::spin::SpinMutex<T, R>;
#[cfg(all(not(feature = "use_ticket_mutex"), feature = "spin_mutex"))]
type InnerMutexGuard<'a, T> = self::spin::SpinMutexGuard<'a, T>;
#[cfg(feature = "use_ticket_mutex")]
type InnerMutex<T, R> = self::ticket::TicketMutex<T, R>;
#[cfg(feature = "use_ticket_mutex")]
type InnerMutexGuard<'a, T> = self::ticket::TicketMutexGuard<'a, T>;
/// A spin-based lock providing mutually exclusive access to data.
///
/// The implementation uses either a ticket mutex or a regular spin mutex depending on whether the `spin_mutex` or
/// `ticket_mutex` feature flag is enabled.
///
/// # Example
///
/// ```
/// use spin;
///
/// let lock = spin::Mutex::new(0);
///
/// // Modify the data
/// *lock.lock() = 2;
///
/// // Read the data
/// let answer = *lock.lock();
/// assert_eq!(answer, 2);
/// ```
///
/// # Thread safety example
///
/// ```
/// use spin;
/// use std::sync::{Arc, Barrier};
///
/// let thread_count = 1000;
/// let spin_mutex = Arc::new(spin::Mutex::new(0));
///
/// // We use a barrier to ensure the readout happens after all writing
/// let barrier = Arc::new(Barrier::new(thread_count + 1));
///
/// # let mut ts = Vec::new();
/// for _ in (0..thread_count) {
/// let my_barrier = barrier.clone();
/// let my_lock = spin_mutex.clone();
/// # let t =
/// std::thread::spawn(move || {
/// let mut guard = my_lock.lock();
/// *guard += 1;
///
/// // Release the lock to prevent a deadlock
/// drop(guard);
/// my_barrier.wait();
/// });
/// # ts.push(t);
/// }
///
/// barrier.wait();
///
/// let answer = { *spin_mutex.lock() };
/// assert_eq!(answer, thread_count);
///
/// # for t in ts {
/// # t.join().unwrap();
/// # }
/// ```
pub struct Mutex<T: ?Sized, R = Spin> {
inner: InnerMutex<T, R>,
}
unsafe impl<T: ?Sized + Send, R> Sync for Mutex<T, R> {}
unsafe impl<T: ?Sized + Send, R> Send for Mutex<T, R> {}
/// A generic guard that will protect some data access and
/// uses either a ticket lock or a normal spin mutex.
///
/// For more info see [`TicketMutexGuard`] or [`SpinMutexGuard`].
///
/// [`TicketMutexGuard`]: ./struct.TicketMutexGuard.html
/// [`SpinMutexGuard`]: ./struct.SpinMutexGuard.html
pub struct MutexGuard<'a, T: 'a + ?Sized> {
inner: InnerMutexGuard<'a, T>,
}
impl<T, R> Mutex<T, R> {
/// Creates a new [`Mutex`] wrapping the supplied data.
///
/// # Example
///
/// ```
/// use spin::Mutex;
///
/// static MUTEX: Mutex<()> = Mutex::new(());
///
/// fn demo() {
/// let lock = MUTEX.lock();
/// // do something with lock
/// drop(lock);
/// }
/// ```
#[inline(always)]
pub const fn new(value: T) -> Self {
Self {
inner: InnerMutex::new(value),
}
}
/// Consumes this [`Mutex`] and unwraps the underlying data.
///
/// # Example
///
/// ```
/// let lock = spin::Mutex::new(42);
/// assert_eq!(42, lock.into_inner());
/// ```
#[inline(always)]
pub fn into_inner(self) -> T {
self.inner.into_inner()
}
}
impl<T: ?Sized, R: RelaxStrategy> Mutex<T, R> {
/// Locks the [`Mutex`] and returns a guard that permits access to the inner data.
///
/// The returned value may be dereferenced for data access
/// and the lock will be dropped when the guard falls out of scope.
///
/// ```
/// let lock = spin::Mutex::new(0);
/// {
/// let mut data = lock.lock();
/// // The lock is now locked and the data can be accessed
/// *data += 1;
/// // The lock is implicitly dropped at the end of the scope
/// }
/// ```
#[inline(always)]
pub fn lock(&self) -> MutexGuard<T> {
MutexGuard {
inner: self.inner.lock(),
}
}
}
impl<T: ?Sized, R> Mutex<T, R> {
/// Returns `true` if the lock is currently held.
///
/// # Safety
///
/// This function provides no synchronization guarantees and so its result should be considered 'out of date'
/// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
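///
/// # Example
///
/// A minimal single-threaded sketch; with other threads involved the result may already be stale
/// by the time it is observed, as noted above.
///
/// ```
/// let lock = spin::Mutex::new(0);
/// assert!(!lock.is_locked());
///
/// let guard = lock.lock();
/// assert!(lock.is_locked());
///
/// drop(guard);
/// assert!(!lock.is_locked());
/// ```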
#[inline(always)]
pub fn is_locked(&self) -> bool {
self.inner.is_locked()
}
/// Force unlock this [`Mutex`].
///
/// # Safety
///
/// This is *extremely* unsafe if the lock is not held by the current
/// thread. However, this can be useful in some instances for exposing the
/// lock to FFI that doesn't know how to deal with RAII.
#[inline(always)]
pub unsafe fn force_unlock(&self) {
self.inner.force_unlock()
}
/// Try to lock this [`Mutex`], returning a lock guard if successful.
///
/// # Example
///
/// ```
/// let lock = spin::Mutex::new(42);
///
/// let maybe_guard = lock.try_lock();
/// assert!(maybe_guard.is_some());
///
/// // `maybe_guard` is still held, so the second call fails
/// let maybe_guard2 = lock.try_lock();
/// assert!(maybe_guard2.is_none());
/// ```
#[inline(always)]
pub fn try_lock(&self) -> Option<MutexGuard<T>> {
self.inner
.try_lock()
.map(|guard| MutexGuard { inner: guard })
}
/// Returns a mutable reference to the underlying data.
///
/// Since this call borrows the [`Mutex`] mutably, and a mutable reference is guaranteed to be exclusive in Rust,
/// no actual locking needs to take place -- the mutable borrow statically guarantees no locks exist. As such,
/// this is a 'zero-cost' operation.
///
/// # Example
///
/// ```
/// let mut lock = spin::Mutex::new(0);
/// *lock.get_mut() = 10;
/// assert_eq!(*lock.lock(), 10);
/// ```
#[inline(always)]
pub fn get_mut(&mut self) -> &mut T {
self.inner.get_mut()
}
}
impl<T: ?Sized + fmt::Debug, R> fmt::Debug for Mutex<T, R> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&self.inner, f)
}
}
impl<T: ?Sized + Default, R> Default for Mutex<T, R> {
fn default() -> Self {
Self::new(Default::default())
}
}
impl<T, R> From<T> for Mutex<T, R> {
fn from(data: T) -> Self {
Self::new(data)
}
}
impl<'a, T: ?Sized> MutexGuard<'a, T> {
/// Leak the lock guard, yielding a mutable reference to the underlying data.
///
/// Note that this function will permanently lock the original [`Mutex`].
///
/// ```
/// let mylock = spin::Mutex::new(0);
///
/// let data: &mut i32 = spin::MutexGuard::leak(mylock.lock());
///
/// *data = 1;
/// assert_eq!(*data, 1);
/// ```
#[inline(always)]
pub fn leak(this: Self) -> &'a mut T {
InnerMutexGuard::leak(this.inner)
}
}
impl<'a, T: ?Sized + fmt::Debug> fmt::Debug for MutexGuard<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
impl<'a, T: ?Sized + fmt::Display> fmt::Display for MutexGuard<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&**self, f)
}
}
impl<'a, T: ?Sized> Deref for MutexGuard<'a, T> {
type Target = T;
fn deref(&self) -> &T {
&*self.inner
}
}
impl<'a, T: ?Sized> DerefMut for MutexGuard<'a, T> {
fn deref_mut(&mut self) -> &mut T {
&mut *self.inner
}
}
#[cfg(feature = "lock_api")]
unsafe impl<R: RelaxStrategy> lock_api_crate::RawMutex for Mutex<(), R> {
type GuardMarker = lock_api_crate::GuardSend;
const INIT: Self = Self::new(());
fn lock(&self) {
// Prevent guard destructor running
core::mem::forget(Self::lock(self));
}
fn try_lock(&self) -> bool {
// Prevent guard destructor running
Self::try_lock(self).map(core::mem::forget).is_some()
}
unsafe fn unlock(&self) {
self.force_unlock();
}
fn is_locked(&self) -> bool {
self.inner.is_locked()
}
}

vendor/spin/src/mutex/fair.rs vendored Normal file

@@ -0,0 +1,735 @@
//! A spinning mutex with a fairer unlock algorithm.
//!
//! This mutex is similar to the `SpinMutex` in that it uses spinning to avoid
//! context switches. However, it uses a fairer unlock algorithm that avoids
//! starvation of threads that are waiting for the lock.
use crate::{
atomic::{AtomicUsize, Ordering},
RelaxStrategy, Spin,
};
use core::{
cell::UnsafeCell,
fmt,
marker::PhantomData,
mem::ManuallyDrop,
ops::{Deref, DerefMut},
};
// The lowest bit of `lock` is used to indicate whether the mutex is locked or not. The rest of the bits are used to
// store the number of starving threads.
const LOCKED: usize = 1;
const STARVED: usize = 2;
/// Number chosen by fair roll of the dice, adjust as needed.
const STARVATION_SPINS: usize = 1024;
/// A [spin lock](https://en.m.wikipedia.org/wiki/Spinlock) providing mutually exclusive access to data, but with a fairer
/// algorithm.
///
/// # Example
///
/// ```
/// use spin;
///
/// let lock = spin::mutex::FairMutex::<_>::new(0);
///
/// // Modify the data
/// *lock.lock() = 2;
///
/// // Read the data
/// let answer = *lock.lock();
/// assert_eq!(answer, 2);
/// ```
///
/// # Thread safety example
///
/// ```
/// use spin;
/// use std::sync::{Arc, Barrier};
///
/// let thread_count = 1000;
/// let spin_mutex = Arc::new(spin::mutex::FairMutex::<_>::new(0));
///
/// // We use a barrier to ensure the readout happens after all writing
/// let barrier = Arc::new(Barrier::new(thread_count + 1));
///
/// for _ in (0..thread_count) {
/// let my_barrier = barrier.clone();
/// let my_lock = spin_mutex.clone();
/// std::thread::spawn(move || {
/// let mut guard = my_lock.lock();
/// *guard += 1;
///
/// // Release the lock to prevent a deadlock
/// drop(guard);
/// my_barrier.wait();
/// });
/// }
///
/// barrier.wait();
///
/// let answer = { *spin_mutex.lock() };
/// assert_eq!(answer, thread_count);
/// ```
pub struct FairMutex<T: ?Sized, R = Spin> {
phantom: PhantomData<R>,
pub(crate) lock: AtomicUsize,
data: UnsafeCell<T>,
}
/// A guard that provides mutable data access.
///
/// When the guard falls out of scope it will release the lock.
pub struct FairMutexGuard<'a, T: ?Sized + 'a> {
lock: &'a AtomicUsize,
data: *mut T,
}
/// A handle that indicates that we have been trying to acquire the lock for a while.
///
/// This handle is used to prevent starvation.
pub struct Starvation<'a, T: ?Sized + 'a, R> {
lock: &'a FairMutex<T, R>,
}
/// Indicates whether a lock was rejected due to the lock being held by another thread or due to starvation.
#[derive(Debug)]
pub enum LockRejectReason {
/// The lock was rejected due to the lock being held by another thread.
Locked,
/// The lock was rejected due to starvation.
Starved,
}
// Same unsafe impls as `std::sync::Mutex`
unsafe impl<T: ?Sized + Send, R> Sync for FairMutex<T, R> {}
unsafe impl<T: ?Sized + Send, R> Send for FairMutex<T, R> {}
unsafe impl<T: ?Sized + Sync> Sync for FairMutexGuard<'_, T> {}
unsafe impl<T: ?Sized + Send> Send for FairMutexGuard<'_, T> {}
impl<T, R> FairMutex<T, R> {
/// Creates a new [`FairMutex`] wrapping the supplied data.
///
/// # Example
///
/// ```
/// use spin::mutex::FairMutex;
///
/// static MUTEX: FairMutex<()> = FairMutex::<_>::new(());
///
/// fn demo() {
/// let lock = MUTEX.lock();
/// // do something with lock
/// drop(lock);
/// }
/// ```
#[inline(always)]
pub const fn new(data: T) -> Self {
FairMutex {
lock: AtomicUsize::new(0),
data: UnsafeCell::new(data),
phantom: PhantomData,
}
}
/// Consumes this [`FairMutex`] and unwraps the underlying data.
///
/// # Example
///
/// ```
/// let lock = spin::mutex::FairMutex::<_>::new(42);
/// assert_eq!(42, lock.into_inner());
/// ```
#[inline(always)]
pub fn into_inner(self) -> T {
// We know statically that there are no outstanding references to
// `self` so there's no need to lock.
let FairMutex { data, .. } = self;
data.into_inner()
}
/// Returns a mutable pointer to the underlying data.
///
/// This is mostly meant to be used for applications which require manual unlocking, but where
/// storing both the lock and a pointer to the inner data would be inefficient.
///
/// # Example
/// ```
/// let lock = spin::mutex::FairMutex::<_>::new(42);
///
/// unsafe {
/// core::mem::forget(lock.lock());
///
/// assert_eq!(lock.as_mut_ptr().read(), 42);
/// lock.as_mut_ptr().write(58);
///
/// lock.force_unlock();
/// }
///
/// assert_eq!(*lock.lock(), 58);
///
/// ```
#[inline(always)]
pub fn as_mut_ptr(&self) -> *mut T {
self.data.get()
}
}
impl<T: ?Sized, R: RelaxStrategy> FairMutex<T, R> {
/// Locks the [`FairMutex`] and returns a guard that permits access to the inner data.
///
/// The returned value may be dereferenced for data access
/// and the lock will be dropped when the guard falls out of scope.
///
/// ```
/// let lock = spin::mutex::FairMutex::<_>::new(0);
/// {
/// let mut data = lock.lock();
/// // The lock is now locked and the data can be accessed
/// *data += 1;
/// // The lock is implicitly dropped at the end of the scope
/// }
/// ```
#[inline(always)]
pub fn lock(&self) -> FairMutexGuard<T> {
// Can fail to lock even if the spinlock is not locked. May be more efficient than `try_lock`
// when called in a loop.
let mut spins = 0;
while self
.lock
.compare_exchange_weak(0, 1, Ordering::Acquire, Ordering::Relaxed)
.is_err()
{
// Wait until the lock looks unlocked before retrying
while self.is_locked() {
R::relax();
// If we've been spinning for a while, switch to a fairer strategy that will prevent
// newer users from stealing our lock from us.
if spins > STARVATION_SPINS {
return self.starve().lock();
}
spins += 1;
}
}
FairMutexGuard {
lock: &self.lock,
data: unsafe { &mut *self.data.get() },
}
}
}
impl<T: ?Sized, R> FairMutex<T, R> {
/// Returns `true` if the lock is currently held.
///
/// # Safety
///
/// This function provides no synchronization guarantees and so its result should be considered 'out of date'
/// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
#[inline(always)]
pub fn is_locked(&self) -> bool {
self.lock.load(Ordering::Relaxed) & LOCKED != 0
}
/// Force unlock this [`FairMutex`].
///
/// # Safety
///
/// This is *extremely* unsafe if the lock is not held by the current
/// thread. However, this can be useful in some instances for exposing the
/// lock to FFI that doesn't know how to deal with RAII.
#[inline(always)]
pub unsafe fn force_unlock(&self) {
self.lock.fetch_and(!LOCKED, Ordering::Release);
}
/// Try to lock this [`FairMutex`], returning a lock guard if successful.
///
/// # Example
///
/// ```
/// let lock = spin::mutex::FairMutex::<_>::new(42);
///
/// let maybe_guard = lock.try_lock();
/// assert!(maybe_guard.is_some());
///
/// // `maybe_guard` is still held, so the second call fails
/// let maybe_guard2 = lock.try_lock();
/// assert!(maybe_guard2.is_none());
/// ```
#[inline(always)]
pub fn try_lock(&self) -> Option<FairMutexGuard<T>> {
self.try_lock_starver().ok()
}
/// Tries to lock this [`FairMutex`] and returns a result that indicates whether the lock was
/// rejected due to a starver or not.
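///
/// # Example
///
/// A minimal sketch: an uncontended lock succeeds, while a second attempt is rejected with
/// [`LockRejectReason::Locked`] because the first guard is still held.
///
/// ```
/// use spin::mutex::fair::{FairMutex, LockRejectReason};
///
/// let lock = FairMutex::<_>::new(42);
/// let guard = lock.try_lock_starver();
/// assert!(guard.is_ok());
///
/// // The lock is held by `guard`, so this attempt is rejected.
/// assert!(matches!(lock.try_lock_starver(), Err(LockRejectReason::Locked)));
/// ```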
#[inline(always)]
pub fn try_lock_starver(&self) -> Result<FairMutexGuard<T>, LockRejectReason> {
match self
.lock
.compare_exchange(0, LOCKED, Ordering::Acquire, Ordering::Relaxed)
.unwrap_or_else(|x| x)
{
0 => Ok(FairMutexGuard {
lock: &self.lock,
data: unsafe { &mut *self.data.get() },
}),
LOCKED => Err(LockRejectReason::Locked),
_ => Err(LockRejectReason::Starved),
}
}
/// Indicates that the current user has been waiting for the lock for a while
/// and that the lock should yield to this thread over a newly arriving thread.
///
/// # Example
///
/// ```
/// let lock = spin::mutex::FairMutex::<_>::new(42);
///
/// // Lock the mutex to simulate it being used by another user.
/// let guard1 = lock.lock();
///
/// // Try to lock the mutex.
/// let guard2 = lock.try_lock();
/// assert!(guard2.is_none());
///
/// // Wait for a while.
/// wait_for_a_while();
///
/// // We are now starved, indicate as such.
/// let starve = lock.starve();
///
/// // Once the lock is released, another user trying to lock it will
/// // fail.
/// drop(guard1);
/// let guard3 = lock.try_lock();
/// assert!(guard3.is_none());
///
/// // However, we will be able to lock it.
/// let guard4 = starve.try_lock();
/// assert!(guard4.is_ok());
///
/// # fn wait_for_a_while() {}
/// ```
pub fn starve(&self) -> Starvation<'_, T, R> {
// Add a new starver to the state.
if self.lock.fetch_add(STARVED, Ordering::Relaxed) > (core::isize::MAX - 1) as usize {
// In the event of a potential lock overflow, abort.
crate::abort();
}
Starvation { lock: self }
}
/// Returns a mutable reference to the underlying data.
///
/// Since this call borrows the [`FairMutex`] mutably, and a mutable reference is guaranteed to be exclusive in
/// Rust, no actual locking needs to take place -- the mutable borrow statically guarantees no locks exist. As
/// such, this is a 'zero-cost' operation.
///
/// # Example
///
/// ```
/// let mut lock = spin::mutex::FairMutex::<_>::new(0);
/// *lock.get_mut() = 10;
/// assert_eq!(*lock.lock(), 10);
/// ```
#[inline(always)]
pub fn get_mut(&mut self) -> &mut T {
// We know statically that there are no other references to `self`, so
// there's no need to lock the inner mutex.
unsafe { &mut *self.data.get() }
}
}
impl<T: ?Sized + fmt::Debug, R> fmt::Debug for FairMutex<T, R> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
struct LockWrapper<'a, T: ?Sized + fmt::Debug>(Option<FairMutexGuard<'a, T>>);
impl<T: ?Sized + fmt::Debug> fmt::Debug for LockWrapper<'_, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match &self.0 {
Some(guard) => fmt::Debug::fmt(guard, f),
None => f.write_str("<locked>"),
}
}
}
f.debug_struct("FairMutex")
.field("data", &LockWrapper(self.try_lock()))
.finish()
}
}
impl<T: ?Sized + Default, R> Default for FairMutex<T, R> {
fn default() -> Self {
Self::new(Default::default())
}
}
impl<T, R> From<T> for FairMutex<T, R> {
fn from(data: T) -> Self {
Self::new(data)
}
}
impl<'a, T: ?Sized> FairMutexGuard<'a, T> {
/// Leak the lock guard, yielding a mutable reference to the underlying data.
///
/// Note that this function will permanently lock the original [`FairMutex`].
///
/// ```
/// let mylock = spin::mutex::FairMutex::<_>::new(0);
///
/// let data: &mut i32 = spin::mutex::FairMutexGuard::leak(mylock.lock());
///
/// *data = 1;
/// assert_eq!(*data, 1);
/// ```
#[inline(always)]
pub fn leak(this: Self) -> &'a mut T {
// Use ManuallyDrop to avoid stacked-borrow invalidation
let mut this = ManuallyDrop::new(this);
// We know statically that only we are referencing data
unsafe { &mut *this.data }
}
}
impl<'a, T: ?Sized + fmt::Debug> fmt::Debug for FairMutexGuard<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
impl<'a, T: ?Sized + fmt::Display> fmt::Display for FairMutexGuard<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&**self, f)
}
}
impl<'a, T: ?Sized> Deref for FairMutexGuard<'a, T> {
type Target = T;
fn deref(&self) -> &T {
// We know statically that only we are referencing data
unsafe { &*self.data }
}
}
impl<'a, T: ?Sized> DerefMut for FairMutexGuard<'a, T> {
fn deref_mut(&mut self) -> &mut T {
// We know statically that only we are referencing data
unsafe { &mut *self.data }
}
}
impl<'a, T: ?Sized> Drop for FairMutexGuard<'a, T> {
/// Dropping the guard will release the lock it was created from.
fn drop(&mut self) {
self.lock.fetch_and(!LOCKED, Ordering::Release);
}
}
impl<'a, T: ?Sized, R> Starvation<'a, T, R> {
/// Attempts to lock the mutex if we are the only starving user.
///
/// This allows another user to lock the mutex if they are starving as well.
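///
/// # Example
///
/// A minimal sketch: with the mutex unlocked and no other starving users, a single starver can
/// take the lock fairly.
///
/// ```
/// let lock = spin::mutex::FairMutex::<_>::new(0);
///
/// // Register ourselves as a starving user, then lock fairly.
/// let starve = lock.starve();
/// let guard = starve.try_lock_fair();
/// assert!(guard.is_ok());
/// ```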
pub fn try_lock_fair(self) -> Result<FairMutexGuard<'a, T>, Self> {
// Try to lock the mutex.
if self
.lock
.lock
.compare_exchange(
STARVED,
STARVED | LOCKED,
Ordering::Acquire,
Ordering::Relaxed,
)
.is_ok()
{
// We are the only starving user, lock the mutex.
Ok(FairMutexGuard {
lock: &self.lock.lock,
data: self.lock.data.get(),
})
} else {
// Another user is starving, fail.
Err(self)
}
}
/// Attempts to lock the mutex.
///
/// If the lock is currently held by another thread, this will return `Err(self)`.
///
/// # Example
///
/// ```
/// let lock = spin::mutex::FairMutex::<_>::new(42);
///
/// // Lock the mutex to simulate it being used by another user.
/// let guard1 = lock.lock();
///
/// // Try to lock the mutex.
/// let guard2 = lock.try_lock();
/// assert!(guard2.is_none());
///
/// // Wait for a while.
/// wait_for_a_while();
///
/// // We are now starved, indicate as such.
/// let starve = lock.starve();
///
/// // Once the lock is released, another user trying to lock it will
/// // fail.
/// drop(guard1);
/// let guard3 = lock.try_lock();
/// assert!(guard3.is_none());
///
/// // However, we will be able to lock it.
/// let guard4 = starve.try_lock();
/// assert!(guard4.is_ok());
///
/// # fn wait_for_a_while() {}
/// ```
pub fn try_lock(self) -> Result<FairMutexGuard<'a, T>, Self> {
// Try to lock the mutex.
if self.lock.lock.fetch_or(LOCKED, Ordering::Acquire) & LOCKED == 0 {
// We have successfully locked the mutex.
// By dropping `self` here, we decrement the starvation count.
Ok(FairMutexGuard {
lock: &self.lock.lock,
data: self.lock.data.get(),
})
} else {
Err(self)
}
}
}
impl<'a, T: ?Sized, R: RelaxStrategy> Starvation<'a, T, R> {
/// Locks the mutex.
pub fn lock(mut self) -> FairMutexGuard<'a, T> {
// Try to lock the mutex.
loop {
match self.try_lock() {
Ok(lock) => return lock,
Err(starve) => self = starve,
}
// Relax until the lock is released.
while self.lock.is_locked() {
R::relax();
}
}
}
}
impl<'a, T: ?Sized, R> Drop for Starvation<'a, T, R> {
fn drop(&mut self) {
// As there is no longer a user being starved, we decrement the starver count.
self.lock.lock.fetch_sub(STARVED, Ordering::Release);
}
}
impl fmt::Display for LockRejectReason {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
LockRejectReason::Locked => write!(f, "locked"),
LockRejectReason::Starved => write!(f, "starved"),
}
}
}
#[cfg(feature = "std")]
impl std::error::Error for LockRejectReason {}
#[cfg(feature = "lock_api")]
unsafe impl<R: RelaxStrategy> lock_api_crate::RawMutex for FairMutex<(), R> {
type GuardMarker = lock_api_crate::GuardSend;
const INIT: Self = Self::new(());
fn lock(&self) {
// Prevent guard destructor running
core::mem::forget(Self::lock(self));
}
fn try_lock(&self) -> bool {
// Prevent guard destructor running
Self::try_lock(self).map(core::mem::forget).is_some()
}
unsafe fn unlock(&self) {
self.force_unlock();
}
fn is_locked(&self) -> bool {
Self::is_locked(self)
}
}
#[cfg(test)]
mod tests {
use std::prelude::v1::*;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread;
type FairMutex<T> = super::FairMutex<T>;
#[derive(Eq, PartialEq, Debug)]
struct NonCopy(i32);
#[test]
fn smoke() {
let m = FairMutex::<_>::new(());
drop(m.lock());
drop(m.lock());
}
#[test]
fn lots_and_lots() {
static M: FairMutex<()> = FairMutex::<_>::new(());
static mut CNT: u32 = 0;
const J: u32 = 1000;
const K: u32 = 3;
fn inc() {
for _ in 0..J {
unsafe {
let _g = M.lock();
CNT += 1;
}
}
}
let (tx, rx) = channel();
for _ in 0..K {
let tx2 = tx.clone();
thread::spawn(move || {
inc();
tx2.send(()).unwrap();
});
let tx2 = tx.clone();
thread::spawn(move || {
inc();
tx2.send(()).unwrap();
});
}
drop(tx);
for _ in 0..2 * K {
rx.recv().unwrap();
}
assert_eq!(unsafe { CNT }, J * K * 2);
}
#[test]
fn try_lock() {
let mutex = FairMutex::<_>::new(42);
// First lock succeeds
let a = mutex.try_lock();
assert_eq!(a.as_ref().map(|r| **r), Some(42));
// Additional lock fails
let b = mutex.try_lock();
assert!(b.is_none());
// After dropping lock, it succeeds again
::core::mem::drop(a);
let c = mutex.try_lock();
assert_eq!(c.as_ref().map(|r| **r), Some(42));
}
#[test]
fn test_into_inner() {
let m = FairMutex::<_>::new(NonCopy(10));
assert_eq!(m.into_inner(), NonCopy(10));
}
#[test]
fn test_into_inner_drop() {
struct Foo(Arc<AtomicUsize>);
impl Drop for Foo {
fn drop(&mut self) {
self.0.fetch_add(1, Ordering::SeqCst);
}
}
let num_drops = Arc::new(AtomicUsize::new(0));
let m = FairMutex::<_>::new(Foo(num_drops.clone()));
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
{
let _inner = m.into_inner();
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
}
assert_eq!(num_drops.load(Ordering::SeqCst), 1);
}
#[test]
fn test_mutex_arc_nested() {
// Tests nested mutexes and access
// to underlying data.
let arc = Arc::new(FairMutex::<_>::new(1));
let arc2 = Arc::new(FairMutex::<_>::new(arc));
let (tx, rx) = channel();
let _t = thread::spawn(move || {
let lock = arc2.lock();
let lock2 = lock.lock();
assert_eq!(*lock2, 1);
tx.send(()).unwrap();
});
rx.recv().unwrap();
}
#[test]
fn test_mutex_arc_access_in_unwind() {
let arc = Arc::new(FairMutex::<_>::new(1));
let arc2 = arc.clone();
let _ = thread::spawn(move || -> () {
struct Unwinder {
i: Arc<FairMutex<i32>>,
}
impl Drop for Unwinder {
fn drop(&mut self) {
*self.i.lock() += 1;
}
}
let _u = Unwinder { i: arc2 };
panic!();
})
.join();
let lock = arc.lock();
assert_eq!(*lock, 2);
}
#[test]
fn test_mutex_unsized() {
let mutex: &FairMutex<[i32]> = &FairMutex::<_>::new([1, 2, 3]);
{
let b = &mut *mutex.lock();
b[0] = 4;
b[2] = 5;
}
let comp: &[i32] = &[4, 2, 5];
assert_eq!(&*mutex.lock(), comp);
}
#[test]
fn test_mutex_force_lock() {
let lock = FairMutex::<_>::new(());
::std::mem::forget(lock.lock());
unsafe {
lock.force_unlock();
}
assert!(lock.try_lock().is_some());
}
}

vendor/spin/src/mutex/spin.rs vendored Normal file

@@ -0,0 +1,543 @@
//! A naïve spinning mutex.
//!
//! Waiting threads hammer an atomic variable until it becomes available. Best-case latency is low, but worst-case
//! latency is theoretically infinite.
use crate::{
atomic::{AtomicBool, Ordering},
RelaxStrategy, Spin,
};
use core::{
cell::UnsafeCell,
fmt,
marker::PhantomData,
mem::ManuallyDrop,
ops::{Deref, DerefMut},
};
/// A [spin lock](https://en.m.wikipedia.org/wiki/Spinlock) providing mutually exclusive access to data.
///
/// # Example
///
/// ```
/// use spin;
///
/// let lock = spin::mutex::SpinMutex::<_>::new(0);
///
/// // Modify the data
/// *lock.lock() = 2;
///
/// // Read the data
/// let answer = *lock.lock();
/// assert_eq!(answer, 2);
/// ```
///
/// # Thread safety example
///
/// ```
/// use spin;
/// use std::sync::{Arc, Barrier};
///
/// let thread_count = 1000;
/// let spin_mutex = Arc::new(spin::mutex::SpinMutex::<_>::new(0));
///
/// // We use a barrier to ensure the readout happens after all writing
/// let barrier = Arc::new(Barrier::new(thread_count + 1));
///
/// # let mut ts = Vec::new();
/// for _ in (0..thread_count) {
/// let my_barrier = barrier.clone();
/// let my_lock = spin_mutex.clone();
/// # let t =
/// std::thread::spawn(move || {
/// let mut guard = my_lock.lock();
/// *guard += 1;
///
/// // Release the lock to prevent a deadlock
/// drop(guard);
/// my_barrier.wait();
/// });
/// # ts.push(t);
/// }
///
/// barrier.wait();
///
/// let answer = { *spin_mutex.lock() };
/// assert_eq!(answer, thread_count);
///
/// # for t in ts {
/// # t.join().unwrap();
/// # }
/// ```
pub struct SpinMutex<T: ?Sized, R = Spin> {
phantom: PhantomData<R>,
pub(crate) lock: AtomicBool,
data: UnsafeCell<T>,
}
/// A guard that provides mutable data access.
///
/// When the guard falls out of scope it will release the lock.
pub struct SpinMutexGuard<'a, T: ?Sized + 'a> {
lock: &'a AtomicBool,
data: *mut T,
}
// Same unsafe impls as `std::sync::Mutex`
unsafe impl<T: ?Sized + Send, R> Sync for SpinMutex<T, R> {}
unsafe impl<T: ?Sized + Send, R> Send for SpinMutex<T, R> {}
unsafe impl<T: ?Sized + Sync> Sync for SpinMutexGuard<'_, T> {}
unsafe impl<T: ?Sized + Send> Send for SpinMutexGuard<'_, T> {}
impl<T, R> SpinMutex<T, R> {
/// Creates a new [`SpinMutex`] wrapping the supplied data.
///
/// # Example
///
/// ```
/// use spin::mutex::SpinMutex;
///
/// static MUTEX: SpinMutex<()> = SpinMutex::<_>::new(());
///
/// fn demo() {
/// let lock = MUTEX.lock();
/// // do something with lock
/// drop(lock);
/// }
/// ```
#[inline(always)]
pub const fn new(data: T) -> Self {
SpinMutex {
lock: AtomicBool::new(false),
data: UnsafeCell::new(data),
phantom: PhantomData,
}
}
/// Consumes this [`SpinMutex`] and unwraps the underlying data.
///
/// # Example
///
/// ```
/// let lock = spin::mutex::SpinMutex::<_>::new(42);
/// assert_eq!(42, lock.into_inner());
/// ```
#[inline(always)]
pub fn into_inner(self) -> T {
// We know statically that there are no outstanding references to
// `self` so there's no need to lock.
let SpinMutex { data, .. } = self;
data.into_inner()
}
/// Returns a mutable pointer to the underlying data.
///
/// This is mostly meant to be used for applications which require manual unlocking, but where
/// storing both the lock and a pointer to the inner data would be inefficient.
///
/// # Example
/// ```
/// let lock = spin::mutex::SpinMutex::<_>::new(42);
///
/// unsafe {
/// core::mem::forget(lock.lock());
///
/// assert_eq!(lock.as_mut_ptr().read(), 42);
/// lock.as_mut_ptr().write(58);
///
/// lock.force_unlock();
/// }
///
/// assert_eq!(*lock.lock(), 58);
///
/// ```
#[inline(always)]
pub fn as_mut_ptr(&self) -> *mut T {
self.data.get()
}
}
impl<T: ?Sized, R: RelaxStrategy> SpinMutex<T, R> {
/// Locks the [`SpinMutex`] and returns a guard that permits access to the inner data.
///
/// The returned value may be dereferenced for data access
/// and the lock will be dropped when the guard falls out of scope.
///
/// ```
/// let lock = spin::mutex::SpinMutex::<_>::new(0);
/// {
/// let mut data = lock.lock();
/// // The lock is now locked and the data can be accessed
/// *data += 1;
/// // The lock is implicitly dropped at the end of the scope
/// }
/// ```
#[inline(always)]
pub fn lock(&self) -> SpinMutexGuard<T> {
// Can fail to lock even if the spinlock is not locked. May be more efficient than `try_lock`
// when called in a loop.
while self
.lock
.compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
.is_err()
{
// Wait until the lock looks unlocked before retrying
while self.is_locked() {
R::relax();
}
}
SpinMutexGuard {
lock: &self.lock,
data: unsafe { &mut *self.data.get() },
}
}
}
impl<T: ?Sized, R> SpinMutex<T, R> {
/// Returns `true` if the lock is currently held.
///
/// # Safety
///
/// This function provides no synchronization guarantees and so its result should be considered 'out of date'
/// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
#[inline(always)]
pub fn is_locked(&self) -> bool {
self.lock.load(Ordering::Relaxed)
}
/// Force unlock this [`SpinMutex`].
///
/// # Safety
///
/// This is *extremely* unsafe if the lock is not held by the current
/// thread. However, this can be useful in some instances for exposing the
/// lock to FFI that doesn't know how to deal with RAII.
#[inline(always)]
pub unsafe fn force_unlock(&self) {
self.lock.store(false, Ordering::Release);
}
/// Try to lock this [`SpinMutex`], returning a lock guard if successful.
///
/// # Example
///
/// ```
/// let lock = spin::mutex::SpinMutex::<_>::new(42);
///
/// let maybe_guard = lock.try_lock();
/// assert!(maybe_guard.is_some());
///
/// // `maybe_guard` is still held, so the second call fails
/// let maybe_guard2 = lock.try_lock();
/// assert!(maybe_guard2.is_none());
/// ```
#[inline(always)]
pub fn try_lock(&self) -> Option<SpinMutexGuard<T>> {
// The reason for using a strong compare_exchange is explained here:
// https://github.com/Amanieu/parking_lot/pull/207#issuecomment-575869107
if self
.lock
.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
.is_ok()
{
Some(SpinMutexGuard {
lock: &self.lock,
data: unsafe { &mut *self.data.get() },
})
} else {
None
}
}
/// Returns a mutable reference to the underlying data.
///
/// Since this call borrows the [`SpinMutex`] mutably, and a mutable reference is guaranteed to be exclusive in
/// Rust, no actual locking needs to take place -- the mutable borrow statically guarantees no locks exist. As
/// such, this is a 'zero-cost' operation.
///
/// # Example
///
/// ```
/// let mut lock = spin::mutex::SpinMutex::<_>::new(0);
/// *lock.get_mut() = 10;
/// assert_eq!(*lock.lock(), 10);
/// ```
#[inline(always)]
pub fn get_mut(&mut self) -> &mut T {
// We know statically that there are no other references to `self`, so
// there's no need to lock the inner mutex.
unsafe { &mut *self.data.get() }
}
}
impl<T: ?Sized + fmt::Debug, R> fmt::Debug for SpinMutex<T, R> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.try_lock() {
Some(guard) => write!(f, "Mutex {{ data: ")
.and_then(|()| (&*guard).fmt(f))
.and_then(|()| write!(f, "}}")),
None => write!(f, "Mutex {{ <locked> }}"),
}
}
}
impl<T: ?Sized + Default, R> Default for SpinMutex<T, R> {
fn default() -> Self {
Self::new(Default::default())
}
}
impl<T, R> From<T> for SpinMutex<T, R> {
fn from(data: T) -> Self {
Self::new(data)
}
}
impl<'a, T: ?Sized> SpinMutexGuard<'a, T> {
/// Leak the lock guard, yielding a mutable reference to the underlying data.
///
/// Note that this function will permanently lock the original [`SpinMutex`].
///
/// ```
/// let mylock = spin::mutex::SpinMutex::<_>::new(0);
///
/// let data: &mut i32 = spin::mutex::SpinMutexGuard::leak(mylock.lock());
///
/// *data = 1;
/// assert_eq!(*data, 1);
/// ```
#[inline(always)]
pub fn leak(this: Self) -> &'a mut T {
// Use ManuallyDrop to avoid stacked-borrow invalidation
let mut this = ManuallyDrop::new(this);
// We know statically that only we are referencing data
unsafe { &mut *this.data }
}
}
impl<'a, T: ?Sized + fmt::Debug> fmt::Debug for SpinMutexGuard<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
impl<'a, T: ?Sized + fmt::Display> fmt::Display for SpinMutexGuard<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&**self, f)
}
}
impl<'a, T: ?Sized> Deref for SpinMutexGuard<'a, T> {
type Target = T;
fn deref(&self) -> &T {
// We know statically that only we are referencing data
unsafe { &*self.data }
}
}
impl<'a, T: ?Sized> DerefMut for SpinMutexGuard<'a, T> {
fn deref_mut(&mut self) -> &mut T {
// We know statically that only we are referencing data
unsafe { &mut *self.data }
}
}
impl<'a, T: ?Sized> Drop for SpinMutexGuard<'a, T> {
/// Dropping the guard will release the lock it was created from.
fn drop(&mut self) {
self.lock.store(false, Ordering::Release);
}
}
#[cfg(feature = "lock_api")]
unsafe impl<R: RelaxStrategy> lock_api_crate::RawMutex for SpinMutex<(), R> {
type GuardMarker = lock_api_crate::GuardSend;
const INIT: Self = Self::new(());
fn lock(&self) {
// Prevent guard destructor running
core::mem::forget(Self::lock(self));
}
fn try_lock(&self) -> bool {
// Prevent guard destructor running
Self::try_lock(self).map(core::mem::forget).is_some()
}
unsafe fn unlock(&self) {
self.force_unlock();
}
fn is_locked(&self) -> bool {
Self::is_locked(self)
}
}
#[cfg(test)]
mod tests {
use std::prelude::v1::*;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread;
type SpinMutex<T> = super::SpinMutex<T>;
#[derive(Eq, PartialEq, Debug)]
struct NonCopy(i32);
#[test]
fn smoke() {
let m = SpinMutex::<_>::new(());
drop(m.lock());
drop(m.lock());
}
#[test]
fn lots_and_lots() {
static M: SpinMutex<()> = SpinMutex::<_>::new(());
static mut CNT: u32 = 0;
const J: u32 = 1000;
const K: u32 = 3;
fn inc() {
for _ in 0..J {
unsafe {
let _g = M.lock();
CNT += 1;
}
}
}
let (tx, rx) = channel();
let mut ts = Vec::new();
for _ in 0..K {
let tx2 = tx.clone();
ts.push(thread::spawn(move || {
inc();
tx2.send(()).unwrap();
}));
let tx2 = tx.clone();
ts.push(thread::spawn(move || {
inc();
tx2.send(()).unwrap();
}));
}
drop(tx);
for _ in 0..2 * K {
rx.recv().unwrap();
}
assert_eq!(unsafe { CNT }, J * K * 2);
for t in ts {
t.join().unwrap();
}
}
#[test]
fn try_lock() {
let mutex = SpinMutex::<_>::new(42);
// First lock succeeds
let a = mutex.try_lock();
assert_eq!(a.as_ref().map(|r| **r), Some(42));
// Additional lock fails
let b = mutex.try_lock();
assert!(b.is_none());
// After dropping lock, it succeeds again
::core::mem::drop(a);
let c = mutex.try_lock();
assert_eq!(c.as_ref().map(|r| **r), Some(42));
}
#[test]
fn test_into_inner() {
let m = SpinMutex::<_>::new(NonCopy(10));
assert_eq!(m.into_inner(), NonCopy(10));
}
#[test]
fn test_into_inner_drop() {
struct Foo(Arc<AtomicUsize>);
impl Drop for Foo {
fn drop(&mut self) {
self.0.fetch_add(1, Ordering::SeqCst);
}
}
let num_drops = Arc::new(AtomicUsize::new(0));
let m = SpinMutex::<_>::new(Foo(num_drops.clone()));
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
{
let _inner = m.into_inner();
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
}
assert_eq!(num_drops.load(Ordering::SeqCst), 1);
}
#[test]
fn test_mutex_arc_nested() {
// Tests nested mutexes and access
// to underlying data.
let arc = Arc::new(SpinMutex::<_>::new(1));
let arc2 = Arc::new(SpinMutex::<_>::new(arc));
let (tx, rx) = channel();
let t = thread::spawn(move || {
let lock = arc2.lock();
let lock2 = lock.lock();
assert_eq!(*lock2, 1);
tx.send(()).unwrap();
});
rx.recv().unwrap();
t.join().unwrap();
}
#[test]
fn test_mutex_arc_access_in_unwind() {
let arc = Arc::new(SpinMutex::<_>::new(1));
let arc2 = arc.clone();
let _ = thread::spawn(move || -> () {
struct Unwinder {
i: Arc<SpinMutex<i32>>,
}
impl Drop for Unwinder {
fn drop(&mut self) {
*self.i.lock() += 1;
}
}
let _u = Unwinder { i: arc2 };
panic!();
})
.join();
let lock = arc.lock();
assert_eq!(*lock, 2);
}
#[test]
fn test_mutex_unsized() {
let mutex: &SpinMutex<[i32]> = &SpinMutex::<_>::new([1, 2, 3]);
{
let b = &mut *mutex.lock();
b[0] = 4;
b[2] = 5;
}
let comp: &[i32] = &[4, 2, 5];
assert_eq!(&*mutex.lock(), comp);
}
#[test]
fn test_mutex_force_lock() {
let lock = SpinMutex::<_>::new(());
::std::mem::forget(lock.lock());
unsafe {
lock.force_unlock();
}
assert!(lock.try_lock().is_some());
}
}

537
vendor/spin/src/mutex/ticket.rs vendored Normal file

@@ -0,0 +1,537 @@
//! A ticket-based mutex.
//!
//! Waiting threads take a 'ticket' from the lock in the order they arrive and gain access to the lock when their
//! ticket is next in the queue. Best-case latency is slightly worse than a regular spinning mutex, but worst-case
//! latency is infinitely better. Waiting threads simply need to wait for all threads that come before them in the
//! queue to finish.
use crate::{
atomic::{AtomicUsize, Ordering},
RelaxStrategy, Spin,
};
use core::{
cell::UnsafeCell,
fmt,
marker::PhantomData,
ops::{Deref, DerefMut},
};
/// A spin-based [ticket lock](https://en.wikipedia.org/wiki/Ticket_lock) providing mutually exclusive access to data.
///
/// A ticket lock is analogous to a queue management system for lock requests. When a thread tries to take a lock, it
/// is assigned a 'ticket'. It then spins until its ticket becomes next in line. When the lock guard is released, the
/// next ticket will be processed.
///
/// Ticket locks significantly reduce the worst-case latency of locking at the cost of slightly higher average-time
/// overhead.
///
/// # Example
///
/// ```
/// use spin;
///
/// let lock = spin::mutex::TicketMutex::<_>::new(0);
///
/// // Modify the data
/// *lock.lock() = 2;
///
/// // Read the data
/// let answer = *lock.lock();
/// assert_eq!(answer, 2);
/// ```
///
/// # Thread safety example
///
/// ```
/// use spin;
/// use std::sync::{Arc, Barrier};
///
/// let thread_count = 1000;
/// let spin_mutex = Arc::new(spin::mutex::TicketMutex::<_>::new(0));
///
/// // We use a barrier to ensure the readout happens after all writing
/// let barrier = Arc::new(Barrier::new(thread_count + 1));
///
/// for _ in 0..thread_count {
/// let my_barrier = barrier.clone();
/// let my_lock = spin_mutex.clone();
/// std::thread::spawn(move || {
/// let mut guard = my_lock.lock();
/// *guard += 1;
///
/// // Release the lock to prevent a deadlock
/// drop(guard);
/// my_barrier.wait();
/// });
/// }
///
/// barrier.wait();
///
/// let answer = { *spin_mutex.lock() };
/// assert_eq!(answer, thread_count);
/// ```
pub struct TicketMutex<T: ?Sized, R = Spin> {
phantom: PhantomData<R>,
next_ticket: AtomicUsize,
next_serving: AtomicUsize,
data: UnsafeCell<T>,
}
/// A guard that protects some data.
///
/// When the guard is dropped, the next ticket will be processed.
pub struct TicketMutexGuard<'a, T: ?Sized + 'a> {
next_serving: &'a AtomicUsize,
ticket: usize,
data: &'a mut T,
}
unsafe impl<T: ?Sized + Send, R> Sync for TicketMutex<T, R> {}
unsafe impl<T: ?Sized + Send, R> Send for TicketMutex<T, R> {}
impl<T, R> TicketMutex<T, R> {
/// Creates a new [`TicketMutex`] wrapping the supplied data.
///
/// # Example
///
/// ```
/// use spin::mutex::TicketMutex;
///
/// static MUTEX: TicketMutex<()> = TicketMutex::<_>::new(());
///
/// fn demo() {
/// let lock = MUTEX.lock();
/// // do something with lock
/// drop(lock);
/// }
/// ```
#[inline(always)]
pub const fn new(data: T) -> Self {
Self {
phantom: PhantomData,
next_ticket: AtomicUsize::new(0),
next_serving: AtomicUsize::new(0),
data: UnsafeCell::new(data),
}
}
/// Consumes this [`TicketMutex`] and unwraps the underlying data.
///
/// # Example
///
/// ```
/// let lock = spin::mutex::TicketMutex::<_>::new(42);
/// assert_eq!(42, lock.into_inner());
/// ```
#[inline(always)]
pub fn into_inner(self) -> T {
self.data.into_inner()
}
/// Returns a mutable pointer to the underlying data.
///
/// This is mostly meant to be used for applications which require manual unlocking, but where
/// storing both the lock and the pointer to the inner data gets inefficient.
///
/// # Example
/// ```
/// let lock = spin::mutex::TicketMutex::<_>::new(42);
///
/// unsafe {
/// core::mem::forget(lock.lock());
///
/// assert_eq!(lock.as_mut_ptr().read(), 42);
/// lock.as_mut_ptr().write(58);
///
/// lock.force_unlock();
/// }
///
/// assert_eq!(*lock.lock(), 58);
///
/// ```
#[inline(always)]
pub fn as_mut_ptr(&self) -> *mut T {
self.data.get()
}
}
impl<T: ?Sized + fmt::Debug, R> fmt::Debug for TicketMutex<T, R> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.try_lock() {
Some(guard) => write!(f, "Mutex {{ data: ")
.and_then(|()| (&*guard).fmt(f))
.and_then(|()| write!(f, "}}")),
None => write!(f, "Mutex {{ <locked> }}"),
}
}
}
impl<T: ?Sized, R: RelaxStrategy> TicketMutex<T, R> {
/// Locks the [`TicketMutex`] and returns a guard that permits access to the inner data.
///
/// The returned data may be dereferenced for data access
/// and the lock will be dropped when the guard falls out of scope.
///
/// ```
/// let lock = spin::mutex::TicketMutex::<_>::new(0);
/// {
/// let mut data = lock.lock();
/// // The lock is now locked and the data can be accessed
/// *data += 1;
/// // The lock is implicitly dropped at the end of the scope
/// }
/// ```
#[inline(always)]
pub fn lock(&self) -> TicketMutexGuard<T> {
let ticket = self.next_ticket.fetch_add(1, Ordering::Relaxed);
while self.next_serving.load(Ordering::Acquire) != ticket {
R::relax();
}
TicketMutexGuard {
next_serving: &self.next_serving,
ticket,
// Safety
// We know that we are the next ticket to be served,
// so there's no other thread accessing the data.
//
// Every other thread has another ticket number so it's
// definitely stuck in the spin loop above.
data: unsafe { &mut *self.data.get() },
}
}
}
impl<T: ?Sized, R> TicketMutex<T, R> {
/// Returns `true` if the lock is currently held.
///
/// # Safety
///
/// This function provides no synchronization guarantees and so its result should be considered 'out of date'
/// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
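///
/// # Example
///
/// A minimal sketch of using the result as a heuristic only:
///
/// ```
/// let lock = spin::mutex::TicketMutex::<_>::new(42);
///
/// assert!(!lock.is_locked());
/// let guard = lock.lock();
/// assert!(lock.is_locked());
/// drop(guard);
/// assert!(!lock.is_locked());
/// ```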
#[inline(always)]
pub fn is_locked(&self) -> bool {
let ticket = self.next_ticket.load(Ordering::Relaxed);
self.next_serving.load(Ordering::Relaxed) != ticket
}
/// Force unlock this [`TicketMutex`], by serving the next ticket.
///
/// # Safety
///
/// This is *extremely* unsafe if the lock is not held by the current
/// thread. However, this can be useful in some instances for exposing the
/// lock to FFI that doesn't know how to deal with RAII.
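///
/// # Example
///
/// A minimal sketch of manually serving the next ticket after deliberately leaking a guard:
///
/// ```
/// let lock = spin::mutex::TicketMutex::<_>::new(42);
///
/// core::mem::forget(lock.lock());
/// assert!(lock.is_locked());
///
/// // Safety: the lock is held by this thread; its guard was only leaked, never dropped.
/// unsafe { lock.force_unlock(); }
/// assert!(lock.try_lock().is_some());
/// ```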
#[inline(always)]
pub unsafe fn force_unlock(&self) {
self.next_serving.fetch_add(1, Ordering::Release);
}
/// Try to lock this [`TicketMutex`], returning a lock guard if successful.
///
/// # Example
///
/// ```
/// let lock = spin::mutex::TicketMutex::<_>::new(42);
///
/// let maybe_guard = lock.try_lock();
/// assert!(maybe_guard.is_some());
///
/// // `maybe_guard` is still held, so the second call fails
/// let maybe_guard2 = lock.try_lock();
/// assert!(maybe_guard2.is_none());
/// ```
#[inline(always)]
pub fn try_lock(&self) -> Option<TicketMutexGuard<T>> {
let ticket = self
.next_ticket
.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |ticket| {
if self.next_serving.load(Ordering::Acquire) == ticket {
Some(ticket + 1)
} else {
None
}
});
ticket.ok().map(|ticket| TicketMutexGuard {
next_serving: &self.next_serving,
ticket,
// Safety
// We have a ticket that is equal to the next_serving ticket, so we know:
// - that no other thread can have the same ticket id as this thread
// - that we are the next one to be served so we have exclusive access to the data
data: unsafe { &mut *self.data.get() },
})
}
/// Returns a mutable reference to the underlying data.
///
/// Since this call borrows the [`TicketMutex`] mutably, and a mutable reference is guaranteed to be exclusive in
/// Rust, no actual locking needs to take place -- the mutable borrow statically guarantees no locks exist. As
/// such, this is a 'zero-cost' operation.
///
/// # Example
///
/// ```
/// let mut lock = spin::mutex::TicketMutex::<_>::new(0);
/// *lock.get_mut() = 10;
/// assert_eq!(*lock.lock(), 10);
/// ```
#[inline(always)]
pub fn get_mut(&mut self) -> &mut T {
// Safety:
// We know that there are no other references to `self`,
// so it's safe to return an exclusive reference to the data.
unsafe { &mut *self.data.get() }
}
}
impl<T: ?Sized + Default, R> Default for TicketMutex<T, R> {
fn default() -> Self {
Self::new(Default::default())
}
}
impl<T, R> From<T> for TicketMutex<T, R> {
fn from(data: T) -> Self {
Self::new(data)
}
}
impl<'a, T: ?Sized> TicketMutexGuard<'a, T> {
/// Leak the lock guard, yielding a mutable reference to the underlying data.
///
/// Note that this function will permanently lock the original [`TicketMutex`].
///
/// ```
/// let mylock = spin::mutex::TicketMutex::<_>::new(0);
///
/// let data: &mut i32 = spin::mutex::TicketMutexGuard::leak(mylock.lock());
///
/// *data = 1;
/// assert_eq!(*data, 1);
/// ```
#[inline(always)]
pub fn leak(this: Self) -> &'a mut T {
let data = this.data as *mut _; // Keep it in pointer form temporarily to avoid double-aliasing
core::mem::forget(this);
unsafe { &mut *data }
}
}
impl<'a, T: ?Sized + fmt::Debug> fmt::Debug for TicketMutexGuard<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
impl<'a, T: ?Sized + fmt::Display> fmt::Display for TicketMutexGuard<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&**self, f)
}
}
impl<'a, T: ?Sized> Deref for TicketMutexGuard<'a, T> {
type Target = T;
fn deref(&self) -> &T {
self.data
}
}
impl<'a, T: ?Sized> DerefMut for TicketMutexGuard<'a, T> {
fn deref_mut(&mut self) -> &mut T {
self.data
}
}
impl<'a, T: ?Sized> Drop for TicketMutexGuard<'a, T> {
fn drop(&mut self) {
let new_ticket = self.ticket + 1;
self.next_serving.store(new_ticket, Ordering::Release);
}
}
#[cfg(feature = "lock_api")]
unsafe impl<R: RelaxStrategy> lock_api_crate::RawMutex for TicketMutex<(), R> {
type GuardMarker = lock_api_crate::GuardSend;
const INIT: Self = Self::new(());
fn lock(&self) {
// Prevent guard destructor running
core::mem::forget(Self::lock(self));
}
fn try_lock(&self) -> bool {
// Prevent guard destructor running
Self::try_lock(self).map(core::mem::forget).is_some()
}
unsafe fn unlock(&self) {
self.force_unlock();
}
fn is_locked(&self) -> bool {
Self::is_locked(self)
}
}
#[cfg(test)]
mod tests {
use std::prelude::v1::*;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread;
type TicketMutex<T> = super::TicketMutex<T>;
#[derive(Eq, PartialEq, Debug)]
struct NonCopy(i32);
#[test]
fn smoke() {
let m = TicketMutex::<_>::new(());
drop(m.lock());
drop(m.lock());
}
#[test]
fn lots_and_lots() {
static M: TicketMutex<()> = TicketMutex::<_>::new(());
static mut CNT: u32 = 0;
const J: u32 = 1000;
const K: u32 = 3;
fn inc() {
for _ in 0..J {
unsafe {
let _g = M.lock();
CNT += 1;
}
}
}
let (tx, rx) = channel();
for _ in 0..K {
let tx2 = tx.clone();
thread::spawn(move || {
inc();
tx2.send(()).unwrap();
});
let tx2 = tx.clone();
thread::spawn(move || {
inc();
tx2.send(()).unwrap();
});
}
drop(tx);
for _ in 0..2 * K {
rx.recv().unwrap();
}
assert_eq!(unsafe { CNT }, J * K * 2);
}
#[test]
fn try_lock() {
let mutex = TicketMutex::<_>::new(42);
// First lock succeeds
let a = mutex.try_lock();
assert_eq!(a.as_ref().map(|r| **r), Some(42));
// Additional lock fails
let b = mutex.try_lock();
assert!(b.is_none());
// After dropping lock, it succeeds again
::core::mem::drop(a);
let c = mutex.try_lock();
assert_eq!(c.as_ref().map(|r| **r), Some(42));
}
#[test]
fn test_into_inner() {
let m = TicketMutex::<_>::new(NonCopy(10));
assert_eq!(m.into_inner(), NonCopy(10));
}
#[test]
fn test_into_inner_drop() {
struct Foo(Arc<AtomicUsize>);
impl Drop for Foo {
fn drop(&mut self) {
self.0.fetch_add(1, Ordering::SeqCst);
}
}
let num_drops = Arc::new(AtomicUsize::new(0));
let m = TicketMutex::<_>::new(Foo(num_drops.clone()));
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
{
let _inner = m.into_inner();
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
}
assert_eq!(num_drops.load(Ordering::SeqCst), 1);
}
#[test]
fn test_mutex_arc_nested() {
// Tests nested mutexes and access
// to underlying data.
let arc = Arc::new(TicketMutex::<_>::new(1));
let arc2 = Arc::new(TicketMutex::<_>::new(arc));
let (tx, rx) = channel();
let _t = thread::spawn(move || {
let lock = arc2.lock();
let lock2 = lock.lock();
assert_eq!(*lock2, 1);
tx.send(()).unwrap();
});
rx.recv().unwrap();
}
#[test]
fn test_mutex_arc_access_in_unwind() {
let arc = Arc::new(TicketMutex::<_>::new(1));
let arc2 = arc.clone();
let _ = thread::spawn(move || -> () {
struct Unwinder {
i: Arc<TicketMutex<i32>>,
}
impl Drop for Unwinder {
fn drop(&mut self) {
*self.i.lock() += 1;
}
}
let _u = Unwinder { i: arc2 };
panic!();
})
.join();
let lock = arc.lock();
assert_eq!(*lock, 2);
}
#[test]
fn test_mutex_unsized() {
let mutex: &TicketMutex<[i32]> = &TicketMutex::<_>::new([1, 2, 3]);
{
let b = &mut *mutex.lock();
b[0] = 4;
b[2] = 5;
}
let comp: &[i32] = &[4, 2, 5];
assert_eq!(&*mutex.lock(), comp);
}
#[test]
fn is_locked() {
let mutex = TicketMutex::<_>::new(());
assert!(!mutex.is_locked());
let lock = mutex.lock();
assert!(mutex.is_locked());
drop(lock);
assert!(!mutex.is_locked());
}
}

789
vendor/spin/src/once.rs vendored Normal file

@@ -0,0 +1,789 @@
//! Synchronization primitives for one-time evaluation.
use crate::{
atomic::{AtomicU8, Ordering},
RelaxStrategy, Spin,
};
use core::{cell::UnsafeCell, fmt, marker::PhantomData, mem::MaybeUninit};
/// A primitive that provides lazy one-time initialization.
///
/// Unlike its `std::sync` equivalent, this is generalized such that the closure returns a
/// value to be stored by the [`Once`] (`std::sync::Once` can be trivially emulated with
/// `Once`).
///
/// Because [`Once::new`] is `const`, this primitive may be used to safely initialize statics.
///
/// # Examples
///
/// ```
/// use spin;
///
/// static START: spin::Once = spin::Once::new();
///
/// START.call_once(|| {
/// // run initialization here
/// });
/// ```
pub struct Once<T = (), R = Spin> {
phantom: PhantomData<R>,
status: AtomicStatus,
data: UnsafeCell<MaybeUninit<T>>,
}
impl<T, R> Default for Once<T, R> {
fn default() -> Self {
Self::new()
}
}
impl<T: fmt::Debug, R> fmt::Debug for Once<T, R> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.get() {
Some(s) => write!(f, "Once {{ data: ")
.and_then(|()| s.fmt(f))
.and_then(|()| write!(f, "}}")),
None => write!(f, "Once {{ <uninitialized> }}"),
}
}
}
// Same unsafe impls as `std::sync::RwLock`, because this also allows for
// concurrent reads.
unsafe impl<T: Send + Sync, R> Sync for Once<T, R> {}
unsafe impl<T: Send, R> Send for Once<T, R> {}
mod status {
use super::*;
// SAFETY: This structure has an invariant, namely that the inner atomic u8 must *always* have
// a value for which there exists a valid Status. This means that users of this API must only
// be allowed to load and store `Status`es.
#[repr(transparent)]
pub struct AtomicStatus(AtomicU8);
// Four states that a Once can be in, encoded into the lower bits of `status` in
// the Once structure.
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum Status {
Incomplete = 0x00,
Running = 0x01,
Complete = 0x02,
Panicked = 0x03,
}
impl Status {
// Construct a status from an inner u8 integer.
//
// # Safety
//
// For this to be safe, the inner number must have a valid corresponding enum variant.
unsafe fn new_unchecked(inner: u8) -> Self {
core::mem::transmute(inner)
}
}
impl AtomicStatus {
#[inline(always)]
pub const fn new(status: Status) -> Self {
// SAFETY: We got the value directly from status, so transmuting back is fine.
Self(AtomicU8::new(status as u8))
}
#[inline(always)]
pub fn load(&self, ordering: Ordering) -> Status {
// SAFETY: We know that the inner integer must have been constructed from a Status in
// the first place.
unsafe { Status::new_unchecked(self.0.load(ordering)) }
}
#[inline(always)]
pub fn store(&self, status: Status, ordering: Ordering) {
// SAFETY: While not directly unsafe, this is safe because the value was retrieved from
// a status, thus making transmutation safe.
self.0.store(status as u8, ordering);
}
#[inline(always)]
pub fn compare_exchange(
&self,
old: Status,
new: Status,
success: Ordering,
failure: Ordering,
) -> Result<Status, Status> {
match self
.0
.compare_exchange(old as u8, new as u8, success, failure)
{
// SAFETY: A compare exchange will always return a value that was later stored into
// the atomic u8, but due to the invariant that it must be a valid Status, we know
// that both Ok(_) and Err(_) will be safely transmutable.
Ok(ok) => Ok(unsafe { Status::new_unchecked(ok) }),
Err(err) => Err(unsafe { Status::new_unchecked(err) }),
}
}
#[inline(always)]
pub fn get_mut(&mut self) -> &mut Status {
// SAFETY: Since we know that the u8 inside must be a valid Status, we can safely cast
// it to a &mut Status.
unsafe { &mut *((self.0.get_mut() as *mut u8).cast::<Status>()) }
}
}
}
use self::status::{AtomicStatus, Status};
impl<T, R: RelaxStrategy> Once<T, R> {
/// Performs an initialization routine once and only once. The given closure
/// will be executed if this is the first time `call_once` has been called,
/// and otherwise the routine will *not* be invoked.
///
/// This method will block the calling thread if another initialization
/// routine is currently running.
///
/// When this function returns, it is guaranteed that some initialization
/// has run and completed (it may not be the closure specified). The
/// returned pointer will point to the result from the closure that was
/// run.
///
/// # Panics
///
/// This function will panic if the [`Once`] previously panicked while attempting
/// to initialize. This is similar to the poisoning behaviour of `std::sync`'s
/// primitives.
///
/// # Examples
///
/// ```
/// use spin;
///
/// static INIT: spin::Once<usize> = spin::Once::new();
///
/// fn get_cached_val() -> usize {
/// *INIT.call_once(expensive_computation)
/// }
///
/// fn expensive_computation() -> usize {
/// // ...
/// # 2
/// }
/// ```
pub fn call_once<F: FnOnce() -> T>(&self, f: F) -> &T {
match self.try_call_once(|| Ok::<T, core::convert::Infallible>(f())) {
Ok(x) => x,
Err(void) => match void {},
}
}
/// This method is similar to `call_once`, but allows the given closure to
/// fail, and leaves the `Once` in an uninitialized state if it does.
///
/// This method will block the calling thread if another initialization
/// routine is currently running.
///
/// When this function returns without error, it is guaranteed that some
/// initialization has run and completed (it may not be the closure
/// specified). The returned reference will point to the result from the
/// closure that was run.
///
/// # Panics
///
/// This function will panic if the [`Once`] previously panicked while attempting
/// to initialize. This is similar to the poisoning behaviour of `std::sync`'s
/// primitives.
///
/// # Examples
///
/// ```
/// use spin;
///
/// static INIT: spin::Once<usize> = spin::Once::new();
///
/// fn get_cached_val() -> Result<usize, String> {
/// INIT.try_call_once(expensive_fallible_computation).map(|x| *x)
/// }
///
/// fn expensive_fallible_computation() -> Result<usize, String> {
/// // ...
/// # Ok(2)
/// }
/// ```
pub fn try_call_once<F: FnOnce() -> Result<T, E>, E>(&self, f: F) -> Result<&T, E> {
if let Some(value) = self.get() {
Ok(value)
} else {
self.try_call_once_slow(f)
}
}
#[cold]
fn try_call_once_slow<F: FnOnce() -> Result<T, E>, E>(&self, f: F) -> Result<&T, E> {
loop {
let xchg = self.status.compare_exchange(
Status::Incomplete,
Status::Running,
Ordering::Acquire,
Ordering::Acquire,
);
match xchg {
Ok(_must_be_state_incomplete) => {
// Impl is defined after the match for readability
}
Err(Status::Panicked) => panic!("Once panicked"),
Err(Status::Running) => match self.poll() {
Some(v) => return Ok(v),
None => continue,
},
Err(Status::Complete) => {
return Ok(unsafe {
// SAFETY: The status is Complete
self.force_get()
});
}
Err(Status::Incomplete) => {
// The compare_exchange failed, so this shouldn't ever be reached,
// however if we decide to switch to compare_exchange_weak it will
// be safer to leave this here than hit an unreachable
continue;
}
}
// The compare-exchange succeeded, so we shall initialize it.
// We use a guard (Finish) to catch panics caused by builder
let finish = Finish {
status: &self.status,
};
let val = match f() {
Ok(val) => val,
Err(err) => {
// If an error occurs, clean up everything and leave.
core::mem::forget(finish);
self.status.store(Status::Incomplete, Ordering::Release);
return Err(err);
}
};
unsafe {
// SAFETY:
// `UnsafeCell`/deref: currently the only accessor, mutably
// and immutably by cas exclusion.
// `write`: pointer comes from `MaybeUninit`.
(*self.data.get()).as_mut_ptr().write(val);
};
// If there were to be a panic with unwind enabled, the code would
// short-circuit and never reach the point where it writes the inner data.
// The destructor for Finish will run, and poison the Once to ensure that other
// threads accessing it do not exhibit unwanted behavior, if there were to be
// any inconsistency in data structures caused by the panicking thread.
//
// However, f() is expected in the general case not to panic. In that case, we
// simply forget the guard, bypassing its destructor. We could theoretically
// clear a flag instead, but this eliminates the call to the destructor at
// compile time, and unconditionally poisons during an eventual panic, if
// unwinding is enabled.
core::mem::forget(finish);
// SAFETY: Release is required here, so that all memory accesses done in the
// closure when initializing, become visible to other threads that perform Acquire
// loads.
//
// And, we also know that the changes this thread has done will not magically
// disappear from our cache, so it does not need to be AcqRel.
self.status.store(Status::Complete, Ordering::Release);
// This next line is mainly an optimization.
return unsafe { Ok(self.force_get()) };
}
}
/// Spins until the [`Once`] contains a value.
///
/// Note that in releases prior to `0.7`, this function had the behaviour of [`Once::poll`].
///
/// # Panics
///
/// This function will panic if the [`Once`] previously panicked while attempting
/// to initialize. This is similar to the poisoning behaviour of `std::sync`'s
/// primitives.
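///
/// # Examples
///
/// A minimal single-threaded sketch; the value is written first, so the call returns immediately:
///
/// ```
/// static INIT: spin::Once<usize> = spin::Once::new();
///
/// INIT.call_once(|| 42);
/// assert_eq!(*INIT.wait(), 42);
/// ```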
pub fn wait(&self) -> &T {
loop {
match self.poll() {
Some(x) => break x,
None => R::relax(),
}
}
}
/// Like [`Once::get`], but will spin if the [`Once`] is in the process of being
/// initialized. If initialization has not even begun, `None` will be returned.
///
/// Note that in releases prior to `0.7`, this function was named `wait`.
///
/// # Panics
///
/// This function will panic if the [`Once`] previously panicked while attempting
/// to initialize. This is similar to the poisoning behaviour of `std::sync`'s
/// primitives.
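///
/// # Examples
///
/// A minimal single-threaded sketch, where polling never needs to spin:
///
/// ```
/// static INIT: spin::Once<usize> = spin::Once::new();
///
/// assert!(INIT.poll().is_none());
/// INIT.call_once(|| 3);
/// assert_eq!(INIT.poll().map(|x| *x), Some(3));
/// ```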
pub fn poll(&self) -> Option<&T> {
loop {
// SAFETY: Acquire is safe here, because if the status is COMPLETE, then we want to make
// sure that all memory accesses done while initializing that value are visible when
// we return a reference to the inner data after this load.
match self.status.load(Ordering::Acquire) {
Status::Incomplete => return None,
Status::Running => R::relax(), // We spin
Status::Complete => return Some(unsafe { self.force_get() }),
Status::Panicked => panic!("Once previously poisoned by a panic"),
}
}
}
}
impl<T, R> Once<T, R> {
/// Initialization constant of [`Once`].
#[allow(clippy::declare_interior_mutable_const)]
pub const INIT: Self = Self {
phantom: PhantomData,
status: AtomicStatus::new(Status::Incomplete),
data: UnsafeCell::new(MaybeUninit::uninit()),
};
/// Creates a new [`Once`].
pub const fn new() -> Self {
Self::INIT
}
/// Creates a new initialized [`Once`].
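///
/// # Examples
///
/// A minimal sketch of a static that needs no runtime initialization:
///
/// ```
/// static VALUE: spin::Once<usize> = spin::Once::initialized(42);
///
/// assert_eq!(VALUE.get(), Some(&42));
/// assert!(VALUE.is_completed());
/// ```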
pub const fn initialized(data: T) -> Self {
Self {
phantom: PhantomData,
status: AtomicStatus::new(Status::Complete),
data: UnsafeCell::new(MaybeUninit::new(data)),
}
}
/// Retrieve a pointer to the inner data.
///
/// While this method itself is safe, accessing the pointer before the [`Once`] has been
/// initialized is UB, unless the data has already been initialized by writing through a
/// pointer obtained from this method.
pub fn as_mut_ptr(&self) -> *mut T {
// SAFETY:
// * MaybeUninit<T> always has exactly the same layout as T
self.data.get().cast::<T>()
}
/// Get a reference to the initialized instance. Must only be called once COMPLETE.
unsafe fn force_get(&self) -> &T {
// SAFETY:
// * `UnsafeCell`/inner deref: data never changes again
// * `MaybeUninit`/outer deref: data was initialized
&*(*self.data.get()).as_ptr()
}
/// Get a reference to the initialized instance. Must only be called once COMPLETE.
unsafe fn force_get_mut(&mut self) -> &mut T {
// SAFETY:
// * `UnsafeCell`/inner deref: data never changes again
// * `MaybeUninit`/outer deref: data was initialized
&mut *(*self.data.get()).as_mut_ptr()
}
/// Get a reference to the initialized instance. Must only be called once COMPLETE.
unsafe fn force_into_inner(self) -> T {
// SAFETY:
// * `UnsafeCell`/inner deref: data never changes again
// * `MaybeUninit`/outer deref: data was initialized
(*self.data.get()).as_ptr().read()
}
/// Returns a reference to the inner value if the [`Once`] has been initialized.
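///
/// # Examples
///
/// A minimal sketch:
///
/// ```
/// static INIT: spin::Once<usize> = spin::Once::new();
///
/// assert!(INIT.get().is_none());
/// INIT.call_once(|| 2);
/// assert_eq!(INIT.get(), Some(&2));
/// ```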
pub fn get(&self) -> Option<&T> {
// SAFETY: Just as with `poll`, Acquire is safe here because we want to be able to see the
// nonatomic stores done when initializing, once we have loaded and checked the status.
match self.status.load(Ordering::Acquire) {
Status::Complete => Some(unsafe { self.force_get() }),
_ => None,
}
}
/// Returns a reference to the inner value on the unchecked assumption that the [`Once`] has been initialized.
///
/// # Safety
///
/// This is *extremely* unsafe if the `Once` has not already been initialized because a reference to uninitialized
/// memory will be returned, immediately triggering undefined behaviour (even if the reference goes unused).
/// However, this can be useful in some instances for exposing the `Once` to FFI or when the overhead of atomically
/// checking initialization is unacceptable and the `Once` has already been initialized.
pub unsafe fn get_unchecked(&self) -> &T {
debug_assert_eq!(
self.status.load(Ordering::SeqCst),
Status::Complete,
"Attempted to access an uninitialized Once. If this was run without debug checks, this would be undefined behaviour. This is a serious bug and you must fix it.",
);
self.force_get()
}
/// Returns a mutable reference to the inner value if the [`Once`] has been initialized.
///
/// Because this method requires a mutable reference to the [`Once`], no synchronization
/// overhead is required to access the inner value. In effect, it is zero-cost.
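///
/// # Examples
///
/// A minimal sketch; the exclusive borrow means no atomic synchronization is needed:
///
/// ```
/// let mut once = spin::Once::<usize>::new();
///
/// assert!(once.get_mut().is_none());
/// once.call_once(|| 2);
/// *once.get_mut().unwrap() += 1;
/// assert_eq!(once.get(), Some(&3));
/// ```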
pub fn get_mut(&mut self) -> Option<&mut T> {
match *self.status.get_mut() {
Status::Complete => Some(unsafe { self.force_get_mut() }),
_ => None,
}
}
/// Returns a mutable reference to the inner value
///
/// # Safety
///
/// This is *extremely* unsafe if the `Once` has not already been initialized because a reference to uninitialized
/// memory will be returned, immediately triggering undefined behaviour (even if the reference goes unused).
/// However, this can be useful in some instances for exposing the `Once` to FFI or when the overhead of atomically
/// checking initialization is unacceptable and the `Once` has already been initialized.
pub unsafe fn get_mut_unchecked(&mut self) -> &mut T {
debug_assert_eq!(
self.status.load(Ordering::SeqCst),
Status::Complete,
"Attempted to access an unintialized Once. If this was to run without debug checks, this would be undefined behavior. This is a serious bug and you must fix it.",
);
self.force_get_mut()
}
/// Returns the inner value if the [`Once`] has been initialized.
///
/// Because this method requires ownership of the [`Once`], no synchronization overhead
/// is required to access the inner value. In effect, it is zero-cost.
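///
/// # Examples
///
/// A minimal sketch of consuming the [`Once`]:
///
/// ```
/// let once = spin::Once::<usize>::new();
/// once.call_once(|| 2);
/// assert_eq!(once.try_into_inner(), Some(2));
///
/// let empty = spin::Once::<usize>::new();
/// assert_eq!(empty.try_into_inner(), None);
/// ```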
pub fn try_into_inner(mut self) -> Option<T> {
match *self.status.get_mut() {
Status::Complete => Some(unsafe { self.force_into_inner() }),
_ => None,
}
}
/// Returns the inner value on the unchecked assumption that the [`Once`] has been initialized.
///
/// # Safety
///
/// This is *extremely* unsafe if the `Once` has not already been initialized because uninitialized
/// memory will be read and returned, immediately triggering undefined behaviour.
/// This can be useful if the `Once` has already been initialized and you want to bypass an
/// `Option` check.
pub unsafe fn into_inner_unchecked(self) -> T {
debug_assert_eq!(
self.status.load(Ordering::SeqCst),
Status::Complete,
"Attempted to access an unintialized Once. If this was to run without debug checks, this would be undefined behavior. This is a serious bug and you must fix it.",
);
self.force_into_inner()
}
/// Checks whether the value has been initialized.
///
/// This is done using [`Acquire`](core::sync::atomic::Ordering::Acquire) ordering, and
/// therefore it is safe to access the value directly via
/// [`get_unchecked`](Self::get_unchecked) if this returns true.
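///
/// # Examples
///
/// A minimal sketch:
///
/// ```
/// static INIT: spin::Once<usize> = spin::Once::new();
///
/// assert!(!INIT.is_completed());
/// INIT.call_once(|| 2);
/// assert!(INIT.is_completed());
/// ```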
pub fn is_completed(&self) -> bool {
// TODO: Add a similar variant for Relaxed?
self.status.load(Ordering::Acquire) == Status::Complete
}
}
impl<T, R> From<T> for Once<T, R> {
fn from(data: T) -> Self {
Self::initialized(data)
}
}
impl<T, R> Drop for Once<T, R> {
fn drop(&mut self) {
// No need to do any atomic access here, we have &mut!
if *self.status.get_mut() == Status::Complete {
unsafe {
//TODO: Use MaybeUninit::assume_init_drop once stabilised
core::ptr::drop_in_place((*self.data.get()).as_mut_ptr());
}
}
}
}
struct Finish<'a> {
status: &'a AtomicStatus,
}
impl<'a> Drop for Finish<'a> {
fn drop(&mut self) {
// While using Relaxed here would most likely not be an issue, we use SeqCst anyway.
// This is mainly because panics are not meant to be fast at all, but also because if
// there were to be a compiler bug which reorders accesses within the same thread,
// where it should not, we want to be sure that the panic really is handled, and does
// not cause additional problems. SeqCst will therefore help guard against such
// bugs.
self.status.store(Status::Panicked, Ordering::SeqCst);
}
}
#[cfg(test)]
mod tests {
use std::prelude::v1::*;
use std::sync::atomic::AtomicU32;
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread;
use super::*;
#[test]
fn smoke_once() {
static O: Once = Once::new();
let mut a = 0;
O.call_once(|| a += 1);
assert_eq!(a, 1);
O.call_once(|| a += 1);
assert_eq!(a, 1);
}
#[test]
fn smoke_once_value() {
static O: Once<usize> = Once::new();
let a = O.call_once(|| 1);
assert_eq!(*a, 1);
let b = O.call_once(|| 2);
assert_eq!(*b, 1);
}
#[test]
fn stampede_once() {
static O: Once = Once::new();
static mut RUN: bool = false;
let (tx, rx) = channel();
let mut ts = Vec::new();
for _ in 0..10 {
let tx = tx.clone();
ts.push(thread::spawn(move || {
for _ in 0..4 {
thread::yield_now()
}
unsafe {
O.call_once(|| {
assert!(!RUN);
RUN = true;
});
assert!(RUN);
}
tx.send(()).unwrap();
}));
}
unsafe {
O.call_once(|| {
assert!(!RUN);
RUN = true;
});
assert!(RUN);
}
for _ in 0..10 {
rx.recv().unwrap();
}
for t in ts {
t.join().unwrap();
}
}
#[test]
fn get() {
static INIT: Once<usize> = Once::new();
assert!(INIT.get().is_none());
INIT.call_once(|| 2);
assert_eq!(INIT.get().map(|r| *r), Some(2));
}
#[test]
fn get_no_wait() {
static INIT: Once<usize> = Once::new();
assert!(INIT.get().is_none());
let t = thread::spawn(move || {
INIT.call_once(|| {
thread::sleep(std::time::Duration::from_secs(3));
42
});
});
assert!(INIT.get().is_none());
t.join().unwrap();
}
#[test]
fn poll() {
static INIT: Once<usize> = Once::new();
assert!(INIT.poll().is_none());
INIT.call_once(|| 3);
assert_eq!(INIT.poll().map(|r| *r), Some(3));
}
#[test]
fn wait() {
static INIT: Once<usize> = Once::new();
let t = std::thread::spawn(|| {
assert_eq!(*INIT.wait(), 3);
assert!(INIT.is_completed());
});
for _ in 0..4 {
thread::yield_now()
}
assert!(INIT.poll().is_none());
INIT.call_once(|| 3);
t.join().unwrap();
}
#[test]
fn panic() {
use std::panic;
static INIT: Once = Once::new();
// poison the once
let t = panic::catch_unwind(|| {
INIT.call_once(|| panic!());
});
assert!(t.is_err());
// poisoning propagates
let t = panic::catch_unwind(|| {
INIT.call_once(|| {});
});
assert!(t.is_err());
}
#[test]
fn init_constant() {
static O: Once = Once::INIT;
let mut a = 0;
O.call_once(|| a += 1);
assert_eq!(a, 1);
O.call_once(|| a += 1);
assert_eq!(a, 1);
}
static mut CALLED: bool = false;
struct DropTest {}
impl Drop for DropTest {
fn drop(&mut self) {
unsafe {
CALLED = true;
}
}
}
#[test]
fn try_call_once_err() {
let once = Once::<_, Spin>::new();
let shared = Arc::new((once, AtomicU32::new(0)));
let (tx, rx) = channel();
let t0 = {
let shared = shared.clone();
thread::spawn(move || {
let (once, called) = &*shared;
once.try_call_once(|| {
called.fetch_add(1, Ordering::AcqRel);
tx.send(()).unwrap();
thread::sleep(std::time::Duration::from_millis(50));
Err(())
})
.ok();
})
};
let t1 = {
let shared = shared.clone();
thread::spawn(move || {
rx.recv().unwrap();
let (once, called) = &*shared;
assert_eq!(
called.load(Ordering::Acquire),
1,
"leader thread did not run first"
);
once.call_once(|| {
called.fetch_add(1, Ordering::AcqRel);
});
})
};
t0.join().unwrap();
t1.join().unwrap();
assert_eq!(shared.1.load(Ordering::Acquire), 2);
}
// This is sort of two test cases, but if we write them as separate test methods
// they can be executed concurrently and then fail some small fraction of the
// time.
#[test]
fn drop_occurs_and_skip_uninit_drop() {
unsafe {
CALLED = false;
}
{
let once = Once::<_>::new();
once.call_once(|| DropTest {});
}
assert!(unsafe { CALLED });
// Now test that we skip drops for the uninitialized case.
unsafe {
CALLED = false;
}
let once = Once::<DropTest>::new();
drop(once);
assert!(unsafe { !CALLED });
}
#[test]
fn call_once_test() {
for _ in 0..20 {
use std::sync::atomic::AtomicUsize;
use std::sync::Arc;
use std::time::Duration;
let share = Arc::new(AtomicUsize::new(0));
let once = Arc::new(Once::<_, Spin>::new());
let mut hs = Vec::new();
for _ in 0..8 {
let h = thread::spawn({
let share = share.clone();
let once = once.clone();
move || {
thread::sleep(Duration::from_millis(10));
once.call_once(|| {
share.fetch_add(1, Ordering::SeqCst);
});
}
});
hs.push(h);
}
for h in hs {
h.join().unwrap();
}
assert_eq!(1, share.load(Ordering::SeqCst));
}
}
}

61
vendor/spin/src/relax.rs vendored Normal file

@@ -0,0 +1,61 @@
//! Strategies that determine the behaviour of locks when encountering contention.
/// A trait implemented by spinning relax strategies.
pub trait RelaxStrategy {
/// Perform the relaxing operation during a period of contention.
fn relax();
}
/// A strategy that rapidly spins while informing the CPU that it should power down non-essential components via
/// [`core::hint::spin_loop`].
///
/// Note that spinning is a 'dumb' strategy and most schedulers cannot correctly differentiate it from useful work,
/// thereby misallocating even more CPU time to the spinning process. This is known as
/// ['priority inversion'](https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html).
///
/// If you see signs that priority inversion is occurring, consider switching to [`Yield`] or, even better, not using a
/// spinlock at all and opting for a proper scheduler-aware lock. Remember also that different targets, operating
/// systems, schedulers, and even the same scheduler with different workloads will exhibit different behaviour. Just
/// because priority inversion isn't occurring in your tests does not mean that it will not occur. Use a scheduler-
/// aware lock if at all possible.
pub struct Spin;
impl RelaxStrategy for Spin {
#[inline(always)]
fn relax() {
// Use the deprecated spin_loop_hint() to ensure that we don't get
// a higher MSRV than we need.
#[allow(deprecated)]
core::sync::atomic::spin_loop_hint();
}
}
/// A strategy that yields the current time slice to the scheduler in favour of other threads or processes.
///
/// This is generally used as a strategy for minimising power consumption and priority inversion on targets that have a
/// standard library available. Note that such targets have scheduler-integrated concurrency primitives available, and
/// you should generally use these instead, except in rare circumstances.
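///
/// # Example
///
/// A minimal sketch of selecting this strategy for a lock; it assumes the crate's `std` feature
/// (which gates `Yield`) and the spin mutex are enabled:
///
/// ```
/// use spin::{mutex::SpinMutex, relax::Yield};
///
/// let lock = SpinMutex::<_, Yield>::new(42);
/// assert_eq!(*lock.lock(), 42);
/// ```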
#[cfg(feature = "std")]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
pub struct Yield;
#[cfg(feature = "std")]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
impl RelaxStrategy for Yield {
#[inline(always)]
fn relax() {
std::thread::yield_now();
}
}
/// A strategy that rapidly spins, without telling the CPU to do any powering down.
///
/// You almost certainly do not want to use this. Use [`Spin`] instead. It exists for completeness and for targets
/// that, for some reason, miscompile or do not support spin hint intrinsics despite attempting to generate code for
/// them (i.e., this is a workaround for possible compiler bugs).
pub struct Loop;
impl RelaxStrategy for Loop {
#[inline(always)]
fn relax() {}
}

1165
vendor/spin/src/rwlock.rs vendored Normal file

File diff suppressed because it is too large