Vendor dependencies for 0.3.0 release

2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

290
vendor/async-lock/src/barrier.rs vendored Normal file

@@ -0,0 +1,290 @@
use event_listener::{Event, EventListener};
use event_listener_strategy::{easy_wrapper, EventListenerFuture, Strategy};
use core::fmt;
use core::pin::Pin;
use core::task::Poll;
use crate::futures::Lock;
use crate::Mutex;
/// A counter to synchronize multiple tasks at the same time.
#[derive(Debug)]
pub struct Barrier {
n: usize,
state: Mutex<State>,
event: Event,
}
#[derive(Debug)]
struct State {
count: usize,
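/// Incremented every time the `n`th task arrives and the barrier resets, so stale
/// wakeups from a previous generation can be told apart from the current one.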
generation_id: u64,
}
impl Barrier {
const_fn! {
const_if: #[cfg(not(loom))];
/// Creates a barrier that can block the given number of tasks.
///
/// A barrier will block `n`-1 tasks which call [`wait()`] and then wake up all tasks
/// at once when the `n`th task calls [`wait()`].
///
/// [`wait()`]: `Barrier::wait()`
///
/// # Examples
///
/// ```
/// use async_lock::Barrier;
///
/// let barrier = Barrier::new(5);
/// ```
pub const fn new(n: usize) -> Barrier {
Barrier {
n,
state: Mutex::new(State {
count: 0,
generation_id: 0,
}),
event: Event::new(),
}
}
}
/// Blocks the current task until all tasks reach this point.
///
/// Barriers are reusable after all tasks have synchronized, and can be used continuously.
///
/// Returns a [`BarrierWaitResult`] indicating whether this task is the "leader", meaning the
/// last task to call this method.
///
/// # Examples
///
/// ```
/// use async_lock::Barrier;
/// use futures_lite::future;
/// use std::sync::Arc;
/// use std::thread;
///
/// let barrier = Arc::new(Barrier::new(5));
///
/// for _ in 0..5 {
/// let b = barrier.clone();
/// thread::spawn(move || {
/// future::block_on(async {
/// // The same messages will be printed together.
/// // There will NOT be interleaving of "before" and "after".
/// println!("before wait");
/// b.wait().await;
/// println!("after wait");
/// });
/// });
/// }
/// ```
pub fn wait(&self) -> BarrierWait<'_> {
BarrierWait::_new(BarrierWaitInner {
barrier: self,
lock: Some(self.state.lock()),
evl: None,
state: WaitState::Initial,
})
}
/// Blocks the current thread until all tasks reach this point.
///
/// Barriers are reusable after all tasks have synchronized, and can be used continuously.
///
/// Returns a [`BarrierWaitResult`] indicating whether this task is the "leader", meaning the
/// last task to call this method.
///
/// # Blocking
///
/// Rather than using asynchronous waiting, like the [`wait`][`Barrier::wait`] method,
/// this method will block the current thread until the wait is complete.
///
/// This method should not be used in an asynchronous context. It is intended to be
/// used in a way that a barrier can be used in both asynchronous and synchronous contexts.
/// Calling this method in an asynchronous context may result in a deadlock.
///
/// # Examples
///
/// ```
/// use async_lock::Barrier;
/// use futures_lite::future;
/// use std::sync::Arc;
/// use std::thread;
///
/// let barrier = Arc::new(Barrier::new(5));
///
/// for _ in 0..5 {
/// let b = barrier.clone();
/// thread::spawn(move || {
/// // The same messages will be printed together.
/// // There will NOT be interleaving of "before" and "after".
/// println!("before wait");
/// b.wait_blocking();
/// println!("after wait");
/// });
/// }
/// # // Wait for threads to stop.
/// # std::thread::sleep(std::time::Duration::from_secs(1));
/// ```
#[cfg(all(feature = "std", not(target_family = "wasm")))]
pub fn wait_blocking(&self) -> BarrierWaitResult {
self.wait().wait()
}
}
easy_wrapper! {
/// The future returned by [`Barrier::wait()`].
pub struct BarrierWait<'a>(BarrierWaitInner<'a> => BarrierWaitResult);
#[cfg(all(feature = "std", not(target_family = "wasm")))]
pub(crate) wait();
}
pin_project_lite::pin_project! {
/// The future returned by [`Barrier::wait()`].
struct BarrierWaitInner<'a> {
// The barrier to wait on.
barrier: &'a Barrier,
// The ongoing mutex lock operation we are blocking on.
#[pin]
lock: Option<Lock<'a, State>>,
// An event listener for the `barrier.event` event.
evl: Option<EventListener>,
// The current state of the future.
state: WaitState,
}
}
impl fmt::Debug for BarrierWait<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("BarrierWait { .. }")
}
}
enum WaitState {
/// We are getting the original values of the state.
Initial,
/// We are waiting for the listener to complete.
Waiting { local_gen: u64 },
/// Waiting to re-acquire the lock to check the state again.
Reacquiring { local_gen: u64 },
}
impl EventListenerFuture for BarrierWaitInner<'_> {
type Output = BarrierWaitResult;
fn poll_with_strategy<'a, S: Strategy<'a>>(
self: Pin<&mut Self>,
strategy: &mut S,
cx: &mut S::Context,
) -> Poll<Self::Output> {
let mut this = self.project();
loop {
match this.state {
WaitState::Initial => {
// See if the lock is ready yet.
let mut state = ready!(this
.lock
.as_mut()
.as_pin_mut()
.unwrap()
.poll_with_strategy(strategy, cx));
this.lock.as_mut().set(None);
let local_gen = state.generation_id;
state.count += 1;
if state.count < this.barrier.n {
// We need to wait for the event.
*this.evl = Some(this.barrier.event.listen());
*this.state = WaitState::Waiting { local_gen };
} else {
// We are the last one.
state.count = 0;
state.generation_id = state.generation_id.wrapping_add(1);
this.barrier.event.notify(usize::MAX);
return Poll::Ready(BarrierWaitResult { is_leader: true });
}
}
WaitState::Waiting { local_gen } => {
ready!(strategy.poll(this.evl, cx));
// We are now re-acquiring the mutex.
this.lock.as_mut().set(Some(this.barrier.state.lock()));
*this.state = WaitState::Reacquiring {
local_gen: *local_gen,
};
}
WaitState::Reacquiring { local_gen } => {
// Acquire the local state again.
let state = ready!(this
.lock
.as_mut()
.as_pin_mut()
.unwrap()
.poll_with_strategy(strategy, cx));
this.lock.set(None);
if *local_gen == state.generation_id && state.count < this.barrier.n {
// We need to wait for the event again.
*this.evl = Some(this.barrier.event.listen());
*this.state = WaitState::Waiting {
local_gen: *local_gen,
};
} else {
// We are ready, but not the leader.
return Poll::Ready(BarrierWaitResult { is_leader: false });
}
}
}
}
}
}
/// Returned by [`Barrier::wait()`] when all tasks have called it.
///
/// # Examples
///
/// ```
/// # futures_lite::future::block_on(async {
/// use async_lock::Barrier;
///
/// let barrier = Barrier::new(1);
/// let barrier_wait_result = barrier.wait().await;
/// # });
/// ```
#[derive(Debug, Clone)]
pub struct BarrierWaitResult {
is_leader: bool,
}
impl BarrierWaitResult {
/// Returns `true` if this task was the last to call [`Barrier::wait()`].
///
/// # Examples
///
/// ```
/// # futures_lite::future::block_on(async {
/// use async_lock::Barrier;
/// use futures_lite::future;
///
/// let barrier = Barrier::new(2);
/// let (a, b) = future::zip(barrier.wait(), barrier.wait()).await;
/// assert_eq!(a.is_leader(), false);
/// assert_eq!(b.is_leader(), true);
/// # });
/// ```
pub fn is_leader(&self) -> bool {
self.is_leader
}
}

168
vendor/async-lock/src/lib.rs vendored Normal file

@@ -0,0 +1,168 @@
//! Async synchronization primitives.
//!
//! This crate provides the following primitives:
//!
//! * [`Barrier`] - enables tasks to synchronize all together at the same time.
//! * [`Mutex`] - a mutual exclusion lock.
//! * [`RwLock`] - a reader-writer lock, allowing any number of readers or a single writer.
//! * [`Semaphore`] - limits the number of concurrent operations.
//!
//! ## Relationship with `std::sync`
//!
//! In general, you should consider using [`std::sync`] types over types from this crate.
//!
//! There are two primary use cases for types from this crate:
//!
//! - You need to use a synchronization primitive in a `no_std` environment.
//! - You need to hold a lock across an `.await` point.
//! (Holding an [`std::sync`] lock guard across an `.await` will make your future non-`Send`,
//! and is also highly likely to cause deadlocks.)
//!
//! If you already use `libstd` and you aren't holding locks across await points (there is a
//! Clippy lint called [`await_holding_lock`] that emits warnings for this scenario), you should
//! consider [`std::sync`] instead of this crate. Those types are optimized for the currently
//! running operating system, are less complex and are generally much faster.
//!
//! In contrast, `async-lock`'s notification system uses `std::sync::Mutex` under the hood if
//! the `std` feature is enabled, and will fall back to a significantly slower strategy if it is
//! not. So, there are few cases where `async-lock` is a win for performance over [`std::sync`].
//!
//! [`std::sync`]: https://doc.rust-lang.org/std/sync/index.html
//! [`await_holding_lock`]: https://rust-lang.github.io/rust-clippy/stable/index.html#/await_holding_lock
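//!
//! For example, unlike a [`std::sync`] guard, a guard from this crate can safely be
//! held across an `.await` point. A small illustrative sketch (using `futures_lite`,
//! as the examples elsewhere in this crate do):
//!
//! ```
//! # futures_lite::future::block_on(async {
//! use async_lock::Mutex;
//!
//! let data = Mutex::new(vec![1, 2, 3]);
//!
//! let mut guard = data.lock().await;
//! guard.push(4);
//! // The guard is still held across this `.await` point.
//! futures_lite::future::yield_now().await;
//! assert_eq!(*guard, [1, 2, 3, 4]);
//! # });
//! ```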
#![cfg_attr(not(feature = "std"), no_std)]
#![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)]
#![doc(
html_favicon_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png"
)]
#![doc(
html_logo_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png"
)]
extern crate alloc;
/// Simple macro to extract the value of `Poll` or return `Pending`.
///
/// TODO: Drop in favor of `core::task::ready`, once MSRV is bumped to 1.64.
macro_rules! ready {
($e:expr) => {{
use ::core::task::Poll;
match $e {
Poll::Ready(v) => v,
Poll::Pending => return Poll::Pending,
}
}};
}
/// Pins a variable on the stack.
///
/// TODO: Drop in favor of `core::pin::pin`, once MSRV is bumped to 1.68.
#[cfg(all(feature = "std", not(target_family = "wasm")))]
macro_rules! pin {
($($x:ident),* $(,)?) => {
$(
let mut $x = $x;
#[allow(unused_mut)]
let mut $x = unsafe {
core::pin::Pin::new_unchecked(&mut $x)
};
)*
}
}
/// Make the given function const if the given condition is true.
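///
/// This lets constructors be `const fn` in normal builds while staying a plain `fn`
/// under `cfg(loom)`, since loom's primitives do not provide `const` constructors.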
macro_rules! const_fn {
(
const_if: #[cfg($($cfg:tt)+)];
$(#[$($attr:tt)*])*
$vis:vis const fn $($rest:tt)*
) => {
#[cfg($($cfg)+)]
$(#[$($attr)*])*
$vis const fn $($rest)*
#[cfg(not($($cfg)+))]
$(#[$($attr)*])*
$vis fn $($rest)*
};
}
mod barrier;
mod mutex;
mod once_cell;
mod rwlock;
mod semaphore;
pub use barrier::{Barrier, BarrierWaitResult};
pub use mutex::{Mutex, MutexGuard, MutexGuardArc};
pub use once_cell::OnceCell;
pub use rwlock::{
RwLock, RwLockReadGuard, RwLockReadGuardArc, RwLockUpgradableReadGuard,
RwLockUpgradableReadGuardArc, RwLockWriteGuard, RwLockWriteGuardArc,
};
pub use semaphore::{Semaphore, SemaphoreGuard, SemaphoreGuardArc};
pub mod futures {
//! Named futures for use with `async_lock` primitives.
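//!
//! Unlike the anonymous future of an `async fn`, these types can be named, so they
//! can be stored in other types without boxing. A small sketch:
//!
//! ```
//! # futures_lite::future::block_on(async {
//! use async_lock::{futures::Lock, Mutex};
//!
//! let mutex = Mutex::new(0i32);
//! // The lock future has a nameable type.
//! let lock: Lock<'_, i32> = mutex.lock();
//! assert_eq!(*lock.await, 0);
//! # });
//! ```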
pub use crate::barrier::BarrierWait;
pub use crate::mutex::{Lock, LockArc};
pub use crate::rwlock::futures::{
Read, ReadArc, UpgradableRead, UpgradableReadArc, Upgrade, UpgradeArc, Write, WriteArc,
};
pub use crate::semaphore::{Acquire, AcquireArc};
}
#[cfg(not(loom))]
/// Synchronization primitive implementation.
mod sync {
pub(super) use core::sync::atomic;
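/// Emulates the `with_mut` method that loom's atomics provide, so the rest of the
/// crate can use the same API with and without `cfg(loom)`.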
pub(super) trait WithMut {
type Output;
fn with_mut<F, R>(&mut self, f: F) -> R
where
F: FnOnce(&mut Self::Output) -> R;
}
impl WithMut for atomic::AtomicUsize {
type Output = usize;
#[inline]
fn with_mut<F, R>(&mut self, f: F) -> R
where
F: FnOnce(&mut Self::Output) -> R,
{
f(self.get_mut())
}
}
}
#[cfg(loom)]
/// Synchronization primitive implementation.
mod sync {
pub(super) use loom::sync::atomic;
}
#[cold]
fn abort() -> ! {
// For no_std targets, panicking while panicking is defined as an abort
#[cfg(not(feature = "std"))]
{
struct Bomb;
impl Drop for Bomb {
fn drop(&mut self) {
panic!("Panicking while panicking to abort")
}
}
let _bomb = Bomb;
panic!("Panicking while panicking to abort")
}
// For libstd targets, abort using std::process::abort
#[cfg(feature = "std")]
std::process::abort()
}

762
vendor/async-lock/src/mutex.rs vendored Normal file

@@ -0,0 +1,762 @@
use core::borrow::Borrow;
use core::cell::UnsafeCell;
use core::fmt;
use core::marker::{PhantomData, PhantomPinned};
use core::ops::{Deref, DerefMut};
use core::pin::Pin;
use core::task::Poll;
use alloc::sync::Arc;
// We don't use loom::UnsafeCell as that doesn't work with the Mutex API.
use crate::sync::atomic::{AtomicUsize, Ordering};
#[cfg(all(feature = "std", not(target_family = "wasm")))]
use std::time::{Duration, Instant};
use event_listener::{Event, EventListener};
use event_listener_strategy::{easy_wrapper, EventListenerFuture};
/// An async mutex.
///
/// The locking mechanism uses eventual fairness to ensure locking will be fair on average without
/// sacrificing performance. This is done by forcing a fair lock whenever a lock operation is
/// starved for longer than 0.5 milliseconds.
///
/// # Examples
///
/// ```
/// # futures_lite::future::block_on(async {
/// use async_lock::Mutex;
///
/// let m = Mutex::new(1);
///
/// let mut guard = m.lock().await;
/// *guard = 2;
///
/// assert!(m.try_lock().is_none());
/// drop(guard);
/// assert_eq!(*m.try_lock().unwrap(), 2);
/// # })
/// ```
pub struct Mutex<T: ?Sized> {
/// Current state of the mutex.
///
/// The least significant bit is set to 1 if the mutex is locked.
/// The other bits hold the number of starved lock operations.
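///
/// For example, a state of `0b101` means the mutex is currently locked and two lock
/// operations have marked themselves as starved.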
state: AtomicUsize,
/// Lock operations waiting for the mutex to be released.
lock_ops: Event,
/// The value inside the mutex.
data: UnsafeCell<T>,
}
unsafe impl<T: Send + ?Sized> Send for Mutex<T> {}
unsafe impl<T: Send + ?Sized> Sync for Mutex<T> {}
impl<T> Mutex<T> {
const_fn! {
const_if: #[cfg(not(loom))];
/// Creates a new async mutex.
///
/// # Examples
///
/// ```
/// use async_lock::Mutex;
///
/// let mutex = Mutex::new(0);
/// ```
pub const fn new(data: T) -> Mutex<T> {
Mutex {
state: AtomicUsize::new(0),
lock_ops: Event::new(),
data: UnsafeCell::new(data),
}
}
}
/// Consumes the mutex, returning the underlying data.
///
/// # Examples
///
/// ```
/// use async_lock::Mutex;
///
/// let mutex = Mutex::new(10);
/// assert_eq!(mutex.into_inner(), 10);
/// ```
pub fn into_inner(self) -> T {
self.data.into_inner()
}
}
impl<T: ?Sized> Mutex<T> {
/// Acquires the mutex.
///
/// Returns a guard that releases the mutex when dropped.
///
/// # Examples
///
/// ```
/// # futures_lite::future::block_on(async {
/// use async_lock::Mutex;
///
/// let mutex = Mutex::new(10);
/// let guard = mutex.lock().await;
/// assert_eq!(*guard, 10);
/// # })
/// ```
#[inline]
pub fn lock(&self) -> Lock<'_, T> {
Lock::_new(LockInner {
mutex: self,
acquire_slow: None,
})
}
/// Acquires the mutex using the blocking strategy.
///
/// Returns a guard that releases the mutex when dropped.
///
/// # Blocking
///
/// Rather than using asynchronous waiting, like the [`lock`][Mutex::lock] method,
/// this method will block the current thread until the lock is acquired.
///
/// This method should not be used in an asynchronous context. It is intended to be
/// used in a way that a mutex can be used in both asynchronous and synchronous contexts.
/// Calling this method in an asynchronous context may result in a deadlock.
///
/// # Examples
///
/// ```
/// use async_lock::Mutex;
///
/// let mutex = Mutex::new(10);
/// let guard = mutex.lock_blocking();
/// assert_eq!(*guard, 10);
/// ```
#[cfg(all(feature = "std", not(target_family = "wasm")))]
#[inline]
pub fn lock_blocking(&self) -> MutexGuard<'_, T> {
self.lock().wait()
}
/// Attempts to acquire the mutex.
///
/// If the mutex could not be acquired at this time, then [`None`] is returned. Otherwise, a
/// guard is returned that releases the mutex when dropped.
///
/// # Examples
///
/// ```
/// use async_lock::Mutex;
///
/// let mutex = Mutex::new(10);
/// if let Some(guard) = mutex.try_lock() {
/// assert_eq!(*guard, 10);
/// }
/// # ;
/// ```
#[inline]
pub fn try_lock(&self) -> Option<MutexGuard<'_, T>> {
if self
.state
.compare_exchange(0, 1, Ordering::Acquire, Ordering::Acquire)
.is_ok()
{
Some(MutexGuard(self))
} else {
None
}
}
/// Returns a mutable reference to the underlying data.
///
/// Since this call borrows the mutex mutably, no actual locking takes place -- the mutable
/// borrow statically guarantees the mutex is not already acquired.
///
/// # Examples
///
/// ```
/// # futures_lite::future::block_on(async {
/// use async_lock::Mutex;
///
/// let mut mutex = Mutex::new(0);
/// *mutex.get_mut() = 10;
/// assert_eq!(*mutex.lock().await, 10);
/// # })
/// ```
pub fn get_mut(&mut self) -> &mut T {
self.data.get_mut()
}
/// Unlocks the mutex directly.
///
/// # Safety
///
/// This function is intended to be used only in the case where the mutex is locked,
/// and the guard is subsequently forgotten. Calling this while you don't hold a lock
/// on the mutex will likely lead to UB.
pub(crate) unsafe fn unlock_unchecked(&self) {
// Remove the last bit and notify a waiting lock operation.
self.state.fetch_sub(1, Ordering::Release);
self.lock_ops.notify(1);
}
}
impl<T: ?Sized> Mutex<T> {
/// Acquires the mutex and clones a reference to it.
///
/// Returns an owned guard that releases the mutex when dropped.
///
/// # Examples
///
/// ```
/// # futures_lite::future::block_on(async {
/// use async_lock::Mutex;
/// use std::sync::Arc;
///
/// let mutex = Arc::new(Mutex::new(10));
/// let guard = mutex.lock_arc().await;
/// assert_eq!(*guard, 10);
/// # })
/// ```
#[inline]
pub fn lock_arc(self: &Arc<Self>) -> LockArc<T> {
LockArc::_new(LockArcInnards::Unpolled {
mutex: Some(self.clone()),
})
}
/// Acquires the mutex and clones a reference to it using the blocking strategy.
///
/// Returns an owned guard that releases the mutex when dropped.
///
/// # Blocking
///
/// Rather than using asynchronous waiting, like the [`lock_arc`][Mutex::lock_arc] method,
/// this method will block the current thread until the lock is acquired.
///
/// This method should not be used in an asynchronous context. It is intended to be
/// used in a way that a mutex can be used in both asynchronous and synchronous contexts.
/// Calling this method in an asynchronous context may result in a deadlock.
///
/// # Examples
///
/// ```
/// use async_lock::Mutex;
/// use std::sync::Arc;
///
/// let mutex = Arc::new(Mutex::new(10));
/// let guard = mutex.lock_arc_blocking();
/// assert_eq!(*guard, 10);
/// ```
#[cfg(all(feature = "std", not(target_family = "wasm")))]
#[inline]
pub fn lock_arc_blocking(self: &Arc<Self>) -> MutexGuardArc<T> {
self.lock_arc().wait()
}
/// Attempts to acquire the mutex and clone a reference to it.
///
/// If the mutex could not be acquired at this time, then [`None`] is returned. Otherwise, an
/// owned guard is returned that releases the mutex when dropped.
///
/// # Examples
///
/// ```
/// use async_lock::Mutex;
/// use std::sync::Arc;
///
/// let mutex = Arc::new(Mutex::new(10));
/// if let Some(guard) = mutex.try_lock_arc() {
/// assert_eq!(*guard, 10);
/// }
/// # ;
/// ```
#[inline]
pub fn try_lock_arc(self: &Arc<Self>) -> Option<MutexGuardArc<T>> {
if self
.state
.compare_exchange(0, 1, Ordering::Acquire, Ordering::Acquire)
.is_ok()
{
Some(MutexGuardArc(self.clone()))
} else {
None
}
}
}
impl<T: fmt::Debug + ?Sized> fmt::Debug for Mutex<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
struct Locked;
impl fmt::Debug for Locked {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("<locked>")
}
}
match self.try_lock() {
None => f.debug_struct("Mutex").field("data", &Locked).finish(),
Some(guard) => f.debug_struct("Mutex").field("data", &&*guard).finish(),
}
}
}
impl<T> From<T> for Mutex<T> {
fn from(val: T) -> Mutex<T> {
Mutex::new(val)
}
}
impl<T: Default> Default for Mutex<T> {
fn default() -> Mutex<T> {
Mutex::new(Default::default())
}
}
easy_wrapper! {
/// The future returned by [`Mutex::lock`].
pub struct Lock<'a, T: ?Sized>(LockInner<'a, T> => MutexGuard<'a, T>);
#[cfg(all(feature = "std", not(target_family = "wasm")))]
pub(crate) wait();
}
pin_project_lite::pin_project! {
/// Inner future for acquiring the mutex.
struct LockInner<'a, T: ?Sized> {
// Reference to the mutex.
mutex: &'a Mutex<T>,
// The future that waits for the mutex to become available.
#[pin]
acquire_slow: Option<AcquireSlow<&'a Mutex<T>, T>>,
}
}
unsafe impl<T: Send + ?Sized> Send for Lock<'_, T> {}
unsafe impl<T: Sync + ?Sized> Sync for Lock<'_, T> {}
impl<T: ?Sized> fmt::Debug for Lock<'_, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("Lock { .. }")
}
}
impl<'a, T: ?Sized> EventListenerFuture for LockInner<'a, T> {
type Output = MutexGuard<'a, T>;
#[inline]
fn poll_with_strategy<'x, S: event_listener_strategy::Strategy<'x>>(
self: Pin<&mut Self>,
strategy: &mut S,
context: &mut S::Context,
) -> Poll<Self::Output> {
let mut this = self.project();
// This may seem weird, but the borrow checker complains otherwise.
if this.acquire_slow.is_none() {
match this.mutex.try_lock() {
Some(guard) => return Poll::Ready(guard),
None => {
this.acquire_slow.set(Some(AcquireSlow::new(this.mutex)));
}
}
}
ready!(this
.acquire_slow
.as_pin_mut()
.unwrap()
.poll_with_strategy(strategy, context));
Poll::Ready(MutexGuard(this.mutex))
}
}
easy_wrapper! {
/// The future returned by [`Mutex::lock_arc`].
pub struct LockArc<T: ?Sized>(LockArcInnards<T> => MutexGuardArc<T>);
#[cfg(all(feature = "std", not(target_family = "wasm")))]
pub(crate) wait();
}
pin_project_lite::pin_project! {
#[project = LockArcInnardsProj]
enum LockArcInnards<T: ?Sized> {
/// We have not tried to poll the fast path yet.
Unpolled { mutex: Option<Arc<Mutex<T>>> },
/// We are acquiring the mutex through the slow path.
AcquireSlow {
#[pin]
inner: AcquireSlow<Arc<Mutex<T>>, T>
},
}
}
unsafe impl<T: Send + ?Sized> Send for LockArc<T> {}
unsafe impl<T: Sync + ?Sized> Sync for LockArc<T> {}
impl<T: ?Sized> fmt::Debug for LockArcInnards<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("LockArc { .. }")
}
}
impl<T: ?Sized> EventListenerFuture for LockArcInnards<T> {
type Output = MutexGuardArc<T>;
fn poll_with_strategy<'a, S: event_listener_strategy::Strategy<'a>>(
mut self: Pin<&mut Self>,
strategy: &mut S,
context: &mut S::Context,
) -> Poll<Self::Output> {
// Set the inner future if needed.
if let LockArcInnardsProj::Unpolled { mutex } = self.as_mut().project() {
let mutex = mutex.take().expect("mutex taken more than once");
// Try the fast path before trying to register slowly.
if let Some(guard) = mutex.try_lock_arc() {
return Poll::Ready(guard);
}
// Set the inner future to the slow acquire path.
self.as_mut().set(LockArcInnards::AcquireSlow {
inner: AcquireSlow::new(mutex),
});
}
// Poll the inner future.
let value = match self.project() {
LockArcInnardsProj::AcquireSlow { inner } => {
ready!(inner.poll_with_strategy(strategy, context))
}
_ => unreachable!(),
};
Poll::Ready(MutexGuardArc(value))
}
}
pin_project_lite::pin_project! {
/// Future for acquiring the mutex slowly.
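///
/// This future first runs an optimistic loop: listen on `lock_ops` and retry the lock
/// whenever woken. Once it has waited longer than roughly 0.5 ms, it marks itself as
/// starved by adding 2 to `state` and falls through to the fairer loop below.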
struct AcquireSlow<B: Borrow<Mutex<T>>, T: ?Sized> {
// Reference to the mutex.
mutex: Option<B>,
// The event listener waiting on the mutex.
listener: Option<EventListener>,
// The point at which the mutex lock was started.
start: Start,
// This lock operation is starving.
starved: bool,
// Capture the `T` lifetime.
#[pin]
_marker: PhantomData<T>,
// Keeping this type `!Unpin` enables future optimizations.
#[pin]
_pin: PhantomPinned
}
impl<T: ?Sized, B: Borrow<Mutex<T>>> PinnedDrop for AcquireSlow<B, T> {
fn drop(this: Pin<&mut Self>) {
// Make sure the starvation counter is decremented.
this.take_mutex();
}
}
}
/// `pin_project_lite` doesn't support `#[cfg]` yet, so we have to do this manually.
struct Start {
#[cfg(all(feature = "std", not(target_family = "wasm")))]
start: Option<Instant>,
}
impl<T: ?Sized, B: Borrow<Mutex<T>>> AcquireSlow<B, T> {
/// Create a new `AcquireSlow` future.
#[cold]
fn new(mutex: B) -> Self {
AcquireSlow {
mutex: Some(mutex),
listener: None,
start: Start {
#[cfg(all(feature = "std", not(target_family = "wasm")))]
start: None,
},
starved: false,
_marker: PhantomData,
_pin: PhantomPinned,
}
}
/// Take the mutex reference out, decrementing the counter if necessary.
fn take_mutex(self: Pin<&mut Self>) -> Option<B> {
let this = self.project();
let mutex = this.mutex.take();
if *this.starved {
if let Some(mutex) = mutex.as_ref() {
// Decrement this counter before we exit.
mutex.borrow().state.fetch_sub(2, Ordering::Release);
}
}
mutex
}
}
impl<T: ?Sized, B: Unpin + Borrow<Mutex<T>>> EventListenerFuture for AcquireSlow<B, T> {
type Output = B;
#[cold]
fn poll_with_strategy<'a, S: event_listener_strategy::Strategy<'a>>(
mut self: Pin<&mut Self>,
strategy: &mut S,
context: &mut S::Context,
) -> Poll<Self::Output> {
let this = self.as_mut().project();
#[cfg(all(feature = "std", not(target_family = "wasm")))]
let start = *this.start.start.get_or_insert_with(Instant::now);
let mutex = Borrow::<Mutex<T>>::borrow(
this.mutex.as_ref().expect("future polled after completion"),
);
// Only use this hot loop if we aren't currently starved.
if !*this.starved {
loop {
// Start listening for events.
if this.listener.is_none() {
*this.listener = Some(mutex.lock_ops.listen());
// Try locking if nobody is being starved.
match mutex
.state
.compare_exchange(0, 1, Ordering::Acquire, Ordering::Acquire)
.unwrap_or_else(|x| x)
{
// Lock acquired!
0 => return Poll::Ready(self.take_mutex().unwrap()),
// Lock is held and nobody is starved.
1 => {}
// Somebody is starved.
_ => break,
}
} else {
ready!(strategy.poll(this.listener, context));
// Try locking if nobody is being starved.
match mutex
.state
.compare_exchange(0, 1, Ordering::Acquire, Ordering::Acquire)
.unwrap_or_else(|x| x)
{
// Lock acquired!
0 => return Poll::Ready(self.take_mutex().unwrap()),
// Lock is held and nobody is starved.
1 => {}
// Somebody is starved.
_ => {
// Notify the first listener in line because we probably received a
// notification that was meant for a starved task.
mutex.lock_ops.notify(1);
break;
}
}
// If waiting for too long, fall back to a fairer locking strategy that will prevent
// newer lock operations from starving us forever.
#[cfg(all(feature = "std", not(target_family = "wasm")))]
if start.elapsed() > Duration::from_micros(500) {
break;
}
}
}
// Increment the number of starved lock operations.
if mutex.state.fetch_add(2, Ordering::Release) > usize::MAX / 2 {
// In case of potential overflow, abort.
crate::abort();
}
// Indicate that we are now starving and will use a fairer locking strategy.
*this.starved = true;
}
// Fairer locking loop.
loop {
if this.listener.is_none() {
// Start listening for events.
*this.listener = Some(mutex.lock_ops.listen());
// Try locking if nobody else is being starved.
match mutex
.state
.compare_exchange(2, 2 | 1, Ordering::Acquire, Ordering::Acquire)
.unwrap_or_else(|x| x)
{
// Lock acquired!
2 => return Poll::Ready(self.take_mutex().unwrap()),
// Lock is held by someone.
s if s % 2 == 1 => {}
// Lock is available.
_ => {
// Be fair: notify the first listener and then go wait in line.
mutex.lock_ops.notify(1);
}
}
} else {
// Wait for a notification.
ready!(strategy.poll(this.listener, context));
// Try acquiring the lock without waiting for others.
if mutex.state.fetch_or(1, Ordering::Acquire) % 2 == 0 {
return Poll::Ready(self.take_mutex().unwrap());
}
}
}
}
}
/// A guard that releases the mutex when dropped.
#[clippy::has_significant_drop]
pub struct MutexGuard<'a, T: ?Sized>(&'a Mutex<T>);
unsafe impl<T: Send + ?Sized> Send for MutexGuard<'_, T> {}
unsafe impl<T: Sync + ?Sized> Sync for MutexGuard<'_, T> {}
impl<'a, T: ?Sized> MutexGuard<'a, T> {
/// Returns a reference to the mutex a guard came from.
///
/// # Examples
///
/// ```
/// # futures_lite::future::block_on(async {
/// use async_lock::{Mutex, MutexGuard};
///
/// let mutex = Mutex::new(10i32);
/// let guard = mutex.lock().await;
/// dbg!(MutexGuard::source(&guard));
/// # })
/// ```
pub fn source(guard: &MutexGuard<'a, T>) -> &'a Mutex<T> {
guard.0
}
}
impl<T: ?Sized> Drop for MutexGuard<'_, T> {
#[inline]
fn drop(&mut self) {
// SAFETY: we are dropping the mutex guard, therefore unlocking the mutex.
unsafe {
self.0.unlock_unchecked();
}
}
}
impl<T: fmt::Debug + ?Sized> fmt::Debug for MutexGuard<'_, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
impl<T: fmt::Display + ?Sized> fmt::Display for MutexGuard<'_, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
(**self).fmt(f)
}
}
impl<T: ?Sized> Deref for MutexGuard<'_, T> {
type Target = T;
fn deref(&self) -> &T {
unsafe { &*self.0.data.get() }
}
}
impl<T: ?Sized> DerefMut for MutexGuard<'_, T> {
fn deref_mut(&mut self) -> &mut T {
unsafe { &mut *self.0.data.get() }
}
}
/// An owned guard that releases the mutex when dropped.
#[clippy::has_significant_drop]
pub struct MutexGuardArc<T: ?Sized>(Arc<Mutex<T>>);
unsafe impl<T: Send + ?Sized> Send for MutexGuardArc<T> {}
unsafe impl<T: Sync + ?Sized> Sync for MutexGuardArc<T> {}
impl<T: ?Sized> MutexGuardArc<T> {
/// Returns a reference to the mutex a guard came from.
///
/// # Examples
///
/// ```
/// # futures_lite::future::block_on(async {
/// use async_lock::{Mutex, MutexGuardArc};
/// use std::sync::Arc;
///
/// let mutex = Arc::new(Mutex::new(10i32));
/// let guard = mutex.lock_arc().await;
/// dbg!(MutexGuardArc::source(&guard));
/// # })
/// ```
pub fn source(guard: &Self) -> &Arc<Mutex<T>>
where
// Required because `MutexGuardArc` implements `Sync` regardless of whether `T` is `Send`,
// but this method allows dropping `T` from a different thread than it was created in.
T: Send,
{
&guard.0
}
}
impl<T: ?Sized> Drop for MutexGuardArc<T> {
#[inline]
fn drop(&mut self) {
// SAFETY: we are dropping the mutex guard, therefore unlocking the mutex.
unsafe {
self.0.unlock_unchecked();
}
}
}
impl<T: fmt::Debug + ?Sized> fmt::Debug for MutexGuardArc<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
impl<T: fmt::Display + ?Sized> fmt::Display for MutexGuardArc<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
(**self).fmt(f)
}
}
impl<T: ?Sized> Deref for MutexGuardArc<T> {
type Target = T;
fn deref(&self) -> &T {
unsafe { &*self.0.data.get() }
}
}
impl<T: ?Sized> DerefMut for MutexGuardArc<T> {
fn deref_mut(&mut self) -> &mut T {
unsafe { &mut *self.0.data.get() }
}
}

814
vendor/async-lock/src/once_cell.rs vendored Normal file

@@ -0,0 +1,814 @@
use core::cell::UnsafeCell;
use core::convert::Infallible;
use core::fmt;
use core::future::Future;
use core::mem::{forget, MaybeUninit};
use core::ptr;
use crate::sync::atomic::{AtomicUsize, Ordering};
#[cfg(not(loom))]
use crate::sync::WithMut;
#[cfg(all(feature = "std", not(target_family = "wasm")))]
use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};
use event_listener::Event;
use event_listener_strategy::{NonBlocking, Strategy};
#[cfg(all(feature = "std", not(target_family = "wasm")))]
use event_listener::Listener;
/// The current state of the `OnceCell`.
#[derive(Copy, Clone, PartialEq, Eq)]
#[repr(usize)]
enum State {
/// The `OnceCell` is uninitialized.
Uninitialized = 0,
/// The `OnceCell` is being initialized.
Initializing = 1,
/// The `OnceCell` is initialized.
Initialized = 2,
}
impl From<usize> for State {
fn from(val: usize) -> Self {
match val {
0 => State::Uninitialized,
1 => State::Initializing,
2 => State::Initialized,
_ => unreachable!("Invalid state"),
}
}
}
impl From<State> for usize {
fn from(val: State) -> Self {
val as usize
}
}
/// A memory location that can be written to at most once.
///
/// A `OnceCell` can be used to store a single value, and only once. However,
/// once the value is stored, it can be accessed directly through a reference
/// instead of needing an RAII guard like `Mutex` or `RwLock`.
///
/// # Examples
///
/// This structure is useful for a variety of patterns, most notably for one-time
/// initialization.
///
/// ```rust
/// use async_lock::OnceCell;
///
/// # struct Foobar;
///
/// async fn very_expensive_initialization() -> Foobar {
/// // Imagine this is very expensive to initialize,
/// // for instance, it requires a network request or
/// // a database call.
/// # Foobar
/// }
///
/// struct LazyFoobar {
/// inner: OnceCell<Foobar>,
/// }
///
/// impl LazyFoobar {
/// fn new() -> Self {
/// Self {
/// inner: OnceCell::new(),
/// }
/// }
///
/// async fn load(&self) -> &Foobar {
/// self.inner.get_or_init(|| async {
/// very_expensive_initialization().await
/// }).await
/// }
/// }
/// ```
pub struct OnceCell<T> {
/// Listeners waiting for a chance to initialize the cell.
///
/// These are the users of get_or_init() and similar functions.
active_initializers: Event,
/// Listeners waiting for the cell to be initialized.
///
/// These are the users of wait().
passive_waiters: Event,
/// State associated with the cell.
state: AtomicUsize,
/// The value of the cell.
value: UnsafeCell<MaybeUninit<T>>,
}
unsafe impl<T: Send> Send for OnceCell<T> {}
unsafe impl<T: Send + Sync> Sync for OnceCell<T> {}
impl<T> OnceCell<T> {
const_fn! {
const_if: #[cfg(not(loom))];
/// Create a new, uninitialized `OnceCell`.
///
/// # Example
///
/// ```rust
/// use async_lock::OnceCell;
///
/// let cell = OnceCell::new();
/// # cell.set_blocking(1);
/// ```
pub const fn new() -> Self {
Self {
active_initializers: Event::new(),
passive_waiters: Event::new(),
state: AtomicUsize::new(State::Uninitialized as _),
value: UnsafeCell::new(MaybeUninit::uninit()),
}
}
}
/// Tell whether or not the cell is initialized.
///
/// This may not always be accurate. For instance, it is possible for
/// another thread to initialize the cell between the time when this
/// function is called and the time when the result is actually used.
///
/// # Example
///
/// ```rust
/// use async_lock::OnceCell;
///
/// # futures_lite::future::block_on(async {
/// let cell = OnceCell::new();
/// assert!(!cell.is_initialized());
/// cell.set(1).await;
/// assert!(cell.is_initialized());
/// # });
/// ```
pub fn is_initialized(&self) -> bool {
State::from(self.state.load(Ordering::Acquire)) == State::Initialized
}
/// Get a reference to the inner value, or `None` if the value
/// is not yet initialized.
///
/// # Example
///
/// ```rust
/// use async_lock::OnceCell;
///
/// # futures_lite::future::block_on(async {
/// let cell = OnceCell::new();
/// assert!(cell.get().is_none());
/// cell.set(1).await;
/// assert_eq!(cell.get(), Some(&1));
/// # });
/// ```
pub fn get(&self) -> Option<&T> {
if self.is_initialized() {
// SAFETY: We know that the value is initialized, so it is safe to
// read it.
Some(unsafe { self.get_unchecked() })
} else {
None
}
}
/// Get a mutable reference to the inner value, or `None` if the value
/// is not yet initialized.
///
/// This function is useful for initializing the value inside the cell
/// when we still have a mutable reference to the cell.
///
/// # Example
///
/// ```rust
/// use async_lock::OnceCell;
///
/// # futures_lite::future::block_on(async {
/// let mut cell = OnceCell::new();
/// assert!(cell.get_mut().is_none());
/// cell.set(1).await;
/// assert_eq!(cell.get_mut(), Some(&mut 1));
/// *cell.get_mut().unwrap() = 2;
/// assert_eq!(cell.get(), Some(&2));
/// # });
/// ```
pub fn get_mut(&mut self) -> Option<&mut T> {
self.state.with_mut(|state| {
if State::from(*state) == State::Initialized {
// SAFETY: We know that the value is initialized, so it is safe to
// read it.
Some(unsafe { &mut *self.value.get().cast() })
} else {
None
}
})
}
/// Take the value out of this `OnceCell`, moving it back to the uninitialized
/// state.
///
/// # Example
///
/// ```rust
/// use async_lock::OnceCell;
///
/// # futures_lite::future::block_on(async {
/// let mut cell = OnceCell::new();
/// cell.set(1).await;
/// assert_eq!(cell.take(), Some(1));
/// assert!(!cell.is_initialized());
/// # });
/// ```
pub fn take(&mut self) -> Option<T> {
self.state.with_mut(|state| {
if State::from(*state) == State::Initialized {
// SAFETY: We know that the value is initialized, so it is safe to
// read it.
let value = unsafe { ptr::read(self.value.get().cast()) };
*state = State::Uninitialized.into();
Some(value)
} else {
None
}
})
}
/// Convert this `OnceCell` into the inner value, if it is initialized.
///
/// # Example
///
/// ```rust
/// use async_lock::OnceCell;
///
/// # futures_lite::future::block_on(async {
/// let cell = OnceCell::new();
/// cell.set(1).await;
/// assert_eq!(cell.into_inner(), Some(1));
/// # });
/// ```
pub fn into_inner(mut self) -> Option<T> {
self.take()
}
/// Wait for the cell to be initialized, and then return a reference to the
/// inner value.
///
/// # Example
///
/// ```rust
/// use async_lock::OnceCell;
/// use std::sync::Arc;
/// use std::time::Duration;
/// use std::thread::{sleep, spawn};
///
/// let cell = Arc::new(OnceCell::new());
/// let cell2 = cell.clone();
///
/// spawn(move || {
/// sleep(Duration::from_millis(5));
/// cell2.set_blocking(1);
/// });
///
/// # futures_lite::future::block_on(async {
/// assert_eq!(cell.wait().await, &1);
/// # });
/// ```
pub async fn wait(&self) -> &T {
// Fast path: see if the value is already initialized.
if let Some(value) = self.get() {
return value;
}
// Slow path: wait for the value to be initialized.
event_listener::listener!(self.passive_waiters => listener);
// Try again.
if let Some(value) = self.get() {
return value;
}
listener.await;
debug_assert!(self.is_initialized());
// SAFETY: We know that the value is initialized, so it is safe to
// read it.
unsafe { self.get_unchecked() }
}
/// Wait for the cell to be initialized, and then return a reference to the
/// inner value.
///
/// # Blocking
///
/// In contrast to the `wait` method, this method blocks the current thread of
/// execution instead of awaiting.
///
/// This method should not be used in an asynchronous context. It is intended
/// to be used such that a `OnceCell` can be used in both asynchronous and synchronous contexts.
/// Calling this method in an asynchronous context may result in deadlocks.
///
/// # Example
///
/// ```rust
/// use async_lock::OnceCell;
/// use std::sync::Arc;
/// use std::time::Duration;
/// use std::thread::{sleep, spawn};
///
/// let cell = Arc::new(OnceCell::new());
/// let cell2 = cell.clone();
///
/// spawn(move || {
/// sleep(Duration::from_millis(5));
/// cell2.set_blocking(1);
/// });
///
/// assert_eq!(cell.wait_blocking(), &1);
/// ```
#[cfg(all(feature = "std", not(target_family = "wasm")))]
pub fn wait_blocking(&self) -> &T {
// Fast path: see if the value is already initialized.
if let Some(value) = self.get() {
return value;
}
// Slow path: wait for the value to be initialized.
event_listener::listener!(self.passive_waiters => listener);
// Try again.
if let Some(value) = self.get() {
return value;
}
listener.wait();
debug_assert!(self.is_initialized());
// SAFETY: We know that the value is initialized, so it is safe to
// read it.
unsafe { self.get_unchecked() }
}
/// Either get the value or initialize it with the given closure.
///
/// The cell will not be initialized if the closure returns an error.
///
/// # Example
///
/// ```rust
/// use async_lock::OnceCell;
/// #
/// # // Prevent explicit type errors.
/// # fn _explicit(_: &Result<&i32, ()>) {}
///
/// # futures_lite::future::block_on(async {
/// let cell = OnceCell::new();
///
/// let result = cell.get_or_try_init(|| async { Err(()) }).await;
/// assert!(result.is_err());
///
/// let result = cell.get_or_try_init(|| async { Ok(1) }).await;
/// # _explicit(&result);
/// assert_eq!(result.unwrap(), &1);
///
/// let result = cell.get_or_try_init(|| async { Err(()) }).await;
///
/// assert_eq!(result.unwrap(), &1);
/// # });
/// ```
pub async fn get_or_try_init<E, Fut: Future<Output = Result<T, E>>>(
&self,
closure: impl FnOnce() -> Fut,
) -> Result<&T, E> {
// Fast path: see if the value is already initialized.
if let Some(value) = self.get() {
return Ok(value);
}
// Slow path: initialize the value.
self.initialize_or_wait(closure, &mut NonBlocking::default())
.await?;
debug_assert!(self.is_initialized());
// SAFETY: We know that the value is initialized, so it is safe to
// read it.
Ok(unsafe { self.get_unchecked() })
}
/// Either get the value or initialize it with the given closure.
///
/// The cell will not be initialized if the closure returns an error.
///
/// # Blocking
///
/// In contrast to the `get_or_try_init` method, this method blocks the current thread of
/// execution instead of awaiting.
///
/// This method should not be used in an asynchronous context. It is intended
/// to be used such that a `OnceCell` can be used in both asynchronous and synchronous contexts.
/// Calling this method in an asynchronous context may result in deadlocks.
///
/// # Example
///
/// ```rust
/// use async_lock::OnceCell;
/// #
/// # // Prevent explicit type errors.
/// # fn _explicit(_: &Result<&i32, ()>) {}
///
/// let cell = OnceCell::new();
///
/// let result = cell.get_or_try_init_blocking(|| Err(()));
/// assert!(result.is_err());
///
/// let result = cell.get_or_try_init_blocking(|| Ok(1));
/// # _explicit(&result);
/// assert_eq!(result.unwrap(), &1);
///
/// let result = cell.get_or_try_init_blocking(|| Err(()));
///
/// assert_eq!(result.unwrap(), &1);
/// ```
#[cfg(all(feature = "std", not(target_family = "wasm")))]
pub fn get_or_try_init_blocking<E>(
&self,
closure: impl FnOnce() -> Result<T, E>,
) -> Result<&T, E> {
// Fast path: see if the value is already initialized.
if let Some(value) = self.get() {
return Ok(value);
}
// Slow path: initialize the value.
// The futures provided should never block, so we can use `now_or_never`.
now_or_never(self.initialize_or_wait(
move || core::future::ready(closure()),
&mut event_listener_strategy::Blocking::default(),
))?;
debug_assert!(self.is_initialized());
// SAFETY: We know that the value is initialized, so it is safe to
// read it.
Ok(unsafe { self.get_unchecked() })
}
/// Either get the value or initialize it with the given closure.
///
/// Many tasks may call this function, but the value will only be set once
/// and only one closure will be invoked.
///
/// # Example
///
/// ```rust
/// use async_lock::OnceCell;
///
/// # futures_lite::future::block_on(async {
/// let cell = OnceCell::new();
/// assert_eq!(cell.get_or_init(|| async { 1 }).await, &1);
/// assert_eq!(cell.get_or_init(|| async { 2 }).await, &1);
/// # });
/// ```
pub async fn get_or_init<Fut: Future<Output = T>>(&self, closure: impl FnOnce() -> Fut) -> &T {
// false positive: https://github.com/rust-lang/rust/issues/129352
#[allow(unreachable_patterns)]
match self
.get_or_try_init(move || async move {
let result: Result<T, Infallible> = Ok(closure().await);
result
})
.await
{
Ok(value) => value,
Err(infallible) => match infallible {},
}
}
/// Either get the value or initialize it with the given closure.
///
/// Many tasks may call this function, but the value will only be set once
/// and only one closure will be invoked.
///
/// # Blocking
///
/// In contrast to the `get_or_init` method, this method blocks the current thread of
/// execution instead of awaiting.
///
/// This method should not be used in an asynchronous context. It is intended
/// to be used such that a `OnceCell` can be used in both asynchronous and synchronous contexts.
/// Calling this method in an asynchronous context may result in deadlocks.
///
/// # Example
///
/// ```rust
/// use async_lock::OnceCell;
///
/// let cell = OnceCell::new();
/// assert_eq!(cell.get_or_init_blocking(|| 1), &1);
/// assert_eq!(cell.get_or_init_blocking(|| 2), &1);
/// ```
#[cfg(all(feature = "std", not(target_family = "wasm")))]
pub fn get_or_init_blocking(&self, closure: impl FnOnce() -> T + Unpin) -> &T {
let result = self.get_or_try_init_blocking(move || {
let result: Result<T, Infallible> = Ok(closure());
result
});
// false positive: https://github.com/rust-lang/rust/issues/129352
#[allow(unreachable_patterns)]
match result {
Ok(value) => value,
Err(infallible) => match infallible {},
}
}
/// Try to set the value of the cell.
///
/// If the cell is already initialized, this method returns the original
/// value back.
///
/// # Example
///
/// ```rust
/// use async_lock::OnceCell;
///
/// # futures_lite::future::block_on(async {
/// let cell = OnceCell::new();
///
/// assert_eq!(cell.set(1).await, Ok(&1));
/// assert_eq!(cell.get(), Some(&1));
/// assert_eq!(cell.set(2).await, Err(2));
/// # });
/// ```
pub async fn set(&self, value: T) -> Result<&T, T> {
let mut value = Some(value);
self.get_or_init(|| async { value.take().unwrap() }).await;
match value {
Some(value) => Err(value),
None => {
// SAFETY: value was taken, so we are initialized
Ok(unsafe { self.get_unchecked() })
}
}
}
/// Try to set the value of the cell.
///
/// If the cell is already initialized, this method returns the original
/// value back.
///
/// # Blocking
///
/// In contrast to the `set` method, this method blocks the current thread of
/// execution instead of awaiting.
///
/// This method should not be used in an asynchronous context. It is intended
/// to be used such that a `OnceCell` can be used in both asynchronous and synchronous contexts.
/// Calling this method in an asynchronous context may result in deadlocks.
///
/// # Example
///
/// ```rust
/// use async_lock::OnceCell;
///
/// let cell = OnceCell::new();
///
/// assert_eq!(cell.set_blocking(1), Ok(&1));
/// assert_eq!(cell.get(), Some(&1));
/// assert_eq!(cell.set_blocking(2), Err(2));
/// ```
#[cfg(all(feature = "std", not(target_family = "wasm")))]
pub fn set_blocking(&self, value: T) -> Result<&T, T> {
let mut value = Some(value);
self.get_or_init_blocking(|| value.take().unwrap());
match value {
Some(value) => Err(value),
None => {
// SAFETY: value was taken, so we are initialized
Ok(unsafe { self.get_unchecked() })
}
}
}
/// Wait for the cell to be initialized, optionally using a closure
/// to initialize the cell if it is not initialized yet.
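///
/// The state machine moves `Uninitialized -> Initializing -> Initialized`. The task
/// that wins the compare-exchange into `Initializing` runs the closure; everyone else
/// listens on `active_initializers`. On error or panic, the `Guard` below resets the
/// state to `Uninitialized` and notifies the next waiting initializer.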
#[cold]
async fn initialize_or_wait<E, Fut: Future<Output = Result<T, E>>, F: FnOnce() -> Fut>(
&self,
closure: F,
strategy: &mut impl for<'a> Strategy<'a>,
) -> Result<(), E> {
// The event listener we're currently waiting on.
let mut event_listener = None;
let mut closure = Some(closure);
loop {
// Check the current state of the cell.
let state = self.state.load(Ordering::Acquire);
// Determine what we should do based on our state.
match state.into() {
State::Initialized => {
// The cell is initialized now, so we can return.
return Ok(());
}
State::Initializing => {
// The cell is currently initializing, or the cell is uninitialized
// but we do not have the ability to initialize it.
//
// We need to wait for the initialization to complete.
if let Some(listener) = event_listener.take() {
strategy.wait(listener).await;
} else {
event_listener = Some(self.active_initializers.listen());
}
}
State::Uninitialized => {
// Try to move the cell into the initializing state.
if self
.state
.compare_exchange(
State::Uninitialized.into(),
State::Initializing.into(),
Ordering::AcqRel,
Ordering::Acquire,
)
.is_err()
{
// The cell was initialized while we were trying to
// initialize it.
continue;
}
// Now that we have an exclusive lock on the cell's value,
// we can try to initialize it.
let _guard = Guard(self);
let initializer = closure.take().unwrap();
match (initializer)().await {
Ok(value) => {
// Write the value into the cell and update the state.
unsafe {
ptr::write(self.value.get().cast(), value);
}
forget(_guard);
self.state
.store(State::Initialized.into(), Ordering::Release);
// Notify the listeners that the value is initialized.
self.active_initializers.notify_additional(usize::MAX);
self.passive_waiters.notify_additional(usize::MAX);
return Ok(());
}
Err(err) => {
// Update the state to indicate that the value is
// uninitialized.
drop(_guard);
return Err(err);
}
}
}
}
}
/// Set the cell's state back to `UNINITIALIZED` on drop.
///
/// If the closure panics, this ensures that the cell's state is set back to
/// `UNINITIALIZED` and that the next listener is notified.
struct Guard<'a, T>(&'a OnceCell<T>);
impl<T> Drop for Guard<'_, T> {
fn drop(&mut self) {
self.0
.state
.store(State::Uninitialized.into(), Ordering::Release);
// Notify the next initializer that it's their turn.
self.0.active_initializers.notify(1);
}
}
}
/// Get a reference to the inner value.
///
/// # Safety
///
/// The caller must ensure that the cell is initialized.
///
/// # Example
///
/// ```rust
/// use async_lock::OnceCell;
///
/// # futures_lite::future::block_on(async {
/// let cell = OnceCell::new();
/// cell.set(1).await;
///
/// // SAFETY: We know that the value is initialized, so it is safe to
/// // read it.
/// assert_eq!(unsafe { cell.get_unchecked() }, &1);
/// # });
/// ```
pub unsafe fn get_unchecked(&self) -> &T {
// SAFETY: The caller asserts that the value is initialized
&*self.value.get().cast()
}
}
impl<T> From<T> for OnceCell<T> {
/// Create a new, initialized `OnceCell` from an existing value.
///
/// # Example
///
/// ```rust
/// use async_lock::OnceCell;
///
/// let cell = OnceCell::from(42);
/// assert_eq!(cell.get(), Some(&42));
/// ```
fn from(value: T) -> Self {
Self {
active_initializers: Event::new(),
passive_waiters: Event::new(),
state: AtomicUsize::new(State::Initialized.into()),
value: UnsafeCell::new(MaybeUninit::new(value)),
}
}
}
impl<T: fmt::Debug> fmt::Debug for OnceCell<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
struct Inner<'a, T>(&'a OnceCell<T>);
impl<T: fmt::Debug> fmt::Debug for Inner<'_, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.0.state.load(Ordering::Acquire).into() {
State::Uninitialized => f.write_str("<uninitialized>"),
State::Initializing => f.write_str("<initializing>"),
State::Initialized => {
// SAFETY: "value" is initialized.
let value = unsafe { self.0.get_unchecked() };
fmt::Debug::fmt(value, f)
}
}
}
}
f.debug_tuple("OnceCell").field(&Inner(self)).finish()
}
}
impl<T> Drop for OnceCell<T> {
fn drop(&mut self) {
self.state.with_mut(|state| {
if State::from(*state) == State::Initialized {
// SAFETY: We know that the value is initialized, so it is safe to
// drop it.
unsafe { self.value.get().cast::<T>().drop_in_place() }
}
});
}
}
impl<T> Default for OnceCell<T> {
// Calls `OnceCell::new`.
#[inline]
fn default() -> Self {
Self::new()
}
}
/// Either return the result of a future now, or panic.
#[cfg(all(feature = "std", not(target_family = "wasm")))]
fn now_or_never<T>(f: impl Future<Output = T>) -> T {
const NOOP_WAKER: RawWakerVTable = RawWakerVTable::new(clone, wake, wake_by_ref, drop);
unsafe fn wake(_: *const ()) {}
unsafe fn wake_by_ref(_: *const ()) {}
unsafe fn clone(_: *const ()) -> RawWaker {
RawWaker::new(ptr::null(), &NOOP_WAKER)
}
unsafe fn drop(_: *const ()) {}
pin!(f);
let waker = unsafe { Waker::from_raw(RawWaker::new(ptr::null(), &NOOP_WAKER)) };
// Poll the future exactly once.
let mut cx = Context::from_waker(&waker);
match f.poll(&mut cx) {
Poll::Ready(value) => value,
Poll::Pending => unreachable!("future not ready"),
}
}

1468
vendor/async-lock/src/rwlock.rs vendored Normal file

File diff suppressed because it is too large.

485
vendor/async-lock/src/rwlock/futures.rs vendored Normal file

@@ -0,0 +1,485 @@
use core::fmt;
use core::mem::ManuallyDrop;
use core::pin::Pin;
use core::task::Poll;
use alloc::sync::Arc;
use super::raw::{RawRead, RawUpgradableRead, RawUpgrade, RawWrite};
use super::{
RwLock, RwLockReadGuard, RwLockReadGuardArc, RwLockUpgradableReadGuard,
RwLockUpgradableReadGuardArc, RwLockWriteGuard, RwLockWriteGuardArc,
};
use event_listener_strategy::{easy_wrapper, EventListenerFuture, Strategy};
easy_wrapper! {
/// The future returned by [`RwLock::read`].
pub struct Read<'a, T: ?Sized>(ReadInner<'a, T> => RwLockReadGuard<'a, T>);
#[cfg(all(feature = "std", not(target_family = "wasm")))]
pub(crate) wait();
}
pin_project_lite::pin_project! {
/// The future returned by [`RwLock::read`].
struct ReadInner<'a, T: ?Sized> {
// Raw read lock acquisition future, doesn't depend on `T`.
#[pin]
pub(super) raw: RawRead<'a>,
// Pointer to the value protected by the lock. Covariant in `T`.
pub(super) value: *const T,
}
}
unsafe impl<T: Sync + ?Sized> Send for ReadInner<'_, T> {}
unsafe impl<T: Sync + ?Sized> Sync for ReadInner<'_, T> {}
impl<'x, T: ?Sized> Read<'x, T> {
#[inline]
pub(super) fn new(raw: RawRead<'x>, value: *const T) -> Self {
Self::_new(ReadInner { raw, value })
}
}
impl<T: ?Sized> fmt::Debug for Read<'_, T> {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("Read { .. }")
}
}
impl<'a, T: ?Sized> EventListenerFuture for ReadInner<'a, T> {
type Output = RwLockReadGuard<'a, T>;
#[inline]
fn poll_with_strategy<'x, S: Strategy<'x>>(
self: Pin<&mut Self>,
strategy: &mut S,
cx: &mut S::Context,
) -> Poll<Self::Output> {
let mut this = self.project();
ready!(this.raw.as_mut().poll_with_strategy(strategy, cx));
Poll::Ready(RwLockReadGuard {
lock: this.raw.lock,
value: *this.value,
})
}
}
easy_wrapper! {
/// The future returned by [`RwLock::read_arc`].
pub struct ReadArc<'a, T>(ReadArcInner<'a, T> => RwLockReadGuardArc<T>);
#[cfg(all(feature = "std", not(target_family = "wasm")))]
pub(crate) wait();
}
pin_project_lite::pin_project! {
/// The future returned by [`RwLock::read_arc`].
struct ReadArcInner<'a, T> {
// Raw read lock acquisition future, doesn't depend on `T`.
#[pin]
pub(super) raw: RawRead<'a>,
// FIXME: Could be covariant in T
pub(super) lock: &'a Arc<RwLock<T>>,
}
}
unsafe impl<T: Send + Sync> Send for ReadArcInner<'_, T> {}
unsafe impl<T: Send + Sync> Sync for ReadArcInner<'_, T> {}
impl<'x, T> ReadArc<'x, T> {
#[inline]
pub(super) fn new(raw: RawRead<'x>, lock: &'x Arc<RwLock<T>>) -> Self {
Self::_new(ReadArcInner { raw, lock })
}
}
impl<T> fmt::Debug for ReadArc<'_, T> {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("ReadArc { .. }")
}
}
impl<T> EventListenerFuture for ReadArcInner<'_, T> {
type Output = RwLockReadGuardArc<T>;
#[inline]
fn poll_with_strategy<'x, S: Strategy<'x>>(
self: Pin<&mut Self>,
strategy: &mut S,
cx: &mut S::Context,
) -> Poll<Self::Output> {
let mut this = self.project();
ready!(this.raw.as_mut().poll_with_strategy(strategy, cx));
// SAFETY: we just acquired a read lock
Poll::Ready(unsafe { RwLockReadGuardArc::from_arc(this.lock.clone()) })
}
}
easy_wrapper! {
/// The future returned by [`RwLock::upgradable_read`].
pub struct UpgradableRead<'a, T: ?Sized>(
UpgradableReadInner<'a, T> => RwLockUpgradableReadGuard<'a, T>
);
#[cfg(all(feature = "std", not(target_family = "wasm")))]
pub(crate) wait();
}
pin_project_lite::pin_project! {
/// The future returned by [`RwLock::upgradable_read`].
struct UpgradableReadInner<'a, T: ?Sized> {
// Raw upgradable read lock acquisition future, doesn't depend on `T`.
#[pin]
pub(super) raw: RawUpgradableRead<'a>,
// Pointer to the value protected by the lock. Invariant in `T`
// as the upgradable lock could provide write access.
pub(super) value: *mut T,
}
}
unsafe impl<T: Send + Sync + ?Sized> Send for UpgradableReadInner<'_, T> {}
unsafe impl<T: Sync + ?Sized> Sync for UpgradableReadInner<'_, T> {}
impl<'x, T: ?Sized> UpgradableRead<'x, T> {
#[inline]
pub(super) fn new(raw: RawUpgradableRead<'x>, value: *mut T) -> Self {
Self::_new(UpgradableReadInner { raw, value })
}
}
impl<T: ?Sized> fmt::Debug for UpgradableRead<'_, T> {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("UpgradableRead { .. }")
}
}
impl<'a, T: ?Sized> EventListenerFuture for UpgradableReadInner<'a, T> {
type Output = RwLockUpgradableReadGuard<'a, T>;
#[inline]
fn poll_with_strategy<'x, S: Strategy<'x>>(
self: Pin<&mut Self>,
strategy: &mut S,
cx: &mut S::Context,
) -> Poll<Self::Output> {
let mut this = self.project();
ready!(this.raw.as_mut().poll_with_strategy(strategy, cx));
Poll::Ready(RwLockUpgradableReadGuard {
lock: this.raw.lock,
value: *this.value,
})
}
}
easy_wrapper! {
/// The future returned by [`RwLock::upgradable_read_arc`].
pub struct UpgradableReadArc<'a, T: ?Sized>(
UpgradableReadArcInner<'a, T> => RwLockUpgradableReadGuardArc<T>
);
#[cfg(all(feature = "std", not(target_family = "wasm")))]
pub(crate) wait();
}
pin_project_lite::pin_project! {
/// The future returned by [`RwLock::upgradable_read_arc`].
struct UpgradableReadArcInner<'a, T: ?Sized> {
// Raw upgradable read lock acquisition future, doesn't depend on `T`.
#[pin]
pub(super) raw: RawUpgradableRead<'a>,
pub(super) lock: &'a Arc<RwLock<T>>,
}
}
unsafe impl<T: Send + Sync + ?Sized> Send for UpgradableReadArcInner<'_, T> {}
unsafe impl<T: Send + Sync + ?Sized> Sync for UpgradableReadArcInner<'_, T> {}
impl<'x, T: ?Sized> UpgradableReadArc<'x, T> {
#[inline]
pub(super) fn new(raw: RawUpgradableRead<'x>, lock: &'x Arc<RwLock<T>>) -> Self {
Self::_new(UpgradableReadArcInner { raw, lock })
}
}
impl<T: ?Sized> fmt::Debug for UpgradableReadArc<'_, T> {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("UpgradableReadArc { .. }")
}
}
impl<T: ?Sized> EventListenerFuture for UpgradableReadArcInner<'_, T> {
type Output = RwLockUpgradableReadGuardArc<T>;
#[inline]
fn poll_with_strategy<'x, S: Strategy<'x>>(
self: Pin<&mut Self>,
strategy: &mut S,
cx: &mut S::Context,
) -> Poll<Self::Output> {
let mut this = self.project();
ready!(this.raw.as_mut().poll_with_strategy(strategy, cx));
Poll::Ready(RwLockUpgradableReadGuardArc {
lock: this.lock.clone(),
})
}
}
easy_wrapper! {
/// The future returned by [`RwLock::write`].
pub struct Write<'a, T: ?Sized>(WriteInner<'a, T> => RwLockWriteGuard<'a, T>);
#[cfg(all(feature = "std", not(target_family = "wasm")))]
pub(crate) wait();
}
pin_project_lite::pin_project! {
/// The future returned by [`RwLock::write`].
struct WriteInner<'a, T: ?Sized> {
// Raw write lock acquisition future, doesn't depend on `T`.
#[pin]
pub(super) raw: RawWrite<'a>,
// Pointer to the value protected by the lock. Invariant in `T`.
pub(super) value: *mut T,
}
}
unsafe impl<T: Send + ?Sized> Send for WriteInner<'_, T> {}
unsafe impl<T: Sync + ?Sized> Sync for WriteInner<'_, T> {}
impl<'x, T: ?Sized> Write<'x, T> {
#[inline]
pub(super) fn new(raw: RawWrite<'x>, value: *mut T) -> Self {
Self::_new(WriteInner { raw, value })
}
}
impl<T: ?Sized> fmt::Debug for Write<'_, T> {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("Write { .. }")
}
}
impl<'a, T: ?Sized> EventListenerFuture for WriteInner<'a, T> {
type Output = RwLockWriteGuard<'a, T>;
#[inline]
fn poll_with_strategy<'x, S: Strategy<'x>>(
self: Pin<&mut Self>,
strategy: &mut S,
cx: &mut S::Context,
) -> Poll<Self::Output> {
let mut this = self.project();
ready!(this.raw.as_mut().poll_with_strategy(strategy, cx));
Poll::Ready(RwLockWriteGuard {
lock: this.raw.lock,
value: *this.value,
})
}
}
easy_wrapper! {
/// The future returned by [`RwLock::write_arc`].
pub struct WriteArc<'a, T: ?Sized>(WriteArcInner<'a, T> => RwLockWriteGuardArc<T>);
#[cfg(all(feature = "std", not(target_family = "wasm")))]
pub(crate) wait();
}
pin_project_lite::pin_project! {
/// The future returned by [`RwLock::write_arc`].
struct WriteArcInner<'a, T: ?Sized> {
// Raw write lock acquisition future, doesn't depend on `T`.
#[pin]
pub(super) raw: RawWrite<'a>,
pub(super) lock: &'a Arc<RwLock<T>>,
}
}
unsafe impl<T: Send + Sync + ?Sized> Send for WriteArcInner<'_, T> {}
unsafe impl<T: Send + Sync + ?Sized> Sync for WriteArcInner<'_, T> {}
impl<'x, T: ?Sized> WriteArc<'x, T> {
#[inline]
pub(super) fn new(raw: RawWrite<'x>, lock: &'x Arc<RwLock<T>>) -> Self {
Self::_new(WriteArcInner { raw, lock })
}
}
impl<T: ?Sized> fmt::Debug for WriteArc<'_, T> {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("WriteArc { .. }")
}
}
impl<T: ?Sized> EventListenerFuture for WriteArcInner<'_, T> {
type Output = RwLockWriteGuardArc<T>;
#[inline]
fn poll_with_strategy<'x, S: Strategy<'x>>(
self: Pin<&mut Self>,
strategy: &mut S,
cx: &mut S::Context,
) -> Poll<Self::Output> {
let mut this = self.project();
ready!(this.raw.as_mut().poll_with_strategy(strategy, cx));
Poll::Ready(RwLockWriteGuardArc {
lock: this.lock.clone(),
})
}
}
easy_wrapper! {
/// The future returned by [`RwLockUpgradableReadGuard::upgrade`].
pub struct Upgrade<'a, T: ?Sized>(UpgradeInner<'a, T> => RwLockWriteGuard<'a, T>);
#[cfg(all(feature = "std", not(target_family = "wasm")))]
pub(crate) wait();
}
pin_project_lite::pin_project! {
/// The future returned by [`RwLockUpgradableReadGuard::upgrade`].
struct UpgradeInner<'a, T: ?Sized> {
// Raw read lock upgrade future, doesn't depend on `T`.
#[pin]
pub(super) raw: RawUpgrade<'a>,
// Pointer to the value protected by the lock. Invariant in `T`.
pub(super) value: *mut T,
}
}
unsafe impl<T: Send + ?Sized> Send for UpgradeInner<'_, T> {}
unsafe impl<T: Sync + ?Sized> Sync for UpgradeInner<'_, T> {}
impl<'x, T: ?Sized> Upgrade<'x, T> {
#[inline]
pub(super) fn new(raw: RawUpgrade<'x>, value: *mut T) -> Self {
Self::_new(UpgradeInner { raw, value })
}
}
impl<T: ?Sized> fmt::Debug for Upgrade<'_, T> {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Upgrade").finish()
}
}
impl<'a, T: ?Sized> EventListenerFuture for UpgradeInner<'a, T> {
type Output = RwLockWriteGuard<'a, T>;
#[inline]
fn poll_with_strategy<'x, S: Strategy<'x>>(
self: Pin<&mut Self>,
strategy: &mut S,
cx: &mut S::Context,
) -> Poll<Self::Output> {
let mut this = self.project();
let lock = ready!(this.raw.as_mut().poll_with_strategy(strategy, cx));
Poll::Ready(RwLockWriteGuard {
lock,
value: *this.value,
})
}
}
easy_wrapper! {
/// The future returned by [`RwLockUpgradableReadGuardArc::upgrade`].
pub struct UpgradeArc<T: ?Sized>(UpgradeArcInner<T> => RwLockWriteGuardArc<T>);
#[cfg(all(feature = "std", not(target_family = "wasm")))]
pub(crate) wait();
}
pin_project_lite::pin_project! {
/// The future returned by [`RwLockUpgradableReadGuardArc::upgrade`].
struct UpgradeArcInner<T: ?Sized> {
// Raw read lock upgrade future, doesn't depend on `T`.
// `'static` is a lie; this field actually references the
// `Arc` data. But since this struct also stores said `Arc`, we know
// this value will be alive as long as the struct is.
//
// Yes, one field of `UpgradeArcInner` is referencing another.
// Such self-references are usually not sound without pinning.
// However, in this case, there is an indirection via the heap;
// moving the `UpgradeArcInner` won't move the heap allocation of the
// `Arc`, so the reference inside `RawUpgrade` isn't invalidated.
#[pin]
pub(super) raw: ManuallyDrop<RawUpgrade<'static>>,
// Pointer to the value protected by the lock. Invariant in `T`.
pub(super) lock: ManuallyDrop<Arc<RwLock<T>>>,
}
impl<T: ?Sized> PinnedDrop for UpgradeArcInner<T> {
fn drop(this: Pin<&mut Self>) {
let this = this.project();
let is_ready = this.raw.is_ready();
// SAFETY: The drop impl for raw assumes that it is pinned.
unsafe {
ManuallyDrop::drop(this.raw.get_unchecked_mut());
}
if !is_ready {
// SAFETY: we drop the `Arc` (decrementing the reference count)
// only if this future was cancelled before returning an
// upgraded lock.
unsafe {
ManuallyDrop::drop(this.lock);
};
}
}
}
}
impl<T: ?Sized> UpgradeArc<T> {
#[inline]
pub(super) unsafe fn new(
raw: ManuallyDrop<RawUpgrade<'static>>,
lock: ManuallyDrop<Arc<RwLock<T>>>,
) -> Self {
Self::_new(UpgradeArcInner { raw, lock })
}
}
impl<T: ?Sized> fmt::Debug for UpgradeArc<T> {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("ArcUpgrade").finish()
}
}
impl<T: ?Sized> EventListenerFuture for UpgradeArcInner<T> {
type Output = RwLockWriteGuardArc<T>;
#[inline]
fn poll_with_strategy<'x, S: Strategy<'x>>(
self: Pin<&mut Self>,
strategy: &mut S,
cx: &mut S::Context,
) -> Poll<Self::Output> {
let this = self.project();
unsafe {
// SAFETY: Practically, this is a pin projection.
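// (`raw` is a structurally pinned field and `ManuallyDrop` is only a
// transparent wrapper used for the custom drop order above; the inner
// future is never moved out of it, so re-pinning it here is sound.)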
ready!(Pin::new_unchecked(&mut **this.raw.get_unchecked_mut())
.poll_with_strategy(strategy, cx));
}
Poll::Ready(RwLockWriteGuardArc {
lock: unsafe { ManuallyDrop::take(this.lock) },
})
}
}

580
vendor/async-lock/src/rwlock/raw.rs vendored Normal file
View File

@@ -0,0 +1,580 @@
//! Raw, unsafe reader-writer locking implementation,
//! doesn't depend on the data protected by the lock.
//! [`RwLock`](super::RwLock) is implemented in terms of this.
//!
//! Splitting the implementation this way allows instantiating
//! the locking code only once, and also lets us make
//! [`RwLockReadGuard`](super::RwLockReadGuard) covariant in `T`.
use core::marker::PhantomPinned;
use core::mem::forget;
use core::pin::Pin;
use core::task::Poll;
use crate::sync::atomic::{AtomicUsize, Ordering};
use event_listener::{Event, EventListener};
use event_listener_strategy::{EventListenerFuture, Strategy};
use crate::futures::Lock;
use crate::Mutex;
const WRITER_BIT: usize = 1;
const ONE_READER: usize = 2;
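// A worked example of the state encoding described below (illustrative):
//   0                               -> unlocked
//   WRITER_BIT (1)                  -> a writer holds the lock, no readers
//   3 * ONE_READER (6)              -> three active readers, no writer
//   3 * ONE_READER + WRITER_BIT (7) -> a writer is waiting for three
//                                      remaining readers to finish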
/// A "raw" RwLock that doesn't hold any data.
pub(super) struct RawRwLock {
/// Acquired by the writer.
mutex: Mutex<()>,
/// Event triggered when the last reader is dropped.
no_readers: Event,
/// Event triggered when the writer is dropped.
no_writer: Event,
/// Current state of the lock.
///
/// The least significant bit (`WRITER_BIT`) is set to 1 when a writer is holding the lock or
/// trying to acquire it.
///
/// The upper bits contain the number of currently active readers. Each active reader
/// increments the state by `ONE_READER`.
state: AtomicUsize,
}
impl RawRwLock {
const_fn! {
const_if: #[cfg(not(loom))];
#[inline]
pub(super) const fn new() -> Self {
RawRwLock {
mutex: Mutex::new(()),
no_readers: Event::new(),
no_writer: Event::new(),
state: AtomicUsize::new(0),
}
}
}
/// Returns `true` iff a read lock was successfully acquired.
pub(super) fn try_read(&self) -> bool {
let mut state = self.state.load(Ordering::Acquire);
loop {
// If there's a writer holding the lock or attempting to acquire it, we cannot acquire
// a read lock here.
if state & WRITER_BIT != 0 {
return false;
}
// Make sure the number of readers doesn't overflow.
if state > isize::MAX as usize {
crate::abort();
}
// Increment the number of readers.
match self.state.compare_exchange(
state,
state + ONE_READER,
Ordering::AcqRel,
Ordering::Acquire,
) {
Ok(_) => return true,
Err(s) => state = s,
}
}
}
#[inline]
pub(super) fn read(&self) -> RawRead<'_> {
RawRead {
lock: self,
state: self.state.load(Ordering::Acquire),
listener: None,
_pin: PhantomPinned,
}
}
/// Returns `true` iff an upgradable read lock was successfully acquired.
pub(super) fn try_upgradable_read(&self) -> bool {
// First try grabbing the mutex.
let lock = if let Some(lock) = self.mutex.try_lock() {
lock
} else {
return false;
};
forget(lock);
let mut state = self.state.load(Ordering::Acquire);
// Make sure the number of readers doesn't overflow.
if state > isize::MAX as usize {
crate::abort();
}
// Increment the number of readers.
loop {
match self.state.compare_exchange(
state,
state + ONE_READER,
Ordering::AcqRel,
Ordering::Acquire,
) {
Ok(_) => return true,
Err(s) => state = s,
}
}
}
#[inline]
pub(super) fn upgradable_read(&self) -> RawUpgradableRead<'_> {
RawUpgradableRead {
lock: self,
acquire: self.mutex.lock(),
}
}
/// Returns `true` iff a write lock was successfully acquired.
pub(super) fn try_write(&self) -> bool {
// First try grabbing the mutex.
let lock = if let Some(lock) = self.mutex.try_lock() {
lock
} else {
return false;
};
// If there are no readers, grab the write lock.
if self
.state
.compare_exchange(0, WRITER_BIT, Ordering::AcqRel, Ordering::Acquire)
.is_ok()
{
forget(lock);
true
} else {
drop(lock);
false
}
}
#[inline]
pub(super) fn write(&self) -> RawWrite<'_> {
RawWrite {
lock: self,
no_readers: None,
state: WriteState::Acquiring {
lock: self.mutex.lock(),
},
}
}
/// Returns `true` iff the upgradable read lock was successfully upgraded to a write lock.
///
/// # Safety
///
/// Caller must hold an upgradable read lock.
/// This will attempt to upgrade it to a write lock.
pub(super) unsafe fn try_upgrade(&self) -> bool {
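// The caller's own upgradable-read registration accounts for exactly
// `ONE_READER`, so the exchange below succeeds only when no other
// readers are active, trading that single reader for `WRITER_BIT`.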
self.state
.compare_exchange(ONE_READER, WRITER_BIT, Ordering::AcqRel, Ordering::Acquire)
.is_ok()
}
/// # Safety
///
/// Caller must hold an upgradable read lock.
/// This will upgrade it to a write lock.
pub(super) unsafe fn upgrade(&self) -> RawUpgrade<'_> {
// Set `WRITER_BIT` and decrement the number of readers at the same time.
self.state
.fetch_sub(ONE_READER - WRITER_BIT, Ordering::SeqCst);
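// For example, with three readers the state moves from `3 * ONE_READER` (6)
// to `2 * ONE_READER + WRITER_BIT` (5): the caller's own reader registration
// is traded for the writer bit in a single atomic step.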
RawUpgrade {
lock: Some(self),
listener: None,
_pin: PhantomPinned,
}
}
/// # Safety
///
/// Caller must hold an upgradable read lock.
/// This will downgrade it to a standard read lock.
#[inline]
pub(super) unsafe fn downgrade_upgradable_read(&self) {
self.mutex.unlock_unchecked();
}
/// # Safety
///
/// Caller must hold a write lock.
/// This will downgrade it to a read lock.
pub(super) unsafe fn downgrade_write(&self) {
// Atomically downgrade state.
self.state
.fetch_add(ONE_READER - WRITER_BIT, Ordering::SeqCst);
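// While the write lock is held no readers can register, so the state is
// exactly `WRITER_BIT` (1) before this call and `ONE_READER` (2) after it.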
// Release the writer mutex.
self.mutex.unlock_unchecked();
// Trigger the "no writer" event.
self.no_writer.notify(1);
}
/// # Safety
///
/// Caller must hold a write lock.
/// This will downgrade it to an upgradable read lock.
pub(super) unsafe fn downgrade_to_upgradable(&self) {
// Atomically downgrade state.
self.state
.fetch_add(ONE_READER - WRITER_BIT, Ordering::SeqCst);
}
/// # Safety
///
/// Caller must hold a read lock.
/// This will unlock that lock.
pub(super) unsafe fn read_unlock(&self) {
// Decrement the number of readers.
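// `fetch_sub` returns the *previous* value; masking off `WRITER_BIT` and
// comparing against `ONE_READER` detects "this was the last reader", even
// if a writer is already waiting with the bit set.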
if self.state.fetch_sub(ONE_READER, Ordering::SeqCst) & !WRITER_BIT == ONE_READER {
// If this was the last reader, trigger the "no readers" event.
self.no_readers.notify(1);
}
}
/// # Safety
///
/// Caller must hold an upgradable read lock.
/// This will unlock that lock.
pub(super) unsafe fn upgradable_read_unlock(&self) {
// Decrement the number of readers.
if self.state.fetch_sub(ONE_READER, Ordering::SeqCst) & !WRITER_BIT == ONE_READER {
// If this was the last reader, trigger the "no readers" event.
self.no_readers.notify(1);
}
// SAFETY: upgradable read guards acquire the writer mutex upon creation.
self.mutex.unlock_unchecked();
}
/// # Safety
///
/// Caller must hold a write lock.
/// This will unlock that lock.
pub(super) unsafe fn write_unlock(&self) {
// Unset `WRITER_BIT`.
self.state.fetch_and(!WRITER_BIT, Ordering::SeqCst);
// Trigger the "no writer" event.
self.no_writer.notify(1);
// Release the writer lock.
// SAFETY: `RwLockWriteGuard` always holds a lock on writer mutex.
self.mutex.unlock_unchecked();
}
}
pin_project_lite::pin_project! {
/// The future returned by [`RawRwLock::read`].
pub(super) struct RawRead<'a> {
// The lock that is being acquired.
pub(super) lock: &'a RawRwLock,
// The last-observed state of the lock.
state: usize,
// The listener for the "no writers" event.
listener: Option<EventListener>,
// Making this type `!Unpin` enables future optimizations.
#[pin]
_pin: PhantomPinned
}
}
impl EventListenerFuture for RawRead<'_> {
type Output = ();
fn poll_with_strategy<'x, S: Strategy<'x>>(
self: Pin<&mut Self>,
strategy: &mut S,
cx: &mut S::Context,
) -> Poll<()> {
let this = self.project();
loop {
if *this.state & WRITER_BIT == 0 {
// Make sure the number of readers doesn't overflow.
if *this.state > isize::MAX as usize {
crate::abort();
}
// If nobody is holding a write lock or attempting to acquire it, increment the
// number of readers.
match this.lock.state.compare_exchange(
*this.state,
*this.state + ONE_READER,
Ordering::AcqRel,
Ordering::Acquire,
) {
Ok(_) => return Poll::Ready(()),
Err(s) => *this.state = s,
}
} else {
// Start listening for "no writer" events.
let load_ordering = if this.listener.is_none() {
*this.listener = Some(this.lock.no_writer.listen());
// Make sure there really is no writer.
Ordering::SeqCst
} else {
// Wait for the writer to finish.
ready!(strategy.poll(this.listener, cx));
// Notify the next reader waiting in line.
this.lock.no_writer.notify(1);
// Check the state again.
Ordering::Acquire
};
// Reload the state.
*this.state = this.lock.state.load(load_ordering);
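// (Listen-then-recheck: re-reading the state right after registering the
// listener means a writer that unlocked between the check above and
// `listen()` is still observed, so its notification cannot be missed.)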
}
}
}
}
pin_project_lite::pin_project! {
/// The future returned by [`RawRwLock::upgradable_read`].
pub(super) struct RawUpgradableRead<'a> {
// The lock that is being acquired.
pub(super) lock: &'a RawRwLock,
// The mutex we are trying to acquire.
#[pin]
acquire: Lock<'a, ()>,
}
}
impl EventListenerFuture for RawUpgradableRead<'_> {
type Output = ();
fn poll_with_strategy<'x, S: Strategy<'x>>(
self: Pin<&mut Self>,
strategy: &mut S,
cx: &mut S::Context,
) -> Poll<()> {
let this = self.project();
// Acquire the mutex.
let mutex_guard = ready!(this.acquire.poll_with_strategy(strategy, cx));
forget(mutex_guard);
// Load the current state.
let mut state = this.lock.state.load(Ordering::Acquire);
// Make sure the number of readers doesn't overflow.
if state > isize::MAX as usize {
crate::abort();
}
// Increment the number of readers.
loop {
match this.lock.state.compare_exchange(
state,
state + ONE_READER,
Ordering::AcqRel,
Ordering::Acquire,
) {
Ok(_) => {
return Poll::Ready(());
}
Err(s) => state = s,
}
}
}
}
pin_project_lite::pin_project! {
/// The future returned by [`RawRwLock::write`].
pub(super) struct RawWrite<'a> {
// The lock that is being acquired.
pub(super) lock: &'a RawRwLock,
// Our listener for the "no readers" event.
no_readers: Option<EventListener>,
// Current state of this future.
#[pin]
state: WriteState<'a>,
}
impl PinnedDrop for RawWrite<'_> {
fn drop(this: Pin<&mut Self>) {
let this = this.project();
if matches!(this.state.project(), WriteStateProj::WaitingReaders) {
// SAFETY: in this state `WRITER_BIT` is set and the writer
// mutex is held, which is exactly what `write_unlock` expects.
unsafe {
this.lock.write_unlock();
}
}
}
}
}
pin_project_lite::pin_project! {
#[project = WriteStateProj]
#[project_replace = WriteStateProjReplace]
enum WriteState<'a> {
// We are currently acquiring the inner mutex.
Acquiring { #[pin] lock: Lock<'a, ()> },
// We are currently waiting for readers to finish.
WaitingReaders,
// The future has completed.
Acquired,
}
}
impl EventListenerFuture for RawWrite<'_> {
type Output = ();
fn poll_with_strategy<'x, S: Strategy<'x>>(
self: Pin<&mut Self>,
strategy: &mut S,
cx: &mut S::Context,
) -> Poll<()> {
let mut this = self.project();
loop {
match this.state.as_mut().project() {
WriteStateProj::Acquiring { lock } => {
// First grab the mutex.
let mutex_guard = ready!(lock.poll_with_strategy(strategy, cx));
forget(mutex_guard);
// Set `WRITER_BIT`; if this future is cancelled while still waiting for
// readers, the `PinnedDrop` impl above unsets it again.
// `new_state` is the state *after* the `fetch_or`: the bit cannot already
// be set here, because we hold the writer mutex.
let new_state = this.lock.state.fetch_or(WRITER_BIT, Ordering::SeqCst) + WRITER_BIT;
// If there were no readers, we just acquired the lock.
if new_state == WRITER_BIT {
this.state.as_mut().set(WriteState::Acquired);
return Poll::Ready(());
}
// Start waiting for the readers to finish.
*this.no_readers = Some(this.lock.no_readers.listen());
this.state.as_mut().set(WriteState::WaitingReaders);
}
WriteStateProj::WaitingReaders => {
let load_ordering = if this.no_readers.is_some() {
Ordering::Acquire
} else {
Ordering::SeqCst
};
// Check the state again.
if this.lock.state.load(load_ordering) == WRITER_BIT {
// We are the only ones holding the lock, return `Ready`.
this.state.as_mut().set(WriteState::Acquired);
return Poll::Ready(());
}
// Wait for the readers to finish.
if this.no_readers.is_none() {
// Register a listener.
*this.no_readers = Some(this.lock.no_readers.listen());
} else {
// Wait for the readers to finish.
ready!(strategy.poll(this.no_readers, cx));
};
}
WriteStateProj::Acquired => panic!("Write lock already acquired"),
}
}
}
}
pin_project_lite::pin_project! {
/// The future returned by [`RawRwLock::upgrade`].
pub(super) struct RawUpgrade<'a> {
lock: Option<&'a RawRwLock>,
// The event listener we are waiting on.
listener: Option<EventListener>,
// Keeping this future `!Unpin` enables future optimizations.
#[pin]
_pin: PhantomPinned
}
impl PinnedDrop for RawUpgrade<'_> {
fn drop(this: Pin<&mut Self>) {
let this = this.project();
if let Some(lock) = this.lock {
// SAFETY: we are dropping the future that would give us a write lock,
// so we don't need said lock anymore.
unsafe {
lock.write_unlock();
}
}
}
}
}
impl<'a> EventListenerFuture for RawUpgrade<'a> {
type Output = &'a RawRwLock;
fn poll_with_strategy<'x, S: Strategy<'x>>(
self: Pin<&mut Self>,
strategy: &mut S,
cx: &mut S::Context,
) -> Poll<&'a RawRwLock> {
let this = self.project();
let lock = this.lock.expect("cannot poll future after completion");
// If there are readers, we need to wait for them to finish.
loop {
let load_ordering = if this.listener.is_some() {
Ordering::Acquire
} else {
Ordering::SeqCst
};
// See if the number of readers is zero.
let state = lock.state.load(load_ordering);
if state == WRITER_BIT {
break;
}
// If there are readers, wait for them to finish.
if this.listener.is_none() {
// Start listening for "no readers" events.
*this.listener = Some(lock.no_readers.listen());
} else {
// Wait for the readers to finish.
ready!(strategy.poll(this.listener, cx));
};
}
// We are done.
Poll::Ready(this.lock.take().unwrap())
}
}
impl RawUpgrade<'_> {
/// Whether the future returned `Poll::Ready(..)` at some point.
#[inline]
pub(super) fn is_ready(&self) -> bool {
self.lock.is_none()
}
}

392
vendor/async-lock/src/semaphore.rs vendored Normal file
View File

@@ -0,0 +1,392 @@
use core::fmt;
use core::marker::PhantomPinned;
use core::mem;
use core::pin::Pin;
use core::task::Poll;
use crate::sync::atomic::{AtomicUsize, Ordering};
use alloc::sync::Arc;
use event_listener::{Event, EventListener};
use event_listener_strategy::{easy_wrapper, EventListenerFuture, Strategy};
/// A counter for limiting the number of concurrent operations.
#[derive(Debug)]
pub struct Semaphore {
count: AtomicUsize,
event: Event,
}
impl Semaphore {
const_fn! {
const_if: #[cfg(not(loom))];
/// Creates a new semaphore with a limit of `n` concurrent operations.
///
/// # Examples
///
/// ```
/// use async_lock::Semaphore;
///
/// let s = Semaphore::new(5);
/// ```
pub const fn new(n: usize) -> Semaphore {
Semaphore {
count: AtomicUsize::new(n),
event: Event::new(),
}
}
}
/// Attempts to get a permit for a concurrent operation.
///
/// If the permit could not be acquired at this time, then [`None`] is returned. Otherwise, a
/// guard is returned that releases the permit when dropped.
///
/// # Examples
///
/// ```
/// use async_lock::Semaphore;
///
/// let s = Semaphore::new(2);
///
/// let g1 = s.try_acquire().unwrap();
/// let g2 = s.try_acquire().unwrap();
///
/// assert!(s.try_acquire().is_none());
/// drop(g2);
/// assert!(s.try_acquire().is_some());
/// ```
pub fn try_acquire(&self) -> Option<SemaphoreGuard<'_>> {
let mut count = self.count.load(Ordering::Acquire);
loop {
if count == 0 {
return None;
}
match self.count.compare_exchange_weak(
count,
count - 1,
Ordering::AcqRel,
Ordering::Acquire,
) {
Ok(_) => return Some(SemaphoreGuard(self)),
Err(c) => count = c,
}
}
}
/// Waits for a permit for a concurrent operation.
///
/// Returns a guard that releases the permit when dropped.
///
/// # Examples
///
/// ```
/// # futures_lite::future::block_on(async {
/// use async_lock::Semaphore;
///
/// let s = Semaphore::new(2);
/// let guard = s.acquire().await;
/// # });
/// ```
pub fn acquire(&self) -> Acquire<'_> {
Acquire::_new(AcquireInner {
semaphore: self,
listener: None,
_pin: PhantomPinned,
})
}
/// Waits for a permit for a concurrent operation.
///
/// Returns a guard that releases the permit when dropped.
///
/// # Blocking
///
/// Rather than using asynchronous waiting, like the [`acquire`][Semaphore::acquire] method,
/// this method will block the current thread until the permit is acquired.
///
/// This method should not be used in an asynchronous context. It is intended to be
/// used in a way that a semaphore can be used in both asynchronous and synchronous contexts.
/// Calling this method in an asynchronous context may result in a deadlock.
///
/// # Examples
///
/// ```
/// use async_lock::Semaphore;
///
/// let s = Semaphore::new(2);
/// let guard = s.acquire_blocking();
/// ```
#[cfg(all(feature = "std", not(target_family = "wasm")))]
#[inline]
pub fn acquire_blocking(&self) -> SemaphoreGuard<'_> {
self.acquire().wait()
}
/// Attempts to get an owned permit for a concurrent operation.
///
/// If the permit could not be acquired at this time, then [`None`] is returned. Otherwise, an
/// owned guard is returned that releases the permit when dropped.
///
/// # Examples
///
/// ```
/// use async_lock::Semaphore;
/// use std::sync::Arc;
///
/// let s = Arc::new(Semaphore::new(2));
///
/// let g1 = s.try_acquire_arc().unwrap();
/// let g2 = s.try_acquire_arc().unwrap();
///
/// assert!(s.try_acquire_arc().is_none());
/// drop(g2);
/// assert!(s.try_acquire_arc().is_some());
/// ```
pub fn try_acquire_arc(self: &Arc<Self>) -> Option<SemaphoreGuardArc> {
let mut count = self.count.load(Ordering::Acquire);
loop {
if count == 0 {
return None;
}
match self.count.compare_exchange_weak(
count,
count - 1,
Ordering::AcqRel,
Ordering::Acquire,
) {
Ok(_) => return Some(SemaphoreGuardArc(Some(self.clone()))),
Err(c) => count = c,
}
}
}
/// Waits for an owned permit for a concurrent operation.
///
/// Returns a guard that releases the permit when dropped.
///
/// # Examples
///
/// ```
/// # futures_lite::future::block_on(async {
/// use async_lock::Semaphore;
/// use std::sync::Arc;
///
/// let s = Arc::new(Semaphore::new(2));
/// let guard = s.acquire_arc().await;
/// # });
/// ```
pub fn acquire_arc(self: &Arc<Self>) -> AcquireArc {
AcquireArc::_new(AcquireArcInner {
semaphore: self.clone(),
listener: None,
_pin: PhantomPinned,
})
}
/// Waits for an owned permit for a concurrent operation.
///
/// Returns a guard that releases the permit when dropped.
///
/// # Blocking
///
/// Rather than using asynchronous waiting, like the [`acquire_arc`][Semaphore::acquire_arc] method,
/// this method will block the current thread until the permit is acquired.
///
/// This method should not be used in an asynchronous context. It is intended to be
/// used in a way that a semaphore can be used in both asynchronous and synchronous contexts.
/// Calling this method in an asynchronous context may result in a deadlock.
///
/// # Examples
///
/// ```
/// use std::sync::Arc;
/// use async_lock::Semaphore;
///
/// let s = Arc::new(Semaphore::new(2));
/// let guard = s.acquire_arc_blocking();
/// ```
#[cfg(all(feature = "std", not(target_family = "wasm")))]
#[inline]
pub fn acquire_arc_blocking(self: &Arc<Self>) -> SemaphoreGuardArc {
self.acquire_arc().wait()
}
/// Adds `n` additional permits to the semaphore.
///
/// # Examples
///
/// ```
/// use async_lock::Semaphore;
///
/// # futures_lite::future::block_on(async {
/// let s = Semaphore::new(1);
///
/// let _guard = s.acquire().await;
/// assert!(s.try_acquire().is_none());
///
/// s.add_permits(2);
///
/// let _guard = s.acquire().await;
/// let _guard = s.acquire().await;
/// # });
/// ```
pub fn add_permits(&self, n: usize) {
self.count.fetch_add(n, Ordering::AcqRel);
self.event.notify(n);
}
}
easy_wrapper! {
/// The future returned by [`Semaphore::acquire`].
pub struct Acquire<'a>(AcquireInner<'a> => SemaphoreGuard<'a>);
#[cfg(all(feature = "std", not(target_family = "wasm")))]
pub(crate) wait();
}
pin_project_lite::pin_project! {
struct AcquireInner<'a> {
// The semaphore being acquired.
semaphore: &'a Semaphore,
// The listener waiting on the semaphore.
listener: Option<EventListener>,
// Keeping this future `!Unpin` enables future optimizations.
#[pin]
_pin: PhantomPinned
}
}
impl fmt::Debug for Acquire<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("Acquire { .. }")
}
}
impl<'a> EventListenerFuture for AcquireInner<'a> {
type Output = SemaphoreGuard<'a>;
fn poll_with_strategy<'x, S: Strategy<'x>>(
self: Pin<&mut Self>,
strategy: &mut S,
cx: &mut S::Context,
) -> Poll<Self::Output> {
let this = self.project();
loop {
match this.semaphore.try_acquire() {
Some(guard) => return Poll::Ready(guard),
None => {
// Wait on the listener.
if this.listener.is_none() {
*this.listener = Some(this.semaphore.event.listen());
} else {
ready!(strategy.poll(this.listener, cx));
}
}
}
}
}
}
easy_wrapper! {
/// The future returned by [`Semaphore::acquire_arc`].
pub struct AcquireArc(AcquireArcInner => SemaphoreGuardArc);
#[cfg(all(feature = "std", not(target_family = "wasm")))]
pub(crate) wait();
}
pin_project_lite::pin_project! {
struct AcquireArcInner {
// The semaphore being acquired.
semaphore: Arc<Semaphore>,
// The listener waiting on the semaphore.
listener: Option<EventListener>,
// Keeping this future `!Unpin` enables future optimizations.
#[pin]
_pin: PhantomPinned
}
}
impl fmt::Debug for AcquireArc {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("AcquireArc { .. }")
}
}
impl EventListenerFuture for AcquireArcInner {
type Output = SemaphoreGuardArc;
fn poll_with_strategy<'x, S: Strategy<'x>>(
self: Pin<&mut Self>,
strategy: &mut S,
cx: &mut S::Context,
) -> Poll<Self::Output> {
let this = self.project();
loop {
match this.semaphore.try_acquire_arc() {
Some(guard) => return Poll::Ready(guard),
None => {
// Wait on the listener.
if this.listener.is_none() {
*this.listener = Some(this.semaphore.event.listen());
} else {
ready!(strategy.poll(this.listener, cx));
}
}
}
}
}
}
/// A guard that releases the acquired permit.
#[clippy::has_significant_drop]
#[derive(Debug)]
pub struct SemaphoreGuard<'a>(&'a Semaphore);
impl SemaphoreGuard<'_> {
/// Drops the guard _without_ releasing the acquired permit.
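///
/// # Examples
///
/// A small illustration using only this module's API:
///
/// ```
/// use async_lock::Semaphore;
///
/// let s = Semaphore::new(1);
/// let guard = s.try_acquire().unwrap();
///
/// // The permit is leaked, so nothing can be acquired again until
/// // `add_permits` restores capacity.
/// guard.forget();
/// assert!(s.try_acquire().is_none());
///
/// s.add_permits(1);
/// assert!(s.try_acquire().is_some());
/// ```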
#[inline]
pub fn forget(self) {
mem::forget(self);
}
}
impl Drop for SemaphoreGuard<'_> {
fn drop(&mut self) {
self.0.count.fetch_add(1, Ordering::AcqRel);
self.0.event.notify(1);
}
}
/// An owned guard that releases the acquired permit.
#[clippy::has_significant_drop]
#[derive(Debug)]
pub struct SemaphoreGuardArc(Option<Arc<Semaphore>>);
impl SemaphoreGuardArc {
/// Drops the guard _without_ releasing the acquired permit.
/// (Will still decrement the `Arc` reference count.)
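///
/// # Examples
///
/// A small illustration mirroring [`SemaphoreGuard::forget`]:
///
/// ```
/// use async_lock::Semaphore;
/// use std::sync::Arc;
///
/// let s = Arc::new(Semaphore::new(1));
/// let guard = s.try_acquire_arc().unwrap();
///
/// // The permit is leaked; only the `Arc` reference is released.
/// guard.forget();
/// assert!(s.try_acquire_arc().is_none());
/// ```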
#[inline]
pub fn forget(mut self) {
// Drop the inner `Arc` in order to decrement the reference count.
// FIXME: get rid of the `Option` once RFC 3466 or equivalent becomes available.
drop(self.0.take());
mem::forget(self);
}
}
impl Drop for SemaphoreGuardArc {
fn drop(&mut self) {
let opt = self.0.take().unwrap();
opt.count.fetch_add(1, Ordering::AcqRel);
opt.event.notify(1);
}
}