Vendor dependencies for 0.3.0 release

This commit is contained in:
2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

88
vendor/async-lock/tests/barrier.rs vendored Normal file
View File

@@ -0,0 +1,88 @@
use std::sync::Arc;
use std::thread;
use async_lock::Barrier;
use futures_lite::future;
#[test]
#[cfg_attr(miri, ignore)]
// N-1 threads plus the main task wait on a shared Barrier; exactly one waiter
// per generation must observe `is_leader() == true`. Repeats 10 generations to
// verify the barrier resets after each release.
fn smoke() {
    future::block_on(async move {
        const N: usize = 10;
        let barrier = Arc::new(Barrier::new(N));
        for _ in 0..10 {
            let (tx, rx) = flume::unbounded();
            for _ in 0..N - 1 {
                let c = barrier.clone();
                let tx = tx.clone();
                thread::spawn(move || {
                    future::block_on(async move {
                        let res = c.wait().await;
                        // Each waiter reports whether it was elected leader.
                        tx.send_async(res.is_leader()).await.unwrap();
                    })
                });
            }
            // At this point, all spawned threads should be blocked,
            // so we shouldn't get anything from the channel.
            let res = rx.try_recv();
            assert!(res.is_err());
            let mut leader_found = barrier.wait().await.is_leader();
            // Now, the barrier is cleared and we should get data.
            for _ in 0..N - 1 {
                if rx.recv_async().await.unwrap() {
                    // At most one leader per generation.
                    assert!(!leader_found);
                    leader_found = true;
                }
            }
            // And at least one leader per generation.
            assert!(leader_found);
        }
    });
}
#[cfg(all(feature = "std", not(target_family = "wasm")))]
#[test]
#[cfg_attr(miri, ignore)]
// Same leader-election check as `smoke`, but the spawned threads use the
// blocking `wait_blocking` API; the main task still waits asynchronously.
fn smoke_blocking() {
    future::block_on(async move {
        const N: usize = 10;
        let barrier = Arc::new(Barrier::new(N));
        for _ in 0..10 {
            let (tx, rx) = flume::unbounded();
            for _ in 0..N - 1 {
                let c = barrier.clone();
                let tx = tx.clone();
                thread::spawn(move || {
                    let res = c.wait_blocking();
                    // Synchronous send: each waiter reports leader status.
                    tx.send(res.is_leader()).unwrap();
                });
            }
            // At this point, all spawned threads should be blocked,
            // so we shouldn't get anything from the channel.
            let res = rx.try_recv();
            assert!(res.is_err());
            let mut leader_found = barrier.wait_blocking().is_leader();
            // Now, the barrier is cleared and we should get data.
            for _ in 0..N - 1 {
                if rx.recv_async().await.unwrap() {
                    // Exactly one leader expected per generation.
                    assert!(!leader_found);
                    leader_found = true;
                }
            }
            assert!(leader_found);
        }
    });
}

21
vendor/async-lock/tests/common/mod.rs vendored Normal file
View File

@@ -0,0 +1,21 @@
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::task::Context;
use futures_lite::prelude::*;
use waker_fn::waker_fn;
/// Asserts the contract of a fair lock future: while `contending_guard` is
/// held the `acquire_future` parks (returns `Pending`), dropping the guard
/// wakes it, and the next poll completes.
pub fn check_yields_when_contended<G>(contending_guard: G, acquire_future: impl Future) {
    let woken = Arc::new(AtomicBool::new(false));
    let flag = Arc::clone(&woken);
    let waker = waker_fn(move || flag.store(true, Ordering::SeqCst));
    let mut cx = Context::from_waker(&waker);
    futures_lite::pin!(acquire_future);
    // While the guard is live, acquisition must not complete.
    assert!(acquire_future.as_mut().poll(&mut cx).is_pending());
    // Releasing the guard must trigger our waker...
    drop(contending_guard);
    assert!(woken.load(Ordering::SeqCst));
    // ...after which the acquisition succeeds.
    assert!(acquire_future.poll(&mut cx).is_ready());
}

46
vendor/async-lock/tests/loom.rs vendored Normal file
View File

@@ -0,0 +1,46 @@
#![cfg(loom)]
use loom::sync::{mpsc, Arc};
use loom::thread;
use async_lock::Barrier;
#[ignore]
#[test]
// Loom model of the barrier leader-election test (marked #[ignore]; run
// explicitly under `--cfg loom`). Mirrors tests/barrier.rs but with loom's
// mock threads and channels so loom can explore interleavings.
fn barrier_smoke() {
    loom::model(|| {
        const N: usize = 10;
        let barrier = Arc::new(Barrier::new(N));
        for _ in 0..10 {
            let (tx, rx) = mpsc::channel();
            // NOTE(review): spawns loom::MAX_THREADS - 1 workers but the
            // barrier is sized N and only N - 1 results are received below —
            // presumably relies on MAX_THREADS == N in this config; confirm
            // against loom's thread limit before touching.
            for _ in 0..loom::MAX_THREADS - 1 {
                let c = barrier.clone();
                let tx = tx.clone();
                thread::spawn(move || {
                    let res = c.wait_blocking();
                    tx.send(res.is_leader()).unwrap();
                });
            }
            // At this point, all spawned threads should be blocked,
            // so we shouldn't get anything from the channel.
            let res = rx.try_recv();
            assert!(res.is_err());
            let mut leader_found = barrier.wait_blocking().is_leader();
            // Now, the barrier is cleared and we should get data.
            for _ in 0..N - 1 {
                if rx.recv().unwrap() {
                    // At most one leader per generation.
                    assert!(!leader_found);
                    leader_found = true;
                }
            }
            assert!(leader_found);
        }
    });
}

111
vendor/async-lock/tests/mutex.rs vendored Normal file
View File

@@ -0,0 +1,111 @@
mod common;
use std::sync::Arc;
#[cfg(not(target_family = "wasm"))]
use std::thread;
use async_lock::Mutex;
use futures_lite::future;
use common::check_yields_when_contended;
#[cfg(target_family = "wasm")]
use wasm_bindgen_test::wasm_bindgen_test as test;
#[cfg(target_family = "wasm")]
wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser);
#[test]
fn smoke() {
    // A mutex can be locked repeatedly once each guard goes out of scope.
    future::block_on(async {
        let m = Mutex::new(());
        {
            let _guard = m.lock().await;
        }
        {
            let _guard = m.lock().await;
        }
    })
}
#[cfg(all(feature = "std", not(target_family = "wasm")))]
#[test]
fn smoke_blocking() {
    // Blocking API: sequential lock/unlock twice must succeed.
    let m = Mutex::new(());
    {
        let _guard = m.lock_blocking();
    }
    {
        let _guard = m.lock_blocking();
    }
}
#[cfg(all(feature = "std", not(target_family = "wasm")))]
#[test]
fn smoke_arc_blocking() {
    // Arc-based blocking guards: sequential lock/unlock twice must succeed.
    let m = Arc::new(Mutex::new(()));
    {
        let _guard = m.lock_arc_blocking();
    }
    {
        let _guard = m.lock_arc_blocking();
    }
}
#[test]
fn try_lock() {
    // An uncontended mutex yields a guard immediately via try_lock.
    let m = Mutex::new(());
    let mut guard = m.try_lock().unwrap();
    *guard = ();
}
#[test]
fn into_inner() {
    // Consuming the mutex returns the wrapped value unchanged.
    let wrapped = Mutex::new(10i32);
    let inner = wrapped.into_inner();
    assert_eq!(inner, 10);
}
#[test]
fn get_mut() {
    // With exclusive (&mut) access, the value can be mutated lock-free.
    let mut m = Mutex::new(10i32);
    let value = m.get_mut();
    *value = 20;
    assert_eq!(m.into_inner(), 20);
}
#[cfg(not(target_family = "wasm"))]
#[test]
// 100 threads each increment a shared counter under the mutex and signal
// completion; the final value must equal the number of tasks, proving
// mutual exclusion (no lost updates).
fn contention() {
    future::block_on(async {
        let (tx, rx) = flume::unbounded();
        let tx = Arc::new(tx);
        let mutex = Arc::new(Mutex::new(0i32));
        let num_tasks = 100;
        for _ in 0..num_tasks {
            let tx = tx.clone();
            let mutex = mutex.clone();
            thread::spawn(|| {
                future::block_on(async move {
                    let mut lock = mutex.lock().await;
                    *lock += 1;
                    // Signal while still holding the lock, then release.
                    tx.send_async(()).await.unwrap();
                    drop(lock);
                })
            });
        }
        // Wait for all workers before inspecting the counter.
        for _ in 0..num_tasks {
            rx.recv_async().await.unwrap();
        }
        let lock = mutex.lock().await;
        assert_eq!(num_tasks, *lock);
    });
}
#[test]
fn lifetime() {
    // The `lock_arc` future owns its own Arc, so it keeps the mutex
    // alive even after every other handle is dropped.
    let mutex = Arc::new(Mutex::new(0i32));
    let _fut = mutex.lock_arc();
    drop(mutex);
}
#[test]
fn yields_when_contended() {
    // A pending lock must park while a guard is held and wake on release.
    let m = Mutex::new(());
    let held = m.try_lock().unwrap();
    check_yields_when_contended(held, m.lock());
    // Same contract for the Arc-based guard/future pair.
    let m = Arc::new(m);
    let held = m.try_lock_arc().unwrap();
    check_yields_when_contended(held, m.lock_arc());
}

589
vendor/async-lock/tests/rwlock.rs vendored Normal file
View File

@@ -0,0 +1,589 @@
mod common;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use common::check_yields_when_contended;
#[cfg(not(target_family = "wasm"))]
use futures_lite::prelude::*;
#[cfg(not(target_family = "wasm"))]
use std::thread;
use futures_lite::future;
use async_lock::{
RwLock, RwLockReadGuard, RwLockReadGuardArc, RwLockUpgradableReadGuard,
RwLockUpgradableReadGuardArc,
};
#[cfg(target_family = "wasm")]
use wasm_bindgen_test::wasm_bindgen_test as test;
#[cfg(target_family = "wasm")]
wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser);
#[cfg(not(target_family = "wasm"))]
// Minimal thread-backed executor for these tests: runs `f` to completion on a
// fresh OS thread and returns a boxed future that resolves to its output,
// forwarded through a one-shot flume channel.
fn spawn<T: Send + 'static>(f: impl Future<Output = T> + Send + 'static) -> future::Boxed<T> {
    let (s, r) = flume::bounded(1);
    thread::spawn(move || {
        future::block_on(async {
            // Ignore send errors: the receiver may have been dropped.
            let _ = s.send_async(f.await).await;
        })
    });
    async move { r.recv_async().await.unwrap() }.boxed()
}
#[test]
fn smoke() {
    // Sequential readers and writers succeed, and two readers may coexist.
    future::block_on(async {
        let lock = RwLock::new(());
        {
            let _r = lock.read().await;
        }
        {
            let _w = lock.write().await;
        }
        {
            let _r1 = lock.read().await;
            let _r2 = lock.read().await;
        }
        {
            let _w = lock.write().await;
        }
    });
}
#[cfg(all(feature = "std", not(target_family = "wasm")))]
#[test]
fn smoke_blocking() {
    // Blocking API: readers, a writer, two concurrent readers, and an
    // upgradable read that upgrades once the plain reader is released.
    let lock = RwLock::new(());
    {
        let _r = lock.read_blocking();
    }
    {
        let _w = lock.write_blocking();
    }
    {
        let _r1 = lock.read_blocking();
        let _r2 = lock.read_blocking();
    }
    let reader = lock.read_blocking();
    let upgradable = lock.upgradable_read_blocking();
    drop(reader);
    let writer = RwLockUpgradableReadGuard::upgrade_blocking(upgradable);
    drop(writer);
    {
        let _w = lock.write_blocking();
    }
}
#[cfg(all(feature = "std", not(target_family = "wasm")))]
#[test]
fn smoke_arc_blocking() {
    // Same as smoke_blocking, using the Arc-based blocking guards.
    let lock = Arc::new(RwLock::new(()));
    {
        let _r = lock.read_arc_blocking();
    }
    {
        let _w = lock.write_arc_blocking();
    }
    {
        let _r1 = lock.read_arc_blocking();
        let _r2 = lock.read_arc_blocking();
    }
    let reader = lock.read_arc_blocking();
    let upgradable = lock.upgradable_read_arc_blocking();
    drop(reader);
    let writer = RwLockUpgradableReadGuardArc::upgrade_blocking(upgradable);
    drop(writer);
    {
        let _w = lock.write_arc_blocking();
    }
}
#[test]
fn try_write() {
    // While any read guard is live, try_write must fail.
    future::block_on(async {
        let lock = RwLock::new(0isize);
        let reader = lock.read().await;
        assert!(lock.try_write().is_none());
        drop(reader);
    });
}
#[test]
fn into_inner() {
    // Consuming the lock returns the wrapped value unchanged.
    let wrapped = RwLock::new(10);
    let inner = wrapped.into_inner();
    assert_eq!(inner, 10);
}
#[test]
// Verifies that into_inner() moves the value out without dropping it: the
// drop counter must stay 0 until the extracted value itself is dropped.
fn into_inner_and_drop() {
    // Increments the shared counter exactly once, when dropped.
    struct Counter(Arc<AtomicUsize>);
    impl Drop for Counter {
        fn drop(&mut self) {
            self.0.fetch_add(1, Ordering::SeqCst);
        }
    }
    let cnt = Arc::new(AtomicUsize::new(0));
    let lock = RwLock::new(Counter(cnt.clone()));
    assert_eq!(cnt.load(Ordering::SeqCst), 0);
    {
        // into_inner must not run Drop — only the binding's end of scope does.
        let _inner = lock.into_inner();
        assert_eq!(cnt.load(Ordering::SeqCst), 0);
    }
    assert_eq!(cnt.load(Ordering::SeqCst), 1);
}
#[test]
fn get_mut() {
    // With exclusive (&mut) access, the value is mutable without locking.
    let mut lock = RwLock::new(10);
    let value = lock.get_mut();
    *value = 20;
    assert_eq!(lock.into_inner(), 20);
}
// Miri bug; this works when async is replaced with blocking
#[cfg(not(target_family = "wasm"))]
#[test]
#[cfg_attr(miri, ignore)]
// Stress test: N background tasks each take the lock M times, choosing a
// write lock ~1/N of the time and a read lock otherwise; the main task just
// waits for every task to report completion.
fn contention() {
    const N: u32 = 10;
    const M: usize = if cfg!(miri) { 100 } else { 1000 };
    let (tx, rx) = flume::unbounded();
    let tx = Arc::new(tx);
    let rw = Arc::new(RwLock::new(()));
    // Spawn N tasks that randomly acquire the lock M times.
    for _ in 0..N {
        let tx = tx.clone();
        let rw = rw.clone();
        let _spawned = spawn(async move {
            for _ in 0..M {
                if fastrand::u32(..N) == 0 {
                    drop(rw.write().await);
                } else {
                    drop(rw.read().await);
                }
            }
            tx.send_async(()).await.unwrap();
        });
    }
    future::block_on(async move {
        for _ in 0..N {
            rx.recv_async().await.unwrap();
        }
    });
}
#[cfg(not(target_family = "wasm"))]
#[test]
#[cfg_attr(miri, ignore)]
// Same stress test as `contention`, exercising the Arc-based guard
// variants (write_arc / read_arc) instead of the borrowed guards.
fn contention_arc() {
    const N: u32 = 10;
    const M: usize = if cfg!(miri) { 100 } else { 1000 };
    let (tx, rx) = flume::unbounded();
    let tx = Arc::new(tx);
    let rw = Arc::new(RwLock::new(()));
    // Spawn N tasks that randomly acquire the lock M times.
    for _ in 0..N {
        let tx = tx.clone();
        let rw = rw.clone();
        let _spawned = spawn(async move {
            for _ in 0..M {
                if fastrand::u32(..N) == 0 {
                    drop(rw.write_arc().await);
                } else {
                    drop(rw.read_arc().await);
                }
            }
            tx.send_async(()).await.unwrap();
        });
    }
    future::block_on(async move {
        for _ in 0..N {
            rx.recv_async().await.unwrap();
        }
    });
}
#[cfg(not(target_family = "wasm"))]
#[test]
// A writer briefly sets the value to -1 mid-update (yielding in between);
// concurrent readers assert they never observe the negative intermediate
// state, proving readers are excluded while the write guard is held.
fn writer_and_readers() {
    let lock = Arc::new(RwLock::new(0i32));
    let (tx, rx) = flume::unbounded();
    // Spawn a writer task.
    let _spawned = spawn({
        let lock = lock.clone();
        async move {
            let mut lock = lock.write().await;
            for _ in 0..1000 {
                let tmp = *lock;
                // Expose a "torn" value, yield, then restore + increment.
                *lock = -1;
                future::yield_now().await;
                *lock = tmp + 1;
            }
            tx.send_async(()).await.unwrap();
        }
    });
    // Readers try to catch the writer in the act.
    let mut readers = Vec::new();
    for _ in 0..5 {
        let lock = lock.clone();
        readers.push(spawn(async move {
            for _ in 0..1000 {
                let lock = lock.read().await;
                assert!(*lock >= 0);
            }
        }));
    }
    future::block_on(async move {
        // Wait for readers to pass their asserts.
        for r in readers {
            r.await;
        }
        // Wait for writer to finish.
        rx.recv_async().await.unwrap();
        let lock = lock.read().await;
        assert_eq!(*lock, 1000);
    });
}
#[cfg(not(target_family = "wasm"))]
#[test]
// Same exclusion test as `writer_and_readers`, using the Arc-based guards
// (write_arc / read_arc): readers must never see the -1 intermediate value.
fn writer_and_readers_arc() {
    let lock = Arc::new(RwLock::new(0i32));
    let (tx, rx) = flume::unbounded();
    // Spawn a writer task.
    let _spawned = spawn({
        let lock = lock.clone();
        async move {
            let mut lock = lock.write_arc().await;
            for _ in 0..1000 {
                let tmp = *lock;
                // Expose a "torn" value, yield, then restore + increment.
                *lock = -1;
                future::yield_now().await;
                *lock = tmp + 1;
            }
            tx.send_async(()).await.unwrap();
        }
    });
    // Readers try to catch the writer in the act.
    let mut readers = Vec::new();
    for _ in 0..5 {
        let lock = lock.clone();
        readers.push(spawn(async move {
            for _ in 0..1000 {
                let lock = lock.read_arc().await;
                assert!(*lock >= 0);
            }
        }));
    }
    future::block_on(async move {
        // Wait for readers to pass their asserts.
        for r in readers {
            r.await;
        }
        // Wait for writer to finish.
        rx.recv_async().await.unwrap();
        let lock = lock.read_arc().await;
        assert_eq!(*lock, 1000);
    });
}
#[test]
// An upgradable read coexists with plain readers, blocks writers, and can be
// upgraded to a write guard once all plain readers are gone.
fn upgrade() {
    future::block_on(async {
        let lock: RwLock<i32> = RwLock::new(0);
        let read_guard = lock.read().await;
        let read_guard2 = lock.read().await;
        // Should be able to obtain an upgradable lock.
        let upgradable_guard = lock.upgradable_read().await;
        // Should be able to obtain a read lock when an upgradable lock is active.
        let read_guard3 = lock.read().await;
        assert_eq!(0, *read_guard3);
        drop(read_guard);
        drop(read_guard2);
        drop(read_guard3);
        // Writers should not pass.
        assert!(lock.try_write().is_none());
        let mut write_guard = RwLockUpgradableReadGuard::try_upgrade(upgradable_guard).expect(
            "should be able to upgrade an upgradable lock because there are no more readers",
        );
        *write_guard += 1;
        drop(write_guard);
        let read_guard = lock.read().await;
        assert_eq!(1, *read_guard)
    });
}
#[test]
// Same upgrade scenario as `upgrade`, via the Arc-based guards
// (read_arc / upgradable_read_arc / RwLockUpgradableReadGuardArc).
fn upgrade_arc() {
    future::block_on(async {
        let lock: Arc<RwLock<i32>> = Arc::new(RwLock::new(0));
        let read_guard = lock.read_arc().await;
        let read_guard2 = lock.read_arc().await;
        // Should be able to obtain an upgradable lock.
        let upgradable_guard = lock.upgradable_read_arc().await;
        // Should be able to obtain a read lock when an upgradable lock is active.
        let read_guard3 = lock.read_arc().await;
        assert_eq!(0, *read_guard3);
        drop(read_guard);
        drop(read_guard2);
        drop(read_guard3);
        // Writers should not pass.
        assert!(lock.try_write().is_none());
        let mut write_guard = RwLockUpgradableReadGuardArc::try_upgrade(upgradable_guard).expect(
            "should be able to upgrade an upgradable lock because there are no more readers",
        );
        *write_guard += 1;
        drop(write_guard);
        let read_guard = lock.read_arc().await;
        assert_eq!(1, *read_guard)
    });
}
#[test]
// Dropping an upgradable guard (without upgrading) releases its claim:
// afterwards a plain write lock can be taken normally.
fn not_upgrade() {
    future::block_on(async {
        let mutex: RwLock<i32> = RwLock::new(0);
        let read_guard = mutex.read().await;
        let read_guard2 = mutex.read().await;
        // Should be able to obtain an upgradable lock.
        let upgradable_guard = mutex.upgradable_read().await;
        // Should be able to obtain a shared lock when an upgradable lock is active.
        let read_guard3 = mutex.read().await;
        assert_eq!(0, *read_guard3);
        drop(read_guard);
        drop(read_guard2);
        drop(read_guard3);
        // Drop the upgradable lock.
        drop(upgradable_guard);
        assert_eq!(0, *(mutex.read().await));
        // Should be able to acquire a write lock because there are no more readers.
        let mut write_guard = mutex.write().await;
        *write_guard += 1;
        drop(write_guard);
        let read_guard = mutex.read().await;
        assert_eq!(1, *read_guard)
    });
}
#[test]
// Arc-guard variant of `not_upgrade`: dropping an un-upgraded upgradable
// guard frees the lock for a subsequent write_arc.
fn not_upgrade_arc() {
    future::block_on(async {
        let mutex: Arc<RwLock<i32>> = Arc::new(RwLock::new(0));
        let read_guard = mutex.read_arc().await;
        let read_guard2 = mutex.read_arc().await;
        // Should be able to obtain an upgradable lock.
        let upgradable_guard = mutex.upgradable_read_arc().await;
        // Should be able to obtain a shared lock when an upgradable lock is active.
        let read_guard3 = mutex.read_arc().await;
        assert_eq!(0, *read_guard3);
        drop(read_guard);
        drop(read_guard2);
        drop(read_guard3);
        // Drop the upgradable lock.
        drop(upgradable_guard);
        assert_eq!(0, *(mutex.read_arc().await));
        // Should be able to acquire a write lock because there are no more readers.
        let mut write_guard = mutex.write_arc().await;
        *write_guard += 1;
        drop(write_guard);
        let read_guard = mutex.read_arc().await;
        assert_eq!(1, *read_guard)
    });
}
#[test]
// Races an upgrade against a plain writer with future::or. The upgrade must
// win (the upgradable guard blocks the writer), so the upgraded guard sees 0
// and the final value is 2 — the concurrent writer's 1 never lands first.
fn upgradable_with_concurrent_writer() {
    future::block_on(async {
        let lock: Arc<RwLock<i32>> = Arc::new(RwLock::new(0));
        let lock2 = lock.clone();
        let upgradable_guard = lock.upgradable_read().await;
        future::or(
            async move {
                let mut write_guard = lock2.write().await;
                *write_guard = 1;
            },
            async move {
                let mut write_guard = RwLockUpgradableReadGuard::upgrade(upgradable_guard).await;
                // Upgrade happens before the other writer can run.
                assert_eq!(*write_guard, 0);
                *write_guard = 2;
            },
        )
        .await;
        assert_eq!(2, *(lock.write().await));
        let read_guard = lock.read().await;
        assert_eq!(2, *read_guard);
    });
}
#[test]
// Arc-guard variant of `upgradable_with_concurrent_writer`: the upgrade of
// the upgradable_read_arc guard must beat the concurrent write_arc.
fn upgradable_with_concurrent_writer_arc() {
    future::block_on(async {
        let lock: Arc<RwLock<i32>> = Arc::new(RwLock::new(0));
        let lock2 = lock.clone();
        let upgradable_guard = lock.upgradable_read_arc().await;
        future::or(
            async move {
                let mut write_guard = lock2.write_arc().await;
                *write_guard = 1;
            },
            async move {
                let mut write_guard = RwLockUpgradableReadGuardArc::upgrade(upgradable_guard).await;
                // Upgrade happens before the other writer can run.
                assert_eq!(*write_guard, 0);
                *write_guard = 2;
            },
        )
        .await;
        assert_eq!(2, *(lock.write_arc().await));
        let read_guard = lock.read_arc().await;
        assert_eq!(2, *read_guard);
    });
}
#[test]
// Each (held guard, pending acquisition) pairing must park while contended
// and wake on release — see common::check_yields_when_contended.
fn yields_when_contended() {
    let rw = RwLock::new(());
    // A held write guard blocks readers, upgradable readers, and writers.
    check_yields_when_contended(rw.try_write().unwrap(), rw.read());
    check_yields_when_contended(rw.try_write().unwrap(), rw.upgradable_read());
    check_yields_when_contended(rw.try_write().unwrap(), rw.write());
    // Held read / upgradable guards block writers; upgradables exclude each other.
    check_yields_when_contended(rw.try_read().unwrap(), rw.write());
    check_yields_when_contended(rw.try_upgradable_read().unwrap(), rw.write());
    check_yields_when_contended(rw.try_upgradable_read().unwrap(), rw.upgradable_read());
    // A plain reader blocks an in-flight upgrade.
    let upgradable = rw.try_upgradable_read().unwrap();
    check_yields_when_contended(
        rw.try_read().unwrap(),
        RwLockUpgradableReadGuard::upgrade(upgradable),
    );
}
#[test]
// Arc-guard variant of `yields_when_contended`: same park/wake contract for
// every contended pairing of the *_arc guards and futures.
fn yields_when_contended_arc() {
    let rw = Arc::new(RwLock::new(()));
    check_yields_when_contended(rw.try_write_arc().unwrap(), rw.read_arc());
    check_yields_when_contended(rw.try_write_arc().unwrap(), rw.upgradable_read_arc());
    check_yields_when_contended(rw.try_write_arc().unwrap(), rw.write_arc());
    check_yields_when_contended(rw.try_read_arc().unwrap(), rw.write_arc());
    check_yields_when_contended(rw.try_upgradable_read_arc().unwrap(), rw.write_arc());
    check_yields_when_contended(
        rw.try_upgradable_read_arc().unwrap(),
        rw.upgradable_read_arc(),
    );
    // A plain reader blocks an in-flight upgrade.
    let upgradable = rw.try_upgradable_read_arc().unwrap();
    check_yields_when_contended(
        rw.try_read_arc().unwrap(),
        RwLockUpgradableReadGuardArc::upgrade(upgradable),
    );
}
#[test]
// Dropping unpolled acquisition futures (cancellation) must not poison the
// lock: subsequent acquisitions of every kind still succeed.
fn cancellation() {
    future::block_on(async {
        let rw = RwLock::new(());
        // Drop the futures without ever polling them.
        drop(rw.read());
        drop(rw.upgradable_read());
        drop(rw.write());
        // The lock must still work normally afterwards.
        let read = rw.read().await;
        drop(read);
        let upgradable_read = rw.upgradable_read().await;
        drop(upgradable_read);
        let write = rw.write().await;
        drop(write);
        // Dropping an unpolled upgrade future releases the upgradable claim.
        let upgradable_read = rw.upgradable_read().await;
        drop(RwLockUpgradableReadGuard::upgrade(upgradable_read));
        let upgradable_read = rw.upgradable_read().await;
        let write = RwLockUpgradableReadGuard::upgrade(upgradable_read).await;
        drop(write);
    });
}
#[test]
// Each live Arc-based guard must hold exactly one extra strong reference to
// the lock's Arc, and dropping a guard (or an unpolled future) must return
// the count to 1 — i.e. no leaked references.
fn arc_rwlock_refcounts() {
    future::block_on(async {
        let rw = Arc::new(RwLock::new(()));
        assert_eq!(Arc::strong_count(&rw), 1);
        // Dropping unpolled acquisition futures must not leak a reference.
        drop(rw.read_arc());
        assert_eq!(Arc::strong_count(&rw), 1);
        drop(rw.upgradable_read_arc());
        assert_eq!(Arc::strong_count(&rw), 1);
        // NOTE(review): this one uses write() (non-Arc) unlike the *_arc
        // calls around it — the count stays 1 either way, but presumably
        // write_arc() was intended; confirm against upstream async-lock.
        drop(rw.write());
        assert_eq!(Arc::strong_count(&rw), 1);
        // Each live Arc guard accounts for exactly one extra reference.
        let read = rw.read_arc().await;
        assert_eq!(Arc::strong_count(&rw), 2);
        drop(read);
        assert_eq!(Arc::strong_count(&rw), 1);
        let upgradable_read = rw.upgradable_read_arc().await;
        assert_eq!(Arc::strong_count(&rw), 2);
        drop(upgradable_read);
        assert_eq!(Arc::strong_count(&rw), 1);
        let write = rw.write_arc().await;
        assert_eq!(Arc::strong_count(&rw), 2);
        drop(write);
        assert_eq!(Arc::strong_count(&rw), 1);
        // Upgrading transfers the reference rather than duplicating it.
        let upgradable_read = rw.upgradable_read_arc().await;
        assert_eq!(Arc::strong_count(&rw), 2);
        drop(RwLockUpgradableReadGuardArc::upgrade(upgradable_read));
        assert_eq!(Arc::strong_count(&rw), 1);
        let upgradable_read = rw.upgradable_read_arc().await;
        assert_eq!(Arc::strong_count(&rw), 2);
        let write = RwLockUpgradableReadGuardArc::upgrade(upgradable_read).await;
        assert_eq!(Arc::strong_count(&rw), 2);
        drop(write);
        assert_eq!(Arc::strong_count(&rw), 1);
    });
}
// We are testing that this compiles.
// Compile-time check: RwLockReadGuard must be covariant in its value type,
// so a guard over &'static () coerces to one over &'g ().
fn _covariance_test<'g>(guard: RwLockReadGuard<'g, &'static ()>) {
    let _: RwLockReadGuard<'g, &'g ()> = guard;
}
// We are testing that this compiles.
// Compile-time check: RwLockReadGuardArc must likewise be covariant, so a
// guard over &'static () can be assigned where a shorter lifetime is expected.
fn _covariance_test_arc(
    guard: RwLockReadGuardArc<&'static ()>,
    mut _guard_2: RwLockReadGuardArc<&()>,
) {
    _guard_2 = guard;
}

194
vendor/async-lock/tests/semaphore.rs vendored Normal file
View File

@@ -0,0 +1,194 @@
mod common;
use std::future::Future;
use std::mem::forget;
use std::pin::Pin;
use std::sync::{
atomic::{AtomicUsize, Ordering},
mpsc, Arc,
};
use std::task::Context;
use std::task::Poll;
use std::thread;
use common::check_yields_when_contended;
use async_lock::Semaphore;
use futures_lite::{future, pin};
#[test]
fn try_acquire() {
    // With both of the two permits taken, a third try_acquire fails
    // until one permit is released.
    let s = Semaphore::new(2);
    let first = s.try_acquire().unwrap();
    let _second = s.try_acquire().unwrap();
    assert!(s.try_acquire().is_none());
    drop(first);
    assert!(s.try_acquire().is_some());
}
#[test]
// 50 threads hammer acquire() COUNT times each (guards drop immediately);
// the mpsc channel closing signals completion. Afterwards exactly 5 permits
// must be available again — none leaked, none duplicated.
fn stress() {
    const COUNT: usize = if cfg!(miri) { 500 } else { 10_000 };
    let s = Arc::new(Semaphore::new(5));
    let (tx, rx) = mpsc::channel::<()>();
    for _ in 0..50 {
        let s = s.clone();
        let tx = tx.clone();
        thread::spawn(move || {
            future::block_on(async {
                for _ in 0..COUNT {
                    // Guard is dropped at the end of each iteration.
                    s.acquire().await;
                }
                drop(tx);
            })
        });
    }
    // Once every sender clone is dropped, recv() returns Err — i.e. all done.
    drop(tx);
    let _ = rx.recv();
    // All 5 permits must be available again, and no more than 5.
    let _g1 = s.try_acquire().unwrap();
    let g2 = s.try_acquire().unwrap();
    let _g3 = s.try_acquire().unwrap();
    let _g4 = s.try_acquire().unwrap();
    let _g5 = s.try_acquire().unwrap();
    assert!(s.try_acquire().is_none());
    drop(g2);
    assert!(s.try_acquire().is_some());
}
#[test]
// A one-permit semaphore acts as a mutex: two threads acquiring concurrently
// must both eventually succeed without deadlocking.
fn as_mutex() {
    let s = Arc::new(Semaphore::new(1));
    let s2 = s.clone();
    let _t = thread::spawn(move || {
        future::block_on(async {
            let _g = s2.acquire().await;
        });
    });
    future::block_on(async {
        let _g = s.acquire().await;
    });
}
#[test]
// With two permits, both threads can hold one simultaneously: each holds a
// permit while performing a channel rendezvous with the other, which would
// deadlock if the permits were exclusive.
fn multi_resource() {
    let s = Arc::new(Semaphore::new(2));
    let s2 = s.clone();
    let (tx1, rx1) = mpsc::channel();
    let (tx2, rx2) = mpsc::channel();
    let _t = thread::spawn(move || {
        future::block_on(async {
            let _g = s2.acquire().await;
            // Wait for the main thread's signal, then reply.
            let _ = rx2.recv();
            tx1.send(()).unwrap();
        });
    });
    future::block_on(async {
        let _g = s.acquire().await;
        tx2.send(()).unwrap();
        rx1.recv().unwrap();
    });
}
#[test]
fn lifetime() {
    // The `acquire_arc` future owns its own Arc, so it keeps the
    // semaphore alive after every other handle is dropped.
    let semaphore = Arc::new(Semaphore::new(2));
    let _fut = semaphore.acquire_arc();
    drop(semaphore);
}
#[test]
fn yields_when_contended() {
    // With its single permit held, a pending acquire must park and
    // then wake when the permit is released.
    let s = Semaphore::new(1);
    let held = s.try_acquire().unwrap();
    check_yields_when_contended(held, s.acquire());
    // Same contract for the Arc-based guard/future pair.
    let s = Arc::new(s);
    let held = s.try_acquire_arc().unwrap();
    check_yields_when_contended(held, s.acquire_arc());
}
#[cfg(all(feature = "std", not(target_family = "wasm")))]
#[test]
fn smoke_blocking() {
    // Blocking acquire: with both permits held, try_acquire fails until
    // a permit is returned.
    let s = Semaphore::new(2);
    let first = s.acquire_blocking();
    let _second = s.acquire_blocking();
    assert!(s.try_acquire().is_none());
    drop(first);
    assert!(s.try_acquire().is_some());
}
#[cfg(all(feature = "std", not(target_family = "wasm")))]
#[test]
fn smoke_arc_blocking() {
    // Arc-based blocking acquire: same permit accounting as smoke_blocking.
    let s = Arc::new(Semaphore::new(2));
    let first = s.acquire_arc_blocking();
    let _second = s.acquire_arc_blocking();
    assert!(s.try_acquire().is_none());
    drop(first);
    assert!(s.try_acquire().is_some());
}
#[test]
// 50 threads block on a zero-permit semaphore; add_permits(50) must release
// all of them exactly once. Guards are forget()-ed so releases can't refill
// the pool and unblock anyone twice.
fn add_permits() {
    static COUNTER: AtomicUsize = AtomicUsize::new(0);
    let s = Arc::new(Semaphore::new(0));
    let (tx, rx) = mpsc::channel::<()>();
    for _ in 0..50 {
        let s = s.clone();
        let tx = tx.clone();
        thread::spawn(move || {
            future::block_on(async {
                let perm = s.acquire().await;
                // Leak the permit so it is never returned to the pool.
                forget(perm);
                COUNTER.fetch_add(1, Ordering::Relaxed);
                drop(tx);
            })
        });
    }
    // Nothing has been released yet, so no thread can have run.
    assert_eq!(COUNTER.load(Ordering::Relaxed), 0);
    s.add_permits(50);
    // Channel closes once every thread has dropped its sender clone.
    drop(tx);
    let _ = rx.recv();
    assert_eq!(COUNTER.load(Ordering::Relaxed), 50);
}
#[test]
// Drives the manual-poll future below (AddPermitsTest) on a real waker.
fn add_permits_2() {
    future::block_on(AddPermitsTest);
}
// Manual-poll harness: checks that add_permits(1) readies exactly one of two
// pending acquisitions, in FIFO order (the first waiter wins).
struct AddPermitsTest;
impl Future for AddPermitsTest {
    type Output = ();
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
        let s = Semaphore::new(0);
        let acq = s.acquire();
        pin!(acq);
        let acq_2 = s.acquire();
        pin!(acq_2);
        // With zero permits, both acquisitions must be pending.
        assert!(acq.as_mut().poll(cx).is_pending());
        assert!(acq_2.as_mut().poll(cx).is_pending());
        s.add_permits(1);
        // Only the first waiter gets the single new permit.
        let g = acq.poll(cx);
        assert!(g.is_ready());
        assert!(acq_2.poll(cx).is_pending());
        Poll::Ready(())
    }
}