Vendor dependencies for 0.3.0 release

2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

vendor/concurrent-queue/tests/bounded.rs vendored Normal file

@@ -0,0 +1,371 @@
#![allow(clippy::bool_assert_comparison)]
use concurrent_queue::{ConcurrentQueue, ForcePushError, PopError, PushError};
#[cfg(not(target_family = "wasm"))]
use easy_parallel::Parallel;
#[cfg(not(target_family = "wasm"))]
use std::sync::atomic::{AtomicUsize, Ordering};
#[cfg(target_family = "wasm")]
use wasm_bindgen_test::wasm_bindgen_test as test;
#[test]
fn smoke() {
let q = ConcurrentQueue::bounded(2);
q.push(7).unwrap();
assert_eq!(q.pop(), Ok(7));
q.push(8).unwrap();
assert_eq!(q.pop(), Ok(8));
assert!(q.pop().is_err());
}
#[test]
fn capacity() {
for i in 1..10 {
let q = ConcurrentQueue::<i32>::bounded(i);
assert_eq!(q.capacity(), Some(i));
}
}
#[test]
#[should_panic(expected = "capacity must be positive")]
fn zero_capacity() {
let _ = ConcurrentQueue::<i32>::bounded(0);
}
#[test]
fn len_empty_full() {
let q = ConcurrentQueue::bounded(2);
assert_eq!(q.len(), 0);
assert_eq!(q.is_empty(), true);
assert_eq!(q.is_full(), false);
q.push(()).unwrap();
assert_eq!(q.len(), 1);
assert_eq!(q.is_empty(), false);
assert_eq!(q.is_full(), false);
q.push(()).unwrap();
assert_eq!(q.len(), 2);
assert_eq!(q.is_empty(), false);
assert_eq!(q.is_full(), true);
q.pop().unwrap();
assert_eq!(q.len(), 1);
assert_eq!(q.is_empty(), false);
assert_eq!(q.is_full(), false);
}
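// Sequential and concurrent checks that len() always reports a value within 0..=CAP.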
#[cfg(not(target_family = "wasm"))]
#[test]
fn len() {
const COUNT: usize = if cfg!(miri) { 50 } else { 25_000 };
const CAP: usize = if cfg!(miri) { 50 } else { 1000 };
let q = ConcurrentQueue::bounded(CAP);
assert_eq!(q.len(), 0);
for _ in 0..CAP / 10 {
for i in 0..50 {
q.push(i).unwrap();
assert_eq!(q.len(), i + 1);
}
for i in 0..50 {
q.pop().unwrap();
assert_eq!(q.len(), 50 - i - 1);
}
}
assert_eq!(q.len(), 0);
for i in 0..CAP {
q.push(i).unwrap();
assert_eq!(q.len(), i + 1);
}
for _ in 0..CAP {
q.pop().unwrap();
}
assert_eq!(q.len(), 0);
Parallel::new()
.add(|| {
for i in 0..COUNT {
loop {
if let Ok(x) = q.pop() {
assert_eq!(x, i);
break;
}
}
let len = q.len();
assert!(len <= CAP);
}
})
.add(|| {
for i in 0..COUNT {
while q.push(i).is_err() {}
let len = q.len();
assert!(len <= CAP);
}
})
.run();
assert_eq!(q.len(), 0);
}
#[test]
fn close() {
let q = ConcurrentQueue::bounded(2);
assert_eq!(q.push(10), Ok(()));
assert!(!q.is_closed());
assert!(q.close());
assert!(q.is_closed());
assert!(!q.close());
assert_eq!(q.push(20), Err(PushError::Closed(20)));
assert_eq!(q.pop(), Ok(10));
assert_eq!(q.pop(), Err(PopError::Closed));
}
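// force_push() on a full bounded queue overwrites the oldest element and returns it.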
#[test]
fn force_push() {
let q = ConcurrentQueue::<i32>::bounded(5);
for i in 1..=5 {
assert_eq!(q.force_push(i), Ok(None));
}
assert!(!q.is_closed());
for i in 6..=10 {
assert_eq!(q.force_push(i), Ok(Some(i - 5)));
}
assert_eq!(q.pop(), Ok(6));
assert_eq!(q.force_push(11), Ok(None));
for i in 12..=15 {
assert_eq!(q.force_push(i), Ok(Some(i - 5)));
}
assert!(q.close());
assert_eq!(q.force_push(40), Err(ForcePushError(40)));
for i in 11..=15 {
assert_eq!(q.pop(), Ok(i));
}
assert_eq!(q.pop(), Err(PopError::Closed));
}
#[cfg(not(target_family = "wasm"))]
#[test]
fn spsc() {
const COUNT: usize = if cfg!(miri) { 100 } else { 100_000 };
let q = ConcurrentQueue::bounded(3);
Parallel::new()
.add(|| {
for i in 0..COUNT {
loop {
if let Ok(x) = q.pop() {
assert_eq!(x, i);
break;
}
}
}
assert!(q.pop().is_err());
})
.add(|| {
for i in 0..COUNT {
while q.push(i).is_err() {}
}
})
.run();
}
#[cfg(not(target_family = "wasm"))]
#[test]
fn mpmc() {
const COUNT: usize = if cfg!(miri) { 100 } else { 25_000 };
const THREADS: usize = 4;
let q = ConcurrentQueue::<usize>::bounded(3);
let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::<Vec<_>>();
Parallel::new()
.each(0..THREADS, |_| {
for _ in 0..COUNT {
let n = loop {
if let Ok(x) = q.pop() {
break x;
}
};
v[n].fetch_add(1, Ordering::SeqCst);
}
})
.each(0..THREADS, |_| {
for i in 0..COUNT {
while q.push(i).is_err() {}
}
})
.run();
for c in v {
assert_eq!(c.load(Ordering::SeqCst), THREADS);
}
}
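// Every DropCounter must be dropped exactly once. A failed push drops the value returned
// in PushError::Full, so DROPS is decremented to compensate for that extra drop.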
#[cfg(not(target_family = "wasm"))]
#[test]
fn drops() {
const RUNS: usize = if cfg!(miri) { 10 } else { 100 };
const STEPS: usize = if cfg!(miri) { 100 } else { 10_000 };
static DROPS: AtomicUsize = AtomicUsize::new(0);
#[derive(Debug, PartialEq)]
struct DropCounter;
impl Drop for DropCounter {
fn drop(&mut self) {
DROPS.fetch_add(1, Ordering::SeqCst);
}
}
for _ in 0..RUNS {
let steps = fastrand::usize(..STEPS);
let additional = fastrand::usize(..50);
DROPS.store(0, Ordering::SeqCst);
let q = ConcurrentQueue::bounded(50);
Parallel::new()
.add(|| {
for _ in 0..steps {
while q.pop().is_err() {}
}
})
.add(|| {
for _ in 0..steps {
while q.push(DropCounter).is_err() {
DROPS.fetch_sub(1, Ordering::SeqCst);
}
}
})
.run();
for _ in 0..additional {
q.push(DropCounter).unwrap();
}
assert_eq!(DROPS.load(Ordering::SeqCst), steps);
drop(q);
assert_eq!(DROPS.load(Ordering::SeqCst), steps + additional);
}
}
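// Stress push() and force_push() from multiple threads; whenever an insert does not
// displace an element, the matching pop() must succeed.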
#[cfg(not(target_family = "wasm"))]
#[test]
fn linearizable() {
const COUNT: usize = if cfg!(miri) { 500 } else { 25_000 };
const THREADS: usize = 4;
let q = ConcurrentQueue::bounded(THREADS);
Parallel::new()
.each(0..THREADS / 2, |_| {
for _ in 0..COUNT {
while q.push(0).is_err() {}
q.pop().unwrap();
}
})
.each(0..THREADS / 2, |_| {
for _ in 0..COUNT {
if q.force_push(0).unwrap().is_none() {
q.pop().unwrap();
}
}
})
.run();
}
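// Ring-buffer mode: the producer counts values it displaces via force_push(), the consumer
// counts values it pops, and the consumer exits once the producer is done (t == 0) and the
// queue is drained. Each value must be seen exactly once.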
#[cfg(not(target_family = "wasm"))]
#[test]
fn spsc_ring_buffer() {
const COUNT: usize = if cfg!(miri) { 200 } else { 100_000 };
let t = AtomicUsize::new(1);
let q = ConcurrentQueue::<usize>::bounded(3);
let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::<Vec<_>>();
Parallel::new()
.add(|| loop {
match t.load(Ordering::SeqCst) {
0 if q.is_empty() => break,
_ => {
while let Ok(n) = q.pop() {
v[n].fetch_add(1, Ordering::SeqCst);
}
}
}
})
.add(|| {
for i in 0..COUNT {
if let Ok(Some(n)) = q.force_push(i) {
v[n].fetch_add(1, Ordering::SeqCst);
}
}
t.fetch_sub(1, Ordering::SeqCst);
})
.run();
for c in v {
assert_eq!(c.load(Ordering::SeqCst), 1);
}
}
#[cfg(not(target_family = "wasm"))]
#[test]
fn mpmc_ring_buffer() {
const COUNT: usize = if cfg!(miri) { 100 } else { 25_000 };
const THREADS: usize = 4;
let t = AtomicUsize::new(THREADS);
let q = ConcurrentQueue::<usize>::bounded(3);
let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::<Vec<_>>();
Parallel::new()
.each(0..THREADS, |_| loop {
match t.load(Ordering::SeqCst) {
0 if q.is_empty() => break,
_ => {
while let Ok(n) = q.pop() {
v[n].fetch_add(1, Ordering::SeqCst);
}
}
}
})
.each(0..THREADS, |_| {
for i in 0..COUNT {
if let Ok(Some(n)) = q.force_push(i) {
v[n].fetch_add(1, Ordering::SeqCst);
}
}
t.fetch_sub(1, Ordering::SeqCst);
})
.run();
for c in v {
assert_eq!(c.load(Ordering::SeqCst), THREADS);
}
}

vendor/concurrent-queue/tests/loom.rs vendored Normal file

@@ -0,0 +1,307 @@
#![cfg(loom)]
use concurrent_queue::{ConcurrentQueue, ForcePushError, PopError, PushError};
use loom::sync::atomic::{AtomicUsize, Ordering};
use loom::sync::{Arc, Condvar, Mutex};
use loom::thread;
#[cfg(target_family = "wasm")]
use wasm_bindgen_test::wasm_bindgen_test as test;
/// A basic MPMC channel based on a ConcurrentQueue and loom primitives.
struct Channel<T> {
/// The queue used to contain items.
queue: ConcurrentQueue<T>,
/// The number of senders.
senders: AtomicUsize,
/// The number of receivers.
receivers: AtomicUsize,
/// The event that is signaled when a new item is pushed.
push_event: Event,
/// The event that is signaled when a new item is popped.
pop_event: Event,
}
/// The sending side of a channel.
struct Sender<T> {
/// The channel.
channel: Arc<Channel<T>>,
}
/// The receiving side of a channel.
struct Receiver<T> {
/// The channel.
channel: Arc<Channel<T>>,
}
/// Create a new pair of senders/receivers based on a queue.
fn pair<T>(queue: ConcurrentQueue<T>) -> (Sender<T>, Receiver<T>) {
let channel = Arc::new(Channel {
queue,
senders: AtomicUsize::new(1),
receivers: AtomicUsize::new(1),
push_event: Event::new(),
pop_event: Event::new(),
});
(
Sender {
channel: channel.clone(),
},
Receiver { channel },
)
}
impl<T> Clone for Sender<T> {
fn clone(&self) -> Self {
self.channel.senders.fetch_add(1, Ordering::SeqCst);
Sender {
channel: self.channel.clone(),
}
}
}
impl<T> Drop for Sender<T> {
fn drop(&mut self) {
if self.channel.senders.fetch_sub(1, Ordering::SeqCst) == 1 {
// Close the channel and notify the receivers.
self.channel.queue.close();
self.channel.push_event.signal_all();
}
}
}
impl<T> Clone for Receiver<T> {
fn clone(&self) -> Self {
self.channel.receivers.fetch_add(1, Ordering::SeqCst);
Receiver {
channel: self.channel.clone(),
}
}
}
impl<T> Drop for Receiver<T> {
fn drop(&mut self) {
if self.channel.receivers.fetch_sub(1, Ordering::SeqCst) == 1 {
// Close the channel and notify the senders.
self.channel.queue.close();
self.channel.pop_event.signal_all();
}
}
}
impl<T> Sender<T> {
/// Send a value.
///
/// Returns an error with the value if the channel is closed.
fn send(&self, mut value: T) -> Result<(), T> {
loop {
match self.channel.queue.push(value) {
Ok(()) => {
// Notify a single receiver.
self.channel.push_event.signal();
return Ok(());
}
Err(PushError::Closed(val)) => return Err(val),
Err(PushError::Full(val)) => {
// Wait for a receiver to pop an item.
value = val;
self.channel.pop_event.wait();
}
}
}
}
/// Send a value forcefully.
fn force_send(&self, value: T) -> Result<Option<T>, T> {
match self.channel.queue.force_push(value) {
Ok(bumped) => {
self.channel.push_event.signal();
Ok(bumped)
}
Err(ForcePushError(val)) => Err(val),
}
}
}
impl<T> Receiver<T> {
/// Channel capacity.
fn capacity(&self) -> Option<usize> {
self.channel.queue.capacity()
}
/// Receive a value.
///
/// Returns an error if the channel is closed.
fn recv(&self) -> Result<T, ()> {
loop {
match self.channel.queue.pop() {
Ok(value) => {
// Notify a single sender.
self.channel.pop_event.signal();
return Ok(value);
}
Err(PopError::Closed) => return Err(()),
Err(PopError::Empty) => {
// Wait for a sender to push an item.
self.channel.push_event.wait();
}
}
}
}
}
/// An event that can be waited on and then signaled.
struct Event {
/// The condition variable used to wait on the event.
condvar: Condvar,
/// The mutex used to protect the event.
///
/// Inside is the event's state. The first bit is used to indicate if the
/// notify_one method was called. The second bit is used to indicate if the
/// notify_all method was called.
mutex: Mutex<usize>,
}
impl Event {
/// Create a new event.
fn new() -> Self {
Self {
condvar: Condvar::new(),
mutex: Mutex::new(0),
}
}
/// Wait for the event to be signaled.
fn wait(&self) {
let mut state = self.mutex.lock().unwrap();
loop {
if *state & 0b11 != 0 {
// The event was signaled.
*state &= !0b01;
return;
}
// Wait for the event to be signaled.
state = self.condvar.wait(state).unwrap();
}
}
/// Signal the event.
fn signal(&self) {
let mut state = self.mutex.lock().unwrap();
*state |= 1;
drop(state);
self.condvar.notify_one();
}
/// Signal the event, but notify all waiters.
fn signal_all(&self) {
let mut state = self.mutex.lock().unwrap();
*state |= 3;
drop(state);
self.condvar.notify_all();
}
}
/// Wrapper to run tests on all three queues.
fn run_test<F: Fn(ConcurrentQueue<usize>, usize) + Send + Sync + Clone + 'static>(f: F) {
// The length of a loom test seems to increase exponentially the higher this number is.
const LIMIT: usize = 4;
let fc = f.clone();
loom::model(move || {
fc(ConcurrentQueue::bounded(1), LIMIT);
});
let fc = f.clone();
loom::model(move || {
fc(ConcurrentQueue::bounded(LIMIT / 2), LIMIT);
});
loom::model(move || {
f(ConcurrentQueue::unbounded(), LIMIT);
});
}
#[test]
fn spsc() {
run_test(|q, limit| {
// Create a new pair of senders/receivers.
let (tx, rx) = pair(q);
// Push each onto a thread and run them.
let handle = thread::spawn(move || {
for i in 0..limit {
if tx.send(i).is_err() {
break;
}
}
});
let mut recv_values = vec![];
loop {
match rx.recv() {
Ok(value) => recv_values.push(value),
Err(()) => break,
}
}
// Values may not be in order.
recv_values.sort_unstable();
assert_eq!(recv_values, (0..limit).collect::<Vec<_>>());
// Join the handle before we exit.
handle.join().unwrap();
});
}
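// force_send() may displace earlier values, so only the last `capacity` values received
// are guaranteed to match the tail of the sent sequence.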
#[test]
fn spsc_force() {
run_test(|q, limit| {
// Create a new pair of senders/receivers.
let (tx, rx) = pair(q);
// Push each onto a thread and run them.
let handle = thread::spawn(move || {
for i in 0..limit {
if tx.force_send(i).is_err() {
break;
}
}
});
let mut recv_values = vec![];
loop {
match rx.recv() {
Ok(value) => recv_values.push(value),
Err(()) => break,
}
}
// Values may not be in order.
recv_values.sort_unstable();
let cap = rx.capacity().unwrap_or(usize::MAX);
for (left, right) in (0..limit)
.rev()
.take(cap)
.zip(recv_values.into_iter().rev())
{
assert_eq!(left, right);
}
// Join the handle before we exit.
handle.join().unwrap();
});
}

vendor/concurrent-queue/tests/single.rs vendored Normal file

@@ -0,0 +1,289 @@
#![allow(clippy::bool_assert_comparison)]
use concurrent_queue::{ConcurrentQueue, ForcePushError, PopError, PushError};
#[cfg(not(target_family = "wasm"))]
use easy_parallel::Parallel;
#[cfg(not(target_family = "wasm"))]
use std::sync::atomic::{AtomicUsize, Ordering};
#[cfg(target_family = "wasm")]
use wasm_bindgen_test::wasm_bindgen_test as test;
#[test]
fn smoke() {
let q = ConcurrentQueue::bounded(1);
q.push(7).unwrap();
assert_eq!(q.pop(), Ok(7));
q.push(8).unwrap();
assert_eq!(q.pop(), Ok(8));
assert!(q.pop().is_err());
}
#[test]
fn capacity() {
let q = ConcurrentQueue::<i32>::bounded(1);
assert_eq!(q.capacity(), Some(1));
}
#[test]
fn len_empty_full() {
let q = ConcurrentQueue::bounded(1);
assert_eq!(q.len(), 0);
assert_eq!(q.is_empty(), true);
assert_eq!(q.is_full(), false);
q.push(()).unwrap();
assert_eq!(q.len(), 1);
assert_eq!(q.is_empty(), false);
assert_eq!(q.is_full(), true);
q.pop().unwrap();
assert_eq!(q.len(), 0);
assert_eq!(q.is_empty(), true);
assert_eq!(q.is_full(), false);
}
#[test]
fn close() {
let q = ConcurrentQueue::<i32>::bounded(1);
assert_eq!(q.push(10), Ok(()));
assert!(!q.is_closed());
assert!(q.close());
assert!(q.is_closed());
assert!(!q.close());
assert_eq!(q.push(20), Err(PushError::Closed(20)));
assert_eq!(q.pop(), Ok(10));
assert_eq!(q.pop(), Err(PopError::Closed));
}
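// With capacity 1, force_push() always succeeds while the queue is open and returns the
// element it displaced, if any.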
#[test]
fn force_push() {
let q = ConcurrentQueue::<i32>::bounded(1);
assert_eq!(q.force_push(10), Ok(None));
assert!(!q.is_closed());
assert_eq!(q.force_push(20), Ok(Some(10)));
assert_eq!(q.force_push(30), Ok(Some(20)));
assert!(q.close());
assert_eq!(q.force_push(40), Err(ForcePushError(40)));
assert_eq!(q.pop(), Ok(30));
assert_eq!(q.pop(), Err(PopError::Closed));
}
#[cfg(not(target_family = "wasm"))]
#[test]
fn spsc() {
const COUNT: usize = if cfg!(miri) { 100 } else { 100_000 };
let q = ConcurrentQueue::bounded(1);
Parallel::new()
.add(|| {
for i in 0..COUNT {
loop {
if let Ok(x) = q.pop() {
assert_eq!(x, i);
break;
}
}
}
assert!(q.pop().is_err());
})
.add(|| {
for i in 0..COUNT {
while q.push(i).is_err() {}
}
})
.run();
}
#[cfg(not(target_family = "wasm"))]
#[test]
fn mpmc() {
const COUNT: usize = if cfg!(miri) { 100 } else { 25_000 };
const THREADS: usize = 1;
let q = ConcurrentQueue::<usize>::bounded(THREADS);
let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::<Vec<_>>();
Parallel::new()
.each(0..THREADS, |_| {
for _ in 0..COUNT {
let n = loop {
if let Ok(x) = q.pop() {
break x;
}
};
v[n].fetch_add(1, Ordering::SeqCst);
}
})
.each(0..THREADS, |_| {
for i in 0..COUNT {
while q.push(i).is_err() {}
}
})
.run();
for c in v {
assert_eq!(c.load(Ordering::SeqCst), THREADS);
}
}
#[cfg(not(target_family = "wasm"))]
#[test]
fn drops() {
const RUNS: usize = if cfg!(miri) { 20 } else { 100 };
const STEPS: usize = if cfg!(miri) { 100 } else { 10_000 };
static DROPS: AtomicUsize = AtomicUsize::new(0);
#[derive(Debug, PartialEq)]
struct DropCounter;
impl Drop for DropCounter {
fn drop(&mut self) {
DROPS.fetch_add(1, Ordering::SeqCst);
}
}
for _ in 0..RUNS {
let steps = fastrand::usize(..STEPS);
let additional = fastrand::usize(0..=1);
DROPS.store(0, Ordering::SeqCst);
let q = ConcurrentQueue::bounded(1);
Parallel::new()
.add(|| {
for _ in 0..steps {
while q.pop().is_err() {}
}
})
.add(|| {
for _ in 0..steps {
while q.push(DropCounter).is_err() {
DROPS.fetch_sub(1, Ordering::SeqCst);
}
}
})
.run();
for _ in 0..additional {
q.push(DropCounter).unwrap();
}
assert_eq!(DROPS.load(Ordering::SeqCst), steps);
drop(q);
assert_eq!(DROPS.load(Ordering::SeqCst), steps + additional);
}
}
#[cfg(not(target_family = "wasm"))]
#[test]
fn linearizable() {
const COUNT: usize = if cfg!(miri) { 500 } else { 25_000 };
const THREADS: usize = 4;
let q = ConcurrentQueue::bounded(1);
Parallel::new()
.each(0..THREADS / 2, |_| {
for _ in 0..COUNT {
while q.push(0).is_err() {}
q.pop().unwrap();
}
})
.each(0..THREADS / 2, |_| {
for _ in 0..COUNT {
if q.force_push(0).unwrap().is_none() {
q.pop().unwrap();
}
}
})
.run();
}
#[cfg(not(target_family = "wasm"))]
#[test]
fn spsc_ring_buffer() {
const COUNT: usize = if cfg!(miri) { 200 } else { 100_000 };
let t = AtomicUsize::new(1);
let q = ConcurrentQueue::<usize>::bounded(1);
let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::<Vec<_>>();
Parallel::new()
.add(|| loop {
match t.load(Ordering::SeqCst) {
0 if q.is_empty() => break,
_ => {
while let Ok(n) = q.pop() {
v[n].fetch_add(1, Ordering::SeqCst);
}
}
}
})
.add(|| {
for i in 0..COUNT {
if let Ok(Some(n)) = q.force_push(i) {
v[n].fetch_add(1, Ordering::SeqCst);
}
}
t.fetch_sub(1, Ordering::SeqCst);
})
.run();
for c in v {
assert_eq!(c.load(Ordering::SeqCst), 1);
}
}
#[cfg(not(target_family = "wasm"))]
#[test]
fn mpmc_ring_buffer() {
const COUNT: usize = if cfg!(miri) { 100 } else { 25_000 };
const THREADS: usize = 4;
let t = AtomicUsize::new(THREADS);
let q = ConcurrentQueue::<usize>::bounded(1);
let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::<Vec<_>>();
Parallel::new()
.each(0..THREADS, |_| loop {
match t.load(Ordering::SeqCst) {
0 if q.is_empty() => break,
_ => {
while let Ok(n) = q.pop() {
v[n].fetch_add(1, Ordering::SeqCst);
}
}
}
})
.each(0..THREADS, |_| {
for i in 0..COUNT {
if let Ok(Some(n)) = q.force_push(i) {
v[n].fetch_add(1, Ordering::SeqCst);
}
}
t.fetch_sub(1, Ordering::SeqCst);
})
.run();
for c in v {
assert_eq!(c.load(Ordering::SeqCst), THREADS);
}
}

vendor/concurrent-queue/tests/unbounded.rs vendored Normal file

@@ -0,0 +1,181 @@
#![allow(clippy::bool_assert_comparison)]
use concurrent_queue::{ConcurrentQueue, PopError, PushError};
#[cfg(not(target_family = "wasm"))]
use easy_parallel::Parallel;
#[cfg(not(target_family = "wasm"))]
use std::sync::atomic::{AtomicUsize, Ordering};
#[cfg(target_family = "wasm")]
use wasm_bindgen_test::wasm_bindgen_test as test;
#[test]
fn smoke() {
let q = ConcurrentQueue::unbounded();
q.push(7).unwrap();
assert_eq!(q.pop(), Ok(7));
q.push(8).unwrap();
assert_eq!(q.pop(), Ok(8));
assert!(q.pop().is_err());
}
#[test]
fn len_empty_full() {
let q = ConcurrentQueue::unbounded();
assert_eq!(q.len(), 0);
assert_eq!(q.is_empty(), true);
q.push(()).unwrap();
assert_eq!(q.len(), 1);
assert_eq!(q.is_empty(), false);
q.pop().unwrap();
assert_eq!(q.len(), 0);
assert_eq!(q.is_empty(), true);
}
#[test]
fn len() {
let q = ConcurrentQueue::unbounded();
assert_eq!(q.len(), 0);
for i in 0..50 {
q.push(i).unwrap();
assert_eq!(q.len(), i + 1);
}
for i in 0..50 {
q.pop().unwrap();
assert_eq!(q.len(), 50 - i - 1);
}
assert_eq!(q.len(), 0);
}
#[test]
fn close() {
let q = ConcurrentQueue::unbounded();
assert_eq!(q.push(10), Ok(()));
assert!(!q.is_closed());
assert!(q.close());
assert!(q.is_closed());
assert!(!q.close());
assert_eq!(q.push(20), Err(PushError::Closed(20)));
assert_eq!(q.pop(), Ok(10));
assert_eq!(q.pop(), Err(PopError::Closed));
}
#[cfg(not(target_family = "wasm"))]
#[test]
fn spsc() {
const COUNT: usize = if cfg!(miri) { 100 } else { 100_000 };
let q = ConcurrentQueue::unbounded();
Parallel::new()
.add(|| {
for i in 0..COUNT {
loop {
if let Ok(x) = q.pop() {
assert_eq!(x, i);
break;
}
}
}
assert!(q.pop().is_err());
})
.add(|| {
for i in 0..COUNT {
q.push(i).unwrap();
}
})
.run();
}
#[cfg(not(target_family = "wasm"))]
#[test]
fn mpmc() {
const COUNT: usize = if cfg!(miri) { 100 } else { 25_000 };
const THREADS: usize = 4;
let q = ConcurrentQueue::<usize>::unbounded();
let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::<Vec<_>>();
Parallel::new()
.each(0..THREADS, |_| {
for _ in 0..COUNT {
let n = loop {
if let Ok(x) = q.pop() {
break x;
}
};
v[n].fetch_add(1, Ordering::SeqCst);
}
})
.each(0..THREADS, |_| {
for i in 0..COUNT {
q.push(i).unwrap();
}
})
.run();
for c in v {
assert_eq!(c.load(Ordering::SeqCst), THREADS);
}
}
#[cfg(not(target_family = "wasm"))]
#[test]
fn drops() {
const RUNS: usize = if cfg!(miri) { 20 } else { 100 };
const STEPS: usize = if cfg!(miri) { 100 } else { 10_000 };
static DROPS: AtomicUsize = AtomicUsize::new(0);
#[derive(Debug, PartialEq)]
struct DropCounter;
impl Drop for DropCounter {
fn drop(&mut self) {
DROPS.fetch_add(1, Ordering::SeqCst);
}
}
for _ in 0..RUNS {
let steps = fastrand::usize(0..STEPS);
let additional = fastrand::usize(0..1000);
DROPS.store(0, Ordering::SeqCst);
let q = ConcurrentQueue::unbounded();
Parallel::new()
.add(|| {
for _ in 0..steps {
while q.pop().is_err() {}
}
})
.add(|| {
for _ in 0..steps {
q.push(DropCounter).unwrap();
}
})
.run();
for _ in 0..additional {
q.push(DropCounter).unwrap();
}
assert_eq!(DROPS.load(Ordering::SeqCst), steps);
drop(q);
assert_eq!(DROPS.load(Ordering::SeqCst), steps + additional);
}
}