Vendor dependencies for 0.3.0 release

2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

vendor/concurrent-queue/src/bounded.rs vendored Normal file

@@ -0,0 +1,408 @@
use alloc::{boxed::Box, vec::Vec};
use core::mem::MaybeUninit;
use crossbeam_utils::CachePadded;
use crate::sync::atomic::{AtomicUsize, Ordering};
use crate::sync::cell::UnsafeCell;
#[allow(unused_imports)]
use crate::sync::prelude::*;
use crate::{busy_wait, ForcePushError, PopError, PushError};
/// A slot in a queue.
struct Slot<T> {
/// The current stamp.
stamp: AtomicUsize,
/// The value in this slot.
value: UnsafeCell<MaybeUninit<T>>,
}
/// A bounded queue.
pub struct Bounded<T> {
/// The head of the queue.
///
/// This value is a "stamp" consisting of an index into the buffer, a mark bit, and a lap, but
/// packed into a single `usize`. The lower bits represent the index, while the upper bits
/// represent the lap. The mark bit in the head is always zero.
///
/// Values are popped from the head of the queue.
head: CachePadded<AtomicUsize>,
/// The tail of the queue.
///
/// This value is a "stamp" consisting of an index into the buffer, a mark bit, and a lap, but
/// packed into a single `usize`. The lower bits represent the index, while the upper bits
/// represent the lap. The mark bit indicates that the queue is closed.
///
/// Values are pushed into the tail of the queue.
tail: CachePadded<AtomicUsize>,
/// The buffer holding slots.
buffer: Box<[Slot<T>]>,
/// A stamp with the value of `{ lap: 1, mark: 0, index: 0 }`.
one_lap: usize,
/// If this bit is set in the tail, that means the queue is closed.
mark_bit: usize,
}
impl<T> Bounded<T> {
/// Creates a new bounded queue.
pub fn new(cap: usize) -> Bounded<T> {
assert!(cap > 0, "capacity must be positive");
// Head is initialized to `{ lap: 0, mark: 0, index: 0 }`.
let head = 0;
// Tail is initialized to `{ lap: 0, mark: 0, index: 0 }`.
let tail = 0;
// Allocate a buffer of `cap` slots initialized with stamps.
let mut buffer = Vec::with_capacity(cap);
for i in 0..cap {
// Set the stamp to `{ lap: 0, mark: 0, index: i }`.
buffer.push(Slot {
stamp: AtomicUsize::new(i),
value: UnsafeCell::new(MaybeUninit::uninit()),
});
}
// Compute constants `mark_bit` and `one_lap`.
let mark_bit = (cap + 1).next_power_of_two();
let one_lap = mark_bit * 2;
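        // For example, with `cap = 10`: `mark_bit = 16` (the smallest power of
        // two greater than `cap`) and `one_lap = 32`, so a stamp packs the
        // index into bits 0..=3, the mark into bit 4, and the lap into bits 5
        // and up.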
Bounded {
buffer: buffer.into(),
one_lap,
mark_bit,
head: CachePadded::new(AtomicUsize::new(head)),
tail: CachePadded::new(AtomicUsize::new(tail)),
}
}
/// Attempts to push an item into the queue.
pub fn push(&self, value: T) -> Result<(), PushError<T>> {
self.push_or_else(value, |value, tail, _, _| {
let head = self.head.load(Ordering::Relaxed);
            // If the head also lags one full lap behind the tail...
if head.wrapping_add(self.one_lap) == tail {
// ...then the queue is full.
Err(PushError::Full(value))
} else {
Ok(value)
}
})
}
/// Pushes an item into the queue, displacing another item if needed.
pub fn force_push(&self, value: T) -> Result<Option<T>, ForcePushError<T>> {
let result = self.push_or_else(value, |value, tail, new_tail, slot| {
let head = tail.wrapping_sub(self.one_lap);
let new_head = new_tail.wrapping_sub(self.one_lap);
// Try to move the head.
if self
.head
.compare_exchange_weak(head, new_head, Ordering::SeqCst, Ordering::Relaxed)
.is_ok()
{
// Move the tail.
self.tail.store(new_tail, Ordering::SeqCst);
// Swap out the old value.
                // SAFETY: This slot lies between the head and the tail, so it
                // holds an initialized value.
let old = unsafe {
slot.value
.with_mut(|slot| slot.replace(MaybeUninit::new(value)).assume_init())
};
// Update the stamp.
slot.stamp.store(tail + 1, Ordering::Release);
                // Return the displaced value wrapped in a `Full` error;
                // `force_push` maps it back to `Ok(Some(old))`.
Err(PushError::Full(old))
} else {
Ok(value)
}
});
match result {
Ok(()) => Ok(None),
Err(PushError::Full(old_value)) => Ok(Some(old_value)),
Err(PushError::Closed(value)) => Err(ForcePushError(value)),
}
}
/// Attempts to push an item into the queue, running a closure on failure.
///
/// `fail` is run when there is no more room left in the tail of the queue. The parameters of
/// this function are as follows:
///
/// - The item that failed to push.
/// - The value of `self.tail` before the new value would be inserted.
/// - The value of `self.tail` after the new value would be inserted.
/// - The slot that we attempted to push into.
///
    /// If `fail` returns `Ok(val)`, we will retry the push with `val`. Otherwise, this function
    /// will return the error.
fn push_or_else<F>(&self, mut value: T, mut fail: F) -> Result<(), PushError<T>>
where
F: FnMut(T, usize, usize, &Slot<T>) -> Result<T, PushError<T>>,
{
let mut tail = self.tail.load(Ordering::Relaxed);
loop {
// Check if the queue is closed.
if tail & self.mark_bit != 0 {
return Err(PushError::Closed(value));
}
// Deconstruct the tail.
let index = tail & (self.mark_bit - 1);
let lap = tail & !(self.one_lap - 1);
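            // For example, with `mark_bit = 16` and `one_lap = 32`, a tail of
            // 37 (0b10_0101) decodes to `index = 37 & 15 = 5` and
            // `lap = 37 & !31 = 32`, with the mark bit (16) clear.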
// Calculate the new location of the tail.
let new_tail = if index + 1 < self.buffer.len() {
// Same lap, incremented index.
// Set to `{ lap: lap, mark: 0, index: index + 1 }`.
tail + 1
} else {
// One lap forward, index wraps around to zero.
// Set to `{ lap: lap.wrapping_add(1), mark: 0, index: 0 }`.
lap.wrapping_add(self.one_lap)
};
// Inspect the corresponding slot.
let slot = &self.buffer[index];
let stamp = slot.stamp.load(Ordering::Acquire);
// If the tail and the stamp match, we may attempt to push.
if tail == stamp {
// Try moving the tail.
match self.tail.compare_exchange_weak(
tail,
new_tail,
Ordering::SeqCst,
Ordering::Relaxed,
) {
Ok(_) => {
// Write the value into the slot and update the stamp.
slot.value.with_mut(|slot| unsafe {
slot.write(MaybeUninit::new(value));
});
slot.stamp.store(tail + 1, Ordering::Release);
return Ok(());
}
Err(t) => {
tail = t;
}
}
} else if stamp.wrapping_add(self.one_lap) == tail + 1 {
crate::full_fence();
// We've failed to push; run our failure closure.
value = fail(value, tail, new_tail, slot)?;
// Loom complains if there isn't an explicit busy wait here.
#[cfg(loom)]
busy_wait();
tail = self.tail.load(Ordering::Relaxed);
} else {
// Yield because we need to wait for the stamp to get updated.
busy_wait();
tail = self.tail.load(Ordering::Relaxed);
}
}
}
/// Attempts to pop an item from the queue.
pub fn pop(&self) -> Result<T, PopError> {
let mut head = self.head.load(Ordering::Relaxed);
loop {
// Deconstruct the head.
let index = head & (self.mark_bit - 1);
let lap = head & !(self.one_lap - 1);
// Inspect the corresponding slot.
let slot = &self.buffer[index];
let stamp = slot.stamp.load(Ordering::Acquire);
            // If the stamp is ahead of the head by 1, we may attempt to pop.
if head + 1 == stamp {
let new = if index + 1 < self.buffer.len() {
// Same lap, incremented index.
// Set to `{ lap: lap, mark: 0, index: index + 1 }`.
head + 1
} else {
// One lap forward, index wraps around to zero.
// Set to `{ lap: lap.wrapping_add(1), mark: 0, index: 0 }`.
lap.wrapping_add(self.one_lap)
};
// Try moving the head.
match self.head.compare_exchange_weak(
head,
new,
Ordering::SeqCst,
Ordering::Relaxed,
) {
Ok(_) => {
// Read the value from the slot and update the stamp.
let value = slot
.value
.with_mut(|slot| unsafe { slot.read().assume_init() });
slot.stamp
.store(head.wrapping_add(self.one_lap), Ordering::Release);
return Ok(value);
}
Err(h) => {
head = h;
}
}
} else if stamp == head {
crate::full_fence();
let tail = self.tail.load(Ordering::Relaxed);
// If the tail equals the head, that means the queue is empty.
if (tail & !self.mark_bit) == head {
// Check if the queue is closed.
if tail & self.mark_bit != 0 {
return Err(PopError::Closed);
} else {
return Err(PopError::Empty);
}
}
// Loom complains if there isn't a busy-wait here.
#[cfg(loom)]
busy_wait();
head = self.head.load(Ordering::Relaxed);
} else {
// Yield because we need to wait for the stamp to get updated.
busy_wait();
head = self.head.load(Ordering::Relaxed);
}
}
}
/// Returns the number of items in the queue.
pub fn len(&self) -> usize {
loop {
// Load the tail, then load the head.
let tail = self.tail.load(Ordering::SeqCst);
let head = self.head.load(Ordering::SeqCst);
// If the tail didn't change, we've got consistent values to work with.
if self.tail.load(Ordering::SeqCst) == tail {
let hix = head & (self.mark_bit - 1);
let tix = tail & (self.mark_bit - 1);
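                // For example, with `cap = 4` (`mark_bit = 8`), a head of 3
                // (lap 0, index 3) and a tail of 17 (lap 1, index 1) give
                // `hix = 3` and `tix = 1`, so the length is 4 - 3 + 1 = 2: the
                // slots at indices 3 and 0 hold values.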
return if hix < tix {
tix - hix
} else if hix > tix {
self.buffer.len() - hix + tix
} else if (tail & !self.mark_bit) == head {
0
} else {
self.buffer.len()
};
}
}
}
/// Returns `true` if the queue is empty.
pub fn is_empty(&self) -> bool {
let head = self.head.load(Ordering::SeqCst);
let tail = self.tail.load(Ordering::SeqCst);
// Is the tail equal to the head?
//
// Note: If the head changes just before we load the tail, that means there was a moment
// when the queue was not empty, so it is safe to just return `false`.
(tail & !self.mark_bit) == head
}
/// Returns `true` if the queue is full.
pub fn is_full(&self) -> bool {
let tail = self.tail.load(Ordering::SeqCst);
let head = self.head.load(Ordering::SeqCst);
        // Is the head lagging one lap behind the tail?
//
// Note: If the tail changes just before we load the head, that means there was a moment
// when the queue was not full, so it is safe to just return `false`.
head.wrapping_add(self.one_lap) == tail & !self.mark_bit
}
/// Returns the capacity of the queue.
pub fn capacity(&self) -> usize {
self.buffer.len()
}
/// Closes the queue.
///
/// Returns `true` if this call closed the queue.
pub fn close(&self) -> bool {
let tail = self.tail.fetch_or(self.mark_bit, Ordering::SeqCst);
tail & self.mark_bit == 0
}
/// Returns `true` if the queue is closed.
pub fn is_closed(&self) -> bool {
self.tail.load(Ordering::SeqCst) & self.mark_bit != 0
}
}
impl<T> Drop for Bounded<T> {
fn drop(&mut self) {
// Get the index of the head.
let Self {
head,
tail,
buffer,
mark_bit,
..
} = self;
let mark_bit = *mark_bit;
head.with_mut(|&mut head| {
tail.with_mut(|&mut tail| {
let hix = head & (mark_bit - 1);
let tix = tail & (mark_bit - 1);
let len = if hix < tix {
tix - hix
} else if hix > tix {
buffer.len() - hix + tix
} else if (tail & !mark_bit) == head {
0
} else {
buffer.len()
};
// Loop over all slots that hold a value and drop them.
for i in 0..len {
// Compute the index of the next slot holding a value.
let index = if hix + i < buffer.len() {
hix + i
} else {
hix + i - buffer.len()
};
// Drop the value in the slot.
let slot = &buffer[index];
slot.value.with_mut(|slot| unsafe {
let value = &mut *slot;
value.as_mut_ptr().drop_in_place();
});
}
});
});
}
}

vendor/concurrent-queue/src/lib.rs vendored Normal file

@@ -0,0 +1,660 @@
//! A concurrent multi-producer multi-consumer queue.
//!
//! There are two kinds of queues:
//!
//! 1. [Bounded] queue with limited capacity.
//! 2. [Unbounded] queue with unlimited capacity.
//!
//! Queues also have the capability to get [closed] at any point. When closed, no more items can be
//! pushed into the queue, although the remaining items can still be popped.
//!
//! These features make it easy to build channels similar to [`std::sync::mpsc`] on top of this
//! crate.
//!
//! # Examples
//!
//! ```
//! use concurrent_queue::ConcurrentQueue;
//!
//! let q = ConcurrentQueue::unbounded();
//! q.push(1).unwrap();
//! q.push(2).unwrap();
//!
//! assert_eq!(q.pop(), Ok(1));
//! assert_eq!(q.pop(), Ok(2));
//! ```
//!
//! # Features
//!
//! `concurrent-queue` uses an `std` default feature. With this feature enabled, this crate will
//! use [`std::thread::yield_now`] to avoid busy waiting in tight loops. However, with this
//! feature disabled, [`core::hint::spin_loop`] will be used instead. Disabling `std` will allow
//! this crate to be used on `no_std` platforms at the potential expense of more busy waiting.
//!
//! There is also a `portable-atomic` feature, which uses a polyfill from the
//! [`portable-atomic`] crate to provide atomic operations on platforms that do not support them.
//! See the [`README`] for the [`portable-atomic`] crate for more information on how to use it.
//! Note that even with this feature enabled, `concurrent-queue` still requires a global allocator
//! to be available. See the documentation for the [`std::alloc::GlobalAlloc`] trait for more
//! information.
//!
//! [Bounded]: `ConcurrentQueue::bounded()`
//! [Unbounded]: `ConcurrentQueue::unbounded()`
//! [closed]: `ConcurrentQueue::close()`
//! [`portable-atomic`]: https://crates.io/crates/portable-atomic
//! [`README`]: https://github.com/taiki-e/portable-atomic/blob/main/README.md#optional-cfg
#![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)]
#![no_std]
#![doc(
html_favicon_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png"
)]
#![doc(
html_logo_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png"
)]
extern crate alloc;
#[cfg(feature = "std")]
extern crate std;
use core::fmt;
use core::panic::{RefUnwindSafe, UnwindSafe};
use sync::atomic::{self, Ordering};
#[cfg(feature = "std")]
use std::error;
use crate::bounded::Bounded;
use crate::single::Single;
use crate::sync::busy_wait;
use crate::unbounded::Unbounded;
mod bounded;
mod single;
mod unbounded;
mod sync;
/// Make the given function const if the given condition is true.
macro_rules! const_fn {
(
const_if: #[cfg($($cfg:tt)+)];
$(#[$($attr:tt)*])*
$vis:vis const fn $($rest:tt)*
) => {
#[cfg($($cfg)+)]
$(#[$($attr)*])*
$vis const fn $($rest)*
#[cfg(not($($cfg)+))]
$(#[$($attr)*])*
$vis fn $($rest)*
};
}
pub(crate) use const_fn;
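// Illustrative expansion of `const_fn!` above: with `const_if: #[cfg(not(loom))]`,
// the macro emits a `const fn` in normal builds and a plain `fn` under loom,
// whose atomic types lack `const` constructors.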
/// A concurrent queue.
///
/// # Examples
///
/// ```
/// use concurrent_queue::{ConcurrentQueue, PopError, PushError};
///
/// let q = ConcurrentQueue::bounded(2);
///
/// assert_eq!(q.push('a'), Ok(()));
/// assert_eq!(q.push('b'), Ok(()));
/// assert_eq!(q.push('c'), Err(PushError::Full('c')));
///
/// assert_eq!(q.pop(), Ok('a'));
/// assert_eq!(q.pop(), Ok('b'));
/// assert_eq!(q.pop(), Err(PopError::Empty));
/// ```
pub struct ConcurrentQueue<T>(Inner<T>);
unsafe impl<T: Send> Send for ConcurrentQueue<T> {}
unsafe impl<T: Send> Sync for ConcurrentQueue<T> {}
impl<T> UnwindSafe for ConcurrentQueue<T> {}
impl<T> RefUnwindSafe for ConcurrentQueue<T> {}
#[allow(clippy::large_enum_variant)]
enum Inner<T> {
Single(Single<T>),
Bounded(Bounded<T>),
Unbounded(Unbounded<T>),
}
impl<T> ConcurrentQueue<T> {
/// Creates a new bounded queue.
///
/// The queue allocates enough space for `cap` items.
///
/// # Panics
///
/// If the capacity is zero, this constructor will panic.
///
/// # Examples
///
/// ```
/// use concurrent_queue::ConcurrentQueue;
///
/// let q = ConcurrentQueue::<i32>::bounded(100);
/// ```
pub fn bounded(cap: usize) -> ConcurrentQueue<T> {
if cap == 1 {
ConcurrentQueue(Inner::Single(Single::new()))
} else {
ConcurrentQueue(Inner::Bounded(Bounded::new(cap)))
}
}
const_fn!(
const_if: #[cfg(not(loom))];
/// Creates a new unbounded queue.
///
/// # Examples
///
/// ```
/// use concurrent_queue::ConcurrentQueue;
///
/// let q = ConcurrentQueue::<i32>::unbounded();
/// ```
pub const fn unbounded() -> ConcurrentQueue<T> {
ConcurrentQueue(Inner::Unbounded(Unbounded::new()))
}
);
/// Attempts to push an item into the queue.
///
    /// If the queue is full or closed, the item is returned as an error.
///
/// # Examples
///
/// ```
/// use concurrent_queue::{ConcurrentQueue, PushError};
///
/// let q = ConcurrentQueue::bounded(1);
///
/// // Push succeeds because there is space in the queue.
/// assert_eq!(q.push(10), Ok(()));
///
/// // Push errors because the queue is now full.
/// assert_eq!(q.push(20), Err(PushError::Full(20)));
///
/// // Close the queue, which will prevent further pushes.
/// q.close();
///
/// // Pushing now errors indicating the queue is closed.
/// assert_eq!(q.push(20), Err(PushError::Closed(20)));
///
/// // Pop the single item in the queue.
/// assert_eq!(q.pop(), Ok(10));
///
/// // Even though there is space, no more items can be pushed.
/// assert_eq!(q.push(20), Err(PushError::Closed(20)));
/// ```
pub fn push(&self, value: T) -> Result<(), PushError<T>> {
match &self.0 {
Inner::Single(q) => q.push(value),
Inner::Bounded(q) => q.push(value),
Inner::Unbounded(q) => q.push(value),
}
}
/// Push an element into the queue, potentially displacing another element.
///
/// Attempts to push an element into the queue. If the queue is full, one item from the
/// queue is replaced with the provided item. The displaced item is returned as `Some(T)`.
/// If the queue is closed, an error is returned.
///
/// # Examples
///
/// ```
/// use concurrent_queue::{ConcurrentQueue, ForcePushError, PushError};
///
/// let q = ConcurrentQueue::bounded(3);
///
/// // We can push to the queue.
/// for i in 1..=3 {
/// assert_eq!(q.force_push(i), Ok(None));
/// }
///
/// // Push errors because the queue is now full.
/// assert_eq!(q.push(4), Err(PushError::Full(4)));
///
/// // Pushing a new value replaces the old ones.
/// assert_eq!(q.force_push(5), Ok(Some(1)));
/// assert_eq!(q.force_push(6), Ok(Some(2)));
///
/// // Close the queue to stop further pushes.
/// q.close();
///
/// // Pushing will return an error.
/// assert_eq!(q.force_push(7), Err(ForcePushError(7)));
///
/// // Popping items will return the force-pushed ones.
/// assert_eq!(q.pop(), Ok(3));
/// assert_eq!(q.pop(), Ok(5));
/// assert_eq!(q.pop(), Ok(6));
/// ```
pub fn force_push(&self, value: T) -> Result<Option<T>, ForcePushError<T>> {
match &self.0 {
Inner::Single(q) => q.force_push(value),
Inner::Bounded(q) => q.force_push(value),
Inner::Unbounded(q) => match q.push(value) {
Ok(()) => Ok(None),
Err(PushError::Closed(value)) => Err(ForcePushError(value)),
Err(PushError::Full(_)) => unreachable!(),
},
}
}
/// Attempts to pop an item from the queue.
///
/// If the queue is empty, an error is returned.
///
/// # Examples
///
/// ```
/// use concurrent_queue::{ConcurrentQueue, PopError};
///
/// let q = ConcurrentQueue::bounded(1);
///
/// // Pop errors when the queue is empty.
/// assert_eq!(q.pop(), Err(PopError::Empty));
///
/// // Push one item and close the queue.
/// assert_eq!(q.push(10), Ok(()));
/// q.close();
///
/// // Remaining items can be popped.
/// assert_eq!(q.pop(), Ok(10));
///
/// // Again, pop errors when the queue is empty,
/// // but now also indicates that the queue is closed.
/// assert_eq!(q.pop(), Err(PopError::Closed));
/// ```
pub fn pop(&self) -> Result<T, PopError> {
match &self.0 {
Inner::Single(q) => q.pop(),
Inner::Bounded(q) => q.pop(),
Inner::Unbounded(q) => q.pop(),
}
}
/// Get an iterator over the items in the queue.
///
/// The iterator will continue until the queue is empty or closed. It will never block;
/// if the queue is empty, the iterator will return `None`. If new items are pushed into
/// the queue, the iterator may return `Some` in the future after returning `None`.
///
/// # Examples
///
/// ```
/// use concurrent_queue::ConcurrentQueue;
///
/// let q = ConcurrentQueue::bounded(5);
/// q.push(1).unwrap();
/// q.push(2).unwrap();
/// q.push(3).unwrap();
///
/// let mut iter = q.try_iter();
/// assert_eq!(iter.by_ref().sum::<i32>(), 6);
/// assert_eq!(iter.next(), None);
///
/// // Pushing more items will make them available to the iterator.
/// q.push(4).unwrap();
/// assert_eq!(iter.next(), Some(4));
/// assert_eq!(iter.next(), None);
/// ```
pub fn try_iter(&self) -> TryIter<'_, T> {
TryIter { queue: self }
}
/// Returns `true` if the queue is empty.
///
/// # Examples
///
/// ```
/// use concurrent_queue::ConcurrentQueue;
///
/// let q = ConcurrentQueue::<i32>::unbounded();
///
/// assert!(q.is_empty());
/// q.push(1).unwrap();
/// assert!(!q.is_empty());
/// ```
pub fn is_empty(&self) -> bool {
match &self.0 {
Inner::Single(q) => q.is_empty(),
Inner::Bounded(q) => q.is_empty(),
Inner::Unbounded(q) => q.is_empty(),
}
}
/// Returns `true` if the queue is full.
///
/// An unbounded queue is never full.
///
/// # Examples
///
/// ```
/// use concurrent_queue::ConcurrentQueue;
///
/// let q = ConcurrentQueue::bounded(1);
///
/// assert!(!q.is_full());
/// q.push(1).unwrap();
/// assert!(q.is_full());
/// ```
pub fn is_full(&self) -> bool {
match &self.0 {
Inner::Single(q) => q.is_full(),
Inner::Bounded(q) => q.is_full(),
Inner::Unbounded(q) => q.is_full(),
}
}
/// Returns the number of items in the queue.
///
/// # Examples
///
/// ```
/// use concurrent_queue::ConcurrentQueue;
///
/// let q = ConcurrentQueue::unbounded();
/// assert_eq!(q.len(), 0);
///
/// assert_eq!(q.push(10), Ok(()));
/// assert_eq!(q.len(), 1);
///
/// assert_eq!(q.push(20), Ok(()));
/// assert_eq!(q.len(), 2);
/// ```
pub fn len(&self) -> usize {
match &self.0 {
Inner::Single(q) => q.len(),
Inner::Bounded(q) => q.len(),
Inner::Unbounded(q) => q.len(),
}
}
/// Returns the capacity of the queue.
///
/// Unbounded queues have infinite capacity, represented as [`None`].
///
/// # Examples
///
/// ```
/// use concurrent_queue::ConcurrentQueue;
///
/// let q = ConcurrentQueue::<i32>::bounded(7);
/// assert_eq!(q.capacity(), Some(7));
///
/// let q = ConcurrentQueue::<i32>::unbounded();
/// assert_eq!(q.capacity(), None);
/// ```
pub fn capacity(&self) -> Option<usize> {
match &self.0 {
Inner::Single(_) => Some(1),
Inner::Bounded(q) => Some(q.capacity()),
Inner::Unbounded(_) => None,
}
}
/// Closes the queue.
///
/// Returns `true` if this call closed the queue, or `false` if it was already closed.
///
/// When a queue is closed, no more items can be pushed but the remaining items can still be
/// popped.
///
/// # Examples
///
/// ```
/// use concurrent_queue::{ConcurrentQueue, PopError, PushError};
///
/// let q = ConcurrentQueue::unbounded();
/// assert_eq!(q.push(10), Ok(()));
///
/// assert!(q.close()); // `true` because this call closes the queue.
/// assert!(!q.close()); // `false` because the queue is already closed.
///
/// // Cannot push any more items when closed.
/// assert_eq!(q.push(20), Err(PushError::Closed(20)));
///
/// // Remaining items can still be popped.
/// assert_eq!(q.pop(), Ok(10));
///
/// // When no more items are present, the error is `Closed`.
/// assert_eq!(q.pop(), Err(PopError::Closed));
/// ```
pub fn close(&self) -> bool {
match &self.0 {
Inner::Single(q) => q.close(),
Inner::Bounded(q) => q.close(),
Inner::Unbounded(q) => q.close(),
}
}
/// Returns `true` if the queue is closed.
///
/// # Examples
///
/// ```
/// use concurrent_queue::ConcurrentQueue;
///
/// let q = ConcurrentQueue::<i32>::unbounded();
///
/// assert!(!q.is_closed());
/// q.close();
/// assert!(q.is_closed());
/// ```
pub fn is_closed(&self) -> bool {
match &self.0 {
Inner::Single(q) => q.is_closed(),
Inner::Bounded(q) => q.is_closed(),
Inner::Unbounded(q) => q.is_closed(),
}
}
}
impl<T> fmt::Debug for ConcurrentQueue<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("ConcurrentQueue")
.field("len", &self.len())
.field("capacity", &self.capacity())
.field("is_closed", &self.is_closed())
.finish()
}
}
/// An iterator that pops items from a [`ConcurrentQueue`].
///
/// This iterator will never block; it will return `None` once the queue has
/// been exhausted. Calling `next` after `None` may yield `Some(item)` if more items
/// are pushed to the queue.
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct TryIter<'a, T> {
queue: &'a ConcurrentQueue<T>,
}
impl<T> fmt::Debug for TryIter<'_, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("Iter").field(&self.queue).finish()
}
}
impl<T> Iterator for TryIter<'_, T> {
type Item = T;
fn next(&mut self) -> Option<Self::Item> {
self.queue.pop().ok()
}
}
/// Error which occurs when popping from an empty queue.
#[derive(Clone, Copy, Eq, PartialEq)]
pub enum PopError {
/// The queue is empty but not closed.
Empty,
/// The queue is empty and closed.
Closed,
}
impl PopError {
/// Returns `true` if the queue is empty but not closed.
pub fn is_empty(&self) -> bool {
match self {
PopError::Empty => true,
PopError::Closed => false,
}
}
/// Returns `true` if the queue is empty and closed.
pub fn is_closed(&self) -> bool {
match self {
PopError::Empty => false,
PopError::Closed => true,
}
}
}
#[cfg(feature = "std")]
impl error::Error for PopError {}
impl fmt::Debug for PopError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
PopError::Empty => write!(f, "Empty"),
PopError::Closed => write!(f, "Closed"),
}
}
}
impl fmt::Display for PopError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
PopError::Empty => write!(f, "Empty"),
PopError::Closed => write!(f, "Closed"),
}
}
}
/// Error which occurs when pushing into a full or closed queue.
#[derive(Clone, Copy, Eq, PartialEq)]
pub enum PushError<T> {
/// The queue is full but not closed.
Full(T),
/// The queue is closed.
Closed(T),
}
impl<T> PushError<T> {
/// Unwraps the item that couldn't be pushed.
pub fn into_inner(self) -> T {
match self {
PushError::Full(t) => t,
PushError::Closed(t) => t,
}
}
/// Returns `true` if the queue is full but not closed.
pub fn is_full(&self) -> bool {
match self {
PushError::Full(_) => true,
PushError::Closed(_) => false,
}
}
/// Returns `true` if the queue is closed.
pub fn is_closed(&self) -> bool {
match self {
PushError::Full(_) => false,
PushError::Closed(_) => true,
}
}
}
#[cfg(feature = "std")]
impl<T: fmt::Debug> error::Error for PushError<T> {}
impl<T: fmt::Debug> fmt::Debug for PushError<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
PushError::Full(t) => f.debug_tuple("Full").field(t).finish(),
PushError::Closed(t) => f.debug_tuple("Closed").field(t).finish(),
}
}
}
impl<T> fmt::Display for PushError<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
PushError::Full(_) => write!(f, "Full"),
PushError::Closed(_) => write!(f, "Closed"),
}
}
}
/// Error that occurs when force-pushing into a full queue.
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct ForcePushError<T>(pub T);
impl<T> ForcePushError<T> {
/// Return the inner value that failed to be force-pushed.
pub fn into_inner(self) -> T {
self.0
}
}
impl<T: fmt::Debug> fmt::Debug for ForcePushError<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("ForcePushError").field(&self.0).finish()
}
}
impl<T> fmt::Display for ForcePushError<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Closed")
}
}
#[cfg(feature = "std")]
impl<T: fmt::Debug> error::Error for ForcePushError<T> {}
/// Equivalent to `atomic::fence(Ordering::SeqCst)`, but in some cases faster.
#[inline]
fn full_fence() {
#[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), not(miri), not(loom)))]
{
use core::{arch::asm, cell::UnsafeCell};
// HACK(stjepang): On x86 architectures there are two different ways of executing
// a `SeqCst` fence.
//
// 1. `atomic::fence(SeqCst)`, which compiles into a `mfence` instruction.
// 2. A `lock <op>` instruction.
//
// Both instructions have the effect of a full barrier, but empirical benchmarks have shown
// that the second one is sometimes a bit faster.
let a = UnsafeCell::new(0_usize);
        // It is common to use `lock or` here, but when using a local variable, `lock not`, which
        // does not change the flags, should be slightly more efficient.
// Refs: https://www.felixcloutier.com/x86/not
unsafe {
#[cfg(target_pointer_width = "64")]
asm!("lock not qword ptr [{0}]", in(reg) a.get(), options(nostack, preserves_flags));
#[cfg(target_pointer_width = "32")]
asm!("lock not dword ptr [{0:e}]", in(reg) a.get(), options(nostack, preserves_flags));
}
return;
}
#[allow(unreachable_code)]
{
atomic::fence(Ordering::SeqCst);
}
}

vendor/concurrent-queue/src/single.rs vendored Normal file

@@ -0,0 +1,187 @@
use core::mem::MaybeUninit;
use core::ptr;
use crate::sync::atomic::{AtomicUsize, Ordering};
use crate::sync::cell::UnsafeCell;
#[allow(unused_imports)]
use crate::sync::prelude::*;
use crate::{busy_wait, ForcePushError, PopError, PushError};
const LOCKED: usize = 1 << 0;
const PUSHED: usize = 1 << 1;
const CLOSED: usize = 1 << 2;
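// State transitions, for reference: an open, empty queue has state 0; a
// successful push goes 0 -> LOCKED | PUSHED -> PUSHED, a successful pop goes
// PUSHED -> LOCKED -> 0, and `close` sets CLOSED on top of the current state.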
/// A single-element queue.
pub struct Single<T> {
state: AtomicUsize,
slot: UnsafeCell<MaybeUninit<T>>,
}
impl<T> Single<T> {
/// Creates a new single-element queue.
pub fn new() -> Single<T> {
Single {
state: AtomicUsize::new(0),
slot: UnsafeCell::new(MaybeUninit::uninit()),
}
}
/// Attempts to push an item into the queue.
pub fn push(&self, value: T) -> Result<(), PushError<T>> {
// Lock and fill the slot.
let state = self
.state
.compare_exchange(0, LOCKED | PUSHED, Ordering::SeqCst, Ordering::SeqCst)
.unwrap_or_else(|x| x);
if state == 0 {
// Write the value and unlock.
self.slot.with_mut(|slot| unsafe {
slot.write(MaybeUninit::new(value));
});
self.state.fetch_and(!LOCKED, Ordering::Release);
Ok(())
} else if state & CLOSED != 0 {
Err(PushError::Closed(value))
} else {
Err(PushError::Full(value))
}
}
/// Attempts to push an item into the queue, displacing another if necessary.
pub fn force_push(&self, value: T) -> Result<Option<T>, ForcePushError<T>> {
// Attempt to lock the slot.
let mut state = 0;
loop {
// Lock the slot.
let prev = self
.state
.compare_exchange(state, LOCKED | PUSHED, Ordering::SeqCst, Ordering::SeqCst)
.unwrap_or_else(|x| x);
if prev & CLOSED != 0 {
return Err(ForcePushError(value));
}
if prev == state {
// If the value was pushed, swap out the value.
let prev_value = if prev & PUSHED == 0 {
// SAFETY: write is safe because we have locked the state.
self.slot.with_mut(|slot| unsafe {
slot.write(MaybeUninit::new(value));
});
None
} else {
// SAFETY: replace is safe because we have locked the state, and
// assume_init is safe because we have checked that the value was pushed.
let prev_value = unsafe {
self.slot.with_mut(move |slot| {
ptr::replace(slot, MaybeUninit::new(value)).assume_init()
})
};
Some(prev_value)
};
// We can unlock the slot now.
self.state.fetch_and(!LOCKED, Ordering::Release);
// Return the old value.
return Ok(prev_value);
}
            // Retry the CAS from the state we just observed, waiting out the lock if it is held.
if prev & LOCKED == 0 {
state = prev;
} else {
// State is locked.
busy_wait();
state = prev & !LOCKED;
}
}
}
/// Attempts to pop an item from the queue.
pub fn pop(&self) -> Result<T, PopError> {
let mut state = PUSHED;
loop {
// Lock and empty the slot.
let prev = self
.state
.compare_exchange(
state,
(state | LOCKED) & !PUSHED,
Ordering::SeqCst,
Ordering::SeqCst,
)
.unwrap_or_else(|x| x);
if prev == state {
// Read the value and unlock.
let value = self
.slot
.with_mut(|slot| unsafe { slot.read().assume_init() });
self.state.fetch_and(!LOCKED, Ordering::Release);
return Ok(value);
}
if prev & PUSHED == 0 {
if prev & CLOSED == 0 {
return Err(PopError::Empty);
} else {
return Err(PopError::Closed);
}
}
if prev & LOCKED == 0 {
state = prev;
} else {
busy_wait();
state = prev & !LOCKED;
}
}
}
/// Returns the number of items in the queue.
pub fn len(&self) -> usize {
usize::from(self.state.load(Ordering::SeqCst) & PUSHED != 0)
}
/// Returns `true` if the queue is empty.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns `true` if the queue is full.
pub fn is_full(&self) -> bool {
self.len() == 1
}
/// Closes the queue.
///
/// Returns `true` if this call closed the queue.
pub fn close(&self) -> bool {
let state = self.state.fetch_or(CLOSED, Ordering::SeqCst);
state & CLOSED == 0
}
/// Returns `true` if the queue is closed.
pub fn is_closed(&self) -> bool {
self.state.load(Ordering::SeqCst) & CLOSED != 0
}
}
impl<T> Drop for Single<T> {
fn drop(&mut self) {
// Drop the value in the slot.
let Self { state, slot } = self;
state.with_mut(|state| {
if *state & PUSHED != 0 {
slot.with_mut(|slot| unsafe {
let value = &mut *slot;
value.as_mut_ptr().drop_in_place();
});
}
});
}
}

vendor/concurrent-queue/src/sync.rs vendored Normal file

@@ -0,0 +1,114 @@
//! Synchronization facade to choose between `core` primitives and `loom` primitives.
#[cfg(all(feature = "portable-atomic", not(loom)))]
mod sync_impl {
pub(crate) use core::cell;
pub(crate) use portable_atomic as atomic;
#[cfg(not(feature = "std"))]
pub(crate) use atomic::hint::spin_loop;
#[cfg(feature = "std")]
pub(crate) use std::thread::yield_now;
}
#[cfg(all(not(feature = "portable-atomic"), not(loom)))]
mod sync_impl {
pub(crate) use core::cell;
pub(crate) use core::sync::atomic;
#[cfg(not(feature = "std"))]
#[inline]
pub(crate) fn spin_loop() {
#[allow(deprecated)]
atomic::spin_loop_hint();
}
#[cfg(feature = "std")]
pub(crate) use std::thread::yield_now;
}
#[cfg(loom)]
mod sync_impl {
pub(crate) use loom::cell;
pub(crate) mod atomic {
pub(crate) use loom::sync::atomic::*;
}
#[cfg(not(feature = "std"))]
pub(crate) use loom::hint::spin_loop;
#[cfg(feature = "std")]
pub(crate) use loom::thread::yield_now;
}
pub(crate) use sync_impl::*;
/// Notify the CPU that we are currently busy-waiting.
#[inline]
pub(crate) fn busy_wait() {
#[cfg(feature = "std")]
yield_now();
#[cfg(not(feature = "std"))]
spin_loop();
}
#[cfg(loom)]
pub(crate) mod prelude {}
#[cfg(not(loom))]
pub(crate) mod prelude {
use super::{atomic, cell};
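    // These shims let the queue implementations call `with_mut` on cells and
    // atomics uniformly; under loom the same calls resolve to the `with_mut`
    // methods that loom's own types provide, so the prelude is empty there.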
/// Emulate `loom::UnsafeCell`'s API.
pub(crate) trait UnsafeCellExt {
type Value;
fn with_mut<R, F>(&self, f: F) -> R
where
F: FnOnce(*mut Self::Value) -> R;
}
impl<T> UnsafeCellExt for cell::UnsafeCell<T> {
type Value = T;
fn with_mut<R, F>(&self, f: F) -> R
where
F: FnOnce(*mut Self::Value) -> R,
{
f(self.get())
}
}
/// Emulate `loom::Atomic*`'s API.
pub(crate) trait AtomicExt {
type Value;
fn with_mut<R, F>(&mut self, f: F) -> R
where
F: FnOnce(&mut Self::Value) -> R;
}
impl AtomicExt for atomic::AtomicUsize {
type Value = usize;
fn with_mut<R, F>(&mut self, f: F) -> R
where
F: FnOnce(&mut Self::Value) -> R,
{
f(self.get_mut())
}
}
impl<T> AtomicExt for atomic::AtomicPtr<T> {
type Value = *mut T;
fn with_mut<R, F>(&mut self, f: F) -> R
where
F: FnOnce(&mut Self::Value) -> R,
{
f(self.get_mut())
}
}
}

vendor/concurrent-queue/src/unbounded.rs vendored Normal file

@@ -0,0 +1,452 @@
use alloc::boxed::Box;
use core::mem::MaybeUninit;
use core::ptr;
use crossbeam_utils::CachePadded;
use crate::const_fn;
use crate::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
use crate::sync::cell::UnsafeCell;
#[allow(unused_imports)]
use crate::sync::prelude::*;
use crate::{busy_wait, PopError, PushError};
// Bits indicating the state of a slot:
// * If a value has been written into the slot, `WRITE` is set.
// * If a value has been read from the slot, `READ` is set.
// * If the block is being destroyed, `DESTROY` is set.
const WRITE: usize = 1;
const READ: usize = 2;
const DESTROY: usize = 4;
// Each block covers one "lap" of indices.
const LAP: usize = 32;
// The maximum number of items a block can hold.
const BLOCK_CAP: usize = LAP - 1;
// How many lower bits are reserved for metadata.
const SHIFT: usize = 1;
// Has two different purposes:
// * If set in head, indicates that the block is not the last one.
// * If set in tail, indicates that the queue is closed.
const MARK_BIT: usize = 1;
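// An index is an item count shifted left by `SHIFT`, with `MARK_BIT` stored in
// the low bit. For example, the item at position 5 has index 5 << SHIFT = 10
// and occupies offset (10 >> SHIFT) % LAP = 5 of the first block.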
/// A slot in a block.
struct Slot<T> {
/// The value.
value: UnsafeCell<MaybeUninit<T>>,
/// The state of the slot.
state: AtomicUsize,
}
impl<T> Slot<T> {
#[cfg(not(loom))]
const UNINIT: Slot<T> = Slot {
value: UnsafeCell::new(MaybeUninit::uninit()),
state: AtomicUsize::new(0),
};
#[cfg(not(loom))]
fn uninit_block() -> [Slot<T>; BLOCK_CAP] {
[Self::UNINIT; BLOCK_CAP]
}
#[cfg(loom)]
fn uninit_block() -> [Slot<T>; BLOCK_CAP] {
        // Repeat this expression 31 times.
        // Update this if BLOCK_CAP changes.
macro_rules! repeat_31 {
($e: expr) => {
[
$e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e,
$e, $e, $e, $e, $e, $e, $e, $e, $e, $e, $e,
]
};
}
repeat_31!(Slot {
value: UnsafeCell::new(MaybeUninit::uninit()),
state: AtomicUsize::new(0),
})
}
/// Waits until a value is written into the slot.
fn wait_write(&self) {
while self.state.load(Ordering::Acquire) & WRITE == 0 {
busy_wait();
}
}
}
/// A block in a linked list.
///
/// Each block in the list can hold up to `BLOCK_CAP` values.
struct Block<T> {
/// The next block in the linked list.
next: AtomicPtr<Block<T>>,
/// Slots for values.
slots: [Slot<T>; BLOCK_CAP],
}
impl<T> Block<T> {
/// Creates an empty block.
fn new() -> Block<T> {
Block {
next: AtomicPtr::new(ptr::null_mut()),
slots: Slot::uninit_block(),
}
}
/// Waits until the next pointer is set.
fn wait_next(&self) -> *mut Block<T> {
loop {
let next = self.next.load(Ordering::Acquire);
if !next.is_null() {
return next;
}
busy_wait();
}
}
/// Sets the `DESTROY` bit in slots starting from `start` and destroys the block.
unsafe fn destroy(this: *mut Block<T>, start: usize) {
// It is not necessary to set the `DESTROY` bit in the last slot because that slot has
// begun destruction of the block.
for i in start..BLOCK_CAP - 1 {
let slot = (*this).slots.get_unchecked(i);
// Mark the `DESTROY` bit if a thread is still using the slot.
if slot.state.load(Ordering::Acquire) & READ == 0
&& slot.state.fetch_or(DESTROY, Ordering::AcqRel) & READ == 0
{
// If a thread is still using the slot, it will continue destruction of the block.
return;
}
}
// No thread is using the block, now it is safe to destroy it.
drop(Box::from_raw(this));
}
}
/// A position in a queue.
struct Position<T> {
/// The index in the queue.
index: AtomicUsize,
/// The block in the linked list.
block: AtomicPtr<Block<T>>,
}
/// An unbounded queue.
pub struct Unbounded<T> {
/// The head of the queue.
head: CachePadded<Position<T>>,
/// The tail of the queue.
tail: CachePadded<Position<T>>,
}
impl<T> Unbounded<T> {
const_fn!(
const_if: #[cfg(not(loom))];
/// Creates a new unbounded queue.
pub const fn new() -> Unbounded<T> {
Unbounded {
head: CachePadded::new(Position {
block: AtomicPtr::new(ptr::null_mut()),
index: AtomicUsize::new(0),
}),
tail: CachePadded::new(Position {
block: AtomicPtr::new(ptr::null_mut()),
index: AtomicUsize::new(0),
}),
}
}
);
/// Pushes an item into the queue.
pub fn push(&self, value: T) -> Result<(), PushError<T>> {
let mut tail = self.tail.index.load(Ordering::Acquire);
let mut block = self.tail.block.load(Ordering::Acquire);
let mut next_block = None;
loop {
// Check if the queue is closed.
if tail & MARK_BIT != 0 {
return Err(PushError::Closed(value));
}
// Calculate the offset of the index into the block.
let offset = (tail >> SHIFT) % LAP;
// If we reached the end of the block, wait until the next one is installed.
if offset == BLOCK_CAP {
busy_wait();
tail = self.tail.index.load(Ordering::Acquire);
block = self.tail.block.load(Ordering::Acquire);
continue;
}
// If we're going to have to install the next block, allocate it in advance in order to
// make the wait for other threads as short as possible.
if offset + 1 == BLOCK_CAP && next_block.is_none() {
next_block = Some(Box::new(Block::<T>::new()));
}
// If this is the first value to be pushed into the queue, we need to allocate the
// first block and install it.
if block.is_null() {
let new = Box::into_raw(Box::new(Block::<T>::new()));
if self
.tail
.block
.compare_exchange(block, new, Ordering::Release, Ordering::Relaxed)
.is_ok()
{
self.head.block.store(new, Ordering::Release);
block = new;
} else {
next_block = unsafe { Some(Box::from_raw(new)) };
tail = self.tail.index.load(Ordering::Acquire);
block = self.tail.block.load(Ordering::Acquire);
continue;
}
}
let new_tail = tail + (1 << SHIFT);
// Try advancing the tail forward.
match self.tail.index.compare_exchange_weak(
tail,
new_tail,
Ordering::SeqCst,
Ordering::Acquire,
) {
Ok(_) => unsafe {
// If we've reached the end of the block, install the next one.
if offset + 1 == BLOCK_CAP {
let next_block = Box::into_raw(next_block.unwrap());
self.tail.block.store(next_block, Ordering::Release);
self.tail.index.fetch_add(1 << SHIFT, Ordering::Release);
(*block).next.store(next_block, Ordering::Release);
}
// Write the value into the slot.
let slot = (*block).slots.get_unchecked(offset);
slot.value.with_mut(|slot| {
slot.write(MaybeUninit::new(value));
});
slot.state.fetch_or(WRITE, Ordering::Release);
return Ok(());
},
Err(t) => {
tail = t;
block = self.tail.block.load(Ordering::Acquire);
}
}
}
}
/// Pops an item from the queue.
pub fn pop(&self) -> Result<T, PopError> {
let mut head = self.head.index.load(Ordering::Acquire);
let mut block = self.head.block.load(Ordering::Acquire);
loop {
// Calculate the offset of the index into the block.
let offset = (head >> SHIFT) % LAP;
// If we reached the end of the block, wait until the next one is installed.
if offset == BLOCK_CAP {
busy_wait();
head = self.head.index.load(Ordering::Acquire);
block = self.head.block.load(Ordering::Acquire);
continue;
}
let mut new_head = head + (1 << SHIFT);
if new_head & MARK_BIT == 0 {
crate::full_fence();
let tail = self.tail.index.load(Ordering::Relaxed);
// If the tail equals the head, that means the queue is empty.
if head >> SHIFT == tail >> SHIFT {
// Check if the queue is closed.
if tail & MARK_BIT != 0 {
return Err(PopError::Closed);
} else {
return Err(PopError::Empty);
}
}
// If head and tail are not in the same block, set `MARK_BIT` in head.
if (head >> SHIFT) / LAP != (tail >> SHIFT) / LAP {
new_head |= MARK_BIT;
}
}
// The block can be null here only if the first push operation is in progress.
if block.is_null() {
busy_wait();
head = self.head.index.load(Ordering::Acquire);
block = self.head.block.load(Ordering::Acquire);
continue;
}
// Try moving the head index forward.
match self.head.index.compare_exchange_weak(
head,
new_head,
Ordering::SeqCst,
Ordering::Acquire,
) {
Ok(_) => unsafe {
// If we've reached the end of the block, move to the next one.
if offset + 1 == BLOCK_CAP {
let next = (*block).wait_next();
let mut next_index = (new_head & !MARK_BIT).wrapping_add(1 << SHIFT);
if !(*next).next.load(Ordering::Relaxed).is_null() {
next_index |= MARK_BIT;
}
self.head.block.store(next, Ordering::Release);
self.head.index.store(next_index, Ordering::Release);
}
// Read the value.
let slot = (*block).slots.get_unchecked(offset);
slot.wait_write();
let value = slot.value.with_mut(|slot| slot.read().assume_init());
// Destroy the block if we've reached the end, or if another thread wanted to
// destroy but couldn't because we were busy reading from the slot.
if offset + 1 == BLOCK_CAP {
Block::destroy(block, 0);
} else if slot.state.fetch_or(READ, Ordering::AcqRel) & DESTROY != 0 {
Block::destroy(block, offset + 1);
}
return Ok(value);
},
Err(h) => {
head = h;
block = self.head.block.load(Ordering::Acquire);
}
}
}
}
/// Returns the number of items in the queue.
pub fn len(&self) -> usize {
loop {
// Load the tail index, then load the head index.
let mut tail = self.tail.index.load(Ordering::SeqCst);
let mut head = self.head.index.load(Ordering::SeqCst);
// If the tail index didn't change, we've got consistent indices to work with.
if self.tail.index.load(Ordering::SeqCst) == tail {
// Erase the lower bits.
tail &= !((1 << SHIFT) - 1);
head &= !((1 << SHIFT) - 1);
// Fix up indices if they fall onto block ends.
if (tail >> SHIFT) & (LAP - 1) == LAP - 1 {
tail = tail.wrapping_add(1 << SHIFT);
}
if (head >> SHIFT) & (LAP - 1) == LAP - 1 {
head = head.wrapping_add(1 << SHIFT);
}
// Rotate indices so that head falls into the first block.
let lap = (head >> SHIFT) / LAP;
tail = tail.wrapping_sub((lap * LAP) << SHIFT);
head = head.wrapping_sub((lap * LAP) << SHIFT);
// Remove the lower bits.
tail >>= SHIFT;
head >>= SHIFT;
// Return the difference minus the number of blocks between tail and head.
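                // For example, after 31 pushes fill the first block, the tail
                // has advanced 32 positions (31 pushes plus the extra
                // increment made when the next block is installed), so the
                // length is 32 - 0 - 32 / LAP = 31.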
return tail - head - tail / LAP;
}
}
}
/// Returns `true` if the queue is empty.
pub fn is_empty(&self) -> bool {
let head = self.head.index.load(Ordering::SeqCst);
let tail = self.tail.index.load(Ordering::SeqCst);
head >> SHIFT == tail >> SHIFT
}
/// Returns `true` if the queue is full.
pub fn is_full(&self) -> bool {
false
}
/// Closes the queue.
///
/// Returns `true` if this call closed the queue.
pub fn close(&self) -> bool {
let tail = self.tail.index.fetch_or(MARK_BIT, Ordering::SeqCst);
tail & MARK_BIT == 0
}
/// Returns `true` if the queue is closed.
pub fn is_closed(&self) -> bool {
self.tail.index.load(Ordering::SeqCst) & MARK_BIT != 0
}
}
impl<T> Drop for Unbounded<T> {
fn drop(&mut self) {
let Self { head, tail } = self;
let Position { index: head, block } = &mut **head;
head.with_mut(|&mut mut head| {
tail.index.with_mut(|&mut mut tail| {
// Erase the lower bits.
head &= !((1 << SHIFT) - 1);
tail &= !((1 << SHIFT) - 1);
unsafe {
// Drop all values between `head` and `tail` and deallocate the heap-allocated blocks.
while head != tail {
let offset = (head >> SHIFT) % LAP;
if offset < BLOCK_CAP {
// Drop the value in the slot.
block.with_mut(|block| {
let slot = (**block).slots.get_unchecked(offset);
slot.value.with_mut(|slot| {
let value = &mut *slot;
value.as_mut_ptr().drop_in_place();
});
});
} else {
// Deallocate the block and move to the next one.
block.with_mut(|block| {
let next_block = (**block).next.with_mut(|next| *next);
drop(Box::from_raw(*block));
*block = next_block;
});
}
head = head.wrapping_add(1 << SHIFT);
}
// Deallocate the last remaining block.
block.with_mut(|block| {
if !block.is_null() {
drop(Box::from_raw(*block));
}
});
}
});
});
}
}