Vendor dependencies for 0.3.0 release

2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

File diff suppressed because it is too large.

crossbeam-utils/src/atomic/consume.rs

@@ -0,0 +1,111 @@
#[cfg(not(crossbeam_no_atomic))]
use core::sync::atomic::Ordering;
/// Trait which allows reading from primitive atomic types with "consume" ordering.
pub trait AtomicConsume {
/// Type returned by `load_consume`.
type Val;
/// Loads a value from the atomic using a "consume" memory ordering.
///
/// This is similar to the "acquire" ordering, except that an ordering is
/// only guaranteed with operations that "depend on" the result of the load.
/// However, consume loads are usually much faster than acquire loads on
/// architectures with a weak memory model since they don't require memory
/// fence instructions.
///
/// The exact definition of "depend on" is a bit vague, but it works as you
/// would expect in practice since a lot of software, especially the Linux
/// kernel, relies on this behavior.
///
/// This is currently only implemented on ARM and AArch64, where a fence
/// can be avoided. On other architectures this will fall back to a simple
/// `load(Ordering::Acquire)`.
fn load_consume(&self) -> Self::Val;
}
#[cfg(not(crossbeam_no_atomic))]
// Miri and Loom don't support "consume" ordering, and ThreadSanitizer doesn't treat
// load(Relaxed) + compiler_fence(Acquire) as a "consume" load.
// LLVM generates machine code equivalent to fence(Acquire) for compiler_fence(Acquire)
// on PowerPC, MIPS, etc. (https://godbolt.org/z/hffvjvW7h), so for now the fence
// can actually be avoided here only on ARM and AArch64. See also
// https://github.com/rust-lang/rust/issues/62256.
#[cfg(all(
any(target_arch = "arm", target_arch = "aarch64"),
not(any(miri, crossbeam_loom, crossbeam_sanitize_thread)),
))]
macro_rules! impl_consume {
() => {
#[inline]
fn load_consume(&self) -> Self::Val {
use crate::primitive::sync::atomic::compiler_fence;
let result = self.load(Ordering::Relaxed);
compiler_fence(Ordering::Acquire);
result
}
};
}
#[cfg(not(crossbeam_no_atomic))]
#[cfg(not(all(
any(target_arch = "arm", target_arch = "aarch64"),
not(any(miri, crossbeam_loom, crossbeam_sanitize_thread)),
)))]
macro_rules! impl_consume {
() => {
#[inline]
fn load_consume(&self) -> Self::Val {
self.load(Ordering::Acquire)
}
};
}
macro_rules! impl_atomic {
($atomic:ident, $val:ty) => {
#[cfg(not(crossbeam_no_atomic))]
impl AtomicConsume for core::sync::atomic::$atomic {
type Val = $val;
impl_consume!();
}
#[cfg(crossbeam_loom)]
impl AtomicConsume for loom::sync::atomic::$atomic {
type Val = $val;
impl_consume!();
}
};
}
impl_atomic!(AtomicBool, bool);
impl_atomic!(AtomicUsize, usize);
impl_atomic!(AtomicIsize, isize);
impl_atomic!(AtomicU8, u8);
impl_atomic!(AtomicI8, i8);
impl_atomic!(AtomicU16, u16);
impl_atomic!(AtomicI16, i16);
#[cfg(any(target_has_atomic = "32", not(target_pointer_width = "16")))]
impl_atomic!(AtomicU32, u32);
#[cfg(any(target_has_atomic = "32", not(target_pointer_width = "16")))]
impl_atomic!(AtomicI32, i32);
#[cfg(any(
target_has_atomic = "64",
not(any(target_pointer_width = "16", target_pointer_width = "32")),
))]
impl_atomic!(AtomicU64, u64);
#[cfg(any(
target_has_atomic = "64",
not(any(target_pointer_width = "16", target_pointer_width = "32")),
))]
impl_atomic!(AtomicI64, i64);
#[cfg(not(crossbeam_no_atomic))]
impl<T> AtomicConsume for core::sync::atomic::AtomicPtr<T> {
type Val = *mut T;
impl_consume!();
}
#[cfg(crossbeam_loom)]
impl<T> AtomicConsume for loom::sync::atomic::AtomicPtr<T> {
type Val = *mut T;
impl_consume!();
}
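
For context, a minimal sketch of how `load_consume` is typically used: a reader loads a
pointer and then dereferences it, so the read of the pointee "depends on" the loaded
value. The `publish`/`read_published` names and the static slot below are illustrative,
not part of the vendored crate.

use core::sync::atomic::{AtomicPtr, Ordering};
use crossbeam_utils::atomic::AtomicConsume;

static SLOT: AtomicPtr<u64> = AtomicPtr::new(core::ptr::null_mut());

// Producer: fully initialize the value, then publish the pointer with Release.
fn publish(value: &'static mut u64) {
    SLOT.store(value, Ordering::Release);
}

// Consumer: the dereference depends on the loaded pointer, which is exactly the
// dependency that "consume" ordering may rely on; on ARM/AArch64 this avoids a fence.
fn read_published() -> Option<u64> {
    let ptr = SLOT.load_consume();
    if ptr.is_null() {
        None
    } else {
        // SAFETY: `publish` only stores pointers to fully initialized values.
        Some(unsafe { *ptr })
    }
}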

crossbeam-utils/src/atomic/mod.rs

@@ -0,0 +1,32 @@
//! Atomic types.
//!
//! * [`AtomicCell`], a thread-safe mutable memory location.
//! * [`AtomicConsume`], for reading from primitive atomic types with "consume" ordering.
#[cfg(target_has_atomic = "ptr")]
#[cfg(not(crossbeam_loom))]
// Use "wide" sequence lock if the pointer width <= 32 for preventing its counter against wrap
// around.
//
// In narrow architectures (pointer width <= 16), the counter is still <= 32-bit and may be
// vulnerable to wrap around. But it's mostly okay, since in such a primitive hardware, the
// counter will not be increased that fast.
// Note that Rust (and C99) pointers must be at least 16-bit (i.e., 8-bit targets are impossible): https://github.com/rust-lang/rust/pull/49305
#[cfg_attr(
any(target_pointer_width = "16", target_pointer_width = "32"),
path = "seq_lock_wide.rs"
)]
mod seq_lock;
#[cfg(target_has_atomic = "ptr")]
// We cannot provide AtomicCell under cfg(crossbeam_loom) because loom's atomic
// types have a different in-memory representation than the underlying type.
// TODO: The latest loom supports fences, so a fallback using the seqlock may be possible.
#[cfg(not(crossbeam_loom))]
mod atomic_cell;
#[cfg(target_has_atomic = "ptr")]
#[cfg(not(crossbeam_loom))]
pub use atomic_cell::AtomicCell;
mod consume;
pub use consume::AtomicConsume;
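
As a quick illustration of `AtomicCell` (listed in the module docs above), it wraps a
value and offers atomic loads, stores, swaps, and compare-exchanges. A minimal sketch,
assuming the public `crossbeam_utils::atomic::AtomicCell` API:

use crossbeam_utils::atomic::AtomicCell;

fn main() {
    let cell = AtomicCell::new(7i32);
    cell.store(8);                                   // replace the value
    assert_eq!(cell.load(), 8);                      // `load` requires `T: Copy`
    assert_eq!(cell.swap(9), 8);                     // returns the previous value
    assert_eq!(cell.compare_exchange(9, 10), Ok(9)); // requires `T: Eq`
}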

crossbeam-utils/src/atomic/seq_lock.rs

@@ -0,0 +1,112 @@
use core::mem;
use core::sync::atomic::{self, AtomicUsize, Ordering};
use crate::Backoff;
/// A simple stamped lock.
pub(crate) struct SeqLock {
/// The current state of the lock.
///
/// All bits except the least significant one hold the current stamp. When locked, the state
/// equals 1 and doesn't contain a valid stamp.
state: AtomicUsize,
}
impl SeqLock {
pub(crate) const fn new() -> Self {
Self {
state: AtomicUsize::new(0),
}
}
/// If not locked, returns the current stamp.
///
/// This method should be called before optimistic reads.
#[inline]
pub(crate) fn optimistic_read(&self) -> Option<usize> {
let state = self.state.load(Ordering::Acquire);
if state == 1 {
None
} else {
Some(state)
}
}
/// Returns `true` if the current stamp is equal to `stamp`.
///
/// This method should be called after optimistic reads to check whether they are valid.
/// The `stamp` argument should be the one returned by `optimistic_read`.
#[inline]
pub(crate) fn validate_read(&self, stamp: usize) -> bool {
atomic::fence(Ordering::Acquire);
self.state.load(Ordering::Relaxed) == stamp
}
/// Grabs the lock for writing.
#[inline]
pub(crate) fn write(&'static self) -> SeqLockWriteGuard {
let backoff = Backoff::new();
loop {
            let previous = self.state.swap(1, Ordering::Acquire);
            if previous != 1 {
                // Synchronize with the acquire fence in `validate_read` via any
                // modification to the data in the critical section.
                atomic::fence(Ordering::Release);
return SeqLockWriteGuard {
lock: self,
state: previous,
};
}
backoff.snooze();
}
}
}
/// An RAII guard that releases the lock and increments the stamp when dropped.
pub(crate) struct SeqLockWriteGuard {
/// The parent lock.
lock: &'static SeqLock,
/// The stamp before locking.
state: usize,
}
impl SeqLockWriteGuard {
/// Releases the lock without incrementing the stamp.
#[inline]
pub(crate) fn abort(self) {
self.lock.state.store(self.state, Ordering::Release);
// We specifically don't want to call drop(), since that's
// what increments the stamp.
mem::forget(self);
}
}
impl Drop for SeqLockWriteGuard {
#[inline]
fn drop(&mut self) {
// Release the lock and increment the stamp.
self.lock
.state
.store(self.state.wrapping_add(2), Ordering::Release);
}
}
#[cfg(test)]
mod tests {
use super::SeqLock;
#[test]
fn test_abort() {
static LK: SeqLock = SeqLock::new();
let before = LK.optimistic_read().unwrap();
{
let guard = LK.write();
guard.abort();
}
let after = LK.optimistic_read().unwrap();
assert_eq!(before, after, "aborted write does not update the stamp");
}
}
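
Putting the pieces together, the intended usage is an optimistic-read loop: take a stamp,
read the protected data, then validate; writers serialize through `write`. A minimal
in-crate sketch, assuming the guarded data is a plain `UnsafeCell<usize>` (this mirrors
how `atomic_cell.rs` drives the lock, but the names here are hypothetical):

use core::cell::UnsafeCell;

// Assumes this code lives in the same module as `SeqLock` (it is `pub(crate)`).
struct Guarded(UnsafeCell<usize>);
// SAFETY (illustrative): all access to the cell is mediated by LOCK below.
unsafe impl Sync for Guarded {}

static LOCK: SeqLock = SeqLock::new();
static DATA: Guarded = Guarded(UnsafeCell::new(0));

fn read() -> usize {
    loop {
        // 1. Take a stamp; `None` means a writer holds the lock, so retry.
        if let Some(stamp) = LOCK.optimistic_read() {
            // 2. Read the data optimistically; this may race with a writer.
            let value = unsafe { core::ptr::read_volatile(DATA.0.get()) };
            // 3. Validate: an unchanged stamp means no writer intervened.
            if LOCK.validate_read(stamp) {
                return value;
            }
        }
    }
}

fn write(value: usize) {
    let guard = LOCK.write();          // spins until the lock is free
    unsafe { *DATA.0.get() = value }   // exclusive access in the critical section
    drop(guard);                       // releases the lock and bumps the stamp
}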

crossbeam-utils/src/atomic/seq_lock_wide.rs

@@ -0,0 +1,155 @@
use core::mem;
use core::sync::atomic::{self, AtomicUsize, Ordering};
use crate::Backoff;
/// A simple stamped lock.
///
/// The state is represented as two `AtomicUsize`: `state_hi` for high bits and `state_lo` for low
/// bits.
pub(crate) struct SeqLock {
/// The high bits of the current state of the lock.
state_hi: AtomicUsize,
/// The low bits of the current state of the lock.
///
/// All bits except the least significant one hold the current stamp. When locked, the state_lo
/// equals 1 and doesn't contain a valid stamp.
state_lo: AtomicUsize,
}
impl SeqLock {
pub(crate) const fn new() -> Self {
Self {
state_hi: AtomicUsize::new(0),
state_lo: AtomicUsize::new(0),
}
}
/// If not locked, returns the current stamp.
///
/// This method should be called before optimistic reads.
#[inline]
pub(crate) fn optimistic_read(&self) -> Option<(usize, usize)> {
// The acquire loads from `state_hi` and `state_lo` synchronize with the release stores in
// `SeqLockWriteGuard::drop`.
//
        // As a consequence, we can be sure that (1) all writes within the era of
        // `state_hi - 1` happen before now; and therefore, (2) if `state_lo` is even, all
        // writes within the critical section of (`state_hi`, `state_lo`) happen before now.
let state_hi = self.state_hi.load(Ordering::Acquire);
let state_lo = self.state_lo.load(Ordering::Acquire);
if state_lo == 1 {
None
} else {
Some((state_hi, state_lo))
}
}
/// Returns `true` if the current stamp is equal to `stamp`.
///
/// This method should be called after optimistic reads to check whether they are valid.
/// The `stamp` argument should be the one returned by `optimistic_read`.
#[inline]
pub(crate) fn validate_read(&self, stamp: (usize, usize)) -> bool {
        // Thanks to the fence, if we observe any modification to the data in the critical
        // section of `(stamp.0, stamp.1)`, then the critical section's write of 1 to
        // `state_lo` must be visible.
        atomic::fence(Ordering::Acquire);
        // So if `state_lo` coincides with `stamp.1`, then either (1) we observed no
        // modification to the data after the critical section of `(stamp.0, stamp.1)`,
        // or (2) `state_lo` wrapped around.
        //
        // If (2) is the case, the acquire ordering ensures we see the new value of
        // `state_hi`.
        let state_lo = self.state_lo.load(Ordering::Acquire);
        // If (2) is the case and `state_hi` coincides with `stamp.0`, then `state_hi`
        // also wrapped around, in which case we give up on correctly validating the read.
        let state_hi = self.state_hi.load(Ordering::Relaxed);
        // Except for the case that both `state_hi` and `state_lo` wrapped around, the
        // following condition implies that we observed no modification to the data after
        // the critical section of `(stamp.0, stamp.1)`.
(state_hi, state_lo) == stamp
}
/// Grabs the lock for writing.
#[inline]
pub(crate) fn write(&'static self) -> SeqLockWriteGuard {
let backoff = Backoff::new();
loop {
let previous = self.state_lo.swap(1, Ordering::Acquire);
if previous != 1 {
                // This synchronizes with the acquire fence in `validate_read` via any
                // modification to the data in the critical section of `(state_hi, previous)`.
atomic::fence(Ordering::Release);
return SeqLockWriteGuard {
lock: self,
state_lo: previous,
};
}
backoff.snooze();
}
}
}
/// An RAII guard that releases the lock and increments the stamp when dropped.
pub(crate) struct SeqLockWriteGuard {
/// The parent lock.
lock: &'static SeqLock,
/// The stamp before locking.
state_lo: usize,
}
impl SeqLockWriteGuard {
/// Releases the lock without incrementing the stamp.
#[inline]
    pub(crate) fn abort(self) {
        self.lock.state_lo.store(self.state_lo, Ordering::Release);
        // We specifically don't want to call drop(), since that's
        // what increments the stamp.
        mem::forget(self);
}
}
impl Drop for SeqLockWriteGuard {
#[inline]
fn drop(&mut self) {
let state_lo = self.state_lo.wrapping_add(2);
        // Increment the high word if the low word wraps around.
//
// Release ordering for synchronizing with `optimistic_read`.
if state_lo == 0 {
let state_hi = self.lock.state_hi.load(Ordering::Relaxed);
self.lock
.state_hi
.store(state_hi.wrapping_add(1), Ordering::Release);
}
// Release the lock and increment the stamp.
//
// Release ordering for synchronizing with `optimistic_read`.
self.lock.state_lo.store(state_lo, Ordering::Release);
}
}
#[cfg(test)]
mod tests {
use super::SeqLock;
#[test]
fn test_abort() {
static LK: SeqLock = SeqLock::new();
let before = LK.optimistic_read().unwrap();
{
let guard = LK.write();
guard.abort();
}
let after = LK.optimistic_read().unwrap();
assert_eq!(before, after, "aborted write does not update the stamp");
}
}
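
To see the wraparound handling in isolation: the low word advances by 2 per write and
stays even while unlocked, and `state_hi` moves only when `state_lo` wraps to 0. A
standalone sketch of the same arithmetic (the `bump` helper is hypothetical):

// Simulates the stamp update performed by `SeqLockWriteGuard::drop`.
fn bump(stamp: (usize, usize)) -> (usize, usize) {
    let lo = stamp.1.wrapping_add(2);
    // Carry into the high word only when the low word wraps around.
    let hi = if lo == 0 { stamp.0.wrapping_add(1) } else { stamp.0 };
    (hi, lo)
}

fn main() {
    assert_eq!(bump((0, 0)), (0, 2));
    // On wraparound of `state_lo`, `state_hi` absorbs the carry, so the combined
    // stamp stays unique until *both* words wrap.
    assert_eq!(bump((0, usize::MAX - 1)), (1, 0));
}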