Vendor dependencies for 0.3.0 release

2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

67
vendor/parking_lot_core/src/lib.rs vendored Normal file

@@ -0,0 +1,67 @@
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//! This library exposes a low-level API for creating your own efficient
//! synchronization primitives.
//!
//! # The parking lot
//!
//! To keep synchronization primitives small, all thread queuing and suspending
//! functionality is offloaded to the *parking lot*. The idea behind this is based
//! on the Webkit [`WTF::ParkingLot`](https://webkit.org/blog/6161/locking-in-webkit/)
//! class, which essentially consists of a hash table mapping of lock addresses
//! to queues of parked (sleeping) threads. The Webkit parking lot was itself
//! inspired by Linux [futexes](http://man7.org/linux/man-pages/man2/futex.2.html),
//! but it is more powerful since it allows invoking callbacks while holding a
//! queue lock.
//!
//! There are two main operations that can be performed on the parking lot:
//!
//! - *Parking* refers to suspending the thread while simultaneously enqueuing it
//! on a queue keyed by some address.
//! - *Unparking* refers to dequeuing a thread from a queue keyed by some address
//! and resuming it.
//!
//! See the documentation of the individual functions for more details.
//!
//! # Building custom synchronization primitives
//!
//! Building custom synchronization primitives is very simple since the parking
//! lot takes care of all the hard parts for you. A simple example of a
//! custom primitive would be to integrate a `Mutex` inside another data type.
//! Since a mutex only requires 2 bits, it can share space with other data.
//! For example, one could create an `ArcMutex` type that combines the atomic
//! reference count and the two mutex bits in the same atomic word.
#![warn(missing_docs)]
#![warn(rust_2018_idioms)]
#![cfg_attr(
all(target_env = "sgx", target_vendor = "fortanix"),
feature(sgx_platform)
)]
#![cfg_attr(
all(
feature = "nightly",
target_family = "wasm",
target_feature = "atomics"
),
feature(stdarch_wasm_atomic_wait)
)]
mod parking_lot;
mod spinwait;
mod thread_parker;
mod util;
mod word_lock;
pub use self::parking_lot::deadlock;
pub use self::parking_lot::{park, unpark_all, unpark_filter, unpark_one, unpark_requeue};
pub use self::parking_lot::{
FilterOp, ParkResult, ParkToken, RequeueOp, UnparkResult, UnparkToken,
};
pub use self::parking_lot::{DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN};
pub use self::spinwait::SpinWait;
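
A minimal sketch of what the API re-exported above enables: a one-shot event that parks waiters on its own address and wakes them all on signal. `Event` is a hypothetical example type, not part of the vendored sources; the `validate` closure runs with the queue lock held, which is why re-checking the flag there is race-free.

use parking_lot_core::{park, unpark_all, DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN};
use std::sync::atomic::{AtomicBool, Ordering};

pub struct Event {
    set: AtomicBool,
}

impl Event {
    pub const fn new() -> Event {
        Event { set: AtomicBool::new(false) }
    }

    pub fn wait(&self) {
        while !self.set.load(Ordering::Acquire) {
            unsafe {
                // `validate` runs under the queue lock; re-checking the flag
                // there ensures we never sleep after a concurrent `signal`.
                park(
                    self as *const Event as usize,
                    || !self.set.load(Ordering::Acquire),
                    || {},
                    |_, _| {},
                    DEFAULT_PARK_TOKEN,
                    None,
                );
            }
        }
    }

    pub fn signal(&self) {
        self.set.store(true, Ordering::Release);
        unsafe {
            // Wake every thread parked on this event's address.
            let _woken = unpark_all(self as *const Event as usize, DEFAULT_UNPARK_TOKEN);
        }
    }
}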

1700
vendor/parking_lot_core/src/parking_lot.rs vendored Normal file

File diff suppressed because it is too large.

74
vendor/parking_lot_core/src/spinwait.rs vendored Normal file

@@ -0,0 +1,74 @@
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use crate::thread_parker;
use core::hint::spin_loop;
// Wastes some CPU time for the given number of iterations,
// using a hint to indicate to the CPU that we are spinning.
#[inline]
fn cpu_relax(iterations: u32) {
for _ in 0..iterations {
spin_loop()
}
}
/// A counter used to perform exponential backoff in spin loops.
#[derive(Default)]
pub struct SpinWait {
counter: u32,
}
impl SpinWait {
/// Creates a new `SpinWait`.
#[inline]
pub fn new() -> Self {
Self::default()
}
/// Resets a `SpinWait` to its initial state.
#[inline]
pub fn reset(&mut self) {
self.counter = 0;
}
/// Spins until the sleep threshold has been reached.
///
/// This function returns whether the sleep threshold has been reached, at
/// which point further spinning has diminishing returns and the thread
/// should be parked instead.
///
/// The spin strategy will initially use a CPU-bound loop but will fall back
/// to yielding the CPU to the OS after a few iterations.
#[inline]
pub fn spin(&mut self) -> bool {
if self.counter >= 10 {
return false;
}
self.counter += 1;
if self.counter <= 3 {
cpu_relax(1 << self.counter);
} else {
thread_parker::thread_yield();
}
true
}
/// Spins without yielding the thread to the OS.
///
/// Instead, the backoff is simply capped at a maximum value. This can be
/// used to improve throughput in `compare_exchange` loops that have high
/// contention.
#[inline]
pub fn spin_no_yield(&mut self) {
self.counter += 1;
if self.counter > 10 {
self.counter = 10;
}
cpu_relax(1 << self.counter);
}
}
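
A minimal sketch of the intended usage pattern: a `compare_exchange` acquire loop that backs off with `SpinWait` until the sleep threshold is hit. `acquire` and `flag` are hypothetical names standing in for a lock's state word.

use parking_lot_core::SpinWait;
use std::sync::atomic::{AtomicBool, Ordering};

fn acquire(flag: &AtomicBool) {
    let mut spinwait = SpinWait::new();
    while flag
        .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
        .is_err()
    {
        if !spinwait.spin() {
            // Sleep threshold reached: a real primitive would park the
            // thread here; this sketch just restarts the backoff.
            spinwait.reset();
        }
    }
}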

79
vendor/parking_lot_core/src/thread_parker/generic.rs vendored Normal file

@@ -0,0 +1,79 @@
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//! A simple spin lock based thread parker. Used on platforms without better
//! parking facilities available.
use core::hint::spin_loop;
use core::sync::atomic::{AtomicBool, Ordering};
use std::thread;
use std::time::Instant;
// Helper type for putting a thread to sleep until some other thread wakes it up
pub struct ThreadParker {
parked: AtomicBool,
}
impl super::ThreadParkerT for ThreadParker {
type UnparkHandle = UnparkHandle;
const IS_CHEAP_TO_CONSTRUCT: bool = true;
#[inline]
fn new() -> ThreadParker {
ThreadParker {
parked: AtomicBool::new(false),
}
}
#[inline]
unsafe fn prepare_park(&self) {
self.parked.store(true, Ordering::Relaxed);
}
#[inline]
unsafe fn timed_out(&self) -> bool {
self.parked.load(Ordering::Relaxed) != false
}
#[inline]
unsafe fn park(&self) {
while self.parked.load(Ordering::Acquire) != false {
spin_loop();
}
}
#[inline]
unsafe fn park_until(&self, timeout: Instant) -> bool {
while self.parked.load(Ordering::Acquire) != false {
if Instant::now() >= timeout {
return false;
}
spin_loop();
}
true
}
#[inline]
unsafe fn unpark_lock(&self) -> UnparkHandle {
// We don't need to lock anything, just clear the state
self.parked.store(false, Ordering::Release);
UnparkHandle(())
}
}
pub struct UnparkHandle(());
impl super::UnparkHandleT for UnparkHandle {
#[inline]
unsafe fn unpark(self) {}
}
#[inline]
pub fn thread_yield() {
thread::yield_now();
}

156
vendor/parking_lot_core/src/thread_parker/linux.rs vendored Normal file

@@ -0,0 +1,156 @@
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use core::{
ptr,
sync::atomic::{AtomicI32, Ordering},
};
use libc;
use std::thread;
use std::time::Instant;
// x32 Linux uses a non-standard type for tv_nsec in timespec.
// See https://sourceware.org/bugzilla/show_bug.cgi?id=16437
#[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))]
#[allow(non_camel_case_types)]
type tv_nsec_t = i64;
#[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))]
#[allow(non_camel_case_types)]
type tv_nsec_t = libc::c_long;
fn errno() -> libc::c_int {
#[cfg(target_os = "linux")]
unsafe {
*libc::__errno_location()
}
#[cfg(target_os = "android")]
unsafe {
*libc::__errno()
}
}
// Helper type for putting a thread to sleep until some other thread wakes it up
pub struct ThreadParker {
futex: AtomicI32,
}
impl super::ThreadParkerT for ThreadParker {
type UnparkHandle = UnparkHandle;
const IS_CHEAP_TO_CONSTRUCT: bool = true;
#[inline]
fn new() -> ThreadParker {
ThreadParker {
futex: AtomicI32::new(0),
}
}
#[inline]
unsafe fn prepare_park(&self) {
self.futex.store(1, Ordering::Relaxed);
}
#[inline]
unsafe fn timed_out(&self) -> bool {
self.futex.load(Ordering::Relaxed) != 0
}
#[inline]
unsafe fn park(&self) {
while self.futex.load(Ordering::Acquire) != 0 {
self.futex_wait(None);
}
}
#[inline]
unsafe fn park_until(&self, timeout: Instant) -> bool {
while self.futex.load(Ordering::Acquire) != 0 {
let now = Instant::now();
if timeout <= now {
return false;
}
let diff = timeout - now;
if diff.as_secs() as libc::time_t as u64 != diff.as_secs() {
// Timeout overflowed, just sleep indefinitely
self.park();
return true;
}
// SAFETY: libc::timespec is zero initializable.
let mut ts: libc::timespec = std::mem::zeroed();
ts.tv_sec = diff.as_secs() as libc::time_t;
ts.tv_nsec = diff.subsec_nanos() as tv_nsec_t;
self.futex_wait(Some(ts));
}
true
}
// Locks the parker to prevent the target thread from exiting. This is
// necessary to ensure that thread-local ThreadData objects remain valid.
// This should be called while holding the queue lock.
#[inline]
unsafe fn unpark_lock(&self) -> UnparkHandle {
// We don't need to lock anything, just clear the state
self.futex.store(0, Ordering::Release);
UnparkHandle { futex: &self.futex }
}
}
impl ThreadParker {
#[inline]
fn futex_wait(&self, ts: Option<libc::timespec>) {
let ts_ptr = ts
.as_ref()
.map(|ts_ref| ts_ref as *const _)
.unwrap_or(ptr::null());
let r = unsafe {
libc::syscall(
libc::SYS_futex,
&self.futex,
libc::FUTEX_WAIT | libc::FUTEX_PRIVATE_FLAG,
1,
ts_ptr,
)
};
debug_assert!(r == 0 || r == -1);
if r == -1 {
debug_assert!(
errno() == libc::EINTR
|| errno() == libc::EAGAIN
|| (ts.is_some() && errno() == libc::ETIMEDOUT)
);
}
}
}
pub struct UnparkHandle {
futex: *const AtomicI32,
}
impl super::UnparkHandleT for UnparkHandle {
#[inline]
unsafe fn unpark(self) {
// The thread data may have been freed at this point, but it doesn't
// matter since the syscall will just return EFAULT in that case.
let r = libc::syscall(
libc::SYS_futex,
self.futex,
libc::FUTEX_WAKE | libc::FUTEX_PRIVATE_FLAG,
1,
);
debug_assert!(r == 0 || r == 1 || r == -1);
if r == -1 {
debug_assert_eq!(errno(), libc::EFAULT);
}
}
}
#[inline]
pub fn thread_yield() {
thread::yield_now();
}

85
vendor/parking_lot_core/src/thread_parker/mod.rs vendored Normal file

@@ -0,0 +1,85 @@
use cfg_if::cfg_if;
use std::time::Instant;
/// Trait for the platform thread parker implementation.
///
/// All unsafe methods are unsafe because the Unix thread parker is based on
/// pthread mutexes and condvars. Those primitives must not be moved or used
/// from any memory address other than the one at which they were initialized.
/// As such, it's UB to call any unsafe method on
/// `ThreadParkerT` if the implementing instance has moved since the last
/// call to any of the unsafe methods.
pub trait ThreadParkerT {
type UnparkHandle: UnparkHandleT;
const IS_CHEAP_TO_CONSTRUCT: bool;
fn new() -> Self;
/// Prepares the parker. This should be called before adding it to the queue.
unsafe fn prepare_park(&self);
/// Checks if the park timed out. This should be called while holding the
/// queue lock after `park_until` has returned false.
unsafe fn timed_out(&self) -> bool;
/// Parks the thread until it is unparked. This should be called after it has
/// been added to the queue, after unlocking the queue.
unsafe fn park(&self);
/// Parks the thread until it is unparked or the timeout is reached. This
/// should be called after it has been added to the queue, after unlocking
/// the queue. Returns true if we were unparked and false if we timed out.
unsafe fn park_until(&self, timeout: Instant) -> bool;
/// Locks the parker to prevent the target thread from exiting. This is
/// necessary to ensure that thread-local `ThreadData` objects remain valid.
/// This should be called while holding the queue lock.
unsafe fn unpark_lock(&self) -> Self::UnparkHandle;
}
/// Handle for a thread that is about to be unparked. We need to mark the thread
/// as unparked while holding the queue lock, but we delay the actual unparking
/// until after the queue lock is released.
pub trait UnparkHandleT {
/// Wakes up the parked thread. This should be called after the queue lock is
/// released to avoid blocking the queue for too long.
///
/// This method is unsafe for the same reason as the unsafe methods in
/// `ThreadParkerT`.
unsafe fn unpark(self);
}
cfg_if! {
if #[cfg(any(target_os = "linux", target_os = "android"))] {
#[path = "linux.rs"]
mod imp;
} else if #[cfg(unix)] {
#[path = "unix.rs"]
mod imp;
} else if #[cfg(windows)] {
#[path = "windows/mod.rs"]
mod imp;
} else if #[cfg(target_os = "redox")] {
#[path = "redox.rs"]
mod imp;
} else if #[cfg(all(target_env = "sgx", target_vendor = "fortanix"))] {
#[path = "sgx.rs"]
mod imp;
} else if #[cfg(all(
feature = "nightly",
target_family = "wasm",
target_feature = "atomics"
))] {
#[path = "wasm_atomic.rs"]
mod imp;
} else if #[cfg(target_family = "wasm")] {
#[path = "wasm.rs"]
mod imp;
} else {
#[path = "generic.rs"]
mod imp;
}
}
pub use self::imp::{thread_yield, ThreadParker};
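
A minimal sketch of the handshake the trait above defines, split into the waiter and waker sides. `wait_side` and `wake_side` are hypothetical helpers, and this only compiles inside the crate, since `thread_parker` is a private module.

use crate::thread_parker::{ThreadParker, ThreadParkerT, UnparkHandleT};

// Waiter side: set the parked flag, enqueue under the queue lock (elided),
// release the queue lock, then sleep until an unparker clears the flag.
unsafe fn wait_side(parker: &ThreadParker) {
    parker.prepare_park();
    // ... push `parker` onto the queue and release the queue lock here ...
    parker.park();
}

// Waker side: while still holding the queue lock, `unpark_lock` pins the
// target thread; the actual wakeup happens after the queue lock is dropped.
unsafe fn wake_side(parker: &ThreadParker) {
    let handle = parker.unpark_lock();
    // ... release the queue lock here ...
    handle.unpark();
}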

139
vendor/parking_lot_core/src/thread_parker/redox.rs vendored Normal file

@@ -0,0 +1,139 @@
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use core::{
ptr,
sync::atomic::{AtomicI32, Ordering},
};
use std::thread;
use std::time::Instant;
use syscall::{
call::futex,
data::TimeSpec,
error::{Error, EAGAIN, EFAULT, EINTR, ETIMEDOUT},
flag::{FUTEX_WAIT, FUTEX_WAKE},
};
const UNPARKED: i32 = 0;
const PARKED: i32 = 1;
// Helper type for putting a thread to sleep until some other thread wakes it up
pub struct ThreadParker {
futex: AtomicI32,
}
impl super::ThreadParkerT for ThreadParker {
type UnparkHandle = UnparkHandle;
const IS_CHEAP_TO_CONSTRUCT: bool = true;
#[inline]
fn new() -> ThreadParker {
ThreadParker {
futex: AtomicI32::new(UNPARKED),
}
}
#[inline]
unsafe fn prepare_park(&self) {
self.futex.store(PARKED, Ordering::Relaxed);
}
#[inline]
unsafe fn timed_out(&self) -> bool {
self.futex.load(Ordering::Relaxed) != UNPARKED
}
#[inline]
unsafe fn park(&self) {
while self.futex.load(Ordering::Acquire) != UNPARKED {
self.futex_wait(None);
}
}
#[inline]
unsafe fn park_until(&self, timeout: Instant) -> bool {
while self.futex.load(Ordering::Acquire) != UNPARKED {
let now = Instant::now();
if timeout <= now {
return false;
}
let diff = timeout - now;
if diff.as_secs() > i64::max_value() as u64 {
// Timeout overflowed, just sleep indefinitely
self.park();
return true;
}
let ts = TimeSpec {
tv_sec: diff.as_secs() as i64,
tv_nsec: diff.subsec_nanos() as i32,
};
self.futex_wait(Some(ts));
}
true
}
#[inline]
unsafe fn unpark_lock(&self) -> UnparkHandle {
// We don't need to lock anything, just clear the state
self.futex.store(UNPARKED, Ordering::Release);
UnparkHandle { futex: self.ptr() }
}
}
impl ThreadParker {
#[inline]
fn futex_wait(&self, ts: Option<TimeSpec>) {
let ts_ptr = ts
.as_ref()
.map(|ts_ref| ts_ref as *const _)
.unwrap_or(ptr::null());
let r = unsafe {
futex(
self.ptr(),
FUTEX_WAIT,
PARKED,
ts_ptr as usize,
ptr::null_mut(),
)
};
match r {
Ok(r) => debug_assert_eq!(r, 0),
Err(Error { errno }) => {
debug_assert!(errno == EINTR || errno == EAGAIN || errno == ETIMEDOUT);
}
}
}
#[inline]
fn ptr(&self) -> *mut i32 {
&self.futex as *const AtomicI32 as *mut i32
}
}
pub struct UnparkHandle {
futex: *mut i32,
}
impl super::UnparkHandleT for UnparkHandle {
#[inline]
unsafe fn unpark(self) {
// The thread data may have been freed at this point, but it doesn't
// matter since the syscall will just return EFAULT in that case.
let r = futex(self.futex, FUTEX_WAKE, PARKED, 0, ptr::null_mut());
match r {
Ok(num_woken) => debug_assert!(num_woken == 0 || num_woken == 1),
Err(Error { errno }) => debug_assert_eq!(errno, EFAULT),
}
}
}
#[inline]
pub fn thread_yield() {
thread::yield_now();
}

94
vendor/parking_lot_core/src/thread_parker/sgx.rs vendored Normal file

@@ -0,0 +1,94 @@
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use core::sync::atomic::{AtomicBool, Ordering};
use std::time::Instant;
use std::{
io,
os::fortanix_sgx::{
thread::current as current_tcs,
usercalls::{
self,
raw::{Tcs, EV_UNPARK, WAIT_INDEFINITE},
},
},
thread,
};
// Helper type for putting a thread to sleep until some other thread wakes it up
pub struct ThreadParker {
parked: AtomicBool,
tcs: Tcs,
}
impl super::ThreadParkerT for ThreadParker {
type UnparkHandle = UnparkHandle;
const IS_CHEAP_TO_CONSTRUCT: bool = true;
#[inline]
fn new() -> ThreadParker {
ThreadParker {
parked: AtomicBool::new(false),
tcs: current_tcs(),
}
}
#[inline]
unsafe fn prepare_park(&self) {
self.parked.store(true, Ordering::Relaxed);
}
#[inline]
unsafe fn timed_out(&self) -> bool {
self.parked.load(Ordering::Relaxed)
}
#[inline]
unsafe fn park(&self) {
while self.parked.load(Ordering::Acquire) {
let result = usercalls::wait(EV_UNPARK, WAIT_INDEFINITE);
debug_assert_eq!(result.expect("wait returned error") & EV_UNPARK, EV_UNPARK);
}
}
#[inline]
unsafe fn park_until(&self, _timeout: Instant) -> bool {
// FIXME: https://github.com/fortanix/rust-sgx/issues/31
panic!("timeout not supported in SGX");
}
#[inline]
unsafe fn unpark_lock(&self) -> UnparkHandle {
// We don't need to lock anything, just clear the state
self.parked.store(false, Ordering::Release);
UnparkHandle(self.tcs)
}
}
pub struct UnparkHandle(Tcs);
impl super::UnparkHandleT for UnparkHandle {
#[inline]
unsafe fn unpark(self) {
let result = usercalls::send(EV_UNPARK, Some(self.0));
if cfg!(debug_assertions) {
if let Err(error) = result {
// `InvalidInput` may be returned if the thread we send to has
// already been unparked and exited.
if error.kind() != io::ErrorKind::InvalidInput {
panic!("send returned an unexpected error: {:?}", error);
}
}
}
}
}
#[inline]
pub fn thread_yield() {
thread::yield_now();
}

242
vendor/parking_lot_core/src/thread_parker/unix.rs vendored Normal file

@@ -0,0 +1,242 @@
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
#[cfg(target_vendor = "apple")]
use core::ptr;
use core::{
cell::{Cell, UnsafeCell},
mem::MaybeUninit,
};
use libc;
use std::time::Instant;
use std::{thread, time::Duration};
// x32 Linux uses a non-standard type for tv_nsec in timespec.
// See https://sourceware.org/bugzilla/show_bug.cgi?id=16437
#[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))]
#[allow(non_camel_case_types)]
type tv_nsec_t = i64;
#[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))]
#[allow(non_camel_case_types)]
type tv_nsec_t = libc::c_long;
// Helper type for putting a thread to sleep until some other thread wakes it up
pub struct ThreadParker {
should_park: Cell<bool>,
mutex: UnsafeCell<libc::pthread_mutex_t>,
condvar: UnsafeCell<libc::pthread_cond_t>,
initialized: Cell<bool>,
}
impl super::ThreadParkerT for ThreadParker {
type UnparkHandle = UnparkHandle;
const IS_CHEAP_TO_CONSTRUCT: bool = false;
#[inline]
fn new() -> ThreadParker {
ThreadParker {
should_park: Cell::new(false),
mutex: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER),
condvar: UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER),
initialized: Cell::new(false),
}
}
#[inline]
unsafe fn prepare_park(&self) {
self.should_park.set(true);
if !self.initialized.get() {
self.init();
self.initialized.set(true);
}
}
#[inline]
unsafe fn timed_out(&self) -> bool {
// We need to grab the mutex here because another thread may be
// concurrently executing UnparkHandle::unpark, which is done without
// holding the queue lock.
let r = libc::pthread_mutex_lock(self.mutex.get());
debug_assert_eq!(r, 0);
let should_park = self.should_park.get();
let r = libc::pthread_mutex_unlock(self.mutex.get());
debug_assert_eq!(r, 0);
should_park
}
#[inline]
unsafe fn park(&self) {
let r = libc::pthread_mutex_lock(self.mutex.get());
debug_assert_eq!(r, 0);
while self.should_park.get() {
let r = libc::pthread_cond_wait(self.condvar.get(), self.mutex.get());
debug_assert_eq!(r, 0);
}
let r = libc::pthread_mutex_unlock(self.mutex.get());
debug_assert_eq!(r, 0);
}
#[inline]
unsafe fn park_until(&self, timeout: Instant) -> bool {
let r = libc::pthread_mutex_lock(self.mutex.get());
debug_assert_eq!(r, 0);
while self.should_park.get() {
let now = Instant::now();
if timeout <= now {
let r = libc::pthread_mutex_unlock(self.mutex.get());
debug_assert_eq!(r, 0);
return false;
}
if let Some(ts) = timeout_to_timespec(timeout - now) {
let r = libc::pthread_cond_timedwait(self.condvar.get(), self.mutex.get(), &ts);
if ts.tv_sec < 0 {
// On some systems, negative timeouts will return EINVAL. In
// that case we won't sleep and will just busy loop instead,
// which is the best we can do.
debug_assert!(r == 0 || r == libc::ETIMEDOUT || r == libc::EINVAL);
} else {
debug_assert!(r == 0 || r == libc::ETIMEDOUT);
}
} else {
// Timeout calculation overflowed, just sleep indefinitely
let r = libc::pthread_cond_wait(self.condvar.get(), self.mutex.get());
debug_assert_eq!(r, 0);
}
}
let r = libc::pthread_mutex_unlock(self.mutex.get());
debug_assert_eq!(r, 0);
true
}
#[inline]
unsafe fn unpark_lock(&self) -> UnparkHandle {
let r = libc::pthread_mutex_lock(self.mutex.get());
debug_assert_eq!(r, 0);
UnparkHandle {
thread_parker: self,
}
}
}
impl ThreadParker {
/// Initializes the condvar to use CLOCK_MONOTONIC instead of CLOCK_REALTIME.
#[cfg(any(target_vendor = "apple", target_os = "android", target_os = "espidf"))]
#[inline]
unsafe fn init(&self) {}
/// Initializes the condvar to use CLOCK_MONOTONIC instead of CLOCK_REALTIME.
#[cfg(not(any(target_vendor = "apple", target_os = "android", target_os = "espidf")))]
#[inline]
unsafe fn init(&self) {
let mut attr = MaybeUninit::<libc::pthread_condattr_t>::uninit();
let r = libc::pthread_condattr_init(attr.as_mut_ptr());
debug_assert_eq!(r, 0);
let r = libc::pthread_condattr_setclock(attr.as_mut_ptr(), libc::CLOCK_MONOTONIC);
debug_assert_eq!(r, 0);
let r = libc::pthread_cond_init(self.condvar.get(), attr.as_ptr());
debug_assert_eq!(r, 0);
let r = libc::pthread_condattr_destroy(attr.as_mut_ptr());
debug_assert_eq!(r, 0);
}
}
impl Drop for ThreadParker {
#[inline]
fn drop(&mut self) {
// On DragonFly pthread_mutex_destroy() returns EINVAL if called on a
// mutex that was just initialized with libc::PTHREAD_MUTEX_INITIALIZER.
// Once it is used (locked/unlocked) or pthread_mutex_init() is called,
// this behaviour no longer occurs. The same applies to condvars.
unsafe {
let r = libc::pthread_mutex_destroy(self.mutex.get());
debug_assert!(r == 0 || r == libc::EINVAL);
let r = libc::pthread_cond_destroy(self.condvar.get());
debug_assert!(r == 0 || r == libc::EINVAL);
}
}
}
pub struct UnparkHandle {
thread_parker: *const ThreadParker,
}
impl super::UnparkHandleT for UnparkHandle {
#[inline]
unsafe fn unpark(self) {
(*self.thread_parker).should_park.set(false);
// We notify while holding the lock here to avoid races with the target
// thread. In particular, the thread could exit after we unlock the
// mutex, which would make the condvar access invalid memory.
let r = libc::pthread_cond_signal((*self.thread_parker).condvar.get());
debug_assert_eq!(r, 0);
let r = libc::pthread_mutex_unlock((*self.thread_parker).mutex.get());
debug_assert_eq!(r, 0);
}
}
// Returns the current time on the clock used by pthread_cond_t as a timespec.
#[cfg(target_vendor = "apple")]
#[inline]
fn timespec_now() -> libc::timespec {
let mut now = MaybeUninit::<libc::timeval>::uninit();
let r = unsafe { libc::gettimeofday(now.as_mut_ptr(), ptr::null_mut()) };
debug_assert_eq!(r, 0);
// SAFETY: We know `libc::gettimeofday` has initialized the value.
let now = unsafe { now.assume_init() };
libc::timespec {
tv_sec: now.tv_sec,
tv_nsec: now.tv_usec as tv_nsec_t * 1000,
}
}
#[cfg(not(target_vendor = "apple"))]
#[inline]
fn timespec_now() -> libc::timespec {
let mut now = MaybeUninit::<libc::timespec>::uninit();
let clock = if cfg!(target_os = "android") {
// Android doesn't support pthread_condattr_setclock, so we need to
// specify the timeout in CLOCK_REALTIME.
libc::CLOCK_REALTIME
} else {
libc::CLOCK_MONOTONIC
};
let r = unsafe { libc::clock_gettime(clock, now.as_mut_ptr()) };
debug_assert_eq!(r, 0);
// SAFETY: We know `libc::clock_gettime` has initialized the value.
unsafe { now.assume_init() }
}
// Converts a relative timeout into an absolute timeout in the clock used by
// pthread_cond_t.
#[inline]
fn timeout_to_timespec(timeout: Duration) -> Option<libc::timespec> {
// Handle overflows early on
if timeout.as_secs() > libc::time_t::max_value() as u64 {
return None;
}
let now = timespec_now();
let mut nsec = now.tv_nsec + timeout.subsec_nanos() as tv_nsec_t;
let mut sec = now.tv_sec.checked_add(timeout.as_secs() as libc::time_t);
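// Carry the nanosecond overflow into the seconds field: e.g. starting from
// tv_nsec = 900_000_000, a 0.2s timeout yields 1_100_000_000, which becomes
// tv_sec + 1 and tv_nsec = 100_000_000.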
if nsec >= 1_000_000_000 {
nsec -= 1_000_000_000;
sec = sec.and_then(|sec| sec.checked_add(1));
}
sec.map(|sec| libc::timespec {
tv_nsec: nsec,
tv_sec: sec,
})
}
#[inline]
pub fn thread_yield() {
thread::yield_now();
}

54
vendor/parking_lot_core/src/thread_parker/wasm.rs vendored Normal file

@@ -0,0 +1,54 @@
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//! The wasm platform can't park when atomic support is not available, so
//! this ThreadParker just panics on any attempt to park.
use std::thread;
use std::time::Instant;
pub struct ThreadParker(());
impl super::ThreadParkerT for ThreadParker {
type UnparkHandle = UnparkHandle;
const IS_CHEAP_TO_CONSTRUCT: bool = true;
fn new() -> ThreadParker {
ThreadParker(())
}
unsafe fn prepare_park(&self) {
panic!("Parking not supported on this platform");
}
unsafe fn timed_out(&self) -> bool {
panic!("Parking not supported on this platform");
}
unsafe fn park(&self) {
panic!("Parking not supported on this platform");
}
unsafe fn park_until(&self, _timeout: Instant) -> bool {
panic!("Parking not supported on this platform");
}
unsafe fn unpark_lock(&self) -> UnparkHandle {
panic!("Parking not supported on this platform");
}
}
pub struct UnparkHandle(());
impl super::UnparkHandleT for UnparkHandle {
unsafe fn unpark(self) {}
}
pub fn thread_yield() {
thread::yield_now();
}

97
vendor/parking_lot_core/src/thread_parker/wasm_atomic.rs vendored Normal file

@@ -0,0 +1,97 @@
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use core::{
arch::wasm32,
sync::atomic::{AtomicI32, Ordering},
};
use std::time::{Duration, Instant};
use std::{convert::TryFrom, thread};
// Helper type for putting a thread to sleep until some other thread wakes it up
pub struct ThreadParker {
parked: AtomicI32,
}
const UNPARKED: i32 = 0;
const PARKED: i32 = 1;
impl super::ThreadParkerT for ThreadParker {
type UnparkHandle = UnparkHandle;
const IS_CHEAP_TO_CONSTRUCT: bool = true;
#[inline]
fn new() -> ThreadParker {
ThreadParker {
parked: AtomicI32::new(UNPARKED),
}
}
#[inline]
unsafe fn prepare_park(&self) {
self.parked.store(PARKED, Ordering::Relaxed);
}
#[inline]
unsafe fn timed_out(&self) -> bool {
self.parked.load(Ordering::Relaxed) == PARKED
}
#[inline]
unsafe fn park(&self) {
while self.parked.load(Ordering::Acquire) == PARKED {
let r = wasm32::memory_atomic_wait32(self.ptr(), PARKED, -1);
// We should either have woken up (0) or observed a not-equal value due
// to a race (1); we should never time out (2).
debug_assert!(r == 0 || r == 1);
}
}
#[inline]
unsafe fn park_until(&self, timeout: Instant) -> bool {
while self.parked.load(Ordering::Acquire) == PARKED {
if let Some(left) = timeout.checked_duration_since(Instant::now()) {
let nanos_left = i64::try_from(left.as_nanos()).unwrap_or(i64::max_value());
let r = wasm32::memory_atomic_wait32(self.ptr(), PARKED, nanos_left);
debug_assert!(r == 0 || r == 1 || r == 2);
} else {
return false;
}
}
true
}
#[inline]
unsafe fn unpark_lock(&self) -> UnparkHandle {
// We don't need to lock anything, just clear the state
self.parked.store(UNPARKED, Ordering::Release);
UnparkHandle(self.ptr())
}
}
impl ThreadParker {
#[inline]
fn ptr(&self) -> *mut i32 {
&self.parked as *const AtomicI32 as *mut i32
}
}
pub struct UnparkHandle(*mut i32);
impl super::UnparkHandleT for UnparkHandle {
#[inline]
unsafe fn unpark(self) {
let num_notified = wasm32::memory_atomic_notify(self.0 as *mut i32, 1);
debug_assert!(num_notified == 0 || num_notified == 1);
}
}
#[inline]
pub fn thread_yield() {
thread::yield_now();
}

31
vendor/parking_lot_core/src/thread_parker/windows/bindings.rs vendored Normal file

@@ -0,0 +1,31 @@
//! Manual bindings to the win32 API, used to avoid depending on windows-sys
//! or winapi: these bindings will **never** change, and `parking_lot_core` is
//! a foundational dependency for the Rust ecosystem, so the dependencies it
//! pulls in have an outsized effect.
pub const INFINITE: u32 = 4294967295;
pub const ERROR_TIMEOUT: u32 = 1460;
pub const GENERIC_READ: u32 = 2147483648;
pub const GENERIC_WRITE: u32 = 1073741824;
pub const STATUS_SUCCESS: i32 = 0;
pub const STATUS_TIMEOUT: i32 = 258;
pub type HANDLE = isize;
pub type HINSTANCE = isize;
pub type BOOL = i32;
pub type BOOLEAN = u8;
pub type NTSTATUS = i32;
pub type FARPROC = Option<unsafe extern "system" fn() -> isize>;
pub type WaitOnAddress = unsafe extern "system" fn(
Address: *const std::ffi::c_void,
CompareAddress: *const std::ffi::c_void,
AddressSize: usize,
dwMilliseconds: u32,
) -> BOOL;
pub type WakeByAddressSingle = unsafe extern "system" fn(Address: *const std::ffi::c_void);
windows_targets::link!("kernel32.dll" "system" fn GetLastError() -> u32);
windows_targets::link!("kernel32.dll" "system" fn CloseHandle(hObject: HANDLE) -> BOOL);
windows_targets::link!("kernel32.dll" "system" fn GetModuleHandleA(lpModuleName: *const u8) -> HINSTANCE);
windows_targets::link!("kernel32.dll" "system" fn GetProcAddress(hModule: HINSTANCE, lpProcName: *const u8) -> FARPROC);
windows_targets::link!("kernel32.dll" "system" fn Sleep(dwMilliseconds: u32) -> ());

194
vendor/parking_lot_core/src/thread_parker/windows/keyed_event.rs vendored Normal file

@@ -0,0 +1,194 @@
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use core::{
ffi,
mem::{self, MaybeUninit},
ptr,
};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::Instant;
const STATE_UNPARKED: usize = 0;
const STATE_PARKED: usize = 1;
const STATE_TIMED_OUT: usize = 2;
use super::bindings::*;
#[allow(non_snake_case)]
pub struct KeyedEvent {
handle: HANDLE,
NtReleaseKeyedEvent: extern "system" fn(
EventHandle: HANDLE,
Key: *mut ffi::c_void,
Alertable: BOOLEAN,
Timeout: *mut i64,
) -> NTSTATUS,
NtWaitForKeyedEvent: extern "system" fn(
EventHandle: HANDLE,
Key: *mut ffi::c_void,
Alertable: BOOLEAN,
Timeout: *mut i64,
) -> NTSTATUS,
}
impl KeyedEvent {
#[inline]
unsafe fn wait_for(&self, key: *mut ffi::c_void, timeout: *mut i64) -> NTSTATUS {
(self.NtWaitForKeyedEvent)(self.handle, key, false.into(), timeout)
}
#[inline]
unsafe fn release(&self, key: *mut ffi::c_void) -> NTSTATUS {
(self.NtReleaseKeyedEvent)(self.handle, key, false.into(), ptr::null_mut())
}
#[allow(non_snake_case)]
pub fn create() -> Option<KeyedEvent> {
let ntdll = unsafe { GetModuleHandleA(b"ntdll.dll\0".as_ptr()) };
if ntdll == 0 {
return None;
}
let NtCreateKeyedEvent =
unsafe { GetProcAddress(ntdll, b"NtCreateKeyedEvent\0".as_ptr())? };
let NtReleaseKeyedEvent =
unsafe { GetProcAddress(ntdll, b"NtReleaseKeyedEvent\0".as_ptr())? };
let NtWaitForKeyedEvent =
unsafe { GetProcAddress(ntdll, b"NtWaitForKeyedEvent\0".as_ptr())? };
let NtCreateKeyedEvent: extern "system" fn(
KeyedEventHandle: *mut HANDLE,
DesiredAccess: u32,
ObjectAttributes: *mut ffi::c_void,
Flags: u32,
) -> NTSTATUS = unsafe { mem::transmute(NtCreateKeyedEvent) };
let mut handle = MaybeUninit::uninit();
let status = NtCreateKeyedEvent(
handle.as_mut_ptr(),
GENERIC_READ | GENERIC_WRITE,
ptr::null_mut(),
0,
);
if status != STATUS_SUCCESS {
return None;
}
Some(KeyedEvent {
handle: unsafe { handle.assume_init() },
NtReleaseKeyedEvent: unsafe { mem::transmute(NtReleaseKeyedEvent) },
NtWaitForKeyedEvent: unsafe { mem::transmute(NtWaitForKeyedEvent) },
})
}
#[inline]
pub fn prepare_park(&'static self, key: &AtomicUsize) {
key.store(STATE_PARKED, Ordering::Relaxed);
}
#[inline]
pub fn timed_out(&'static self, key: &AtomicUsize) -> bool {
key.load(Ordering::Relaxed) == STATE_TIMED_OUT
}
#[inline]
pub unsafe fn park(&'static self, key: &AtomicUsize) {
let status = self.wait_for(key as *const _ as *mut ffi::c_void, ptr::null_mut());
debug_assert_eq!(status, STATUS_SUCCESS);
}
#[inline]
pub unsafe fn park_until(&'static self, key: &AtomicUsize, timeout: Instant) -> bool {
let now = Instant::now();
if timeout <= now {
// If another thread unparked us, we need to call
// NtWaitForKeyedEvent otherwise that thread will stay stuck at
// NtReleaseKeyedEvent.
if key.swap(STATE_TIMED_OUT, Ordering::Relaxed) == STATE_UNPARKED {
self.park(key);
return true;
}
return false;
}
// NT uses a timeout in units of 100ns. We use a negative value to
// indicate a relative timeout based on a monotonic clock.
let diff = timeout - now;
let value = (diff.as_secs() as i64)
.checked_mul(-10000000)
.and_then(|x| x.checked_sub((diff.subsec_nanos() as i64 + 99) / 100));
let mut nt_timeout = match value {
Some(x) => x,
None => {
// Timeout overflowed, just sleep indefinitely
self.park(key);
return true;
}
};
let status = self.wait_for(key as *const _ as *mut ffi::c_void, &mut nt_timeout);
if status == STATUS_SUCCESS {
return true;
}
debug_assert_eq!(status, STATUS_TIMEOUT);
// If another thread unparked us, we need to call NtWaitForKeyedEvent
// otherwise that thread will stay stuck at NtReleaseKeyedEvent.
if key.swap(STATE_TIMED_OUT, Ordering::Relaxed) == STATE_UNPARKED {
self.park(key);
return true;
}
false
}
#[inline]
pub unsafe fn unpark_lock(&'static self, key: &AtomicUsize) -> UnparkHandle {
// If the state was STATE_PARKED then we need to wake up the thread
if key.swap(STATE_UNPARKED, Ordering::Relaxed) == STATE_PARKED {
UnparkHandle {
key: key,
keyed_event: self,
}
} else {
UnparkHandle {
key: ptr::null(),
keyed_event: self,
}
}
}
}
impl Drop for KeyedEvent {
#[inline]
fn drop(&mut self) {
unsafe {
let ok = CloseHandle(self.handle);
debug_assert_eq!(ok, true.into());
}
}
}
// Handle for a thread that is about to be unparked. We need to mark the thread
// as unparked while holding the queue lock, but we delay the actual unparking
// until after the queue lock is released.
pub struct UnparkHandle {
key: *const AtomicUsize,
keyed_event: &'static KeyedEvent,
}
impl UnparkHandle {
// Wakes up the parked thread. This should be called after the queue lock is
// released to avoid blocking the queue for too long.
#[inline]
pub unsafe fn unpark(self) {
if !self.key.is_null() {
let status = self.keyed_event.release(self.key as *mut ffi::c_void);
debug_assert_eq!(status, STATUS_SUCCESS);
}
}
}

175
vendor/parking_lot_core/src/thread_parker/windows/mod.rs vendored Normal file

@@ -0,0 +1,175 @@
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use core::{
ptr,
sync::atomic::{AtomicPtr, AtomicUsize, Ordering},
};
use std::time::Instant;
mod bindings;
mod keyed_event;
mod waitaddress;
enum Backend {
KeyedEvent(keyed_event::KeyedEvent),
WaitAddress(waitaddress::WaitAddress),
}
static BACKEND: AtomicPtr<Backend> = AtomicPtr::new(ptr::null_mut());
impl Backend {
#[inline]
fn get() -> &'static Backend {
// Fast path: use the existing object
let backend_ptr = BACKEND.load(Ordering::Acquire);
if !backend_ptr.is_null() {
return unsafe { &*backend_ptr };
};
Backend::create()
}
#[cold]
fn create() -> &'static Backend {
// Try to create a new Backend
let backend;
if let Some(waitaddress) = waitaddress::WaitAddress::create() {
backend = Backend::WaitAddress(waitaddress);
} else if let Some(keyed_event) = keyed_event::KeyedEvent::create() {
backend = Backend::KeyedEvent(keyed_event);
} else {
panic!(
"parking_lot requires either NT Keyed Events (WinXP+) or \
WaitOnAddress/WakeByAddress (Win8+)"
);
}
// Try to set our new Backend as the global one
let backend_ptr = Box::into_raw(Box::new(backend));
match BACKEND.compare_exchange(
ptr::null_mut(),
backend_ptr,
Ordering::Release,
Ordering::Acquire,
) {
Ok(_) => unsafe { &*backend_ptr },
Err(global_backend_ptr) => {
unsafe {
// We lost the race, free our object and return the global one
let _ = Box::from_raw(backend_ptr);
&*global_backend_ptr
}
}
}
}
}
// Helper type for putting a thread to sleep until some other thread wakes it up
pub struct ThreadParker {
key: AtomicUsize,
backend: &'static Backend,
}
impl super::ThreadParkerT for ThreadParker {
type UnparkHandle = UnparkHandle;
const IS_CHEAP_TO_CONSTRUCT: bool = true;
#[inline]
fn new() -> ThreadParker {
// Initialize the backend here to ensure we don't get any panics
// later on, which could leave synchronization primitives in a broken
// state.
ThreadParker {
key: AtomicUsize::new(0),
backend: Backend::get(),
}
}
// Prepares the parker. This should be called before adding it to the queue.
#[inline]
unsafe fn prepare_park(&self) {
match *self.backend {
Backend::KeyedEvent(ref x) => x.prepare_park(&self.key),
Backend::WaitAddress(ref x) => x.prepare_park(&self.key),
}
}
// Checks if the park timed out. This should be called while holding the
// queue lock after park_until has returned false.
#[inline]
unsafe fn timed_out(&self) -> bool {
match *self.backend {
Backend::KeyedEvent(ref x) => x.timed_out(&self.key),
Backend::WaitAddress(ref x) => x.timed_out(&self.key),
}
}
// Parks the thread until it is unparked. This should be called after it has
// been added to the queue, after unlocking the queue.
#[inline]
unsafe fn park(&self) {
match *self.backend {
Backend::KeyedEvent(ref x) => x.park(&self.key),
Backend::WaitAddress(ref x) => x.park(&self.key),
}
}
// Parks the thread until it is unparked or the timeout is reached. This
// should be called after it has been added to the queue, after unlocking
// the queue. Returns true if we were unparked and false if we timed out.
#[inline]
unsafe fn park_until(&self, timeout: Instant) -> bool {
match *self.backend {
Backend::KeyedEvent(ref x) => x.park_until(&self.key, timeout),
Backend::WaitAddress(ref x) => x.park_until(&self.key, timeout),
}
}
// Locks the parker to prevent the target thread from exiting. This is
// necessary to ensure that thread-local ThreadData objects remain valid.
// This should be called while holding the queue lock.
#[inline]
unsafe fn unpark_lock(&self) -> UnparkHandle {
match *self.backend {
Backend::KeyedEvent(ref x) => UnparkHandle::KeyedEvent(x.unpark_lock(&self.key)),
Backend::WaitAddress(ref x) => UnparkHandle::WaitAddress(x.unpark_lock(&self.key)),
}
}
}
// Handle for a thread that is about to be unparked. We need to mark the thread
// as unparked while holding the queue lock, but we delay the actual unparking
// until after the queue lock is released.
pub enum UnparkHandle {
KeyedEvent(keyed_event::UnparkHandle),
WaitAddress(waitaddress::UnparkHandle),
}
impl super::UnparkHandleT for UnparkHandle {
// Wakes up the parked thread. This should be called after the queue lock is
// released to avoid blocking the queue for too long.
#[inline]
unsafe fn unpark(self) {
match self {
UnparkHandle::KeyedEvent(x) => x.unpark(),
UnparkHandle::WaitAddress(x) => x.unpark(),
}
}
}
// Yields the rest of the current timeslice to the OS
#[inline]
pub fn thread_yield() {
unsafe {
// We don't use SwitchToThread here because it doesn't consider all
// threads in the system and the thread we are waiting for may not get
// selected.
bindings::Sleep(0);
}
}

125
vendor/parking_lot_core/src/thread_parker/windows/waitaddress.rs vendored Normal file

@@ -0,0 +1,125 @@
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use core::{
mem,
sync::atomic::{AtomicUsize, Ordering},
};
use std::{ffi, time::Instant};
use super::bindings::*;
#[allow(non_snake_case)]
pub struct WaitAddress {
WaitOnAddress: WaitOnAddress,
WakeByAddressSingle: WakeByAddressSingle,
}
impl WaitAddress {
#[allow(non_snake_case)]
pub fn create() -> Option<WaitAddress> {
let synch_dll = unsafe { GetModuleHandleA(b"api-ms-win-core-synch-l1-2-0.dll\0".as_ptr()) };
if synch_dll == 0 {
return None;
}
let WaitOnAddress = unsafe { GetProcAddress(synch_dll, b"WaitOnAddress\0".as_ptr())? };
let WakeByAddressSingle =
unsafe { GetProcAddress(synch_dll, b"WakeByAddressSingle\0".as_ptr())? };
Some(WaitAddress {
WaitOnAddress: unsafe { mem::transmute(WaitOnAddress) },
WakeByAddressSingle: unsafe { mem::transmute(WakeByAddressSingle) },
})
}
#[inline]
pub fn prepare_park(&'static self, key: &AtomicUsize) {
key.store(1, Ordering::Relaxed);
}
#[inline]
pub fn timed_out(&'static self, key: &AtomicUsize) -> bool {
key.load(Ordering::Relaxed) != 0
}
#[inline]
pub fn park(&'static self, key: &AtomicUsize) {
while key.load(Ordering::Acquire) != 0 {
let r = self.wait_on_address(key, INFINITE);
debug_assert!(r == true.into());
}
}
#[inline]
pub fn park_until(&'static self, key: &AtomicUsize, timeout: Instant) -> bool {
while key.load(Ordering::Acquire) != 0 {
let now = Instant::now();
if timeout <= now {
return false;
}
let diff = timeout - now;
let timeout = diff
.as_secs()
.checked_mul(1000)
.and_then(|x| x.checked_add((diff.subsec_nanos() as u64 + 999999) / 1000000))
.map(|ms| {
if ms > std::u32::MAX as u64 {
INFINITE
} else {
ms as u32
}
})
.unwrap_or(INFINITE);
if self.wait_on_address(key, timeout) == false.into() {
debug_assert_eq!(unsafe { GetLastError() }, ERROR_TIMEOUT);
}
}
true
}
#[inline]
pub fn unpark_lock(&'static self, key: &AtomicUsize) -> UnparkHandle {
// We don't need to lock anything, just clear the state
key.store(0, Ordering::Release);
UnparkHandle {
key: key,
waitaddress: self,
}
}
#[inline]
fn wait_on_address(&'static self, key: &AtomicUsize, timeout: u32) -> BOOL {
let cmp = 1usize;
unsafe {
(self.WaitOnAddress)(
key as *const _ as *mut ffi::c_void,
&cmp as *const _ as *mut ffi::c_void,
mem::size_of::<usize>(),
timeout,
)
}
}
}
// Handle for a thread that is about to be unparked. We need to mark the thread
// as unparked while holding the queue lock, but we delay the actual unparking
// until after the queue lock is released.
pub struct UnparkHandle {
key: *const AtomicUsize,
waitaddress: &'static WaitAddress,
}
impl UnparkHandle {
// Wakes up the parked thread. This should be called after the queue lock is
// released to avoid blocking the queue for too long.
#[inline]
pub fn unpark(self) {
unsafe { (self.waitaddress.WakeByAddressSingle)(self.key as *mut ffi::c_void) };
}
}

31
vendor/parking_lot_core/src/util.rs vendored Normal file

@@ -0,0 +1,31 @@
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
// Option::unchecked_unwrap
pub trait UncheckedOptionExt<T> {
unsafe fn unchecked_unwrap(self) -> T;
}
impl<T> UncheckedOptionExt<T> for Option<T> {
#[inline]
unsafe fn unchecked_unwrap(self) -> T {
match self {
Some(x) => x,
None => unreachable(),
}
}
}
// hint::unreachable_unchecked() in release mode
#[inline]
unsafe fn unreachable() -> ! {
if cfg!(debug_assertions) {
unreachable!();
} else {
core::hint::unreachable_unchecked()
}
}
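
A minimal sketch of how `unchecked_unwrap` is meant to be used on an `Option` known by invariant to be `Some`. `first_nonzero` is a hypothetical helper, and this only compiles inside the crate, since `util` is a private module.

use crate::util::UncheckedOptionExt;

fn first_nonzero(values: &[usize]) -> usize {
    debug_assert!(values.iter().any(|&v| v != 0));
    let found = values.iter().copied().find(|&v| v != 0);
    // Safety: the caller guarantees at least one nonzero element, so `found`
    // is always Some; in release mode this compiles to no check at all.
    unsafe { found.unchecked_unwrap() }
}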

327
vendor/parking_lot_core/src/word_lock.rs vendored Normal file

@@ -0,0 +1,327 @@
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use crate::spinwait::SpinWait;
use crate::thread_parker::{ThreadParker, ThreadParkerT, UnparkHandleT};
use core::{
cell::Cell,
mem, ptr,
sync::atomic::{fence, AtomicUsize, Ordering},
};
struct ThreadData {
parker: ThreadParker,
// Linked list of threads in the queue. The queue is split into two parts:
// the processed part and the unprocessed part. When new nodes are added to
// the list, they only have the next pointer set, and queue_tail is null.
//
// Nodes are processed with the queue lock held, which consists of setting
// the prev pointer for each node and setting the queue_tail pointer on the
// first processed node of the list.
//
// This setup allows nodes to be added to the queue without a lock, while
// still allowing O(1) removal of nodes from the processed part of the list.
// The only cost is the O(n) processing, but this only needs to be done
// once for each node, and therefore isn't too expensive.
queue_tail: Cell<*const ThreadData>,
prev: Cell<*const ThreadData>,
next: Cell<*const ThreadData>,
}
impl ThreadData {
#[inline]
fn new() -> ThreadData {
assert!(mem::align_of::<ThreadData>() > !QUEUE_MASK);
ThreadData {
parker: ThreadParker::new(),
queue_tail: Cell::new(ptr::null()),
prev: Cell::new(ptr::null()),
next: Cell::new(ptr::null()),
}
}
}
// Invokes the given closure with a reference to the current thread's `ThreadData`.
#[inline]
fn with_thread_data<T>(f: impl FnOnce(&ThreadData) -> T) -> T {
let mut thread_data_ptr = ptr::null();
// If ThreadData is expensive to construct, then we want to use a cached
// version in thread-local storage if possible.
if !ThreadParker::IS_CHEAP_TO_CONSTRUCT {
thread_local!(static THREAD_DATA: ThreadData = ThreadData::new());
if let Ok(tls_thread_data) = THREAD_DATA.try_with(|x| x as *const ThreadData) {
thread_data_ptr = tls_thread_data;
}
}
// Otherwise just create a ThreadData on the stack
let mut thread_data_storage = None;
if thread_data_ptr.is_null() {
thread_data_ptr = thread_data_storage.get_or_insert_with(ThreadData::new);
}
f(unsafe { &*thread_data_ptr })
}
const LOCKED_BIT: usize = 1;
const QUEUE_LOCKED_BIT: usize = 2;
const QUEUE_MASK: usize = !3;
// Word-sized lock that is used to implement the parking_lot API. Since this
// can't use parking_lot, it instead manages its own queue of waiting threads.
pub struct WordLock {
state: AtomicUsize,
}
impl WordLock {
/// Returns a new, unlocked, `WordLock`.
pub const fn new() -> Self {
WordLock {
state: AtomicUsize::new(0),
}
}
#[inline]
pub fn lock(&self) {
if self
.state
.compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
.is_ok()
{
return;
}
self.lock_slow();
}
/// Must not be called on an already unlocked `WordLock`!
#[inline]
pub unsafe fn unlock(&self) {
let state = self.state.fetch_sub(LOCKED_BIT, Ordering::Release);
if state.is_queue_locked() || state.queue_head().is_null() {
return;
}
self.unlock_slow();
}
#[cold]
fn lock_slow(&self) {
let mut spinwait = SpinWait::new();
let mut state = self.state.load(Ordering::Relaxed);
loop {
// Grab the lock if it isn't locked, even if there is a queue on it
if !state.is_locked() {
match self.state.compare_exchange_weak(
state,
state | LOCKED_BIT,
Ordering::Acquire,
Ordering::Relaxed,
) {
Ok(_) => return,
Err(x) => state = x,
}
continue;
}
// If there is no queue, try spinning a few times
if state.queue_head().is_null() && spinwait.spin() {
state = self.state.load(Ordering::Relaxed);
continue;
}
// Get our thread data and prepare it for parking
state = with_thread_data(|thread_data| {
// The pthread implementation is still unsafe, so we need to surround `prepare_park`
// with `unsafe {}`.
#[allow(unused_unsafe)]
unsafe {
thread_data.parker.prepare_park();
}
// Add our thread to the front of the queue
let queue_head = state.queue_head();
if queue_head.is_null() {
thread_data.queue_tail.set(thread_data);
thread_data.prev.set(ptr::null());
} else {
thread_data.queue_tail.set(ptr::null());
thread_data.prev.set(ptr::null());
thread_data.next.set(queue_head);
}
if let Err(x) = self.state.compare_exchange_weak(
state,
state.with_queue_head(thread_data),
Ordering::AcqRel,
Ordering::Relaxed,
) {
return x;
}
// Sleep until we are woken up by an unlock
// Ignoring unused unsafe, since it's only a few platforms where this is unsafe.
#[allow(unused_unsafe)]
unsafe {
thread_data.parker.park();
}
// Loop back and try locking again
spinwait.reset();
self.state.load(Ordering::Relaxed)
});
}
}
#[cold]
fn unlock_slow(&self) {
let mut state = self.state.load(Ordering::Relaxed);
loop {
// We just unlocked the WordLock. Just check if there is a thread
// to wake up. If the queue is locked then another thread is already
// taking care of waking up a thread.
if state.is_queue_locked() || state.queue_head().is_null() {
return;
}
// Try to grab the queue lock
match self.state.compare_exchange_weak(
state,
state | QUEUE_LOCKED_BIT,
Ordering::Acquire,
Ordering::Relaxed,
) {
Ok(_) => break,
Err(x) => state = x,
}
}
// Now we have the queue lock and the queue is non-empty
'outer: loop {
// First, we need to fill in the prev pointers for any newly added
// threads. We do this until we reach a node that we previously
// processed, which has a non-null queue_tail pointer.
let queue_head = state.queue_head();
let mut queue_tail;
let mut current = queue_head;
loop {
queue_tail = unsafe { (*current).queue_tail.get() };
if !queue_tail.is_null() {
break;
}
unsafe {
let next = (*current).next.get();
(*next).prev.set(current);
current = next;
}
}
// Set queue_tail on the queue head to indicate that the whole list
// has prev pointers set correctly.
unsafe {
(*queue_head).queue_tail.set(queue_tail);
}
// If the WordLock is locked, then there is no point waking up a
// thread now. Instead we let the next unlocker take care of waking
// up a thread.
if state.is_locked() {
match self.state.compare_exchange_weak(
state,
state & !QUEUE_LOCKED_BIT,
Ordering::Release,
Ordering::Relaxed,
) {
Ok(_) => return,
Err(x) => state = x,
}
// Need an acquire fence before reading the new queue
fence_acquire(&self.state);
continue;
}
// Remove the last thread from the queue and unlock the queue
let new_tail = unsafe { (*queue_tail).prev.get() };
if new_tail.is_null() {
loop {
match self.state.compare_exchange_weak(
state,
state & LOCKED_BIT,
Ordering::Release,
Ordering::Relaxed,
) {
Ok(_) => break,
Err(x) => state = x,
}
// If the compare_exchange failed because a new thread was
// added to the queue then we need to re-scan the queue to
// find the previous element.
if state.queue_head().is_null() {
continue;
} else {
// Need an acquire fence before reading the new queue
fence_acquire(&self.state);
continue 'outer;
}
}
} else {
unsafe {
(*queue_head).queue_tail.set(new_tail);
}
self.state.fetch_and(!QUEUE_LOCKED_BIT, Ordering::Release);
}
// Finally, wake up the thread we removed from the queue. Note that
// we don't need to worry about any races here since the thread is
// guaranteed to be sleeping right now and we are the only one who
// can wake it up.
unsafe {
(*queue_tail).parker.unpark_lock().unpark();
}
break;
}
}
}
// Thread-Sanitizer only has partial fence support, so when running under it, we
// try and avoid false positives by using a discarded acquire load instead.
#[inline]
fn fence_acquire(a: &AtomicUsize) {
if cfg!(tsan_enabled) {
let _ = a.load(Ordering::Acquire);
} else {
fence(Ordering::Acquire);
}
}
trait LockState {
fn is_locked(self) -> bool;
fn is_queue_locked(self) -> bool;
fn queue_head(self) -> *const ThreadData;
fn with_queue_head(self, thread_data: *const ThreadData) -> Self;
}
impl LockState for usize {
#[inline]
fn is_locked(self) -> bool {
self & LOCKED_BIT != 0
}
#[inline]
fn is_queue_locked(self) -> bool {
self & QUEUE_LOCKED_BIT != 0
}
#[inline]
fn queue_head(self) -> *const ThreadData {
(self & QUEUE_MASK) as *const ThreadData
}
#[inline]
fn with_queue_head(self, thread_data: *const ThreadData) -> Self {
(self & !QUEUE_MASK) | thread_data as *const _ as usize
}
}
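
A minimal sketch of crate-internal `WordLock` usage, in the spirit of how the parking-lot hash table guards its buckets. `with_queue_lock` is a hypothetical helper, and this only compiles inside the crate, since `word_lock` is a private module.

use crate::word_lock::WordLock;

// Hypothetical crate-internal lock guarding some shared state.
static LOCK: WordLock = WordLock::new();

fn with_queue_lock<T>(f: impl FnOnce() -> T) -> T {
    LOCK.lock();
    let result = f();
    // Safety: the lock was acquired above and is still held here.
    unsafe { LOCK.unlock() };
    result
}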