Vendor dependencies for 0.3.0 release

commit 82ab7f317b
parent 0c8d39d483
2025-09-27 10:29:08 -05:00
26803 changed files with 16134934 additions and 0 deletions

File diff suppressed because it is too large

File diff suppressed because it is too large

vendor/bevy_platform/src/collections/hash_table.rs vendored Normal file

@@ -0,0 +1,6 @@
//! Provides [`HashTable`]
pub use hashbrown::hash_table::{
AbsentEntry, Drain, Entry, ExtractIf, HashTable, IntoIter, Iter, IterHash, IterHashMut,
IterMut, OccupiedEntry, VacantEntry,
};

vendor/bevy_platform/src/collections/mod.rs vendored Normal file

@@ -0,0 +1,12 @@
//! Provides [`HashMap`] and [`HashSet`] from [`hashbrown`] with some customized defaults.
//!
//! Also provides the [`HashTable`] type, which is specific to [`hashbrown`].
pub use hash_map::HashMap;
pub use hash_set::HashSet;
pub use hash_table::HashTable;
pub use hashbrown::Equivalent;
pub mod hash_map;
pub mod hash_set;
pub mod hash_table;
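
Usage sketch (not part of the vendored diff): the re-exported collection types are used like their hashbrown counterparts. The customized defaults live in the hash_map and hash_set modules, whose contents are not shown here, so this assumes only the API surface above.

use bevy_platform::collections::HashMap;

fn main() {
    // Behaves like a hashbrown HashMap using the crate's default hasher.
    let mut positions: HashMap<&str, (f32, f32)> = HashMap::default();
    positions.insert("player", (0.0, 0.0));
    assert!(positions.contains_key("player"));
}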

vendor/bevy_platform/src/hash.rs vendored Normal file

@@ -0,0 +1,180 @@
//! Provides replacements for `std::hash` items using [`foldhash`].
//!
//! Also provides some additional items beyond the standard library.
use core::{
fmt::Debug,
hash::{BuildHasher, Hash, Hasher},
marker::PhantomData,
ops::Deref,
};
pub use foldhash::fast::{FixedState, FoldHasher as DefaultHasher, RandomState};
/// For when you want a deterministic hasher.
///
/// Seed was randomly generated with a fair dice roll. Guaranteed to be random:
/// <https://github.com/bevyengine/bevy/pull/1268/files#r560918426>
const FIXED_HASHER: FixedState =
FixedState::with_seed(0b1001010111101110000001001100010000000011001001101011001001111000);
/// Deterministic hasher based upon a random but fixed state.
#[derive(Copy, Clone, Default, Debug)]
pub struct FixedHasher;
impl BuildHasher for FixedHasher {
type Hasher = DefaultHasher;
#[inline]
fn build_hasher(&self) -> Self::Hasher {
FIXED_HASHER.build_hasher()
}
}
/// A pre-hashed value of a specific type. Pre-hashing enables memoization of hashes that are expensive to compute.
///
/// It also enables faster [`PartialEq`] comparisons by short circuiting on hash equality.
/// See [`PassHash`] and [`PassHasher`] for a "pass through" [`BuildHasher`] and [`Hasher`] implementation
/// designed to work with [`Hashed`]
/// See `PreHashMap` for a hashmap pre-configured to use [`Hashed`] keys.
pub struct Hashed<V, S = FixedHasher> {
hash: u64,
value: V,
marker: PhantomData<S>,
}
impl<V: Hash, H: BuildHasher + Default> Hashed<V, H> {
/// Pre-hashes the given value using the [`BuildHasher`] configured in the [`Hashed`] type.
pub fn new(value: V) -> Self {
Self {
hash: H::default().hash_one(&value),
value,
marker: PhantomData,
}
}
/// The pre-computed hash.
#[inline]
pub fn hash(&self) -> u64 {
self.hash
}
}
impl<V, H> Hash for Hashed<V, H> {
#[inline]
fn hash<R: Hasher>(&self, state: &mut R) {
state.write_u64(self.hash);
}
}
impl<V, H> Deref for Hashed<V, H> {
type Target = V;
#[inline]
fn deref(&self) -> &Self::Target {
&self.value
}
}
impl<V: PartialEq, H> PartialEq for Hashed<V, H> {
/// A fast impl of [`PartialEq`] that first checks that `other`'s pre-computed hash
/// matches this value's pre-computed hash.
#[inline]
fn eq(&self, other: &Self) -> bool {
self.hash == other.hash && self.value.eq(&other.value)
}
}
impl<V: Debug, H> Debug for Hashed<V, H> {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
f.debug_struct("Hashed")
.field("hash", &self.hash)
.field("value", &self.value)
.finish()
}
}
impl<V: Clone, H> Clone for Hashed<V, H> {
#[inline]
fn clone(&self) -> Self {
Self {
hash: self.hash,
value: self.value.clone(),
marker: PhantomData,
}
}
}
impl<V: Copy, H> Copy for Hashed<V, H> {}
impl<V: Eq, H> Eq for Hashed<V, H> {}
/// A [`BuildHasher`] that results in a [`PassHasher`].
#[derive(Default, Clone)]
pub struct PassHash;
impl BuildHasher for PassHash {
type Hasher = PassHasher;
fn build_hasher(&self) -> Self::Hasher {
PassHasher::default()
}
}
/// A no-op hash that only works on `u64`s. Will panic if attempting to
/// hash a type containing non-u64 fields.
#[derive(Debug, Default)]
pub struct PassHasher {
hash: u64,
}
impl Hasher for PassHasher {
#[inline]
fn finish(&self) -> u64 {
self.hash
}
fn write(&mut self, _bytes: &[u8]) {
panic!("can only hash u64 using PassHasher");
}
#[inline]
fn write_u64(&mut self, i: u64) {
self.hash = i;
}
}
/// [`BuildHasher`] for types that already contain a high-quality hash.
#[derive(Clone, Default)]
pub struct NoOpHash;
impl BuildHasher for NoOpHash {
type Hasher = NoOpHasher;
fn build_hasher(&self) -> Self::Hasher {
NoOpHasher(0)
}
}
#[doc(hidden)]
pub struct NoOpHasher(u64);
// This is for types that already contain a high-quality hash and want to skip
// re-hashing that hash.
impl Hasher for NoOpHasher {
fn finish(&self) -> u64 {
self.0
}
fn write(&mut self, bytes: &[u8]) {
// This should never be called by consumers. Prefer to call `write_u64` instead.
// Don't break applications (slower fallback, just check in test):
self.0 = bytes.iter().fold(self.0, |hash, b| {
hash.rotate_left(8).wrapping_add(*b as u64)
});
}
#[inline]
fn write_u64(&mut self, i: u64) {
self.0 = i;
}
}
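
Illustrative sketch (not part of the commit): pre-hashing a key with `Hashed` and storing it in a map built with `PassHash`, so the hash is computed once and then passed straight through. The `bevy_platform::hash` paths follow from `lib.rs` below; the surrounding `main` is assumed for illustration.

use bevy_platform::hash::{Hashed, PassHash};
use std::collections::HashMap;

fn main() {
    // Hash the key once up front with the default deterministic `FixedHasher`.
    let key = Hashed::new(String::from("some expensive-to-hash key"));

    // `PassHash` builds `PassHasher`, which forwards the pre-computed hash,
    // so the map never re-hashes the key material.
    let mut map: HashMap<Hashed<String>, u32, PassHash> = HashMap::default();
    map.insert(key, 7);

    let lookup = Hashed::new(String::from("some expensive-to-hash key"));
    assert_eq!(map.get(&lookup), Some(&7));
}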

vendor/bevy_platform/src/lib.rs vendored Normal file

@@ -0,0 +1,49 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc(
html_logo_url = "https://bevyengine.org/assets/icon.png",
html_favicon_url = "https://bevyengine.org/assets/icon.png"
)]
#![no_std]
//! Platform compatibility support for first-party [Bevy] engine crates.
//!
//! [Bevy]: https://bevyengine.org/
#[cfg(feature = "std")]
extern crate std;
#[cfg(feature = "alloc")]
extern crate alloc;
pub mod hash;
pub mod sync;
pub mod thread;
pub mod time;
#[cfg(feature = "alloc")]
pub mod collections;
/// Frequently used items which would typically be included in most contexts.
///
/// When adding `no_std` support to a crate for the first time, often there's a substantial refactor
/// required due to the change in implicit prelude from `std::prelude` to `core::prelude`.
/// This unfortunately leaves out many items from `alloc`, even if the crate unconditionally
/// includes that crate.
///
/// This prelude aims to ease the transition by re-exporting items from `alloc` which would
/// otherwise be included in the `std` implicit prelude.
pub mod prelude {
#[cfg(feature = "alloc")]
pub use alloc::{
borrow::ToOwned, boxed::Box, format, string::String, string::ToString, vec, vec::Vec,
};
// Items from `std::prelude` that are missing in this module:
// * dbg
// * eprint
// * eprintln
// * is_x86_feature_detected
// * print
// * println
// * thread_local
}
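
Sketch of the prelude in use (not part of the commit), assuming a downstream `no_std` library crate that depends on `bevy_platform` with its `alloc` feature enabled:

#![no_std]
extern crate alloc;

use bevy_platform::prelude::*;

// `String`, `Vec`, and `format!` come back into scope via the prelude.
pub fn label(id: u32) -> String {
    format!("entity-{id}")
}

pub fn labels(count: u32) -> Vec<String> {
    (0..count).map(label).collect()
}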

vendor/bevy_platform/src/sync/atomic.rs vendored Normal file

@@ -0,0 +1,43 @@
//! Provides various atomic alternatives to language primitives.
//!
//! Certain platforms lack complete atomic support, requiring the use of a fallback
//! such as `portable-atomic`.
//! Using these types will ensure the correct atomic provider is used without the need for
//! feature gates in your own code.
pub use atomic_16::{AtomicI16, AtomicU16};
pub use atomic_32::{AtomicI32, AtomicU32};
pub use atomic_64::{AtomicI64, AtomicU64};
pub use atomic_8::{AtomicBool, AtomicI8, AtomicU8};
pub use atomic_ptr::{AtomicIsize, AtomicPtr, AtomicUsize};
pub use core::sync::atomic::Ordering;
#[cfg(target_has_atomic = "8")]
use core::sync::atomic as atomic_8;
#[cfg(not(target_has_atomic = "8"))]
use portable_atomic as atomic_8;
#[cfg(target_has_atomic = "16")]
use core::sync::atomic as atomic_16;
#[cfg(not(target_has_atomic = "16"))]
use portable_atomic as atomic_16;
#[cfg(target_has_atomic = "32")]
use core::sync::atomic as atomic_32;
#[cfg(not(target_has_atomic = "32"))]
use portable_atomic as atomic_32;
#[cfg(target_has_atomic = "64")]
use core::sync::atomic as atomic_64;
#[cfg(not(target_has_atomic = "64"))]
use portable_atomic as atomic_64;
#[cfg(target_has_atomic = "ptr")]
use core::sync::atomic as atomic_ptr;
#[cfg(not(target_has_atomic = "ptr"))]
use portable_atomic as atomic_ptr;
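
Illustrative sketch (not part of the commit): code written against these re-exports looks the same whether the native atomics or the `portable-atomic` fallback is selected.

use bevy_platform::sync::atomic::{AtomicU64, Ordering};

static NEXT_ID: AtomicU64 = AtomicU64::new(0);

fn next_id() -> u64 {
    // Compiles to the native atomic where `target_has_atomic = "64"`,
    // and to the portable-atomic polyfill everywhere else.
    NEXT_ID.fetch_add(1, Ordering::Relaxed)
}

fn main() {
    assert_eq!(next_id(), 0);
    assert_eq!(next_id(), 1);
}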

vendor/bevy_platform/src/sync/barrier.rs vendored Normal file

@@ -0,0 +1,66 @@
//! Provides `Barrier` and `BarrierWaitResult`
pub use implementation::{Barrier, BarrierWaitResult};
#[cfg(feature = "std")]
use std::sync as implementation;
#[cfg(not(feature = "std"))]
mod implementation {
use core::fmt;
/// Fallback implementation of `Barrier` from the standard library.
pub struct Barrier {
inner: spin::Barrier,
}
impl Barrier {
/// Creates a new barrier that can block a given number of threads.
///
/// See the standard library for further details.
#[must_use]
pub const fn new(n: usize) -> Self {
Self {
inner: spin::Barrier::new(n),
}
}
/// Blocks the current thread until all threads have rendezvoused here.
///
/// See the standard library for further details.
pub fn wait(&self) -> BarrierWaitResult {
BarrierWaitResult {
inner: self.inner.wait(),
}
}
}
impl fmt::Debug for Barrier {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Barrier").finish_non_exhaustive()
}
}
/// Fallback implementation of `BarrierWaitResult` from the standard library.
pub struct BarrierWaitResult {
inner: spin::barrier::BarrierWaitResult,
}
impl BarrierWaitResult {
/// Returns `true` if this thread is the "leader thread" for the call to [`Barrier::wait()`].
///
/// See the standard library for further details.
#[must_use]
pub fn is_leader(&self) -> bool {
self.inner.is_leader()
}
}
impl fmt::Debug for BarrierWaitResult {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("BarrierWaitResult")
.field("is_leader", &self.is_leader())
.finish()
}
}
}
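
Usage sketch (not part of the commit), assuming a `std` target so real threads are available; the API is identical for the spin-based fallback.

use bevy_platform::sync::Barrier;
use std::sync::Arc;
use std::thread;

fn main() {
    let barrier = Arc::new(Barrier::new(4));
    let handles: Vec<_> = (0..4)
        .map(|_| {
            let barrier = Arc::clone(&barrier);
            // Each thread blocks until all four have arrived.
            thread::spawn(move || barrier.wait().is_leader())
        })
        .collect();
    // Exactly one of the wait() calls reports leadership.
    let leaders = handles
        .into_iter()
        .map(|handle| handle.join().unwrap())
        .filter(|&is_leader| is_leader)
        .count();
    assert_eq!(leaders, 1);
}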

vendor/bevy_platform/src/sync/lazy_lock.rs vendored Normal file

@@ -0,0 +1,11 @@
//! Provides `LazyLock`
pub use implementation::LazyLock;
#[cfg(feature = "std")]
use std::sync as implementation;
#[cfg(not(feature = "std"))]
mod implementation {
pub use spin::Lazy as LazyLock;
}
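
Usage sketch (not part of the commit): `LazyLock` initializes on first access, whether it is backed by `std::sync::LazyLock` or `spin::Lazy`.

use bevy_platform::sync::LazyLock;

static GREETING: LazyLock<String> = LazyLock::new(|| {
    // Runs at most once, on first dereference.
    String::from("hello")
});

fn main() {
    assert_eq!(GREETING.as_str(), "hello");
}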

vendor/bevy_platform/src/sync/mod.rs vendored Normal file

@@ -0,0 +1,33 @@
//! Provides various synchronization alternatives to language primitives.
//!
//! Currently missing from this module are the following items:
//! * `Condvar`
//! * `WaitTimeoutResult`
//! * `mpsc`
//!
//! Otherwise, this is a drop-in replacement for `std::sync`.
pub use barrier::{Barrier, BarrierWaitResult};
pub use lazy_lock::LazyLock;
pub use mutex::{Mutex, MutexGuard};
pub use once::{Once, OnceLock, OnceState};
pub use poison::{LockResult, PoisonError, TryLockError, TryLockResult};
pub use rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard};
#[cfg(feature = "alloc")]
pub use arc::{Arc, Weak};
pub mod atomic;
mod barrier;
mod lazy_lock;
mod mutex;
mod once;
mod poison;
mod rwlock;
#[cfg(all(feature = "alloc", not(target_has_atomic = "ptr")))]
use portable_atomic_util as arc;
#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))]
use alloc::sync as arc;

vendor/bevy_platform/src/sync/mutex.rs vendored Normal file

@@ -0,0 +1,108 @@
//! Provides `Mutex` and `MutexGuard`
pub use implementation::{Mutex, MutexGuard};
#[cfg(feature = "std")]
use std::sync as implementation;
#[cfg(not(feature = "std"))]
mod implementation {
use crate::sync::{LockResult, TryLockError, TryLockResult};
use core::fmt;
pub use spin::MutexGuard;
/// Fallback implementation of `Mutex` from the standard library.
pub struct Mutex<T: ?Sized> {
inner: spin::Mutex<T>,
}
impl<T> Mutex<T> {
/// Creates a new mutex in an unlocked state ready for use.
///
/// See the standard library for further details.
pub const fn new(t: T) -> Self {
Self {
inner: spin::Mutex::new(t),
}
}
}
impl<T: ?Sized> Mutex<T> {
/// Acquires a mutex, blocking the current thread until it is able to do so.
///
/// See the standard library for further details.
pub fn lock(&self) -> LockResult<MutexGuard<'_, T>> {
Ok(self.inner.lock())
}
/// Attempts to acquire this lock.
///
/// See the standard library for further details.
pub fn try_lock(&self) -> TryLockResult<MutexGuard<'_, T>> {
self.inner.try_lock().ok_or(TryLockError::WouldBlock)
}
/// Determines whether the mutex is poisoned.
///
/// See the standard library for further details.
pub fn is_poisoned(&self) -> bool {
false
}
/// Clear the poisoned state from a mutex.
///
/// See the standard library for further details.
pub fn clear_poison(&self) {
// no-op
}
/// Consumes this mutex, returning the underlying data.
///
/// See the standard library for further details.
pub fn into_inner(self) -> LockResult<T>
where
T: Sized,
{
Ok(self.inner.into_inner())
}
/// Returns a mutable reference to the underlying data.
///
/// See the standard library for further details.
pub fn get_mut(&mut self) -> LockResult<&mut T> {
Ok(self.inner.get_mut())
}
}
impl<T> From<T> for Mutex<T> {
fn from(t: T) -> Self {
Mutex::new(t)
}
}
impl<T: Default> Default for Mutex<T> {
fn default() -> Mutex<T> {
Mutex::new(Default::default())
}
}
impl<T: ?Sized + fmt::Debug> fmt::Debug for Mutex<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut d = f.debug_struct("Mutex");
match self.try_lock() {
Ok(guard) => {
d.field("data", &&*guard);
}
Err(TryLockError::Poisoned(err)) => {
d.field("data", &&**err.get_ref());
}
Err(TryLockError::WouldBlock) => {
d.field("data", &format_args!("<locked>"));
}
}
d.field("poisoned", &false);
d.finish_non_exhaustive()
}
}
}
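
Usage sketch (not part of the commit): the fallback keeps the `LockResult`-returning signatures of `std::sync::Mutex`, so callers unwrap as usual even though the spin mutex can never actually be poisoned.

use bevy_platform::sync::Mutex;

fn main() {
    let counter = Mutex::new(0u32);
    {
        // `lock()` returns a `LockResult`, matching `std::sync::Mutex`.
        let mut guard = counter.lock().unwrap();
        *guard += 1;
    }
    assert_eq!(*counter.lock().unwrap(), 1);
}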

vendor/bevy_platform/src/sync/once.rs vendored Normal file

@@ -0,0 +1,217 @@
//! Provides `Once`, `OnceState`, `OnceLock`
pub use implementation::{Once, OnceLock, OnceState};
#[cfg(feature = "std")]
use std::sync as implementation;
#[cfg(not(feature = "std"))]
mod implementation {
use core::{
fmt,
panic::{RefUnwindSafe, UnwindSafe},
};
/// Fallback implementation of `OnceLock` from the standard library.
pub struct OnceLock<T> {
inner: spin::Once<T>,
}
impl<T> OnceLock<T> {
/// Creates a new empty cell.
///
/// See the standard library for further details.
#[must_use]
pub const fn new() -> Self {
Self {
inner: spin::Once::new(),
}
}
/// Gets the reference to the underlying value.
///
/// See the standard library for further details.
pub fn get(&self) -> Option<&T> {
self.inner.get()
}
/// Gets the mutable reference to the underlying value.
///
/// See the standard library for further details.
pub fn get_mut(&mut self) -> Option<&mut T> {
self.inner.get_mut()
}
/// Sets the contents of this cell to `value`.
///
/// See the standard library for further details.
pub fn set(&self, value: T) -> Result<(), T> {
let mut value = Some(value);
self.inner.call_once(|| value.take().unwrap());
match value {
Some(value) => Err(value),
None => Ok(()),
}
}
/// Gets the contents of the cell, initializing it with `f` if the cell
/// was empty.
///
/// See the standard library for further details.
pub fn get_or_init<F>(&self, f: F) -> &T
where
F: FnOnce() -> T,
{
self.inner.call_once(f)
}
/// Consumes the `OnceLock`, returning the wrapped value. Returns
/// `None` if the cell was empty.
///
/// See the standard library for further details.
pub fn into_inner(mut self) -> Option<T> {
self.take()
}
/// Takes the value out of this `OnceLock`, moving it back to an uninitialized state.
///
/// See the standard library for further details.
pub fn take(&mut self) -> Option<T> {
if self.inner.is_completed() {
let mut inner = spin::Once::new();
core::mem::swap(&mut self.inner, &mut inner);
inner.try_into_inner()
} else {
None
}
}
}
impl<T: RefUnwindSafe + UnwindSafe> RefUnwindSafe for OnceLock<T> {}
impl<T: UnwindSafe> UnwindSafe for OnceLock<T> {}
impl<T> Default for OnceLock<T> {
fn default() -> OnceLock<T> {
OnceLock::new()
}
}
impl<T: fmt::Debug> fmt::Debug for OnceLock<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut d = f.debug_tuple("OnceLock");
match self.get() {
Some(v) => d.field(v),
None => d.field(&format_args!("<uninit>")),
};
d.finish()
}
}
impl<T: Clone> Clone for OnceLock<T> {
fn clone(&self) -> OnceLock<T> {
let cell = Self::new();
if let Some(value) = self.get() {
cell.set(value.clone()).ok().unwrap();
}
cell
}
}
impl<T> From<T> for OnceLock<T> {
fn from(value: T) -> Self {
let cell = Self::new();
cell.set(value).map(move |_| cell).ok().unwrap()
}
}
impl<T: PartialEq> PartialEq for OnceLock<T> {
fn eq(&self, other: &OnceLock<T>) -> bool {
self.get() == other.get()
}
}
impl<T: Eq> Eq for OnceLock<T> {}
/// Fallback implementation of `Once` from the standard library.
pub struct Once {
inner: OnceLock<()>,
}
impl Once {
/// Creates a new `Once` value.
///
/// See the standard library for further details.
#[expect(clippy::new_without_default, reason = "matching std::sync::Once")]
pub const fn new() -> Self {
Self {
inner: OnceLock::new(),
}
}
/// Performs an initialization routine once and only once. The given closure
/// will be executed if this is the first time `call_once` has been called,
/// and otherwise the routine will *not* be invoked.
///
/// See the standard library for further details.
pub fn call_once<F: FnOnce()>(&self, f: F) {
self.inner.get_or_init(f);
}
/// Performs the same function as [`call_once()`] except ignores poisoning.
///
/// See the standard library for further details.
pub fn call_once_force<F: FnOnce(&OnceState)>(&self, f: F) {
const STATE: OnceState = OnceState { _private: () };
self.call_once(move || f(&STATE));
}
/// Returns `true` if some [`call_once()`] call has completed
/// successfully. Specifically, `is_completed` will return false in
/// the following situations:
/// * [`call_once()`] was not called at all,
/// * [`call_once()`] was called, but has not yet completed,
/// * the [`Once`] instance is poisoned
///
/// See the standard library for further details.
pub fn is_completed(&self) -> bool {
self.inner.get().is_some()
}
}
impl RefUnwindSafe for Once {}
impl UnwindSafe for Once {}
impl fmt::Debug for Once {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Once").finish_non_exhaustive()
}
}
/// Fallback implementation of `OnceState` from the standard library.
pub struct OnceState {
_private: (),
}
impl OnceState {
/// Returns `true` if the associated [`Once`] was poisoned prior to the
/// invocation of the closure passed to [`Once::call_once_force()`].
///
/// See the standard library for further details.
pub fn is_poisoned(&self) -> bool {
false
}
}
impl fmt::Debug for OnceState {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("OnceState")
.field("poisoned", &self.is_poisoned())
.finish()
}
}
}
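
Usage sketch (not part of the commit) of the `OnceLock` API shared by the `std` and fallback implementations.

use bevy_platform::sync::OnceLock;

static CONFIG: OnceLock<u32> = OnceLock::new();

fn main() {
    // The first `set` wins; later attempts hand the rejected value back.
    assert_eq!(CONFIG.set(7), Ok(()));
    assert_eq!(CONFIG.set(8), Err(8));
    assert_eq!(CONFIG.get(), Some(&7));
    // The initializer is skipped because the cell is already populated.
    assert_eq!(*CONFIG.get_or_init(|| 9), 7);
}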

vendor/bevy_platform/src/sync/poison.rs vendored Normal file

@@ -0,0 +1,107 @@
//! Provides `LockResult`, `PoisonError`, `TryLockError`, `TryLockResult`
pub use implementation::{LockResult, PoisonError, TryLockError, TryLockResult};
#[cfg(feature = "std")]
use std::sync as implementation;
#[cfg(not(feature = "std"))]
mod implementation {
use core::{error::Error, fmt};
/// Fallback implementation of `PoisonError` from the standard library.
pub struct PoisonError<T> {
guard: T,
}
impl<T> fmt::Debug for PoisonError<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("PoisonError").finish_non_exhaustive()
}
}
impl<T> fmt::Display for PoisonError<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
"poisoned lock: another task failed inside".fmt(f)
}
}
impl<T> Error for PoisonError<T> {}
impl<T> PoisonError<T> {
/// Creates a `PoisonError`.
///
/// See the standard library for further details.
#[cfg(panic = "unwind")]
pub fn new(guard: T) -> PoisonError<T> {
PoisonError { guard }
}
/// Consumes this error indicating that a lock is poisoned, returning the
/// underlying guard to allow access regardless.
///
/// See the standard library for further details.
pub fn into_inner(self) -> T {
self.guard
}
/// Reaches into this error indicating that a lock is poisoned, returning a
/// reference to the underlying guard to allow access regardless.
///
/// See the standard library for further details.
pub fn get_ref(&self) -> &T {
&self.guard
}
/// Reaches into this error indicating that a lock is poisoned, returning a
/// mutable reference to the underlying guard to allow access regardless.
///
/// See the standard library for further details.
pub fn get_mut(&mut self) -> &mut T {
&mut self.guard
}
}
/// Fallback implementation of `TryLockError` from the standard library.
pub enum TryLockError<T> {
/// The lock could not be acquired because another thread failed while holding
/// the lock.
Poisoned(PoisonError<T>),
/// The lock could not be acquired at this time because the operation would
/// otherwise block.
WouldBlock,
}
impl<T> From<PoisonError<T>> for TryLockError<T> {
fn from(err: PoisonError<T>) -> TryLockError<T> {
TryLockError::Poisoned(err)
}
}
impl<T> fmt::Debug for TryLockError<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
TryLockError::Poisoned(..) => "Poisoned(..)".fmt(f),
TryLockError::WouldBlock => "WouldBlock".fmt(f),
}
}
}
impl<T> fmt::Display for TryLockError<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
TryLockError::Poisoned(..) => "poisoned lock: another task failed inside",
TryLockError::WouldBlock => "try_lock failed because the operation would block",
}
.fmt(f)
}
}
impl<T> Error for TryLockError<T> {}
/// Fallback implementation of `LockResult` from the standard library.
pub type LockResult<Guard> = Result<Guard, PoisonError<Guard>>;
/// Fallback implementation of `TryLockResult` from the standard library.
pub type TryLockResult<Guard> = Result<Guard, TryLockError<Guard>>;
}
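
Usage sketch (not part of the commit): with a guard still held, `try_lock` reports `TryLockError::WouldBlock`; the `Poisoned` variant exists for signature compatibility but is never produced by the fallback.

use bevy_platform::sync::{Mutex, TryLockError};

fn main() {
    let lock = Mutex::new(5);
    let _held = lock.lock().unwrap();
    // The lock is already held, so trying again fails without blocking.
    match lock.try_lock() {
        Err(TryLockError::WouldBlock) => {}
        _ => panic!("expected WouldBlock"),
    }
}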

vendor/bevy_platform/src/sync/rwlock.rs vendored Normal file

@@ -0,0 +1,124 @@
//! Provides `RwLock`, `RwLockReadGuard`, `RwLockWriteGuard`
pub use implementation::{RwLock, RwLockReadGuard, RwLockWriteGuard};
#[cfg(feature = "std")]
use std::sync as implementation;
#[cfg(not(feature = "std"))]
mod implementation {
use crate::sync::{LockResult, TryLockError, TryLockResult};
use core::fmt;
pub use spin::rwlock::{RwLockReadGuard, RwLockWriteGuard};
/// Fallback implementation of `RwLock` from the standard library.
pub struct RwLock<T: ?Sized> {
inner: spin::RwLock<T>,
}
impl<T> RwLock<T> {
/// Creates a new instance of an `RwLock<T>` which is unlocked.
///
/// See the standard library for further details.
pub const fn new(t: T) -> RwLock<T> {
Self {
inner: spin::RwLock::new(t),
}
}
}
impl<T: ?Sized> RwLock<T> {
/// Locks this `RwLock` with shared read access, blocking the current thread
/// until it can be acquired.
///
/// See the standard library for further details.
pub fn read(&self) -> LockResult<RwLockReadGuard<'_, T>> {
Ok(self.inner.read())
}
/// Attempts to acquire this `RwLock` with shared read access.
///
/// See the standard library for further details.
pub fn try_read(&self) -> TryLockResult<RwLockReadGuard<'_, T>> {
self.inner.try_read().ok_or(TryLockError::WouldBlock)
}
/// Locks this `RwLock` with exclusive write access, blocking the current
/// thread until it can be acquired.
///
/// See the standard library for further details.
pub fn write(&self) -> LockResult<RwLockWriteGuard<'_, T>> {
Ok(self.inner.write())
}
/// Attempts to lock this `RwLock` with exclusive write access.
///
/// See the standard library for further details.
pub fn try_write(&self) -> TryLockResult<RwLockWriteGuard<'_, T>> {
self.inner.try_write().ok_or(TryLockError::WouldBlock)
}
/// Determines whether the lock is poisoned.
///
/// See the standard library for further details.
pub fn is_poisoned(&self) -> bool {
false
}
/// Clear the poisoned state from a lock.
///
/// See the standard library for further details.
pub fn clear_poison(&self) {
// no-op
}
/// Consumes this `RwLock`, returning the underlying data.
///
/// See the standard library for further details.
pub fn into_inner(self) -> LockResult<T>
where
T: Sized,
{
Ok(self.inner.into_inner())
}
/// Returns a mutable reference to the underlying data.
///
/// See the standard library for further details.
pub fn get_mut(&mut self) -> LockResult<&mut T> {
Ok(self.inner.get_mut())
}
}
impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut d = f.debug_struct("RwLock");
match self.try_read() {
Ok(guard) => {
d.field("data", &&*guard);
}
Err(TryLockError::Poisoned(err)) => {
d.field("data", &&**err.get_ref());
}
Err(TryLockError::WouldBlock) => {
d.field("data", &format_args!("<locked>"));
}
}
d.field("poisoned", &false);
d.finish_non_exhaustive()
}
}
impl<T: Default> Default for RwLock<T> {
fn default() -> RwLock<T> {
RwLock::new(Default::default())
}
}
impl<T> From<T> for RwLock<T> {
fn from(t: T) -> Self {
RwLock::new(t)
}
}
}
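
Usage sketch (not part of the commit): shared reads and exclusive writes through the `std`-compatible `LockResult` signatures.

use bevy_platform::sync::RwLock;

fn main() {
    let lock = RwLock::new(vec![1, 2, 3]);
    {
        // Shared read access.
        let data = lock.read().unwrap();
        assert_eq!(data.len(), 3);
    }
    // Exclusive write access.
    lock.write().unwrap().push(4);
    assert_eq!(lock.read().unwrap().len(), 4);
}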

vendor/bevy_platform/src/thread.rs vendored Normal file

@@ -0,0 +1,29 @@
//! Provides `sleep` for all platforms.
pub use thread::sleep;
cfg_if::cfg_if! {
// TODO: use browser timeouts based on ScheduleRunnerPlugin::build
if #[cfg(feature = "std")] {
use std::thread;
} else {
mod fallback {
use core::{hint::spin_loop, time::Duration};
use crate::time::Instant;
/// Puts the current thread to sleep for at least the specified amount of time.
///
/// As this is a `no_std` fallback implementation, this will spin the current thread.
pub fn sleep(dur: Duration) {
let start = Instant::now();
while start.elapsed() < dur {
spin_loop()
}
}
}
use fallback as thread;
}
}
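
Usage sketch (not part of the commit):

use bevy_platform::thread::sleep;
use core::time::Duration;

fn main() {
    // Delegates to `std::thread::sleep` when `std` is enabled; otherwise it
    // spin-waits against the crate's `Instant`.
    sleep(Duration::from_millis(5));
}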

vendor/bevy_platform/src/time/fallback.rs vendored Normal file

@@ -0,0 +1,177 @@
//! Provides a fallback implementation of `Instant` from the standard library.
#![expect(
unsafe_code,
reason = "Instant fallback requires unsafe to allow users to update the internal value"
)]
use crate::sync::atomic::{AtomicPtr, Ordering};
use core::{
fmt,
ops::{Add, AddAssign, Sub, SubAssign},
time::Duration,
};
static ELAPSED_GETTER: AtomicPtr<()> = AtomicPtr::new(unset_getter as *mut _);
/// Fallback implementation of `Instant` suitable for a `no_std` environment.
///
/// If you are on any of the following target architectures, this is a drop-in replacement:
///
/// - `x86`
/// - `x86_64`
/// - `aarch64`
///
/// On any other architecture, you must call [`Instant::set_elapsed`], providing a method
/// which when called supplies a monotonically increasing count of elapsed nanoseconds relative
/// to some arbitrary point in time.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Instant(Duration);
impl Instant {
/// Returns an instant corresponding to "now".
#[must_use]
pub fn now() -> Instant {
let getter = ELAPSED_GETTER.load(Ordering::Acquire);
// SAFETY: Function pointer is always valid
let getter = unsafe { core::mem::transmute::<*mut (), fn() -> Duration>(getter) };
Self((getter)())
}
/// Provides a function returning the amount of time that has elapsed since execution began.
/// The getter provided to this method will be used by [`now`](Instant::now).
///
/// # Safety
///
/// - The function provided must accurately represent the elapsed time.
/// - The function must preserve all invariants of the [`Instant`] type.
/// - The pointer to the function must be valid whenever [`Instant::now`] is called.
pub unsafe fn set_elapsed(getter: fn() -> Duration) {
ELAPSED_GETTER.store(getter as *mut _, Ordering::Release);
}
/// Returns the amount of time elapsed from another instant to this one,
/// or zero duration if that instant is later than this one.
#[must_use]
pub fn duration_since(&self, earlier: Instant) -> Duration {
self.saturating_duration_since(earlier)
}
/// Returns the amount of time elapsed from another instant to this one,
/// or None if that instant is later than this one.
///
/// Due to monotonicity bugs, even under correct logical ordering of the passed `Instant`s,
/// this method can return `None`.
#[must_use]
pub fn checked_duration_since(&self, earlier: Instant) -> Option<Duration> {
self.0.checked_sub(earlier.0)
}
/// Returns the amount of time elapsed from another instant to this one,
/// or zero duration if that instant is later than this one.
#[must_use]
pub fn saturating_duration_since(&self, earlier: Instant) -> Duration {
self.0.saturating_sub(earlier.0)
}
/// Returns the amount of time elapsed since this instant.
#[must_use]
pub fn elapsed(&self) -> Duration {
Instant::now().saturating_duration_since(*self)
}
/// Returns `Some(t)` where `t` is the time `self + duration` if `t` can be represented as
/// `Instant` (which means it's inside the bounds of the underlying data structure), `None`
/// otherwise.
pub fn checked_add(&self, duration: Duration) -> Option<Instant> {
self.0.checked_add(duration).map(Instant)
}
/// Returns `Some(t)` where `t` is the time `self - duration` if `t` can be represented as
/// `Instant` (which means it's inside the bounds of the underlying data structure), `None`
/// otherwise.
pub fn checked_sub(&self, duration: Duration) -> Option<Instant> {
self.0.checked_sub(duration).map(Instant)
}
}
impl Add<Duration> for Instant {
type Output = Instant;
/// # Panics
///
/// This function may panic if the resulting point in time cannot be represented by the
/// underlying data structure. See [`Instant::checked_add`] for a version without panic.
fn add(self, other: Duration) -> Instant {
self.checked_add(other)
.expect("overflow when adding duration to instant")
}
}
impl AddAssign<Duration> for Instant {
fn add_assign(&mut self, other: Duration) {
*self = *self + other;
}
}
impl Sub<Duration> for Instant {
type Output = Instant;
fn sub(self, other: Duration) -> Instant {
self.checked_sub(other)
.expect("overflow when subtracting duration from instant")
}
}
impl SubAssign<Duration> for Instant {
fn sub_assign(&mut self, other: Duration) {
*self = *self - other;
}
}
impl Sub<Instant> for Instant {
type Output = Duration;
/// Returns the amount of time elapsed from another instant to this one,
/// or zero duration if that instant is later than this one.
fn sub(self, other: Instant) -> Duration {
self.duration_since(other)
}
}
impl fmt::Debug for Instant {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.0.fmt(f)
}
}
fn unset_getter() -> Duration {
cfg_if::cfg_if! {
if #[cfg(target_arch = "x86")] {
// SAFETY: standard technique for getting a nanosecond counter on x86
let nanos = unsafe {
core::arch::x86::_rdtsc()
};
Duration::from_nanos(nanos)
} else if #[cfg(target_arch = "x86_64")] {
// SAFETY: standard technique for getting a nanosecond counter on x86_64
let nanos = unsafe {
core::arch::x86_64::_rdtsc()
};
Duration::from_nanos(nanos)
} else if #[cfg(target_arch = "aarch64")] {
// SAFETY: standard technique for getting a nanosecond counter on aarch64
let nanos = unsafe {
let mut ticks: u64;
core::arch::asm!("mrs {}, cntvct_el0", out(reg) ticks);
ticks
};
Duration::from_nanos(nanos)
} else {
panic!("An elapsed time getter has not been provided to `Instant`. Please use `Instant::set_elapsed(...)` before calling `Instant::now()`")
}
}
}
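
Illustrative sketch (not part of the commit) of wiring up `Instant::set_elapsed` on an unsupported architecture, assuming `bevy_platform` is built without its `std` feature so the fallback `Instant` above is in use; the atomic counter stands in for a real hardware timer.

use bevy_platform::sync::atomic::{AtomicU64, Ordering};
use bevy_platform::time::Instant;
use core::time::Duration;

// Stand-in monotonic clock; a real port would read a hardware timer instead.
static FAKE_NANOS: AtomicU64 = AtomicU64::new(0);

fn fake_elapsed() -> Duration {
    // Advance on every read so the reported time never goes backwards.
    Duration::from_nanos(FAKE_NANOS.fetch_add(1_000, Ordering::Relaxed))
}

pub fn install_clock() {
    // SAFETY: `fake_elapsed` is monotonic and valid for the program's lifetime.
    unsafe { Instant::set_elapsed(fake_elapsed) };
}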

vendor/bevy_platform/src/time/mod.rs vendored Normal file

@@ -0,0 +1,15 @@
//! Provides `Instant` for all platforms.
pub use time::Instant;
cfg_if::cfg_if! {
if #[cfg(all(target_arch = "wasm32", feature = "web"))] {
use web_time as time;
} else if #[cfg(feature = "std")] {
use std::time;
} else {
mod fallback;
use fallback as time;
}
}
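
Illustrative sketch (not part of the commit): call sites look the same whichever `Instant` the `cfg_if` selects.

use bevy_platform::time::Instant;
use core::time::Duration;

fn measure<F: FnOnce()>(work: F) -> Duration {
    // `Instant` is std's, web-time's, or the fallback above, depending on
    // the target and enabled features.
    let start = Instant::now();
    work();
    start.elapsed()
}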