Vendor dependencies for 0.3.0 release

This commit is contained in:
2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

652
vendor/bevy_tasks/src/edge_executor.rs vendored Normal file

@@ -0,0 +1,652 @@
//! Alternative to `async_executor` based on [`edge_executor`] by Ivan Markov.
//!
//! It has been vendored along with its tests to update several outdated dependencies.
//!
//! [`async_executor`]: https://github.com/smol-rs/async-executor
//! [`edge_executor`]: https://github.com/ivmarkov/edge-executor
#![expect(unsafe_code, reason = "original implementation relies on unsafe")]
#![expect(
dead_code,
reason = "keeping methods from original implementation for transparency"
)]
// TODO: Create a more tailored replacement, possibly integrating [Forte](https://github.com/NthTensor/Forte)
use alloc::rc::Rc;
use core::{
future::{poll_fn, Future},
marker::PhantomData,
task::{Context, Poll},
};
use async_task::{Runnable, Task};
use atomic_waker::AtomicWaker;
use bevy_platform::sync::{Arc, LazyLock};
use futures_lite::FutureExt;
/// An async executor.
///
/// # Examples
///
/// A multi-threaded executor:
///
/// ```ignore
/// use async_channel::unbounded;
/// use easy_parallel::Parallel;
///
/// use edge_executor::{Executor, block_on};
///
/// let ex: Executor = Default::default();
/// let (signal, shutdown) = unbounded::<()>();
///
/// Parallel::new()
/// // Run four executor threads.
/// .each(0..4, |_| block_on(ex.run(shutdown.recv())))
/// // Run the main future on the current thread.
/// .finish(|| block_on(async {
/// println!("Hello world!");
/// drop(signal);
/// }));
/// ```
pub struct Executor<'a, const C: usize = 64> {
state: LazyLock<Arc<State<C>>>,
_invariant: PhantomData<core::cell::UnsafeCell<&'a ()>>,
}
impl<'a, const C: usize> Executor<'a, C> {
/// Creates a new executor.
///
/// # Examples
///
/// ```ignore
/// use edge_executor::Executor;
///
/// let ex: Executor = Default::default();
/// ```
pub const fn new() -> Self {
Self {
state: LazyLock::new(|| Arc::new(State::new())),
_invariant: PhantomData,
}
}
/// Spawns a task onto the executor.
///
/// # Examples
///
/// ```ignore
/// use edge_executor::Executor;
///
/// let ex: Executor = Default::default();
///
/// let task = ex.spawn(async {
/// println!("Hello world");
/// });
/// ```
///
/// Note that if the executor's queue size is equal to the number of currently
/// spawned and running tasks, spawning this additional task might cause the executor to panic
/// later, when the task is scheduled for polling.
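///
/// A hedged sketch of pinning the queue capacity explicitly via the `C` const
/// parameter (the capacity value here is illustrative only):
///
/// ```ignore
/// use edge_executor::Executor;
///
/// // The scheduling queue holds at most 4 runnable tasks at a time.
/// let ex: Executor<'_, 4> = Default::default();
///
/// let task = ex.spawn(async { 40 + 2 });
/// ```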
pub fn spawn<F>(&self, fut: F) -> Task<F::Output>
where
F: Future + Send + 'a,
F::Output: Send + 'a,
{
// SAFETY: Original implementation missing safety documentation
unsafe { self.spawn_unchecked(fut) }
}
/// Attempts to run a task if at least one is scheduled.
///
/// Running a scheduled task means simply polling its future once.
///
/// # Examples
///
/// ```ignore
/// use edge_executor::Executor;
///
/// let ex: Executor = Default::default();
/// assert!(!ex.try_tick()); // no tasks to run
///
/// let task = ex.spawn(async {
/// println!("Hello world");
/// });
/// assert!(ex.try_tick()); // a task was found
/// ```
pub fn try_tick(&self) -> bool {
if let Some(runnable) = self.try_runnable() {
runnable.run();
true
} else {
false
}
}
/// Runs a single task asynchronously.
///
/// Running a task means simply polling its future once.
///
/// If no tasks are scheduled when this method is called, it will wait until one is scheduled.
///
/// # Examples
///
/// ```ignore
/// use edge_executor::{Executor, block_on};
///
/// let ex: Executor = Default::default();
///
/// let task = ex.spawn(async {
/// println!("Hello world");
/// });
/// block_on(ex.tick()); // runs the task
/// ```
pub async fn tick(&self) {
self.runnable().await.run();
}
/// Runs the executor asynchronously until the given future completes.
///
/// # Examples
///
/// ```ignore
/// use edge_executor::{Executor, block_on};
///
/// let ex: Executor = Default::default();
///
/// let task = ex.spawn(async { 1 + 2 });
/// let res = block_on(ex.run(async { task.await * 2 }));
///
/// assert_eq!(res, 6);
/// ```
pub async fn run<F>(&self, fut: F) -> F::Output
where
F: Future + Send + 'a,
{
// SAFETY: Original implementation missing safety documentation
unsafe { self.run_unchecked(fut).await }
}
/// Waits for the next runnable task to run.
async fn runnable(&self) -> Runnable {
poll_fn(|ctx| self.poll_runnable(ctx)).await
}
/// Polls the first task scheduled for execution by the executor.
fn poll_runnable(&self, ctx: &Context<'_>) -> Poll<Runnable> {
self.state().waker.register(ctx.waker());
if let Some(runnable) = self.try_runnable() {
Poll::Ready(runnable)
} else {
Poll::Pending
}
}
/// Pops the first task scheduled for execution by the executor.
///
/// Returns
/// - `None` - if no task was scheduled for execution
/// - `Some(Runnable)` - the first task scheduled for execution. Calling `Runnable::run` will
/// execute the task. In other words, it will poll its future.
fn try_runnable(&self) -> Option<Runnable> {
let runnable;
#[cfg(all(
target_has_atomic = "8",
target_has_atomic = "16",
target_has_atomic = "32",
target_has_atomic = "64",
target_has_atomic = "ptr"
))]
{
runnable = self.state().queue.pop();
}
#[cfg(not(all(
target_has_atomic = "8",
target_has_atomic = "16",
target_has_atomic = "32",
target_has_atomic = "64",
target_has_atomic = "ptr"
)))]
{
runnable = self.state().queue.dequeue();
}
runnable
}
/// # Safety
///
/// Original implementation missing safety documentation
unsafe fn spawn_unchecked<F>(&self, fut: F) -> Task<F::Output>
where
F: Future,
{
let schedule = {
let state = self.state().clone();
move |runnable| {
#[cfg(all(
target_has_atomic = "8",
target_has_atomic = "16",
target_has_atomic = "32",
target_has_atomic = "64",
target_has_atomic = "ptr"
))]
{
state.queue.push(runnable).unwrap();
}
#[cfg(not(all(
target_has_atomic = "8",
target_has_atomic = "16",
target_has_atomic = "32",
target_has_atomic = "64",
target_has_atomic = "ptr"
)))]
{
state.queue.enqueue(runnable).unwrap();
}
if let Some(waker) = state.waker.take() {
waker.wake();
}
}
};
// SAFETY: Original implementation missing safety documentation
let (runnable, task) = unsafe { async_task::spawn_unchecked(fut, schedule) };
runnable.schedule();
task
}
/// # Safety
///
/// Original implementation missing safety documentation
async unsafe fn run_unchecked<F>(&self, fut: F) -> F::Output
where
F: Future,
{
let run_forever = async {
loop {
self.tick().await;
}
};
run_forever.or(fut).await
}
/// Returns a reference to the inner state.
fn state(&self) -> &Arc<State<C>> {
&self.state
}
}
impl<'a, const C: usize> Default for Executor<'a, C> {
fn default() -> Self {
Self::new()
}
}
// SAFETY: Original implementation missing safety documentation
unsafe impl<'a, const C: usize> Send for Executor<'a, C> {}
// SAFETY: Original implementation missing safety documentation
unsafe impl<'a, const C: usize> Sync for Executor<'a, C> {}
/// A thread-local executor.
///
/// The executor can only be run on the thread that created it.
///
/// # Examples
///
/// ```ignore
/// use edge_executor::{LocalExecutor, block_on};
///
/// let local_ex: LocalExecutor = Default::default();
///
/// block_on(local_ex.run(async {
/// println!("Hello world!");
/// }));
/// ```
pub struct LocalExecutor<'a, const C: usize = 64> {
executor: Executor<'a, C>,
_not_send: PhantomData<core::cell::UnsafeCell<&'a Rc<()>>>,
}
impl<'a, const C: usize> LocalExecutor<'a, C> {
/// Creates a single-threaded executor.
///
/// # Examples
///
/// ```ignore
/// use edge_executor::LocalExecutor;
///
/// let local_ex: LocalExecutor = Default::default();
/// ```
pub const fn new() -> Self {
Self {
executor: Executor::<C>::new(),
_not_send: PhantomData,
}
}
/// Spawns a task onto the executor.
///
/// # Examples
///
/// ```ignore
/// use edge_executor::LocalExecutor;
///
/// let local_ex: LocalExecutor = Default::default();
///
/// let task = local_ex.spawn(async {
/// println!("Hello world");
/// });
/// ```
///
/// Note that if the executor's queue size is equal to the number of currently
/// spawned and running tasks, spawning this additional task might cause the executor to panic
/// later, when the task is scheduled for polling.
pub fn spawn<F>(&self, fut: F) -> Task<F::Output>
where
F: Future + 'a,
F::Output: 'a,
{
// SAFETY: Original implementation missing safety documentation
unsafe { self.executor.spawn_unchecked(fut) }
}
/// Attempts to run a task if at least one is scheduled.
///
/// Running a scheduled task means simply polling its future once.
///
/// # Examples
///
/// ```ignore
/// use edge_executor::LocalExecutor;
///
/// let local_ex: LocalExecutor = Default::default();
/// assert!(!local_ex.try_tick()); // no tasks to run
///
/// let task = local_ex.spawn(async {
/// println!("Hello world");
/// });
/// assert!(local_ex.try_tick()); // a task was found
/// ```
pub fn try_tick(&self) -> bool {
self.executor.try_tick()
}
/// Runs a single task asynchronously.
///
/// Running a task means simply polling its future once.
///
/// If no tasks are scheduled when this method is called, it will wait until one is scheduled.
///
/// # Examples
///
/// ```ignore
/// use edge_executor::{LocalExecutor, block_on};
///
/// let local_ex: LocalExecutor = Default::default();
///
/// let task = local_ex.spawn(async {
/// println!("Hello world");
/// });
/// block_on(local_ex.tick()); // runs the task
/// ```
pub async fn tick(&self) {
self.executor.tick().await;
}
/// Runs the executor asynchronously until the given future completes.
///
/// # Examples
///
/// ```ignore
/// use edge_executor::{LocalExecutor, block_on};
///
/// let local_ex: LocalExecutor = Default::default();
///
/// let task = local_ex.spawn(async { 1 + 2 });
/// let res = block_on(local_ex.run(async { task.await * 2 }));
///
/// assert_eq!(res, 6);
/// ```
pub async fn run<F>(&self, fut: F) -> F::Output
where
F: Future,
{
// SAFETY: Original implementation missing safety documentation
unsafe { self.executor.run_unchecked(fut) }.await
}
}
impl<'a, const C: usize> Default for LocalExecutor<'a, C> {
fn default() -> Self {
Self::new()
}
}
struct State<const C: usize> {
#[cfg(all(
target_has_atomic = "8",
target_has_atomic = "16",
target_has_atomic = "32",
target_has_atomic = "64",
target_has_atomic = "ptr"
))]
queue: crossbeam_queue::ArrayQueue<Runnable>,
#[cfg(not(all(
target_has_atomic = "8",
target_has_atomic = "16",
target_has_atomic = "32",
target_has_atomic = "64",
target_has_atomic = "ptr"
)))]
queue: heapless::mpmc::MpMcQueue<Runnable, C>,
waker: AtomicWaker,
}
impl<const C: usize> State<C> {
fn new() -> Self {
Self {
#[cfg(all(
target_has_atomic = "8",
target_has_atomic = "16",
target_has_atomic = "32",
target_has_atomic = "64",
target_has_atomic = "ptr"
))]
queue: crossbeam_queue::ArrayQueue::new(C),
#[cfg(not(all(
target_has_atomic = "8",
target_has_atomic = "16",
target_has_atomic = "32",
target_has_atomic = "64",
target_has_atomic = "ptr"
)))]
queue: heapless::mpmc::MpMcQueue::new(),
waker: AtomicWaker::new(),
}
}
}
#[cfg(test)]
mod different_executor_tests {
use core::cell::Cell;
use futures_lite::future::{block_on, pending, poll_once};
use futures_lite::pin;
use super::LocalExecutor;
#[test]
fn shared_queue_slot() {
block_on(async {
let was_polled = Cell::new(false);
let future = async {
was_polled.set(true);
pending::<()>().await;
};
let ex1: LocalExecutor = Default::default();
let ex2: LocalExecutor = Default::default();
// Start the futures for running forever.
let (run1, run2) = (ex1.run(pending::<()>()), ex2.run(pending::<()>()));
pin!(run1);
pin!(run2);
assert!(poll_once(run1.as_mut()).await.is_none());
assert!(poll_once(run2.as_mut()).await.is_none());
// Spawn the future on executor one and then poll executor two.
ex1.spawn(future).detach();
assert!(poll_once(run2).await.is_none());
assert!(!was_polled.get());
// Poll the first one.
assert!(poll_once(run1).await.is_none());
assert!(was_polled.get());
});
}
}
#[cfg(test)]
mod drop_tests {
use alloc::string::String;
use core::mem;
use core::sync::atomic::{AtomicUsize, Ordering};
use core::task::{Poll, Waker};
use std::sync::Mutex;
use bevy_platform::sync::LazyLock;
use futures_lite::future;
use super::{Executor, Task};
#[test]
fn leaked_executor_leaks_everything() {
static DROP: AtomicUsize = AtomicUsize::new(0);
static WAKER: LazyLock<Mutex<Option<Waker>>> = LazyLock::new(Default::default);
let ex: Executor = Default::default();
let task = ex.spawn(async {
let _guard = CallOnDrop(|| {
DROP.fetch_add(1, Ordering::SeqCst);
});
future::poll_fn(|cx| {
*WAKER.lock().unwrap() = Some(cx.waker().clone());
Poll::Pending::<()>
})
.await;
});
future::block_on(ex.tick());
assert!(WAKER.lock().unwrap().is_some());
assert_eq!(DROP.load(Ordering::SeqCst), 0);
mem::forget(ex);
assert_eq!(DROP.load(Ordering::SeqCst), 0);
assert!(future::block_on(future::poll_once(task)).is_none());
assert_eq!(DROP.load(Ordering::SeqCst), 0);
}
#[test]
fn await_task_after_dropping_executor() {
let s: String = "hello".into();
let ex: Executor = Default::default();
let task: Task<&str> = ex.spawn(async { &*s });
assert!(ex.try_tick());
drop(ex);
assert_eq!(future::block_on(task), "hello");
drop(s);
}
#[test]
fn drop_executor_and_then_drop_finished_task() {
static DROP: AtomicUsize = AtomicUsize::new(0);
let ex: Executor = Default::default();
let task = ex.spawn(async {
CallOnDrop(|| {
DROP.fetch_add(1, Ordering::SeqCst);
})
});
assert!(ex.try_tick());
assert_eq!(DROP.load(Ordering::SeqCst), 0);
drop(ex);
assert_eq!(DROP.load(Ordering::SeqCst), 0);
drop(task);
assert_eq!(DROP.load(Ordering::SeqCst), 1);
}
#[test]
fn drop_finished_task_and_then_drop_executor() {
static DROP: AtomicUsize = AtomicUsize::new(0);
let ex: Executor = Default::default();
let task = ex.spawn(async {
CallOnDrop(|| {
DROP.fetch_add(1, Ordering::SeqCst);
})
});
assert!(ex.try_tick());
assert_eq!(DROP.load(Ordering::SeqCst), 0);
drop(task);
assert_eq!(DROP.load(Ordering::SeqCst), 1);
drop(ex);
assert_eq!(DROP.load(Ordering::SeqCst), 1);
}
struct CallOnDrop<F: Fn()>(F);
impl<F: Fn()> Drop for CallOnDrop<F> {
fn drop(&mut self) {
(self.0)();
}
}
}
#[cfg(test)]
mod local_queue {
use alloc::boxed::Box;
use futures_lite::{future, pin};
use super::Executor;
#[test]
fn two_queues() {
future::block_on(async {
// Create an executor with two runners.
let ex: Executor = Default::default();
let (run1, run2) = (
ex.run(future::pending::<()>()),
ex.run(future::pending::<()>()),
);
let mut run1 = Box::pin(run1);
pin!(run2);
// Poll them both.
assert!(future::poll_once(run1.as_mut()).await.is_none());
assert!(future::poll_once(run2.as_mut()).await.is_none());
// Drop the first one, which should leave the local queue in the `None` state.
drop(run1);
assert!(future::poll_once(run2.as_mut()).await.is_none());
});
}
}

83
vendor/bevy_tasks/src/executor.rs vendored Normal file

@@ -0,0 +1,83 @@
//! Provides a fundamental executor primitive appropriate for the target platform
//! and feature set selected.
//! By default, the `async_executor` feature is enabled, which relies on
//! [`async-executor`] for the underlying implementation. This requires `std`,
//! so it is not suitable for `no_std` contexts; there you must use the
//! `edge_executor` feature instead, which relies on the alternate
//! [`edge-executor`] backend.
//!
//! [`async-executor`]: https://crates.io/crates/async-executor
//! [`edge-executor`]: https://crates.io/crates/edge-executor
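//!
//! A hedged usage sketch of the wrappers defined below, driven with
//! `futures_lite::future::block_on`, which both backends work with:
//!
//! ```ignore
//! use futures_lite::future::block_on;
//!
//! let ex = Executor::new();
//! let task = ex.spawn(async { 1 + 2 });
//!
//! // Run the executor until the spawned task completes.
//! assert_eq!(block_on(ex.run(task)), 3);
//! ```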
use core::{
fmt,
panic::{RefUnwindSafe, UnwindSafe},
};
use derive_more::{Deref, DerefMut};
cfg_if::cfg_if! {
if #[cfg(feature = "async_executor")] {
type ExecutorInner<'a> = async_executor::Executor<'a>;
type LocalExecutorInner<'a> = async_executor::LocalExecutor<'a>;
} else {
type ExecutorInner<'a> = crate::edge_executor::Executor<'a, 64>;
type LocalExecutorInner<'a> = crate::edge_executor::LocalExecutor<'a, 64>;
}
}
#[cfg(all(feature = "multi_threaded", not(target_arch = "wasm32")))]
pub use async_task::FallibleTask;
/// Wrapper around a multi-threading-aware async executor.
/// Spawning will generally require tasks to be `Send` and `Sync` to allow multiple
/// threads to send/receive/advance tasks.
///
/// If you require an executor _without_ the `Send` and `Sync` requirements, consider
/// using [`LocalExecutor`] instead.
#[derive(Deref, DerefMut, Default)]
pub struct Executor<'a>(ExecutorInner<'a>);
/// Wrapper around a single-threaded async executor.
/// Spawning won't generally require tasks to be `Send` and `Sync`, at the cost of
/// this executor itself not being `Send` or `Sync`. This makes it unsuitable for
/// global statics.
///
/// If you need to store an executor in a global static, or send one across threads,
/// consider using [`Executor`] instead.
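///
/// A hedged sketch of spawning a non-`Send` future, which is the main reason to
/// reach for this type (the `Rc` payload is illustrative):
///
/// ```ignore
/// use std::rc::Rc;
///
/// let local = LocalExecutor::new();
/// let data = Rc::new(5); // `Rc` is not `Send`
///
/// let task = local.spawn(async move { *data + 1 });
/// while local.try_tick() {}
/// ```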
#[derive(Deref, DerefMut, Default)]
pub struct LocalExecutor<'a>(LocalExecutorInner<'a>);
impl Executor<'_> {
/// Construct a new [`Executor`]
#[expect(clippy::allow_attributes, reason = "This lint may not always trigger.")]
#[allow(dead_code, reason = "not all feature flags require this function")]
pub const fn new() -> Self {
Self(ExecutorInner::new())
}
}
impl LocalExecutor<'_> {
/// Construct a new [`LocalExecutor`]
#[expect(clippy::allow_attributes, reason = "This lint may not always trigger.")]
#[allow(dead_code, reason = "not all feature flags require this function")]
pub const fn new() -> Self {
Self(LocalExecutorInner::new())
}
}
impl UnwindSafe for Executor<'_> {}
impl RefUnwindSafe for Executor<'_> {}
impl UnwindSafe for LocalExecutor<'_> {}
impl RefUnwindSafe for LocalExecutor<'_> {}
impl fmt::Debug for Executor<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Executor").finish()
}
}
impl fmt::Debug for LocalExecutor<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("LocalExecutor").finish()
}
}

56
vendor/bevy_tasks/src/futures.rs vendored Normal file

@@ -0,0 +1,56 @@
#![expect(unsafe_code, reason = "Futures require unsafe code.")]
//! Utilities for working with [`Future`]s.
use core::{
future::Future,
pin::Pin,
task::{Context, Poll, RawWaker, RawWakerVTable, Waker},
};
/// Consumes a future, polls it once, and immediately returns the output
/// or returns `None` if it wasn't ready yet.
///
/// This will cancel the future if it's not ready.
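///
/// A hedged sketch of the expected behavior:
///
/// ```ignore
/// // An async block with no await points completes on its first poll.
/// assert_eq!(now_or_never(async { 21 * 2 }), Some(42));
///
/// // A future that is never ready is dropped and `None` is returned.
/// assert_eq!(now_or_never(core::future::pending::<u32>()), None);
/// ```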
pub fn now_or_never<F: Future>(mut future: F) -> Option<F::Output> {
let noop_waker = noop_waker();
let mut cx = Context::from_waker(&noop_waker);
// SAFETY: `future` is not moved and the original value is shadowed
let future = unsafe { Pin::new_unchecked(&mut future) };
match future.poll(&mut cx) {
Poll::Ready(x) => Some(x),
_ => None,
}
}
/// Polls a future once, and returns the output if ready
/// or returns `None` if it wasn't ready yet.
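///
/// A hedged sketch; boxing keeps the future `Unpin` so it can be polled in place:
///
/// ```ignore
/// let mut fut = Box::pin(async { 7 });
/// assert_eq!(check_ready(&mut fut), Some(7));
/// ```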
pub fn check_ready<F: Future + Unpin>(future: &mut F) -> Option<F::Output> {
let noop_waker = noop_waker();
let mut cx = Context::from_waker(&noop_waker);
let future = Pin::new(future);
match future.poll(&mut cx) {
Poll::Ready(x) => Some(x),
_ => None,
}
}
fn noop_clone(_data: *const ()) -> RawWaker {
noop_raw_waker()
}
fn noop(_data: *const ()) {}
const NOOP_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(noop_clone, noop, noop, noop);
fn noop_raw_waker() -> RawWaker {
RawWaker::new(core::ptr::null(), &NOOP_WAKER_VTABLE)
}
fn noop_waker() -> Waker {
// SAFETY: the `RawWakerVTable` is just a big noop and doesn't violate any of the rules in `RawWakerVTable`s documentation
// (which talks about retaining and releasing any "resources", of which there are none in this case)
unsafe { Waker::from_raw(noop_raw_waker()) }
}

223
vendor/bevy_tasks/src/iter/adapters.rs vendored Normal file

@@ -0,0 +1,223 @@
use crate::iter::ParallelIterator;
/// Chains two [`ParallelIterator`]s `T` and `U`, first returning
/// batches from `T`, and then from `U`.
#[derive(Debug)]
pub struct Chain<T, U> {
pub(crate) left: T,
pub(crate) right: U,
pub(crate) left_in_progress: bool,
}
impl<B, T, U> ParallelIterator<B> for Chain<T, U>
where
B: Iterator + Send,
T: ParallelIterator<B>,
U: ParallelIterator<B>,
{
fn next_batch(&mut self) -> Option<B> {
if self.left_in_progress {
match self.left.next_batch() {
b @ Some(_) => return b,
None => self.left_in_progress = false,
}
}
self.right.next_batch()
}
}
/// Maps a [`ParallelIterator`] `P` using the provided function `F`.
#[derive(Debug)]
pub struct Map<P, F> {
pub(crate) iter: P,
pub(crate) f: F,
}
impl<B, U, T, F> ParallelIterator<core::iter::Map<B, F>> for Map<U, F>
where
B: Iterator + Send,
U: ParallelIterator<B>,
F: FnMut(B::Item) -> T + Send + Clone,
{
fn next_batch(&mut self) -> Option<core::iter::Map<B, F>> {
self.iter.next_batch().map(|b| b.map(self.f.clone()))
}
}
/// Filters a [`ParallelIterator`] `P` using the provided predicate `F`.
#[derive(Debug)]
pub struct Filter<P, F> {
pub(crate) iter: P,
pub(crate) predicate: F,
}
impl<B, P, F> ParallelIterator<core::iter::Filter<B, F>> for Filter<P, F>
where
B: Iterator + Send,
P: ParallelIterator<B>,
F: FnMut(&B::Item) -> bool + Send + Clone,
{
fn next_batch(&mut self) -> Option<core::iter::Filter<B, F>> {
self.iter
.next_batch()
.map(|b| b.filter(self.predicate.clone()))
}
}
/// Filter-maps a [`ParallelIterator`] `P` using the provided function `F`.
#[derive(Debug)]
pub struct FilterMap<P, F> {
pub(crate) iter: P,
pub(crate) f: F,
}
impl<B, P, R, F> ParallelIterator<core::iter::FilterMap<B, F>> for FilterMap<P, F>
where
B: Iterator + Send,
P: ParallelIterator<B>,
F: FnMut(B::Item) -> Option<R> + Send + Clone,
{
fn next_batch(&mut self) -> Option<core::iter::FilterMap<B, F>> {
self.iter.next_batch().map(|b| b.filter_map(self.f.clone()))
}
}
/// Flat-maps a [`ParallelIterator`] `P` using the provided function `F`.
#[derive(Debug)]
pub struct FlatMap<P, F> {
pub(crate) iter: P,
pub(crate) f: F,
}
impl<B, P, U, F> ParallelIterator<core::iter::FlatMap<B, U, F>> for FlatMap<P, F>
where
B: Iterator + Send,
P: ParallelIterator<B>,
F: FnMut(B::Item) -> U + Send + Clone,
U: IntoIterator,
U::IntoIter: Send,
{
// This extends each batch using the flat map. The other option is
// to turn each IntoIter into its own batch.
fn next_batch(&mut self) -> Option<core::iter::FlatMap<B, U, F>> {
self.iter.next_batch().map(|b| b.flat_map(self.f.clone()))
}
}
/// Flattens a [`ParallelIterator`] `P`.
#[derive(Debug)]
pub struct Flatten<P> {
pub(crate) iter: P,
}
impl<B, P> ParallelIterator<core::iter::Flatten<B>> for Flatten<P>
where
B: Iterator + Send,
P: ParallelIterator<B>,
B::Item: IntoIterator,
<B::Item as IntoIterator>::IntoIter: Send,
{
// This extends each batch using the flatten. The other option is to
// turn each IntoIter into its own batch.
fn next_batch(&mut self) -> Option<core::iter::Flatten<B>> {
self.iter.next_batch().map(Iterator::flatten)
}
}
/// Fuses a [`ParallelIterator`] `P`, ensuring that once it returns [`None`], it always
/// returns [`None`].
#[derive(Debug)]
pub struct Fuse<P> {
pub(crate) iter: Option<P>,
}
impl<B, P> ParallelIterator<B> for Fuse<P>
where
B: Iterator + Send,
P: ParallelIterator<B>,
{
fn next_batch(&mut self) -> Option<B> {
match &mut self.iter {
Some(iter) => iter.next_batch().or_else(|| {
self.iter = None;
None
}),
None => None,
}
}
}
/// Inspects a [`ParallelIterator`] `P` using the provided function `F`.
#[derive(Debug)]
pub struct Inspect<P, F> {
pub(crate) iter: P,
pub(crate) f: F,
}
impl<B, P, F> ParallelIterator<core::iter::Inspect<B, F>> for Inspect<P, F>
where
B: Iterator + Send,
P: ParallelIterator<B>,
F: FnMut(&B::Item) + Send + Clone,
{
fn next_batch(&mut self) -> Option<core::iter::Inspect<B, F>> {
self.iter.next_batch().map(|b| b.inspect(self.f.clone()))
}
}
/// Copies a [`ParallelIterator`] `P`'s returned values.
#[derive(Debug)]
pub struct Copied<P> {
pub(crate) iter: P,
}
impl<'a, B, P, T> ParallelIterator<core::iter::Copied<B>> for Copied<P>
where
B: Iterator<Item = &'a T> + Send,
P: ParallelIterator<B>,
T: 'a + Copy,
{
fn next_batch(&mut self) -> Option<core::iter::Copied<B>> {
self.iter.next_batch().map(Iterator::copied)
}
}
/// Clones a [`ParallelIterator`] `P`'s returned values.
#[derive(Debug)]
pub struct Cloned<P> {
pub(crate) iter: P,
}
impl<'a, B, P, T> ParallelIterator<core::iter::Cloned<B>> for Cloned<P>
where
B: Iterator<Item = &'a T> + Send,
P: ParallelIterator<B>,
T: 'a + Copy,
{
fn next_batch(&mut self) -> Option<core::iter::Cloned<B>> {
self.iter.next_batch().map(Iterator::cloned)
}
}
/// Cycles a [`ParallelIterator`] `P` indefinitely.
#[derive(Debug)]
pub struct Cycle<P> {
pub(crate) iter: P,
pub(crate) curr: Option<P>,
}
impl<B, P> ParallelIterator<B> for Cycle<P>
where
B: Iterator + Send,
P: ParallelIterator<B> + Clone,
{
fn next_batch(&mut self) -> Option<B> {
self.curr
.as_mut()
.and_then(ParallelIterator::next_batch)
.or_else(|| {
self.curr = Some(self.iter.clone());
self.next_batch()
})
}
}

511
vendor/bevy_tasks/src/iter/mod.rs vendored Normal file

@@ -0,0 +1,511 @@
use crate::TaskPool;
use alloc::vec::Vec;
mod adapters;
pub use adapters::*;
/// [`ParallelIterator`] closely emulates the `std::iter::Iterator`
/// interface. However, it uses `bevy_task` to compute batches in parallel.
///
/// Note that the overhead of [`ParallelIterator`] is high relative to some
/// workloads. In particular, if the batch size is too small or the task being
/// run in parallel is inexpensive, *a [`ParallelIterator`] could take longer
/// than a normal [`Iterator`]*. Therefore, you should profile your code before
/// using [`ParallelIterator`].
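///
/// A hedged sketch of driving a hand-rolled batch source through this trait; the
/// `BatchedVec` type below is illustrative only and not part of the crate:
///
/// ```ignore
/// use bevy_tasks::{ParallelIterator, TaskPool};
///
/// struct BatchedVec {
///     data: Vec<u32>,
///     batch_size: usize,
///     cursor: usize,
/// }
///
/// impl ParallelIterator<std::vec::IntoIter<u32>> for BatchedVec {
///     fn next_batch(&mut self) -> Option<std::vec::IntoIter<u32>> {
///         if self.cursor >= self.data.len() {
///             return None;
///         }
///         // Hand out the next fixed-size chunk as an owned batch iterator.
///         let end = (self.cursor + self.batch_size).min(self.data.len());
///         let batch = self.data[self.cursor..end].to_vec();
///         self.cursor = end;
///         Some(batch.into_iter())
///     }
/// }
///
/// let pool = TaskPool::new();
/// let iter = BatchedVec { data: (0..1000).collect(), batch_size: 100, cursor: 0 };
///
/// // Each 100-element batch is doubled and summed on the task pool.
/// let total: u32 = iter.map(|x| x * 2).sum::<u32, u32>(&pool);
/// assert_eq!(total, 999_000);
/// ```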
pub trait ParallelIterator<BatchIter>
where
BatchIter: Iterator + Send,
Self: Sized + Send,
{
/// Returns the next batch of items for processing.
///
/// Each batch is an iterator with items of the same type as the
/// [`ParallelIterator`]. Returns `None` when there are no batches left.
fn next_batch(&mut self) -> Option<BatchIter>;
/// Returns the bounds on the remaining number of items in the
/// parallel iterator.
///
/// See [`Iterator::size_hint()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.size_hint)
fn size_hint(&self) -> (usize, Option<usize>) {
(0, None)
}
/// Consumes the parallel iterator and returns the number of items.
///
/// See [`Iterator::count()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.count)
fn count(mut self, pool: &TaskPool) -> usize {
pool.scope(|s| {
while let Some(batch) = self.next_batch() {
s.spawn(async move { batch.count() });
}
})
.iter()
.sum()
}
/// Consumes the parallel iterator and returns the last item.
///
/// See [`Iterator::last()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.last)
fn last(mut self, _pool: &TaskPool) -> Option<BatchIter::Item> {
let mut last_item = None;
while let Some(batch) = self.next_batch() {
last_item = batch.last();
}
last_item
}
/// Consumes the parallel iterator and returns the nth item.
///
/// See [`Iterator::nth()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.nth)
// TODO: Optimize with size_hint on each batch
fn nth(mut self, _pool: &TaskPool, n: usize) -> Option<BatchIter::Item> {
let mut i = 0;
while let Some(batch) = self.next_batch() {
for item in batch {
if i == n {
return Some(item);
}
i += 1;
}
}
None
}
/// Takes two parallel iterators and returns a parallel iterator over
/// both in sequence.
///
/// See [`Iterator::chain()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.chain)
// TODO: Use IntoParallelIterator for U
fn chain<U>(self, other: U) -> Chain<Self, U>
where
U: ParallelIterator<BatchIter>,
{
Chain {
left: self,
right: other,
left_in_progress: true,
}
}
/// Takes a closure and creates a parallel iterator which calls that
/// closure on each item.
///
/// See [`Iterator::map()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.map)
fn map<T, F>(self, f: F) -> Map<Self, F>
where
F: FnMut(BatchIter::Item) -> T + Send + Clone,
{
Map { iter: self, f }
}
/// Calls a closure on each item of a parallel iterator.
///
/// See [`Iterator::for_each()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.for_each)
fn for_each<F>(mut self, pool: &TaskPool, f: F)
where
F: FnMut(BatchIter::Item) + Send + Clone + Sync,
{
pool.scope(|s| {
while let Some(batch) = self.next_batch() {
let newf = f.clone();
s.spawn(async move {
batch.for_each(newf);
});
}
});
}
/// Creates a parallel iterator which uses a closure to determine
/// if an element should be yielded.
///
/// See [`Iterator::filter()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.filter)
fn filter<F>(self, predicate: F) -> Filter<Self, F>
where
F: FnMut(&BatchIter::Item) -> bool,
{
Filter {
iter: self,
predicate,
}
}
/// Creates a parallel iterator that both filters and maps.
///
/// See [`Iterator::filter_map()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.filter_map)
fn filter_map<R, F>(self, f: F) -> FilterMap<Self, F>
where
F: FnMut(BatchIter::Item) -> Option<R>,
{
FilterMap { iter: self, f }
}
/// Creates a parallel iterator that works like map, but flattens
/// nested structure.
///
/// See [`Iterator::flat_map()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.flat_map)
fn flat_map<U, F>(self, f: F) -> FlatMap<Self, F>
where
F: FnMut(BatchIter::Item) -> U,
U: IntoIterator,
{
FlatMap { iter: self, f }
}
/// Creates a parallel iterator that flattens nested structure.
///
/// See [`Iterator::flatten()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.flatten)
fn flatten(self) -> Flatten<Self>
where
BatchIter::Item: IntoIterator,
{
Flatten { iter: self }
}
/// Creates a parallel iterator which ends after the first None.
///
/// See [`Iterator::fuse()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.fuse)
fn fuse(self) -> Fuse<Self> {
Fuse { iter: Some(self) }
}
/// Does something with each item of a parallel iterator, passing
/// the value on.
///
/// See [`Iterator::inspect()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.inspect)
fn inspect<F>(self, f: F) -> Inspect<Self, F>
where
F: FnMut(&BatchIter::Item),
{
Inspect { iter: self, f }
}
/// Borrows a parallel iterator, rather than consuming it.
///
/// See [`Iterator::by_ref()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.by_ref)
fn by_ref(&mut self) -> &mut Self {
self
}
/// Transforms a parallel iterator into a collection.
///
/// See [`Iterator::collect()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.collect)
// TODO: Investigate optimizations for less copying
fn collect<C>(mut self, pool: &TaskPool) -> C
where
C: FromIterator<BatchIter::Item>,
BatchIter::Item: Send + 'static,
{
pool.scope(|s| {
while let Some(batch) = self.next_batch() {
s.spawn(async move { batch.collect::<Vec<_>>() });
}
})
.into_iter()
.flatten()
.collect()
}
/// Consumes a parallel iterator, creating two collections from it.
///
/// See [`Iterator::partition()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.partition)
// TODO: Investigate optimizations for less copying
fn partition<C, F>(mut self, pool: &TaskPool, f: F) -> (C, C)
where
C: Default + Extend<BatchIter::Item> + Send,
F: FnMut(&BatchIter::Item) -> bool + Send + Sync + Clone,
BatchIter::Item: Send + 'static,
{
let (mut a, mut b) = <(C, C)>::default();
pool.scope(|s| {
while let Some(batch) = self.next_batch() {
let newf = f.clone();
s.spawn(async move { batch.partition::<Vec<_>, F>(newf) });
}
})
.into_iter()
.for_each(|(c, d)| {
a.extend(c);
b.extend(d);
});
(a, b)
}
/// Repeatedly applies a function to items of each batch of a parallel
/// iterator, producing a Vec of final values.
///
/// *Note that this folds each batch independently and returns a `Vec` of
/// results (in batch order). For example, folding batches `[1, 2]` and `[3, 4]`
/// with `init = 0` and `+` yields `vec![3, 7]`, not `10`.*
///
/// See [`Iterator::fold()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.fold)
fn fold<C, F, D>(mut self, pool: &TaskPool, init: C, f: F) -> Vec<C>
where
F: FnMut(C, BatchIter::Item) -> C + Send + Sync + Clone,
C: Clone + Send + Sync + 'static,
{
pool.scope(|s| {
while let Some(batch) = self.next_batch() {
let newf = f.clone();
let newi = init.clone();
s.spawn(async move { batch.fold(newi, newf) });
}
})
}
/// Tests if every element of the parallel iterator matches a predicate.
///
/// *Note that all is **not** short circuiting.*
///
/// See [`Iterator::all()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.all)
fn all<F>(mut self, pool: &TaskPool, f: F) -> bool
where
F: FnMut(BatchIter::Item) -> bool + Send + Sync + Clone,
{
pool.scope(|s| {
while let Some(mut batch) = self.next_batch() {
let newf = f.clone();
s.spawn(async move { batch.all(newf) });
}
})
.into_iter()
.all(core::convert::identity)
}
/// Tests if any element of the parallel iterator matches a predicate.
///
/// *Note that any is **not** short circuiting.*
///
/// See [`Iterator::any()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.any)
fn any<F>(mut self, pool: &TaskPool, f: F) -> bool
where
F: FnMut(BatchIter::Item) -> bool + Send + Sync + Clone,
{
pool.scope(|s| {
while let Some(mut batch) = self.next_batch() {
let newf = f.clone();
s.spawn(async move { batch.any(newf) });
}
})
.into_iter()
.any(core::convert::identity)
}
/// Searches for an element in a parallel iterator, returning its index.
///
/// *Note that position consumes the whole iterator.*
///
/// See [`Iterator::position()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.position)
// TODO: Investigate optimizations for less copying
fn position<F>(mut self, pool: &TaskPool, f: F) -> Option<usize>
where
F: FnMut(BatchIter::Item) -> bool + Send + Sync + Clone,
{
let poses = pool.scope(|s| {
while let Some(batch) = self.next_batch() {
let mut newf = f.clone();
s.spawn(async move {
let mut len = 0;
let mut pos = None;
for item in batch {
if pos.is_none() && newf(item) {
pos = Some(len);
}
len += 1;
}
(len, pos)
});
}
});
let mut start = 0;
for (len, pos) in poses {
if let Some(pos) = pos {
return Some(start + pos);
}
start += len;
}
None
}
/// Returns the maximum item of a parallel iterator.
///
/// See [`Iterator::max()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.max)
fn max(mut self, pool: &TaskPool) -> Option<BatchIter::Item>
where
BatchIter::Item: Ord + Send + 'static,
{
pool.scope(|s| {
while let Some(batch) = self.next_batch() {
s.spawn(async move { batch.max() });
}
})
.into_iter()
.flatten()
.max()
}
/// Returns the minimum item of a parallel iterator.
///
/// See [`Iterator::min()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.min)
fn min(mut self, pool: &TaskPool) -> Option<BatchIter::Item>
where
BatchIter::Item: Ord + Send + 'static,
{
pool.scope(|s| {
while let Some(batch) = self.next_batch() {
s.spawn(async move { batch.min() });
}
})
.into_iter()
.flatten()
.min()
}
/// Returns the item that gives the maximum value from the specified function.
///
/// See [`Iterator::max_by_key()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.max_by_key)
fn max_by_key<R, F>(mut self, pool: &TaskPool, f: F) -> Option<BatchIter::Item>
where
R: Ord,
F: FnMut(&BatchIter::Item) -> R + Send + Sync + Clone,
BatchIter::Item: Send + 'static,
{
pool.scope(|s| {
while let Some(batch) = self.next_batch() {
let newf = f.clone();
s.spawn(async move { batch.max_by_key(newf) });
}
})
.into_iter()
.flatten()
.max_by_key(f)
}
/// Returns the item that gives the maximum value with respect to the specified comparison
/// function.
///
/// See [`Iterator::max_by()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.max_by)
fn max_by<F>(mut self, pool: &TaskPool, f: F) -> Option<BatchIter::Item>
where
F: FnMut(&BatchIter::Item, &BatchIter::Item) -> core::cmp::Ordering + Send + Sync + Clone,
BatchIter::Item: Send + 'static,
{
pool.scope(|s| {
while let Some(batch) = self.next_batch() {
let newf = f.clone();
s.spawn(async move { batch.max_by(newf) });
}
})
.into_iter()
.flatten()
.max_by(f)
}
/// Returns the item that gives the minimum value from the specified function.
///
/// See [`Iterator::min_by_key()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.min_by_key)
fn min_by_key<R, F>(mut self, pool: &TaskPool, f: F) -> Option<BatchIter::Item>
where
R: Ord,
F: FnMut(&BatchIter::Item) -> R + Send + Sync + Clone,
BatchIter::Item: Send + 'static,
{
pool.scope(|s| {
while let Some(batch) = self.next_batch() {
let newf = f.clone();
s.spawn(async move { batch.min_by_key(newf) });
}
})
.into_iter()
.flatten()
.min_by_key(f)
}
/// Returns the item that gives the minimum value with respect to the specified comparison
/// function.
///
/// See [`Iterator::min_by()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.min_by)
fn min_by<F>(mut self, pool: &TaskPool, f: F) -> Option<BatchIter::Item>
where
F: FnMut(&BatchIter::Item, &BatchIter::Item) -> core::cmp::Ordering + Send + Sync + Clone,
BatchIter::Item: Send + 'static,
{
pool.scope(|s| {
while let Some(batch) = self.next_batch() {
let newf = f.clone();
s.spawn(async move { batch.min_by(newf) });
}
})
.into_iter()
.flatten()
.min_by(f)
}
/// Creates a parallel iterator which copies all of its items.
///
/// See [`Iterator::copied()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.copied)
fn copied<'a, T>(self) -> Copied<Self>
where
Self: ParallelIterator<BatchIter>,
T: 'a + Copy,
{
Copied { iter: self }
}
/// Creates a parallel iterator which clones all of its items.
///
/// See [`Iterator::cloned()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.cloned)
fn cloned<'a, T>(self) -> Cloned<Self>
where
Self: ParallelIterator<BatchIter>,
T: 'a + Copy,
{
Cloned { iter: self }
}
/// Repeats a parallel iterator endlessly.
///
/// See [`Iterator::cycle()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.cycle)
fn cycle(self) -> Cycle<Self>
where
Self: Clone,
{
Cycle {
iter: self,
curr: None,
}
}
/// Sums the items of a parallel iterator.
///
/// See [`Iterator::sum()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.sum)
fn sum<S, R>(mut self, pool: &TaskPool) -> R
where
S: core::iter::Sum<BatchIter::Item> + Send + 'static,
R: core::iter::Sum<S>,
{
pool.scope(|s| {
while let Some(batch) = self.next_batch() {
s.spawn(async move { batch.sum() });
}
})
.into_iter()
.sum()
}
/// Multiplies all the items of a parallel iterator.
///
/// See [`Iterator::product()`](https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.product)
fn product<S, R>(mut self, pool: &TaskPool) -> R
where
S: core::iter::Product<BatchIter::Item> + Send + 'static,
R: core::iter::Product<S>,
{
pool.scope(|s| {
while let Some(batch) = self.next_batch() {
s.spawn(async move { batch.product() });
}
})
.into_iter()
.product()
}
}

132
vendor/bevy_tasks/src/lib.rs vendored Normal file

@@ -0,0 +1,132 @@
#![doc = include_str!("../README.md")]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc(
html_logo_url = "https://bevyengine.org/assets/icon.png",
html_favicon_url = "https://bevyengine.org/assets/icon.png"
)]
#![no_std]
#[cfg(feature = "std")]
extern crate std;
extern crate alloc;
mod conditional_send {
cfg_if::cfg_if! {
if #[cfg(target_arch = "wasm32")] {
/// Use [`ConditionalSend`] to mark an optional `Send` trait bound. Useful because on certain platforms (e.g. Wasm),
/// futures aren't `Send`.
pub trait ConditionalSend {}
impl<T> ConditionalSend for T {}
} else {
/// Use [`ConditionalSend`] to mark an optional `Send` trait bound. Useful because on certain platforms (e.g. Wasm),
/// futures aren't `Send`.
pub trait ConditionalSend: Send {}
impl<T: Send> ConditionalSend for T {}
}
}
}
pub use conditional_send::*;
/// Use [`ConditionalSendFuture`] for a future with an optional `Send` trait bound, as on certain platforms (e.g. Wasm),
/// futures aren't `Send`.
pub trait ConditionalSendFuture: Future + ConditionalSend {}
impl<T: Future + ConditionalSend> ConditionalSendFuture for T {}
use alloc::boxed::Box;
/// An owned and dynamically typed Future used when you can't statically type your result or need to add some indirection.
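///
/// A hedged sketch of producing one (the `boxed` helper is illustrative, not part
/// of the crate):
///
/// ```ignore
/// fn boxed<'a, T>(fut: impl ConditionalSendFuture<Output = T> + 'a) -> BoxedFuture<'a, T> {
///     // Pinning and boxing erases the concrete future type behind the alias.
///     Box::pin(fut)
/// }
///
/// let fut: BoxedFuture<'static, u32> = boxed(async { 7 });
/// ```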
pub type BoxedFuture<'a, T> = core::pin::Pin<Box<dyn ConditionalSendFuture<Output = T> + 'a>>;
pub mod futures;
#[cfg(not(feature = "async_executor"))]
mod edge_executor;
mod executor;
mod slice;
pub use slice::{ParallelSlice, ParallelSliceMut};
#[cfg_attr(all(target_arch = "wasm32", feature = "web"), path = "wasm_task.rs")]
mod task;
pub use task::Task;
cfg_if::cfg_if! {
if #[cfg(all(not(target_arch = "wasm32"), feature = "multi_threaded"))] {
mod task_pool;
mod thread_executor;
pub use task_pool::{Scope, TaskPool, TaskPoolBuilder};
pub use thread_executor::{ThreadExecutor, ThreadExecutorTicker};
} else if #[cfg(any(target_arch = "wasm32", not(feature = "multi_threaded")))] {
mod single_threaded_task_pool;
pub use single_threaded_task_pool::{Scope, TaskPool, TaskPoolBuilder, ThreadExecutor};
}
}
mod usages;
pub use futures_lite::future::poll_once;
pub use usages::{AsyncComputeTaskPool, ComputeTaskPool, IoTaskPool};
#[cfg(not(all(target_arch = "wasm32", feature = "web")))]
pub use usages::tick_global_task_pools_on_main_thread;
#[cfg(feature = "std")]
cfg_if::cfg_if! {
if #[cfg(feature = "async-io")] {
pub use async_io::block_on;
} else {
pub use futures_lite::future::block_on;
}
}
mod iter;
pub use iter::ParallelIterator;
pub use futures_lite;
/// The tasks prelude.
///
/// This includes the most common types in this crate, re-exported for your convenience.
pub mod prelude {
#[doc(hidden)]
pub use crate::{
iter::ParallelIterator,
slice::{ParallelSlice, ParallelSliceMut},
usages::{AsyncComputeTaskPool, ComputeTaskPool, IoTaskPool},
};
#[cfg(feature = "std")]
#[doc(hidden)]
pub use crate::block_on;
}
cfg_if::cfg_if! {
if #[cfg(feature = "std")] {
use core::num::NonZero;
/// Gets the logical CPU core count available to the current process.
///
/// This is identical to [`std::thread::available_parallelism`], except
/// it will return a default value of 1 if it internally errors out.
///
/// This will always return at least 1.
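///
/// A minimal check of that guarantee:
///
/// ```
/// assert!(bevy_tasks::available_parallelism() >= 1);
/// ```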
pub fn available_parallelism() -> usize {
std::thread::available_parallelism()
.map(NonZero::<usize>::get)
.unwrap_or(1)
}
} else {
/// Gets the logical CPU core count available to the current process.
///
/// This will always return at least 1.
pub fn available_parallelism() -> usize {
// Without access to std, assume a single thread is available
1
}
}
}


@@ -0,0 +1,338 @@
use alloc::{string::String, vec::Vec};
use bevy_platform::sync::Arc;
use core::{cell::RefCell, future::Future, marker::PhantomData, mem};
use crate::Task;
#[cfg(feature = "std")]
use std::thread_local;
#[cfg(not(feature = "std"))]
use bevy_platform::sync::{Mutex, PoisonError};
#[cfg(feature = "std")]
use crate::executor::LocalExecutor;
#[cfg(not(feature = "std"))]
use crate::executor::Executor as LocalExecutor;
#[cfg(feature = "std")]
thread_local! {
static LOCAL_EXECUTOR: LocalExecutor<'static> = const { LocalExecutor::new() };
}
#[cfg(not(feature = "std"))]
static LOCAL_EXECUTOR: LocalExecutor<'static> = const { LocalExecutor::new() };
#[cfg(feature = "std")]
type ScopeResult<T> = alloc::rc::Rc<RefCell<Option<T>>>;
#[cfg(not(feature = "std"))]
type ScopeResult<T> = Arc<Mutex<Option<T>>>;
/// Used to create a [`TaskPool`].
#[derive(Debug, Default, Clone)]
pub struct TaskPoolBuilder {}
/// This is a dummy struct for wasm support, providing the same API as the multi-threaded
/// task pool. In the multi-threaded task pool this struct is used to spawn tasks on a
/// specific thread, but the wasm task pool just calls `wasm_bindgen_futures::spawn_local`
/// for spawning, which runs tasks on the main thread, so this [`ThreadExecutor`] does nothing.
#[derive(Default)]
pub struct ThreadExecutor<'a>(PhantomData<&'a ()>);
impl<'a> ThreadExecutor<'a> {
/// Creates a new `ThreadExecutor`
pub fn new() -> Self {
Self::default()
}
}
impl TaskPoolBuilder {
/// Creates a new `TaskPoolBuilder` instance
pub fn new() -> Self {
Self::default()
}
/// No op on the single threaded task pool
pub fn num_threads(self, _num_threads: usize) -> Self {
self
}
/// No op on the single threaded task pool
pub fn stack_size(self, _stack_size: usize) -> Self {
self
}
/// No op on the single threaded task pool
pub fn thread_name(self, _thread_name: String) -> Self {
self
}
/// No op on the single threaded task pool
pub fn on_thread_spawn(self, _f: impl Fn() + Send + Sync + 'static) -> Self {
self
}
/// No op on the single threaded task pool
pub fn on_thread_destroy(self, _f: impl Fn() + Send + Sync + 'static) -> Self {
self
}
/// Creates a new [`TaskPool`]
pub fn build(self) -> TaskPool {
TaskPool::new_internal()
}
}
/// A thread pool for executing tasks. Tasks are futures that are automatically driven by
/// the pool on threads owned by the pool; in this case, the main thread only.
#[derive(Debug, Default, Clone)]
pub struct TaskPool {}
impl TaskPool {
/// Just creates a new `ThreadExecutor` for wasm.
pub fn get_thread_executor() -> Arc<ThreadExecutor<'static>> {
Arc::new(ThreadExecutor::new())
}
/// Create a `TaskPool` with the default configuration.
pub fn new() -> Self {
TaskPoolBuilder::new().build()
}
fn new_internal() -> Self {
Self {}
}
/// Return the number of threads owned by the task pool
pub fn thread_num(&self) -> usize {
1
}
/// Allows spawning non-`'static` futures on the thread pool. The function takes a callback,
/// passing a scope object into it. The scope object provided to the callback can be used
/// to spawn tasks. This function will await the completion of all tasks before returning.
///
/// This is similar to `rayon::scope` and `crossbeam::scope`
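///
/// A minimal sketch:
///
/// ```
/// use bevy_tasks::TaskPool;
///
/// let pool = TaskPool::new();
/// let results = pool.scope(|s| {
///     s.spawn(async { 1 });
///     s.spawn(async { 2 });
/// });
/// assert_eq!(results.iter().sum::<i32>(), 3);
/// ```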
pub fn scope<'env, F, T>(&self, f: F) -> Vec<T>
where
F: for<'scope> FnOnce(&'env mut Scope<'scope, 'env, T>),
T: Send + 'static,
{
self.scope_with_executor(false, None, f)
}
/// Allows spawning non-`'static` futures on the thread pool. The function takes a callback,
/// passing a scope object into it. The scope object provided to the callback can be used
/// to spawn tasks. This function will await the completion of all tasks before returning.
///
/// This is similar to `rayon::scope` and `crossbeam::scope`
#[expect(unsafe_code, reason = "Required to transmute lifetimes.")]
pub fn scope_with_executor<'env, F, T>(
&self,
_tick_task_pool_executor: bool,
_thread_executor: Option<&ThreadExecutor>,
f: F,
) -> Vec<T>
where
F: for<'scope> FnOnce(&'env mut Scope<'scope, 'env, T>),
T: Send + 'static,
{
// SAFETY: This safety comment applies to all references transmuted to 'env.
// Any futures spawned with these references need to return before this function completes.
// This is guaranteed because we drive all the futures spawned onto the Scope
// to completion in this function. However, rust has no way of knowing this so we
// transmute the lifetimes to 'env here to appease the compiler as it is unable to validate safety.
// Any usages of the references passed into `Scope` must be accessed through
// the transmuted reference for the rest of this function.
let executor = &LocalExecutor::new();
// SAFETY: As above, all futures must complete in this function so we can change the lifetime
let executor: &'env LocalExecutor<'env> = unsafe { mem::transmute(executor) };
let results: RefCell<Vec<ScopeResult<T>>> = RefCell::new(Vec::new());
// SAFETY: As above, all futures must complete in this function so we can change the lifetime
let results: &'env RefCell<Vec<ScopeResult<T>>> = unsafe { mem::transmute(&results) };
let mut scope = Scope {
executor,
results,
scope: PhantomData,
env: PhantomData,
};
// SAFETY: As above, all futures must complete in this function so we can change the lifetime
let scope_ref: &'env mut Scope<'_, 'env, T> = unsafe { mem::transmute(&mut scope) };
f(scope_ref);
// Loop until all tasks are done
while executor.try_tick() {}
let results = scope.results.borrow();
results
.iter()
.map(|result| {
#[cfg(feature = "std")]
return result.borrow_mut().take().unwrap();
#[cfg(not(feature = "std"))]
{
let mut lock = result.lock().unwrap_or_else(PoisonError::into_inner);
lock.take().unwrap()
}
})
.collect()
}
/// Spawns a static future onto the thread pool. The returned Task is a future, which can be polled
/// to retrieve the output of the original future. Dropping the task will attempt to cancel it.
/// It can also be "detached", allowing it to continue running without having to be polled by the
/// end-user.
///
/// If the provided future is non-`Send`, [`TaskPool::spawn_local`] should be used instead.
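///
/// A minimal sketch (assuming the default `std` feature, which re-exports `block_on`):
///
/// ```
/// use bevy_tasks::{block_on, TaskPool};
///
/// let pool = TaskPool::new();
/// let task = pool.spawn(async { 21 * 2 });
/// assert_eq!(block_on(task), 42);
/// ```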
pub fn spawn<T>(
&self,
future: impl Future<Output = T> + 'static + MaybeSend + MaybeSync,
) -> Task<T>
where
T: 'static + MaybeSend + MaybeSync,
{
cfg_if::cfg_if! {
if #[cfg(all(target_arch = "wasm32", feature = "web"))] {
Task::wrap_future(future)
} else if #[cfg(feature = "std")] {
LOCAL_EXECUTOR.with(|executor| {
let task = executor.spawn(future);
// Loop until all tasks are done
while executor.try_tick() {}
Task::new(task)
})
} else {
{
let task = LOCAL_EXECUTOR.spawn(future);
// Loop until all tasks are done
while LOCAL_EXECUTOR.try_tick() {}
Task::new(task)
}
}
}
}
/// Spawns a static future on the JS event loop. This is exactly the same as [`TaskPool::spawn`].
pub fn spawn_local<T>(
&self,
future: impl Future<Output = T> + 'static + MaybeSend + MaybeSync,
) -> Task<T>
where
T: 'static + MaybeSend + MaybeSync,
{
self.spawn(future)
}
/// Runs a function with the local executor. Typically used to tick
/// the local executor on the main thread as it needs to share time with
/// other things.
///
/// ```
/// use bevy_tasks::TaskPool;
///
/// TaskPool::new().with_local_executor(|local_executor| {
/// local_executor.try_tick();
/// });
/// ```
pub fn with_local_executor<F, R>(&self, f: F) -> R
where
F: FnOnce(&LocalExecutor) -> R,
{
#[cfg(feature = "std")]
return LOCAL_EXECUTOR.with(f);
#[cfg(not(feature = "std"))]
return f(&LOCAL_EXECUTOR);
}
}
/// A `TaskPool` scope for running one or more non-`'static` futures.
///
/// For more information, see [`TaskPool::scope`].
#[derive(Debug)]
pub struct Scope<'scope, 'env: 'scope, T> {
executor: &'scope LocalExecutor<'scope>,
// Vector to gather results of all futures spawned during scope run
results: &'env RefCell<Vec<ScopeResult<T>>>,
// make `Scope` invariant over 'scope and 'env
scope: PhantomData<&'scope mut &'scope ()>,
env: PhantomData<&'env mut &'env ()>,
}
impl<'scope, 'env, T: Send + 'env> Scope<'scope, 'env, T> {
/// Spawns a scoped future onto the executor. The scope *must* outlive
/// the provided future. The results of the future will be returned as a part of
/// [`TaskPool::scope`]'s return value.
///
/// On the single threaded task pool, it just calls [`Scope::spawn_on_scope`].
///
/// For more information, see [`TaskPool::scope`].
pub fn spawn<Fut: Future<Output = T> + 'scope + MaybeSend>(&self, f: Fut) {
self.spawn_on_scope(f);
}
/// Spawns a scoped future onto the executor. The scope *must* outlive
/// the provided future. The results of the future will be returned as a part of
/// [`TaskPool::scope`]'s return value.
///
/// On the single threaded task pool, it just calls [`Scope::spawn_on_scope`].
///
/// For more information, see [`TaskPool::scope`].
pub fn spawn_on_external<Fut: Future<Output = T> + 'scope + MaybeSend>(&self, f: Fut) {
self.spawn_on_scope(f);
}
/// Spawns a scoped future that runs on the thread the scope called from. The
/// scope *must* outlive the provided future. The results of the future will be
/// returned as a part of [`TaskPool::scope`]'s return value.
///
/// For more information, see [`TaskPool::scope`].
pub fn spawn_on_scope<Fut: Future<Output = T> + 'scope + MaybeSend>(&self, f: Fut) {
let result = ScopeResult::<T>::default();
self.results.borrow_mut().push(result.clone());
let f = async move {
let temp_result = f.await;
#[cfg(feature = "std")]
result.borrow_mut().replace(temp_result);
#[cfg(not(feature = "std"))]
{
let mut lock = result.lock().unwrap_or_else(PoisonError::into_inner);
*lock = Some(temp_result);
}
};
self.executor.spawn(f).detach();
}
}
#[cfg(feature = "std")]
mod send_sync_bounds {
pub trait MaybeSend {}
impl<T> MaybeSend for T {}
pub trait MaybeSync {}
impl<T> MaybeSync for T {}
}
#[cfg(not(feature = "std"))]
mod send_sync_bounds {
pub trait MaybeSend: Send {}
impl<T: Send> MaybeSend for T {}
pub trait MaybeSync: Sync {}
impl<T: Sync> MaybeSync for T {}
}
use send_sync_bounds::{MaybeSend, MaybeSync};

267
vendor/bevy_tasks/src/slice.rs vendored Normal file

@@ -0,0 +1,267 @@
use super::TaskPool;
use alloc::vec::Vec;
/// Provides functions for mapping read-only slices across a provided [`TaskPool`].
pub trait ParallelSlice<T: Sync>: AsRef<[T]> {
/// Splits the slice into chunks of size `chunk_size` or less and maps the chunks
/// in parallel across the provided `task_pool`. One task is spawned in the task pool
/// for every chunk.
///
/// The iteration function takes the index of the chunk in the original slice as the
/// first argument, and the chunk as the second argument.
///
/// Returns a `Vec` of the mapped results in the same order as the input.
///
/// # Example
///
/// ```
/// # use bevy_tasks::prelude::*;
/// # use bevy_tasks::TaskPool;
/// let task_pool = TaskPool::new();
/// let counts = (0..10000).collect::<Vec<u32>>();
/// let incremented = counts.par_chunk_map(&task_pool, 100, |_index, chunk| {
/// let mut results = Vec::new();
/// for count in chunk {
/// results.push(*count + 2);
/// }
/// results
/// });
/// # let flattened: Vec<_> = incremented.into_iter().flatten().collect();
/// # assert_eq!(flattened, (2..10002).collect::<Vec<u32>>());
/// ```
///
/// # See Also
///
/// - [`ParallelSliceMut::par_chunk_map_mut`] for mapping mutable slices.
/// - [`ParallelSlice::par_splat_map`] for mapping when a specific chunk size is unknown.
fn par_chunk_map<F, R>(&self, task_pool: &TaskPool, chunk_size: usize, f: F) -> Vec<R>
where
F: Fn(usize, &[T]) -> R + Send + Sync,
R: Send + 'static,
{
let slice = self.as_ref();
let f = &f;
task_pool.scope(|scope| {
for (index, chunk) in slice.chunks(chunk_size).enumerate() {
scope.spawn(async move { f(index, chunk) });
}
})
}
/// Splits the slice into a maximum of `max_tasks` chunks, and maps the chunks in parallel
/// across the provided `task_pool`. One task is spawned in the task pool for every chunk.
///
/// If `max_tasks` is `None`, this function will attempt to use one chunk per thread in
/// `task_pool`.
///
/// The iteration function takes the index of the chunk in the original slice as the
/// first argument, and the chunk as the second argument.
///
/// Returns a `Vec` of the mapped results in the same order as the input.
///
/// # Example
///
/// ```
/// # use bevy_tasks::prelude::*;
/// # use bevy_tasks::TaskPool;
/// let task_pool = TaskPool::new();
/// let counts = (0..10000).collect::<Vec<u32>>();
/// let incremented = counts.par_splat_map(&task_pool, None, |_index, chunk| {
/// let mut results = Vec::new();
/// for count in chunk {
/// results.push(*count + 2);
/// }
/// results
/// });
/// # let flattened: Vec<_> = incremented.into_iter().flatten().collect();
/// # assert_eq!(flattened, (2..10002).collect::<Vec<u32>>());
/// ```
///
/// # See Also
///
/// [`ParallelSliceMut::par_splat_map_mut`] for mapping mutable slices.
/// [`ParallelSlice::par_chunk_map`] for mapping when a specific chunk size is desirable.
fn par_splat_map<F, R>(&self, task_pool: &TaskPool, max_tasks: Option<usize>, f: F) -> Vec<R>
where
F: Fn(usize, &[T]) -> R + Send + Sync,
R: Send + 'static,
{
let slice = self.as_ref();
let chunk_size = core::cmp::max(
1,
core::cmp::max(
slice.len() / task_pool.thread_num(),
slice.len() / max_tasks.unwrap_or(usize::MAX),
),
);
slice.par_chunk_map(task_pool, chunk_size, f)
}
}
impl<S, T: Sync> ParallelSlice<T> for S where S: AsRef<[T]> {}
/// Provides functions for mapping mutable slices across a provided [`TaskPool`].
pub trait ParallelSliceMut<T: Send>: AsMut<[T]> {
/// Splits the slice into chunks of size `chunk_size` or less and maps the chunks
/// in parallel across the provided `task_pool`. One task is spawned in the task pool
/// for every chunk.
///
/// The iteration function takes the index of the chunk in the original slice as the
/// first argument, and the chunk as the second argument.
///
/// Returns a `Vec` of the mapped results in the same order as the input.
///
/// # Example
///
/// ```
/// # use bevy_tasks::prelude::*;
/// # use bevy_tasks::TaskPool;
/// let task_pool = TaskPool::new();
/// let mut counts = (0..10000).collect::<Vec<u32>>();
/// let incremented = counts.par_chunk_map_mut(&task_pool, 100, |_index, chunk| {
/// let mut results = Vec::new();
/// for count in chunk {
/// *count += 5;
/// results.push(*count - 2);
/// }
/// results
/// });
///
/// assert_eq!(counts, (5..10005).collect::<Vec<u32>>());
/// # let flattened: Vec<_> = incremented.into_iter().flatten().collect();
/// # assert_eq!(flattened, (3..10003).collect::<Vec<u32>>());
/// ```
///
/// # See Also
///
/// [`ParallelSlice::par_chunk_map`] for mapping immutable slices.
/// [`ParallelSliceMut::par_splat_map_mut`] for mapping when a specific chunk size is unknown.
fn par_chunk_map_mut<F, R>(&mut self, task_pool: &TaskPool, chunk_size: usize, f: F) -> Vec<R>
where
F: Fn(usize, &mut [T]) -> R + Send + Sync,
R: Send + 'static,
{
let slice = self.as_mut();
let f = &f;
task_pool.scope(|scope| {
for (index, chunk) in slice.chunks_mut(chunk_size).enumerate() {
scope.spawn(async move { f(index, chunk) });
}
})
}
/// Splits the slice into a maximum of `max_tasks` chunks, and maps the chunks in parallel
/// across the provided `task_pool`. One task is spawned in the task pool for every chunk.
///
/// If `max_tasks` is `None`, this function will attempt to use one chunk per thread in
/// `task_pool`.
///
/// The iteration function takes the index of the chunk in the original slice as the
/// first argument, and the chunk as the second argument.
///
/// Returns a `Vec` of the mapped results in the same order as the input.
///
/// # Example
///
/// ```
/// # use bevy_tasks::prelude::*;
/// # use bevy_tasks::TaskPool;
/// let task_pool = TaskPool::new();
/// let mut counts = (0..10000).collect::<Vec<u32>>();
/// let incremented = counts.par_splat_map_mut(&task_pool, None, |_index, chunk| {
/// let mut results = Vec::new();
/// for count in chunk {
/// *count += 5;
/// results.push(*count - 2);
/// }
/// results
/// });
///
/// assert_eq!(counts, (5..10005).collect::<Vec<u32>>());
/// # let flattened: Vec<_> = incremented.into_iter().flatten().collect::<Vec<u32>>();
/// # assert_eq!(flattened, (3..10003).collect::<Vec<u32>>());
/// ```
///
/// # See Also
///
/// - [`ParallelSlice::par_splat_map`] for mapping immutable slices.
/// - [`ParallelSliceMut::par_chunk_map_mut`] for mapping when a specific chunk size is desirable.
fn par_splat_map_mut<F, R>(
&mut self,
task_pool: &TaskPool,
max_tasks: Option<usize>,
f: F,
) -> Vec<R>
where
F: Fn(usize, &mut [T]) -> R + Send + Sync,
R: Send + 'static,
{
let mut slice = self.as_mut();
let chunk_size = core::cmp::max(
1,
core::cmp::max(
slice.len() / task_pool.thread_num(),
slice.len() / max_tasks.unwrap_or(usize::MAX),
),
);
slice.par_chunk_map_mut(task_pool, chunk_size, f)
}
}
impl<S, T: Send> ParallelSliceMut<T> for S where S: AsMut<[T]> {}
#[cfg(test)]
mod tests {
use crate::*;
use alloc::vec;
#[test]
fn test_par_chunks_map() {
let v = vec![42; 1000];
let task_pool = TaskPool::new();
let outputs = v.par_splat_map(&task_pool, None, |_, numbers| -> i32 {
numbers.iter().sum()
});
let mut sum = 0;
for output in outputs {
sum += output;
}
assert_eq!(sum, 1000 * 42);
}
#[test]
fn test_par_chunks_map_mut() {
let mut v = vec![42; 1000];
let task_pool = TaskPool::new();
let outputs = v.par_splat_map_mut(&task_pool, None, |_, numbers| -> i32 {
for number in numbers.iter_mut() {
*number *= 2;
}
numbers.iter().sum()
});
let mut sum = 0;
for output in outputs {
sum += output;
}
assert_eq!(sum, 1000 * 42 * 2);
assert_eq!(v[0], 84);
}
#[test]
fn test_par_chunks_map_index() {
let v = vec![1; 1000];
let task_pool = TaskPool::new();
let outputs = v.par_chunk_map(&task_pool, 100, |index, numbers| -> i32 {
numbers.iter().sum::<i32>() * index as i32
});
assert_eq!(outputs.iter().sum::<i32>(), 100 * (9 * 10) / 2);
}
}

60
vendor/bevy_tasks/src/task.rs vendored Normal file
View File

@@ -0,0 +1,60 @@
use core::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
/// Wraps `async_task::Task`, a spawned future.
///
/// Tasks are also futures themselves and yield the output of the spawned future.
///
/// When a task is dropped, it gets canceled and won't be polled again. To cancel a task a bit
/// more gracefully and wait until it stops running, use the [`Task::cancel()`] method.
///
/// Tasks that panic get immediately canceled. Awaiting a canceled task also causes a panic.
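///
/// # Example
///
/// A minimal sketch of awaiting a spawned task's output (assuming the `TaskPool` and
/// `block_on` re-exports of this crate):
///
/// ```
/// use bevy_tasks::{block_on, TaskPool};
///
/// let pool = TaskPool::new();
/// let task = pool.spawn(async { 21 * 2 });
/// // Awaiting the `Task` yields the output of the spawned future.
/// assert_eq!(block_on(task), 42);
/// ```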
#[derive(Debug)]
#[must_use = "Tasks are canceled when dropped, use `.detach()` to run them in the background."]
pub struct Task<T>(async_task::Task<T>);
impl<T> Task<T> {
/// Creates a new task from a given `async_task::Task`
pub fn new(task: async_task::Task<T>) -> Self {
Self(task)
}
/// Detaches the task to let it keep running in the background. See
/// `async_executor::Task::detach`
pub fn detach(self) {
self.0.detach();
}
/// Cancels the task and waits for it to stop running.
///
/// Returns the task's output if it was completed just before it got canceled, or [`None`] if
/// it didn't complete.
///
/// While it's possible to simply drop the [`Task`] to cancel it, this is a cleaner way of
/// canceling because it also waits for the task to stop running.
///
/// See `async_executor::Task::cancel`
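///
/// A minimal sketch (whether the output is available depends on timing):
///
/// ```
/// use bevy_tasks::{block_on, TaskPool};
///
/// let pool = TaskPool::new();
/// let task = pool.spawn(async { 42 });
/// // `cancel` waits until the task has stopped running before returning.
/// let output = block_on(task.cancel());
/// assert!(output.is_none() || output == Some(42));
/// ```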
pub async fn cancel(self) -> Option<T> {
self.0.cancel().await
}
/// Returns `true` if the current task is finished.
///
/// Unlike `poll`, this doesn't resolve the final value; it only checks whether the task has finished.
/// Note that in a multithreaded environment, the task may finish immediately after this function returns.
pub fn is_finished(&self) -> bool {
self.0.is_finished()
}
}
impl<T> Future for Task<T> {
type Output = T;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
Pin::new(&mut self.0).poll(cx)
}
}

967
vendor/bevy_tasks/src/task_pool.rs vendored Normal file
View File

@@ -0,0 +1,967 @@
use alloc::{boxed::Box, format, string::String, vec::Vec};
use core::{future::Future, marker::PhantomData, mem, panic::AssertUnwindSafe};
use std::{
thread::{self, JoinHandle},
thread_local,
};
use crate::executor::FallibleTask;
use bevy_platform::sync::Arc;
use concurrent_queue::ConcurrentQueue;
use futures_lite::FutureExt;
use crate::{
block_on,
thread_executor::{ThreadExecutor, ThreadExecutorTicker},
Task,
};
struct CallOnDrop(Option<Arc<dyn Fn() + Send + Sync + 'static>>);
impl Drop for CallOnDrop {
fn drop(&mut self) {
if let Some(call) = self.0.as_ref() {
call();
}
}
}
/// Used to create a [`TaskPool`]
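///
/// # Example
///
/// A minimal sketch of the builder flow (the thread count and name are illustrative values):
///
/// ```
/// use bevy_tasks::TaskPoolBuilder;
///
/// let pool = TaskPoolBuilder::new()
///     .num_threads(4)
///     .thread_name("MyThreadPool".to_string())
///     .build();
/// assert_eq!(pool.thread_num(), 4);
/// ```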
#[derive(Default)]
#[must_use]
pub struct TaskPoolBuilder {
/// If set, we'll set up the thread pool to use at most `num_threads` threads.
/// Otherwise, the logical core count of the system is used.
num_threads: Option<usize>,
/// If set, we'll use the given stack size rather than the system default
stack_size: Option<usize>,
/// Allows customizing the name of the threads - helpful for debugging. If set, threads will
/// be named `<thread_name> (<thread_index>)`, e.g. `"MyThreadPool (2)"`.
thread_name: Option<String>,
on_thread_spawn: Option<Arc<dyn Fn() + Send + Sync + 'static>>,
on_thread_destroy: Option<Arc<dyn Fn() + Send + Sync + 'static>>,
}
impl TaskPoolBuilder {
/// Creates a new [`TaskPoolBuilder`] instance
pub fn new() -> Self {
Self::default()
}
/// Override the number of threads created for the pool. If unset, we default to the number
/// of logical cores of the system
pub fn num_threads(mut self, num_threads: usize) -> Self {
self.num_threads = Some(num_threads);
self
}
/// Override the stack size of the threads created for the pool
pub fn stack_size(mut self, stack_size: usize) -> Self {
self.stack_size = Some(stack_size);
self
}
/// Override the name of the threads created for the pool. If set, threads will
/// be named `<thread_name> (<thread_index>)`, e.g. `MyThreadPool (2)`
pub fn thread_name(mut self, thread_name: String) -> Self {
self.thread_name = Some(thread_name);
self
}
/// Sets a callback that is invoked once for every created thread as it starts.
///
/// This is called on the thread itself and has access to all thread-local storage.
/// This will block running async tasks on the thread until the callback completes.
pub fn on_thread_spawn(mut self, f: impl Fn() + Send + Sync + 'static) -> Self {
let arc = Arc::new(f);
#[cfg(not(target_has_atomic = "ptr"))]
#[expect(
unsafe_code,
reason = "unsized coercion is an unstable feature for non-std types"
)]
// SAFETY:
// - Coercion from `impl Fn` to `dyn Fn` is valid
// - `Arc::from_raw` receives a valid pointer from a previous call to `Arc::into_raw`
let arc = unsafe {
Arc::from_raw(Arc::into_raw(arc) as *const (dyn Fn() + Send + Sync + 'static))
};
self.on_thread_spawn = Some(arc);
self
}
/// Sets a callback that is invoked once for every created thread as it terminates.
///
/// This is called on the thread itself and has access to all thread-local storage.
/// This will block thread termination until the callback completes.
pub fn on_thread_destroy(mut self, f: impl Fn() + Send + Sync + 'static) -> Self {
let arc = Arc::new(f);
#[cfg(not(target_has_atomic = "ptr"))]
#[expect(
unsafe_code,
reason = "unsized coercion is an unstable feature for non-std types"
)]
// SAFETY:
// - Coercion from `impl Fn` to `dyn Fn` is valid
// - `Arc::from_raw` receives a valid pointer from a previous call to `Arc::into_raw`
let arc = unsafe {
Arc::from_raw(Arc::into_raw(arc) as *const (dyn Fn() + Send + Sync + 'static))
};
self.on_thread_destroy = Some(arc);
self
}
/// Creates a new [`TaskPool`] based on the current options.
pub fn build(self) -> TaskPool {
TaskPool::new_internal(self)
}
}
/// A thread pool for executing tasks.
///
/// While futures usually need to be polled to be executed, Bevy tasks are
/// automatically driven by the pool on threads owned by the pool. The [`Task`]
/// future only needs to be polled in order to receive the result. (For that
/// purpose, it is often stored in a component or resource, see the
/// `async_compute` example.)
///
/// If the result is not required, one may also use [`Task::detach`] and the pool
/// will still execute a task, even if it is dropped.
#[derive(Debug)]
pub struct TaskPool {
/// The executor for the pool.
executor: Arc<crate::executor::Executor<'static>>,
// The inner state of the pool.
threads: Vec<JoinHandle<()>>,
shutdown_tx: async_channel::Sender<()>,
}
impl TaskPool {
thread_local! {
static LOCAL_EXECUTOR: crate::executor::LocalExecutor<'static> = const { crate::executor::LocalExecutor::new() };
static THREAD_EXECUTOR: Arc<ThreadExecutor<'static>> = Arc::new(ThreadExecutor::new());
}
/// Each thread should only create one `ThreadExecutor`; otherwise, it is likely to deadlock.
pub fn get_thread_executor() -> Arc<ThreadExecutor<'static>> {
Self::THREAD_EXECUTOR.with(Clone::clone)
}
/// Create a `TaskPool` with the default configuration.
pub fn new() -> Self {
TaskPoolBuilder::new().build()
}
fn new_internal(builder: TaskPoolBuilder) -> Self {
let (shutdown_tx, shutdown_rx) = async_channel::unbounded::<()>();
let executor = Arc::new(crate::executor::Executor::new());
let num_threads = builder
.num_threads
.unwrap_or_else(crate::available_parallelism);
let threads = (0..num_threads)
.map(|i| {
let ex = Arc::clone(&executor);
let shutdown_rx = shutdown_rx.clone();
let thread_name = if let Some(thread_name) = builder.thread_name.as_deref() {
format!("{thread_name} ({i})")
} else {
format!("TaskPool ({i})")
};
let mut thread_builder = thread::Builder::new().name(thread_name);
if let Some(stack_size) = builder.stack_size {
thread_builder = thread_builder.stack_size(stack_size);
}
let on_thread_spawn = builder.on_thread_spawn.clone();
let on_thread_destroy = builder.on_thread_destroy.clone();
thread_builder
.spawn(move || {
TaskPool::LOCAL_EXECUTOR.with(|local_executor| {
if let Some(on_thread_spawn) = on_thread_spawn {
on_thread_spawn();
drop(on_thread_spawn);
}
let _destructor = CallOnDrop(on_thread_destroy);
loop {
let res = std::panic::catch_unwind(|| {
let tick_forever = async move {
loop {
local_executor.tick().await;
}
};
block_on(ex.run(tick_forever.or(shutdown_rx.recv())))
});
if let Ok(value) = res {
// Use unwrap_err because we expect a Closed error
value.unwrap_err();
break;
}
}
});
})
.expect("Failed to spawn thread.")
})
.collect();
Self {
executor,
threads,
shutdown_tx,
}
}
/// Return the number of threads owned by the task pool
pub fn thread_num(&self) -> usize {
self.threads.len()
}
/// Allows spawning non-`'static` futures on the thread pool. The function takes a callback,
/// passing a scope object into it. The scope object provided to the callback can be used
/// to spawn tasks. This function will await the completion of all tasks before returning.
///
/// This is similar to [`thread::scope`] and `rayon::scope`.
///
/// # Example
///
/// ```
/// use bevy_tasks::TaskPool;
///
/// let pool = TaskPool::new();
/// let mut x = 0;
/// let results = pool.scope(|s| {
/// s.spawn(async {
/// // you can borrow the spawner inside a task and spawn tasks from within the task
/// s.spawn(async {
/// // borrow x and mutate it.
/// x = 2;
/// // return a value from the task
/// 1
/// });
/// // return some other value from the first task
/// 0
/// });
/// });
///
/// // The ordering of results is non-deterministic if you spawn from within tasks as above.
/// // If you're doing this, you'll have to write your code to not depend on the ordering.
/// assert!(results.contains(&0));
/// assert!(results.contains(&1));
///
/// // The ordering is deterministic if you only spawn directly from the closure function.
/// let results = pool.scope(|s| {
/// s.spawn(async { 0 });
/// s.spawn(async { 1 });
/// });
/// assert_eq!(&results[..], &[0, 1]);
///
/// // You can access x after scope runs, since it was only temporarily borrowed in the scope.
/// assert_eq!(x, 2);
/// ```
///
/// # Lifetimes
///
/// The [`Scope`] object takes two lifetimes: `'scope` and `'env`.
///
/// The `'scope` lifetime represents the lifetime of the scope. That is the time during
/// which the provided closure and tasks that are spawned into the scope are run.
///
/// The `'env` lifetime represents the lifetime of whatever is borrowed by the scope.
/// Thus this lifetime must outlive `'scope`.
///
/// ```compile_fail
/// use bevy_tasks::TaskPool;
/// fn scope_escapes_closure() {
/// let pool = TaskPool::new();
/// let foo = Box::new(42);
/// pool.scope(|scope| {
/// std::thread::spawn(move || {
/// // UB. This could spawn on the scope after `.scope` returns and the internal Scope is dropped.
/// scope.spawn(async move {
/// assert_eq!(*foo, 42);
/// });
/// });
/// });
/// }
/// ```
///
/// ```compile_fail
/// use bevy_tasks::TaskPool;
/// fn cannot_borrow_from_closure() {
/// let pool = TaskPool::new();
/// pool.scope(|scope| {
/// let x = 1;
/// let y = &x;
/// scope.spawn(async move {
/// assert_eq!(*y, 1);
/// });
/// });
/// }
/// ```
pub fn scope<'env, F, T>(&self, f: F) -> Vec<T>
where
F: for<'scope> FnOnce(&'scope Scope<'scope, 'env, T>),
T: Send + 'static,
{
Self::THREAD_EXECUTOR.with(|scope_executor| {
self.scope_with_executor_inner(true, scope_executor, scope_executor, f)
})
}
/// This allows passing an external executor to spawn tasks on. When you pass an external executor,
/// [`Scope::spawn_on_scope`] spawns are then run on the thread that the [`ThreadExecutor`] is being ticked on.
/// If [`None`] is passed, the scope will use a [`ThreadExecutor`] that is ticked on the current thread.
///
/// When `tick_task_pool_executor` is set to `true`, the multithreaded task stealing executor is ticked on the scope
/// thread. Disabling this can be useful when finishing the scope is latency sensitive. Pulling tasks from
/// the global executor can run tasks unrelated to the scope and delay when the scope returns.
///
/// See [`Self::scope`] for more details in general about how scopes work.
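///
/// A minimal sketch using an explicitly created [`ThreadExecutor`] (passing `None` falls back
/// to the current thread's executor):
///
/// ```
/// use bevy_tasks::{TaskPool, ThreadExecutor};
///
/// let pool = TaskPool::new();
/// let external = ThreadExecutor::new();
/// let results = pool.scope_with_executor(true, Some(&external), |s| {
///     s.spawn(async { 1 });
///     s.spawn_on_external(async { 2 });
/// });
/// assert_eq!(results.len(), 2);
/// ```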
pub fn scope_with_executor<'env, F, T>(
&self,
tick_task_pool_executor: bool,
external_executor: Option<&ThreadExecutor>,
f: F,
) -> Vec<T>
where
F: for<'scope> FnOnce(&'scope Scope<'scope, 'env, T>),
T: Send + 'static,
{
Self::THREAD_EXECUTOR.with(|scope_executor| {
// If an `external_executor` is passed, use that. Otherwise, get the executor stored
// in the `THREAD_EXECUTOR` thread local.
if let Some(external_executor) = external_executor {
self.scope_with_executor_inner(
tick_task_pool_executor,
external_executor,
scope_executor,
f,
)
} else {
self.scope_with_executor_inner(
tick_task_pool_executor,
scope_executor,
scope_executor,
f,
)
}
})
}
#[expect(unsafe_code, reason = "Required to transmute lifetimes.")]
fn scope_with_executor_inner<'env, F, T>(
&self,
tick_task_pool_executor: bool,
external_executor: &ThreadExecutor,
scope_executor: &ThreadExecutor,
f: F,
) -> Vec<T>
where
F: for<'scope> FnOnce(&'scope Scope<'scope, 'env, T>),
T: Send + 'static,
{
// SAFETY: This safety comment applies to all references transmuted to 'env.
// Any futures spawned with these references need to return before this function completes.
// This is guaranteed because we drive all the futures spawned onto the Scope
// to completion in this function. However, rust has no way of knowing this so we
// transmute the lifetimes to 'env here to appease the compiler as it is unable to validate safety.
// Any usages of the references passed into `Scope` must be accessed through
// the transmuted reference for the rest of this function.
let executor: &crate::executor::Executor = &self.executor;
// SAFETY: As above, all futures must complete in this function so we can change the lifetime
let executor: &'env crate::executor::Executor = unsafe { mem::transmute(executor) };
// SAFETY: As above, all futures must complete in this function so we can change the lifetime
let external_executor: &'env ThreadExecutor<'env> =
unsafe { mem::transmute(external_executor) };
// SAFETY: As above, all futures must complete in this function so we can change the lifetime
let scope_executor: &'env ThreadExecutor<'env> = unsafe { mem::transmute(scope_executor) };
let spawned: ConcurrentQueue<FallibleTask<Result<T, Box<(dyn core::any::Any + Send)>>>> =
ConcurrentQueue::unbounded();
// shadow the variable so that the owned value cannot be used for the rest of the function
// SAFETY: As above, all futures must complete in this function so we can change the lifetime
let spawned: &'env ConcurrentQueue<
FallibleTask<Result<T, Box<(dyn core::any::Any + Send)>>>,
> = unsafe { mem::transmute(&spawned) };
let scope = Scope {
executor,
external_executor,
scope_executor,
spawned,
scope: PhantomData,
env: PhantomData,
};
// shadow the variable so that the owned value cannot be used for the rest of the function
// SAFETY: As above, all futures must complete in this function so we can change the lifetime
let scope: &'env Scope<'_, 'env, T> = unsafe { mem::transmute(&scope) };
f(scope);
if spawned.is_empty() {
Vec::new()
} else {
block_on(async move {
let get_results = async {
let mut results = Vec::with_capacity(spawned.len());
while let Ok(task) = spawned.pop() {
if let Some(res) = task.await {
match res {
Ok(res) => results.push(res),
Err(payload) => std::panic::resume_unwind(payload),
}
} else {
panic!("Failed to catch panic!");
}
}
results
};
let tick_task_pool_executor = tick_task_pool_executor || self.threads.is_empty();
// we get this from a thread local, so we should always be on the scope executor's thread.
// note: it is possible that `scope_executor` and `external_executor` are the same executor;
// in that case, we should only tick one of them, otherwise it may cause a deadlock.
let scope_ticker = scope_executor.ticker().unwrap();
let external_ticker = if !external_executor.is_same(scope_executor) {
external_executor.ticker()
} else {
None
};
match (external_ticker, tick_task_pool_executor) {
(Some(external_ticker), true) => {
Self::execute_global_external_scope(
executor,
external_ticker,
scope_ticker,
get_results,
)
.await
}
(Some(external_ticker), false) => {
Self::execute_external_scope(external_ticker, scope_ticker, get_results)
.await
}
// either external_executor was `None` or it is the same as scope_executor
(None, true) => {
Self::execute_global_scope(executor, scope_ticker, get_results).await
}
(None, false) => Self::execute_scope(scope_ticker, get_results).await,
}
})
}
}
#[inline]
async fn execute_global_external_scope<'scope, 'ticker, T>(
executor: &'scope crate::executor::Executor<'scope>,
external_ticker: ThreadExecutorTicker<'scope, 'ticker>,
scope_ticker: ThreadExecutorTicker<'scope, 'ticker>,
get_results: impl Future<Output = Vec<T>>,
) -> Vec<T> {
// we restart the executors if a task panics. if a scoped
// task panics, it will propagate the panic to the scope on the call to get_results
let execute_forever = async move {
loop {
let tick_forever = async {
loop {
external_ticker.tick().or(scope_ticker.tick()).await;
}
};
// we don't care if it errors. If a scoped task errors it will propagate
// to get_results
let _result = AssertUnwindSafe(executor.run(tick_forever))
.catch_unwind()
.await
.is_ok();
}
};
get_results.or(execute_forever).await
}
#[inline]
async fn execute_external_scope<'scope, 'ticker, T>(
external_ticker: ThreadExecutorTicker<'scope, 'ticker>,
scope_ticker: ThreadExecutorTicker<'scope, 'ticker>,
get_results: impl Future<Output = Vec<T>>,
) -> Vec<T> {
let execute_forever = async {
loop {
let tick_forever = async {
loop {
external_ticker.tick().or(scope_ticker.tick()).await;
}
};
let _result = AssertUnwindSafe(tick_forever).catch_unwind().await.is_ok();
}
};
get_results.or(execute_forever).await
}
#[inline]
async fn execute_global_scope<'scope, 'ticker, T>(
executor: &'scope crate::executor::Executor<'scope>,
scope_ticker: ThreadExecutorTicker<'scope, 'ticker>,
get_results: impl Future<Output = Vec<T>>,
) -> Vec<T> {
let execute_forever = async {
loop {
let tick_forever = async {
loop {
scope_ticker.tick().await;
}
};
let _result = AssertUnwindSafe(executor.run(tick_forever))
.catch_unwind()
.await
.is_ok();
}
};
get_results.or(execute_forever).await
}
#[inline]
async fn execute_scope<'scope, 'ticker, T>(
scope_ticker: ThreadExecutorTicker<'scope, 'ticker>,
get_results: impl Future<Output = Vec<T>>,
) -> Vec<T> {
let execute_forever = async {
loop {
let tick_forever = async {
loop {
scope_ticker.tick().await;
}
};
let _result = AssertUnwindSafe(tick_forever).catch_unwind().await.is_ok();
}
};
get_results.or(execute_forever).await
}
/// Spawns a static future onto the thread pool. The returned [`Task`] is a
/// future that can be polled for the result. It can also be canceled and
/// "detached", allowing the task to continue running even if dropped. In
/// any case, the pool will execute the task even without polling by the
/// end-user.
///
/// If the provided future is non-`Send`, [`TaskPool::spawn_local`] should
/// be used instead.
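///
/// A minimal sketch of a fire-and-forget task (the result is discarded via `detach`):
///
/// ```
/// use bevy_tasks::TaskPool;
///
/// let pool = TaskPool::new();
/// pool.spawn(async {
///     // Runs on one of the pool's threads even though nothing polls it.
/// })
/// .detach();
/// ```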
pub fn spawn<T>(&self, future: impl Future<Output = T> + Send + 'static) -> Task<T>
where
T: Send + 'static,
{
Task::new(self.executor.spawn(future))
}
/// Spawns a static future on the thread-local async executor for the
/// current thread. The task will run entirely on the thread the task was
/// spawned on.
///
/// The returned [`Task`] is a future that can be polled for the
/// result. It can also be canceled and "detached", allowing the task to
/// continue running even if dropped. In any case, the pool will execute the
/// task even without polling by the end-user.
///
/// Users should generally prefer to use [`TaskPool::spawn`] instead,
/// unless the provided future is not `Send`.
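///
/// A minimal sketch of running a non-`Send` future; the local executor only makes progress
/// when it is ticked on this thread:
///
/// ```
/// use bevy_tasks::TaskPool;
/// use std::rc::Rc;
///
/// let pool = TaskPool::new();
/// // `Rc` is not `Send`, so this future cannot go through `TaskPool::spawn`.
/// let value = Rc::new(7);
/// let task = pool.spawn_local(async move { assert_eq!(*value, 7); });
/// // Tick the thread-local executor until the task has run.
/// pool.with_local_executor(|local| {
///     while !task.is_finished() {
///         local.try_tick();
///     }
/// });
/// ```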
pub fn spawn_local<T>(&self, future: impl Future<Output = T> + 'static) -> Task<T>
where
T: 'static,
{
Task::new(TaskPool::LOCAL_EXECUTOR.with(|executor| executor.spawn(future)))
}
/// Runs a function with the local executor. Typically used to tick
/// the local executor on the main thread as it needs to share time with
/// other things.
///
/// ```
/// use bevy_tasks::TaskPool;
///
/// TaskPool::new().with_local_executor(|local_executor| {
/// local_executor.try_tick();
/// });
/// ```
pub fn with_local_executor<F, R>(&self, f: F) -> R
where
F: FnOnce(&crate::executor::LocalExecutor) -> R,
{
Self::LOCAL_EXECUTOR.with(f)
}
}
impl Default for TaskPool {
fn default() -> Self {
Self::new()
}
}
impl Drop for TaskPool {
fn drop(&mut self) {
self.shutdown_tx.close();
let panicking = thread::panicking();
for join_handle in self.threads.drain(..) {
let res = join_handle.join();
if !panicking {
res.expect("Task thread panicked while executing.");
}
}
}
}
/// A [`TaskPool`] scope for running one or more non-`'static` futures.
///
/// For more information, see [`TaskPool::scope`].
#[derive(Debug)]
pub struct Scope<'scope, 'env: 'scope, T> {
executor: &'scope crate::executor::Executor<'scope>,
external_executor: &'scope ThreadExecutor<'scope>,
scope_executor: &'scope ThreadExecutor<'scope>,
spawned: &'scope ConcurrentQueue<FallibleTask<Result<T, Box<(dyn core::any::Any + Send)>>>>,
// make `Scope` invariant over 'scope and 'env
scope: PhantomData<&'scope mut &'scope ()>,
env: PhantomData<&'env mut &'env ()>,
}
impl<'scope, 'env, T: Send + 'scope> Scope<'scope, 'env, T> {
/// Spawns a scoped future onto the thread pool. The scope *must* outlive
/// the provided future. The results of the future will be returned as a part of
/// [`TaskPool::scope`]'s return value.
///
/// For futures that should run on the thread `scope` is called on, [`Scope::spawn_on_scope`] should be used
/// instead.
///
/// For more information, see [`TaskPool::scope`].
pub fn spawn<Fut: Future<Output = T> + 'scope + Send>(&self, f: Fut) {
let task = self
.executor
.spawn(AssertUnwindSafe(f).catch_unwind())
.fallible();
// ConcurrentQueue only errors when closed or full, but we never
// close and use an unbounded queue, so it is safe to unwrap
self.spawned.push(task).unwrap();
}
/// Spawns a scoped future onto the thread the scope is run on. The scope *must* outlive
/// the provided future. The results of the future will be returned as a part of
/// [`TaskPool::scope`]'s return value. Users should generally prefer to use
/// [`Scope::spawn`] instead, unless the provided future needs to run on the scope's thread.
///
/// For more information, see [`TaskPool::scope`].
pub fn spawn_on_scope<Fut: Future<Output = T> + 'scope + Send>(&self, f: Fut) {
let task = self
.scope_executor
.spawn(AssertUnwindSafe(f).catch_unwind())
.fallible();
// ConcurrentQueue only errors when closed or full, but we never
// close and use an unbounded queue, so it is safe to unwrap
self.spawned.push(task).unwrap();
}
/// Spawns a scoped future onto the thread of the external thread executor.
/// This is typically the main thread. The scope *must* outlive
/// the provided future. The results of the future will be returned as a part of
/// [`TaskPool::scope`]'s return value. Users should generally prefer to use
/// [`Scope::spawn`] instead, unless the provided future needs to run on the external thread.
///
/// For more information, see [`TaskPool::scope`].
pub fn spawn_on_external<Fut: Future<Output = T> + 'scope + Send>(&self, f: Fut) {
let task = self
.external_executor
.spawn(AssertUnwindSafe(f).catch_unwind())
.fallible();
// ConcurrentQueue only errors when closed or full, but we never
// close and use an unbounded queue, so it is safe to unwrap
self.spawned.push(task).unwrap();
}
}
impl<'scope, 'env, T> Drop for Scope<'scope, 'env, T>
where
T: 'scope,
{
fn drop(&mut self) {
block_on(async {
while let Ok(task) = self.spawned.pop() {
task.cancel().await;
}
});
}
}
#[cfg(test)]
mod tests {
use super::*;
use core::sync::atomic::{AtomicBool, AtomicI32, Ordering};
use std::sync::Barrier;
#[test]
fn test_spawn() {
let pool = TaskPool::new();
let foo = Box::new(42);
let foo = &*foo;
let count = Arc::new(AtomicI32::new(0));
let outputs = pool.scope(|scope| {
for _ in 0..100 {
let count_clone = count.clone();
scope.spawn(async move {
if *foo != 42 {
panic!("not 42!?!?")
} else {
count_clone.fetch_add(1, Ordering::Relaxed);
*foo
}
});
}
});
for output in &outputs {
assert_eq!(*output, 42);
}
assert_eq!(outputs.len(), 100);
assert_eq!(count.load(Ordering::Relaxed), 100);
}
#[test]
fn test_thread_callbacks() {
let counter = Arc::new(AtomicI32::new(0));
let start_counter = counter.clone();
{
let barrier = Arc::new(Barrier::new(11));
let last_barrier = barrier.clone();
// Build and immediately drop to terminate
let _pool = TaskPoolBuilder::new()
.num_threads(10)
.on_thread_spawn(move || {
start_counter.fetch_add(1, Ordering::Relaxed);
barrier.clone().wait();
})
.build();
last_barrier.wait();
assert_eq!(10, counter.load(Ordering::Relaxed));
}
assert_eq!(10, counter.load(Ordering::Relaxed));
let end_counter = counter.clone();
{
let _pool = TaskPoolBuilder::new()
.num_threads(20)
.on_thread_destroy(move || {
end_counter.fetch_sub(1, Ordering::Relaxed);
})
.build();
assert_eq!(10, counter.load(Ordering::Relaxed));
}
assert_eq!(-10, counter.load(Ordering::Relaxed));
let start_counter = counter.clone();
let end_counter = counter.clone();
{
let barrier = Arc::new(Barrier::new(6));
let last_barrier = barrier.clone();
let _pool = TaskPoolBuilder::new()
.num_threads(5)
.on_thread_spawn(move || {
start_counter.fetch_add(1, Ordering::Relaxed);
barrier.wait();
})
.on_thread_destroy(move || {
end_counter.fetch_sub(1, Ordering::Relaxed);
})
.build();
last_barrier.wait();
assert_eq!(-5, counter.load(Ordering::Relaxed));
}
assert_eq!(-10, counter.load(Ordering::Relaxed));
}
#[test]
fn test_mixed_spawn_on_scope_and_spawn() {
let pool = TaskPool::new();
let foo = Box::new(42);
let foo = &*foo;
let local_count = Arc::new(AtomicI32::new(0));
let non_local_count = Arc::new(AtomicI32::new(0));
let outputs = pool.scope(|scope| {
for i in 0..100 {
if i % 2 == 0 {
let count_clone = non_local_count.clone();
scope.spawn(async move {
if *foo != 42 {
panic!("not 42!?!?")
} else {
count_clone.fetch_add(1, Ordering::Relaxed);
*foo
}
});
} else {
let count_clone = local_count.clone();
scope.spawn_on_scope(async move {
if *foo != 42 {
panic!("not 42!?!?")
} else {
count_clone.fetch_add(1, Ordering::Relaxed);
*foo
}
});
}
}
});
for output in &outputs {
assert_eq!(*output, 42);
}
assert_eq!(outputs.len(), 100);
assert_eq!(local_count.load(Ordering::Relaxed), 50);
assert_eq!(non_local_count.load(Ordering::Relaxed), 50);
}
#[test]
fn test_thread_locality() {
let pool = Arc::new(TaskPool::new());
let count = Arc::new(AtomicI32::new(0));
let barrier = Arc::new(Barrier::new(101));
let thread_check_failed = Arc::new(AtomicBool::new(false));
for _ in 0..100 {
let inner_barrier = barrier.clone();
let count_clone = count.clone();
let inner_pool = pool.clone();
let inner_thread_check_failed = thread_check_failed.clone();
thread::spawn(move || {
inner_pool.scope(|scope| {
let inner_count_clone = count_clone.clone();
scope.spawn(async move {
inner_count_clone.fetch_add(1, Ordering::Release);
});
let spawner = thread::current().id();
let inner_count_clone = count_clone.clone();
scope.spawn_on_scope(async move {
inner_count_clone.fetch_add(1, Ordering::Release);
if thread::current().id() != spawner {
// NOTE: This check is using an atomic rather than simply panicking the
// thread to avoid deadlocking the barrier on failure
inner_thread_check_failed.store(true, Ordering::Release);
}
});
});
inner_barrier.wait();
});
}
barrier.wait();
assert!(!thread_check_failed.load(Ordering::Acquire));
assert_eq!(count.load(Ordering::Acquire), 200);
}
#[test]
fn test_nested_spawn() {
let pool = TaskPool::new();
let foo = Box::new(42);
let foo = &*foo;
let count = Arc::new(AtomicI32::new(0));
let outputs: Vec<i32> = pool.scope(|scope| {
for _ in 0..10 {
let count_clone = count.clone();
scope.spawn(async move {
for _ in 0..10 {
let count_clone_clone = count_clone.clone();
scope.spawn(async move {
if *foo != 42 {
panic!("not 42!?!?")
} else {
count_clone_clone.fetch_add(1, Ordering::Relaxed);
*foo
}
});
}
*foo
});
}
});
for output in &outputs {
assert_eq!(*output, 42);
}
// the inner loops spawn 100 tasks in total and the outer loop spawns 10, hence 100 + 10 outputs
assert_eq!(outputs.len(), 110);
assert_eq!(count.load(Ordering::Relaxed), 100);
}
#[test]
fn test_nested_locality() {
let pool = Arc::new(TaskPool::new());
let count = Arc::new(AtomicI32::new(0));
let barrier = Arc::new(Barrier::new(101));
let thread_check_failed = Arc::new(AtomicBool::new(false));
for _ in 0..100 {
let inner_barrier = barrier.clone();
let count_clone = count.clone();
let inner_pool = pool.clone();
let inner_thread_check_failed = thread_check_failed.clone();
thread::spawn(move || {
inner_pool.scope(|scope| {
let spawner = thread::current().id();
let inner_count_clone = count_clone.clone();
scope.spawn(async move {
inner_count_clone.fetch_add(1, Ordering::Release);
// spawning on the scope from another thread runs the futures on the scope's thread
scope.spawn_on_scope(async move {
inner_count_clone.fetch_add(1, Ordering::Release);
if thread::current().id() != spawner {
// NOTE: This check is using an atomic rather than simply panicking the
// thread to avoid deadlocking the barrier on failure
inner_thread_check_failed.store(true, Ordering::Release);
}
});
});
});
inner_barrier.wait();
});
}
barrier.wait();
assert!(!thread_check_failed.load(Ordering::Acquire));
assert_eq!(count.load(Ordering::Acquire), 200);
}
// This test will often freeze on other executors.
#[test]
fn test_nested_scopes() {
let pool = TaskPool::new();
let count = Arc::new(AtomicI32::new(0));
pool.scope(|scope| {
scope.spawn(async {
pool.scope(|scope| {
scope.spawn(async {
count.fetch_add(1, Ordering::Relaxed);
});
});
});
});
assert_eq!(count.load(Ordering::Acquire), 1);
}
}

132
vendor/bevy_tasks/src/thread_executor.rs vendored Normal file
View File

@@ -0,0 +1,132 @@
use core::marker::PhantomData;
use std::thread::{self, ThreadId};
use crate::executor::Executor;
use async_task::Task;
use futures_lite::Future;
/// An executor that can only be ticked on the thread it was instantiated on, but
/// can have `Send` tasks spawned onto it from other threads.
///
/// # Example
/// ```
/// # use std::sync::{Arc, atomic::{AtomicI32, Ordering}};
/// use bevy_tasks::ThreadExecutor;
///
/// let thread_executor = ThreadExecutor::new();
/// let count = Arc::new(AtomicI32::new(0));
///
/// // create some owned values that can be moved into another thread
/// let count_clone = count.clone();
///
/// std::thread::scope(|scope| {
/// scope.spawn(|| {
/// // we cannot get the ticker from another thread
/// let not_thread_ticker = thread_executor.ticker();
/// assert!(not_thread_ticker.is_none());
///
/// // but we can spawn tasks from another thread
/// thread_executor.spawn(async move {
/// count_clone.fetch_add(1, Ordering::Relaxed);
/// }).detach();
/// });
/// });
///
/// // the tasks do not make progress unless the executor is manually ticked
/// assert_eq!(count.load(Ordering::Relaxed), 0);
///
/// // tick the ticker until task finishes
/// let thread_ticker = thread_executor.ticker().unwrap();
/// thread_ticker.try_tick();
/// assert_eq!(count.load(Ordering::Relaxed), 1);
/// ```
#[derive(Debug)]
pub struct ThreadExecutor<'task> {
executor: Executor<'task>,
thread_id: ThreadId,
}
impl<'task> Default for ThreadExecutor<'task> {
fn default() -> Self {
Self {
executor: Executor::new(),
thread_id: thread::current().id(),
}
}
}
impl<'task> ThreadExecutor<'task> {
/// Creates a new [`ThreadExecutor`].
pub fn new() -> Self {
Self::default()
}
/// Spawns a task on the thread executor.
pub fn spawn<T: Send + 'task>(
&self,
future: impl Future<Output = T> + Send + 'task,
) -> Task<T> {
self.executor.spawn(future)
}
/// Gets the [`ThreadExecutorTicker`] for this executor.
/// Use this to tick the executor.
/// It only returns the ticker if called on the thread the executor was created on,
/// and returns `None` otherwise.
pub fn ticker<'ticker>(&'ticker self) -> Option<ThreadExecutorTicker<'task, 'ticker>> {
if thread::current().id() == self.thread_id {
return Some(ThreadExecutorTicker {
executor: self,
_marker: PhantomData,
});
}
None
}
/// Returns `true` if `self` and `other` are the same executor.
pub fn is_same(&self, other: &Self) -> bool {
core::ptr::eq(self, other)
}
}
/// Used to tick the [`ThreadExecutor`]. The executor does not
/// make progress unless it is manually ticked on the thread it was
/// created on.
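///
/// A minimal sketch of ticking from the owning thread:
///
/// ```
/// use bevy_tasks::ThreadExecutor;
///
/// let executor = ThreadExecutor::new();
/// let task = executor.spawn(async { 2 + 3 });
/// let ticker = executor.ticker().unwrap();
/// // Drive the executor until the spawned task has run.
/// while !task.is_finished() {
///     ticker.try_tick();
/// }
/// ```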
#[derive(Debug)]
pub struct ThreadExecutorTicker<'task, 'ticker> {
executor: &'ticker ThreadExecutor<'task>,
// make type not send or sync
_marker: PhantomData<*const ()>,
}
impl<'task, 'ticker> ThreadExecutorTicker<'task, 'ticker> {
/// Tick the thread executor.
pub async fn tick(&self) {
self.executor.executor.tick().await;
}
/// Synchronously tries to tick a task on the executor.
/// Returns `false` if it does not find a task to tick.
pub fn try_tick(&self) -> bool {
self.executor.executor.try_tick()
}
}
#[cfg(test)]
mod tests {
use super::*;
use alloc::sync::Arc;
#[test]
fn test_ticker() {
let executor = Arc::new(ThreadExecutor::new());
let ticker = executor.ticker();
assert!(ticker.is_some());
thread::scope(|s| {
s.spawn(|| {
let ticker = executor.ticker();
assert!(ticker.is_none());
});
});
}
}

106
vendor/bevy_tasks/src/usages.rs vendored Normal file
View File

@@ -0,0 +1,106 @@
use super::TaskPool;
use bevy_platform::sync::OnceLock;
use core::ops::Deref;
macro_rules! taskpool {
($(#[$attr:meta])* ($static:ident, $type:ident)) => {
static $static: OnceLock<$type> = OnceLock::new();
$(#[$attr])*
#[derive(Debug)]
pub struct $type(TaskPool);
impl $type {
#[doc = concat!(" Gets the global [`", stringify!($type), "`] instance, or initializes it with `f`.")]
pub fn get_or_init(f: impl FnOnce() -> TaskPool) -> &'static Self {
$static.get_or_init(|| Self(f()))
}
#[doc = concat!(" Attempts to get the global [`", stringify!($type), "`] instance, \
or returns `None` if it is not initialized.")]
pub fn try_get() -> Option<&'static Self> {
$static.get()
}
#[doc = concat!(" Gets the global [`", stringify!($type), "`] instance.")]
#[doc = ""]
#[doc = " # Panics"]
#[doc = " Panics if the global instance has not been initialized yet."]
pub fn get() -> &'static Self {
$static.get().expect(
concat!(
"The ",
stringify!($type),
" has not been initialized yet. Please call ",
stringify!($type),
"::get_or_init beforehand."
)
)
}
}
impl Deref for $type {
type Target = TaskPool;
fn deref(&self) -> &Self::Target {
&self.0
}
}
};
}
taskpool! {
/// A newtype for a task pool for CPU-intensive work that must be completed to
/// deliver the next frame
///
/// See [`TaskPool`] documentation for details on Bevy tasks.
/// [`AsyncComputeTaskPool`] should be preferred if the work does not have to be
/// completed before the next frame.
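///
/// A minimal sketch of initializing and using the global pool:
///
/// ```
/// use bevy_tasks::{ComputeTaskPool, TaskPool};
///
/// let pool = ComputeTaskPool::get_or_init(TaskPool::new);
/// let results = pool.scope(|s| {
///     s.spawn(async { 40 + 2 });
/// });
/// assert_eq!(results, vec![42]);
/// ```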
(COMPUTE_TASK_POOL, ComputeTaskPool)
}
taskpool! {
/// A newtype for a task pool for CPU-intensive work that may span across multiple frames
///
/// See [`TaskPool`] documentation for details on Bevy tasks.
/// Use [`ComputeTaskPool`] if the work must be complete before advancing to the next frame.
(ASYNC_COMPUTE_TASK_POOL, AsyncComputeTaskPool)
}
taskpool! {
/// A newtype for a task pool for IO-intensive work (i.e. tasks that spend very little time in a
/// "woken" state)
///
/// See [`TaskPool`] documentation for details on Bevy tasks.
(IO_TASK_POOL, IoTaskPool)
}
/// A function used by `bevy_app` to tick the global task pools on the main thread.
/// This will run a maximum of 100 local tasks per executor per call to this function.
///
/// # Warning
///
/// This function *must* be called on the main thread, or the task pools will not be updated appropriately.
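///
/// A minimal sketch (all three global pools must be initialized beforehand):
///
/// ```ignore
/// use bevy_tasks::{
///     tick_global_task_pools_on_main_thread, AsyncComputeTaskPool, ComputeTaskPool, IoTaskPool,
///     TaskPool,
/// };
///
/// ComputeTaskPool::get_or_init(TaskPool::new);
/// AsyncComputeTaskPool::get_or_init(TaskPool::new);
/// IoTaskPool::get_or_init(TaskPool::new);
/// // Call once per frame on the main thread.
/// tick_global_task_pools_on_main_thread();
/// ```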
#[cfg(not(all(target_arch = "wasm32", feature = "web")))]
pub fn tick_global_task_pools_on_main_thread() {
COMPUTE_TASK_POOL
.get()
.unwrap()
.with_local_executor(|compute_local_executor| {
ASYNC_COMPUTE_TASK_POOL
.get()
.unwrap()
.with_local_executor(|async_local_executor| {
IO_TASK_POOL
.get()
.unwrap()
.with_local_executor(|io_local_executor| {
for _ in 0..100 {
compute_local_executor.try_tick();
async_local_executor.try_tick();
io_local_executor.try_tick();
}
});
});
});
}

93
vendor/bevy_tasks/src/wasm_task.rs vendored Normal file
View File

@@ -0,0 +1,93 @@
use alloc::boxed::Box;
use core::{
any::Any,
future::{Future, IntoFuture},
panic::{AssertUnwindSafe, UnwindSafe},
pin::Pin,
task::{Context, Poll},
};
use futures_channel::oneshot;
/// Wraps an asynchronous task, a spawned future.
///
/// Tasks are also futures themselves and yield the output of the spawned future.
#[derive(Debug)]
pub struct Task<T>(oneshot::Receiver<Result<T, Panic>>);
impl<T: 'static> Task<T> {
pub(crate) fn wrap_future(future: impl Future<Output = T> + 'static) -> Self {
let (sender, receiver) = oneshot::channel();
wasm_bindgen_futures::spawn_local(async move {
// Catch any panics that occur when polling the future so they can
// be propagated back to the task handle.
let value = CatchUnwind(AssertUnwindSafe(future)).await;
let _ = sender.send(value);
});
Self(receiver.into_future())
}
/// When building for Wasm, this method has no effect.
/// This is only included for feature parity with other platforms.
pub fn detach(self) {}
/// Requests a task to be cancelled and returns a future that suspends until it completes.
/// Returns the output of the future if it has already completed.
///
/// # Implementation
///
/// When building for Wasm, it is not possible to cancel tasks, which means this is the same
/// as just awaiting the task. This method is only included for feature parity with other platforms.
pub async fn cancel(self) -> Option<T> {
match self.0.await {
Ok(Ok(value)) => Some(value),
Err(_) => None,
Ok(Err(panic)) => {
// forget this to prevent the panic payload from resuming the panic when dropped.
// this also leaks the box but I'm not sure how to avoid that
core::mem::forget(panic);
None
}
}
}
}
impl<T> Future for Task<T> {
type Output = T;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
match Pin::new(&mut self.0).poll(cx) {
Poll::Ready(Ok(Ok(value))) => Poll::Ready(value),
// NOTE: Propagating the panic here sorta has parity with the async_executor behavior.
// For those tasks, polling them after a panic returns a `None` which gets `unwrap`ed, so
// using `resume_unwind` here is essentially keeping the same behavior while adding more information.
#[cfg(feature = "std")]
Poll::Ready(Ok(Err(panic))) => std::panic::resume_unwind(panic),
#[cfg(not(feature = "std"))]
Poll::Ready(Ok(Err(_panic))) => {
unreachable!("catching a panic is only possible with std")
}
Poll::Ready(Err(_)) => panic!("Polled a task after it was cancelled"),
Poll::Pending => Poll::Pending,
}
}
}
type Panic = Box<dyn Any + Send + 'static>;
#[pin_project::pin_project]
struct CatchUnwind<F: UnwindSafe>(#[pin] F);
impl<F: Future + UnwindSafe> Future for CatchUnwind<F> {
type Output = Result<F::Output, Panic>;
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
let f = AssertUnwindSafe(|| self.project().0.poll(cx));
#[cfg(feature = "std")]
let result = std::panic::catch_unwind(f)?;
#[cfg(not(feature = "std"))]
let result = f();
result.map(Ok)
}
}