Vendor dependencies for 0.3.0 release

This commit is contained in:
2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

177
vendor/async-task/src/header.rs vendored Normal file
View File

@@ -0,0 +1,177 @@
use core::cell::UnsafeCell;
use core::fmt;
use core::task::Waker;
#[cfg(not(feature = "portable-atomic"))]
use core::sync::atomic::AtomicUsize;
use core::sync::atomic::Ordering;
#[cfg(feature = "portable-atomic")]
use portable_atomic::AtomicUsize;
use crate::raw::TaskVTable;
use crate::state::*;
use crate::utils::abort_on_panic;
/// The header of a task.
///
/// This header is stored in memory at the beginning of the heap-allocated task.
pub(crate) struct Header<M> {
    /// Current state of the task.
    ///
    /// Contains flags representing the current state and the reference count.
    pub(crate) state: AtomicUsize,

    /// The task that is blocked on the `Task` handle.
    ///
    /// This waker needs to be woken up once the task completes or is closed.
    ///
    /// NOTE(review): the cell itself is not lock-protected; access is
    /// serialized through the `REGISTERING`/`NOTIFYING` bits in `state`
    /// (see `take`/`register` below) — only a thread owning one of those
    /// bits may touch this field.
    pub(crate) awaiter: UnsafeCell<Option<Waker>>,

    /// The virtual table.
    ///
    /// In addition to the actual waker virtual table, it also contains pointers to several other
    /// methods necessary for bookkeeping the heap-allocated task.
    pub(crate) vtable: &'static TaskVTable,

    /// Metadata associated with the task.
    ///
    /// This metadata may be provided to the user.
    pub(crate) metadata: M,

    /// Whether or not a panic that occurs in the task should be propagated.
    #[cfg(feature = "std")]
    pub(crate) propagate_panic: bool,
}
impl<M> Header<M> {
    /// Notifies the awaiter blocked on this task.
    ///
    /// If the awaiter is the same as the current waker, it will not be notified.
    #[inline]
    pub(crate) fn notify(&self, current: Option<&Waker>) {
        if let Some(w) = self.take(current) {
            // Waking runs arbitrary user code; a panic here would leave the
            // state machine inconsistent, so abort instead of unwinding.
            abort_on_panic(|| w.wake());
        }
    }

    /// Takes the awaiter blocked on this task.
    ///
    /// If there is no awaiter or if it is the same as the current waker, returns `None`.
    #[inline]
    pub(crate) fn take(&self, current: Option<&Waker>) -> Option<Waker> {
        // Set the bit indicating that the task is notifying its awaiter.
        let state = self.state.fetch_or(NOTIFYING, Ordering::AcqRel);

        // If the task was not notifying or registering an awaiter...
        // (Otherwise another thread currently owns the awaiter slot; it will
        // observe the NOTIFYING bit we just set and hand the waker off itself.)
        if state & (NOTIFYING | REGISTERING) == 0 {
            // Take the waker out.
            let waker = unsafe { (*self.awaiter.get()).take() };

            // Unset the bit indicating that the task is notifying its awaiter.
            self.state
                .fetch_and(!NOTIFYING & !AWAITER, Ordering::Release);

            // Finally, notify the waker if it's different from the current waker.
            if let Some(w) = waker {
                match current {
                    None => return Some(w),
                    Some(c) if !w.will_wake(c) => return Some(w),
                    // Same waker as the caller's: no point in waking it, but it
                    // still has to be dropped (inside abort_on_panic, since
                    // dropping a waker can run arbitrary code).
                    Some(_) => abort_on_panic(|| drop(w)),
                }
            }
        }
        None
    }

    /// Registers a new awaiter blocked on this task.
    ///
    /// This method is called when `Task` is polled and it has not yet completed.
    #[inline]
    pub(crate) fn register(&self, waker: &Waker) {
        // Load the state and synchronize with it.
        // (`fetch_or(0)` is a read-modify-write, which synchronizes with other
        // RMW operations on `state` more strongly than a plain `load` would.)
        let mut state = self.state.fetch_or(0, Ordering::Acquire);

        loop {
            // There can't be two concurrent registrations because `Task` can only be polled
            // by a unique pinned reference.
            debug_assert!(state & REGISTERING == 0);

            // If we're in the notifying state at this moment, just wake and return without
            // registering.
            if state & NOTIFYING != 0 {
                abort_on_panic(|| waker.wake_by_ref());
                return;
            }

            // Mark the state to let other threads know we're registering a new awaiter.
            match self.state.compare_exchange_weak(
                state,
                state | REGISTERING,
                Ordering::AcqRel,
                Ordering::Acquire,
            ) {
                Ok(_) => {
                    state |= REGISTERING;
                    break;
                }
                Err(s) => state = s,
            }
        }

        // Put the waker into the awaiter field.
        // Safe to write: we own the REGISTERING bit from the CAS above.
        unsafe {
            abort_on_panic(|| (*self.awaiter.get()) = Some(waker.clone()));
        }

        // This variable will contain the newly registered waker if a notification comes in before
        // we complete registration.
        let mut waker = None;

        loop {
            // If there was a notification, take the waker out of the awaiter field.
            if state & NOTIFYING != 0 {
                if let Some(w) = unsafe { (*self.awaiter.get()).take() } {
                    abort_on_panic(|| waker = Some(w));
                }
            }

            // The new state is not being notified nor registered, but there might or might not be
            // an awaiter depending on whether there was a concurrent notification.
            let new = if waker.is_none() {
                (state & !NOTIFYING & !REGISTERING) | AWAITER
            } else {
                state & !NOTIFYING & !REGISTERING & !AWAITER
            };

            match self
                .state
                .compare_exchange_weak(state, new, Ordering::AcqRel, Ordering::Acquire)
            {
                Ok(_) => break,
                Err(s) => state = s,
            }
        }

        // If there was a notification during registration, wake the awaiter now.
        if let Some(w) = waker {
            abort_on_panic(|| w.wake());
        }
    }
}
impl<M: fmt::Debug> fmt::Debug for Header<M> {
    /// Renders a snapshot of the task's state flags, reference count, and metadata.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Read the state word once and decode every flag from that snapshot.
        let snapshot = self.state.load(Ordering::SeqCst);
        let is_set = |bit: usize| snapshot & bit != 0;

        f.debug_struct("Header")
            .field("scheduled", &is_set(SCHEDULED))
            .field("running", &is_set(RUNNING))
            .field("completed", &is_set(COMPLETED))
            .field("closed", &is_set(CLOSED))
            .field("awaiter", &is_set(AWAITER))
            .field("task", &is_set(TASK))
            .field("ref_count", &(snapshot / REFERENCE))
            .field("metadata", &self.metadata)
            .finish()
    }
}

118
vendor/async-task/src/lib.rs vendored Normal file
View File

@@ -0,0 +1,118 @@
//! Task abstraction for building executors.
//!
//! To spawn a future onto an executor, we first need to allocate it on the heap and keep some
//! state attached to it. The state indicates whether the future is ready for polling, waiting to
//! be woken up, or completed. Such a stateful future is called a *task*.
//!
//! All executors have a queue that holds scheduled tasks:
//!
//! ```
//! let (sender, receiver) = flume::unbounded();
//! #
//! # // A future that will get spawned.
//! # let future = async { 1 + 2 };
//! #
//! # // A function that schedules the task when it gets woken up.
//! # let schedule = move |runnable| sender.send(runnable).unwrap();
//! #
//! # // Create a task.
//! # let (runnable, task) = async_task::spawn(future, schedule);
//! ```
//!
//! A task is created using either [`spawn()`], [`spawn_local()`], or [`spawn_unchecked()`] which
//! return a [`Runnable`] and a [`Task`]:
//!
//! ```
//! # let (sender, receiver) = flume::unbounded();
//! #
//! // A future that will be spawned.
//! let future = async { 1 + 2 };
//!
//! // A function that schedules the task when it gets woken up.
//! let schedule = move |runnable| sender.send(runnable).unwrap();
//!
//! // Construct a task.
//! let (runnable, task) = async_task::spawn(future, schedule);
//!
//! // Push the task into the queue by invoking its schedule function.
//! runnable.schedule();
//! ```
//!
//! The [`Runnable`] is used to poll the task's future, and the [`Task`] is used to await its
//! output.
//!
//! Finally, we need a loop that takes scheduled tasks from the queue and runs them:
//!
//! ```no_run
//! # let (sender, receiver) = flume::unbounded();
//! #
//! # // A future that will get spawned.
//! # let future = async { 1 + 2 };
//! #
//! # // A function that schedules the task when it gets woken up.
//! # let schedule = move |runnable| sender.send(runnable).unwrap();
//! #
//! # // Create a task.
//! # let (runnable, task) = async_task::spawn(future, schedule);
//! #
//! # // Push the task into the queue by invoking its schedule function.
//! # runnable.schedule();
//! #
//! for runnable in receiver {
//! runnable.run();
//! }
//! ```
//!
//! Method [`run()`][`Runnable::run()`] polls the task's future once. Then, the [`Runnable`]
//! vanishes and only reappears when its [`Waker`][`core::task::Waker`] wakes the task, thus
//! scheduling it to be run again.
#![no_std]
#![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)]
#![doc(test(attr(deny(rust_2018_idioms, warnings))))]
#![doc(test(attr(allow(unused_extern_crates, unused_variables))))]
#![doc(
html_favicon_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png"
)]
#![doc(
html_logo_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png"
)]
extern crate alloc;
#[cfg(feature = "std")]
extern crate std;
/// We can't use `?` in const contexts yet, so this macro acts
/// as a workaround.
///
/// Expands to the inner value of a `Some`, or returns `None` from the
/// enclosing function otherwise — i.e. `?` for `Option`, usable in `const fn`.
macro_rules! leap {
    ($x:expr) => {{
        if let Some(value) = $x {
            value
        } else {
            return None;
        }
    }};
}
/// `Option::unwrap` open-coded so it works in const contexts, panicking
/// with the exact same message as the standard method.
macro_rules! leap_unwrap {
    ($x:expr) => {{
        if let Some(value) = $x {
            value
        } else {
            panic!("called `Option::unwrap()` on a `None` value")
        }
    }};
}
mod header;
mod raw;
mod runnable;
mod state;
mod task;
mod utils;
pub use crate::runnable::{
spawn, spawn_unchecked, Builder, Runnable, Schedule, ScheduleInfo, WithInfo,
};
pub use crate::task::{FallibleTask, Task};
#[cfg(feature = "std")]
pub use crate::runnable::spawn_local;

756
vendor/async-task/src/raw.rs vendored Normal file
View File

@@ -0,0 +1,756 @@
use alloc::alloc::Layout as StdLayout;
use core::cell::UnsafeCell;
use core::future::Future;
use core::mem::{self, ManuallyDrop};
use core::pin::Pin;
use core::ptr::NonNull;
use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};
#[cfg(not(feature = "portable-atomic"))]
use core::sync::atomic::AtomicUsize;
use core::sync::atomic::Ordering;
#[cfg(feature = "portable-atomic")]
use portable_atomic::AtomicUsize;
use crate::header::Header;
use crate::runnable::{Schedule, ScheduleInfo};
use crate::state::*;
use crate::utils::{abort, abort_on_panic, max, Layout};
use crate::Runnable;
// With `std`, a captured panic payload (the boxed value produced by
// `std::panic::catch_unwind`) can be stored as the task's error.
#[cfg(feature = "std")]
pub(crate) type Panic = alloc::boxed::Box<dyn core::any::Any + Send + 'static>;

// Without `std` there is no way to catch an unwind, so the error type is
// uninhabited and `Result<T, Panic>` is effectively just `T`.
#[cfg(not(feature = "std"))]
pub(crate) type Panic = core::convert::Infallible;
/// The vtable for a task.
///
/// All function pointers take the raw pointer to the task allocation
/// (the same `*const ()` that `RawTask::from_ptr` decodes).
pub(crate) struct TaskVTable {
    /// Schedules the task.
    pub(crate) schedule: unsafe fn(*const (), ScheduleInfo),

    /// Drops the future inside the task.
    pub(crate) drop_future: unsafe fn(*const ()),

    /// Returns a pointer to the output stored after completion.
    pub(crate) get_output: unsafe fn(*const ()) -> *const (),

    /// Drops the task reference (`Runnable` or `Waker`).
    pub(crate) drop_ref: unsafe fn(ptr: *const ()),

    /// Destroys the task.
    pub(crate) destroy: unsafe fn(*const ()),

    /// Runs the task.
    pub(crate) run: unsafe fn(*const ()) -> bool,

    /// Creates a new waker associated with the task.
    pub(crate) clone_waker: unsafe fn(ptr: *const ()) -> RawWaker,

    /// The memory layout of the task. This information enables
    /// debuggers to decode raw task memory blobs. Do not remove
    /// the field, even if it appears to be unused.
    #[allow(unused)]
    pub(crate) layout_info: &'static TaskLayout,
}
/// Memory layout of a task.
///
/// This struct contains the following information:
///
/// 1. How to allocate and deallocate the task.
/// 2. How to access the fields inside the task.
///
/// Offsets are in bytes from the start of the allocation; the header
/// always sits at offset 0.
#[derive(Clone, Copy)]
pub(crate) struct TaskLayout {
    /// Memory layout of the whole task.
    pub(crate) layout: StdLayout,

    /// Offset into the task at which the schedule function is stored.
    pub(crate) offset_s: usize,

    /// Offset into the task at which the future is stored.
    pub(crate) offset_f: usize,

    /// Offset into the task at which the output is stored.
    pub(crate) offset_r: usize,
}
/// Raw pointers to the fields inside a task.
///
/// This is a non-owning view into a single task allocation; all four
/// pointers point into the same block of memory (see `from_ptr`).
pub(crate) struct RawTask<F, T, S, M> {
    /// The task header.
    pub(crate) header: *const Header<M>,

    /// The schedule function.
    pub(crate) schedule: *const S,

    /// The future.
    pub(crate) future: *mut F,

    /// The output of the future.
    ///
    /// `future` and `output` share storage (a union in the layout): the
    /// output is only written after the future has been dropped.
    pub(crate) output: *mut Result<T, Panic>,
}
// `Copy`/`Clone` are implemented manually rather than derived: a derive would
// add `F: Copy` (etc.) bounds, but `RawTask` only holds raw pointers, which
// are always trivially copyable.
impl<F, T, S, M> Copy for RawTask<F, T, S, M> {}

impl<F, T, S, M> Clone for RawTask<F, T, S, M> {
    fn clone(&self) -> Self {
        *self
    }
}
impl<F, T, S, M> RawTask<F, T, S, M> {
    // Computed once at compile time per (F, T, S, M) instantiation.
    const TASK_LAYOUT: TaskLayout = Self::eval_task_layout();

    /// Computes the memory layout for a task.
    #[inline]
    const fn eval_task_layout() -> TaskLayout {
        // Compute the layouts for `Header`, `S`, `F`, and `T`.
        let layout_header = Layout::new::<Header<M>>();
        let layout_s = Layout::new::<S>();
        let layout_f = Layout::new::<F>();
        let layout_r = Layout::new::<Result<T, Panic>>();

        // Compute the layout for `union { F, T }`.
        // The future and the output never exist at the same time, so they can
        // share storage sized/aligned for whichever is larger.
        let size_union = max(layout_f.size(), layout_r.size());
        let align_union = max(layout_f.align(), layout_r.align());
        let layout_union = Layout::from_size_align(size_union, align_union);

        // Compute the layout for `Header` followed by `S` and `union { F, T }`.
        let layout = layout_header;
        let (layout, offset_s) = leap_unwrap!(layout.extend(layout_s));
        let (layout, offset_union) = leap_unwrap!(layout.extend(layout_union));
        let offset_f = offset_union;
        let offset_r = offset_union;

        TaskLayout {
            layout: unsafe { layout.into_std() },
            offset_s,
            offset_f,
            offset_r,
        }
    }
}
impl<F, T, S, M> RawTask<F, T, S, M>
where
F: Future<Output = T>,
S: Schedule<M>,
{
    // The waker vtable handed to `RawWaker::new`; each entry is one of the
    // monomorphized functions below.
    const RAW_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(
        Self::clone_waker,
        Self::wake,
        Self::wake_by_ref,
        Self::drop_waker,
    );
/// Allocates a task with the given `future` and `schedule` function.
///
/// It is assumed that initially only the `Runnable` and the `Task` exist.
///
/// The `future` generator is called after the header (and thus the metadata)
/// has been written, so the future may borrow the metadata for `'a`.
/// Aborts the process if allocation fails.
pub(crate) fn allocate<'a, Gen: FnOnce(&'a M) -> F>(
    future: Gen,
    schedule: S,
    builder: crate::Builder<M>,
) -> NonNull<()>
where
    F: 'a,
    M: 'a,
{
    // Compute the layout of the task for allocation. Abort if the computation fails.
    //
    // n.b. notgull: task_layout now automatically aborts instead of panicking
    let task_layout = Self::task_layout();

    unsafe {
        // Allocate enough space for the entire task.
        let ptr = match NonNull::new(alloc::alloc::alloc(task_layout.layout) as *mut ()) {
            None => abort(),
            Some(p) => p,
        };

        let raw = Self::from_ptr(ptr.as_ptr());

        let crate::Builder {
            metadata,
            #[cfg(feature = "std")]
            propagate_panic,
        } = builder;

        // Write the header as the first field of the task.
        // Initial state: SCHEDULED (so the first `run` call polls it), TASK
        // (the `Task` handle exists), and one reference (the `Runnable`).
        (raw.header as *mut Header<M>).write(Header {
            state: AtomicUsize::new(SCHEDULED | TASK | REFERENCE),
            awaiter: UnsafeCell::new(None),
            vtable: &TaskVTable {
                schedule: Self::schedule,
                drop_future: Self::drop_future,
                get_output: Self::get_output,
                drop_ref: Self::drop_ref,
                destroy: Self::destroy,
                run: Self::run,
                clone_waker: Self::clone_waker,
                layout_info: &Self::TASK_LAYOUT,
            },
            metadata,
            #[cfg(feature = "std")]
            propagate_panic,
        });

        // Write the schedule function as the third field of the task.
        (raw.schedule as *mut S).write(schedule);

        // Generate the future, now that the metadata has been pinned in place.
        let future = abort_on_panic(|| future(&(*raw.header).metadata));

        // Write the future as the fourth field of the task.
        raw.future.write(future);

        ptr
    }
}
/// Creates a `RawTask` from a raw task pointer.
///
/// Decodes the field pointers by adding the precomputed layout offsets to
/// the base address of the allocation. Pure pointer arithmetic; does not
/// dereference `ptr`.
#[inline]
pub(crate) fn from_ptr(ptr: *const ()) -> Self {
    let task_layout = Self::task_layout();
    let p = ptr as *const u8;

    unsafe {
        Self {
            header: p as *const Header<M>,
            schedule: p.add(task_layout.offset_s) as *const S,
            future: p.add(task_layout.offset_f) as *mut F,
            output: p.add(task_layout.offset_r) as *mut Result<T, Panic>,
        }
    }
}
/// Returns the layout of the task.
///
/// Thin accessor over the compile-time constant `TASK_LAYOUT`.
#[inline]
fn task_layout() -> TaskLayout {
    Self::TASK_LAYOUT
}
/// Wakes a waker.
///
/// Consumes the waker reference: every path below either drops it
/// explicitly or transfers it to the schedule function.
unsafe fn wake(ptr: *const ()) {
    // This is just an optimization. If the schedule function has captured variables, then
    // we'll do less reference counting if we wake the waker by reference and then drop it.
    if mem::size_of::<S>() > 0 {
        Self::wake_by_ref(ptr);
        Self::drop_waker(ptr);
        return;
    }

    let raw = Self::from_ptr(ptr);

    let mut state = (*raw.header).state.load(Ordering::Acquire);

    loop {
        // If the task is completed or closed, it can't be woken up.
        if state & (COMPLETED | CLOSED) != 0 {
            // Drop the waker.
            Self::drop_waker(ptr);
            break;
        }

        // If the task is already scheduled, we just need to synchronize with the thread that
        // will run the task by "publishing" our current view of the memory.
        if state & SCHEDULED != 0 {
            // Update the state without actually modifying it.
            // (A no-op CAS is still a release-acquire handshake.)
            match (*raw.header).state.compare_exchange_weak(
                state,
                state,
                Ordering::AcqRel,
                Ordering::Acquire,
            ) {
                Ok(_) => {
                    // Drop the waker.
                    Self::drop_waker(ptr);
                    break;
                }
                Err(s) => state = s,
            }
        } else {
            // Mark the task as scheduled.
            match (*raw.header).state.compare_exchange_weak(
                state,
                state | SCHEDULED,
                Ordering::AcqRel,
                Ordering::Acquire,
            ) {
                Ok(_) => {
                    // If the task is not yet scheduled and isn't currently running, now is the
                    // time to schedule it.
                    if state & RUNNING == 0 {
                        // Schedule the task.
                        Self::schedule(ptr, ScheduleInfo::new(false));
                    } else {
                        // Drop the waker.
                        // (The running thread will observe SCHEDULED and
                        // reschedule the task itself when it finishes polling.)
                        Self::drop_waker(ptr);
                    }
                    break;
                }
                Err(s) => state = s,
            }
        }
    }
}
/// Wakes a waker by reference.
///
/// Unlike `wake`, this does not consume a reference; if the task must be
/// scheduled, a fresh reference is minted (the `+ REFERENCE` below) and
/// transferred to the new `Runnable`.
unsafe fn wake_by_ref(ptr: *const ()) {
    let raw = Self::from_ptr(ptr);

    let mut state = (*raw.header).state.load(Ordering::Acquire);

    loop {
        // If the task is completed or closed, it can't be woken up.
        if state & (COMPLETED | CLOSED) != 0 {
            break;
        }

        // If the task is already scheduled, we just need to synchronize with the thread that
        // will run the task by "publishing" our current view of the memory.
        if state & SCHEDULED != 0 {
            // Update the state without actually modifying it.
            match (*raw.header).state.compare_exchange_weak(
                state,
                state,
                Ordering::AcqRel,
                Ordering::Acquire,
            ) {
                Ok(_) => break,
                Err(s) => state = s,
            }
        } else {
            // If the task is not running, we can schedule right away.
            let new = if state & RUNNING == 0 {
                // Scheduling creates a new `Runnable`, which owns a reference.
                (state | SCHEDULED) + REFERENCE
            } else {
                state | SCHEDULED
            };

            // Mark the task as scheduled.
            match (*raw.header).state.compare_exchange_weak(
                state,
                new,
                Ordering::AcqRel,
                Ordering::Acquire,
            ) {
                Ok(_) => {
                    // If the task is not running, now is the time to schedule.
                    if state & RUNNING == 0 {
                        // If the reference count overflowed, abort.
                        if state > isize::MAX as usize {
                            abort();
                        }

                        // Schedule the task. There is no need to call `Self::schedule(ptr)`
                        // because the schedule function cannot be destroyed while the waker is
                        // still alive.
                        let task = Runnable::from_raw(NonNull::new_unchecked(ptr as *mut ()));
                        (*raw.schedule).schedule(task, ScheduleInfo::new(false));
                    }
                    break;
                }
                Err(s) => state = s,
            }
        }
    }
}
/// Clones a waker.
///
/// Bumps the reference count and returns a new `RawWaker` sharing the same
/// task allocation.
unsafe fn clone_waker(ptr: *const ()) -> RawWaker {
    let raw = Self::from_ptr(ptr);

    // Increment the reference count. With any kind of reference-counted data structure,
    // relaxed ordering is appropriate when incrementing the counter.
    let state = (*raw.header).state.fetch_add(REFERENCE, Ordering::Relaxed);

    // If the reference count overflowed, abort.
    if state > isize::MAX as usize {
        abort();
    }

    RawWaker::new(ptr, &Self::RAW_WAKER_VTABLE)
}
/// Drops a waker.
///
/// This function will decrement the reference count. If it drops down to zero, the associated
/// `Task` has been dropped too, and the task has not been completed, then it will get
/// scheduled one more time so that its future gets dropped by the executor.
#[inline]
unsafe fn drop_waker(ptr: *const ()) {
    let raw = Self::from_ptr(ptr);

    // Decrement the reference count.
    let new = (*raw.header).state.fetch_sub(REFERENCE, Ordering::AcqRel) - REFERENCE;

    // If this was the last reference to the task and the `Task` has been dropped too,
    // then we need to decide how to destroy the task.
    // (`new & !(REFERENCE - 1)` masks off the flag bits below the count.)
    if new & !(REFERENCE - 1) == 0 && new & TASK == 0 {
        if new & (COMPLETED | CLOSED) == 0 {
            // If the task was not completed nor closed, close it and schedule one more time so
            // that its future gets dropped by the executor.
            // A plain store is fine: we hold the only remaining reference.
            (*raw.header)
                .state
                .store(SCHEDULED | CLOSED | REFERENCE, Ordering::Release);
            Self::schedule(ptr, ScheduleInfo::new(false));
        } else {
            // Otherwise, destroy the task right away.
            Self::destroy(ptr);
        }
    }
}
/// Drops a task reference (`Runnable` or `Waker`).
///
/// This function will decrement the reference count. If it drops down to zero and the
/// associated `Task` handle has been dropped too, then the task gets destroyed.
#[inline]
unsafe fn drop_ref(ptr: *const ()) {
    let raw = Self::from_ptr(ptr);

    // Decrement the reference count.
    let new = (*raw.header).state.fetch_sub(REFERENCE, Ordering::AcqRel) - REFERENCE;

    // If this was the last reference to the task and the `Task` has been dropped too,
    // then destroy the task.
    if new & !(REFERENCE - 1) == 0 && new & TASK == 0 {
        Self::destroy(ptr);
    }
}
/// Schedules a task for running.
///
/// This function doesn't modify the state of the task. It only passes the task reference to
/// its schedule function.
unsafe fn schedule(ptr: *const (), info: ScheduleInfo) {
    let raw = Self::from_ptr(ptr);

    // If the schedule function has captured variables, create a temporary waker that prevents
    // the task from getting deallocated while the function is being invoked.
    // (Dropped at the end of this function, after `schedule` returns.)
    let _waker;
    if mem::size_of::<S>() > 0 {
        _waker = Waker::from_raw(Self::clone_waker(ptr));
    }

    let task = Runnable::from_raw(NonNull::new_unchecked(ptr as *mut ()));
    (*raw.schedule).schedule(task, info);
}
/// Drops the future inside a task.
///
/// After this the future/output slot is uninitialized until `run` writes
/// the output into it.
#[inline]
unsafe fn drop_future(ptr: *const ()) {
    let raw = Self::from_ptr(ptr);

    // We need a safeguard against panics because the destructor can panic.
    abort_on_panic(|| {
        raw.future.drop_in_place();
    })
}
/// Returns a pointer to the output inside a task.
///
/// The pointee is only valid once the task has completed and written its
/// output (see `run`).
unsafe fn get_output(ptr: *const ()) -> *const () {
    let raw = Self::from_ptr(ptr);
    raw.output as *const ()
}
/// Cleans up task's resources and deallocates it.
///
/// The schedule function will be dropped, and the task will then get deallocated.
/// The task must be closed before this function is called.
///
/// Note: the future/output slot is NOT dropped here — by the time the task
/// is destroyed it must already have been cleaned up by `run`/`drop_waker`.
#[inline]
unsafe fn destroy(ptr: *const ()) {
    let raw = Self::from_ptr(ptr);
    let task_layout = Self::task_layout();

    // We need a safeguard against panics because destructors can panic.
    abort_on_panic(|| {
        // Drop the header along with the metadata.
        (raw.header as *mut Header<M>).drop_in_place();

        // Drop the schedule function.
        (raw.schedule as *mut S).drop_in_place();
    });

    // Finally, deallocate the memory reserved by the task.
    alloc::alloc::dealloc(ptr as *mut u8, task_layout.layout);
}
/// Runs a task.
///
/// If polling its future panics, the task will be closed and the panic will be propagated into
/// the caller.
///
/// Returns `true` only when the task was woken while running and has been
/// rescheduled by this call; all other paths return `false`.
unsafe fn run(ptr: *const ()) -> bool {
    let raw = Self::from_ptr(ptr);

    // Create a context from the raw task pointer and the vtable inside its header.
    // `ManuallyDrop` because this waker borrows the `Runnable`'s reference
    // rather than owning one of its own.
    let waker = ManuallyDrop::new(Waker::from_raw(RawWaker::new(ptr, &Self::RAW_WAKER_VTABLE)));
    let cx = &mut Context::from_waker(&waker);

    let mut state = (*raw.header).state.load(Ordering::Acquire);

    // Update the task's state before polling its future.
    loop {
        // If the task has already been closed, drop the task reference and return.
        if state & CLOSED != 0 {
            // Drop the future.
            Self::drop_future(ptr);

            // Mark the task as unscheduled.
            let state = (*raw.header).state.fetch_and(!SCHEDULED, Ordering::AcqRel);

            // Take the awaiter out.
            let mut awaiter = None;
            if state & AWAITER != 0 {
                awaiter = (*raw.header).take(None);
            }

            // Drop the task reference.
            Self::drop_ref(ptr);

            // Notify the awaiter that the future has been dropped.
            if let Some(w) = awaiter {
                abort_on_panic(|| w.wake());
            }
            return false;
        }

        // Mark the task as unscheduled and running.
        match (*raw.header).state.compare_exchange_weak(
            state,
            (state & !SCHEDULED) | RUNNING,
            Ordering::AcqRel,
            Ordering::Acquire,
        ) {
            Ok(_) => {
                // Update the state because we're continuing with polling the future.
                state = (state & !SCHEDULED) | RUNNING;
                break;
            }
            Err(s) => state = s,
        }
    }

    // Poll the inner future, but surround it with a guard that closes the task in case polling
    // panics.
    // If available, we should also try to catch the panic so that it is propagated correctly.
    let guard = Guard(raw);

    // Panic propagation is not available for no_std.
    #[cfg(not(feature = "std"))]
    let poll = <F as Future>::poll(Pin::new_unchecked(&mut *raw.future), cx).map(Ok);

    #[cfg(feature = "std")]
    let poll = {
        // Check if we should propagate panics.
        if (*raw.header).propagate_panic {
            // Use catch_unwind to catch the panic.
            match std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
                <F as Future>::poll(Pin::new_unchecked(&mut *raw.future), cx)
            })) {
                Ok(Poll::Ready(v)) => Poll::Ready(Ok(v)),
                Ok(Poll::Pending) => Poll::Pending,
                Err(e) => Poll::Ready(Err(e)),
            }
        } else {
            <F as Future>::poll(Pin::new_unchecked(&mut *raw.future), cx).map(Ok)
        }
    };

    // Polling did not panic (or the panic was caught): disarm the guard.
    mem::forget(guard);

    match poll {
        Poll::Ready(out) => {
            // Replace the future with its output.
            Self::drop_future(ptr);
            raw.output.write(out);

            // The task is now completed.
            loop {
                // If the `Task` is dropped, we'll need to close it and drop the output.
                let new = if state & TASK == 0 {
                    (state & !RUNNING & !SCHEDULED) | COMPLETED | CLOSED
                } else {
                    (state & !RUNNING & !SCHEDULED) | COMPLETED
                };

                // Mark the task as not running and completed.
                match (*raw.header).state.compare_exchange_weak(
                    state,
                    new,
                    Ordering::AcqRel,
                    Ordering::Acquire,
                ) {
                    Ok(_) => {
                        // If the `Task` is dropped or if the task was closed while running,
                        // now it's time to drop the output.
                        if state & TASK == 0 || state & CLOSED != 0 {
                            // Drop the output.
                            abort_on_panic(|| raw.output.drop_in_place());
                        }

                        // Take the awaiter out.
                        let mut awaiter = None;
                        if state & AWAITER != 0 {
                            awaiter = (*raw.header).take(None);
                        }

                        // Drop the task reference.
                        Self::drop_ref(ptr);

                        // Notify the awaiter that the future has been dropped.
                        if let Some(w) = awaiter {
                            abort_on_panic(|| w.wake());
                        }
                        break;
                    }
                    Err(s) => state = s,
                }
            }
        }
        Poll::Pending => {
            let mut future_dropped = false;

            // The task is still not completed.
            loop {
                // If the task was closed while running, we'll need to unschedule in case it
                // was woken up and then destroy it.
                let new = if state & CLOSED != 0 {
                    state & !RUNNING & !SCHEDULED
                } else {
                    state & !RUNNING
                };

                if state & CLOSED != 0 && !future_dropped {
                    // The thread that closed the task didn't drop the future because it was
                    // running so now it's our responsibility to do so.
                    Self::drop_future(ptr);
                    future_dropped = true;
                }

                // Mark the task as not running.
                match (*raw.header).state.compare_exchange_weak(
                    state,
                    new,
                    Ordering::AcqRel,
                    Ordering::Acquire,
                ) {
                    // Note: `state` here shadows the outer variable with the
                    // value observed by the successful exchange.
                    Ok(state) => {
                        // If the task was closed while running, we need to notify the awaiter.
                        // If the task was woken up while running, we need to schedule it.
                        // Otherwise, we just drop the task reference.
                        if state & CLOSED != 0 {
                            // Take the awaiter out.
                            let mut awaiter = None;
                            if state & AWAITER != 0 {
                                awaiter = (*raw.header).take(None);
                            }

                            // Drop the task reference.
                            Self::drop_ref(ptr);

                            // Notify the awaiter that the future has been dropped.
                            if let Some(w) = awaiter {
                                abort_on_panic(|| w.wake());
                            }
                        } else if state & SCHEDULED != 0 {
                            // The thread that woke the task up didn't reschedule it because
                            // it was running so now it's our responsibility to do so.
                            Self::schedule(ptr, ScheduleInfo::new(true));
                            return true;
                        } else {
                            // Drop the task reference.
                            Self::drop_ref(ptr);
                        }
                        break;
                    }
                    Err(s) => state = s,
                }
            }
        }
    }

    // Reached when the task completed or went pending without a wake-while-running.
    return false;

    /// A guard that closes the task if polling its future panics.
    struct Guard<F, T, S, M>(RawTask<F, T, S, M>)
    where
        F: Future<Output = T>,
        S: Schedule<M>;

    impl<F, T, S, M> Drop for Guard<F, T, S, M>
    where
        F: Future<Output = T>,
        S: Schedule<M>,
    {
        fn drop(&mut self) {
            let raw = self.0;
            let ptr = raw.header as *const ();

            unsafe {
                let mut state = (*raw.header).state.load(Ordering::Acquire);

                loop {
                    // If the task was closed while running, then unschedule it, drop its
                    // future, and drop the task reference.
                    if state & CLOSED != 0 {
                        // The thread that closed the task didn't drop the future because it
                        // was running so now it's our responsibility to do so.
                        RawTask::<F, T, S, M>::drop_future(ptr);

                        // Mark the task as not running and not scheduled.
                        (*raw.header)
                            .state
                            .fetch_and(!RUNNING & !SCHEDULED, Ordering::AcqRel);

                        // Take the awaiter out.
                        let mut awaiter = None;
                        if state & AWAITER != 0 {
                            awaiter = (*raw.header).take(None);
                        }

                        // Drop the task reference.
                        RawTask::<F, T, S, M>::drop_ref(ptr);

                        // Notify the awaiter that the future has been dropped.
                        if let Some(w) = awaiter {
                            abort_on_panic(|| w.wake());
                        }
                        break;
                    }

                    // Mark the task as not running, not scheduled, and closed.
                    match (*raw.header).state.compare_exchange_weak(
                        state,
                        (state & !RUNNING & !SCHEDULED) | CLOSED,
                        Ordering::AcqRel,
                        Ordering::Acquire,
                    ) {
                        Ok(state) => {
                            // Drop the future because the task is now closed.
                            RawTask::<F, T, S, M>::drop_future(ptr);

                            // Take the awaiter out.
                            let mut awaiter = None;
                            if state & AWAITER != 0 {
                                awaiter = (*raw.header).take(None);
                            }

                            // Drop the task reference.
                            RawTask::<F, T, S, M>::drop_ref(ptr);

                            // Notify the awaiter that the future has been dropped.
                            if let Some(w) = awaiter {
                                abort_on_panic(|| w.wake());
                            }
                            break;
                        }
                        Err(s) => state = s,
                    }
                }
            }
        }
    }
}

945
vendor/async-task/src/runnable.rs vendored Normal file
View File

@@ -0,0 +1,945 @@
use core::fmt;
use core::future::Future;
use core::marker::PhantomData;
use core::mem;
use core::ptr::NonNull;
use core::sync::atomic::Ordering;
use core::task::Waker;
use alloc::boxed::Box;
use crate::header::Header;
use crate::raw::RawTask;
use crate::state::*;
use crate::Task;
// Private module implementing the sealed-trait pattern: `Schedule` can only
// be implemented for the closure shapes blessed here, never by downstream
// crates.
mod sealed {
    use super::*;
    pub trait Sealed<M> {}
    impl<M, F> Sealed<M> for F where F: Fn(Runnable<M>) {}
    impl<M, F> Sealed<M> for WithInfo<F> where F: Fn(Runnable<M>, ScheduleInfo) {}
}
/// A builder that creates a new task.
#[derive(Debug)]
pub struct Builder<M> {
    /// The metadata associated with the task.
    pub(crate) metadata: M,

    /// Whether or not a panic that occurs in the task should be propagated.
    ///
    /// Only exists with the `std` feature, since catching panics requires
    /// `std::panic::catch_unwind`.
    #[cfg(feature = "std")]
    pub(crate) propagate_panic: bool,
}
impl<M: Default> Default for Builder<M> {
    // Delegates to `new()` so any future default fields stay consistent with
    // the primary constructor.
    fn default() -> Self {
        Builder::new().metadata(M::default())
    }
}
/// Extra scheduling information that can be passed to the scheduling function.
///
/// The data source of this struct is directly from the actual implementation
/// of the crate itself, different from [`Runnable`]'s metadata, which is
/// managed by the caller.
///
/// # Examples
///
/// ```
/// use async_task::{Runnable, ScheduleInfo, WithInfo};
/// use std::sync::{Arc, Mutex};
///
/// // The future inside the task.
/// let future = async {
/// println!("Hello, world!");
/// };
///
/// // If the task gets woken up while running, it will be sent into this channel.
/// let (s, r) = flume::unbounded();
/// // Otherwise, it will be placed into this slot.
/// let lifo_slot = Arc::new(Mutex::new(None));
/// let schedule = move |runnable: Runnable, info: ScheduleInfo| {
/// if info.woken_while_running {
/// s.send(runnable).unwrap()
/// } else {
/// let last = lifo_slot.lock().unwrap().replace(runnable);
/// if let Some(last) = last {
/// s.send(last).unwrap()
/// }
/// }
/// };
///
/// // Create the actual scheduler to be spawned with some future.
/// let scheduler = WithInfo(schedule);
/// // Create a task with the future and the scheduler.
/// let (runnable, task) = async_task::spawn(future, scheduler);
/// ```
#[derive(Debug, Copy, Clone)]
// `non_exhaustive` so more scheduling data can be added later without a
// breaking change.
#[non_exhaustive]
pub struct ScheduleInfo {
    /// Indicates whether the task gets woken up while running.
    ///
    /// It is set to true usually because the task has yielded itself to the
    /// scheduler.
    pub woken_while_running: bool,
}

impl ScheduleInfo {
    /// Packs the "woken while running" flag into a `ScheduleInfo`.
    pub(crate) fn new(woken_while_running: bool) -> Self {
        Self {
            woken_while_running,
        }
    }
}
/// The trait for scheduling functions.
///
/// Sealed: only the two impls in this module (plain closures and
/// `WithInfo`-wrapped closures) can exist.
pub trait Schedule<M = ()>: sealed::Sealed<M> {
    /// The actual scheduling procedure.
    fn schedule(&self, runnable: Runnable<M>, info: ScheduleInfo);
}
impl<M, F> Schedule<M> for F
where
    F: Fn(Runnable<M>),
{
    // Plain closures don't take scheduling info; it is simply discarded.
    fn schedule(&self, runnable: Runnable<M>, _: ScheduleInfo) {
        self(runnable)
    }
}
/// Pass a scheduling function with more scheduling information - a.k.a.
/// [`ScheduleInfo`].
///
/// Sometimes, it's useful to pass the runnable's state directly to the
/// scheduling function, such as whether it's woken up while running. The
/// scheduler can thus use the information to determine its scheduling
/// strategy.
///
/// The data source of [`ScheduleInfo`] is directly from the actual
/// implementation of the crate itself, different from [`Runnable`]'s metadata,
/// which is managed by the caller.
///
/// # Examples
///
/// ```
/// use async_task::{ScheduleInfo, WithInfo};
/// use std::sync::{Arc, Mutex};
///
/// // The future inside the task.
/// let future = async {
///     println!("Hello, world!");
/// };
///
/// // If the task gets woken up while running, it will be sent into this channel.
/// let (s, r) = flume::unbounded();
/// // Otherwise, it will be placed into this slot.
/// let lifo_slot = Arc::new(Mutex::new(None));
/// let schedule = move |runnable, info: ScheduleInfo| {
///     if info.woken_while_running {
///         s.send(runnable).unwrap()
///     } else {
///         let last = lifo_slot.lock().unwrap().replace(runnable);
///         if let Some(last) = last {
///             s.send(last).unwrap()
///         }
///     }
/// };
///
/// // Create a task with the future and the schedule function.
/// let (runnable, task) = async_task::spawn(future, WithInfo(schedule));
/// ```
#[derive(Debug)]
// Newtype wrapper; the inner function is public so callers can extract it.
pub struct WithInfo<F>(pub F);
impl<F> From<F> for WithInfo<F> {
fn from(value: F) -> Self {
WithInfo(value)
}
}
impl<M, F> Schedule<M> for WithInfo<F>
where
F: Fn(Runnable<M>, ScheduleInfo),
{
fn schedule(&self, runnable: Runnable<M>, info: ScheduleInfo) {
(self.0)(runnable, info)
}
}
impl Builder<()> {
    /// Creates a new task builder.
    ///
    /// By default, this task builder has no metadata. Use the [`metadata`] method to
    /// set the metadata.
    ///
    /// # Examples
    ///
    /// ```
    /// use async_task::Builder;
    ///
    /// let (runnable, task) = Builder::new().spawn(|()| async {}, |_| {});
    /// ```
    pub fn new() -> Builder<()> {
        Builder {
            metadata: (),
            // Panics inside the task are not propagated by default.
            #[cfg(feature = "std")]
            propagate_panic: false,
        }
    }

    /// Adds metadata to the task.
    ///
    /// In certain cases, it may be useful to associate some metadata with a task. For instance,
    /// you may want to associate a name with a task, or a priority for a priority queue. This
    /// method allows the user to attach arbitrary metadata to a task that is available through
    /// the [`Runnable`] or the [`Task`].
    ///
    /// # Examples
    ///
    /// This example creates an executor that associates a "priority" number with each task, and
    /// then runs the tasks in order of priority.
    ///
    /// ```
    /// use async_task::{Builder, Runnable};
    /// use once_cell::sync::Lazy;
    /// use std::cmp;
    /// use std::collections::BinaryHeap;
    /// use std::sync::Mutex;
    ///
    /// # smol::future::block_on(async {
    /// /// A wrapper around a `Runnable<usize>` that implements `Ord` so that it can be used in a
    /// /// priority queue.
    /// struct TaskWrapper(Runnable<usize>);
    ///
    /// impl PartialEq for TaskWrapper {
    ///     fn eq(&self, other: &Self) -> bool {
    ///         self.0.metadata() == other.0.metadata()
    ///     }
    /// }
    ///
    /// impl Eq for TaskWrapper {}
    ///
    /// impl PartialOrd for TaskWrapper {
    ///     fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
    ///         Some(self.cmp(other))
    ///     }
    /// }
    ///
    /// impl Ord for TaskWrapper {
    ///     fn cmp(&self, other: &Self) -> cmp::Ordering {
    ///         self.0.metadata().cmp(other.0.metadata())
    ///     }
    /// }
    ///
    /// static EXECUTOR: Lazy<Mutex<BinaryHeap<TaskWrapper>>> = Lazy::new(|| {
    ///     Mutex::new(BinaryHeap::new())
    /// });
    ///
    /// let schedule = |runnable| {
    ///     EXECUTOR.lock().unwrap().push(TaskWrapper(runnable));
    /// };
    ///
    /// // Spawn a few tasks with different priorities.
    /// let spawn_task = move |priority| {
    ///     let (runnable, task) = Builder::new().metadata(priority).spawn(
    ///         move |_| async move { priority },
    ///         schedule,
    ///     );
    ///     runnable.schedule();
    ///     task
    /// };
    ///
    /// let t1 = spawn_task(1);
    /// let t2 = spawn_task(2);
    /// let t3 = spawn_task(3);
    ///
    /// // Run the tasks in order of priority.
    /// let mut metadata_seen = vec![];
    /// while let Some(TaskWrapper(runnable)) = EXECUTOR.lock().unwrap().pop() {
    ///     metadata_seen.push(*runnable.metadata());
    ///     runnable.run();
    /// }
    ///
    /// assert_eq!(metadata_seen, vec![3, 2, 1]);
    /// assert_eq!(t1.await, 1);
    /// assert_eq!(t2.await, 2);
    /// assert_eq!(t3.await, 3);
    /// # });
    /// ```
    pub fn metadata<M>(self, metadata: M) -> Builder<M> {
        // Only the metadata type parameter changes; the panic-propagation
        // setting carries over unchanged.
        Builder {
            metadata,
            #[cfg(feature = "std")]
            propagate_panic: self.propagate_panic,
        }
    }
}
impl<M> Builder<M> {
    /// Propagates panics that occur in the task.
    ///
    /// When this is `true`, panics that occur in the task will be propagated to the caller of
    /// the [`Task`]. When this is false, no special action is taken when a panic occurs in the
    /// task, meaning that the caller of [`Runnable::run`] will observe a panic.
    ///
    /// This is only available when the `std` feature is enabled. By default, this is `false`.
    ///
    /// # Examples
    ///
    /// ```
    /// use async_task::Builder;
    /// use futures_lite::future::poll_fn;
    /// use std::future::Future;
    /// use std::panic;
    /// use std::pin::Pin;
    /// use std::task::{Context, Poll};
    ///
    /// fn did_panic<F: FnOnce()>(f: F) -> bool {
    ///     panic::catch_unwind(panic::AssertUnwindSafe(f)).is_err()
    /// }
    ///
    /// # smol::future::block_on(async {
    /// let (runnable1, mut task1) = Builder::new()
    ///     .propagate_panic(true)
    ///     .spawn(|()| async move { panic!() }, |_| {});
    ///
    /// let (runnable2, mut task2) = Builder::new()
    ///     .propagate_panic(false)
    ///     .spawn(|()| async move { panic!() }, |_| {});
    ///
    /// assert!(!did_panic(|| { runnable1.run(); }));
    /// assert!(did_panic(|| { runnable2.run(); }));
    ///
    /// let waker = poll_fn(|cx| Poll::Ready(cx.waker().clone())).await;
    /// let mut cx = Context::from_waker(&waker);
    /// assert!(did_panic(|| { let _ = Pin::new(&mut task1).poll(&mut cx); }));
    /// assert!(did_panic(|| { let _ = Pin::new(&mut task2).poll(&mut cx); }));
    /// # });
    /// ```
    #[cfg(feature = "std")]
    pub fn propagate_panic(self, propagate_panic: bool) -> Builder<M> {
        Builder {
            metadata: self.metadata,
            propagate_panic,
        }
    }

    /// Creates a new task.
    ///
    /// The returned [`Runnable`] is used to poll the `future`, and the [`Task`] is used to await its
    /// output.
    ///
    /// Method [`run()`][`Runnable::run()`] polls the task's future once. Then, the [`Runnable`]
    /// vanishes and only reappears when its [`Waker`] wakes the task, thus scheduling it to be run
    /// again.
    ///
    /// When the task is woken, its [`Runnable`] is passed to the `schedule` function.
    /// The `schedule` function should not attempt to run the [`Runnable`] nor to drop it. Instead, it
    /// should push it into a task queue so that it can be processed later.
    ///
    /// If you need to spawn a future that does not implement [`Send`] or isn't `'static`, consider
    /// using [`spawn_local()`] or [`spawn_unchecked()`] instead.
    ///
    /// # Examples
    ///
    /// ```
    /// use async_task::Builder;
    ///
    /// // The future inside the task.
    /// let future = async {
    ///     println!("Hello, world!");
    /// };
    ///
    /// // A function that schedules the task when it gets woken up.
    /// let (s, r) = flume::unbounded();
    /// let schedule = move |runnable| s.send(runnable).unwrap();
    ///
    /// // Create a task with the future and the schedule function.
    /// let (runnable, task) = Builder::new().spawn(|()| future, schedule);
    /// ```
    pub fn spawn<F, Fut, S>(self, future: F, schedule: S) -> (Runnable<M>, Task<Fut::Output, M>)
    where
        F: FnOnce(&M) -> Fut,
        Fut: Future + Send + 'static,
        Fut::Output: Send + 'static,
        S: Schedule<M> + Send + Sync + 'static,
    {
        // SAFETY: the `Send`, `Sync` and `'static` bounds above satisfy every
        // condition listed in `spawn_unchecked`'s safety contract.
        unsafe { self.spawn_unchecked(future, schedule) }
    }

    /// Creates a new thread-local task.
    ///
    /// This function is same as [`spawn()`], except it does not require [`Send`] on `future`. If the
    /// [`Runnable`] is used or dropped on another thread, a panic will occur.
    ///
    /// This function is only available when the `std` feature for this crate is enabled.
    ///
    /// # Examples
    ///
    /// ```
    /// use async_task::{Builder, Runnable};
    /// use flume::{Receiver, Sender};
    /// use std::rc::Rc;
    ///
    /// thread_local! {
    ///     // A queue that holds scheduled tasks.
    ///     static QUEUE: (Sender<Runnable>, Receiver<Runnable>) = flume::unbounded();
    /// }
    ///
    /// // Make a non-Send future.
    /// let msg: Rc<str> = "Hello, world!".into();
    /// let future = async move {
    ///     println!("{}", msg);
    /// };
    ///
    /// // A function that schedules the task when it gets woken up.
    /// let s = QUEUE.with(|(s, _)| s.clone());
    /// let schedule = move |runnable| s.send(runnable).unwrap();
    ///
    /// // Create a task with the future and the schedule function.
    /// let (runnable, task) = Builder::new().spawn_local(move |()| future, schedule);
    /// ```
    #[cfg(feature = "std")]
    pub fn spawn_local<F, Fut, S>(
        self,
        future: F,
        schedule: S,
    ) -> (Runnable<M>, Task<Fut::Output, M>)
    where
        F: FnOnce(&M) -> Fut,
        Fut: Future + 'static,
        Fut::Output: 'static,
        S: Schedule<M> + Send + Sync + 'static,
    {
        use std::mem::ManuallyDrop;
        use std::pin::Pin;
        use std::task::{Context, Poll};
        use std::thread::{self, ThreadId};

        /// Returns the current thread's ID, cached in a thread-local.
        ///
        /// Falls back to querying the thread directly if the thread-local is
        /// unavailable (e.g. while it is being destroyed during thread exit).
        #[inline]
        fn thread_id() -> ThreadId {
            std::thread_local! {
                static ID: ThreadId = thread::current().id();
            }
            ID.try_with(|id| *id)
                .unwrap_or_else(|_| thread::current().id())
        }

        /// A future wrapper that asserts it is polled and dropped only on the
        /// thread that created it.
        struct Checked<F> {
            // The thread the future was created on.
            id: ThreadId,
            // `ManuallyDrop` so that if the thread check below panics, the
            // inner future is leaked rather than dropped on the wrong thread.
            inner: ManuallyDrop<F>,
        }

        impl<F> Drop for Checked<F> {
            fn drop(&mut self) {
                assert!(
                    self.id == thread_id(),
                    "local task dropped by a thread that didn't spawn it"
                );
                // SAFETY: the inner future is dropped exactly once, here.
                unsafe {
                    ManuallyDrop::drop(&mut self.inner);
                }
            }
        }

        impl<F: Future> Future for Checked<F> {
            type Output = F::Output;

            fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
                assert!(
                    self.id == thread_id(),
                    "local task polled by a thread that didn't spawn it"
                );
                // SAFETY: structural pinning — the inner future is never moved
                // out of `Checked` while pinned.
                unsafe { self.map_unchecked_mut(|c| &mut *c.inner).poll(cx) }
            }
        }

        // Wrap the future into one that checks which thread it's on.
        let future = move |meta| {
            let future = future(meta);
            Checked {
                id: thread_id(),
                inner: ManuallyDrop::new(future),
            }
        };

        // SAFETY: `Checked` panics instead of letting the future be polled or
        // dropped on a foreign thread, upholding `spawn_unchecked`'s
        // requirement for non-`Send` futures.
        unsafe { self.spawn_unchecked(future, schedule) }
    }

    /// Creates a new task without [`Send`], [`Sync`], and `'static` bounds.
    ///
    /// This function is same as [`spawn()`], except it does not require [`Send`], [`Sync`], and
    /// `'static` on `future` and `schedule`.
    ///
    /// # Safety
    ///
    /// - If `Fut` is not [`Send`], its [`Runnable`] must be used and dropped on the original
    ///   thread.
    /// - If `Fut` is not `'static`, borrowed non-metadata variables must outlive its [`Runnable`].
    /// - If `schedule` is not [`Send`] and [`Sync`], all instances of the [`Runnable`]'s [`Waker`]
    ///   must be used and dropped on the original thread.
    /// - If `schedule` is not `'static`, borrowed variables must outlive all instances of the
    ///   [`Runnable`]'s [`Waker`].
    ///
    /// # Examples
    ///
    /// ```
    /// use async_task::Builder;
    ///
    /// // The future inside the task.
    /// let future = async {
    ///     println!("Hello, world!");
    /// };
    ///
    /// // If the task gets woken up, it will be sent into this channel.
    /// let (s, r) = flume::unbounded();
    /// let schedule = move |runnable| s.send(runnable).unwrap();
    ///
    /// // Create a task with the future and the schedule function.
    /// let (runnable, task) = unsafe { Builder::new().spawn_unchecked(move |()| future, schedule) };
    /// ```
    pub unsafe fn spawn_unchecked<'a, F, Fut, S>(
        self,
        future: F,
        schedule: S,
    ) -> (Runnable<M>, Task<Fut::Output, M>)
    where
        F: FnOnce(&'a M) -> Fut,
        Fut: Future + 'a,
        S: Schedule<M>,
        M: 'a,
    {
        // Allocate large futures on the heap.
        let ptr = if mem::size_of::<Fut>() >= 2048 {
            // Futures of 2 KiB or more are boxed so only a pinned box is
            // stored inline in the task allocation.
            let future = |meta| {
                let future = future(meta);
                Box::pin(future)
            };

            RawTask::<_, Fut::Output, S, M>::allocate(future, schedule, self)
        } else {
            RawTask::<Fut, Fut::Output, S, M>::allocate(future, schedule, self)
        };

        // Both handles share the same allocation; `Task` is tracked by the
        // `TASK` flag rather than the reference count.
        let runnable = Runnable::from_raw(ptr);
        let task = Task {
            ptr,
            _marker: PhantomData,
        };

        (runnable, task)
    }
}
/// Creates a new task.
///
/// The returned [`Runnable`] is used to poll the `future`, and the [`Task`] is used to await its
/// output.
///
/// Method [`run()`][`Runnable::run()`] polls the task's future once. Then, the [`Runnable`]
/// vanishes and only reappears when its [`Waker`] wakes the task, thus scheduling it to be run
/// again.
///
/// When the task is woken, its [`Runnable`] is passed to the `schedule` function.
/// The `schedule` function should not attempt to run the [`Runnable`] nor to drop it. Instead, it
/// should push it into a task queue so that it can be processed later.
///
/// If you need to spawn a future that does not implement [`Send`] or isn't `'static`, consider
/// using [`spawn_local()`] or [`spawn_unchecked()`] instead.
///
/// # Examples
///
/// ```
/// // The future inside the task.
/// let future = async {
///     println!("Hello, world!");
/// };
///
/// // A function that schedules the task when it gets woken up.
/// let (s, r) = flume::unbounded();
/// let schedule = move |runnable| s.send(runnable).unwrap();
///
/// // Create a task with the future and the schedule function.
/// let (runnable, task) = async_task::spawn(future, schedule);
/// ```
pub fn spawn<F, S>(future: F, schedule: S) -> (Runnable, Task<F::Output>)
where
    F: Future + Send + 'static,
    F::Output: Send + 'static,
    S: Schedule + Send + Sync + 'static,
{
    // SAFETY: the `Send`, `Sync` and `'static` bounds satisfy every condition
    // listed in `spawn_unchecked`'s safety contract.
    unsafe { spawn_unchecked(future, schedule) }
}
/// Creates a new thread-local task.
///
/// This function is same as [`spawn()`], except it does not require [`Send`] on `future`. If the
/// [`Runnable`] is used or dropped on another thread, a panic will occur.
///
/// This function is only available when the `std` feature for this crate is enabled.
///
/// # Examples
///
/// ```
/// use async_task::Runnable;
/// use flume::{Receiver, Sender};
/// use std::rc::Rc;
///
/// thread_local! {
///     // A queue that holds scheduled tasks.
///     static QUEUE: (Sender<Runnable>, Receiver<Runnable>) = flume::unbounded();
/// }
///
/// // Make a non-Send future.
/// let msg: Rc<str> = "Hello, world!".into();
/// let future = async move {
///     println!("{}", msg);
/// };
///
/// // A function that schedules the task when it gets woken up.
/// let s = QUEUE.with(|(s, _)| s.clone());
/// let schedule = move |runnable| s.send(runnable).unwrap();
///
/// // Create a task with the future and the schedule function.
/// let (runnable, task) = async_task::spawn_local(future, schedule);
/// ```
#[cfg(feature = "std")]
pub fn spawn_local<F, S>(future: F, schedule: S) -> (Runnable, Task<F::Output>)
where
    F: Future + 'static,
    F::Output: 'static,
    S: Schedule + Send + Sync + 'static,
{
    // The closure adapts the plain future to the `FnOnce(&M) -> Fut` shape
    // that the builder API expects.
    Builder::new().spawn_local(move |()| future, schedule)
}
/// Creates a new task without [`Send`], [`Sync`], and `'static` bounds.
///
/// This function is same as [`spawn()`], except it does not require [`Send`], [`Sync`], and
/// `'static` on `future` and `schedule`.
///
/// # Safety
///
/// - If `future` is not [`Send`], its [`Runnable`] must be used and dropped on the original
///   thread.
/// - If `future` is not `'static`, borrowed variables must outlive its [`Runnable`].
/// - If `schedule` is not [`Send`] and [`Sync`], all instances of the [`Runnable`]'s [`Waker`]
///   must be used and dropped on the original thread.
/// - If `schedule` is not `'static`, borrowed variables must outlive all instances of the
///   [`Runnable`]'s [`Waker`].
///
/// # Examples
///
/// ```
/// // The future inside the task.
/// let future = async {
///     println!("Hello, world!");
/// };
///
/// // If the task gets woken up, it will be sent into this channel.
/// let (s, r) = flume::unbounded();
/// let schedule = move |runnable| s.send(runnable).unwrap();
///
/// // Create a task with the future and the schedule function.
/// let (runnable, task) = unsafe { async_task::spawn_unchecked(future, schedule) };
/// ```
pub unsafe fn spawn_unchecked<F, S>(future: F, schedule: S) -> (Runnable, Task<F::Output>)
where
    F: Future,
    S: Schedule,
{
    // Delegate to the builder; the caller's safety obligations are forwarded
    // verbatim to `Builder::spawn_unchecked`.
    Builder::new().spawn_unchecked(move |()| future, schedule)
}
/// A handle to a runnable task.
///
/// Every spawned task has a single [`Runnable`] handle, which only exists when the task is
/// scheduled for running.
///
/// Method [`run()`][`Runnable::run()`] polls the task's future once. Then, the [`Runnable`]
/// vanishes and only reappears when its [`Waker`] wakes the task, thus scheduling it to be run
/// again.
///
/// Dropping a [`Runnable`] cancels the task, which means its future won't be polled again, and
/// awaiting the [`Task`] after that will result in a panic.
///
/// # Examples
///
/// ```
/// use async_task::Runnable;
/// use once_cell::sync::Lazy;
/// use std::{panic, thread};
///
/// // A simple executor.
/// static QUEUE: Lazy<flume::Sender<Runnable>> = Lazy::new(|| {
///     let (sender, receiver) = flume::unbounded::<Runnable>();
///     thread::spawn(|| {
///         for runnable in receiver {
///             let _ignore_panic = panic::catch_unwind(|| runnable.run());
///         }
///     });
///     sender
/// });
///
/// // Create a task with a simple future.
/// let schedule = |runnable| QUEUE.send(runnable).unwrap();
/// let (runnable, task) = async_task::spawn(async { 1 + 2 }, schedule);
///
/// // Schedule the task and await its output.
/// runnable.schedule();
/// assert_eq!(smol::future::block_on(task), 3);
/// ```
pub struct Runnable<M = ()> {
    /// A pointer to the heap-allocated task.
    ///
    /// Type-erased (`NonNull<()>`); the concrete layout is only known to the
    /// vtable stored in the task's header.
    pub(crate) ptr: NonNull<()>,

    /// A marker capturing generic type `M`.
    pub(crate) _marker: PhantomData<M>,
}
// SAFETY: a `Runnable` may move between (or be shared across) threads only
// when its metadata can, hence the `M: Send + Sync` bounds.
// NOTE(review): assumes the task internals themselves are synchronized via
// the atomic state machine — confirm against `raw.rs`.
unsafe impl<M: Send + Sync> Send for Runnable<M> {}
unsafe impl<M: Send + Sync> Sync for Runnable<M> {}

// NOTE(review): unconditionally marked (ref-)unwind-safe; presumably sound
// because the header is only accessed through atomics — confirm upstream
// reasoning.
#[cfg(feature = "std")]
impl<M> std::panic::UnwindSafe for Runnable<M> {}
#[cfg(feature = "std")]
impl<M> std::panic::RefUnwindSafe for Runnable<M> {}
impl<M> Runnable<M> {
    /// Get the metadata associated with this task.
    ///
    /// Tasks can be created with a metadata object associated with them; by default, this
    /// is a `()` value. See the [`Builder::metadata()`] method for more information.
    pub fn metadata(&self) -> &M {
        &self.header().metadata
    }

    /// Schedules the task.
    ///
    /// This is a convenience method that passes the [`Runnable`] to the schedule function.
    ///
    /// # Examples
    ///
    /// ```
    /// // A function that schedules the task when it gets woken up.
    /// let (s, r) = flume::unbounded();
    /// let schedule = move |runnable| s.send(runnable).unwrap();
    ///
    /// // Create a task with a simple future and the schedule function.
    /// let (runnable, task) = async_task::spawn(async {}, schedule);
    ///
    /// // Schedule the task.
    /// assert_eq!(r.len(), 0);
    /// runnable.schedule();
    /// assert_eq!(r.len(), 1);
    /// ```
    pub fn schedule(self) {
        let ptr = self.ptr.as_ptr();
        let header = ptr as *const Header<M>;
        // Forget `self` so its `Drop` impl (which would cancel the task) does
        // not run; ownership of this reference passes to the vtable call.
        mem::forget(self);

        unsafe {
            ((*header).vtable.schedule)(ptr, ScheduleInfo::new(false));
        }
    }

    /// Runs the task by polling its future.
    ///
    /// Returns `true` if the task was woken while running, in which case the [`Runnable`] gets
    /// rescheduled at the end of this method invocation. Otherwise, returns `false` and the
    /// [`Runnable`] vanishes until the task is woken.
    /// The return value is just a hint: `true` usually indicates that the task has yielded, i.e.
    /// it woke itself and then gave the control back to the executor.
    ///
    /// If the [`Task`] handle was dropped or if [`cancel()`][`Task::cancel()`] was called, then
    /// this method simply destroys the task.
    ///
    /// If the polled future panics, this method propagates the panic, and awaiting the [`Task`]
    /// after that will also result in a panic.
    ///
    /// # Examples
    ///
    /// ```
    /// // A function that schedules the task when it gets woken up.
    /// let (s, r) = flume::unbounded();
    /// let schedule = move |runnable| s.send(runnable).unwrap();
    ///
    /// // Create a task with a simple future and the schedule function.
    /// let (runnable, task) = async_task::spawn(async { 1 + 2 }, schedule);
    ///
    /// // Run the task and check its output.
    /// runnable.run();
    /// assert_eq!(smol::future::block_on(task), 3);
    /// ```
    pub fn run(self) -> bool {
        let ptr = self.ptr.as_ptr();
        let header = ptr as *const Header<M>;
        // As in `schedule()`: ownership of this reference moves into the
        // vtable call rather than through `Drop`.
        mem::forget(self);

        unsafe { ((*header).vtable.run)(ptr) }
    }

    /// Returns a waker associated with this task.
    ///
    /// # Examples
    ///
    /// ```
    /// use smol::future;
    ///
    /// // A function that schedules the task when it gets woken up.
    /// let (s, r) = flume::unbounded();
    /// let schedule = move |runnable| s.send(runnable).unwrap();
    ///
    /// // Create a task with a simple future and the schedule function.
    /// let (runnable, task) = async_task::spawn(future::pending::<()>(), schedule);
    ///
    /// // Take a waker and run the task.
    /// let waker = runnable.waker();
    /// runnable.run();
    ///
    /// // Reschedule the task by waking it.
    /// assert_eq!(r.len(), 0);
    /// waker.wake();
    /// assert_eq!(r.len(), 1);
    /// ```
    pub fn waker(&self) -> Waker {
        let ptr = self.ptr.as_ptr();
        let header = ptr as *const Header<M>;

        unsafe {
            // `clone_waker` produces a fresh raw waker for this task.
            let raw_waker = ((*header).vtable.clone_waker)(ptr);
            Waker::from_raw(raw_waker)
        }
    }

    /// Returns a reference to the task's header, which is stored at the start
    /// of the heap allocation.
    fn header(&self) -> &Header<M> {
        unsafe { &*(self.ptr.as_ptr() as *const Header<M>) }
    }

    /// Converts this task into a raw pointer.
    ///
    /// To avoid a memory leak the pointer must be converted back to a Runnable using [`Runnable<M>::from_raw`][from_raw].
    ///
    /// `into_raw` does not change the state of the [`Task`], but there is no guarantee that it will be in the same state after calling [`Runnable<M>::from_raw`][from_raw],
    /// as the corresponding [`Task`] might have been dropped or cancelled.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use async_task::{Runnable, spawn};
    /// let (runnable, task) = spawn(async {}, |_| {});
    /// let runnable_pointer = runnable.into_raw();
    ///
    /// unsafe {
    ///     // Convert back to an `Runnable` to prevent leak.
    ///     let runnable = Runnable::<()>::from_raw(runnable_pointer);
    ///     runnable.run();
    ///     // Further calls to `Runnable::from_raw(runnable_pointer)` would be memory-unsafe.
    /// }
    /// // The memory was freed when `x` went out of scope above, so `runnable_pointer` is now dangling!
    /// ```
    /// [from_raw]: #method.from_raw
    pub fn into_raw(self) -> NonNull<()> {
        let ptr = self.ptr;
        // Skip `Drop` so the reference stays alive behind the raw pointer.
        mem::forget(self);
        ptr
    }

    /// Converts a raw pointer into a Runnable.
    ///
    /// # Safety
    ///
    /// This method should only be used with raw pointers returned from [`Runnable<M>::into_raw`][into_raw].
    /// It is not safe to use the provided pointer once it is passed to `from_raw`.
    /// Crucially, it is unsafe to call `from_raw` multiple times with the same pointer - even if the resulting [`Runnable`] is not used -
    /// as internally `async-task` uses reference counting.
    ///
    /// It is however safe to call [`Runnable<M>::into_raw`][into_raw] on a [`Runnable`] created with `from_raw` or
    /// after the [`Task`] associated with a given Runnable has been dropped or cancelled.
    ///
    /// The state of the [`Runnable`] created with `from_raw` is not specified.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use async_task::{Runnable, spawn};
    /// let (runnable, task) = spawn(async {}, |_| {});
    /// let runnable_pointer = runnable.into_raw();
    ///
    /// drop(task);
    /// unsafe {
    ///     // Convert back to an `Runnable` to prevent leak.
    ///     let runnable = Runnable::<()>::from_raw(runnable_pointer);
    ///     let did_poll = runnable.run();
    ///     assert!(!did_poll);
    ///     // Further calls to `Runnable::from_raw(runnable_pointer)` would be memory-unsafe.
    /// }
    /// // The memory was freed when `x` went out of scope above, so `runnable_pointer` is now dangling!
    /// ```
    /// [into_raw]: #method.into_raw
    pub unsafe fn from_raw(ptr: NonNull<()>) -> Self {
        Self {
            ptr,
            _marker: Default::default(),
        }
    }
}
impl<M> Drop for Runnable<M> {
    // Dropping a `Runnable` cancels the task: mark it closed, drop its
    // future, wake any awaiter, and release this handle's reference.
    fn drop(&mut self) {
        let ptr = self.ptr.as_ptr();
        let header = self.header();

        unsafe {
            let mut state = header.state.load(Ordering::Acquire);

            loop {
                // If the task has been completed or closed, it can't be canceled.
                if state & (COMPLETED | CLOSED) != 0 {
                    break;
                }

                // Mark the task as closed.
                match header.state.compare_exchange_weak(
                    state,
                    state | CLOSED,
                    Ordering::AcqRel,
                    Ordering::Acquire,
                ) {
                    Ok(_) => break,
                    Err(s) => state = s,
                }
            }

            // Drop the future.
            (header.vtable.drop_future)(ptr);

            // Mark the task as unscheduled.
            let state = header.state.fetch_and(!SCHEDULED, Ordering::AcqRel);

            // Notify the awaiter that the future has been dropped.
            if state & AWAITER != 0 {
                (*header).notify(None);
            }

            // Drop the task reference.
            (header.vtable.drop_ref)(ptr);
        }
    }
}
impl<M: fmt::Debug> fmt::Debug for Runnable<M> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Only the header is printed; the future itself is opaque.
        let header = self.ptr.as_ptr() as *const Header<M>;
        f.debug_struct("Runnable")
            .field("header", unsafe { &*header })
            .finish()
    }
}

69
vendor/async-task/src/state.rs vendored Normal file
View File

@@ -0,0 +1,69 @@
/// Set if the task is scheduled for running.
///
/// A task is considered to be scheduled whenever its `Runnable` exists.
///
/// This flag can't be set when the task is completed. However, it can be set while the task is
/// running, in which case it will be rescheduled as soon as polling finishes.
pub(crate) const SCHEDULED: usize = 1 << 0;

/// Set if the task is running.
///
/// A task is in running state while its future is being polled.
///
/// This flag can't be set when the task is completed. However, it can be in scheduled state while
/// it is running, in which case it will be rescheduled as soon as polling finishes.
pub(crate) const RUNNING: usize = 1 << 1;

/// Set if the task has been completed.
///
/// This flag is set when polling returns `Poll::Ready`. The output of the future is then stored
/// inside the task until it becomes closed. In fact, `Task` picks up the output by marking
/// the task as closed.
///
/// This flag can't be set when the task is scheduled or running.
pub(crate) const COMPLETED: usize = 1 << 2;

/// Set if the task is closed.
///
/// If a task is closed, that means it's either canceled or its output has been consumed by the
/// `Task`. A task becomes closed in the following cases:
///
/// 1. It gets canceled by `Runnable::drop()`, `Task::drop()`, or `Task::cancel()`.
/// 2. Its output gets awaited by the `Task`.
/// 3. It panics while polling the future.
/// 4. It is completed and the `Task` gets dropped.
pub(crate) const CLOSED: usize = 1 << 3;

/// Set if the `Task` still exists.
///
/// The `Task` is a special case in that it is only tracked by this flag, while all other
/// task references (`Runnable` and `Waker`s) are tracked by the reference count.
pub(crate) const TASK: usize = 1 << 4;

/// Set if the `Task` is awaiting the output.
///
/// This flag is set while there is a registered awaiter of type `Waker` inside the task. When the
/// task gets closed or completed, we need to wake the awaiter. This flag can be used as a fast
/// check that tells us if we need to wake anyone.
pub(crate) const AWAITER: usize = 1 << 5;

/// Set if an awaiter is being registered.
///
/// This flag is set when `Task` is polled and we are registering a new awaiter.
pub(crate) const REGISTERING: usize = 1 << 6;

/// Set if the awaiter is being notified.
///
/// This flag is set when notifying the awaiter. If an awaiter is concurrently registered and
/// notified, whichever side came first will take over the responsibility of resolving the race.
pub(crate) const NOTIFYING: usize = 1 << 7;

/// A single reference.
///
/// The lower bits in the state contain various flags representing the task state, while the upper
/// bits contain the reference count. The value of `REFERENCE` represents a single reference in the
/// total reference count.
///
/// Note that the reference counter only tracks the `Runnable` and `Waker`s. The `Task` is
/// tracked separately by the `TASK` flag.
pub(crate) const REFERENCE: usize = 1 << 8;

565
vendor/async-task/src/task.rs vendored Normal file
View File

@@ -0,0 +1,565 @@
use core::fmt;
use core::future::Future;
use core::marker::PhantomData;
use core::mem;
use core::pin::Pin;
use core::ptr::NonNull;
use core::sync::atomic::Ordering;
use core::task::{Context, Poll};
use crate::header::Header;
use crate::raw::Panic;
use crate::runnable::ScheduleInfo;
use crate::state::*;
/// A spawned task.
///
/// A [`Task`] can be awaited to retrieve the output of its future.
///
/// Dropping a [`Task`] cancels it, which means its future won't be polled again. To drop the
/// [`Task`] handle without canceling it, use [`detach()`][`Task::detach()`] instead. To cancel a
/// task gracefully and wait until it is fully destroyed, use the [`cancel()`][Task::cancel()]
/// method.
///
/// Note that canceling a task actually wakes it and reschedules one last time. Then, the executor
/// can destroy the task by simply dropping its [`Runnable`][`super::Runnable`] or by invoking
/// [`run()`][`super::Runnable::run()`].
///
/// # Examples
///
/// ```
/// use smol::{future, Executor};
/// use std::thread;
///
/// let ex = Executor::new();
///
/// // Spawn a future onto the executor.
/// let task = ex.spawn(async {
///     println!("Hello from a task!");
///     1 + 2
/// });
///
/// // Run an executor thread.
/// thread::spawn(move || future::block_on(ex.run(future::pending::<()>())));
///
/// // Wait for the task's output.
/// assert_eq!(future::block_on(task), 3);
/// ```
#[must_use = "tasks get canceled when dropped, use `.detach()` to run them in the background"]
pub struct Task<T, M = ()> {
    /// A raw task pointer.
    ///
    /// Type-erased; the concrete layout is only known to the vtable in the
    /// task's header.
    pub(crate) ptr: NonNull<()>,

    /// A marker capturing generic types `T` and `M`.
    pub(crate) _marker: PhantomData<(T, M)>,
}
// SAFETY: sending a `Task` may move the output `T` to another thread (the
// output is taken out of the task allocation when awaited), hence `T: Send`;
// the metadata must be shareable, hence `M: Send + Sync`.
unsafe impl<T: Send, M: Send + Sync> Send for Task<T, M> {}

// NOTE(review): `Sync` carries no `T` bound — presumably because `&Task`
// exposes no access to `T`; confirm against the public API.
unsafe impl<T, M: Send + Sync> Sync for Task<T, M> {}

// The handle only stores a raw pointer, so it is trivially `Unpin`.
impl<T, M> Unpin for Task<T, M> {}

#[cfg(feature = "std")]
impl<T, M> std::panic::UnwindSafe for Task<T, M> {}
#[cfg(feature = "std")]
impl<T, M> std::panic::RefUnwindSafe for Task<T, M> {}
impl<T, M> Task<T, M> {
/// Detaches the task to let it keep running in the background.
///
/// # Examples
///
/// ```
/// use smol::{Executor, Timer};
/// use std::time::Duration;
///
/// let ex = Executor::new();
///
/// // Spawn a daemon future.
/// ex.spawn(async {
/// loop {
/// println!("I'm a daemon task looping forever.");
/// Timer::after(Duration::from_secs(1)).await;
/// }
/// })
/// .detach();
/// ```
pub fn detach(self) {
let mut this = self;
let _out = this.set_detached();
mem::forget(this);
}
/// Cancels the task and waits for it to stop running.
///
/// Returns the task's output if it was completed just before it got canceled, or [`None`] if
/// it didn't complete.
///
/// While it's possible to simply drop the [`Task`] to cancel it, this is a cleaner way of
/// canceling because it also waits for the task to stop running.
///
/// # Examples
///
/// ```
/// # if cfg!(miri) { return; } // Miri does not support epoll
/// use smol::{future, Executor, Timer};
/// use std::thread;
/// use std::time::Duration;
///
/// let ex = Executor::new();
///
/// // Spawn a daemon future.
/// let task = ex.spawn(async {
/// loop {
/// println!("Even though I'm in an infinite loop, you can still cancel me!");
/// Timer::after(Duration::from_secs(1)).await;
/// }
/// });
///
/// // Run an executor thread.
/// thread::spawn(move || future::block_on(ex.run(future::pending::<()>())));
///
/// future::block_on(async {
/// Timer::after(Duration::from_secs(3)).await;
/// task.cancel().await;
/// });
/// ```
pub async fn cancel(self) -> Option<T> {
let mut this = self;
this.set_canceled();
this.fallible().await
}
/// Converts this task into a [`FallibleTask`].
///
/// Like [`Task`], a fallible task will poll the task's output until it is
/// completed or cancelled due to its [`Runnable`][`super::Runnable`] being
/// dropped without being run. Resolves to the task's output when completed,
/// or [`None`] if it didn't complete.
///
/// # Examples
///
/// ```
/// use smol::{future, Executor};
/// use std::thread;
///
/// let ex = Executor::new();
///
/// // Spawn a future onto the executor.
/// let task = ex.spawn(async {
/// println!("Hello from a task!");
/// 1 + 2
/// })
/// .fallible();
///
/// // Run an executor thread.
/// thread::spawn(move || future::block_on(ex.run(future::pending::<()>())));
///
/// // Wait for the task's output.
/// assert_eq!(future::block_on(task), Some(3));
/// ```
///
/// ```
/// use smol::future;
///
/// // Schedule function which drops the runnable without running it.
/// let schedule = move |runnable| drop(runnable);
///
/// // Create a task with the future and the schedule function.
/// let (runnable, task) = async_task::spawn(async {
/// println!("Hello from a task!");
/// 1 + 2
/// }, schedule);
/// runnable.schedule();
///
/// // Wait for the task's output.
/// assert_eq!(future::block_on(task.fallible()), None);
/// ```
pub fn fallible(self) -> FallibleTask<T, M> {
FallibleTask { task: self }
}
/// Puts the task in canceled state.
///
/// Sets the `CLOSED` flag so the task can never complete. If the future is
/// neither scheduled nor currently running, the task is scheduled one extra
/// time so the executor gets a chance to drop the future.
fn set_canceled(&mut self) {
    let ptr = self.ptr.as_ptr();
    // The task's heap allocation begins with its header.
    let header = ptr as *const Header<M>;

    unsafe {
        let mut state = (*header).state.load(Ordering::Acquire);

        loop {
            // If the task has been completed or closed, it can't be canceled.
            if state & (COMPLETED | CLOSED) != 0 {
                break;
            }

            // If the task is not scheduled nor running, we'll need to schedule it.
            // Scheduling hands out one more reference, hence the `+ REFERENCE`.
            let new = if state & (SCHEDULED | RUNNING) == 0 {
                (state | SCHEDULED | CLOSED) + REFERENCE
            } else {
                state | CLOSED
            };

            // Mark the task as closed.
            match (*header).state.compare_exchange_weak(
                state,
                new,
                Ordering::AcqRel,
                Ordering::Acquire,
            ) {
                Ok(_) => {
                    // If the task is not scheduled nor running, schedule it one more time so
                    // that its future gets dropped by the executor.
                    if state & (SCHEDULED | RUNNING) == 0 {
                        ((*header).vtable.schedule)(ptr, ScheduleInfo::new(false));
                    }

                    // Notify the awaiter that the task has been closed.
                    if state & AWAITER != 0 {
                        (*header).notify(None);
                    }

                    break;
                }
                // CAS lost the race (or failed spuriously) — retry with the
                // freshly observed state.
                Err(s) => state = s,
            }
        }
    }
}
/// Puts the task in detached state.
///
/// Gives up this handle's `TASK` reference. If the task completed but its
/// output was never awaited, the output is read out and returned so the
/// caller can drop it. If this was the last reference, the task is either
/// scheduled once more (so the executor drops its future) or destroyed
/// outright.
fn set_detached(&mut self) -> Option<Result<T, Panic>> {
    let ptr = self.ptr.as_ptr();
    let header = ptr as *const Header<M>;

    unsafe {
        // A place where the output will be stored in case it needs to be dropped.
        let mut output = None;

        // Optimistically assume the `Task` is being detached just after creating the task.
        // This is a common case so if the `Task` is detached, the overhead of it is only one
        // compare-exchange operation.
        if let Err(mut state) = (*header).state.compare_exchange_weak(
            SCHEDULED | TASK | REFERENCE,
            SCHEDULED | REFERENCE,
            Ordering::AcqRel,
            Ordering::Acquire,
        ) {
            loop {
                // If the task has been completed but not yet closed, that means its output
                // must be dropped.
                if state & COMPLETED != 0 && state & CLOSED == 0 {
                    // Mark the task as closed in order to grab its output.
                    match (*header).state.compare_exchange_weak(
                        state,
                        state | CLOSED,
                        Ordering::AcqRel,
                        Ordering::Acquire,
                    ) {
                        Ok(_) => {
                            // Read the output.
                            output = Some(
                                (((*header).vtable.get_output)(ptr) as *mut Result<T, Panic>)
                                    .read(),
                            );

                            // Update the state variable because we're continuing the loop.
                            state |= CLOSED;
                        }
                        Err(s) => state = s,
                    }
                } else {
                    // If this is the last reference to the task and it's not closed, then
                    // close it and schedule one more time so that its future gets dropped by
                    // the executor.
                    //
                    // `!(REFERENCE - 1)` masks off the flag bits, leaving only
                    // the reference count stored in the upper bits.
                    let new = if state & (!(REFERENCE - 1) | CLOSED) == 0 {
                        SCHEDULED | CLOSED | REFERENCE
                    } else {
                        state & !TASK
                    };

                    // Unset the `TASK` flag.
                    match (*header).state.compare_exchange_weak(
                        state,
                        new,
                        Ordering::AcqRel,
                        Ordering::Acquire,
                    ) {
                        Ok(_) => {
                            // If this is the last reference to the task, we need to either
                            // schedule dropping its future or destroy it.
                            if state & !(REFERENCE - 1) == 0 {
                                if state & CLOSED == 0 {
                                    ((*header).vtable.schedule)(ptr, ScheduleInfo::new(false));
                                } else {
                                    ((*header).vtable.destroy)(ptr);
                                }
                            }

                            break;
                        }
                        Err(s) => state = s,
                    }
                }
            }
        }

        output
    }
}
/// Polls the task to retrieve its output.
///
/// Returns `Some` if the task has completed or `None` if it was closed.
///
/// A task becomes closed in the following cases:
///
/// 1. It gets canceled by `Runnable::drop()`, `Task::drop()`, or `Task::cancel()`.
/// 2. Its output gets awaited by the `Task`.
/// 3. It panics while polling the future.
/// 4. It is completed and the `Task` gets dropped.
///
/// While the result is not yet available, `cx`'s waker is registered as the
/// task's awaiter so this future gets woken on completion or closure.
fn poll_task(&mut self, cx: &mut Context<'_>) -> Poll<Option<T>> {
    let ptr = self.ptr.as_ptr();
    let header = ptr as *const Header<M>;

    unsafe {
        let mut state = (*header).state.load(Ordering::Acquire);

        loop {
            // If the task has been closed, notify the awaiter and return `None`.
            if state & CLOSED != 0 {
                // If the task is scheduled or running, we need to wait until its future is
                // dropped.
                if state & (SCHEDULED | RUNNING) != 0 {
                    // Replace the waker with one associated with the current task.
                    (*header).register(cx.waker());

                    // Reload the state after registering. It is possible changes occurred just
                    // before registration so we need to check for that.
                    state = (*header).state.load(Ordering::Acquire);

                    // If the task is still scheduled or running, we need to wait because its
                    // future is not dropped yet.
                    if state & (SCHEDULED | RUNNING) != 0 {
                        return Poll::Pending;
                    }
                }

                // Even though the awaiter is most likely the current task, it could also be
                // another task.
                (*header).notify(Some(cx.waker()));
                return Poll::Ready(None);
            }

            // If the task is not completed, register the current task.
            if state & COMPLETED == 0 {
                // Replace the waker with one associated with the current task.
                (*header).register(cx.waker());

                // Reload the state after registering. It is possible that the task became
                // completed or closed just before registration so we need to check for that.
                state = (*header).state.load(Ordering::Acquire);

                // If the task has been closed, restart.
                if state & CLOSED != 0 {
                    continue;
                }

                // If the task is still not completed, we're blocked on it.
                if state & COMPLETED == 0 {
                    return Poll::Pending;
                }
            }

            // Since the task is now completed, mark it as closed in order to grab its output.
            match (*header).state.compare_exchange(
                state,
                state | CLOSED,
                Ordering::AcqRel,
                Ordering::Acquire,
            ) {
                Ok(_) => {
                    // Notify the awaiter. Even though the awaiter is most likely the current
                    // task, it could also be another task.
                    if state & AWAITER != 0 {
                        (*header).notify(Some(cx.waker()));
                    }

                    // Take the output from the task.
                    let output = ((*header).vtable.get_output)(ptr) as *mut Result<T, Panic>;
                    let output = output.read();

                    // Propagate the panic if the task panicked.
                    let output = match output {
                        Ok(output) => output,
                        Err(panic) => {
                            #[cfg(feature = "std")]
                            std::panic::resume_unwind(panic);

                            // Without `std`, `Panic` is uninhabited (the empty
                            // match compiles only for uninhabited types), so
                            // this branch is statically unreachable.
                            #[cfg(not(feature = "std"))]
                            match panic {}
                        }
                    };

                    return Poll::Ready(Some(output));
                }
                Err(s) => state = s,
            }
        }
    }
}
/// Returns a shared reference to the task's header.
fn header(&self) -> &Header<M> {
    // SAFETY: `self.ptr` points at the task allocation, which begins with a
    // `Header<M>` and outlives this handle.
    unsafe { &*(self.ptr.as_ptr() as *const Header<M>) }
}
/// Returns `true` if the current task is finished.
///
/// Note that in a multithreaded environment, this task can change finish immediately after calling this function.
pub fn is_finished(&self) -> bool {
let ptr = self.ptr.as_ptr();
let header = ptr as *const Header<M>;
unsafe {
let state = (*header).state.load(Ordering::Acquire);
state & (CLOSED | COMPLETED) != 0
}
}
/// Get the metadata associated with this task.
///
/// Tasks can be created with a metadata object associated with them; by default, this
/// is a `()` value. See the [`Builder::metadata()`] method for more information.
pub fn metadata(&self) -> &M {
    let header = self.header();
    &header.metadata
}
}
impl<T, M> Drop for Task<T, M> {
    fn drop(&mut self) {
        // Order matters: first close the task so it can never complete, then
        // give up this handle's reference — which also drops an unread output
        // and destroys the task if this was the last reference.
        self.set_canceled();
        self.set_detached();
    }
}
impl<T, M> Future for Task<T, M> {
    type Output = T;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // A closed task yields `None` from `poll_task`; awaiting a `Task`
        // (rather than a `FallibleTask`) after that point is a contract
        // violation, hence the panic.
        self.poll_task(cx)
            .map(|output| output.expect("Task polled after completion"))
    }
}
impl<T, M: fmt::Debug> fmt::Debug for Task<T, M> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut dbg = f.debug_struct("Task");
        dbg.field("header", self.header());
        dbg.finish()
    }
}
/// A spawned task with a fallible response.
///
/// This type behaves like [`Task`], however it produces an `Option<T>` when
/// polled and will return `None` if the executor dropped its
/// [`Runnable`][`super::Runnable`] without being run.
///
/// This can be useful to avoid the panic produced when polling the `Task`
/// future if the executor dropped its `Runnable`.
#[must_use = "tasks get canceled when dropped, use `.detach()` to run them in the background"]
pub struct FallibleTask<T, M = ()> {
    // The wrapped task handle; every operation delegates to it.
    task: Task<T, M>,
}
impl<T, M> FallibleTask<T, M> {
    /// Detaches the task to let it keep running in the background.
    ///
    /// # Examples
    ///
    /// ```
    /// use smol::{Executor, Timer};
    /// use std::time::Duration;
    ///
    /// let ex = Executor::new();
    ///
    /// // Spawn a daemon future.
    /// ex.spawn(async {
    ///     loop {
    ///         println!("I'm a daemon task looping forever.");
    ///         Timer::after(Duration::from_secs(1)).await;
    ///     }
    /// })
    /// .fallible()
    /// .detach();
    /// ```
    pub fn detach(self) {
        self.task.detach()
    }

    /// Cancels the task and waits for it to stop running.
    ///
    /// Returns the task's output if it was completed just before it got canceled, or [`None`] if
    /// it didn't complete.
    ///
    /// While it's possible to simply drop the [`Task`] to cancel it, this is a cleaner way of
    /// canceling because it also waits for the task to stop running.
    ///
    /// # Examples
    ///
    /// ```
    /// # if cfg!(miri) { return; } // Miri does not support epoll
    /// use smol::{future, Executor, Timer};
    /// use std::thread;
    /// use std::time::Duration;
    ///
    /// let ex = Executor::new();
    ///
    /// // Spawn a daemon future.
    /// let task = ex.spawn(async {
    ///     loop {
    ///         println!("Even though I'm in an infinite loop, you can still cancel me!");
    ///         Timer::after(Duration::from_secs(1)).await;
    ///     }
    /// })
    /// .fallible();
    ///
    /// // Run an executor thread.
    /// thread::spawn(move || future::block_on(ex.run(future::pending::<()>())));
    ///
    /// future::block_on(async {
    ///     Timer::after(Duration::from_secs(3)).await;
    ///     task.cancel().await;
    /// });
    /// ```
    pub async fn cancel(self) -> Option<T> {
        self.task.cancel().await
    }

    /// Returns `true` if the current task is finished.
    ///
    /// Note that in a multithreaded environment, this task can finish immediately
    /// after this function returns, so the result may already be stale.
    pub fn is_finished(&self) -> bool {
        self.task.is_finished()
    }
}
impl<T, M> Future for FallibleTask<T, M> {
    type Output = Option<T>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // Unlike `Task`'s `Future` impl, a closed task resolves to `None`
        // here instead of panicking.
        self.task.poll_task(cx)
    }
}
impl<T, M: fmt::Debug> fmt::Debug for FallibleTask<T, M> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut dbg = f.debug_struct("FallibleTask");
        dbg.field("header", self.task.header());
        dbg.finish()
    }
}

127
vendor/async-task/src/utils.rs vendored Normal file
View File

@@ -0,0 +1,127 @@
use core::alloc::Layout as StdLayout;
use core::mem;
/// Aborts the process.
///
/// To abort, this function simply panics while panicking: unwinding through
/// a destructor that is itself running because of a panic terminates the
/// process.
pub(crate) fn abort() -> ! {
    // Dropped during the unwind started by the `panic!` below, which triggers
    // the second (aborting) panic.
    struct Abort;

    impl Drop for Abort {
        fn drop(&mut self) {
            panic!("aborting the process");
        }
    }

    let _abort = Abort;
    panic!("aborting the process");
}

/// Calls a function and aborts if it panics.
///
/// This is useful in unsafe code where we can't recover from panics.
#[inline]
pub(crate) fn abort_on_panic<T>(f: impl FnOnce() -> T) -> T {
    // Aborts the process when dropped, i.e. when `f` unwinds; defused with
    // `mem::forget` on the normal return path.
    struct Bomb;

    impl Drop for Bomb {
        fn drop(&mut self) {
            abort();
        }
    }

    let guard = Bomb;
    let value = f();
    mem::forget(guard);
    value
}
/// A version of `alloc::alloc::Layout` that can be used in the const
/// position.
///
/// Unlike the standard-library type, construction performs no validity
/// checks; they are deferred to the unsafe [`Layout::into_std`] conversion.
#[derive(Clone, Copy, Debug)]
pub(crate) struct Layout {
    /// Size of the layout, in bytes.
    size: usize,
    /// Requested alignment, in bytes (not verified to be a power of two here).
    align: usize,
}
impl Layout {
    /// Creates a new `Layout` with the given size and alignment.
    ///
    /// No validity checks are performed; see [`Layout::into_std`].
    #[inline]
    pub(crate) const fn from_size_align(size: usize, align: usize) -> Self {
        Self { size, align }
    }

    /// Creates a new `Layout` for the given sized type.
    #[inline]
    pub(crate) const fn new<T>() -> Self {
        Self::from_size_align(mem::size_of::<T>(), mem::align_of::<T>())
    }

    /// Convert this into the standard library's layout type.
    ///
    /// # Safety
    ///
    /// - `align` must be non-zero and a power of two.
    /// - When rounded up to the nearest multiple of `align`, the size
    ///   must not overflow.
    #[inline]
    pub(crate) const unsafe fn into_std(self) -> StdLayout {
        StdLayout::from_size_align_unchecked(self.size, self.align)
    }

    /// Get the alignment of this layout.
    #[inline]
    pub(crate) const fn align(&self) -> usize {
        self.align
    }

    /// Get the size of this layout.
    #[inline]
    pub(crate) const fn size(&self) -> usize {
        self.size
    }

    /// Returns the layout for `a` followed by `b` and the offset of `b`.
    ///
    /// Returns `None` on arithmetic overflow or if the combined alignment is
    /// not a power of two.
    ///
    /// This function was adapted from the `Layout::extend()`:
    /// https://doc.rust-lang.org/nightly/std/alloc/struct.Layout.html#method.extend
    #[inline]
    pub(crate) const fn extend(self, other: Layout) -> Option<(Layout, usize)> {
        let new_align = max(self.align(), other.align());
        let pad = self.padding_needed_for(other.align());

        // `leap!` (a crate-local macro) unwraps the `Option` or early-returns
        // `None` — `?` is not usable in `const fn`.
        let offset = leap!(self.size().checked_add(pad));
        let new_size = leap!(offset.checked_add(other.size()));

        // return None if any of the following are true:
        // - align is 0 (implied false by is_power_of_two())
        // - align is not a power of 2
        // - size rounded up to align overflows
        if !new_align.is_power_of_two() || new_size > isize::MAX as usize - (new_align - 1) {
            return None;
        }

        let layout = Layout::from_size_align(new_size, new_align);
        Some((layout, offset))
    }

    /// Returns the padding after `layout` that aligns the following address to `align`.
    ///
    /// `align` is assumed to be a power of two; the wrapping arithmetic
    /// mirrors the standard library's implementation.
    ///
    /// This function was adapted from the `Layout::padding_needed_for()`:
    /// https://doc.rust-lang.org/nightly/std/alloc/struct.Layout.html#method.padding_needed_for
    #[inline]
    pub(crate) const fn padding_needed_for(self, align: usize) -> usize {
        let len = self.size();

        // Round `len` up to the next multiple of `align`; the difference from
        // `len` is the padding.
        let len_rounded_up = len.wrapping_add(align).wrapping_sub(1) & !align.wrapping_sub(1);
        len_rounded_up.wrapping_sub(len)
    }
}
/// Returns the larger of two `usize` values.
///
/// Hand-rolled because `Ord::max` (a trait method) cannot be called in a
/// `const fn`.
#[inline]
pub(crate) const fn max(left: usize, right: usize) -> usize {
    match left > right {
        true => left,
        false => right,
    }
}