Vendor dependencies for 0.3.0 release

2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

83
vendor/calloop/src/error.rs vendored Normal file

@@ -0,0 +1,83 @@
//! Error types used and generated by Calloop.
//!
//! This module contains error types for Calloop's operations. They are designed
//! to make it easy to deal with errors arising from Calloop's internal I/O and
//! other operations.
//!
//! There are two top-level error types:
//!
//! - [`Error`]: used by callback functions, internal operations, and some event
//! loop API calls
//!
//! - [`InsertError`]: used primarily by the [`insert_source()`] method when an
//! event source cannot be added to the loop and needs to be given back to the
//! caller
//!
//! [`insert_source()`]: crate::LoopHandle::insert_source()
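//!
//! For example, when [`insert_source()`] fails, the returned [`InsertError`] hands the
//! event source back to you alongside the underlying [`Error`]. A minimal sketch (the
//! timer source and the recovery strategy here are only illustrative):
//!
//! ```no_run
//! use calloop::{timer::{Timer, TimeoutAction}, EventLoop};
//!
//! let event_loop: EventLoop<()> = EventLoop::try_new().unwrap();
//! let handle = event_loop.handle();
//!
//! let timer = Timer::from_duration(std::time::Duration::from_secs(1));
//! match handle.insert_source(timer, |_deadline, _metadata, _shared| TimeoutAction::Drop) {
//!     Ok(_token) => { /* the source is now registered */ }
//!     Err(insert_error) => {
//!         // Recover the source and inspect the underlying error.
//!         let _timer = insert_error.inserted;
//!         eprintln!("insertion failed: {}", insert_error.error);
//!     }
//! }
//! ```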
use std::fmt::{self, Debug, Formatter};
/// The primary error type used by Calloop covering internal errors and I/O
/// errors that arise during loop operations such as source registration or
/// event dispatching.
#[derive(thiserror::Error, Debug)]
pub enum Error {
/// When an event source is registered (or re- or un-registered) with the
/// event loop, this error variant will occur if the token Calloop uses to
/// keep track of the event source is not valid.
#[error("invalid token provided to internal function")]
InvalidToken,
/// This variant wraps a [`std::io::Error`], which might arise from
/// Calloop's internal operations.
#[error("underlying IO error")]
IoError(#[from] std::io::Error),
/// Any other unexpected error kind (most likely from a user implementation of
/// [`EventSource::process_events()`]) will be wrapped in this.
///
/// [`EventSource::process_events()`]: crate::EventSource::process_events()
#[error("other error during loop operation")]
OtherError(#[from] Box<dyn std::error::Error + Sync + Send>),
}
impl From<Error> for std::io::Error {
/// Converts Calloop's error type into a [`std::io::Error`].
fn from(err: Error) -> Self {
match err {
Error::IoError(source) => source,
Error::InvalidToken => Self::new(std::io::ErrorKind::InvalidInput, err.to_string()),
Error::OtherError(source) => Self::new(std::io::ErrorKind::Other, source),
}
}
}
/// [`Result`] alias using Calloop's error type.
pub type Result<T> = core::result::Result<T, Error>;
/// An error generated when trying to insert an event source
#[derive(thiserror::Error)]
#[error("error inserting event source")]
pub struct InsertError<T> {
/// The source that could not be inserted
pub inserted: T,
/// The generated error
#[source]
pub error: Error,
}
impl<T> Debug for InsertError<T> {
#[cfg_attr(feature = "nightly_coverage", coverage(off))]
fn fmt(&self, formatter: &mut Formatter) -> core::result::Result<(), fmt::Error> {
write!(formatter, "{:?}", self.error)
}
}
impl<T> From<InsertError<T>> for crate::Error {
/// Converts the [`InsertError`] into Calloop's error type, throwing away
/// the contained source.
#[cfg_attr(feature = "nightly_coverage", coverage(off))]
fn from(e: InsertError<T>) -> crate::Error {
e.error
}
}

613
vendor/calloop/src/io.rs vendored Normal file

@@ -0,0 +1,613 @@
//! Adapters for async IO objects
//!
//! This module mainly hosts the [`Async`] adapter for making IO objects async with readiness
//! monitoring backed by an [`EventLoop`](crate::EventLoop). See [`LoopHandle::adapt_io`] for
//! how to create them.
//!
//! [`LoopHandle::adapt_io`]: crate::LoopHandle#method.adapt_io
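//!
//! A condensed sketch of the intended flow, assuming the `executor` cargo feature (so the
//! future can be driven by the same loop) and a Unix socket pair; the complete versions
//! live in the tests at the bottom of this module:
//!
//! ```ignore
//! let mut event_loop = calloop::EventLoop::<()>::try_new().unwrap();
//! let handle = event_loop.handle();
//!
//! // A futures executor inserted in the loop drives the async block below.
//! let (executor, scheduler) = calloop::futures::executor::<()>().unwrap();
//! handle.insert_source(executor, |(), _, _| {}).unwrap();
//!
//! // Wrap one end of a socket pair in the adapter.
//! let (_tx, rx) = std::os::unix::net::UnixStream::pair().unwrap();
//! let mut rx = handle.adapt_io(rx).unwrap();
//!
//! scheduler
//!     .schedule(async move {
//!         // Resolves once the socket becomes readable, without blocking the loop.
//!         rx.readable().await;
//!     })
//!     .unwrap();
//!
//! // Dispatching the loop now polls both the executor and the socket.
//! event_loop.dispatch(None, &mut ()).unwrap();
//! ```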
use std::cell::RefCell;
use std::pin::Pin;
use std::rc::Rc;
use std::task::{Context, Poll as TaskPoll, Waker};
#[cfg(unix)]
use std::os::unix::io::{AsFd, AsRawFd, BorrowedFd, RawFd};
#[cfg(windows)]
use std::os::windows::io::{
AsRawSocket as AsRawFd, AsSocket as AsFd, BorrowedSocket as BorrowedFd, RawSocket as RawFd,
};
#[cfg(feature = "futures-io")]
use futures_io::{AsyncRead, AsyncWrite, IoSlice, IoSliceMut};
use crate::loop_logic::EventIterator;
use crate::{
loop_logic::LoopInner, sources::EventDispatcher, Interest, Mode, Poll, PostAction, Readiness,
Token, TokenFactory,
};
use crate::{AdditionalLifecycleEventsSet, RegistrationToken};
/// Adapter for async IO manipulations
///
/// This type wraps an IO object, providing methods to create futures waiting for its
/// readiness.
///
/// If the `futures-io` cargo feature is enabled, it also implements `AsyncRead` and/or
/// `AsyncWrite` if the underlying type implements `Read` and/or `Write`.
///
/// Note that this adapter and the futures produced from it are *not* threadsafe.
///
/// ## Platform-Specific
///
/// - **Windows:** Usually, on drop, the file descriptor is set back to its previous status.
/// For example, if the file was previously nonblocking it will be set to nonblocking, and
/// if the file was blocking it will be set to blocking. However, on Windows, it is impossible
/// to tell what its status was before. Therefore it will always be set to blocking.
pub struct Async<'l, F: AsFd> {
fd: Option<F>,
dispatcher: Rc<RefCell<IoDispatcher>>,
inner: Rc<dyn IoLoopInner + 'l>,
was_nonblocking: bool,
}
impl<'l, F: AsFd + std::fmt::Debug> std::fmt::Debug for Async<'l, F> {
#[cfg_attr(feature = "nightly_coverage", coverage(off))]
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Async").field("fd", &self.fd).finish()
}
}
impl<'l, F: AsFd> Async<'l, F> {
pub(crate) fn new<Data>(inner: Rc<LoopInner<'l, Data>>, fd: F) -> crate::Result<Async<'l, F>> {
// set non-blocking
let was_nonblocking = set_nonblocking(
#[cfg(unix)]
fd.as_fd(),
#[cfg(windows)]
fd.as_socket(),
true,
)?;
// register in the loop
let dispatcher = Rc::new(RefCell::new(IoDispatcher {
#[cfg(unix)]
fd: fd.as_fd().as_raw_fd(),
#[cfg(windows)]
fd: fd.as_socket().as_raw_socket(),
token: None,
waker: None,
is_registered: false,
interest: Interest::EMPTY,
last_readiness: Readiness::EMPTY,
}));
{
let mut sources = inner.sources.borrow_mut();
let slot = sources.vacant_entry();
slot.source = Some(dispatcher.clone());
dispatcher.borrow_mut().token = Some(Token { inner: slot.token });
}
// SAFETY: We are sure to deregister on drop.
unsafe {
inner.register(&dispatcher)?;
}
// Straightforward casting would require us to add the bound `Data: 'l` but we don't actually need it
// as this module never accesses the dispatch data, so we use transmute to erase it
let inner: Rc<dyn IoLoopInner + 'l> =
unsafe { std::mem::transmute(inner as Rc<dyn IoLoopInner>) };
Ok(Async {
fd: Some(fd),
dispatcher,
inner,
was_nonblocking,
})
}
/// Mutably access the underlying IO object
pub fn get_mut(&mut self) -> &mut F {
self.fd.as_mut().unwrap()
}
/// A future that resolves once the object becomes ready for reading
pub fn readable<'s>(&'s mut self) -> Readable<'s, 'l, F> {
Readable { io: self }
}
/// A future that resolves once the object becomes ready for writing
pub fn writable<'s>(&'s mut self) -> Writable<'s, 'l, F> {
Writable { io: self }
}
/// Remove the async adapter and retrieve the underlying object
pub fn into_inner(mut self) -> F {
self.fd.take().unwrap()
}
fn readiness(&self) -> Readiness {
self.dispatcher.borrow_mut().readiness()
}
fn register_waker(&self, interest: Interest, waker: Waker) -> crate::Result<()> {
{
let mut disp = self.dispatcher.borrow_mut();
disp.interest = interest;
disp.waker = Some(waker);
}
self.inner.reregister(&self.dispatcher)
}
}
/// A future that resolves once the associated object becomes ready for reading
#[derive(Debug)]
pub struct Readable<'s, 'l, F: AsFd> {
io: &'s mut Async<'l, F>,
}
impl<'s, 'l, F: AsFd> std::future::Future for Readable<'s, 'l, F> {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> TaskPoll<()> {
let io = &mut self.as_mut().io;
let readiness = io.readiness();
if readiness.readable || readiness.error {
TaskPoll::Ready(())
} else {
let _ = io.register_waker(Interest::READ, cx.waker().clone());
TaskPoll::Pending
}
}
}
/// A future that resolves once the associated object becomes ready for writing
#[derive(Debug)]
pub struct Writable<'s, 'l, F: AsFd> {
io: &'s mut Async<'l, F>,
}
impl<'s, 'l, F: AsFd> std::future::Future for Writable<'s, 'l, F> {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> TaskPoll<()> {
let io = &mut self.as_mut().io;
let readiness = io.readiness();
if readiness.writable || readiness.error {
TaskPoll::Ready(())
} else {
let _ = io.register_waker(Interest::WRITE, cx.waker().clone());
TaskPoll::Pending
}
}
}
impl<'l, F: AsFd> Drop for Async<'l, F> {
fn drop(&mut self) {
self.inner.kill(&self.dispatcher);
// restore flags
let _ = set_nonblocking(
unsafe { BorrowedFd::borrow_raw(self.dispatcher.borrow().fd) },
self.was_nonblocking,
);
}
}
impl<'l, F: AsFd> Unpin for Async<'l, F> {}
trait IoLoopInner {
unsafe fn register(&self, dispatcher: &RefCell<IoDispatcher>) -> crate::Result<()>;
fn reregister(&self, dispatcher: &RefCell<IoDispatcher>) -> crate::Result<()>;
fn kill(&self, dispatcher: &RefCell<IoDispatcher>);
}
impl<'l, Data> IoLoopInner for LoopInner<'l, Data> {
unsafe fn register(&self, dispatcher: &RefCell<IoDispatcher>) -> crate::Result<()> {
let disp = dispatcher.borrow();
self.poll.borrow_mut().register(
unsafe { BorrowedFd::borrow_raw(disp.fd) },
Interest::EMPTY,
Mode::OneShot,
disp.token.expect("No token for IO dispatcher"),
)
}
fn reregister(&self, dispatcher: &RefCell<IoDispatcher>) -> crate::Result<()> {
let disp = dispatcher.borrow();
self.poll.borrow_mut().reregister(
unsafe { BorrowedFd::borrow_raw(disp.fd) },
disp.interest,
Mode::OneShot,
disp.token.expect("No token for IO dispatcher"),
)
}
fn kill(&self, dispatcher: &RefCell<IoDispatcher>) {
let token = dispatcher
.borrow()
.token
.expect("No token for IO dispatcher");
if let Ok(slot) = self.sources.borrow_mut().get_mut(token.inner) {
slot.source = None;
}
}
}
struct IoDispatcher {
fd: RawFd, // FIXME: `BorrowedFd`? How to statically verify it doesn't outlive file?
token: Option<Token>,
waker: Option<Waker>,
is_registered: bool,
interest: Interest,
last_readiness: Readiness,
}
impl IoDispatcher {
fn readiness(&mut self) -> Readiness {
std::mem::replace(&mut self.last_readiness, Readiness::EMPTY)
}
}
impl<Data> EventDispatcher<Data> for RefCell<IoDispatcher> {
fn process_events(
&self,
readiness: Readiness,
_token: Token,
_data: &mut Data,
) -> crate::Result<PostAction> {
let mut disp = self.borrow_mut();
disp.last_readiness = readiness;
if let Some(waker) = disp.waker.take() {
waker.wake();
}
Ok(PostAction::Continue)
}
fn register(
&self,
_: &mut Poll,
_: &mut AdditionalLifecycleEventsSet,
_: &mut TokenFactory,
) -> crate::Result<()> {
// registration is handled by IoLoopInner
unreachable!()
}
fn reregister(
&self,
_: &mut Poll,
_: &mut AdditionalLifecycleEventsSet,
_: &mut TokenFactory,
) -> crate::Result<bool> {
// registration is handled by IoLoopInner
unreachable!()
}
fn unregister(
&self,
poll: &mut Poll,
_: &mut AdditionalLifecycleEventsSet,
_: RegistrationToken,
) -> crate::Result<bool> {
let disp = self.borrow();
if disp.is_registered {
poll.unregister(unsafe { BorrowedFd::borrow_raw(disp.fd) })?;
}
Ok(true)
}
fn before_sleep(&self) -> crate::Result<Option<(Readiness, Token)>> {
Ok(None)
}
fn before_handle_events(&self, _: EventIterator<'_>) {}
}
/*
* Async IO trait implementations
*/
#[cfg(feature = "futures-io")]
#[cfg_attr(docsrs, doc(cfg(feature = "futures-io")))]
impl<'l, F: AsFd + std::io::Read> AsyncRead for Async<'l, F> {
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> TaskPoll<std::io::Result<usize>> {
match (*self).get_mut().read(buf) {
Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => {}
res => return TaskPoll::Ready(res),
}
self.register_waker(Interest::READ, cx.waker().clone())?;
TaskPoll::Pending
}
fn poll_read_vectored(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
bufs: &mut [IoSliceMut<'_>],
) -> TaskPoll<std::io::Result<usize>> {
match (*self).get_mut().read_vectored(bufs) {
Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => {}
res => return TaskPoll::Ready(res),
}
self.register_waker(Interest::READ, cx.waker().clone())?;
TaskPoll::Pending
}
}
#[cfg(feature = "futures-io")]
#[cfg_attr(docsrs, doc(cfg(feature = "futures-io")))]
impl<'l, F: AsFd + std::io::Write> AsyncWrite for Async<'l, F> {
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> TaskPoll<std::io::Result<usize>> {
match (*self).get_mut().write(buf) {
Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => {}
res => return TaskPoll::Ready(res),
}
self.register_waker(Interest::WRITE, cx.waker().clone())?;
TaskPoll::Pending
}
fn poll_write_vectored(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
bufs: &[IoSlice<'_>],
) -> TaskPoll<std::io::Result<usize>> {
match (*self).get_mut().write_vectored(bufs) {
Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => {}
res => return TaskPoll::Ready(res),
}
self.register_waker(Interest::WRITE, cx.waker().clone())?;
TaskPoll::Pending
}
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> TaskPoll<std::io::Result<()>> {
match (*self).get_mut().flush() {
Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => {}
res => return TaskPoll::Ready(res),
}
self.register_waker(Interest::WRITE, cx.waker().clone())?;
TaskPoll::Pending
}
fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> TaskPoll<std::io::Result<()>> {
self.poll_flush(cx)
}
}
// https://github.com/smol-rs/async-io/blob/6499077421495f2200d5b86918399f3a84bbe8e4/src/lib.rs#L2171-L2195
/// Set the nonblocking status of an FD and return whether it was nonblocking before.
#[allow(clippy::needless_return)]
#[inline]
fn set_nonblocking(fd: BorrowedFd<'_>, is_nonblocking: bool) -> std::io::Result<bool> {
#[cfg(windows)]
{
rustix::io::ioctl_fionbio(fd, is_nonblocking)?;
// Unfortunately it is impossible to tell if a socket was nonblocking on Windows.
// Just say it wasn't for now.
return Ok(false);
}
#[cfg(not(windows))]
{
let previous = rustix::fs::fcntl_getfl(fd)?;
let new = if is_nonblocking {
previous | rustix::fs::OFlags::NONBLOCK
} else {
previous & !(rustix::fs::OFlags::NONBLOCK)
};
if new != previous {
rustix::fs::fcntl_setfl(fd, new)?;
}
return Ok(previous.contains(rustix::fs::OFlags::NONBLOCK));
}
}
#[cfg(all(test, unix, feature = "executor", feature = "futures-io"))]
mod tests {
use futures::io::{AsyncReadExt, AsyncWriteExt};
use crate::sources::futures::executor;
#[test]
fn read_write() {
let mut event_loop = crate::EventLoop::try_new().unwrap();
let handle = event_loop.handle();
let (exec, sched) = executor().unwrap();
handle
.insert_source(exec, move |ret, &mut (), got| {
*got = ret;
})
.unwrap();
let (tx, rx) = std::os::unix::net::UnixStream::pair().unwrap();
let mut tx = handle.adapt_io(tx).unwrap();
let mut rx = handle.adapt_io(rx).unwrap();
let received = std::rc::Rc::new(std::cell::Cell::new(false));
let fut_received = received.clone();
sched
.schedule(async move {
let mut buf = [0; 12];
rx.read_exact(&mut buf).await.unwrap();
assert_eq!(&buf, b"Hello World!");
fut_received.set(true);
})
.unwrap();
// The receiving future alone cannot advance
event_loop
.dispatch(Some(std::time::Duration::from_millis(10)), &mut ())
.unwrap();
assert!(!received.get());
// schedule the writing future as well and wait until finish
sched
.schedule(async move {
tx.write_all(b"Hello World!").await.unwrap();
tx.flush().await.unwrap();
})
.unwrap();
while !received.get() {
event_loop.dispatch(None, &mut ()).unwrap();
}
}
#[test]
fn read_write_vectored() {
let mut event_loop = crate::EventLoop::try_new().unwrap();
let handle = event_loop.handle();
let (exec, sched) = executor().unwrap();
handle
.insert_source(exec, move |ret, &mut (), got| {
*got = ret;
})
.unwrap();
let (tx, rx) = std::os::unix::net::UnixStream::pair().unwrap();
let mut tx = handle.adapt_io(tx).unwrap();
let mut rx = handle.adapt_io(rx).unwrap();
let received = std::rc::Rc::new(std::cell::Cell::new(false));
let fut_received = received.clone();
sched
.schedule(async move {
let mut buf = [0; 12];
let mut ioslices = buf
.chunks_mut(2)
.map(std::io::IoSliceMut::new)
.collect::<Vec<_>>();
let count = rx.read_vectored(&mut ioslices).await.unwrap();
assert_eq!(count, 12);
assert_eq!(&buf, b"Hello World!");
fut_received.set(true);
})
.unwrap();
// The receiving future alone cannot advance
event_loop
.dispatch(Some(std::time::Duration::from_millis(10)), &mut ())
.unwrap();
assert!(!received.get());
// schedule the writing future as well and wait until finish
sched
.schedule(async move {
let buf = b"Hello World!";
let ioslices = buf.chunks(2).map(std::io::IoSlice::new).collect::<Vec<_>>();
let count = tx.write_vectored(&ioslices).await.unwrap();
assert_eq!(count, 12);
tx.flush().await.unwrap();
})
.unwrap();
while !received.get() {
event_loop.dispatch(None, &mut ()).unwrap();
}
}
#[test]
fn readable() {
use std::io::Write;
let mut event_loop = crate::EventLoop::try_new().unwrap();
let handle = event_loop.handle();
let (exec, sched) = executor().unwrap();
handle
.insert_source(exec, move |(), &mut (), got| {
*got = true;
})
.unwrap();
let (mut tx, rx) = std::os::unix::net::UnixStream::pair().unwrap();
let mut rx = handle.adapt_io(rx).unwrap();
sched
.schedule(async move {
rx.readable().await;
})
.unwrap();
let mut dispatched = false;
event_loop
.dispatch(Some(std::time::Duration::from_millis(100)), &mut dispatched)
.unwrap();
// The socket is not yet readable, so the readable() future has not completed
assert!(!dispatched);
tx.write_all(&[42]).unwrap();
tx.flush().unwrap();
// Now we should become readable
while !dispatched {
event_loop.dispatch(None, &mut dispatched).unwrap();
}
}
#[test]
fn writable() {
use std::io::{BufReader, BufWriter, Read, Write};
let mut event_loop = crate::EventLoop::try_new().unwrap();
let handle = event_loop.handle();
let (exec, sched) = executor().unwrap();
handle
.insert_source(exec, move |(), &mut (), got| {
*got = true;
})
.unwrap();
let (mut tx, mut rx) = std::os::unix::net::UnixStream::pair().unwrap();
tx.set_nonblocking(true).unwrap();
rx.set_nonblocking(true).unwrap();
// First, fill the socket buffers
{
let mut writer = BufWriter::new(&mut tx);
let data = vec![42u8; 1024];
loop {
if writer.write(&data).is_err() {
break;
}
}
}
// Now, wait for it to be writable
let mut tx = handle.adapt_io(tx).unwrap();
sched
.schedule(async move {
tx.writable().await;
})
.unwrap();
let mut dispatched = false;
event_loop
.dispatch(Some(std::time::Duration::from_millis(100)), &mut dispatched)
.unwrap();
// The socket is not yet writable, so the writable() future has not completed
assert!(!dispatched);
// now read everything
{
let mut reader = BufReader::new(&mut rx);
let mut buffer = vec![0u8; 1024];
loop {
if reader.read(&mut buffer).is_err() {
break;
}
}
}
// Now we should become writable
while !dispatched {
event_loop.dispatch(None, &mut dispatched).unwrap();
}
}
}

163
vendor/calloop/src/lib.rs vendored Normal file

@@ -0,0 +1,163 @@
//! Calloop, a Callback-based Event Loop
//!
//! This crate provides an [`EventLoop`] type, which is a small abstraction
//! over a polling system. The main difference between this crate
//! and other traditional rust event loops is that it is based on callbacks:
//! you can register several event sources, each being associated with a callback
//! closure that will be invoked whenever the associated event source generates
//! events.
//!
//! The main target use of this event loop is thus for apps that expect to spend
//! most of their time waiting for events and wish to do so in a cheap and convenient
//! way. It is not meant for large scale high performance IO.
//!
//! ## How to use it
//!
//! Below is a quick usage example of calloop. For a more in-depth tutorial, see
//! the [calloop book](https://smithay.github.io/calloop).
//!
//! For simple uses, you can just add event sources with callbacks to the event
//! loop. For example, here's a runnable program that exits after five seconds:
//!
//! ```no_run
//! use calloop::{timer::{Timer, TimeoutAction}, EventLoop, LoopSignal};
//!
//! fn main() {
//! // Create the event loop. The loop is parameterised by the kind of shared
//! // data you want the callbacks to use. In this case, we want to be able to
//! // stop the loop when the timer fires, so we provide the loop with a
//! // LoopSignal, which has the ability to stop the loop from within events. We
//! // just annotate the type here; the actual data is provided later in the
//! // run() call.
//! let mut event_loop: EventLoop<LoopSignal> =
//! EventLoop::try_new().expect("Failed to initialize the event loop!");
//!
//! // Retrieve a handle. It is used to insert new sources into the event loop.
//! // It can be cloned, allowing you to insert sources from within source
//! // callbacks.
//! let handle = event_loop.handle();
//!
//! // Create our event source, a timer, that will expire in 2 seconds
//! let source = Timer::from_duration(std::time::Duration::from_secs(2));
//!
//! // Inserting an event source takes this general form. It can also be done
//! // from within the callback of another event source.
//! handle
//! .insert_source(
//! // a type which implements the EventSource trait
//! source,
//! // a callback that is invoked whenever this source generates an event
//! |event, _metadata, shared_data| {
//! // This callback is given 3 values:
//! // - the event generated by the source (in our case, timer events are the Instant
//! // representing the deadline for which it has fired)
//! // - &mut access to some metadata, specific to the event source (in our case, a
//! // timer handle)
//! // - &mut access to the global shared data that was passed to EventLoop::run or
//! // EventLoop::dispatch (in our case, a LoopSignal object to stop the loop)
//! //
//! // The return type is just () because nothing uses it. Some
//! // sources will expect a Result of some kind instead.
//! println!("Timeout for {:?} expired!", event);
//! // notify the event loop to stop running using the signal in the shared data
//! // (see below)
//! shared_data.stop();
//! // The timer event source requires us to return a TimeoutAction to
//! // specify if the timer should be rescheduled. In our case we just drop it.
//! TimeoutAction::Drop
//! },
//! )
//! .expect("Failed to insert event source!");
//!
//! // Create the shared data for our loop.
//! let mut shared_data = event_loop.get_signal();
//!
//! // Actually run the event loop. This will dispatch received events to their
//! // callbacks, waiting at most 20ms for new events between each invocation of
//! // the provided callback (pass None for the timeout argument if you want to
//! // wait indefinitely between events).
//! //
//! // This is where we pass the *value* of the shared data, as a mutable
//! // reference that will be forwarded to all your callbacks, allowing them to
//! // share some state
//! event_loop
//! .run(
//! std::time::Duration::from_millis(20),
//! &mut shared_data,
//! |_shared_data| {
//! // Finally, this is where you can insert the processing you need
//! // to do between each waiting event, e.g. drawing logic if
//! // you're doing a GUI app.
//! },
//! )
//! .expect("Error during event loop!");
//! }
//! ```
//!
//! ## Event source types
//!
//! The event loop is backed by an OS provided polling selector (epoll on Linux).
//!
//! This crate also provides adapters for common event sources, such as:
//!
//! - [MPSC channels](channel)
//! - [Timers](timer)
//! - [unix signals](signals) on Linux
//!
//! As well as generic objects backed by file descriptors.
//!
//! It is also possible to insert "idle" callbacks. These callbacks represent computations that
//! need to be done at some point, but are not as urgent as processing the events. These callbacks
//! are stored and then executed during [`EventLoop::dispatch`](EventLoop#method.dispatch), once all
//! events from the sources have been processed.
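//!
//! For example (a small sketch; the callback receives a mutable reference to the shared
//! data, here a simple counter):
//!
//! ```no_run
//! let event_loop: calloop::EventLoop<u32> = calloop::EventLoop::try_new().unwrap();
//! let handle = event_loop.handle();
//!
//! // Runs at the end of the current dispatch, once pending events have been processed.
//! handle.insert_idle(|counter| {
//!     *counter += 1;
//! });
//! ```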
//!
//! ## Async/Await compatibility
//!
//! `calloop` can be used with futures, both as an executor and for monitoring Async IO.
//!
//! Activating the `executor` cargo feature will add the [`futures`] module, which provides
//! a future executor that can be inserted into an [`EventLoop`] as yet another [`EventSource`].
//!
//! IO objects can be made Async-aware via the [`LoopHandle::adapt_io`](LoopHandle#method.adapt_io)
//! method. Waking up the futures using these objects is handled by the associated [`EventLoop`]
//! directly.
//!
//! ## Custom event sources
//!
//! You can create custom event sources that can be inserted in the event loop by
//! implementing the [`EventSource`] trait. This can be done either directly from the file
//! descriptors of your source of interest, or by wrapping another event source and further
//! processing its events. An [`EventSource`] can register more than one file descriptor and
//! aggregate them.
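//!
//! A minimal sketch, modelled on the composition pattern used in this crate's own tests:
//! a custom source wraps a ping source, delegates registration to it, and relabels its
//! events:
//!
//! ```no_run
//! use calloop::{
//!     ping::PingSource, EventSource, Poll, PostAction, Readiness, Token, TokenFactory,
//! };
//!
//! struct Wrapper {
//!     inner: PingSource,
//! }
//!
//! impl EventSource for Wrapper {
//!     // Our events are plain strings instead of the ping's `()`.
//!     type Event = &'static str;
//!     type Metadata = ();
//!     type Ret = ();
//!     type Error = Box<dyn std::error::Error + Sync + Send>;
//!
//!     fn process_events<F>(
//!         &mut self,
//!         readiness: Readiness,
//!         token: Token,
//!         mut callback: F,
//!     ) -> Result<PostAction, Self::Error>
//!     where
//!         F: FnMut(Self::Event, &mut Self::Metadata) -> Self::Ret,
//!     {
//!         // Delegate to the inner source and translate its events.
//!         self.inner
//!             .process_events(readiness, token, |_, metadata| callback("ping!", metadata))?;
//!         Ok(PostAction::Continue)
//!     }
//!
//!     fn register(&mut self, poll: &mut Poll, fac: &mut TokenFactory) -> calloop::Result<()> {
//!         self.inner.register(poll, fac)
//!     }
//!
//!     fn reregister(&mut self, poll: &mut Poll, fac: &mut TokenFactory) -> calloop::Result<()> {
//!         self.inner.reregister(poll, fac)
//!     }
//!
//!     fn unregister(&mut self, poll: &mut Poll) -> calloop::Result<()> {
//!         self.inner.unregister(poll)
//!     }
//! }
//! ```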
//!
//! ## Platforms support
//!
//! Currently, calloop is tested on Linux, FreeBSD and macOS.
//!
//! The following platforms are also enabled at compile time but not tested: Android, NetBSD,
//! OpenBSD, DragonFlyBSD.
//!
//! Those platforms *should* work based on the fact that they have the same polling mechanism as
//! tested platforms, but some subtle bugs might still occur.
#![warn(missing_docs, missing_debug_implementations)]
#![allow(clippy::needless_doctest_main)]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(feature = "nightly_coverage", feature(coverage_attribute))]
mod sys;
pub use sys::{Interest, Mode, Poll, Readiness, Token, TokenFactory};
pub use self::loop_logic::{EventLoop, LoopHandle, LoopSignal, RegistrationToken};
pub use self::sources::*;
pub mod error;
pub use error::{Error, InsertError, Result};
pub mod io;
mod list;
mod loop_logic;
mod macros;
mod sources;
mod token;

71
vendor/calloop/src/list.rs vendored Normal file

@@ -0,0 +1,71 @@
use std::rc::Rc;
use crate::sources::EventDispatcher;
use crate::token::TokenInner;
pub(crate) struct SourceEntry<'l, Data> {
pub(crate) token: TokenInner,
pub(crate) source: Option<Rc<dyn EventDispatcher<Data> + 'l>>,
}
pub(crate) struct SourceList<'l, Data> {
sources: Vec<SourceEntry<'l, Data>>,
}
impl<'l, Data> SourceList<'l, Data> {
pub(crate) fn new() -> Self {
SourceList {
sources: Vec::new(),
}
}
pub(crate) fn vacant_entry(&mut self) -> &mut SourceEntry<'l, Data> {
let opt_id = self.sources.iter().position(|slot| slot.source.is_none());
match opt_id {
Some(id) => {
// we are reusing a slot
let slot = &mut self.sources[id];
// increment the slot version
slot.token = slot.token.increment_version();
slot
}
None => {
// we are inserting a new slot
let next_id = self.sources.len();
self.sources.push(SourceEntry {
token: TokenInner::new(self.sources.len())
.expect("Trying to insert too many sources in an event loop."),
source: None,
});
&mut self.sources[next_id]
}
}
}
pub(crate) fn get(&self, token: TokenInner) -> crate::Result<&SourceEntry<'l, Data>> {
let entry = self
.sources
.get(token.get_id())
.ok_or(crate::Error::InvalidToken)?;
if entry.token.same_source_as(token) {
Ok(entry)
} else {
Err(crate::Error::InvalidToken)
}
}
pub(crate) fn get_mut(
&mut self,
token: TokenInner,
) -> crate::Result<&mut SourceEntry<'l, Data>> {
let entry = self
.sources
.get_mut(token.get_id())
.ok_or(crate::Error::InvalidToken)?;
if entry.token.same_source_as(token) {
Ok(entry)
} else {
Err(crate::Error::InvalidToken)
}
}
}

1687
vendor/calloop/src/loop_logic.rs vendored Normal file

File diff suppressed because it is too large

223
vendor/calloop/src/macros.rs vendored Normal file

@@ -0,0 +1,223 @@
//! Macros for helping with common operations in Calloop.
/// Register a set of event sources. Effectively calls
/// [`EventSource::register()`] for all the sources provided.
///
/// Usage:
///
/// ```none,actually-rust-but-see-https://github.com/rust-lang/rust/issues/63193
/// calloop::batch_register!(
/// poll, token_factory,
/// self.source_one,
/// self.source_two,
/// self.source_three,
/// self.source_four,
/// )
/// ```
///
/// Note that there is no scope for customisation; if you need to do special
/// things with a particular source, you'll need to leave it off the list. Also
/// note that this only does try-or-early-return error handling in the order
/// that you list the sources; if you need anything else, don't use this macro.
///
/// [`EventSource::register()`]: crate::EventSource::register()
#[macro_export]
macro_rules! batch_register {
($poll:ident, $token_fac:ident, $( $source:expr ),* $(,)?) => {
{
$(
$source.register($poll, $token_fac)?;
)*
$crate::Result::<_>::Ok(())
}
};
}
/// Reregister a set of event sources. Effectively calls
/// [`EventSource::reregister()`] for all the sources provided.
///
/// Usage:
///
/// ```none,actually-rust-but-see-https://github.com/rust-lang/rust/issues/63193
/// calloop::batch_reregister!(
/// poll, token_factory,
/// self.source_one,
/// self.source_two,
/// self.source_three,
/// self.source_four,
/// )
/// ```
///
/// Note that there is no scope for customisation; if you need to do special
/// things with a particular source, you'll need to leave it off the list. Also
/// note that this only does try-or-early-return error handling in the order
/// that you list the sources; if you need anything else, don't use this macro.
///
/// [`EventSource::reregister()`]: crate::EventSource::reregister()
#[macro_export]
macro_rules! batch_reregister {
($poll:ident, $token_fac:ident, $( $source:expr ),* $(,)?) => {
{
$(
$source.reregister($poll, $token_fac)?;
)*
$crate::Result::<_>::Ok(())
}
};
}
/// Unregister a set of event sources. Effectively calls
/// [`EventSource::unregister()`] for all the sources provided.
///
/// Usage:
///
/// ```none,actually-rust-but-see-https://github.com/rust-lang/rust/issues/63193
/// calloop::batch_unregister!(
/// poll,
/// self.source_one,
/// self.source_two,
/// self.source_three,
/// self.source_four,
/// )
/// ```
///
/// Note that there is no scope for customisation; if you need to do special
/// things with a particular source, you'll need to leave it off the list. Also
/// note that this only does try-or-early-return error handling in the order
/// that you list the sources; if you need anything else, don't use this macro.
///
/// [`EventSource::unregister()`]: crate::EventSource::unregister()
#[macro_export]
macro_rules! batch_unregister {
($poll:ident, $( $source:expr ),* $(,)?) => {
{
$(
$source.unregister($poll)?;
)*
$crate::Result::<_>::Ok(())
}
};
}
#[cfg(test)]
mod tests {
use std::time::Duration;
use crate::{
ping::{make_ping, PingSource},
EventSource, PostAction,
};
struct BatchSource {
ping0: PingSource,
ping1: PingSource,
ping2: PingSource,
}
impl EventSource for BatchSource {
type Event = usize;
type Metadata = ();
type Ret = ();
type Error = Box<dyn std::error::Error + Sync + Send>;
fn process_events<F>(
&mut self,
readiness: crate::Readiness,
token: crate::Token,
mut callback: F,
) -> Result<crate::PostAction, Self::Error>
where
F: FnMut(Self::Event, &mut Self::Metadata) -> Self::Ret,
{
self.ping0
.process_events(readiness, token, |_, m| callback(0, m))?;
self.ping1
.process_events(readiness, token, |_, m| callback(1, m))?;
self.ping2
.process_events(readiness, token, |_, m| callback(2, m))?;
Ok(PostAction::Continue)
}
fn register(
&mut self,
poll: &mut crate::Poll,
token_factory: &mut crate::TokenFactory,
) -> crate::Result<()> {
crate::batch_register!(poll, token_factory, self.ping0, self.ping1, self.ping2)
}
fn reregister(
&mut self,
poll: &mut crate::Poll,
token_factory: &mut crate::TokenFactory,
) -> crate::Result<()> {
crate::batch_reregister!(poll, token_factory, self.ping0, self.ping1, self.ping2)
}
fn unregister(&mut self, poll: &mut crate::Poll) -> crate::Result<()> {
crate::batch_unregister!(poll, self.ping0, self.ping1, self.ping2)
}
}
#[test]
fn test_batch_operations() {
let mut fired = [false; 3];
let (send0, ping0) = make_ping().unwrap();
let (send1, ping1) = make_ping().unwrap();
let (send2, ping2) = make_ping().unwrap();
let top = BatchSource {
ping0,
ping1,
ping2,
};
let mut event_loop = crate::EventLoop::<[bool; 3]>::try_new().unwrap();
let handle = event_loop.handle();
let token = handle
.insert_source(top, |idx, _, fired| {
fired[idx] = true;
})
.unwrap();
send0.ping();
send1.ping();
send2.ping();
event_loop
.dispatch(Duration::new(0, 0), &mut fired)
.unwrap();
assert_eq!(fired, [true; 3]);
fired = [false; 3];
handle.update(&token).unwrap();
send0.ping();
send1.ping();
send2.ping();
event_loop
.dispatch(Duration::new(0, 0), &mut fired)
.unwrap();
assert_eq!(fired, [true; 3]);
fired = [false; 3];
handle.remove(token);
send0.ping();
send1.ping();
send2.ping();
event_loop
.dispatch(Duration::new(0, 0), &mut fired)
.unwrap();
assert_eq!(fired, [false; 3]);
}
}

327
vendor/calloop/src/sources/channel.rs vendored Normal file

@@ -0,0 +1,327 @@
//! An MPSC channel whose receiving end is an event source
//!
//! Create a channel using [`channel()`](channel), which returns a
//! [`Sender`] that can be cloned and sent across threads if `T: Send`,
//! and a [`Channel`] that can be inserted into an [`EventLoop`](crate::EventLoop).
//! It will generate one event per message.
//!
//! A synchronous version of the channel is provided by [`sync_channel`], in which
//! the [`SyncSender`] will block when the channel is full.
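//!
//! A short sketch of typical usage (mirroring the tests at the end of this module; the
//! shared `bool` simply records that a message arrived):
//!
//! ```no_run
//! use calloop::channel::{channel, Event};
//!
//! let mut event_loop = calloop::EventLoop::<bool>::try_new().unwrap();
//! let handle = event_loop.handle();
//!
//! // The receiving end is inserted as an event source; the sender can be moved to
//! // another thread.
//! let (sender, receiver) = channel::<String>();
//! handle
//!     .insert_source(receiver, |event, _, got| match event {
//!         Event::Msg(msg) => {
//!             println!("received: {}", msg);
//!             *got = true;
//!         }
//!         Event::Closed => {}
//!     })
//!     .unwrap();
//!
//! sender.send(String::from("hello")).unwrap();
//!
//! let mut got = false;
//! event_loop
//!     .dispatch(std::time::Duration::ZERO, &mut got)
//!     .unwrap();
//! assert!(got);
//! ```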
use std::sync::mpsc;
use crate::{EventSource, Poll, PostAction, Readiness, Token, TokenFactory};
use super::ping::{make_ping, Ping, PingError, PingSource};
/// The events generated by the channel event source
#[derive(Debug)]
pub enum Event<T> {
/// A message was received and is bundled here
Msg(T),
/// The channel was closed
///
/// This means all the `Sender`s associated with this channel
/// have been dropped; no more messages will ever be received.
Closed,
}
/// The sender end of a channel
///
/// It can be cloned and sent across threads (if `T` is).
#[derive(Debug)]
pub struct Sender<T> {
sender: mpsc::Sender<T>,
ping: Ping,
}
impl<T> Clone for Sender<T> {
#[cfg_attr(feature = "nightly_coverage", coverage(off))]
fn clone(&self) -> Sender<T> {
Sender {
sender: self.sender.clone(),
ping: self.ping.clone(),
}
}
}
impl<T> Sender<T> {
/// Send a message to the channel
///
/// This will wake the event loop and deliver an `Event::Msg` to
/// it containing the provided value.
pub fn send(&self, t: T) -> Result<(), mpsc::SendError<T>> {
self.sender.send(t).map(|()| self.ping.ping())
}
}
impl<T> Drop for Sender<T> {
fn drop(&mut self) {
// ping on drop, to notify about channel closure
self.ping.ping();
}
}
/// The sender end of a synchronous channel
///
/// It can be cloned and sent across threads (if `T` is).
#[derive(Debug)]
pub struct SyncSender<T> {
sender: mpsc::SyncSender<T>,
ping: Ping,
}
impl<T> Clone for SyncSender<T> {
#[cfg_attr(feature = "nightly_coverage", coverage(off))]
fn clone(&self) -> SyncSender<T> {
SyncSender {
sender: self.sender.clone(),
ping: self.ping.clone(),
}
}
}
impl<T> SyncSender<T> {
/// Send a message to the synchronous channel
///
/// This will wake the event loop and deliver an `Event::Msg` to
/// it containing the provided value. If the channel is full, this
/// function will block until the event loop empties it and it can
/// deliver the message.
///
/// Due to the blocking behavior, this method should not be used on the
/// same thread as the one running the event loop, as it could cause deadlocks.
pub fn send(&self, t: T) -> Result<(), mpsc::SendError<T>> {
let ret = self.try_send(t);
match ret {
Ok(()) => Ok(()),
Err(mpsc::TrySendError::Full(t)) => self.sender.send(t).map(|()| self.ping.ping()),
Err(mpsc::TrySendError::Disconnected(t)) => Err(mpsc::SendError(t)),
}
}
/// Send a message to the synchronous channel
///
/// This will wake the event loop and deliver an `Event::Msg` to
/// it containing the provided value. If the channel is full, this
/// function will return an error, but the event loop will still be
/// signaled for readiness.
pub fn try_send(&self, t: T) -> Result<(), mpsc::TrySendError<T>> {
let ret = self.sender.try_send(t);
if let Ok(()) | Err(mpsc::TrySendError::Full(_)) = ret {
self.ping.ping();
}
ret
}
}
/// The receiving end of the channel
///
/// This is the event source to be inserted into your `EventLoop`.
#[derive(Debug)]
pub struct Channel<T> {
receiver: mpsc::Receiver<T>,
source: PingSource,
}
// This impl is safe because the Channel is only able to move around threads
// when it is not inserted into an event loop. (Otherwise it is stuck into
// a Source<_> and the internals of calloop, which are not Send).
// At this point, the Receiver is owned solely by this Channel, and it is obviously
// safe to Send between threads.
unsafe impl<T: Send> Send for Channel<T> {}
impl<T> Channel<T> {
/// Proxy for [`mpsc::Receiver::recv`] to manually poll events.
///
/// *Note*: Normally you would want to use the `Channel` by inserting
/// it into an event loop instead. Use this for example to immediately
/// dispatch pending events after creation.
pub fn recv(&self) -> Result<T, mpsc::RecvError> {
self.receiver.recv()
}
/// Proxy for [`mpsc::Receiver::try_recv`] to manually poll events.
///
/// *Note*: Normally you would want to use the `Channel` by inserting
/// it into an event loop instead. Use this for example to immediately
/// dispatch pending events after creation.
pub fn try_recv(&self) -> Result<T, mpsc::TryRecvError> {
self.receiver.try_recv()
}
}
/// Create a new asynchronous channel
pub fn channel<T>() -> (Sender<T>, Channel<T>) {
let (sender, receiver) = mpsc::channel();
let (ping, source) = make_ping().expect("Failed to create a Ping.");
(Sender { sender, ping }, Channel { receiver, source })
}
/// Create a new synchronous, bounded channel
pub fn sync_channel<T>(bound: usize) -> (SyncSender<T>, Channel<T>) {
let (sender, receiver) = mpsc::sync_channel(bound);
let (ping, source) = make_ping().expect("Failed to create a Ping.");
(SyncSender { sender, ping }, Channel { receiver, source })
}
impl<T> EventSource for Channel<T> {
type Event = Event<T>;
type Metadata = ();
type Ret = ();
type Error = ChannelError;
fn process_events<C>(
&mut self,
readiness: Readiness,
token: Token,
mut callback: C,
) -> Result<PostAction, Self::Error>
where
C: FnMut(Self::Event, &mut Self::Metadata) -> Self::Ret,
{
let receiver = &self.receiver;
self.source
.process_events(readiness, token, |(), &mut ()| loop {
match receiver.try_recv() {
Ok(val) => callback(Event::Msg(val), &mut ()),
Err(mpsc::TryRecvError::Empty) => break,
Err(mpsc::TryRecvError::Disconnected) => {
callback(Event::Closed, &mut ());
break;
}
}
})
.map_err(ChannelError)
}
fn register(&mut self, poll: &mut Poll, token_factory: &mut TokenFactory) -> crate::Result<()> {
self.source.register(poll, token_factory)
}
fn reregister(
&mut self,
poll: &mut Poll,
token_factory: &mut TokenFactory,
) -> crate::Result<()> {
self.source.reregister(poll, token_factory)
}
fn unregister(&mut self, poll: &mut Poll) -> crate::Result<()> {
self.source.unregister(poll)
}
}
/// An error arising from processing events for a channel.
#[derive(thiserror::Error, Debug)]
#[error(transparent)]
pub struct ChannelError(PingError);
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn basic_channel() {
let mut event_loop = crate::EventLoop::try_new().unwrap();
let handle = event_loop.handle();
let (tx, rx) = channel::<()>();
// (got_msg, got_closed)
let mut got = (false, false);
let _channel_token = handle
.insert_source(rx, move |evt, &mut (), got: &mut (bool, bool)| match evt {
Event::Msg(()) => {
got.0 = true;
}
Event::Closed => {
got.1 = true;
}
})
.unwrap();
// nothing is sent, nothing is received
event_loop
.dispatch(Some(::std::time::Duration::ZERO), &mut got)
.unwrap();
assert_eq!(got, (false, false));
// a message is sent
tx.send(()).unwrap();
event_loop
.dispatch(Some(::std::time::Duration::ZERO), &mut got)
.unwrap();
assert_eq!(got, (true, false));
// the sender is dropped
::std::mem::drop(tx);
event_loop
.dispatch(Some(::std::time::Duration::ZERO), &mut got)
.unwrap();
assert_eq!(got, (true, true));
}
#[test]
fn basic_sync_channel() {
let mut event_loop = crate::EventLoop::try_new().unwrap();
let handle = event_loop.handle();
let (tx, rx) = sync_channel::<()>(2);
let mut received = (0, false);
let _channel_token = handle
.insert_source(
rx,
move |evt, &mut (), received: &mut (u32, bool)| match evt {
Event::Msg(()) => {
received.0 += 1;
}
Event::Closed => {
received.1 = true;
}
},
)
.unwrap();
// nothing is sent, nothing is received
event_loop
.dispatch(Some(::std::time::Duration::ZERO), &mut received)
.unwrap();
assert_eq!(received.0, 0);
assert!(!received.1);
// fill the channel
tx.send(()).unwrap();
tx.send(()).unwrap();
assert!(tx.try_send(()).is_err());
// empty it
event_loop
.dispatch(Some(::std::time::Duration::ZERO), &mut received)
.unwrap();
assert_eq!(received.0, 2);
assert!(!received.1);
// send a final message and drop the sender
tx.send(()).unwrap();
std::mem::drop(tx);
// final read of the channel
event_loop
.dispatch(Some(::std::time::Duration::ZERO), &mut received)
.unwrap();
assert_eq!(received.0, 3);
assert!(received.1);
}
}

423
vendor/calloop/src/sources/futures.rs vendored Normal file

@@ -0,0 +1,423 @@
//! A futures executor as an event source
//!
//! Only available with the `executor` cargo feature of `calloop`.
//!
//! This executor is intended for light futures, which will be polled as part of your
//! event loop. Such futures may be waiting for IO, or for some external computation on
//! another thread, for example.
//!
//! You can create a new executor using the `executor` function, which creates a pair
//! `(Executor<T>, Scheduler<T>)` to handle futures that all evaluate to type `T`. The
//! executor should be inserted into your event loop, and will yield the return values of
//! the futures as they finish into your callback. The scheduler can be cloned and used
//! to send futures into the executor for execution. A generic executor can be obtained
//! by choosing `T = ()` and letting futures handle the forwarding of their return values
//! (if any) by their own means.
//!
//! **Note:** The futures must have their own means of being woken up, as this executor is,
//! by itself, not I/O aware. See [`LoopHandle::adapt_io`](crate::LoopHandle#method.adapt_io)
//! for that, or you can use some other mechanism if you prefer.
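//!
//! A short sketch of the flow (mirroring the tests at the end of this module; the future
//! here resolves immediately):
//!
//! ```no_run
//! let mut event_loop = calloop::EventLoop::<Option<u32>>::try_new().unwrap();
//! let handle = event_loop.handle();
//!
//! // The executor is inserted like any other event source; its callback receives the
//! // output of each finished future.
//! let (executor, scheduler) = calloop::futures::executor::<u32>().unwrap();
//! handle
//!     .insert_source(executor, |value, _, result| {
//!         *result = Some(value);
//!     })
//!     .unwrap();
//!
//! // The scheduler sends futures into the executor, possibly from other callbacks.
//! scheduler.schedule(async { 42 }).unwrap();
//!
//! let mut result = None;
//! while result.is_none() {
//!     event_loop.dispatch(None, &mut result).unwrap();
//! }
//! assert_eq!(result, Some(42));
//! ```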
use async_task::{Builder, Runnable};
use slab::Slab;
use std::{
cell::RefCell,
future::Future,
rc::Rc,
sync::{
atomic::{AtomicBool, Ordering},
mpsc, Arc, Mutex,
},
task::Waker,
};
use crate::{
sources::{
channel::ChannelError,
ping::{make_ping, Ping, PingError, PingSource},
EventSource,
},
Poll, PostAction, Readiness, Token, TokenFactory,
};
/// A future executor as an event source
#[derive(Debug)]
pub struct Executor<T> {
/// Shared state between the executor and the scheduler.
state: Rc<State<T>>,
/// Notifies us when the executor is woken up.
ping: PingSource,
}
/// A scheduler to send futures to an executor
#[derive(Clone, Debug)]
pub struct Scheduler<T> {
/// Shared state between the executor and the scheduler.
state: Rc<State<T>>,
}
/// The inner state of the executor.
#[derive(Debug)]
struct State<T> {
/// The incoming queue of runnables to be executed.
incoming: mpsc::Receiver<Runnable<usize>>,
/// The sender corresponding to `incoming`.
sender: Arc<Sender>,
/// The list of currently active tasks.
///
/// This is set to `None` when the executor is destroyed.
active_tasks: RefCell<Option<Slab<Active<T>>>>,
}
/// Send a future to an executor.
///
/// This needs to be thread-safe, as it is called from a `Waker` that may be on a different thread.
#[derive(Debug)]
struct Sender {
/// The sender used to send runnables to the executor.
///
/// `mpsc::Sender` is `!Sync`, wrapping it in a `Mutex` makes it `Sync`.
sender: Mutex<mpsc::Sender<Runnable<usize>>>,
/// The ping source used to wake up the executor.
wake_up: Ping,
/// Whether the executor has already been woken.
notified: AtomicBool,
}
/// An active future or its result.
#[derive(Debug)]
enum Active<T> {
/// The future is currently being polled.
///
/// Waking this waker will insert the runnable into `incoming`.
Future(Waker),
/// The future has finished polling, and its result is stored here.
Finished(T),
}
impl<T> Active<T> {
fn is_finished(&self) -> bool {
matches!(self, Active::Finished(_))
}
}
impl<T> Scheduler<T> {
/// Sends the given future to the executor associated to this scheduler
///
/// Returns an error if the executor no longer exists.
pub fn schedule<Fut: 'static>(&self, future: Fut) -> Result<(), ExecutorDestroyed>
where
Fut: Future<Output = T>,
T: 'static,
{
/// Store this future's result in the executor.
struct StoreOnDrop<'a, T> {
index: usize,
value: Option<T>,
state: &'a State<T>,
}
impl<T> Drop for StoreOnDrop<'_, T> {
fn drop(&mut self) {
let mut active_tasks = self.state.active_tasks.borrow_mut();
if let Some(active_tasks) = active_tasks.as_mut() {
if let Some(value) = self.value.take() {
active_tasks[self.index] = Active::Finished(value);
} else {
// The future was dropped before it finished.
// Remove it from the active list.
active_tasks.remove(self.index);
}
}
}
}
fn assert_send_and_sync<T: Send + Sync>(_: &T) {}
let mut active_guard = self.state.active_tasks.borrow_mut();
let active_tasks = active_guard.as_mut().ok_or(ExecutorDestroyed)?;
// Wrap the future in another future that polls it and stores the result.
let index = active_tasks.vacant_key();
let future = {
let state = self.state.clone();
async move {
let mut guard = StoreOnDrop {
index,
value: None,
state: &state,
};
// Get the value of the future.
let value = future.await;
// Store it in the executor.
guard.value = Some(value);
}
};
// A schedule function that inserts the runnable into the incoming queue.
let schedule = {
let sender = self.state.sender.clone();
move |runnable| sender.send(runnable)
};
assert_send_and_sync(&schedule);
// Spawn the future.
let (runnable, task) = Builder::new()
.metadata(index)
.spawn_local(move |_| future, schedule);
// Insert the runnable into the set of active tasks.
active_tasks.insert(Active::Future(runnable.waker()));
drop(active_guard);
// Schedule the runnable and detach the task so it isn't cancellable.
runnable.schedule();
task.detach();
Ok(())
}
}
impl Sender {
/// Send a runnable to the executor.
fn send(&self, runnable: Runnable<usize>) {
// Send on the channel.
//
// All we do with the lock is call `send`, so there's no chance of any state being corrupted on
// panic. Therefore it's safe to ignore the mutex poison.
if let Err(e) = self
.sender
.lock()
.unwrap_or_else(|e| e.into_inner())
.send(runnable)
{
// The runnable must be dropped on its origin thread, since the original future might be
// !Send. This channel immediately sends it back to the Executor, which is pinned to the
// origin thread. The executor's Drop implementation will force all of the runnables to be
// dropped, therefore the channel should always be available. If we can't send the runnable,
// it indicates that the above behavior is broken and that unsoundness has occurred. The
// only option at this stage is to forget the runnable and leak the future.
std::mem::forget(e);
unreachable!("Attempted to send runnable to a stopped executor");
}
// If the executor is already awake, don't bother waking it up again.
if self.notified.swap(true, Ordering::SeqCst) {
return;
}
// Wake the executor.
self.wake_up.ping();
}
}
impl<T> Drop for Executor<T> {
fn drop(&mut self) {
let active_tasks = self.state.active_tasks.borrow_mut().take().unwrap();
// Wake all of the active tasks in order to destroy their runnables.
for (_, task) in active_tasks {
if let Active::Future(waker) = task {
// Don't let a panicking waker blow everything up.
//
// There is a chance that a future will panic and, during the unwinding process,
// drop this executor. However, since the future panicked, there is a possibility
// that the internal state of the waker will be invalid in such a way that the waker
// panics as well. Since this would be a panic during a panic, Rust will upgrade it
// into an abort.
//
// In the interest of not aborting without a good reason, we just drop the panic here.
std::panic::catch_unwind(|| waker.wake()).ok();
}
}
// Drain the queue in order to drop all of the runnables.
while self.state.incoming.try_recv().is_ok() {}
}
}
/// Error generated when trying to schedule a future after the
/// executor was destroyed.
#[derive(thiserror::Error, Debug)]
#[error("the executor was destroyed")]
pub struct ExecutorDestroyed;
/// Create a new executor, and its associated scheduler
///
/// May fail due to OS errors preventing calloop from setting up its internal pipes (for
/// example if your process has reached its file descriptor limit).
pub fn executor<T>() -> crate::Result<(Executor<T>, Scheduler<T>)> {
let (sender, incoming) = mpsc::channel();
let (wake_up, ping) = make_ping()?;
let state = Rc::new(State {
incoming,
active_tasks: RefCell::new(Some(Slab::new())),
sender: Arc::new(Sender {
sender: Mutex::new(sender),
wake_up,
notified: AtomicBool::new(false),
}),
});
Ok((
Executor {
state: state.clone(),
ping,
},
Scheduler { state },
))
}
impl<T> EventSource for Executor<T> {
type Event = T;
type Metadata = ();
type Ret = ();
type Error = ExecutorError;
fn process_events<F>(
&mut self,
readiness: Readiness,
token: Token,
mut callback: F,
) -> Result<PostAction, Self::Error>
where
F: FnMut(T, &mut ()),
{
let state = &self.state;
let clear_readiness = {
let mut clear_readiness = false;
// Process runnables, but not too many at a time; better to move onto the next event quickly!
for _ in 0..1024 {
let runnable = match state.incoming.try_recv() {
Ok(runnable) => runnable,
Err(_) => {
// Make sure to clear the readiness if there are no more runnables.
clear_readiness = true;
break;
}
};
// Run the runnable.
let index = *runnable.metadata();
runnable.run();
// If the runnable finished with a result, call the callback.
let mut active_guard = state.active_tasks.borrow_mut();
let active_tasks = active_guard.as_mut().unwrap();
if let Some(state) = active_tasks.get(index) {
if state.is_finished() {
// Take out the state and provide it to the caller.
let result = match active_tasks.remove(index) {
Active::Finished(result) => result,
_ => unreachable!(),
};
// Drop the guard since the callback may register another future to the scheduler.
drop(active_guard);
callback(result, &mut ());
}
}
}
clear_readiness
};
// Clear the readiness of the ping source if there are no more runnables.
if clear_readiness {
self.ping
.process_events(readiness, token, |(), &mut ()| {})
.map_err(ExecutorError::WakeError)?;
}
// Set to the unnotified state.
state.sender.notified.store(false, Ordering::SeqCst);
Ok(PostAction::Continue)
}
fn register(&mut self, poll: &mut Poll, token_factory: &mut TokenFactory) -> crate::Result<()> {
self.ping.register(poll, token_factory)?;
Ok(())
}
fn reregister(
&mut self,
poll: &mut Poll,
token_factory: &mut TokenFactory,
) -> crate::Result<()> {
self.ping.reregister(poll, token_factory)?;
Ok(())
}
fn unregister(&mut self, poll: &mut Poll) -> crate::Result<()> {
self.ping.unregister(poll)?;
Ok(())
}
}
/// An error arising from processing events in an async executor event source.
#[derive(thiserror::Error, Debug)]
pub enum ExecutorError {
/// Error while reading new futures added via [`Scheduler::schedule()`].
#[error("error adding new futures")]
NewFutureError(ChannelError),
/// Error while processing wake events from existing futures.
#[error("error processing wake events")]
WakeError(PingError),
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn ready() {
let mut event_loop = crate::EventLoop::<u32>::try_new().unwrap();
let handle = event_loop.handle();
let (exec, sched) = executor::<u32>().unwrap();
handle
.insert_source(exec, move |ret, &mut (), got| {
*got = ret;
})
.unwrap();
let mut got = 0;
let fut = async { 42 };
event_loop
.dispatch(Some(::std::time::Duration::ZERO), &mut got)
.unwrap();
// the future is not yet inserted, and thus has not yet run
assert_eq!(got, 0);
sched.schedule(fut).unwrap();
event_loop
.dispatch(Some(::std::time::Duration::ZERO), &mut got)
.unwrap();
// the future has run
assert_eq!(got, 42);
}
}

506
vendor/calloop/src/sources/generic.rs vendored Normal file

@@ -0,0 +1,506 @@
//! A generic event source wrapping an IO object or file descriptor
//!
//! You can use this general purpose adapter around file-descriptor backed objects to
//! insert into an [`EventLoop`](crate::EventLoop).
//!
//! The events generated by this [`Generic`] event source are the [`Readiness`](crate::Readiness)
//! notifications themselves, and the monitored object is provided to your callback as the second
//! argument.
//!
#![cfg_attr(unix, doc = "```")]
#![cfg_attr(not(unix), doc = "```no_run")]
//! # extern crate calloop;
//! use calloop::{generic::Generic, Interest, Mode, PostAction};
//!
//! # fn main() {
//! # let mut event_loop = calloop::EventLoop::<()>::try_new()
//! # .expect("Failed to initialize the event loop!");
//! # let handle = event_loop.handle();
//! # #[cfg(unix)]
//! # let io_object = std::io::stdin();
//! # #[cfg(windows)]
//! # let io_object: std::net::TcpStream = panic!();
//! handle.insert_source(
//! // wrap your IO object in a Generic, here we register for read readiness
//! // in level-triggering mode
//! Generic::new(io_object, Interest::READ, Mode::Level),
//! |readiness, io_object, shared_data| {
//! // The first argument of the callback is a Readiness
//! // The second is a &mut reference to your object
//!
//! // your callback needs to return a Result<PostAction, std::io::Error>
//! // if it returns an error, the event loop will consider this
//! // event source as erroring and report it to the user.
//! Ok(PostAction::Continue)
//! }
//! );
//! # }
//! ```
//!
//! It can also help you implement your own event sources: just have
//! these `Generic<_>` as fields of your event source, and delegate the
//! [`EventSource`](crate::EventSource) implementation to them.
use polling::Poller;
use std::{borrow, marker::PhantomData, ops, sync::Arc};
#[cfg(unix)]
use std::os::unix::io::{AsFd, AsRawFd, BorrowedFd};
#[cfg(windows)]
use std::os::windows::io::{
AsRawSocket as AsRawFd, AsSocket as AsFd, BorrowedSocket as BorrowedFd,
};
use crate::{EventSource, Interest, Mode, Poll, PostAction, Readiness, Token, TokenFactory};
/// Wrapper to use a type implementing `AsRawFd` but not `AsFd` with `Generic`
#[derive(Debug)]
pub struct FdWrapper<T: AsRawFd>(T);
impl<T: AsRawFd> FdWrapper<T> {
/// Wrap `inner` with an `AsFd` implementation.
///
/// # Safety
/// This is safe if the `AsRawFd` implementation of `inner` always returns
/// a valid fd. This should usually be true for types implementing
/// `AsRawFd`. But this isn't guaranteed with `FdWrapper<RawFd>`.
pub unsafe fn new(inner: T) -> Self {
Self(inner)
}
}
impl<T: AsRawFd> ops::Deref for FdWrapper<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<T: AsRawFd> ops::DerefMut for FdWrapper<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl<T: AsRawFd> AsFd for FdWrapper<T> {
#[cfg(unix)]
fn as_fd(&self) -> BorrowedFd {
unsafe { BorrowedFd::borrow_raw(self.0.as_raw_fd()) }
}
#[cfg(windows)]
fn as_socket(&self) -> BorrowedFd {
unsafe { BorrowedFd::borrow_raw(self.0.as_raw_socket()) }
}
}
/// A wrapper around a type that does not expose safe mutable access to its contents.
///
/// The [`EventSource`] trait's `Metadata` type demands mutable access to the inner I/O source.
/// However, the inner polling source used by `calloop` keeps the handle-based equivalent of an
/// immutable pointer to the underlying object's I/O handle. Therefore, if the inner source is
/// dropped, this leaves behind a dangling pointer which immediately invokes undefined behavior
/// on the next poll of the event loop.
///
/// In order to prevent this from happening, the [`Generic`] I/O source must not directly expose
/// a mutable reference to the underlying handle. This type wraps around the underlying handle and
/// easily allows users to take immutable (`&`) references to the type, but makes mutable (`&mut`)
/// references unsafe to get. Therefore, it prevents the source from being moved out and dropped
/// while it is still registered in the event loop.
///
/// [`EventSource`]: crate::EventSource
#[derive(Debug)]
pub struct NoIoDrop<T>(T);
impl<T> NoIoDrop<T> {
/// Get a mutable reference.
///
/// # Safety
///
/// The inner type's I/O source must not be dropped.
pub unsafe fn get_mut(&mut self) -> &mut T {
&mut self.0
}
}
impl<T> AsRef<T> for NoIoDrop<T> {
fn as_ref(&self) -> &T {
&self.0
}
}
impl<T> borrow::Borrow<T> for NoIoDrop<T> {
fn borrow(&self) -> &T {
&self.0
}
}
impl<T> ops::Deref for NoIoDrop<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<T: AsFd> AsFd for NoIoDrop<T> {
#[cfg(unix)]
fn as_fd(&self) -> BorrowedFd<'_> {
// SAFETY: The inner type is not mutated.
self.0.as_fd()
}
#[cfg(windows)]
fn as_socket(&self) -> BorrowedFd<'_> {
// SAFETY: The inner type is not mutated.
self.0.as_socket()
}
}
/// A generic event source wrapping a FD-backed type
#[derive(Debug)]
pub struct Generic<F: AsFd, E = std::io::Error> {
/// The wrapped FD-backed type.
///
/// This must be deregistered before it is dropped.
file: Option<NoIoDrop<F>>,
/// The programmed interest
pub interest: Interest,
/// The programmed mode
pub mode: Mode,
/// Back-reference to the poller.
///
/// This is needed to drop the original file.
poller: Option<Arc<Poller>>,
// This token is used by the event loop logic to look up this source when an
// event occurs.
token: Option<Token>,
// This allows us to make the associated error and return types generic.
_error_type: PhantomData<E>,
}
impl<F: AsFd> Generic<F, std::io::Error> {
/// Wrap a FD-backed type into a `Generic` event source that uses
/// [`std::io::Error`] as its error type.
pub fn new(file: F, interest: Interest, mode: Mode) -> Generic<F, std::io::Error> {
Generic {
file: Some(NoIoDrop(file)),
interest,
mode,
token: None,
poller: None,
_error_type: PhantomData,
}
}
/// Wrap a FD-backed type into a `Generic` event source using an arbitrary error type.
pub fn new_with_error<E>(file: F, interest: Interest, mode: Mode) -> Generic<F, E> {
Generic {
file: Some(NoIoDrop(file)),
interest,
mode,
token: None,
poller: None,
_error_type: PhantomData,
}
}
}
impl<F: AsFd, E> Generic<F, E> {
/// Unwrap the `Generic` source to retrieve the underlying type
pub fn unwrap(mut self) -> F {
let NoIoDrop(file) = self.file.take().unwrap();
// Remove it from the poller.
if let Some(poller) = self.poller.take() {
poller
.delete(
#[cfg(unix)]
file.as_fd(),
#[cfg(windows)]
file.as_socket(),
)
.ok();
}
file
}
/// Get a reference to the underlying type.
pub fn get_ref(&self) -> &F {
&self.file.as_ref().unwrap().0
}
/// Get a mutable reference to the underlying type.
///
/// # Safety
///
/// This is unsafe because it allows you to modify the underlying type, which
/// allows you to drop the underlying event source. Dropping the underlying source
/// leads to a dangling reference.
pub unsafe fn get_mut(&mut self) -> &mut F {
self.file.as_mut().unwrap().get_mut()
}
}
impl<F: AsFd, E> Drop for Generic<F, E> {
fn drop(&mut self) {
// Remove it from the poller.
if let (Some(file), Some(poller)) = (self.file.take(), self.poller.take()) {
poller
.delete(
#[cfg(unix)]
file.as_fd(),
#[cfg(windows)]
file.as_socket(),
)
.ok();
}
}
}
impl<F, E> EventSource for Generic<F, E>
where
F: AsFd,
E: Into<Box<dyn std::error::Error + Send + Sync>>,
{
type Event = Readiness;
type Metadata = NoIoDrop<F>;
type Ret = Result<PostAction, E>;
type Error = E;
fn process_events<C>(
&mut self,
readiness: Readiness,
token: Token,
mut callback: C,
) -> Result<PostAction, Self::Error>
where
C: FnMut(Self::Event, &mut Self::Metadata) -> Self::Ret,
{
// If the token is invalid or not ours, skip processing.
if self.token != Some(token) {
return Ok(PostAction::Continue);
}
callback(readiness, self.file.as_mut().unwrap())
}
fn register(&mut self, poll: &mut Poll, token_factory: &mut TokenFactory) -> crate::Result<()> {
let token = token_factory.token();
// SAFETY: We ensure that we have a poller to deregister with (see below).
unsafe {
poll.register(
&self.file.as_ref().unwrap().0,
self.interest,
self.mode,
token,
)?;
}
// Make sure we can use the poller to deregister if need be.
// But only if registration actually succeeded
// So that we don't try to unregister the FD on drop if it wasn't registered
// in the first place (for example if registration failed because of a duplicate insertion)
self.poller = Some(poll.poller().clone());
self.token = Some(token);
Ok(())
}
fn reregister(
&mut self,
poll: &mut Poll,
token_factory: &mut TokenFactory,
) -> crate::Result<()> {
let token = token_factory.token();
poll.reregister(
&self.file.as_ref().unwrap().0,
self.interest,
self.mode,
token,
)?;
self.token = Some(token);
Ok(())
}
fn unregister(&mut self, poll: &mut Poll) -> crate::Result<()> {
poll.unregister(&self.file.as_ref().unwrap().0)?;
self.poller = None;
self.token = None;
Ok(())
}
}
#[cfg(all(unix, test))]
mod tests {
use std::io::{Read, Write};
use super::Generic;
use crate::{Dispatcher, Interest, Mode, PostAction};
#[cfg(unix)]
#[test]
fn dispatch_unix() {
use std::os::unix::net::UnixStream;
let mut event_loop = crate::EventLoop::try_new().unwrap();
let handle = event_loop.handle();
let (mut tx, rx) = UnixStream::pair().unwrap();
let generic = Generic::new(rx, Interest::READ, Mode::Level);
let mut dispatched = false;
let _generic_token = handle
.insert_source(generic, move |readiness, file, d| {
assert!(readiness.readable);
// we have not registered for writability
assert!(!readiness.writable);
let mut buffer = vec![0; 10];
let ret = (&**file).read(&mut buffer).unwrap();
assert_eq!(ret, 6);
assert_eq!(&buffer[..6], &[1, 2, 3, 4, 5, 6]);
*d = true;
Ok(PostAction::Continue)
})
.unwrap();
event_loop
.dispatch(Some(::std::time::Duration::ZERO), &mut dispatched)
.unwrap();
assert!(!dispatched);
let ret = tx.write(&[1, 2, 3, 4, 5, 6]).unwrap();
assert_eq!(ret, 6);
tx.flush().unwrap();
event_loop
.dispatch(Some(::std::time::Duration::ZERO), &mut dispatched)
.unwrap();
assert!(dispatched);
}
#[test]
fn register_deregister_unix() {
use std::os::unix::net::UnixStream;
let mut event_loop = crate::EventLoop::try_new().unwrap();
let handle = event_loop.handle();
let (mut tx, rx) = UnixStream::pair().unwrap();
let generic = Generic::new(rx, Interest::READ, Mode::Level);
let dispatcher = Dispatcher::new(generic, move |_, _, d| {
*d = true;
Ok(PostAction::Continue)
});
let mut dispatched = false;
let generic_token = handle.register_dispatcher(dispatcher.clone()).unwrap();
event_loop
.dispatch(Some(::std::time::Duration::ZERO), &mut dispatched)
.unwrap();
assert!(!dispatched);
// remove the source, and then write something
event_loop.handle().remove(generic_token);
let ret = tx.write(&[1, 2, 3, 4, 5, 6]).unwrap();
assert_eq!(ret, 6);
tx.flush().unwrap();
event_loop
.dispatch(Some(::std::time::Duration::ZERO), &mut dispatched)
.unwrap();
// the source has not been dispatched, as the source is no longer here
assert!(!dispatched);
// insert it again
let generic = dispatcher.into_source_inner();
let _generic_token = handle
.insert_source(generic, move |readiness, file, d| {
assert!(readiness.readable);
// we have not registered for writability
assert!(!readiness.writable);
let mut buffer = vec![0; 10];
let ret = (&**file).read(&mut buffer).unwrap();
assert_eq!(ret, 6);
assert_eq!(&buffer[..6], &[1, 2, 3, 4, 5, 6]);
*d = true;
Ok(PostAction::Continue)
})
.unwrap();
event_loop
.dispatch(Some(::std::time::Duration::ZERO), &mut dispatched)
.unwrap();
// the event has now been properly dispatched
assert!(dispatched);
}
// Duplicate insertion does not fail on all platforms, but does on Linux
#[cfg(target_os = "linux")]
#[test]
fn duplicate_insert() {
use std::os::unix::{
io::{AsFd, BorrowedFd},
net::UnixStream,
};
let event_loop = crate::EventLoop::<()>::try_new().unwrap();
let handle = event_loop.handle();
let (_, rx) = UnixStream::pair().unwrap();
// Rc only implements AsFd since 1.69...
struct RcFd<T> {
rc: std::rc::Rc<T>,
}
impl<T: AsFd> AsFd for RcFd<T> {
fn as_fd(&self) -> BorrowedFd<'_> {
self.rc.as_fd()
}
}
let rx = std::rc::Rc::new(rx);
let token = handle
.insert_source(
Generic::new(RcFd { rc: rx.clone() }, Interest::READ, Mode::Level),
|_, _, _| Ok(PostAction::Continue),
)
.unwrap();
// inserting the same FD a second time should fail
let ret = handle.insert_source(
Generic::new(RcFd { rc: rx.clone() }, Interest::READ, Mode::Level),
|_, _, _| Ok(PostAction::Continue),
);
assert!(ret.is_err());
std::mem::drop(ret);
// but the original token is still registered
handle.update(&token).unwrap();
}
}

790
vendor/calloop/src/sources/mod.rs vendored Normal file
View File

@@ -0,0 +1,790 @@
use std::{
cell::{Ref, RefCell, RefMut},
ops::{BitOr, BitOrAssign},
rc::Rc,
};
use log::trace;
pub use crate::loop_logic::EventIterator;
use crate::{sys::TokenFactory, Poll, Readiness, RegistrationToken, Token};
pub mod channel;
#[cfg(feature = "executor")]
#[cfg_attr(docsrs, doc(cfg(feature = "executor")))]
pub mod futures;
pub mod generic;
pub mod ping;
#[cfg(all(target_os = "linux", feature = "signals"))]
#[cfg_attr(docsrs, doc(cfg(target_os = "linux")))]
pub mod signals;
pub mod timer;
pub mod transient;
/// Possible actions that an event source can request from the event loop
/// once its events have been processed.
///
/// `PostAction` values can be combined with the `|` (bit-or) operator (or with
/// `|=`) with the result that:
/// - if both values are identical, the result is that value
/// - if they are different, the result is [`Reregister`](PostAction::Reregister)
///
/// Bit-or-ing these results is useful for composed sources to combine the
/// results of their child sources, but note that it only applies to the child
/// sources. For example, if every child source returns `Continue`, the result
/// will be `Continue`, but the parent source might still need to return
/// `Reregister` or something else depending on any additional logic it uses.
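///
/// For example (mirroring the behaviour checked by this module's unit tests):
///
/// ```
/// use calloop::PostAction;
///
/// // Identical values combine into themselves...
/// assert_eq!(PostAction::Continue | PostAction::Continue, PostAction::Continue);
/// // ...while differing values request a re-registration.
/// assert_eq!(PostAction::Continue | PostAction::Disable, PostAction::Reregister);
///
/// let mut action = PostAction::Continue;
/// action |= PostAction::Remove;
/// assert_eq!(action, PostAction::Reregister);
/// ```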
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum PostAction {
/// Continue listening for events on this source as before
Continue,
/// Trigger a re-registration of this source
Reregister,
/// Disable this source
///
/// Has the same effect as [`LoopHandle::disable`](crate::LoopHandle#method.disable)
Disable,
/// Remove this source from the eventloop
///
/// Has the same effect as [`LoopHandle::kill`](crate::LoopHandle#method.kill)
Remove,
}
/// Combines `PostAction` values returned from nested event sources.
impl BitOr for PostAction {
type Output = Self;
fn bitor(self, rhs: Self) -> Self::Output {
if matches!(self, x if x == rhs) {
self
} else {
Self::Reregister
}
}
}
/// Combines `PostAction` values returned from nested event sources.
impl BitOrAssign for PostAction {
fn bitor_assign(&mut self, rhs: Self) {
if *self != rhs {
*self = Self::Reregister;
}
}
}
/// Trait representing an event source
///
/// This is the trait you need to implement if you wish to create your own
/// calloop-compatible event sources.
///
/// The 3 associated types define the type of closure the user will need to
/// provide to process events for your event source.
///
/// The `process_events` method will be called when one of the FDs you registered
/// is ready, with the associated readiness and token.
///
/// The `register`, `reregister` and `unregister` methods are plumbing to let your
/// source register itself with the polling system. See their documentation for details.
///
/// In case your event source needs to do some special processing before or after a
/// polling session occurs (to prepare the underlying source for polling, and cleanup
/// after that), you can override [`NEEDS_EXTRA_LIFECYCLE_EVENTS`] to `true`.
/// For all sources for which that constant is `true`, the methods [`before_sleep`] and
/// [`before_handle_events`] will be called.
/// [`before_sleep`] is called before the polling system performs a poll operation.
/// [`before_handle_events`] is called before any process_events methods have been called.
/// This means that during `process_events` you can assume that all cleanup has occurred on
/// all sources.
///
/// [`NEEDS_EXTRA_LIFECYCLE_EVENTS`]: EventSource::NEEDS_EXTRA_LIFECYCLE_EVENTS
/// [`before_sleep`]: EventSource::before_sleep
/// [`before_handle_events`]: EventSource::before_handle_events
pub trait EventSource {
/// The type of events generated by your source.
type Event;
/// Some metadata of your event source
///
/// This is typically useful if your source contains some internal state that
/// the user may need to interact with when processing events. The user callback
/// will receive a `&mut Metadata` reference.
///
/// Set to `()` if not needed.
type Metadata;
/// The return type of the user callback
///
/// If the user needs to return some value back to your event source once its
/// processing is finished (to indicate success or failure for example), you can
/// specify it using this type.
///
/// Set to `()` if not needed.
type Ret;
/// The error type returned from
/// [`process_events()`](Self::process_events()) (not the user callback!).
type Error: Into<Box<dyn std::error::Error + Sync + Send>>;
/// Process any relevant events
///
/// This method will be called every time one of the FDs you registered becomes
/// ready, including the readiness details and the associated token.
///
/// Your event source will then do some processing of the file descriptor(s) to generate
/// events, and call the provided `callback` for each one of them.
///
/// You should ensure you drained the file descriptors of their events, especially if using
/// edge-triggered mode.
fn process_events<F>(
&mut self,
readiness: Readiness,
token: Token,
callback: F,
) -> Result<PostAction, Self::Error>
where
F: FnMut(Self::Event, &mut Self::Metadata) -> Self::Ret;
/// Register yourself to this poll instance
///
/// You should register all your relevant file descriptors to the provided [`Poll`](crate::Poll)
/// using its [`Poll::register`](crate::Poll#method.register) method.
///
/// If you need to register more than one file descriptor, you can generate
/// several [`Token`](crate::Token)s from the provided `TokenFactory` to differentiate between them.
fn register(&mut self, poll: &mut Poll, token_factory: &mut TokenFactory) -> crate::Result<()>;
/// Re-register your file descriptors
///
/// You should update the registration of all your relevant file descriptors with
/// the provided [`Poll`](crate::Poll) using its [`Poll::reregister`](crate::Poll#method.reregister),
/// if necessary.
fn reregister(
&mut self,
poll: &mut Poll,
token_factory: &mut TokenFactory,
) -> crate::Result<()>;
/// Unregister your file descriptors
///
/// You should unregister all your file descriptors from this [`Poll`](crate::Poll) using its
/// [`Poll::unregister`](crate::Poll#method.unregister) method.
fn unregister(&mut self, poll: &mut Poll) -> crate::Result<()>;
/// Whether this source needs to be sent the [`EventSource::before_sleep`]
/// and [`EventSource::before_handle_events`] notifications. These are opt-in because
/// they require more expensive checks, and almost all sources will not need these notifications
const NEEDS_EXTRA_LIFECYCLE_EVENTS: bool = false;
/// Notification that a single `poll` is about to begin
///
/// Use this to perform operations which must be done before polling,
/// but which may conflict with other event handlers; for example,
/// when polling requires a lock to be taken.
///
/// If this returns `Ok(Some(..))`, it will be treated as an event arriving from polling, and
/// your event handler will be called with the returned `Token` and `Readiness`.
/// Polling will, however, still occur, but with a timeout of zero, so additional events
/// from this or other sources may also be handled in the same iteration.
/// The returned `Token` must belong to this source.
// If you need to return multiple synthetic events from this notification, please
// open an issue
fn before_sleep(&mut self) -> crate::Result<Option<(Readiness, Token)>> {
Ok(None)
}
/// Notification that polling is complete, and [`EventSource::process_events`] will
/// be called with the given events for this source. The iterator may be empty,
/// which indicates that no events were generated for this source
///
/// Please note, the iterator excludes any synthetic events returned from
/// [`EventSource::before_sleep`]
///
/// Use this to perform a cleanup before event handlers with arbitrary
/// code may run. This could be used to drop a lock obtained in
/// [`EventSource::before_sleep`]
#[allow(unused_variables)]
fn before_handle_events(&mut self, events: EventIterator<'_>) {}
}
/// Blanket implementation for boxed event sources. [`EventSource`] is not an
/// object safe trait, so this does not include trait objects.
impl<T: EventSource> EventSource for Box<T> {
type Event = T::Event;
type Metadata = T::Metadata;
type Ret = T::Ret;
type Error = T::Error;
fn process_events<F>(
&mut self,
readiness: Readiness,
token: Token,
callback: F,
) -> Result<PostAction, Self::Error>
where
F: FnMut(Self::Event, &mut Self::Metadata) -> Self::Ret,
{
T::process_events(&mut **self, readiness, token, callback)
}
fn register(&mut self, poll: &mut Poll, token_factory: &mut TokenFactory) -> crate::Result<()> {
T::register(&mut **self, poll, token_factory)
}
fn reregister(
&mut self,
poll: &mut Poll,
token_factory: &mut TokenFactory,
) -> crate::Result<()> {
T::reregister(&mut **self, poll, token_factory)
}
fn unregister(&mut self, poll: &mut Poll) -> crate::Result<()> {
T::unregister(&mut **self, poll)
}
const NEEDS_EXTRA_LIFECYCLE_EVENTS: bool = T::NEEDS_EXTRA_LIFECYCLE_EVENTS;
fn before_sleep(&mut self) -> crate::Result<Option<(Readiness, Token)>> {
T::before_sleep(&mut **self)
}
fn before_handle_events(&mut self, events: EventIterator) {
T::before_handle_events(&mut **self, events)
}
}
/// Blanket implementation for exclusive references to event sources.
/// [`EventSource`] is not an object safe trait, so this does not include trait
/// objects.
impl<T: EventSource> EventSource for &mut T {
type Event = T::Event;
type Metadata = T::Metadata;
type Ret = T::Ret;
type Error = T::Error;
fn process_events<F>(
&mut self,
readiness: Readiness,
token: Token,
callback: F,
) -> Result<PostAction, Self::Error>
where
F: FnMut(Self::Event, &mut Self::Metadata) -> Self::Ret,
{
T::process_events(&mut **self, readiness, token, callback)
}
fn register(&mut self, poll: &mut Poll, token_factory: &mut TokenFactory) -> crate::Result<()> {
T::register(&mut **self, poll, token_factory)
}
fn reregister(
&mut self,
poll: &mut Poll,
token_factory: &mut TokenFactory,
) -> crate::Result<()> {
T::reregister(&mut **self, poll, token_factory)
}
fn unregister(&mut self, poll: &mut Poll) -> crate::Result<()> {
T::unregister(&mut **self, poll)
}
const NEEDS_EXTRA_LIFECYCLE_EVENTS: bool = T::NEEDS_EXTRA_LIFECYCLE_EVENTS;
fn before_sleep(&mut self) -> crate::Result<Option<(Readiness, Token)>> {
T::before_sleep(&mut **self)
}
fn before_handle_events(&mut self, events: EventIterator) {
T::before_handle_events(&mut **self, events)
}
}
pub(crate) struct DispatcherInner<S, F> {
source: S,
callback: F,
needs_additional_lifecycle_events: bool,
}
impl<Data, S, F> EventDispatcher<Data> for RefCell<DispatcherInner<S, F>>
where
S: EventSource,
F: FnMut(S::Event, &mut S::Metadata, &mut Data) -> S::Ret,
{
fn process_events(
&self,
readiness: Readiness,
token: Token,
data: &mut Data,
) -> crate::Result<PostAction> {
let mut disp = self.borrow_mut();
let DispatcherInner {
ref mut source,
ref mut callback,
..
} = *disp;
trace!(
"[calloop] Processing events for source type {}",
std::any::type_name::<S>()
);
source
.process_events(readiness, token, |event, meta| callback(event, meta, data))
.map_err(|e| crate::Error::OtherError(e.into()))
}
fn register(
&self,
poll: &mut Poll,
additional_lifecycle_register: &mut AdditionalLifecycleEventsSet,
token_factory: &mut TokenFactory,
) -> crate::Result<()> {
let mut this = self.borrow_mut();
if this.needs_additional_lifecycle_events {
additional_lifecycle_register.register(token_factory.registration_token());
}
this.source.register(poll, token_factory)
}
fn reregister(
&self,
poll: &mut Poll,
additional_lifecycle_register: &mut AdditionalLifecycleEventsSet,
token_factory: &mut TokenFactory,
) -> crate::Result<bool> {
if let Ok(mut me) = self.try_borrow_mut() {
me.source.reregister(poll, token_factory)?;
if me.needs_additional_lifecycle_events {
additional_lifecycle_register.register(token_factory.registration_token());
}
Ok(true)
} else {
Ok(false)
}
}
fn unregister(
&self,
poll: &mut Poll,
additional_lifecycle_register: &mut AdditionalLifecycleEventsSet,
registration_token: RegistrationToken,
) -> crate::Result<bool> {
if let Ok(mut me) = self.try_borrow_mut() {
me.source.unregister(poll)?;
if me.needs_additional_lifecycle_events {
additional_lifecycle_register.unregister(registration_token);
}
Ok(true)
} else {
Ok(false)
}
}
fn before_sleep(&self) -> crate::Result<Option<(Readiness, Token)>> {
let mut disp = self.borrow_mut();
let DispatcherInner { ref mut source, .. } = *disp;
source.before_sleep()
}
fn before_handle_events(&self, events: EventIterator<'_>) {
let mut disp = self.borrow_mut();
let DispatcherInner { ref mut source, .. } = *disp;
source.before_handle_events(events);
}
}
pub(crate) trait EventDispatcher<Data> {
fn process_events(
&self,
readiness: Readiness,
token: Token,
data: &mut Data,
) -> crate::Result<PostAction>;
fn register(
&self,
poll: &mut Poll,
additional_lifecycle_register: &mut AdditionalLifecycleEventsSet,
token_factory: &mut TokenFactory,
) -> crate::Result<()>;
fn reregister(
&self,
poll: &mut Poll,
additional_lifecycle_register: &mut AdditionalLifecycleEventsSet,
token_factory: &mut TokenFactory,
) -> crate::Result<bool>;
fn unregister(
&self,
poll: &mut Poll,
additional_lifecycle_register: &mut AdditionalLifecycleEventsSet,
registration_token: RegistrationToken,
) -> crate::Result<bool>;
fn before_sleep(&self) -> crate::Result<Option<(Readiness, Token)>>;
fn before_handle_events(&self, events: EventIterator<'_>);
}
#[derive(Default)]
/// The set of sources that need additional lifecycle events
pub(crate) struct AdditionalLifecycleEventsSet {
/// The list of sources
pub(crate) values: Vec<RegistrationToken>,
}
impl AdditionalLifecycleEventsSet {
fn register(&mut self, token: RegistrationToken) {
self.values.push(token)
}
fn unregister(&mut self, token: RegistrationToken) {
self.values.retain(|it| it != &token)
}
}
// An internal trait to erase the `F` type parameter of `DispatcherInner`
trait ErasedDispatcher<'a, S, Data> {
fn as_source_ref(&self) -> Ref<S>;
fn as_source_mut(&self) -> RefMut<S>;
fn into_source_inner(self: Rc<Self>) -> S;
fn into_event_dispatcher(self: Rc<Self>) -> Rc<dyn EventDispatcher<Data> + 'a>;
}
impl<'a, S, Data, F> ErasedDispatcher<'a, S, Data> for RefCell<DispatcherInner<S, F>>
where
S: EventSource + 'a,
F: FnMut(S::Event, &mut S::Metadata, &mut Data) -> S::Ret + 'a,
{
fn as_source_ref(&self) -> Ref<S> {
Ref::map(self.borrow(), |inner| &inner.source)
}
fn as_source_mut(&self) -> RefMut<S> {
RefMut::map(self.borrow_mut(), |inner| &mut inner.source)
}
fn into_source_inner(self: Rc<Self>) -> S {
if let Ok(ref_cell) = Rc::try_unwrap(self) {
ref_cell.into_inner().source
} else {
panic!("Dispatcher is still registered");
}
}
fn into_event_dispatcher(self: Rc<Self>) -> Rc<dyn EventDispatcher<Data> + 'a>
where
S: 'a,
{
self as Rc<dyn EventDispatcher<Data> + 'a>
}
}
/// An event source with its callback.
///
/// The `Dispatcher` can be registered in an event loop.
/// Use the `as_source_{ref,mut}` functions to interact with the event source.
/// Use `into_source_inner` to get the event source back.
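///
/// A minimal sketch of the intended round-trip (mirroring this module's unit
/// tests; `my_source`, the callback body, and `loop_handle` are placeholders):
///
/// ```ignore
/// let dispatcher = Dispatcher::new(my_source, |event, metadata, shared_data| {
///     // process the event here
/// });
/// // Register it, keeping the `Dispatcher` around for later.
/// let token = loop_handle.register_dispatcher(dispatcher.clone()).unwrap();
/// // ... run the event loop ...
/// // Unregister it and take the source back out.
/// loop_handle.remove(token);
/// let my_source = dispatcher.into_source_inner();
/// ```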
pub struct Dispatcher<'a, S, Data>(Rc<dyn ErasedDispatcher<'a, S, Data> + 'a>);
impl<'a, S, Data> std::fmt::Debug for Dispatcher<'a, S, Data> {
#[cfg_attr(feature = "nightly_coverage", coverage(off))]
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("Dispatcher { ... }")
}
}
impl<'a, S, Data> Dispatcher<'a, S, Data>
where
S: EventSource + 'a,
{
/// Builds a dispatcher.
///
/// The resulting `Dispatcher` can then be registered in an event loop using
/// [`LoopHandle::register_dispatcher()`](crate::LoopHandle#method.register_dispatcher).
pub fn new<F>(source: S, callback: F) -> Self
where
F: FnMut(S::Event, &mut S::Metadata, &mut Data) -> S::Ret + 'a,
{
Dispatcher(Rc::new(RefCell::new(DispatcherInner {
source,
callback,
needs_additional_lifecycle_events: S::NEEDS_EXTRA_LIFECYCLE_EVENTS,
})))
}
/// Returns an immutable reference to the event source.
///
/// # Panics
///
/// Has the same semantics as `RefCell::borrow()`.
///
/// Because the dispatcher is mutably borrowed while its events are dispatched,
/// this method will panic if invoked from within the associated dispatching closure.
pub fn as_source_ref(&self) -> Ref<S> {
self.0.as_source_ref()
}
/// Returns a mutable reference to the event source.
///
/// # Panics
///
/// Has the same semantics as `RefCell::borrow_mut()`.
///
/// Because the dispatcher is mutably borrowed while its events are dispatched,
/// this method will panic if invoked from within the associated dispatching closure.
pub fn as_source_mut(&self) -> RefMut<S> {
self.0.as_source_mut()
}
/// Consumes the Dispatcher and returns the inner event source.
///
/// # Panics
///
/// Panics if the `Dispatcher` is still registered.
pub fn into_source_inner(self) -> S {
self.0.into_source_inner()
}
pub(crate) fn clone_as_event_dispatcher(&self) -> Rc<dyn EventDispatcher<Data> + 'a> {
Rc::clone(&self.0).into_event_dispatcher()
}
}
impl<'a, S, Data> Clone for Dispatcher<'a, S, Data> {
fn clone(&self) -> Dispatcher<'a, S, Data> {
Dispatcher(Rc::clone(&self.0))
}
}
/// An idle callback that was inserted in this loop
///
/// This handle allows you to cancel the callback. Dropping
/// it will *not* cancel it.
pub struct Idle<'i> {
pub(crate) callback: Rc<RefCell<dyn CancellableIdle + 'i>>,
}
impl<'i> std::fmt::Debug for Idle<'i> {
#[cfg_attr(feature = "nightly_coverage", coverage(off))]
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("Idle { ... }")
}
}
impl<'i> Idle<'i> {
/// Cancel the idle callback if it was not already run
pub fn cancel(self) {
self.callback.borrow_mut().cancel();
}
}
pub(crate) trait CancellableIdle {
fn cancel(&mut self);
}
impl<F> CancellableIdle for Option<F> {
fn cancel(&mut self) {
self.take();
}
}
pub(crate) trait IdleDispatcher<Data> {
fn dispatch(&mut self, data: &mut Data);
}
impl<Data, F> IdleDispatcher<Data> for Option<F>
where
F: FnMut(&mut Data),
{
fn dispatch(&mut self, data: &mut Data) {
if let Some(callback) = self.as_mut() {
callback(data);
}
}
}
#[cfg(test)]
mod tests {
use std::time::Duration;
use crate::{ping::make_ping, EventLoop};
// Test event source boxing.
#[test]
fn test_boxed_source() {
let mut fired = false;
let (pinger, source) = make_ping().unwrap();
let boxed = Box::new(source);
let mut event_loop = EventLoop::try_new().unwrap();
let handle = event_loop.handle();
let token = handle
.insert_source(boxed, |_, _, fired| *fired = true)
.unwrap();
pinger.ping();
event_loop
.dispatch(Duration::new(0, 0), &mut fired)
.unwrap();
assert!(fired);
fired = false;
handle.update(&token).unwrap();
pinger.ping();
event_loop
.dispatch(Duration::new(0, 0), &mut fired)
.unwrap();
assert!(fired);
fired = false;
handle.remove(token);
event_loop
.dispatch(Duration::new(0, 0), &mut fired)
.unwrap();
assert!(!fired);
}
// Test event source trait methods via mut ref.
#[test]
fn test_mut_ref_source() {
let mut fired = false;
let (pinger, mut source) = make_ping().unwrap();
let source_ref = &mut source;
let mut event_loop = EventLoop::try_new().unwrap();
let handle = event_loop.handle();
let token = handle
.insert_source(source_ref, |_, _, fired| *fired = true)
.unwrap();
pinger.ping();
event_loop
.dispatch(Duration::new(0, 0), &mut fired)
.unwrap();
assert!(fired);
fired = false;
handle.update(&token).unwrap();
pinger.ping();
event_loop
.dispatch(Duration::new(0, 0), &mut fired)
.unwrap();
assert!(fired);
fired = false;
handle.remove(token);
event_loop
.dispatch(Duration::new(0, 0), &mut fired)
.unwrap();
assert!(!fired);
}
// Test PostAction combinations.
#[test]
fn post_action_combine() {
use super::PostAction::*;
assert_eq!(Continue | Continue, Continue);
assert_eq!(Continue | Reregister, Reregister);
assert_eq!(Continue | Disable, Reregister);
assert_eq!(Continue | Remove, Reregister);
assert_eq!(Reregister | Continue, Reregister);
assert_eq!(Reregister | Reregister, Reregister);
assert_eq!(Reregister | Disable, Reregister);
assert_eq!(Reregister | Remove, Reregister);
assert_eq!(Disable | Continue, Reregister);
assert_eq!(Disable | Reregister, Reregister);
assert_eq!(Disable | Disable, Disable);
assert_eq!(Disable | Remove, Reregister);
assert_eq!(Remove | Continue, Reregister);
assert_eq!(Remove | Reregister, Reregister);
assert_eq!(Remove | Disable, Reregister);
assert_eq!(Remove | Remove, Remove);
}
// Test PostAction self-assignment.
#[test]
fn post_action_combine_assign() {
use super::PostAction::*;
let mut action = Continue;
action |= Continue;
assert_eq!(action, Continue);
let mut action = Continue;
action |= Reregister;
assert_eq!(action, Reregister);
let mut action = Continue;
action |= Disable;
assert_eq!(action, Reregister);
let mut action = Continue;
action |= Remove;
assert_eq!(action, Reregister);
let mut action = Reregister;
action |= Continue;
assert_eq!(action, Reregister);
let mut action = Reregister;
action |= Reregister;
assert_eq!(action, Reregister);
let mut action = Reregister;
action |= Disable;
assert_eq!(action, Reregister);
let mut action = Reregister;
action |= Remove;
assert_eq!(action, Reregister);
let mut action = Disable;
action |= Continue;
assert_eq!(action, Reregister);
let mut action = Disable;
action |= Reregister;
assert_eq!(action, Reregister);
let mut action = Disable;
action |= Disable;
assert_eq!(action, Disable);
let mut action = Disable;
action |= Remove;
assert_eq!(action, Reregister);
let mut action = Remove;
action |= Continue;
assert_eq!(action, Reregister);
let mut action = Remove;
action |= Reregister;
assert_eq!(action, Reregister);
let mut action = Remove;
action |= Disable;
assert_eq!(action, Reregister);
let mut action = Remove;
action |= Remove;
assert_eq!(action, Remove);
}
}

281
vendor/calloop/src/sources/ping.rs vendored Normal file
View File

@@ -0,0 +1,281 @@
//! Ping to the event loop
//!
//! This is an event source that just produces `()` events whenever the associated
//! [`Ping::ping`](Ping#method.ping) method is called. If the event source is pinged multiple times
//! between two dispatches, it'll only generate one event.
//!
//! This event source is a simple way of waking up the event loop from another part of your program
//! (and is what backs the [`LoopSignal`](crate::LoopSignal)). It can also be used as a building
//! block to construct event sources whose source of events is not a file descriptor, but rather a
//! userspace source (like another thread).
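//!
//! A minimal usage sketch (error handling elided):
//!
//! ```no_run
//! use calloop::{ping::make_ping, EventLoop};
//!
//! let mut event_loop = EventLoop::<()>::try_new().unwrap();
//! let (ping, source) = make_ping().unwrap();
//!
//! event_loop
//!     .handle()
//!     .insert_source(source, |(), &mut (), _| {
//!         // Woken up by a ping from another part of the program.
//!     })
//!     .unwrap();
//!
//! // From anywhere (including other threads), wake the loop up:
//! ping.ping();
//! ```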
// The ping source has platform-dependent implementations provided by modules
// under this one. These modules should expose:
// - a make_ping() function
// - a Ping type
// - a PingSource type
//
// See eg. the pipe implementation for these items' specific requirements.
#[cfg(target_os = "linux")]
mod eventfd;
#[cfg(target_os = "linux")]
use eventfd as platform;
#[cfg(windows)]
mod iocp;
#[cfg(windows)]
use iocp as platform;
#[cfg(not(any(target_os = "linux", windows)))]
mod pipe;
#[cfg(not(any(target_os = "linux", windows)))]
use pipe as platform;
/// Create a new ping event source
///
/// You are given a [`Ping`] instance, which can be cloned and used to ping the
/// event loop, and a [`PingSource`], which you can insert in your event loop to
/// receive the pings.
pub fn make_ping() -> std::io::Result<(Ping, PingSource)> {
platform::make_ping()
}
/// The Ping handle
///
/// This handle can be cloned and sent across threads. It can be used to
/// send pings to the `PingSource`.
pub type Ping = platform::Ping;
/// The ping event source
///
/// You can insert it in your event loop to receive pings.
///
/// If you use it directly, it will automatically remove itself from the event loop
/// once all [`Ping`] instances are dropped.
pub type PingSource = platform::PingSource;
/// An error arising from processing events for a ping.
#[derive(thiserror::Error, Debug)]
#[error(transparent)]
pub struct PingError(Box<dyn std::error::Error + Sync + Send>);
#[cfg(test)]
mod tests {
use crate::transient::TransientSource;
use std::time::Duration;
use super::*;
#[test]
fn ping() {
let mut event_loop = crate::EventLoop::<bool>::try_new().unwrap();
let (ping, source) = make_ping().unwrap();
event_loop
.handle()
.insert_source(source, |(), &mut (), dispatched| *dispatched = true)
.unwrap();
ping.ping();
let mut dispatched = false;
event_loop
.dispatch(std::time::Duration::ZERO, &mut dispatched)
.unwrap();
assert!(dispatched);
// Ping has been drained and no longer generates events
let mut dispatched = false;
event_loop
.dispatch(std::time::Duration::ZERO, &mut dispatched)
.unwrap();
assert!(!dispatched);
}
#[test]
fn ping_closed() {
let mut event_loop = crate::EventLoop::<bool>::try_new().unwrap();
let (_, source) = make_ping().unwrap();
event_loop
.handle()
.insert_source(source, |(), &mut (), dispatched| *dispatched = true)
.unwrap();
let mut dispatched = false;
// If the sender is closed from the start, the ping should first trigger
// once, disabling itself but not invoking the callback
event_loop
.dispatch(std::time::Duration::ZERO, &mut dispatched)
.unwrap();
assert!(!dispatched);
// Then it should not trigger any more, so this dispatch should wait the whole 100ms
let now = std::time::Instant::now();
event_loop
.dispatch(std::time::Duration::from_millis(100), &mut dispatched)
.unwrap();
assert!(now.elapsed() >= std::time::Duration::from_millis(100));
}
#[test]
fn ping_removed() {
// This keeps track of whether the event fired.
let mut dispatched = false;
let mut event_loop = crate::EventLoop::<bool>::try_new().unwrap();
let (sender, source) = make_ping().unwrap();
let wrapper = TransientSource::from(source);
// Check that the source starts off in the wrapper.
assert!(!wrapper.is_none());
// Put the source in the loop.
let dispatcher =
crate::Dispatcher::new(wrapper, |(), &mut (), dispatched| *dispatched = true);
let token = event_loop
.handle()
.register_dispatcher(dispatcher.clone())
.unwrap();
// Drop the sender and check that it's actually removed.
drop(sender);
// There should be no event, but the loop still needs to wake up to
// process the close event (just like in the ping_closed() test).
event_loop
.dispatch(Duration::ZERO, &mut dispatched)
.unwrap();
assert!(!dispatched);
// Pull the source wrapper out.
event_loop.handle().remove(token);
let wrapper = dispatcher.into_source_inner();
// Check that the inner source is now gone.
assert!(wrapper.is_none());
}
#[test]
fn ping_fired_and_removed() {
// This is like ping_removed() with the single difference that we fire a
// ping and drop it between two successive dispatches of the loop.
// This keeps track of whether the event fired.
let mut dispatched = false;
let mut event_loop = crate::EventLoop::<bool>::try_new().unwrap();
let (sender, source) = make_ping().unwrap();
let wrapper = TransientSource::from(source);
// Check that the source starts off in the wrapper.
assert!(!wrapper.is_none());
// Put the source in the loop.
let dispatcher =
crate::Dispatcher::new(wrapper, |(), &mut (), dispatched| *dispatched = true);
let token = event_loop
.handle()
.register_dispatcher(dispatcher.clone())
.unwrap();
// Send a ping AND drop the sender and check that it's actually removed.
sender.ping();
drop(sender);
// There should be an event, but the source should be removed from the
// loop immediately after.
event_loop
.dispatch(Duration::ZERO, &mut dispatched)
.unwrap();
assert!(dispatched);
// Pull the source wrapper out.
event_loop.handle().remove(token);
let wrapper = dispatcher.into_source_inner();
// Check that the inner source is now gone.
assert!(wrapper.is_none());
}
#[test]
fn ping_multiple_senders() {
// This is like ping_removed() but for testing the behaviour of multiple
// senders.
// This keeps track of whether the event fired.
let mut dispatched = false;
let mut event_loop = crate::EventLoop::<bool>::try_new().unwrap();
let (sender0, source) = make_ping().unwrap();
let wrapper = TransientSource::from(source);
let sender1 = sender0.clone();
let sender2 = sender1.clone();
// Check that the source starts off in the wrapper.
assert!(!wrapper.is_none());
// Put the source in the loop.
let dispatcher =
crate::Dispatcher::new(wrapper, |(), &mut (), dispatched| *dispatched = true);
let token = event_loop
.handle()
.register_dispatcher(dispatcher.clone())
.unwrap();
// Send a ping AND drop the sender and check that it's actually removed.
sender0.ping();
drop(sender0);
// There should be an event, and the source should remain in the loop.
event_loop
.dispatch(Duration::ZERO, &mut dispatched)
.unwrap();
assert!(dispatched);
// Now test that the clones still work. Drop after the dispatch loop
// instead of before, this time.
dispatched = false;
sender1.ping();
event_loop
.dispatch(Duration::ZERO, &mut dispatched)
.unwrap();
assert!(dispatched);
// Finally, drop all of them without sending anything.
dispatched = false;
drop(sender1);
drop(sender2);
event_loop
.dispatch(Duration::ZERO, &mut dispatched)
.unwrap();
assert!(!dispatched);
// Pull the source wrapper out.
event_loop.handle().remove(token);
let wrapper = dispatcher.into_source_inner();
// Check that the inner source is now gone.
assert!(wrapper.is_none());
}
}

194
vendor/calloop/src/sources/ping/eventfd.rs vendored Normal file
View File

@@ -0,0 +1,194 @@
//! Eventfd based implementation of the ping event source.
//!
//! # Implementation notes
//!
//! The eventfd is a much lighter signalling mechanism provided by the Linux
//! kernel. Rather than write an arbitrary sequence of bytes, it only has a
//! 64-bit counter.
//!
//! To avoid closing the eventfd early, the eventfd is shared between the
//! senders and the event source through an `Arc<OwnedFd>`. When all the
//! senders are dropped, the wrapper `FlagOnDrop` signals this to the event
//! source by writing the close value to the eventfd, so that the source can
//! remove itself from the event loop.
//!
//! To differentiate between regular ping events and close ping events, we add 2
//! to the counter for regular events and 1 for close events. In the source we
//! can then check the LSB and if it's set, we know it was a close event. This
//! only works if a close event never fires more than once.
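//!
//! As a worked example: three pings followed by the close leave the counter at
//! 3 * 2 + 1 = 7 when drained. The set LSB records the close, and the remaining
//! bits being non-zero (7 & !1 == 6) record that at least one ping happened,
//! which is exactly how `process_events` below decodes the value.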
use std::os::unix::io::{AsFd, BorrowedFd, OwnedFd};
use std::sync::Arc;
use rustix::event::{eventfd, EventfdFlags};
use rustix::io::{read, write, Errno};
use super::PingError;
use crate::{
generic::Generic, EventSource, Interest, Mode, Poll, PostAction, Readiness, Token, TokenFactory,
};
// These are not bitfields! They are increments to add to the eventfd counter.
// Since the fd can only be closed once, we can effectively use the
// INCREMENT_CLOSE value as a bitmask when checking.
const INCREMENT_PING: u64 = 0x2;
const INCREMENT_CLOSE: u64 = 0x1;
#[inline]
pub fn make_ping() -> std::io::Result<(Ping, PingSource)> {
let read = eventfd(0, EventfdFlags::CLOEXEC | EventfdFlags::NONBLOCK)?;
// We only have one fd for the eventfd. If the sending end closes it when
// all copies are dropped, the receiving end will be closed as well. We need
// to make sure the fd is not closed until all holders of it have dropped
// it.
let fd = Arc::new(read);
let ping = Ping {
event: Arc::new(FlagOnDrop(Arc::clone(&fd))),
};
let source = PingSource {
event: Generic::new(ArcAsFd(fd), Interest::READ, Mode::Level),
};
Ok((ping, source))
}
// Helper functions for the event source IO.
#[inline]
fn send_ping(fd: BorrowedFd<'_>, count: u64) -> std::io::Result<()> {
assert!(count > 0);
match write(fd, &count.to_ne_bytes()) {
// The write succeeded, the ping will wake up the loop.
Ok(_) => Ok(()),
// The counter hit its cap, which means previous calls to write() will
// wake up the loop.
Err(Errno::AGAIN) => Ok(()),
// Anything else is a real error.
Err(e) => Err(e.into()),
}
}
#[inline]
fn drain_ping(fd: BorrowedFd<'_>) -> std::io::Result<u64> {
// The eventfd counter is effectively a u64.
const NBYTES: usize = 8;
let mut buf = [0u8; NBYTES];
match read(fd, &mut buf) {
// Reading from an eventfd should only ever produce 8 bytes. No looping
// is required.
Ok(NBYTES) => Ok(u64::from_ne_bytes(buf)),
Ok(_) => unreachable!(),
// Any other error can be propagated.
Err(e) => Err(e.into()),
}
}
// Rust 1.64.0 adds an `AsFd` implementation for `Arc`, so this wrapper won't be
// needed once that version can be relied upon.
#[derive(Debug)]
struct ArcAsFd(Arc<OwnedFd>);
impl AsFd for ArcAsFd {
fn as_fd(&self) -> BorrowedFd {
self.0.as_fd()
}
}
// The event source is simply a generic source with one of the eventfds.
#[derive(Debug)]
pub struct PingSource {
event: Generic<ArcAsFd>,
}
impl EventSource for PingSource {
type Event = ();
type Metadata = ();
type Ret = ();
type Error = PingError;
fn process_events<C>(
&mut self,
readiness: Readiness,
token: Token,
mut callback: C,
) -> Result<PostAction, Self::Error>
where
C: FnMut(Self::Event, &mut Self::Metadata) -> Self::Ret,
{
self.event
.process_events(readiness, token, |_, fd| {
let counter = drain_ping(fd.as_fd())?;
// If the LSB is set, it means we were closed. If anything else
// is also set, it means we were pinged. The two are not
// mutually exclusive.
let close = (counter & INCREMENT_CLOSE) != 0;
let ping = (counter & (u64::MAX - 1)) != 0;
if ping {
callback((), &mut ());
}
if close {
Ok(PostAction::Remove)
} else {
Ok(PostAction::Continue)
}
})
.map_err(|e| PingError(e.into()))
}
fn register(&mut self, poll: &mut Poll, token_factory: &mut TokenFactory) -> crate::Result<()> {
self.event.register(poll, token_factory)
}
fn reregister(
&mut self,
poll: &mut Poll,
token_factory: &mut TokenFactory,
) -> crate::Result<()> {
self.event.reregister(poll, token_factory)
}
fn unregister(&mut self, poll: &mut Poll) -> crate::Result<()> {
self.event.unregister(poll)
}
}
#[derive(Clone, Debug)]
pub struct Ping {
// This is an Arc because it's potentially shared with clones. The last one
// dropped needs to signal to the event source via the eventfd.
event: Arc<FlagOnDrop>,
}
impl Ping {
/// Send a ping to the `PingSource`.
pub fn ping(&self) {
if let Err(e) = send_ping(self.event.0.as_fd(), INCREMENT_PING) {
log::warn!("[calloop] Failed to write a ping: {:?}", e);
}
}
}
/// This manages signalling to the PingSource when it's dropped. There should
/// only ever be one of these per PingSource.
#[derive(Debug)]
struct FlagOnDrop(Arc<OwnedFd>);
impl Drop for FlagOnDrop {
fn drop(&mut self) {
if let Err(e) = send_ping(self.0.as_fd(), INCREMENT_CLOSE) {
log::warn!("[calloop] Failed to send close ping: {:?}", e);
}
}
}

328
vendor/calloop/src/sources/ping/iocp.rs vendored Normal file
View File

@@ -0,0 +1,328 @@
//! IOCP-based implementation of the ping event source.
//!
//! The underlying `Poller` can be woken up at any time, using the `post` method
//! to send an arbitrary packet to the I/O completion port. The complication is
//! emulating a pipe.
//!
//! Since `Poller` is already wrapped in an `Arc`, we can clone it into some
//! synchronized inner state to send a pre-determined packet into it. Thankfully
//! calloop's use of the pipe is constrained enough that we can implement it using
//! a simple bool to keep track of whether or not it is notified.
use crate::sources::EventSource;
use polling::os::iocp::{CompletionPacket, PollerIocpExt};
use polling::Poller;
use std::fmt;
use std::io;
use std::sync::{Arc, Mutex, TryLockError};
#[inline]
pub fn make_ping() -> io::Result<(Ping, PingSource)> {
let state = Arc::new(State {
counter: Mutex::new(Counter {
notified: false,
poll_state: None,
}),
});
Ok((
Ping {
state: state.clone(),
},
PingSource { state },
))
}
/// The event to trigger.
#[derive(Clone)]
pub struct Ping {
state: Arc<State>,
}
impl fmt::Debug for Ping {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
debug_ping(&self.state, "Ping", f)
}
}
/// The event source.
pub struct PingSource {
state: Arc<State>,
}
impl fmt::Debug for PingSource {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
debug_ping(&self.state, "PingSource", f)
}
}
impl Ping {
/// Send a ping to the `PingSource`.
pub fn ping(&self) {
let mut counter = self.state.counter.lock().unwrap_or_else(|e| e.into_inner());
// Indicate that we are now notified.
counter.notified = true;
let poll_state = match &mut counter.poll_state {
Some(ps) => ps,
None => {
log::warn!("[calloop] ping was not registered with the event loop");
return;
}
};
// If we aren't currently inserted in the loop, send our packet.
if let Err(e) = poll_state.notify() {
log::warn!("[calloop] failed to post packet to IOCP: {}", e);
}
}
}
impl Drop for Ping {
fn drop(&mut self) {
// If this is the last ping, wake up the source so it removes itself.
if Arc::strong_count(&self.state) <= 2 {
let mut counter = self.state.counter.lock().unwrap_or_else(|e| e.into_inner());
if let Some(poll_state) = &mut counter.poll_state {
if let Err(e) = poll_state.notify() {
log::warn!("[calloop] failed to post packet to IOCP during drop: {}", e);
}
}
}
}
}
impl EventSource for PingSource {
type Error = super::PingError;
type Event = ();
type Metadata = ();
type Ret = ();
fn process_events<F>(
&mut self,
_readiness: crate::Readiness,
token: crate::Token,
mut callback: F,
) -> Result<crate::PostAction, Self::Error>
where
F: FnMut(Self::Event, &mut Self::Metadata) -> Self::Ret,
{
let mut counter = self.state.counter.lock().unwrap_or_else(|e| e.into_inner());
// If we aren't registered, break out.
let poll_state = match &mut counter.poll_state {
Some(ps) => ps,
None => {
// We were deregistered; indicate to the higher level loop.
return Ok(crate::PostAction::Disable);
}
};
// We are no longer inserted into the poller.
poll_state.inserted = false;
// Make sure this is our token.
let token: usize = token.inner.into();
if poll_state.packet.event().key != token {
log::warn!(
"[calloop] token does not match; expected {:x}, got {:x}",
poll_state.packet.event().key,
token
);
return Ok(crate::PostAction::Continue);
}
// If we were notified, invoke the user callback.
if counter.notified {
counter.notified = false;
// Call the callback.
callback((), &mut ());
}
// Stop looping if all of the Pings have been dropped.
let action = if Arc::strong_count(&self.state) <= 1 {
crate::PostAction::Remove
} else {
crate::PostAction::Continue
};
Ok(action)
}
fn register(
&mut self,
poll: &mut crate::Poll,
token_factory: &mut crate::TokenFactory,
) -> crate::Result<()> {
let token = token_factory.token();
let mut counter = self.state.counter.lock().unwrap_or_else(|e| e.into_inner());
// Make sure we haven't already been registered.
if counter.poll_state.is_some() {
return Err(io::Error::from(io::ErrorKind::AlreadyExists).into());
}
// Create the event to send.
let packet = {
let token = token.inner.into();
let event = polling::Event::readable(token);
CompletionPacket::new(event)
};
// Create the poll state.
let poll_state = PollState::new(poll.poller(), packet, counter.notified)?;
// Substitute it into our poll state.
counter.poll_state = Some(poll_state);
Ok(())
}
fn reregister(
&mut self,
poll: &mut crate::Poll,
token_factory: &mut crate::TokenFactory,
) -> crate::Result<()> {
let token = token_factory.token();
let mut counter = self.state.counter.lock().unwrap_or_else(|e| e.into_inner());
// Make sure that the poller has been registered.
let poll_state = match &mut counter.poll_state {
Some(ps) => ps,
None => return Err(io::Error::from(io::ErrorKind::NotFound).into()),
};
// If it's a different poller, throw an error.
if !Arc::ptr_eq(&poll_state.poller, poll.poller()) {
return Err(io::Error::new(
io::ErrorKind::NotFound,
"attempted to reregister() a PingSource with a different poller",
)
.into());
}
// Change the token if needed.
let token = token.inner.into();
let event = polling::Event::readable(token);
if event.key != poll_state.packet.event().key {
poll_state.packet = CompletionPacket::new(event);
if poll_state.inserted {
poll_state.inserted = false;
poll_state.notify()?;
}
}
Ok(())
}
fn unregister(&mut self, _poll: &mut crate::Poll) -> crate::Result<()> {
let mut counter = self.state.counter.lock().unwrap_or_else(|e| e.into_inner());
// Remove our current registration.
if counter.poll_state.take().is_none() {
log::trace!("[calloop] unregistered a source that wasn't registered");
}
Ok(())
}
}
/// Inner state of the pipe.
struct State {
/// The counter used to keep track of our state.
counter: Mutex<Counter>,
}
/// Inner counter of the pipe.
struct Counter {
/// Are we notified?
notified: bool,
/// The `Poller`-related state.
///
/// This is `None` if we aren't inserted into the `Poller` yet.
poll_state: Option<PollState>,
}
/// The `Poller` combined with some associated state.
struct PollState {
/// The `Poller` that we are registered in.
poller: Arc<Poller>,
/// Are we inserted into the poller?
inserted: bool,
/// The completion packet to send.
packet: CompletionPacket,
}
impl PollState {
/// Create a new `PollState` based on the `Poller` and the `packet`.
///
/// If `notified` is `true`, a packet is inserted into the poller.
fn new(poller: &Arc<Poller>, packet: CompletionPacket, notified: bool) -> io::Result<Self> {
let mut poll_state = Self {
poller: poller.clone(),
packet,
inserted: false,
};
if notified {
poll_state.notify()?;
}
Ok(poll_state)
}
/// Notify the poller.
fn notify(&mut self) -> io::Result<()> {
if !self.inserted {
self.poller.post(self.packet.clone())?;
self.inserted = true;
}
Ok(())
}
}
#[inline]
fn debug_ping(state: &State, name: &str, f: &mut fmt::Formatter) -> fmt::Result {
let counter = match state.counter.try_lock() {
Ok(counter) => counter,
Err(TryLockError::WouldBlock) => {
return f
.debug_tuple(name)
.field(&format_args!("<locked>"))
.finish()
}
Err(TryLockError::Poisoned(_)) => {
return f
.debug_tuple(name)
.field(&format_args!("<poisoned>"))
.finish()
}
};
let mut s = f.debug_struct(name);
s.field("notified", &counter.notified);
// Tell if we are registered.
match &counter.poll_state {
Some(poll_state) => {
s.field("packet", poll_state.packet.event());
s.field("inserted", &poll_state.inserted);
}
None => {
s.field("packet", &format_args!("<not registered>"));
}
}
s.finish()
}

146
vendor/calloop/src/sources/ping/pipe.rs vendored Normal file
View File

@@ -0,0 +1,146 @@
//! Pipe based implementation of the ping event source, using the pipe or pipe2
//! syscall. Sending a ping involves writing to one end of a pipe, and the other
//! end becoming readable is what wakes up the event loop.
use std::os::unix::io::{AsFd, BorrowedFd, OwnedFd};
use std::sync::Arc;
use rustix::io::{read, write, Errno};
use super::PingError;
use crate::{
generic::Generic, EventSource, Interest, Mode, Poll, PostAction, Readiness, Token, TokenFactory,
};
#[cfg(target_os = "macos")]
#[inline]
fn make_ends() -> std::io::Result<(OwnedFd, OwnedFd)> {
use rustix::fs::{fcntl_getfl, fcntl_setfl, OFlags};
use rustix::pipe::pipe;
let (read, write) = pipe()?;
let set_flags = |fd| fcntl_setfl(fd, fcntl_getfl(fd)? | OFlags::CLOEXEC | OFlags::NONBLOCK);
set_flags(&read)?;
set_flags(&write)?;
Ok((read, write))
}
#[cfg(not(target_os = "macos"))]
#[inline]
fn make_ends() -> std::io::Result<(OwnedFd, OwnedFd)> {
use rustix::pipe::{pipe_with, PipeFlags};
Ok(pipe_with(PipeFlags::CLOEXEC | PipeFlags::NONBLOCK)?)
}
#[inline]
pub fn make_ping() -> std::io::Result<(Ping, PingSource)> {
let (read, write) = make_ends()?;
let source = PingSource {
pipe: Generic::new(read, Interest::READ, Mode::Level),
};
let ping = Ping {
pipe: Arc::new(write),
};
Ok((ping, source))
}
// Helper functions for the event source IO.
#[inline]
fn send_ping(fd: BorrowedFd<'_>) -> std::io::Result<()> {
write(fd, &[0u8])?;
Ok(())
}
// The event source is simply a generic source with the FD of the read end of
// the pipe.
#[derive(Debug)]
pub struct PingSource {
pipe: Generic<OwnedFd>,
}
impl EventSource for PingSource {
type Event = ();
type Metadata = ();
type Ret = ();
type Error = PingError;
fn process_events<C>(
&mut self,
readiness: Readiness,
token: Token,
mut callback: C,
) -> Result<PostAction, Self::Error>
where
C: FnMut(Self::Event, &mut Self::Metadata) -> Self::Ret,
{
self.pipe
.process_events(readiness, token, |_, fd| {
let mut buf = [0u8; 32];
let mut read_something = false;
let mut action = PostAction::Continue;
loop {
match read(&fd, &mut buf) {
Ok(0) => {
// The other end of the pipe was closed, mark ourselves
// for removal.
action = PostAction::Remove;
break;
}
// Got one or more pings.
Ok(_) => read_something = true,
// Nothing more to read.
Err(Errno::AGAIN) => break,
// Propagate error.
Err(e) => return Err(e.into()),
}
}
if read_something {
callback((), &mut ());
}
Ok(action)
})
.map_err(|e| PingError(e.into()))
}
fn register(&mut self, poll: &mut Poll, token_factory: &mut TokenFactory) -> crate::Result<()> {
self.pipe.register(poll, token_factory)
}
fn reregister(
&mut self,
poll: &mut Poll,
token_factory: &mut TokenFactory,
) -> crate::Result<()> {
self.pipe.reregister(poll, token_factory)
}
fn unregister(&mut self, poll: &mut Poll) -> crate::Result<()> {
self.pipe.unregister(poll)
}
}
// The sending end of the ping writes zeroes to the write end of the pipe.
#[derive(Clone, Debug)]
pub struct Ping {
pipe: Arc<OwnedFd>,
}
impl Ping {
/// Send a ping to the `PingSource`
pub fn ping(&self) {
if let Err(e) = send_ping(self.pipe.as_fd()) {
log::warn!("[calloop] Failed to write a ping: {:?}", e);
}
}
}

199
vendor/calloop/src/sources/signals.rs vendored Normal file
View File

@@ -0,0 +1,199 @@
//! Event source for tracking Unix signals
//!
//! Only available on Linux.
//!
//! This allows you to track and receive Unix signals through the event loop
//! rather than by registering signal handlers. It uses `signalfd` under the hood.
//!
//! The source will take care of masking and unmasking signals for the thread it runs on,
//! but you are responsible for masking them on other threads if you run them. The simplest
//! way to ensure that is to set up the signal event source before spawning any threads, as
//! they'll inherit their parent's signal mask.
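//!
//! A minimal usage sketch (the choice of `SIGUSR1` is arbitrary):
//!
//! ```no_run
//! use calloop::signals::{Signal, Signals};
//!
//! let mut event_loop = calloop::EventLoop::<()>::try_new().unwrap();
//! let source = Signals::new(&[Signal::SIGUSR1]).unwrap();
//!
//! event_loop
//!     .handle()
//!     .insert_source(source, |event, &mut (), _| {
//!         println!("Received signal: {:?}", event.signal());
//!     })
//!     .unwrap();
//! ```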
use std::convert::TryFrom;
use std::io::Error as IoError;
use std::os::raw::c_int;
use nix::sys::signal::SigSet;
pub use nix::sys::signal::Signal;
pub use nix::sys::signalfd::siginfo;
use nix::sys::signalfd::{SfdFlags, SignalFd};
use super::generic::{FdWrapper, Generic};
use crate::{EventSource, Interest, Mode, Poll, PostAction, Readiness, Token, TokenFactory};
/// An event generated by the signal event source
#[derive(Copy, Clone, Debug)]
pub struct Event {
info: siginfo,
}
impl Event {
/// Retrieve the signal number that was received
pub fn signal(&self) -> Signal {
Signal::try_from(self.info.ssi_signo as c_int).unwrap()
}
/// Access the full `siginfo_t` associated with this signal event
pub fn full_info(&self) -> siginfo {
self.info
}
}
/// An event source for receiving Unix signals
#[derive(Debug)]
pub struct Signals {
sfd: Generic<FdWrapper<SignalFd>>,
mask: SigSet,
}
impl Signals {
/// Create a new signal event source listening on the specified list of signals
pub fn new(signals: &[Signal]) -> crate::Result<Signals> {
let mut mask = SigSet::empty();
for &s in signals {
mask.add(s);
}
// Mask the signals for this thread
mask.thread_block().map_err(IoError::from)?;
// Create the SignalFd
let sfd = SignalFd::with_flags(&mask, SfdFlags::SFD_NONBLOCK | SfdFlags::SFD_CLOEXEC)
.map_err(IoError::from)?;
Ok(Signals {
sfd: Generic::new(unsafe { FdWrapper::new(sfd) }, Interest::READ, Mode::Level),
mask,
})
}
/// Add a list of signals to the signals source
///
/// If this function returns an error, the signal mask of the thread may
/// have still been changed.
pub fn add_signals(&mut self, signals: &[Signal]) -> crate::Result<()> {
for &s in signals {
self.mask.add(s);
}
self.mask.thread_block().map_err(IoError::from)?;
// SAFETY: We don't drop the underlying mask.
unsafe {
self.sfd
.get_mut()
.set_mask(&self.mask)
.map_err(IoError::from)?;
}
Ok(())
}
/// Remove a list of signals from the signals source
///
/// If this function returns an error, the signal mask of the thread may
/// have still been changed.
pub fn remove_signals(&mut self, signals: &[Signal]) -> crate::Result<()> {
let mut removed = SigSet::empty();
for &s in signals {
self.mask.remove(s);
removed.add(s);
}
removed.thread_unblock().map_err(IoError::from)?;
// SAFETY: We don't drop the underlying mask.
unsafe {
self.sfd
.get_mut()
.set_mask(&self.mask)
.map_err(IoError::from)?;
}
Ok(())
}
/// Replace the list of signals of the source
///
/// If this function returns an error, the signal mask of the thread may
/// have still been changed.
pub fn set_signals(&mut self, signals: &[Signal]) -> crate::Result<()> {
let mut new_mask = SigSet::empty();
for &s in signals {
new_mask.add(s);
}
self.mask.thread_unblock().map_err(IoError::from)?;
new_mask.thread_block().map_err(IoError::from)?;
// SAFETY: We don't drop the underlying mask.
unsafe {
self.sfd
.get_mut()
.set_mask(&new_mask)
.map_err(IoError::from)?;
}
self.mask = new_mask;
Ok(())
}
}
impl Drop for Signals {
fn drop(&mut self) {
// we cannot handle error here
if let Err(e) = self.mask.thread_unblock() {
log::warn!("[calloop] Failed to unmask signals: {:?}", e);
}
}
}
impl EventSource for Signals {
type Event = Event;
type Metadata = ();
type Ret = ();
type Error = SignalError;
fn process_events<C>(
&mut self,
readiness: Readiness,
token: Token,
mut callback: C,
) -> Result<PostAction, Self::Error>
where
C: FnMut(Self::Event, &mut Self::Metadata) -> Self::Ret,
{
self.sfd
.process_events(readiness, token, |_, sfd| {
loop {
match unsafe { sfd.get_mut().read_signal() } {
Ok(Some(info)) => callback(Event { info }, &mut ()),
Ok(None) => break,
Err(e) => {
log::warn!("[callop] Error reading from signalfd: {}", e);
return Err(e.into());
}
}
}
Ok(PostAction::Continue)
})
.map_err(|e| SignalError(e.into()))
}
fn register(&mut self, poll: &mut Poll, token_factory: &mut TokenFactory) -> crate::Result<()> {
self.sfd.register(poll, token_factory)
}
fn reregister(
&mut self,
poll: &mut Poll,
token_factory: &mut TokenFactory,
) -> crate::Result<()> {
self.sfd.reregister(poll, token_factory)
}
fn unregister(&mut self, poll: &mut Poll) -> crate::Result<()> {
self.sfd.unregister(poll)
}
}
/// An error arising from processing events for a process signal.
#[derive(thiserror::Error, Debug)]
#[error(transparent)]
pub struct SignalError(Box<dyn std::error::Error + Sync + Send>);

647
vendor/calloop/src/sources/timer.rs vendored Normal file
View File

@@ -0,0 +1,647 @@
//! Timer event source
//!
//! The [`Timer`] is an event source that will fire its event after a certain amount of time
//! specified at creation. Its timing is tracked directly by the event loop core logic, and it does
//! not consume any system resource.
//!
//! As of calloop v0.11.0, the event loop always uses high-precision timers. However, the timer
//! precision varies between operating systems; for instance, the scheduler granularity on Windows
//! is about 16 milliseconds. If you need to rely on good precision timers in general, you may need
//! to enable realtime features of your OS to ensure your thread is quickly woken up by the system
//! scheduler.
//!
//! The provided event is an [`Instant`] representing the deadline for which this timer has fired
//! (which can be earlier than the current time depending on the event loop congestion).
//!
//! The callback associated with this event source is expected to return a [`TimeoutAction`], which
//! can be used to implement self-repeating timers by telling calloop to reprogram the same timer
//! for a later timeout after it has fired.
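//!
//! A minimal sketch of a one-shot timer (the duration and callback body are illustrative;
//! error handling is elided):
//!
//! ```no_run
//! use std::time::Duration;
//! use calloop::EventLoop;
//! use calloop::timer::{TimeoutAction, Timer};
//!
//! let mut event_loop: EventLoop<()> = EventLoop::try_new().unwrap();
//! event_loop
//!     .handle()
//!     .insert_source(
//!         Timer::from_duration(Duration::from_secs(5)),
//!         |deadline, _metadata, _shared_data| {
//!             println!("timer fired, deadline was {:?}", deadline);
//!             TimeoutAction::Drop
//!         },
//!     )
//!     .unwrap();
//! ```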
/*
* This module provides two main types:
*
* - `Timer` is the user-facing type that represents a timer event source
* - `TimerWheel` is an internal data structure for tracking registered timeouts, it is used by
* the polling logic in sys/mod.rs
*/
use std::{
cell::RefCell,
collections::BinaryHeap,
rc::Rc,
task::Waker,
time::{Duration, Instant},
};
use crate::{EventSource, LoopHandle, Poll, PostAction, Readiness, Token, TokenFactory};
#[derive(Debug)]
struct Registration {
token: Token,
wheel: Rc<RefCell<TimerWheel>>,
counter: u32,
}
/// A timer event source
///
/// When registered to the event loop, it will trigger an event once its deadline is reached.
/// If the deadline is in the past relative to the moment of its insertion in the event loop,
/// the `Timer` will trigger an event as soon as the event loop is dispatched.
#[derive(Debug)]
pub struct Timer {
registration: Option<Registration>,
deadline: Option<Instant>,
}
impl Timer {
/// Create a timer that will fire immediately when inserted in the event loop
pub fn immediate() -> Timer {
Self::from_deadline(Instant::now())
}
/// Create a timer that will fire after a given duration from now
pub fn from_duration(duration: Duration) -> Timer {
Self::from_deadline_inner(Instant::now().checked_add(duration))
}
/// Create a timer that will fire at a given instant
pub fn from_deadline(deadline: Instant) -> Timer {
Self::from_deadline_inner(Some(deadline))
}
fn from_deadline_inner(deadline: Option<Instant>) -> Timer {
Timer {
registration: None,
deadline,
}
}
/// Changes the deadline of this timer to an [`Instant`]
///
/// If the `Timer` is currently registered in the event loop, it needs to be
/// re-registered for this change to take effect.
pub fn set_deadline(&mut self, deadline: Instant) {
self.deadline = Some(deadline);
}
/// Changes the deadline of this timer to a [`Duration`] from now
///
/// If the `Timer` is currently registered in the event loop, it needs to be
/// re-registered for this change to take effect.
pub fn set_duration(&mut self, duration: Duration) {
self.deadline = Instant::now().checked_add(duration);
}
/// Get the current deadline of this `Timer`
///
/// Returns `None` if the timer has overflowed.
pub fn current_deadline(&self) -> Option<Instant> {
self.deadline
}
}
impl EventSource for Timer {
type Event = Instant;
type Metadata = ();
type Ret = TimeoutAction;
type Error = std::io::Error;
fn process_events<F>(
&mut self,
_: Readiness,
token: Token,
mut callback: F,
) -> Result<PostAction, Self::Error>
where
F: FnMut(Self::Event, &mut Self::Metadata) -> Self::Ret,
{
if let (Some(ref registration), Some(ref deadline)) = (&self.registration, &self.deadline) {
if registration.token != token {
return Ok(PostAction::Continue);
}
let new_deadline = match callback(*deadline, &mut ()) {
TimeoutAction::Drop => return Ok(PostAction::Remove),
TimeoutAction::ToInstant(instant) => instant,
TimeoutAction::ToDuration(duration) => match Instant::now().checked_add(duration) {
Some(new_deadline) => new_deadline,
None => {
// The timer has overflowed, meaning we have no choice but to drop it.
self.deadline = None;
return Ok(PostAction::Remove);
}
},
};
// If we received an event, we MUST have a valid counter value
registration.wheel.borrow_mut().insert_reuse(
registration.counter,
new_deadline,
registration.token,
);
self.deadline = Some(new_deadline);
}
Ok(PostAction::Continue)
}
fn register(&mut self, poll: &mut Poll, token_factory: &mut TokenFactory) -> crate::Result<()> {
// Only register a deadline if we haven't overflowed.
if let Some(deadline) = self.deadline {
let wheel = poll.timers.clone();
let token = token_factory.token();
let counter = wheel.borrow_mut().insert(deadline, token);
self.registration = Some(Registration {
token,
wheel,
counter,
});
}
Ok(())
}
fn reregister(
&mut self,
poll: &mut Poll,
token_factory: &mut TokenFactory,
) -> crate::Result<()> {
self.unregister(poll)?;
self.register(poll, token_factory)
}
fn unregister(&mut self, poll: &mut Poll) -> crate::Result<()> {
if let Some(registration) = self.registration.take() {
poll.timers.borrow_mut().cancel(registration.counter);
}
Ok(())
}
}
/// Action to reschedule a timeout if necessary
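///
/// A sketch of a self-repeating timer built by returning `ToDuration` from the
/// callback (the interval and the `u32` tick counter are illustrative):
///
/// ```no_run
/// use std::time::Duration;
/// use calloop::EventLoop;
/// use calloop::timer::{TimeoutAction, Timer};
///
/// let mut event_loop: EventLoop<u32> = EventLoop::try_new().unwrap();
/// event_loop
///     .handle()
///     .insert_source(
///         Timer::from_duration(Duration::from_millis(500)),
///         |_deadline, _metadata, ticks| {
///             *ticks += 1;
///             // re-arm the timer to fire again 500ms from now
///             TimeoutAction::ToDuration(Duration::from_millis(500))
///         },
///     )
///     .unwrap();
/// ```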
#[derive(Debug)]
pub enum TimeoutAction {
/// Don't reschedule this timer
Drop,
/// Reschedule this timer to a given [`Instant`]
ToInstant(Instant),
/// Reschedule this timer to a given [`Duration`] in the future
ToDuration(Duration),
}
// Internal representation of a timeout registered in the TimerWheel
#[derive(Debug)]
struct TimeoutData {
deadline: Instant,
token: RefCell<Option<Token>>,
counter: u32,
}
// A data structure for tracking registered timeouts
#[derive(Debug)]
pub(crate) struct TimerWheel {
heap: BinaryHeap<TimeoutData>,
counter: u32,
}
impl TimerWheel {
pub(crate) fn new() -> TimerWheel {
TimerWheel {
heap: BinaryHeap::new(),
counter: 0,
}
}
pub(crate) fn insert(&mut self, deadline: Instant, token: Token) -> u32 {
self.heap.push(TimeoutData {
deadline,
token: RefCell::new(Some(token)),
counter: self.counter,
});
let ret = self.counter;
self.counter += 1;
ret
}
pub(crate) fn insert_reuse(&mut self, counter: u32, deadline: Instant, token: Token) {
self.heap.push(TimeoutData {
deadline,
token: RefCell::new(Some(token)),
counter,
});
}
pub(crate) fn cancel(&mut self, counter: u32) {
self.heap
.iter()
.find(|data| data.counter == counter)
.map(|data| data.token.take());
}
pub(crate) fn next_expired(&mut self, now: Instant) -> Option<(u32, Token)> {
loop {
// check if there is an expired item
if let Some(data) = self.heap.peek() {
if data.deadline > now {
return None;
}
// there is an expired timeout, continue the
// loop body
} else {
return None;
}
            // There is an item in the heap, so this unwrap cannot panic
let data = self.heap.pop().unwrap();
if let Some(token) = data.token.into_inner() {
return Some((data.counter, token));
}
// otherwise this timeout was cancelled, continue looping
}
}
pub(crate) fn next_deadline(&self) -> Option<std::time::Instant> {
self.heap.peek().map(|data| data.deadline)
}
}
// trait implementations for TimeoutData
impl std::cmp::Ord for TimeoutData {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
// earlier values have priority
self.deadline.cmp(&other.deadline).reverse()
}
}
impl std::cmp::PartialOrd for TimeoutData {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
// This impl is required for PartialOrd but actually never used
// and the type is private, so ignore its coverage
impl std::cmp::PartialEq for TimeoutData {
#[cfg_attr(feature = "nightly_coverage", coverage(off))]
fn eq(&self, other: &Self) -> bool {
self.deadline == other.deadline
}
}
impl std::cmp::Eq for TimeoutData {}
// Logic for timer futures
/// A future that resolves once a certain timeout is expired
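///
/// A usage sketch driving the future with calloop's futures executor (assumes the
/// `executor` cargo feature; error handling elided):
///
/// ```no_run
/// use std::time::Duration;
/// use calloop::EventLoop;
/// use calloop::timer::TimeoutFuture;
///
/// let mut event_loop: EventLoop<()> = EventLoop::try_new().unwrap();
/// let timeout = TimeoutFuture::from_duration(&event_loop.handle(), Duration::from_secs(1));
/// let (exec, sched) = calloop::futures::executor::<()>().unwrap();
/// event_loop
///     .handle()
///     .insert_source(exec, |(), _metadata, _shared_data| {
///         // the timeout future has resolved
///     })
///     .unwrap();
/// sched.schedule(timeout).unwrap();
/// ```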
pub struct TimeoutFuture {
deadline: Option<Instant>,
waker: Rc<RefCell<Option<Waker>>>,
}
impl std::fmt::Debug for TimeoutFuture {
#[cfg_attr(feature = "nightly_coverage", coverage(off))]
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("TimeoutFuture")
.field("deadline", &self.deadline)
.finish_non_exhaustive()
}
}
impl TimeoutFuture {
/// Create a future that resolves after a given duration
pub fn from_duration<Data>(handle: &LoopHandle<'_, Data>, duration: Duration) -> TimeoutFuture {
Self::from_deadline_inner(handle, Instant::now().checked_add(duration))
}
/// Create a future that resolves at a given instant
pub fn from_deadline<Data>(handle: &LoopHandle<'_, Data>, deadline: Instant) -> TimeoutFuture {
Self::from_deadline_inner(handle, Some(deadline))
}
/// Create a future that resolves at a given instant
fn from_deadline_inner<Data>(
handle: &LoopHandle<'_, Data>,
deadline: Option<Instant>,
) -> TimeoutFuture {
let timer = Timer::from_deadline_inner(deadline);
let waker = Rc::new(RefCell::new(None::<Waker>));
handle
.insert_source(timer, {
let waker = waker.clone();
move |_, &mut (), _| {
if let Some(waker) = waker.borrow_mut().clone() {
waker.wake()
}
TimeoutAction::Drop
}
})
.unwrap();
TimeoutFuture { deadline, waker }
}
}
impl std::future::Future for TimeoutFuture {
type Output = ();
fn poll(
self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Self::Output> {
match self.deadline {
None => return std::task::Poll::Pending,
Some(deadline) => {
if Instant::now() >= deadline {
return std::task::Poll::Ready(());
}
}
}
*self.waker.borrow_mut() = Some(cx.waker().clone());
std::task::Poll::Pending
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::*;
use std::time::Duration;
#[test]
fn simple_timer() {
let mut event_loop = EventLoop::try_new().unwrap();
let mut dispatched = false;
event_loop
.handle()
.insert_source(
Timer::from_duration(Duration::from_millis(100)),
|_, &mut (), dispatched| {
*dispatched = true;
TimeoutAction::Drop
},
)
.unwrap();
event_loop
.dispatch(Some(Duration::ZERO), &mut dispatched)
.unwrap();
// not yet dispatched
assert!(!dispatched);
event_loop
.dispatch(Some(Duration::from_millis(150)), &mut dispatched)
.unwrap();
// now dispatched
assert!(dispatched);
}
#[test]
fn simple_timer_instant() {
let mut event_loop = EventLoop::try_new().unwrap();
let mut dispatched = false;
event_loop
.handle()
.insert_source(
Timer::from_duration(Duration::from_millis(100)),
|_, &mut (), dispatched| {
*dispatched = true;
TimeoutAction::Drop
},
)
.unwrap();
event_loop
.dispatch(Some(Duration::from_millis(150)), &mut dispatched)
.unwrap();
// now dispatched
assert!(dispatched);
}
#[test]
fn immediate_timer() {
let mut event_loop = EventLoop::try_new().unwrap();
let mut dispatched = false;
event_loop
.handle()
.insert_source(Timer::immediate(), |_, &mut (), dispatched| {
*dispatched = true;
TimeoutAction::Drop
})
.unwrap();
event_loop
.dispatch(Some(Duration::ZERO), &mut dispatched)
.unwrap();
// now dispatched
assert!(dispatched);
}
// We cannot actually test high precision timers, as they are only high precision in release mode
    // This test is here to ensure that the high-precision codepaths are executed and work as intended,
    // even if we cannot verify that they are actually high precision.
#[test]
fn high_precision_timer() {
let mut event_loop = EventLoop::try_new().unwrap();
let mut dispatched = false;
event_loop
.handle()
.insert_source(
Timer::from_duration(Duration::from_millis(100)),
|_, &mut (), dispatched| {
*dispatched = true;
TimeoutAction::Drop
},
)
.unwrap();
event_loop
.dispatch(Some(Duration::ZERO), &mut dispatched)
.unwrap();
// not yet dispatched
assert!(!dispatched);
event_loop
.dispatch(Some(Duration::from_micros(10200)), &mut dispatched)
.unwrap();
        // not yet dispatched
assert!(!dispatched);
event_loop
.dispatch(Some(Duration::from_millis(100)), &mut dispatched)
.unwrap();
// now dispatched
assert!(dispatched);
}
#[test]
fn cancel_timer() {
let mut event_loop = EventLoop::try_new().unwrap();
let mut dispatched = false;
let token = event_loop
.handle()
.insert_source(
Timer::from_duration(Duration::from_millis(100)),
|_, &mut (), dispatched| {
*dispatched = true;
TimeoutAction::Drop
},
)
.unwrap();
event_loop
.dispatch(Some(Duration::ZERO), &mut dispatched)
.unwrap();
// not yet dispatched
assert!(!dispatched);
event_loop.handle().remove(token);
event_loop
.dispatch(Some(Duration::from_millis(150)), &mut dispatched)
.unwrap();
// still not dispatched
assert!(!dispatched);
}
#[test]
fn repeating_timer() {
let mut event_loop = EventLoop::try_new().unwrap();
let mut dispatched = 0;
event_loop
.handle()
.insert_source(
Timer::from_duration(Duration::from_millis(500)),
|_, &mut (), dispatched| {
*dispatched += 1;
TimeoutAction::ToDuration(Duration::from_millis(500))
},
)
.unwrap();
event_loop
.dispatch(Some(Duration::from_millis(250)), &mut dispatched)
.unwrap();
assert_eq!(dispatched, 0);
event_loop
.dispatch(Some(Duration::from_millis(510)), &mut dispatched)
.unwrap();
assert_eq!(dispatched, 1);
event_loop
.dispatch(Some(Duration::from_millis(510)), &mut dispatched)
.unwrap();
assert_eq!(dispatched, 2);
event_loop
.dispatch(Some(Duration::from_millis(510)), &mut dispatched)
.unwrap();
assert_eq!(dispatched, 3);
}
#[cfg(feature = "executor")]
#[test]
fn timeout_future() {
let mut event_loop = EventLoop::try_new().unwrap();
let mut dispatched = 0;
let timeout_1 =
TimeoutFuture::from_duration(&event_loop.handle(), Duration::from_millis(500));
let timeout_2 =
TimeoutFuture::from_duration(&event_loop.handle(), Duration::from_millis(1500));
// This one should never go off.
let timeout_3 = TimeoutFuture::from_duration(&event_loop.handle(), Duration::MAX);
let (exec, sched) = crate::sources::futures::executor().unwrap();
event_loop
.handle()
.insert_source(exec, move |(), &mut (), got| {
*got += 1;
})
.unwrap();
sched.schedule(timeout_1).unwrap();
sched.schedule(timeout_2).unwrap();
sched.schedule(timeout_3).unwrap();
// We do a 0-timeout dispatch after every regular dispatch to let the timeout triggers
// flow back to the executor
event_loop
.dispatch(Some(Duration::ZERO), &mut dispatched)
.unwrap();
event_loop
.dispatch(Some(Duration::ZERO), &mut dispatched)
.unwrap();
assert_eq!(dispatched, 0);
event_loop
.dispatch(Some(Duration::from_millis(1000)), &mut dispatched)
.unwrap();
event_loop
.dispatch(Some(Duration::ZERO), &mut dispatched)
.unwrap();
assert_eq!(dispatched, 1);
event_loop
.dispatch(Some(Duration::from_millis(1100)), &mut dispatched)
.unwrap();
event_loop
.dispatch(Some(Duration::ZERO), &mut dispatched)
.unwrap();
assert_eq!(dispatched, 2);
}
#[test]
fn no_overflow() {
let mut event_loop = EventLoop::try_new().unwrap();
let mut dispatched = 0;
event_loop
.handle()
.insert_source(
Timer::from_duration(Duration::from_millis(500)),
|_, &mut (), dispatched| {
*dispatched += 1;
TimeoutAction::Drop
},
)
.unwrap();
event_loop
.handle()
.insert_source(Timer::from_duration(Duration::MAX), |_, &mut (), _| {
panic!("This timer should never go off")
})
.unwrap();
event_loop
.dispatch(Some(Duration::from_millis(250)), &mut dispatched)
.unwrap();
assert_eq!(dispatched, 0);
event_loop
.dispatch(Some(Duration::from_millis(510)), &mut dispatched)
.unwrap();
assert_eq!(dispatched, 1);
event_loop
.dispatch(Some(Duration::from_millis(510)), &mut dispatched)
.unwrap();
assert_eq!(dispatched, 1);
}
}

1132
vendor/calloop/src/sources/transient.rs vendored Normal file

File diff suppressed because it is too large Load Diff

447
vendor/calloop/src/sys.rs vendored Normal file
View File

@@ -0,0 +1,447 @@
use std::{cell::RefCell, collections::HashMap, rc::Rc, sync::Arc, time::Duration};
#[cfg(unix)]
use std::os::unix::io::{AsFd, AsRawFd, BorrowedFd as Borrowed, RawFd as Raw};
#[cfg(windows)]
use std::os::windows::io::{AsRawSocket, AsSocket, BorrowedSocket as Borrowed, RawSocket as Raw};
use polling::{Event, Events, PollMode, Poller};
use crate::sources::timer::TimerWheel;
use crate::token::TokenInner;
use crate::RegistrationToken;
/// Possible modes for registering a file descriptor
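///
/// A sketch of how a mode is usually supplied when building a
/// [`Generic`](crate::generic::Generic) source (assumes a Unix target, where
/// `std::io::Stdin` implements `AsFd`):
///
/// ```no_run
/// use calloop::generic::Generic;
/// use calloop::{Interest, Mode};
///
/// // wrap stdin as a read-interest, level-triggered event source
/// let _source = Generic::new(std::io::stdin(), Interest::READ, Mode::Level);
/// ```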
#[derive(Copy, Clone, Debug)]
pub enum Mode {
/// Single event generation
///
/// This FD will be disabled as soon as it has generated one event.
///
/// The user will need to use `LoopHandle::update()` to re-enable it if
/// desired.
OneShot,
/// Level-triggering
///
/// This FD will report events on every poll as long as the requested interests
/// are available.
Level,
/// Edge-triggering
///
/// This FD will report events only when it *gains* one of the requested interests.
    /// It must thus be fully processed before it will generate events again.
///
/// This mode is not supported on certain platforms, and an error will be returned
/// if it is used.
///
/// ## Supported Platforms
///
/// As of the time of writing, the platforms that support edge triggered polling are
/// as follows:
///
/// - Linux/Android
/// - macOS/iOS/tvOS/watchOS
/// - FreeBSD/OpenBSD/NetBSD/DragonflyBSD
Edge,
}
/// Interest to register regarding the file descriptor
#[derive(Copy, Clone, Debug)]
pub struct Interest {
/// Wait for the FD to be readable
pub readable: bool,
/// Wait for the FD to be writable
pub writable: bool,
}
impl Interest {
/// Shorthand for empty interest
pub const EMPTY: Interest = Interest {
readable: false,
writable: false,
};
/// Shorthand for read interest
pub const READ: Interest = Interest {
readable: true,
writable: false,
};
/// Shorthand for write interest
pub const WRITE: Interest = Interest {
readable: false,
writable: true,
};
/// Shorthand for read and write interest
pub const BOTH: Interest = Interest {
readable: true,
writable: true,
};
}
/// Readiness for a file descriptor notification
#[derive(Copy, Clone, Debug)]
pub struct Readiness {
/// Is the FD readable
pub readable: bool,
/// Is the FD writable
pub writable: bool,
/// Is the FD in an error state
pub error: bool,
}
impl Readiness {
/// Shorthand for empty readiness
pub const EMPTY: Readiness = Readiness {
readable: false,
writable: false,
error: false,
};
}
#[derive(Debug)]
pub(crate) struct PollEvent {
pub(crate) readiness: Readiness,
pub(crate) token: Token,
}
/// Factory for creating tokens in your registrations
///
/// When composing event sources, each sub-source needs to
/// have its own token to identify itself. This factory is
/// provided to produce such unique tokens.
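///
/// A sketch of a composite event source delegating registration to two sub-sources,
/// each drawing its own tokens from the factory (the field names are illustrative):
///
/// ```ignore
/// fn register(&mut self, poll: &mut Poll, token_factory: &mut TokenFactory) -> crate::Result<()> {
///     self.read_source.register(poll, token_factory)?;
///     self.write_source.register(poll, token_factory)
/// }
/// ```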
#[derive(Debug)]
pub struct TokenFactory {
next_token: TokenInner,
}
impl TokenFactory {
pub(crate) fn new(token: TokenInner) -> TokenFactory {
TokenFactory {
next_token: token.forget_sub_id(),
}
}
/// Get the "raw" registration token of this TokenFactory
pub(crate) fn registration_token(&self) -> RegistrationToken {
RegistrationToken::new(self.next_token.forget_sub_id())
}
/// Produce a new unique token
pub fn token(&mut self) -> Token {
let token = self.next_token;
self.next_token = token.increment_sub_id();
Token { inner: token }
}
}
/// A token (for implementation of the [`EventSource`](crate::EventSource) trait)
///
/// This token is produced by the [`TokenFactory`] and is used when calling the
/// [`EventSource`](crate::EventSource) implementations to process events, in order
/// to identify which sub-source produced them.
///
/// You should forward it to the [`Poll`] when registering your file descriptors.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct Token {
pub(crate) inner: TokenInner,
}
/// The polling system
///
/// This type represents the polling system of calloop, on which you
/// can register your file descriptors. This interface is only accessible in
/// implementations of the [`EventSource`](crate::EventSource) trait.
///
/// You only need to interact with this type if you are implementing your
/// own event sources, while implementing the [`EventSource`](crate::EventSource) trait.
/// And even in this case, you can often just use the [`Generic`](crate::generic::Generic) event
/// source and delegate the implementations to it.
pub struct Poll {
/// The handle to wepoll/epoll/kqueue/... used to poll for events.
pub(crate) poller: Arc<Poller>,
/// The buffer of events returned by the poller.
events: RefCell<Events>,
/// The sources registered as level triggered.
///
/// Some platforms that `polling` supports do not support level-triggered events. As of the time
/// of writing, this only includes Solaris and illumos. To work around this, we emulate level
/// triggered events by keeping this map of file descriptors.
///
/// One can emulate level triggered events on top of oneshot events by just re-registering the
/// file descriptor every time it is polled. However, this is not ideal, as it requires a
    /// system call every time. It's better to use the integrated system, if available.
level_triggered: Option<RefCell<HashMap<usize, (Raw, polling::Event)>>>,
pub(crate) timers: Rc<RefCell<TimerWheel>>,
}
impl std::fmt::Debug for Poll {
#[cfg_attr(feature = "nightly_coverage", coverage(off))]
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("Poll { ... }")
}
}
impl Poll {
pub(crate) fn new() -> crate::Result<Poll> {
Self::new_inner(false)
}
fn new_inner(force_fallback_lt: bool) -> crate::Result<Poll> {
let poller = Poller::new()?;
let level_triggered = if poller.supports_level() && !force_fallback_lt {
None
} else {
Some(RefCell::new(HashMap::new()))
};
Ok(Poll {
poller: Arc::new(poller),
events: RefCell::new(Events::new()),
timers: Rc::new(RefCell::new(TimerWheel::new())),
level_triggered,
})
}
pub(crate) fn poll(&self, mut timeout: Option<Duration>) -> crate::Result<Vec<PollEvent>> {
let now = std::time::Instant::now();
// Adjust the timeout for the timers.
if let Some(next_timeout) = self.timers.borrow().next_deadline() {
if next_timeout <= now {
timeout = Some(Duration::ZERO);
} else if let Some(deadline) = timeout {
timeout = Some(std::cmp::min(deadline, next_timeout - now));
} else {
timeout = Some(next_timeout - now);
}
};
let mut events = self.events.borrow_mut();
events.clear();
self.poller.wait(&mut events, timeout)?;
// Convert `polling` events to `calloop` events.
let level_triggered = self.level_triggered.as_ref().map(RefCell::borrow);
let mut poll_events = events
.iter()
.map(|ev| {
// If we need to emulate level-triggered events...
if let Some(level_triggered) = level_triggered.as_ref() {
// ...and this event is from a level-triggered source...
if let Some((source, interest)) = level_triggered.get(&ev.key) {
// ...then we need to re-register the source.
// SAFETY: The source is valid.
self.poller
.modify(unsafe { Borrowed::borrow_raw(*source) }, *interest)?;
}
}
Ok(PollEvent {
readiness: Readiness {
readable: ev.readable,
writable: ev.writable,
error: false,
},
token: Token {
inner: TokenInner::from(ev.key),
},
})
})
.collect::<std::io::Result<Vec<_>>>()?;
drop(events);
// Update 'now' as some time may have elapsed in poll()
let now = std::time::Instant::now();
let mut timers = self.timers.borrow_mut();
while let Some((_, token)) = timers.next_expired(now) {
poll_events.push(PollEvent {
readiness: Readiness {
readable: true,
writable: false,
error: false,
},
token,
});
}
Ok(poll_events)
}
/// Register a new file descriptor for polling
///
/// The file descriptor will be registered with given interest,
/// mode and token. This function will fail if given a
/// bad file descriptor or if the provided file descriptor is already
/// registered.
///
/// # Safety
///
/// The registered source must not be dropped before it is unregistered.
///
/// # Leaking tokens
///
/// If your event source is dropped without being unregistered, the token
/// passed in here will remain on the heap and continue to be used by the
/// polling system even though no event source will match it.
pub unsafe fn register(
&self,
#[cfg(unix)] fd: impl AsFd,
#[cfg(windows)] fd: impl AsSocket,
interest: Interest,
mode: Mode,
token: Token,
) -> crate::Result<()> {
let raw = {
#[cfg(unix)]
{
fd.as_fd().as_raw_fd()
}
#[cfg(windows)]
{
fd.as_socket().as_raw_socket()
}
};
let ev = cvt_interest(interest, token);
// SAFETY: See invariant on function.
unsafe {
self.poller
.add_with_mode(raw, ev, cvt_mode(mode, self.poller.supports_level()))?;
}
// If this is level triggered and we're emulating level triggered mode...
if let (Mode::Level, Some(level_triggered)) = (mode, self.level_triggered.as_ref()) {
// ...then we need to keep track of the source.
let mut level_triggered = level_triggered.borrow_mut();
level_triggered.insert(ev.key, (raw, ev));
}
Ok(())
}
/// Update the registration for a file descriptor
///
/// This allows you to change the interest, mode or token of a file
/// descriptor. Fails if the provided fd is not currently registered.
///
/// See note on [`register()`](Self::register()) regarding leaking.
pub fn reregister(
&self,
#[cfg(unix)] fd: impl AsFd,
#[cfg(windows)] fd: impl AsSocket,
interest: Interest,
mode: Mode,
token: Token,
) -> crate::Result<()> {
let (borrowed, raw) = {
#[cfg(unix)]
{
(fd.as_fd(), fd.as_fd().as_raw_fd())
}
#[cfg(windows)]
{
(fd.as_socket(), fd.as_socket().as_raw_socket())
}
};
let ev = cvt_interest(interest, token);
self.poller
.modify_with_mode(borrowed, ev, cvt_mode(mode, self.poller.supports_level()))?;
// If this is level triggered and we're emulating level triggered mode...
if let (Mode::Level, Some(level_triggered)) = (mode, self.level_triggered.as_ref()) {
// ...then we need to keep track of the source.
let mut level_triggered = level_triggered.borrow_mut();
level_triggered.insert(ev.key, (raw, ev));
}
Ok(())
}
/// Unregister a file descriptor
///
/// This file descriptor will no longer generate events. Fails if the
/// provided file descriptor is not currently registered.
pub fn unregister(
&self,
#[cfg(unix)] fd: impl AsFd,
#[cfg(windows)] fd: impl AsSocket,
) -> crate::Result<()> {
let (borrowed, raw) = {
#[cfg(unix)]
{
(fd.as_fd(), fd.as_fd().as_raw_fd())
}
#[cfg(windows)]
{
(fd.as_socket(), fd.as_socket().as_raw_socket())
}
};
self.poller.delete(borrowed)?;
if let Some(level_triggered) = self.level_triggered.as_ref() {
let mut level_triggered = level_triggered.borrow_mut();
level_triggered.retain(|_, (source, _)| *source != raw);
}
Ok(())
}
/// Get a thread-safe handle which can be used to wake up the `Poll`.
pub(crate) fn notifier(&self) -> Notifier {
Notifier(self.poller.clone())
}
/// Get a reference to the poller.
pub(crate) fn poller(&self) -> &Arc<Poller> {
&self.poller
}
}
/// Thread-safe handle which can be used to wake up the `Poll`.
#[derive(Clone)]
pub(crate) struct Notifier(Arc<Poller>);
impl Notifier {
pub(crate) fn notify(&self) -> crate::Result<()> {
self.0.notify()?;
Ok(())
}
}
fn cvt_interest(interest: Interest, tok: Token) -> Event {
let mut event = Event::none(tok.inner.into());
event.readable = interest.readable;
event.writable = interest.writable;
event
}
fn cvt_mode(mode: Mode, supports_other_modes: bool) -> PollMode {
if !supports_other_modes {
return PollMode::Oneshot;
}
match mode {
Mode::Edge => PollMode::Edge,
Mode::Level => PollMode::Level,
Mode::OneShot => PollMode::Oneshot,
}
}

112
vendor/calloop/src/token.rs vendored Normal file
View File

@@ -0,0 +1,112 @@
// Several implementations of the internals of `Token` depending on the size of `usize`
use std::convert::TryInto;
#[cfg(target_pointer_width = "64")]
const BITS_VERSION: usize = 16;
#[cfg(target_pointer_width = "64")]
const BITS_SUBID: usize = 16;
#[cfg(target_pointer_width = "32")]
const BITS_VERSION: usize = 8;
#[cfg(target_pointer_width = "32")]
const BITS_SUBID: usize = 8;
#[cfg(target_pointer_width = "16")]
const BITS_VERSION: usize = 4;
#[cfg(target_pointer_width = "16")]
const BITS_SUBID: usize = 4;
const MASK_VERSION: usize = (1 << BITS_VERSION) - 1;
const MASK_SUBID: usize = (1 << BITS_SUBID) - 1;
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub(crate) struct TokenInner {
id: u32,
version: u16,
sub_id: u16,
}
impl TokenInner {
pub(crate) fn new(id: usize) -> Result<TokenInner, ()> {
Ok(TokenInner {
id: id.try_into().map_err(|_| ())?,
version: 0,
sub_id: 0,
})
}
pub(crate) fn get_id(self) -> usize {
self.id as usize
}
pub(crate) fn same_source_as(self, other: TokenInner) -> bool {
self.id == other.id && self.version == other.version
}
pub(crate) fn increment_version(self) -> TokenInner {
TokenInner {
id: self.id,
version: self.version.wrapping_add(1) & (MASK_VERSION as u16),
sub_id: 0,
}
}
pub(crate) fn increment_sub_id(self) -> TokenInner {
let sub_id = match self.sub_id.checked_add(1) {
Some(sid) if sid <= (MASK_SUBID as u16) => sid,
_ => panic!("Maximum number of sub-ids reached for source #{}", self.id),
};
TokenInner {
id: self.id,
version: self.version,
sub_id,
}
}
pub(crate) fn forget_sub_id(self) -> TokenInner {
TokenInner {
id: self.id,
version: self.version,
sub_id: 0,
}
}
}
impl From<usize> for TokenInner {
fn from(value: usize) -> Self {
let sub_id = (value & MASK_SUBID) as u16;
let version = ((value >> BITS_SUBID) & MASK_VERSION) as u16;
let id = (value >> (BITS_SUBID + BITS_VERSION)) as u32;
TokenInner {
id,
version,
sub_id,
}
}
}
impl From<TokenInner> for usize {
fn from(token: TokenInner) -> Self {
((token.id as usize) << (BITS_SUBID + BITS_VERSION))
+ ((token.version as usize) << BITS_SUBID)
+ (token.sub_id as usize)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[should_panic]
#[test]
fn overflow_subid() {
let token = TokenInner {
id: 0,
version: 0,
sub_id: MASK_SUBID as u16,
};
token.increment_sub_id();
}
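    // Illustrative round-trip check of the id/version/sub_id packing implemented by
    // the `From` conversions above; the field values are arbitrary but in range on
    // all supported pointer widths.
    #[test]
    fn usize_roundtrip() {
        let token = TokenInner {
            id: 42,
            version: 3,
            sub_id: 7,
        };
        let packed: usize = token.into();
        assert_eq!(TokenInner::from(packed), token);
    }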
}