Vendor dependencies for 0.3.0 release

2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

1
vendor/dispatch/.cargo-checksum.json vendored Normal file

@@ -0,0 +1 @@
{"files":{"Cargo.lock":"122b2c045123aa5f9c1d7452839a6f8e9b200323478e3699490b40ff1fd01ed3","Cargo.toml":"bd6cecbea33be54304d73d48e29182b4baff69695a25e68d7ac67ff3e69263a9","README.md":"9e581e0edb3dcc8238feda4bc709c8c91d43c286ac91f42cf6b0004c9335da58","examples/main.rs":"da1d4571dd62e1fcef114a68c15bd340dbb5e83b84391e3bbb7a13cd8e57e88a","src/ffi.rs":"320c101bdd43517d8cdb5663861733aa569705495ca97ab547571afcf2820b87","src/group.rs":"22d17cacb1401da31acfca71ce8cead89dfc728f1873de7192b000f1c7456707","src/lib.rs":"1f75d8f3bb8dbc57449c8727c68d7c75e6a5cf20347617c96ab6a608f186612b","src/once.rs":"b67a020f1ba3ebecf52f065cb058aba61d97b5b1155e6495e5b0fc0b996f7931","src/queue.rs":"efffe03ab4615c85f0708ed47b41973b2ccd4d578e146743b455986ffc95da87","src/sem.rs":"5d74917749799a8b3385bf694caf0c344a11e8a9363c4512a21d956bcf7a14ac"},"package":"bd0c93bb4b0c6d9b77f4435b0ae98c24d17f1c45b2ff844c6151a07256ca923b"}

6
vendor/dispatch/Cargo.lock generated vendored Normal file

@@ -0,0 +1,6 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
[[package]]
name = "dispatch"
version = "0.2.0"

24
vendor/dispatch/Cargo.toml vendored Normal file

@@ -0,0 +1,24 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
edition = "2018"
name = "dispatch"
version = "0.2.0"
authors = ["Steven Sheldon"]
exclude = [".gitignore", ".travis.yml", "travis_install.sh", "travis_test.sh", "tests-ios/**"]
description = "Rust wrapper for Apple's Grand Central Dispatch."
documentation = "http://ssheldon.github.io/rust-objc/dispatch/"
readme = "README.md"
keywords = ["gcd", "objective-c", "osx", "ios"]
license = "MIT"
repository = "http://github.com/SSheldon/rust-dispatch"

44
vendor/dispatch/README.md vendored Normal file

@@ -0,0 +1,44 @@
Rust wrapper for Apple's Grand Central Dispatch (GCD).
GCD is an implementation of task parallelism that allows tasks to be submitted
to queues where they are scheduled to execute.
For more information, see Apple's [Grand Central Dispatch reference](
https://developer.apple.com/library/mac/documentation/Performance/Reference/GCD_libdispatch_Ref/index.html).
* Documentation: http://ssheldon.github.io/rust-objc/dispatch/
* Crate: https://crates.io/crates/dispatch
# Serial Queues
Serial queues execute tasks serially in FIFO order. The application's main
queue is serial and can be accessed through the `Queue::main` function.
``` rust
use dispatch::{Queue, QueueAttribute};
let queue = Queue::create("com.example.rust", QueueAttribute::Serial);
queue.exec_async(|| println!("Hello"));
queue.exec_async(|| println!("World"));
```
# Concurrent Queues
Concurrent dispatch queues execute tasks concurrently. GCD provides global
concurrent queues that can be accessed through the `Queue::global` function.
`Queue` has two methods that can simplify processing data in parallel, `for_each`
and `map`:
``` rust
use dispatch::{Queue, QueuePriority};
let queue = Queue::global(QueuePriority::Default);
let mut nums = vec![1, 2];
queue.for_each(&mut nums, |x| *x += 1);
assert!(nums == [2, 3]);
let nums = queue.map(nums, |x| x.to_string());
assert!(nums[0] == "2");
```
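
A small supplement, not part of the vendored README: `exec_sync` also returns the closure's result, using the 0.2.0 method names shown in `src/queue.rs`. The queue label is illustrative.

```rust
use dispatch::{Queue, QueueAttribute};

fn main() {
    let queue = Queue::create("com.example.rust", QueueAttribute::Serial);

    // exec_sync blocks until the closure has run and returns its result.
    let answer = queue.exec_sync(|| 6 * 7);
    assert_eq!(answer, 42);

    // Owned values can be moved into the closure and consumed there.
    let greeting = String::from("Hello, GCD");
    let len = queue.exec_sync(move || greeting.len());
    assert_eq!(len, 10);
}
```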

51
vendor/dispatch/examples/main.rs vendored Normal file

@@ -0,0 +1,51 @@
extern crate dispatch;
use std::io;
use std::process::exit;
use dispatch::{Queue, QueuePriority};
/// Prompts for a number and adds it to the given sum.
///
/// Reading from stdin is done on the given queue.
/// All printing is performed on the main queue.
/// Repeats until the user stops entering numbers.
fn prompt(mut sum: i32, queue: Queue) {
queue.clone().exec_async(move || {
let main = Queue::main();
// Print our prompt on the main thread and wait until it's complete
main.exec_sync(|| {
println!("Enter a number:");
});
// Read the number the user enters
let mut input = String::new();
io::stdin().read_line(&mut input).unwrap();
if let Ok(num) = input.trim().parse::<i32>() {
sum += num;
// Print the sum on the main thread and wait until it's complete
main.exec_sync(|| {
println!("Sum is {}\n", sum);
});
// Do it again!
prompt(sum, queue);
} else {
// Bail if no number was entered
main.exec_async(|| {
println!("Not a number, exiting.");
exit(0);
});
}
});
}
fn main() {
// Read from stdin on a background queue so that the main queue is free
// to handle other events. All printing still occurs through the main
// queue to avoid jumbled output.
prompt(0, Queue::global(QueuePriority::Default));
unsafe {
dispatch::ffi::dispatch_main();
}
}

173
vendor/dispatch/src/ffi.rs vendored Normal file

@@ -0,0 +1,173 @@
#![allow(missing_docs)]
#![allow(non_camel_case_types)]
use std::os::raw::{c_char, c_long, c_ulong, c_void};
#[repr(C)]
pub struct dispatch_object_s { _private: [u8; 0] }
// dispatch_block_t
pub type dispatch_function_t = extern fn(*mut c_void);
pub type dispatch_semaphore_t = *mut dispatch_object_s;
pub type dispatch_group_t = *mut dispatch_object_s;
pub type dispatch_object_t = *mut dispatch_object_s;
pub type dispatch_once_t = c_long;
pub type dispatch_queue_t = *mut dispatch_object_s;
pub type dispatch_time_t = u64;
// dispatch_source_type_t
// dispatch_fd_t
// dispatch_data_t
// dispatch_data_applier_t
// dispatch_io_t
// dispatch_io_handler_t
// dispatch_io_type_t
// dispatch_io_close_flags_t
// dispatch_io_interval_flags_t
pub type dispatch_queue_attr_t = *const dispatch_object_s;
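// On Apple platforms the libdispatch symbols are exported by libSystem;
// on other platforms, link against a standalone libdispatch library.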
#[cfg_attr(any(target_os = "macos", target_os = "ios"),
link(name = "System", kind = "dylib"))]
#[cfg_attr(not(any(target_os = "macos", target_os = "ios")),
link(name = "dispatch", kind = "dylib"))]
extern {
static _dispatch_main_q: dispatch_object_s;
static _dispatch_queue_attr_concurrent: dispatch_object_s;
pub fn dispatch_get_global_queue(identifier: c_long, flags: c_ulong) -> dispatch_queue_t;
pub fn dispatch_queue_create(label: *const c_char, attr: dispatch_queue_attr_t) -> dispatch_queue_t;
// dispatch_queue_attr_t dispatch_queue_attr_make_with_qos_class ( dispatch_queue_attr_t attr, dispatch_qos_class_t qos_class, int relative_priority );
pub fn dispatch_queue_get_label(queue: dispatch_queue_t) -> *const c_char;
pub fn dispatch_set_target_queue(object: dispatch_object_t, queue: dispatch_queue_t);
pub fn dispatch_main();
// void dispatch_async ( dispatch_queue_t queue, dispatch_block_t block );
pub fn dispatch_async_f(queue: dispatch_queue_t, context: *mut c_void, work: dispatch_function_t);
// void dispatch_sync ( dispatch_queue_t queue, dispatch_block_t block );
pub fn dispatch_sync_f(queue: dispatch_queue_t, context: *mut c_void, work: dispatch_function_t);
// void dispatch_after ( dispatch_time_t when, dispatch_queue_t queue, dispatch_block_t block );
pub fn dispatch_after_f(when: dispatch_time_t, queue: dispatch_queue_t, context: *mut c_void, work: dispatch_function_t);
// void dispatch_apply ( size_t iterations, dispatch_queue_t queue, void (^block)(size_t) );
pub fn dispatch_apply_f(iterations: usize, queue: dispatch_queue_t, context: *mut c_void, work: extern fn(*mut c_void, usize));
// void dispatch_once ( dispatch_once_t *predicate, dispatch_block_t block );
pub fn dispatch_once_f(predicate: *mut dispatch_once_t, context: *mut c_void, function: dispatch_function_t);
// void dispatch_group_async ( dispatch_group_t group, dispatch_queue_t queue, dispatch_block_t block );
pub fn dispatch_group_async_f(group: dispatch_group_t, queue: dispatch_queue_t, context: *mut c_void, work: dispatch_function_t);
pub fn dispatch_group_create() -> dispatch_group_t;
pub fn dispatch_group_enter(group: dispatch_group_t);
pub fn dispatch_group_leave(group: dispatch_group_t);
// void dispatch_group_notify ( dispatch_group_t group, dispatch_queue_t queue, dispatch_block_t block );
pub fn dispatch_group_notify_f(group: dispatch_group_t, queue: dispatch_queue_t, context: *mut c_void, work: dispatch_function_t);
pub fn dispatch_group_wait(group: dispatch_group_t, timeout: dispatch_time_t) -> c_long;
pub fn dispatch_get_context(object: dispatch_object_t) -> *mut c_void;
pub fn dispatch_release(object: dispatch_object_t);
pub fn dispatch_resume(object: dispatch_object_t);
pub fn dispatch_retain(object: dispatch_object_t);
pub fn dispatch_set_context(object: dispatch_object_t, context: *mut c_void);
pub fn dispatch_set_finalizer_f(object: dispatch_object_t, finalizer: dispatch_function_t);
pub fn dispatch_suspend(object: dispatch_object_t);
pub fn dispatch_semaphore_create(value: c_long) -> dispatch_semaphore_t;
pub fn dispatch_semaphore_signal(dsema: dispatch_semaphore_t) -> c_long;
pub fn dispatch_semaphore_wait(dsema: dispatch_semaphore_t, timeout: dispatch_time_t) -> c_long;
// void dispatch_barrier_async ( dispatch_queue_t queue, dispatch_block_t block );
pub fn dispatch_barrier_async_f(queue: dispatch_queue_t, context: *mut c_void, work: dispatch_function_t);
// void dispatch_barrier_sync ( dispatch_queue_t queue, dispatch_block_t block );
pub fn dispatch_barrier_sync_f(queue: dispatch_queue_t, context: *mut c_void, work: dispatch_function_t);
// void dispatch_source_cancel ( dispatch_source_t source );
// dispatch_source_t dispatch_source_create ( dispatch_source_type_t type, uintptr_t handle, unsigned long mask, dispatch_queue_t queue );
// unsigned long dispatch_source_get_data ( dispatch_source_t source );
// uintptr_t dispatch_source_get_handle ( dispatch_source_t source );
// unsigned long dispatch_source_get_mask ( dispatch_source_t source );
// void dispatch_source_merge_data ( dispatch_source_t source, unsigned long value );
// void dispatch_source_set_registration_handler ( dispatch_source_t source, dispatch_block_t handler );
// void dispatch_source_set_registration_handler_f ( dispatch_source_t source, dispatch_function_t handler );
// void dispatch_source_set_cancel_handler ( dispatch_source_t source, dispatch_block_t handler );
// void dispatch_source_set_cancel_handler_f ( dispatch_source_t source, dispatch_function_t handler );
// void dispatch_source_set_event_handler ( dispatch_source_t source, dispatch_block_t handler );
// void dispatch_source_set_event_handler_f ( dispatch_source_t source, dispatch_function_t handler );
// void dispatch_source_set_timer ( dispatch_source_t source, dispatch_time_t start, uint64_t interval, uint64_t leeway );
// long dispatch_source_testcancel ( dispatch_source_t source );
// void dispatch_read ( dispatch_fd_t fd, size_t length, dispatch_queue_t queue, void (^handler)(dispatch_data_t data, int error) );
// void dispatch_write ( dispatch_fd_t fd, dispatch_data_t data, dispatch_queue_t queue, void (^handler)(dispatch_data_t data, int error) );
// dispatch_io_t dispatch_io_create ( dispatch_io_type_t type, dispatch_fd_t fd, dispatch_queue_t queue, void (^cleanup_handler)(int error) );
// dispatch_io_t dispatch_io_create_with_path ( dispatch_io_type_t type, const char *path, int oflag, mode_t mode, dispatch_queue_t queue, void (^cleanup_handler)(int error) );
// dispatch_io_t dispatch_io_create_with_io ( dispatch_io_type_t type, dispatch_io_t io, dispatch_queue_t queue, void (^cleanup_handler)(int error) );
// void dispatch_io_read ( dispatch_io_t channel, off_t offset, size_t length, dispatch_queue_t queue, dispatch_io_handler_t io_handler );
// void dispatch_io_write ( dispatch_io_t channel, off_t offset, dispatch_data_t data, dispatch_queue_t queue, dispatch_io_handler_t io_handler );
// void dispatch_io_close ( dispatch_io_t channel, dispatch_io_close_flags_t flags );
// void dispatch_io_barrier ( dispatch_io_t channel, dispatch_block_t barrier );
// void dispatch_io_set_high_water ( dispatch_io_t channel, size_t high_water );
// void dispatch_io_set_low_water ( dispatch_io_t channel, size_t low_water );
// void dispatch_io_set_interval ( dispatch_io_t channel, uint64_t interval, dispatch_io_interval_flags_t flags );
// dispatch_fd_t dispatch_io_get_descriptor ( dispatch_io_t channel );
// dispatch_data_t dispatch_data_create ( const void *buffer, size_t size, dispatch_queue_t queue, dispatch_block_t destructor );
// size_t dispatch_data_get_size ( dispatch_data_t data );
// dispatch_data_t dispatch_data_create_map ( dispatch_data_t data, const void **buffer_ptr, size_t *size_ptr );
// dispatch_data_t dispatch_data_create_concat ( dispatch_data_t data1, dispatch_data_t data2 );
// dispatch_data_t dispatch_data_create_subrange ( dispatch_data_t data, size_t offset, size_t length );
// bool dispatch_data_apply ( dispatch_data_t data, dispatch_data_applier_t applier );
// dispatch_data_t dispatch_data_copy_region ( dispatch_data_t data, size_t location, size_t *offset_ptr );
pub fn dispatch_time(when: dispatch_time_t, delta: i64) -> dispatch_time_t;
// dispatch_time_t dispatch_walltime( const struct timespec *when, int64_t delta);
// void dispatch_queue_set_specific ( dispatch_queue_t queue, const void *key, void *context, dispatch_function_t destructor );
// void * dispatch_queue_get_specific ( dispatch_queue_t queue, const void *key );
// void * dispatch_get_specific ( const void *key );
// dispatch_block_t dispatch_block_create(dispatch_block_flags_t flags, dispatch_block_t block);
// dispatch_block_t dispatch_block_create_with_qos_class(dispatch_block_flags_t flags, dispatch_qos_class_t qos_class, int relative_priority, dispatch_block_t block);
// void dispatch_block_perform(dispatch_block_flags_t flags, dispatch_block_t block);
// long dispatch_block_wait(dispatch_block_t block, dispatch_time_t timeout);
// dispatch_block_notify(dispatch_block_t block, dispatch_queue_t queue, dispatch_block_t notification_block);
// void dispatch_block_cancel(dispatch_block_t block);
// long dispatch_block_testcancel(dispatch_block_t block);
}
pub fn dispatch_get_main_queue() -> dispatch_queue_t {
unsafe { &_dispatch_main_q as *const _ as dispatch_queue_t }
}
pub const DISPATCH_QUEUE_SERIAL: dispatch_queue_attr_t = 0 as dispatch_queue_attr_t;
pub static DISPATCH_QUEUE_CONCURRENT: &'static dispatch_object_s = unsafe { &_dispatch_queue_attr_concurrent };
pub const DISPATCH_QUEUE_PRIORITY_HIGH: c_long = 2;
pub const DISPATCH_QUEUE_PRIORITY_DEFAULT: c_long = 0;
pub const DISPATCH_QUEUE_PRIORITY_LOW: c_long = -2;
pub const DISPATCH_QUEUE_PRIORITY_BACKGROUND: c_long = -1 << 15;
pub const DISPATCH_TIME_NOW: dispatch_time_t = 0;
pub const DISPATCH_TIME_FOREVER: dispatch_time_t = !0;
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_ffi_serial_queue() {
use std::os::raw::c_void;
use std::ptr;
extern fn serial_queue_test_add(num: *mut c_void) {
unsafe {
*(num as *mut u32) = 1;
}
}
let mut num: u32 = 0;
let num_ptr: *mut u32 = &mut num;
unsafe {
let q = dispatch_queue_create(ptr::null(), DISPATCH_QUEUE_SERIAL);
dispatch_sync_f(q, num_ptr as *mut c_void, serial_queue_test_add);
dispatch_release(q);
}
assert!(num == 1);
}
}
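
An editor's sketch, not part of the crate, of how the raw `_f` entry points are typically driven from Rust: a boxed closure travels as the context pointer and a trampoline function reclaims it. The names `run_boxed`, `async_on`, and `noop` are illustrative; the safe wrappers in `src/queue.rs` below do essentially this with more care.

```rust
use std::os::raw::c_void;
use std::ptr;

use dispatch::ffi::*;

// Trampoline: reclaims the boxed closure passed as the context pointer.
// The double Box keeps the context a thin pointer even for a trait object.
extern fn run_boxed(context: *mut c_void) {
    let work: Box<Box<dyn FnOnce() + Send>> =
        unsafe { Box::from_raw(context as *mut _) };
    work();
}

// Boxes a closure and submits it with dispatch_async_f.
fn async_on(queue: dispatch_queue_t, work: impl FnOnce() + Send + 'static) {
    let boxed: Box<Box<dyn FnOnce() + Send>> = Box::new(Box::new(work));
    unsafe { dispatch_async_f(queue, Box::into_raw(boxed) as *mut c_void, run_boxed) };
}

extern fn noop(_context: *mut c_void) {}

fn main() {
    unsafe {
        let q = dispatch_queue_create(ptr::null(), DISPATCH_QUEUE_SERIAL);
        let msg = String::from("ran on a GCD queue");
        async_on(q, move || println!("{}", msg));
        // A serial queue runs blocks in order, so this synchronous no-op
        // only returns once the async block above has completed.
        dispatch_sync_f(q, ptr::null_mut(), noop);
        dispatch_release(q);
    }
}
```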

180
vendor/dispatch/src/group.rs vendored Normal file

@@ -0,0 +1,180 @@
use std::time::Duration;
use crate::ffi::*;
use crate::{context_and_function, time_after_delay, WaitTimeout};
use crate::queue::Queue;
/// A Grand Central Dispatch group.
///
/// A `Group` is a mechanism for grouping closures and monitoring them. This
/// allows for aggregate synchronization, so you can track when all the
/// closures complete, even if they are running on different queues.
#[derive(Debug)]
pub struct Group {
ptr: dispatch_group_t,
}
impl Group {
/// Creates a new dispatch `Group`.
pub fn create() -> Group {
unsafe {
Group { ptr: dispatch_group_create() }
}
}
/// Indicates that a closure has entered self, and increments the current
/// count of outstanding tasks. Returns a `GroupGuard` that should be
/// dropped when the closure leaves self, decrementing the count.
pub fn enter(&self) -> GroupGuard {
GroupGuard::new(self)
}
/// Submits a closure asynchronously to the given `Queue` and associates it
/// with self.
pub fn exec_async<F>(&self, queue: &Queue, work: F)
where F: 'static + Send + FnOnce() {
let (context, work) = context_and_function(work);
unsafe {
dispatch_group_async_f(self.ptr, queue.ptr, context, work);
}
}
/// Schedules a closure to be submitted to the given `Queue` when all tasks
/// associated with self have completed.
/// If self is empty, the closure is submitted immediately.
pub fn notify<F>(&self, queue: &Queue, work: F)
where F: 'static + Send + FnOnce() {
let (context, work) = context_and_function(work);
unsafe {
dispatch_group_notify_f(self.ptr, queue.ptr, context, work);
}
}
/// Waits synchronously for all tasks associated with self to complete.
pub fn wait(&self) {
let result = unsafe {
dispatch_group_wait(self.ptr, DISPATCH_TIME_FOREVER)
};
assert!(result == 0, "Dispatch group wait errored");
}
/// Waits for all tasks associated with self to complete within the
/// specified duration.
/// Returns `Ok(())` if the tasks completed or a `WaitTimeout` error if the timeout elapsed.
pub fn wait_timeout(&self, timeout: Duration) -> Result<(), WaitTimeout> {
let when = time_after_delay(timeout);
let result = unsafe {
dispatch_group_wait(self.ptr, when)
};
if result == 0 {
Ok(())
} else {
Err(WaitTimeout { duration: timeout })
}
}
/// Returns whether self is currently empty.
pub fn is_empty(&self) -> bool {
let result = unsafe {
dispatch_group_wait(self.ptr, DISPATCH_TIME_NOW)
};
result == 0
}
}
unsafe impl Sync for Group { }
unsafe impl Send for Group { }
impl Clone for Group {
fn clone(&self) -> Self {
unsafe {
dispatch_retain(self.ptr);
}
Group { ptr: self.ptr }
}
}
impl Drop for Group {
fn drop(&mut self) {
unsafe {
dispatch_release(self.ptr);
}
}
}
/// An RAII guard which will leave a `Group` when dropped.
#[derive(Debug)]
pub struct GroupGuard {
group: Group,
}
impl GroupGuard {
fn new(group: &Group) -> GroupGuard {
unsafe {
dispatch_group_enter(group.ptr);
}
GroupGuard { group: group.clone() }
}
/// Drops self, leaving the `Group`.
pub fn leave(self) { }
}
impl Clone for GroupGuard {
fn clone(&self) -> Self {
GroupGuard::new(&self.group)
}
}
impl Drop for GroupGuard {
fn drop(&mut self) {
unsafe {
dispatch_group_leave(self.group.ptr);
}
}
}
#[cfg(test)]
mod tests {
use std::sync::{Arc, Mutex};
use crate::{Queue, QueueAttribute};
use super::Group;
#[test]
fn test_group() {
let group = Group::create();
let q = Queue::create("", QueueAttribute::Serial);
let num = Arc::new(Mutex::new(0));
let num2 = num.clone();
group.exec_async(&q, move || {
let mut num = num2.lock().unwrap();
*num += 1;
});
let guard = group.enter();
assert!(!group.is_empty());
let num3 = num.clone();
q.exec_async(move || {
let mut num = num3.lock().unwrap();
*num += 1;
guard.leave();
});
let notify_group = Group::create();
let guard = notify_group.enter();
let num4 = num.clone();
group.notify(&q, move || {
let mut num = num4.lock().unwrap();
*num *= 5;
guard.leave();
});
// Wait for the notify block to finish
notify_group.wait();
// If the notify ran, the group should be empty
assert!(group.is_empty());
// The notify must have run after the two blocks of the group
assert_eq!(*num.lock().unwrap(), 10);
}
}
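
An editor's usage sketch, not part of the crate, pulling the pieces above together: tasks on two queues are tracked by one `Group`, one of them via an explicit `GroupGuard`. The queue label is illustrative.

```rust
use dispatch::{Group, Queue, QueueAttribute, QueuePriority};

fn main() {
    let group = Group::create();
    let serial = Queue::create("com.example.group", QueueAttribute::Serial);
    let global = Queue::global(QueuePriority::Default);

    // Closures submitted through the group are tracked automatically.
    group.exec_async(&serial, || println!("serial task"));
    group.exec_async(&global, || println!("global task"));

    // Manual tracking: the guard leaves the group when dropped (or here,
    // explicitly via leave()).
    let guard = group.enter();
    serial.exec_async(move || {
        println!("manually tracked task");
        guard.leave();
    });

    // Blocks until every task associated with the group has completed.
    group.wait();
    assert!(group.is_empty());
}
```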

133
vendor/dispatch/src/lib.rs vendored Normal file

@@ -0,0 +1,133 @@
/*!
Rust wrapper for Apple's Grand Central Dispatch (GCD).
GCD is an implementation of task parallelism that allows tasks to be submitted
to queues where they are scheduled to execute.
For more information, see Apple's [Grand Central Dispatch reference](
https://developer.apple.com/library/mac/documentation/Performance/Reference/GCD_libdispatch_Ref/index.html).
# Serial Queues
Serial queues execute tasks serially in FIFO order. The application's main
queue is serial and can be accessed through the `Queue::main` function.
```
use dispatch::{Queue, QueueAttribute};
let queue = Queue::create("com.example.rust", QueueAttribute::Serial);
queue.exec_async(|| println!("Hello"));
queue.exec_async(|| println!("World"));
```
# Concurrent Queues
Concurrent dispatch queues execute tasks concurrently. GCD provides global
concurrent queues that can be accessed through the `Queue::global` function.
`Queue` has two methods that can simplify processing data in parallel, `for_each`
and `map`:
```
use dispatch::{Queue, QueuePriority};
let queue = Queue::global(QueuePriority::Default);
let mut nums = vec![1, 2];
queue.for_each(&mut nums, |x| *x += 1);
assert!(nums == [2, 3]);
let nums = queue.map(nums, |x| x.to_string());
assert!(nums[0] == "2");
```
*/
#![warn(missing_docs)]
use std::error::Error;
use std::fmt;
use std::mem;
use std::os::raw::c_void;
use std::time::Duration;
use crate::ffi::*;
pub use crate::group::{Group, GroupGuard};
pub use crate::once::Once;
pub use crate::queue::{Queue, QueueAttribute, QueuePriority, SuspendGuard};
pub use crate::sem::{Semaphore, SemaphoreGuard};
/// Raw foreign function interface for libdispatch.
pub mod ffi;
mod group;
mod queue;
mod once;
mod sem;
/// An error indicating a wait timed out.
#[derive(Clone, Debug)]
pub struct WaitTimeout {
duration: Duration,
}
impl fmt::Display for WaitTimeout {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Wait timed out after duration {:?}", self.duration)
}
}
impl Error for WaitTimeout { }
fn time_after_delay(delay: Duration) -> dispatch_time_t {
delay.as_secs().checked_mul(1_000_000_000).and_then(|i| {
i.checked_add(delay.subsec_nanos() as u64)
}).and_then(|i| {
if i < (i64::max_value() as u64) { Some(i as i64) } else { None }
}).map_or(DISPATCH_TIME_FOREVER, |i| unsafe {
dispatch_time(DISPATCH_TIME_NOW, i)
})
}
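// The `_f` libdispatch entry points take a `*mut c_void` context plus an
// `extern fn(*mut c_void)` callback. The helpers below smuggle Rust closures
// through that interface: the closure (boxed here, or borrowed for the sync
// and apply variants) becomes the context pointer and a monomorphized
// trampoline becomes the callback. The transmutes rely on `Box<F>` and
// references being single, thin pointers.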
fn context_and_function<F>(closure: F) -> (*mut c_void, dispatch_function_t)
where F: FnOnce() {
extern fn work_execute_closure<F>(context: Box<F>) where F: FnOnce() {
(*context)();
}
let closure = Box::new(closure);
let func: extern fn(Box<F>) = work_execute_closure::<F>;
unsafe {
(mem::transmute(closure), mem::transmute(func))
}
}
fn context_and_sync_function<F>(closure: &mut Option<F>) ->
(*mut c_void, dispatch_function_t)
where F: FnOnce() {
extern fn work_read_closure<F>(context: &mut Option<F>) where F: FnOnce() {
// This is always passed Some, so it's safe to unwrap
let closure = context.take().unwrap();
closure();
}
let context: *mut Option<F> = closure;
let func: extern fn(&mut Option<F>) = work_read_closure::<F>;
unsafe {
(context as *mut c_void, mem::transmute(func))
}
}
fn context_and_apply_function<F>(closure: &F) ->
(*mut c_void, extern fn(*mut c_void, usize))
where F: Fn(usize) {
extern fn work_apply_closure<F>(context: &F, iter: usize)
where F: Fn(usize) {
context(iter);
}
let context: *const F = closure;
let func: extern fn(&F, usize) = work_apply_closure::<F>;
unsafe {
(context as *mut c_void, mem::transmute(func))
}
}

60
vendor/dispatch/src/once.rs vendored Normal file

@@ -0,0 +1,60 @@
use std::cell::UnsafeCell;
use crate::ffi::*;
use crate::context_and_sync_function;
/// A predicate used to execute a closure only once for the lifetime of an
/// application.
#[derive(Debug)]
pub struct Once {
predicate: UnsafeCell<dispatch_once_t>,
}
impl Once {
/// Creates a new `Once`.
pub const fn new() -> Once {
Once { predicate: UnsafeCell::new(0) }
}
/// Executes a closure once, ensuring that no other closure has been or
/// will be executed by self for the lifetime of the application.
///
/// If called simultaneously from multiple threads, waits synchronously
/// until the work has completed.
#[inline(always)]
pub fn call_once<F>(&'static self, work: F) where F: FnOnce() {
#[cold]
#[inline(never)]
fn once<F>(predicate: *mut dispatch_once_t, work: F)
where F: FnOnce() {
let mut work = Some(work);
let (context, work) = context_and_sync_function(&mut work);
unsafe {
dispatch_once_f(predicate, context, work);
}
}
unsafe {
let predicate = self.predicate.get();
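// Fast path: libdispatch leaves the predicate set to !0 once the closure
// has run, so completed `Once`s skip the FFI call entirely.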
if *predicate != !0 {
once(predicate, work);
}
}
}
}
unsafe impl Sync for Once { }
#[cfg(test)]
mod tests {
use super::Once;
#[test]
fn test_once() {
static ONCE: Once = Once::new();
let mut num = 0;
ONCE.call_once(|| num += 1);
ONCE.call_once(|| num += 1);
assert!(num == 1);
}
}
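
An editor's sketch, not part of the crate: `Once` for one-time initialization across repeated call sites. `INIT`, `CALLS`, and `expensive_setup` are illustrative names.

```rust
use dispatch::Once;
use std::sync::atomic::{AtomicUsize, Ordering};

static INIT: Once = Once::new();
static CALLS: AtomicUsize = AtomicUsize::new(0);

// Stand-in for some one-time setup work.
fn expensive_setup() {
    CALLS.fetch_add(1, Ordering::SeqCst);
}

fn main() {
    for _ in 0..3 {
        INIT.call_once(expensive_setup);
    }
    // The setup ran exactly once despite three calls.
    assert_eq!(CALLS.load(Ordering::SeqCst), 1);
}
```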

506
vendor/dispatch/src/queue.rs vendored Normal file

@@ -0,0 +1,506 @@
use std::ffi::{CStr, CString};
use std::os::raw::c_long;
use std::ptr;
use std::str;
use std::time::Duration;
use crate::ffi::*;
use crate::{
context_and_function, context_and_sync_function, context_and_apply_function,
time_after_delay,
};
/// The type of a dispatch queue.
#[derive(Clone, Debug, Hash, PartialEq)]
pub enum QueueAttribute {
/// The queue executes blocks serially in FIFO order.
Serial,
/// The queue executes blocks concurrently.
Concurrent,
}
impl QueueAttribute {
#[cfg(not(all(test, target_os = "linux")))]
fn as_raw(&self) -> dispatch_queue_attr_t {
match *self {
QueueAttribute::Serial => DISPATCH_QUEUE_SERIAL,
QueueAttribute::Concurrent => DISPATCH_QUEUE_CONCURRENT,
}
}
#[cfg(all(test, target_os = "linux"))]
fn as_raw(&self) -> dispatch_queue_attr_t {
// The Linux tests use Ubuntu's libdispatch-dev package, which is
// apparently really old from before OSX 10.7.
// Back then, the attr for dispatch_queue_create must be NULL.
ptr::null()
}
}
/// The priority of a global concurrent queue.
#[derive(Clone, Debug, Hash, PartialEq)]
pub enum QueuePriority {
/// The queue is scheduled for execution before any default priority or low
/// priority queue.
High,
/// The queue is scheduled for execution after all high priority queues,
/// but before any low priority queues.
Default,
/// The queue is scheduled for execution after all default priority and
/// high priority queues.
Low,
/// The queue is scheduled for execution after all high priority queues
/// have been scheduled. The system runs items on a thread whose
/// priority is set for background status and any disk I/O is throttled to
/// minimize the impact on the system.
Background,
}
impl QueuePriority {
fn as_raw(&self) -> c_long {
match *self {
QueuePriority::High => DISPATCH_QUEUE_PRIORITY_HIGH,
QueuePriority::Default => DISPATCH_QUEUE_PRIORITY_DEFAULT,
QueuePriority::Low => DISPATCH_QUEUE_PRIORITY_LOW,
QueuePriority::Background => DISPATCH_QUEUE_PRIORITY_BACKGROUND,
}
}
}
/// A Grand Central Dispatch queue.
///
/// For more information, see Apple's [Grand Central Dispatch reference](
/// https://developer.apple.com/library/mac/documentation/Performance/Reference/GCD_libdispatch_Ref/index.html).
#[derive(Debug)]
pub struct Queue {
pub(crate) ptr: dispatch_queue_t,
}
impl Queue {
/// Returns the serial dispatch `Queue` associated with the application's
/// main thread.
pub fn main() -> Self {
let queue = dispatch_get_main_queue();
unsafe {
dispatch_retain(queue);
}
Queue { ptr: queue }
}
/// Returns a system-defined global concurrent `Queue` with the specified
/// priority.
pub fn global(priority: QueuePriority) -> Self {
unsafe {
let queue = dispatch_get_global_queue(priority.as_raw(), 0);
dispatch_retain(queue);
Queue { ptr: queue }
}
}
/// Creates a new dispatch `Queue`.
pub fn create(label: &str, attr: QueueAttribute) -> Self {
let label = CString::new(label).unwrap();
let queue = unsafe {
dispatch_queue_create(label.as_ptr(), attr.as_raw())
};
Queue { ptr: queue }
}
/// Creates a new dispatch `Queue` with the given target queue.
///
/// A dispatch queue's priority is inherited from its target queue.
/// Additionally, if both the queue and its target are serial queues,
/// their blocks will not be invoked concurrently.
pub fn with_target_queue(label: &str, attr: QueueAttribute, target: &Queue)
-> Self {
let queue = Queue::create(label, attr);
unsafe {
dispatch_set_target_queue(queue.ptr, target.ptr);
}
queue
}
/// Returns the label that was specified for self.
pub fn label(&self) -> &str {
let label = unsafe {
let label_ptr = dispatch_queue_get_label(self.ptr);
if label_ptr.is_null() {
return "";
}
CStr::from_ptr(label_ptr)
};
str::from_utf8(label.to_bytes()).unwrap()
}
/// Submits a closure for execution on self and waits until it completes.
pub fn exec_sync<T, F>(&self, work: F) -> T
where F: Send + FnOnce() -> T, T: Send {
let mut result = None;
{
let result_ref = &mut result;
let work = move || {
*result_ref = Some(work());
};
let mut work = Some(work);
let (context, work) = context_and_sync_function(&mut work);
unsafe {
dispatch_sync_f(self.ptr, context, work);
}
}
// This was set so it's safe to unwrap
result.unwrap()
}
/// Submits a closure for asynchronous execution on self and returns
/// immediately.
pub fn exec_async<F>(&self, work: F) where F: 'static + Send + FnOnce() {
let (context, work) = context_and_function(work);
unsafe {
dispatch_async_f(self.ptr, context, work);
}
}
/// After the specified delay, submits a closure for asynchronous execution
/// on self.
pub fn exec_after<F>(&self, delay: Duration, work: F)
where F: 'static + Send + FnOnce() {
let when = time_after_delay(delay);
let (context, work) = context_and_function(work);
unsafe {
dispatch_after_f(when, self.ptr, context, work);
}
}
/// Submits a closure to be executed on self for the given number of
/// iterations and waits until all of them complete.
pub fn apply<F>(&self, iterations: usize, work: F)
where F: Sync + Fn(usize) {
let (context, work) = context_and_apply_function(&work);
unsafe {
dispatch_apply_f(iterations, self.ptr, context, work);
}
}
/// Submits a closure to be executed on self for each element of the
/// provided slice and waits until it completes.
pub fn for_each<T, F>(&self, slice: &mut [T], work: F)
where F: Sync + Fn(&mut T), T: Send {
let slice_ptr = slice.as_mut_ptr();
let work = move |i| unsafe {
work(&mut *slice_ptr.offset(i as isize));
};
let (context, work) = context_and_apply_function(&work);
unsafe {
dispatch_apply_f(slice.len(), self.ptr, context, work);
}
}
/// Submits a closure to be executed on self for each element of the
/// provided vector and returns a `Vec` of the mapped elements.
pub fn map<T, U, F>(&self, vec: Vec<T>, work: F) -> Vec<U>
where F: Sync + Fn(T) -> U, T: Send, U: Send {
let mut src = vec;
let len = src.len();
let src_ptr = src.as_ptr();
let mut dest: Vec<U> = Vec::with_capacity(len);
let dest_ptr = dest.as_mut_ptr();
let work = move |i| unsafe {
let result = work(ptr::read(src_ptr.offset(i as isize)));
ptr::write(dest_ptr.offset(i as isize), result);
};
let (context, work) = context_and_apply_function(&work);
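// Elements are moved out of `src` with `ptr::read`, so its length is set
// to 0 first to prevent double drops; `dest`'s length is only set once
// every slot has been written by the apply closure.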
unsafe {
src.set_len(0);
dispatch_apply_f(len, self.ptr, context, work);
dest.set_len(len);
}
dest
}
/// Submits a closure to be executed on self as a barrier and waits until
/// it completes.
///
/// Barriers create synchronization points within a concurrent queue.
/// If self is concurrent, when it encounters a barrier it delays execution
/// of the closure (and any further ones) until all closures submitted
/// before the barrier finish executing.
/// At that point, the barrier closure executes by itself.
/// Upon completion, self resumes its normal execution behavior.
///
/// If self is a serial queue or one of the global concurrent queues,
/// this method behaves like the normal `exec_sync` method.
pub fn barrier_sync<T, F>(&self, work: F) -> T
where F: Send + FnOnce() -> T, T: Send {
let mut result = None;
{
let result_ref = &mut result;
let work = move || {
*result_ref = Some(work());
};
let mut work = Some(work);
let (context, work) = context_and_sync_function(&mut work);
unsafe {
dispatch_barrier_sync_f(self.ptr, context, work);
}
}
// This was set so it's safe to unwrap
result.unwrap()
}
/// Submits a closure to be executed on self as a barrier and returns
/// immediately.
///
/// Barriers create synchronization points within a concurrent queue.
/// If self is concurrent, when it encounters a barrier it delays execution
/// of the closure (and any further ones) until all closures submitted
/// before the barrier finish executing.
/// At that point, the barrier closure executes by itself.
/// Upon completion, self resumes its normal execution behavior.
///
/// If self is a serial queue or one of the global concurrent queues,
/// this method behaves like the normal `exec_async` method.
pub fn barrier_async<F>(&self, work: F)
where F: 'static + Send + FnOnce() {
let (context, work) = context_and_function(work);
unsafe {
dispatch_barrier_async_f(self.ptr, context, work);
}
}
/// Suspends the invocation of blocks on self and returns a `SuspendGuard`
/// that can be dropped to resume.
///
/// The suspension occurs after completion of any blocks running at the
/// time of the call.
/// Invocation does not resume until all `SuspendGuard`s have been dropped.
pub fn suspend(&self) -> SuspendGuard {
SuspendGuard::new(self)
}
}
unsafe impl Sync for Queue { }
unsafe impl Send for Queue { }
impl Clone for Queue {
fn clone(&self) -> Self {
unsafe {
dispatch_retain(self.ptr);
}
Queue { ptr: self.ptr }
}
}
impl Drop for Queue {
fn drop(&mut self) {
unsafe {
dispatch_release(self.ptr);
}
}
}
/// An RAII guard which will resume a suspended `Queue` when dropped.
#[derive(Debug)]
pub struct SuspendGuard {
queue: Queue,
}
impl SuspendGuard {
fn new(queue: &Queue) -> SuspendGuard {
unsafe {
dispatch_suspend(queue.ptr);
}
SuspendGuard { queue: queue.clone() }
}
/// Drops self, allowing the suspended `Queue` to resume.
pub fn resume(self) { }
}
impl Clone for SuspendGuard {
fn clone(&self) -> Self {
SuspendGuard::new(&self.queue)
}
}
impl Drop for SuspendGuard {
fn drop(&mut self) {
unsafe {
dispatch_resume(self.queue.ptr);
}
}
}
#[cfg(test)]
mod tests {
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};
use crate::Group;
use super::*;
fn async_increment(queue: &Queue, num: &Arc<Mutex<i32>>) {
let num = num.clone();
queue.exec_async(move || {
let mut num = num.lock().unwrap();
*num += 1;
});
}
#[test]
fn test_serial_queue() {
let q = Queue::create("", QueueAttribute::Serial);
let mut num = 0;
q.exec_sync(|| num = 1);
assert_eq!(num, 1);
assert_eq!(q.exec_sync(|| num), 1);
}
#[test]
fn test_sync_owned() {
let q = Queue::create("", QueueAttribute::Serial);
let s = "Hello, world!".to_string();
let len = q.exec_sync(move || s.len());
assert_eq!(len, 13);
}
#[test]
fn test_serial_queue_async() {
let q = Queue::create("", QueueAttribute::Serial);
let num = Arc::new(Mutex::new(0));
async_increment(&q, &num);
// Sync an empty block to ensure the async one finishes
q.exec_sync(|| ());
assert_eq!(*num.lock().unwrap(), 1);
}
#[test]
fn test_after() {
let q = Queue::create("", QueueAttribute::Serial);
let group = Group::create();
let num = Arc::new(Mutex::new(0));
let delay = Duration::from_millis(5);
let num2 = num.clone();
let guard = group.enter();
let start = Instant::now();
q.exec_after(delay, move || {
let mut num = num2.lock().unwrap();
*num = 1;
guard.leave();
});
// Wait for the previous block to complete
group.wait_timeout(Duration::from_millis(5000)).unwrap();
assert!(start.elapsed() >= delay);
assert_eq!(*num.lock().unwrap(), 1);
}
#[test]
fn test_queue_label() {
let q = Queue::create("com.example.rust", QueueAttribute::Serial);
assert_eq!(q.label(), "com.example.rust");
}
#[test]
fn test_apply() {
let q = Queue::create("", QueueAttribute::Serial);
let num = Arc::new(Mutex::new(0));
q.apply(5, |_| *num.lock().unwrap() += 1);
assert_eq!(*num.lock().unwrap(), 5);
}
#[test]
fn test_for_each() {
let q = Queue::create("", QueueAttribute::Serial);
let mut nums = [0, 1];
q.for_each(&mut nums, |x| *x += 1);
assert_eq!(nums, [1, 2]);
}
#[test]
fn test_map() {
let q = Queue::create("", QueueAttribute::Serial);
let nums = vec![0, 1];
let result = q.map(nums, |x| x + 1);
assert_eq!(result, [1, 2]);
}
#[test]
fn test_barrier_sync() {
let q = Queue::create("", QueueAttribute::Concurrent);
let num = Arc::new(Mutex::new(0));
async_increment(&q, &num);
async_increment(&q, &num);
let num2 = num.clone();
let result = q.barrier_sync(move || {
let mut num = num2.lock().unwrap();
if *num == 2 {
*num = 10;
}
*num
});
assert_eq!(result, 10);
async_increment(&q, &num);
async_increment(&q, &num);
q.barrier_sync(|| ());
assert_eq!(*num.lock().unwrap(), 12);
}
#[test]
fn test_barrier_async() {
let q = Queue::create("", QueueAttribute::Concurrent);
let num = Arc::new(Mutex::new(0));
async_increment(&q, &num);
async_increment(&q, &num);
let num2 = num.clone();
q.barrier_async(move || {
let mut num = num2.lock().unwrap();
if *num == 2 {
*num = 10;
}
});
async_increment(&q, &num);
async_increment(&q, &num);
q.barrier_sync(|| ());
assert_eq!(*num.lock().unwrap(), 12);
}
#[test]
fn test_suspend() {
let q = Queue::create("", QueueAttribute::Serial);
let num = Arc::new(Mutex::new(0));
// Suspend the queue and then dispatch some work to it
let guard = q.suspend();
async_increment(&q, &num);
// Sleep and ensure the work doesn't occur
::std::thread::sleep(Duration::from_millis(5));
assert_eq!(*num.lock().unwrap(), 0);
// But ensure the work does complete after we resume
guard.resume();
q.exec_sync(|| ());
assert_eq!(*num.lock().unwrap(), 1);
}
}
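
An editor's sketch, not part of the crate, of the barrier pattern described above: concurrent reads interleaved with an exclusive write on a privately created concurrent queue. The label is illustrative, and the `Mutex` only keeps the sketch in safe Rust.

```rust
use dispatch::{Queue, QueueAttribute};
use std::sync::{Arc, Mutex};

fn main() {
    let q = Queue::create("com.example.rw", QueueAttribute::Concurrent);
    let data = Arc::new(Mutex::new(vec![1, 2, 3]));

    // "Reads" may run concurrently with each other.
    for _ in 0..4 {
        let data = data.clone();
        q.exec_async(move || {
            let len = data.lock().unwrap().len();
            println!("reader saw {} items", len);
        });
    }

    // Exclusive "write": waits for the reads above, runs alone, then lets
    // later blocks proceed.
    let writer = data.clone();
    q.barrier_async(move || writer.lock().unwrap().push(4));

    // barrier_sync flushes everything submitted so far before returning.
    let final_len = q.barrier_sync(|| data.lock().unwrap().len());
    assert_eq!(final_len, 4);
}
```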

138
vendor/dispatch/src/sem.rs vendored Normal file

@@ -0,0 +1,138 @@
use std::os::raw::c_long;
use std::time::Duration;
use crate::ffi::*;
use crate::{time_after_delay, WaitTimeout};
/// A counting semaphore.
#[derive(Debug)]
pub struct Semaphore {
ptr: dispatch_semaphore_t,
}
impl Semaphore {
/// Creates a new `Semaphore` with an initial value.
///
/// A `Semaphore` created with a value greater than 0 cannot be disposed if
/// it has been decremented below its original value. If there are more
/// successful calls to `wait` than `signal`, the system assumes the
/// `Semaphore` is still in use and will abort if it is disposed.
pub fn new(value: u32) -> Self {
let ptr = unsafe {
dispatch_semaphore_create(value as c_long)
};
Semaphore { ptr }
}
/// Wait for (decrement) self.
pub fn wait(&self) {
let result = unsafe {
dispatch_semaphore_wait(self.ptr, DISPATCH_TIME_FOREVER)
};
assert!(result == 0, "Dispatch semaphore wait errored");
}
/// Wait for (decrement) self until the specified timeout has elapsed.
pub fn wait_timeout(&self, timeout: Duration) -> Result<(), WaitTimeout> {
let when = time_after_delay(timeout);
let result = unsafe {
dispatch_semaphore_wait(self.ptr, when)
};
if result == 0 {
Ok(())
} else {
Err(WaitTimeout { duration: timeout })
}
}
/// Signal (increment) self.
///
/// If the previous value was less than zero, this method wakes a waiting thread.
/// Returns `true` if a thread is woken or `false` otherwise.
pub fn signal(&self) -> bool {
unsafe {
dispatch_semaphore_signal(self.ptr) != 0
}
}
/// Wait to access a resource protected by self.
/// This decrements self and returns a guard that increments when dropped.
pub fn access(&self) -> SemaphoreGuard {
self.wait();
SemaphoreGuard::new(self.clone())
}
/// Wait until the specified timeout to access a resource protected by self.
/// This decrements self and returns a guard that increments when dropped.
pub fn access_timeout(&self, timeout: Duration)
-> Result<SemaphoreGuard, WaitTimeout> {
self.wait_timeout(timeout)?;
Ok(SemaphoreGuard::new(self.clone()))
}
}
unsafe impl Sync for Semaphore {}
unsafe impl Send for Semaphore {}
impl Clone for Semaphore {
fn clone(&self) -> Self {
unsafe {
dispatch_retain(self.ptr);
}
Semaphore { ptr: self.ptr }
}
}
impl Drop for Semaphore {
fn drop(&mut self) {
unsafe {
dispatch_release(self.ptr);
}
}
}
/// An RAII guard which will signal a `Semaphore` when dropped.
#[derive(Debug)]
pub struct SemaphoreGuard {
sem: Semaphore,
}
impl SemaphoreGuard {
fn new(sem: Semaphore) -> SemaphoreGuard {
SemaphoreGuard { sem }
}
/// Drops self, signaling the `Semaphore`.
pub fn signal(self) { }
}
impl Drop for SemaphoreGuard {
fn drop(&mut self) {
self.sem.signal();
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_semaphore() {
let sem = Semaphore::new(0);
assert!(!sem.signal());
sem.wait();
assert!(sem.wait_timeout(Duration::from_millis(5)).is_err());
}
#[test]
fn test_semaphore_guard() {
let sem = Semaphore::new(1);
let guard = sem.access();
assert!(sem.access_timeout(Duration::from_millis(5)).is_err());
drop(guard);
assert!(sem.access_timeout(Duration::from_millis(5)).is_ok());
}
}
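
An editor's sketch, not part of the crate: `Semaphore::access` bounding how many tasks run a protected section at once. The names and numbers are illustrative.

```rust
use dispatch::{Group, Queue, QueuePriority, Semaphore};
use std::thread;
use std::time::Duration;

fn main() {
    // At most two tasks may hold a "slot" at any one time.
    let slots = Semaphore::new(2);
    let queue = Queue::global(QueuePriority::Default);
    let group = Group::create();

    for i in 0..6 {
        let slots = slots.clone();
        group.exec_async(&queue, move || {
            // Blocks until a slot is free; the guard signals the semaphore
            // (releasing the slot) when it goes out of scope.
            let _slot = slots.access();
            println!("task {} holds a slot", i);
            thread::sleep(Duration::from_millis(10));
        });
    }

    group.wait();
}
```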