Vendor dependencies for 0.3.0 release

2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

85
vendor/rayon/src/array.rs vendored Normal file

@@ -0,0 +1,85 @@
//! Parallel iterator types for [arrays] (`[T; N]`)
//!
//! You will rarely need to interact with this module directly unless you need
//! to name one of the iterator types.
//!
//! [arrays]: primitive@array
use crate::iter::plumbing::*;
use crate::iter::*;
use crate::slice::{Iter, IterMut};
use crate::vec::DrainProducer;
use std::mem::ManuallyDrop;
impl<'data, T: Sync + 'data, const N: usize> IntoParallelIterator for &'data [T; N] {
type Item = &'data T;
type Iter = Iter<'data, T>;
fn into_par_iter(self) -> Self::Iter {
<&[T]>::into_par_iter(self)
}
}
impl<'data, T: Send + 'data, const N: usize> IntoParallelIterator for &'data mut [T; N] {
type Item = &'data mut T;
type Iter = IterMut<'data, T>;
fn into_par_iter(self) -> Self::Iter {
<&mut [T]>::into_par_iter(self)
}
}
impl<T: Send, const N: usize> IntoParallelIterator for [T; N] {
type Item = T;
type Iter = IntoIter<T, N>;
fn into_par_iter(self) -> Self::Iter {
IntoIter { array: self }
}
}
/// Parallel iterator that moves out of an array.
#[derive(Debug, Clone)]
pub struct IntoIter<T, const N: usize> {
array: [T; N],
}
impl<T: Send, const N: usize> ParallelIterator for IntoIter<T, N> {
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(N)
}
}
impl<T: Send, const N: usize> IndexedParallelIterator for IntoIter<T, N> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn len(&self) -> usize {
N
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
unsafe {
// Drain every item, and then the local array can just fall out of scope.
let mut array = ManuallyDrop::new(self.array);
let producer = DrainProducer::new(array.as_mut_slice());
callback.callback(producer)
}
}
}
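For reference, a minimal usage sketch of the array impls above (an editorial note, not part of the vendored file; it uses only the rayon prelude):

use rayon::prelude::*;

fn main() {
    let arr = [1, 2, 3, 4, 5];
    // &[T; N] borrows and yields &T items via the slice iterator.
    let sum: i32 = arr.par_iter().sum();
    assert_eq!(sum, 15);
    // [T; N] by value moves items out through the IntoIter defined above.
    let doubled: Vec<i32> = arr.into_par_iter().map(|x| x * 2).collect();
    assert_eq!(doubled, vec![2, 4, 6, 8, 10]);
}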

129
vendor/rayon/src/collections/binary_heap.rs vendored Normal file

@@ -0,0 +1,129 @@
//! This module contains the parallel iterator types for heaps
//! (`BinaryHeap<T>`). You will rarely need to interact with it directly
//! unless you need to name one of the iterator types.
use std::collections::BinaryHeap;
use crate::iter::plumbing::*;
use crate::iter::*;
use crate::slice;
use crate::vec;
/// Parallel iterator over a binary heap
#[derive(Debug, Clone)]
pub struct IntoIter<T> {
inner: vec::IntoIter<T>,
}
impl<T: Send> IntoParallelIterator for BinaryHeap<T> {
type Item = T;
type Iter = IntoIter<T>;
fn into_par_iter(self) -> Self::Iter {
IntoIter {
inner: Vec::from(self).into_par_iter(),
}
}
}
delegate_indexed_iterator! {
IntoIter<T> => T,
impl<T: Send>
}
/// Parallel iterator over an immutable reference to a binary heap
#[derive(Debug)]
pub struct Iter<'a, T> {
inner: slice::Iter<'a, T>,
}
impl<T> Clone for Iter<'_, T> {
fn clone(&self) -> Self {
Iter {
inner: self.inner.clone(),
}
}
}
impl<'a, T: Sync> IntoParallelIterator for &'a BinaryHeap<T> {
type Item = &'a T;
type Iter = Iter<'a, T>;
fn into_par_iter(self) -> Self::Iter {
Iter {
inner: self.as_slice().into_par_iter(),
}
}
}
delegate_indexed_iterator! {
Iter<'a, T> => &'a T,
impl<'a, T: Sync + 'a>
}
// `BinaryHeap` doesn't have a mutable `Iterator`
/// Draining parallel iterator that moves out of a binary heap,
/// but keeps the total capacity.
#[derive(Debug)]
pub struct Drain<'a, T> {
heap: &'a mut BinaryHeap<T>,
}
// NB: The only reason we require `T: Ord` is for `DrainGuard` to reconstruct
// the heap `From<Vec<T>>` afterward, even though that will actually be empty.
impl<'a, T: Ord + Send> ParallelDrainFull for &'a mut BinaryHeap<T> {
type Iter = Drain<'a, T>;
type Item = T;
fn par_drain(self) -> Self::Iter {
Drain { heap: self }
}
}
impl<T: Ord + Send> ParallelIterator for Drain<'_, T> {
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<T: Ord + Send> IndexedParallelIterator for Drain<'_, T> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn len(&self) -> usize {
self.heap.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
super::DrainGuard::new(self.heap)
.par_drain(..)
.with_producer(callback)
}
}
impl<T> Drop for Drain<'_, T> {
fn drop(&mut self) {
if !self.heap.is_empty() {
// We must not have produced, so just call a normal drain to remove the items.
self.heap.drain();
}
}
}
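A minimal usage sketch of the full drain above (an editorial note, not part of the vendored file; it assumes `ParallelDrainFull` is re-exported by the rayon prelude):

use rayon::prelude::*;
use std::collections::BinaryHeap;

fn main() {
    let mut heap: BinaryHeap<i32> = (0..10).collect();
    let cap = heap.capacity();
    // par_drain moves every item out; the Drop impl above empties the heap
    // even if the iterator is never fully driven.
    let sum: i32 = heap.par_drain().sum();
    assert_eq!(sum, 45);
    assert!(heap.is_empty());
    // The Vec round-trip preserves the allocation.
    assert!(heap.capacity() >= cap);
}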

66
vendor/rayon/src/collections/btree_map.rs vendored Normal file

@@ -0,0 +1,66 @@
//! This module contains the parallel iterator types for B-Tree maps
//! (`BTreeMap<K, V>`). You will rarely need to interact with it directly
//! unless you need to name one of the iterator types.
use std::collections::BTreeMap;
use crate::iter::plumbing::*;
use crate::iter::*;
use crate::vec;
/// Parallel iterator over a B-Tree map
#[derive(Debug)] // std doesn't Clone
pub struct IntoIter<K, V> {
inner: vec::IntoIter<(K, V)>,
}
into_par_vec! {
BTreeMap<K, V> => IntoIter<K, V>,
impl<K: Send, V: Send>
}
delegate_iterator! {
IntoIter<K, V> => (K, V),
impl<K: Send, V: Send>
}
/// Parallel iterator over an immutable reference to a B-Tree map
#[derive(Debug)]
pub struct Iter<'a, K, V> {
inner: vec::IntoIter<(&'a K, &'a V)>,
}
impl<K, V> Clone for Iter<'_, K, V> {
fn clone(&self) -> Self {
Iter {
inner: self.inner.clone(),
}
}
}
into_par_vec! {
&'a BTreeMap<K, V> => Iter<'a, K, V>,
impl<'a, K: Sync, V: Sync>
}
delegate_iterator! {
Iter<'a, K, V> => (&'a K, &'a V),
impl<'a, K: Sync + 'a, V: Sync + 'a>
}
/// Parallel iterator over a mutable reference to a B-Tree map
#[derive(Debug)]
pub struct IterMut<'a, K, V> {
inner: vec::IntoIter<(&'a K, &'a mut V)>,
}
into_par_vec! {
&'a mut BTreeMap<K, V> => IterMut<'a, K, V>,
impl<'a, K: Sync, V: Send>
}
delegate_iterator! {
IterMut<'a, K, V> => (&'a K, &'a mut V),
impl<'a, K: Sync + 'a, V: Send + 'a>
}
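A minimal usage sketch of these map iterators (an editorial note, not part of the vendored file):

use rayon::prelude::*;
use std::collections::BTreeMap;

fn main() {
    let mut map = BTreeMap::from([(1, 10), (2, 20), (3, 30)]);
    // &BTreeMap yields (&K, &V); the indexed collect preserves key order.
    let keys: Vec<i32> = map.par_iter().map(|(&k, _)| k).collect();
    assert_eq!(keys, vec![1, 2, 3]);
    // &mut BTreeMap yields (&K, &mut V) for in-place updates.
    map.par_iter_mut().for_each(|(_, v)| *v += 1);
    assert_eq!(map[&1], 11);
}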

52
vendor/rayon/src/collections/btree_set.rs vendored Normal file

@@ -0,0 +1,52 @@
//! This module contains the parallel iterator types for B-Tree sets
//! (`BTreeSet<T>`). You will rarely need to interact with it directly
//! unless you need to name one of the iterator types.
use std::collections::BTreeSet;
use crate::iter::plumbing::*;
use crate::iter::*;
use crate::vec;
/// Parallel iterator over a B-Tree set
#[derive(Debug)] // std doesn't Clone
pub struct IntoIter<T> {
inner: vec::IntoIter<T>,
}
into_par_vec! {
BTreeSet<T> => IntoIter<T>,
impl<T: Send>
}
delegate_iterator! {
IntoIter<T> => T,
impl<T: Send>
}
/// Parallel iterator over an immutable reference to a B-Tree set
#[derive(Debug)]
pub struct Iter<'a, T> {
inner: vec::IntoIter<&'a T>,
}
impl<T> Clone for Iter<'_, T> {
fn clone(&self) -> Self {
Iter {
inner: self.inner.clone(),
}
}
}
into_par_vec! {
&'a BTreeSet<T> => Iter<'a, T>,
impl<'a, T: Sync>
}
delegate_iterator! {
Iter<'a, T> => &'a T,
impl<'a, T: Sync + 'a>
}
// `BTreeSet` doesn't have a mutable `Iterator`

93
vendor/rayon/src/collections/hash_map.rs vendored Normal file

@@ -0,0 +1,93 @@
//! This module contains the parallel iterator types for hash maps
//! (`HashMap<K, V>`). You will rarely need to interact with it directly
//! unless you need to name one of the iterator types.
use std::collections::HashMap;
use std::marker::PhantomData;
use crate::iter::plumbing::*;
use crate::iter::*;
use crate::vec;
/// Parallel iterator over a hash map
#[derive(Debug)] // std doesn't Clone
pub struct IntoIter<K, V> {
inner: vec::IntoIter<(K, V)>,
}
into_par_vec! {
HashMap<K, V, S> => IntoIter<K, V>,
impl<K: Send, V: Send, S>
}
delegate_iterator! {
IntoIter<K, V> => (K, V),
impl<K: Send, V: Send>
}
/// Parallel iterator over an immutable reference to a hash map
#[derive(Debug)]
pub struct Iter<'a, K, V> {
inner: vec::IntoIter<(&'a K, &'a V)>,
}
impl<K, V> Clone for Iter<'_, K, V> {
fn clone(&self) -> Self {
Iter {
inner: self.inner.clone(),
}
}
}
into_par_vec! {
&'a HashMap<K, V, S> => Iter<'a, K, V>,
impl<'a, K: Sync, V: Sync, S>
}
delegate_iterator! {
Iter<'a, K, V> => (&'a K, &'a V),
impl<'a, K: Sync, V: Sync>
}
/// Parallel iterator over a mutable reference to a hash map
#[derive(Debug)]
pub struct IterMut<'a, K, V> {
inner: vec::IntoIter<(&'a K, &'a mut V)>,
}
into_par_vec! {
&'a mut HashMap<K, V, S> => IterMut<'a, K, V>,
impl<'a, K: Sync, V: Send, S>
}
delegate_iterator! {
IterMut<'a, K, V> => (&'a K, &'a mut V),
impl<'a, K: Sync, V: Send>
}
/// Draining parallel iterator that moves out of a hash map,
/// but keeps the total capacity.
#[derive(Debug)]
pub struct Drain<'a, K, V> {
inner: vec::IntoIter<(K, V)>,
marker: PhantomData<&'a mut HashMap<K, V>>,
}
impl<'a, K: Send, V: Send, S> ParallelDrainFull for &'a mut HashMap<K, V, S> {
type Iter = Drain<'a, K, V>;
type Item = (K, V);
fn par_drain(self) -> Self::Iter {
let vec: Vec<_> = self.drain().collect();
Drain {
inner: vec.into_par_iter(),
marker: PhantomData,
}
}
}
delegate_iterator! {
Drain<'_, K, V> => (K, V),
impl<K: Send, V: Send>
}
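A minimal usage sketch of the hash-map drain (an editorial note, not part of the vendored file; it assumes `ParallelDrainFull` is re-exported by the prelude):

use rayon::prelude::*;
use std::collections::HashMap;

fn main() {
    let mut map: HashMap<i32, i32> = (0..100).map(|i| (i, i * i)).collect();
    // par_drain empties the map up front (via the `drain().collect()` above)
    // and then iterates the temporary Vec in parallel.
    let total: i32 = map.par_drain().map(|(_, v)| v).sum();
    assert_eq!(total, (0..100).map(|i| i * i).sum::<i32>());
    assert!(map.is_empty());
}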

79
vendor/rayon/src/collections/hash_set.rs vendored Normal file

@@ -0,0 +1,79 @@
//! This module contains the parallel iterator types for hash sets
//! (`HashSet<T>`). You will rarely need to interact with it directly
//! unless you need to name one of the iterator types.
use std::collections::HashSet;
use std::marker::PhantomData;
use crate::iter::plumbing::*;
use crate::iter::*;
use crate::vec;
/// Parallel iterator over a hash set
#[derive(Debug)] // std doesn't Clone
pub struct IntoIter<T> {
inner: vec::IntoIter<T>,
}
into_par_vec! {
HashSet<T, S> => IntoIter<T>,
impl<T: Send, S>
}
delegate_iterator! {
IntoIter<T> => T,
impl<T: Send>
}
/// Parallel iterator over an immutable reference to a hash set
#[derive(Debug)]
pub struct Iter<'a, T> {
inner: vec::IntoIter<&'a T>,
}
impl<T> Clone for Iter<'_, T> {
fn clone(&self) -> Self {
Iter {
inner: self.inner.clone(),
}
}
}
into_par_vec! {
&'a HashSet<T, S> => Iter<'a, T>,
impl<'a, T: Sync, S>
}
delegate_iterator! {
Iter<'a, T> => &'a T,
impl<'a, T: Sync>
}
// `HashSet` doesn't have a mutable `Iterator`
/// Draining parallel iterator that moves out of a hash set,
/// but keeps the total capacity.
#[derive(Debug)]
pub struct Drain<'a, T> {
inner: vec::IntoIter<T>,
marker: PhantomData<&'a mut HashSet<T>>,
}
impl<'a, T: Send, S> ParallelDrainFull for &'a mut HashSet<T, S> {
type Iter = Drain<'a, T>;
type Item = T;
fn par_drain(self) -> Self::Iter {
let vec: Vec<_> = self.drain().collect();
Drain {
inner: vec.into_par_iter(),
marker: PhantomData,
}
}
}
delegate_iterator! {
Drain<'_, T> => T,
impl<T: Send>
}

66
vendor/rayon/src/collections/linked_list.rs vendored Normal file

@@ -0,0 +1,66 @@
//! This module contains the parallel iterator types for linked lists
//! (`LinkedList<T>`). You will rarely need to interact with it directly
//! unless you need to name one of the iterator types.
use std::collections::LinkedList;
use crate::iter::plumbing::*;
use crate::iter::*;
use crate::vec;
/// Parallel iterator over a linked list
#[derive(Debug, Clone)]
pub struct IntoIter<T> {
inner: vec::IntoIter<T>,
}
into_par_vec! {
LinkedList<T> => IntoIter<T>,
impl<T: Send>
}
delegate_iterator! {
IntoIter<T> => T,
impl<T: Send>
}
/// Parallel iterator over an immutable reference to a linked list
#[derive(Debug)]
pub struct Iter<'a, T> {
inner: vec::IntoIter<&'a T>,
}
impl<T> Clone for Iter<'_, T> {
fn clone(&self) -> Self {
Iter {
inner: self.inner.clone(),
}
}
}
into_par_vec! {
&'a LinkedList<T> => Iter<'a, T>,
impl<'a, T: Sync>
}
delegate_iterator! {
Iter<'a, T> => &'a T,
impl<'a, T: Sync>
}
/// Parallel iterator over a mutable reference to a linked list
#[derive(Debug)]
pub struct IterMut<'a, T> {
inner: vec::IntoIter<&'a mut T>,
}
into_par_vec! {
&'a mut LinkedList<T> => IterMut<'a, T>,
impl<'a, T: Send>
}
delegate_iterator! {
IterMut<'a, T> => &'a mut T,
impl<'a, T: Send>
}

84
vendor/rayon/src/collections/mod.rs vendored Normal file

@@ -0,0 +1,84 @@
//! Parallel iterator types for [standard collections]
//!
//! You will rarely need to interact with this module directly unless you need
//! to name one of the iterator types.
//!
//! [standard collections]: std::collections
/// Convert an iterable collection into a parallel iterator by first
/// collecting into a temporary `Vec`, then iterating that.
macro_rules! into_par_vec {
($t:ty => $iter:ident<$($i:tt),*>, impl $($args:tt)*) => {
impl $($args)* IntoParallelIterator for $t {
type Item = <$t as IntoIterator>::Item;
type Iter = $iter<$($i),*>;
fn into_par_iter(self) -> Self::Iter {
use std::iter::FromIterator;
$iter { inner: Vec::from_iter(self).into_par_iter() }
}
}
};
}
pub mod binary_heap;
pub mod btree_map;
pub mod btree_set;
pub mod hash_map;
pub mod hash_set;
pub mod linked_list;
pub mod vec_deque;
use self::drain_guard::DrainGuard;
mod drain_guard {
use crate::iter::ParallelDrainRange;
use std::mem;
use std::ops::RangeBounds;
/// A proxy for draining a collection by converting to a `Vec` and back.
///
/// This is used for draining `BinaryHeap` and `VecDeque`, which both have
/// zero-allocation conversions to/from `Vec`, though not zero-cost:
/// - `BinaryHeap` will heapify from `Vec`, but at least that will be empty.
/// - `VecDeque` has to shift items to offset 0 when converting to `Vec`.
#[allow(missing_debug_implementations)]
pub(super) struct DrainGuard<'a, T, C: From<Vec<T>>> {
collection: &'a mut C,
vec: Vec<T>,
}
impl<'a, T, C> DrainGuard<'a, T, C>
where
C: Default + From<Vec<T>>,
Vec<T>: From<C>,
{
pub(super) fn new(collection: &'a mut C) -> Self {
Self {
// Temporarily steal the inner `Vec` so we can drain in place.
vec: Vec::from(mem::take(collection)),
collection,
}
}
}
impl<'a, T, C: From<Vec<T>>> Drop for DrainGuard<'a, T, C> {
fn drop(&mut self) {
// Restore the collection from the `Vec` with its original capacity.
*self.collection = C::from(mem::take(&mut self.vec));
}
}
impl<'a, T, C> ParallelDrainRange<usize> for &'a mut DrainGuard<'_, T, C>
where
T: Send,
C: From<Vec<T>>,
{
type Iter = crate::vec::Drain<'a, T>;
type Item = T;
fn par_drain<R: RangeBounds<usize>>(self, range: R) -> Self::Iter {
self.vec.par_drain(range)
}
}
}
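A standalone sequential sketch of the steal-and-restore pattern `DrainGuard` relies on (editorial; `drain_all` is a hypothetical helper, not rayon code):

use std::collections::VecDeque;
use std::mem;

// Hypothetical sequential analogue of DrainGuard: steal the collection,
// drain its Vec form, then restore the (empty) Vec so capacity survives.
fn drain_all<T>(deque: &mut VecDeque<T>) -> Vec<T> {
    // `mem::take` leaves an empty deque behind, so no element is duplicated.
    let mut vec = Vec::from(mem::take(deque));
    let items: Vec<T> = vec.drain(..).collect();
    *deque = VecDeque::from(vec);
    items
}

fn main() {
    let mut dq: VecDeque<i32> = (0..5).collect();
    assert_eq!(drain_all(&mut dq), vec![0, 1, 2, 3, 4]);
    assert!(dq.is_empty());
}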

159
vendor/rayon/src/collections/vec_deque.rs vendored Normal file

@@ -0,0 +1,159 @@
//! This module contains the parallel iterator types for double-ended queues
//! (`VecDeque<T>`). You will rarely need to interact with it directly
//! unless you need to name one of the iterator types.
use std::collections::VecDeque;
use std::ops::{Range, RangeBounds};
use crate::iter::plumbing::*;
use crate::iter::*;
use crate::math::simplify_range;
use crate::slice;
use crate::vec;
/// Parallel iterator over a double-ended queue
#[derive(Debug, Clone)]
pub struct IntoIter<T: Send> {
inner: vec::IntoIter<T>,
}
impl<T: Send> IntoParallelIterator for VecDeque<T> {
type Item = T;
type Iter = IntoIter<T>;
fn into_par_iter(self) -> Self::Iter {
// NOTE: requires data movement if the deque doesn't start at offset 0.
let inner = Vec::from(self).into_par_iter();
IntoIter { inner }
}
}
delegate_indexed_iterator! {
IntoIter<T> => T,
impl<T: Send>
}
/// Parallel iterator over an immutable reference to a double-ended queue
#[derive(Debug)]
pub struct Iter<'a, T> {
inner: Chain<slice::Iter<'a, T>, slice::Iter<'a, T>>,
}
impl<T> Clone for Iter<'_, T> {
fn clone(&self) -> Self {
Iter {
inner: self.inner.clone(),
}
}
}
impl<'a, T: Sync> IntoParallelIterator for &'a VecDeque<T> {
type Item = &'a T;
type Iter = Iter<'a, T>;
fn into_par_iter(self) -> Self::Iter {
let (a, b) = self.as_slices();
Iter {
inner: a.into_par_iter().chain(b),
}
}
}
delegate_indexed_iterator! {
Iter<'a, T> => &'a T,
impl<'a, T: Sync>
}
/// Parallel iterator over a mutable reference to a double-ended queue
#[derive(Debug)]
pub struct IterMut<'a, T> {
inner: Chain<slice::IterMut<'a, T>, slice::IterMut<'a, T>>,
}
impl<'a, T: Send> IntoParallelIterator for &'a mut VecDeque<T> {
type Item = &'a mut T;
type Iter = IterMut<'a, T>;
fn into_par_iter(self) -> Self::Iter {
let (a, b) = self.as_mut_slices();
IterMut {
inner: a.into_par_iter().chain(b),
}
}
}
delegate_indexed_iterator! {
IterMut<'a, T> => &'a mut T,
impl<'a, T: Send>
}
/// Draining parallel iterator that moves a range out of a double-ended queue,
/// but keeps the total capacity.
#[derive(Debug)]
pub struct Drain<'a, T> {
deque: &'a mut VecDeque<T>,
range: Range<usize>,
orig_len: usize,
}
impl<'a, T: Send> ParallelDrainRange<usize> for &'a mut VecDeque<T> {
type Iter = Drain<'a, T>;
type Item = T;
fn par_drain<R: RangeBounds<usize>>(self, range: R) -> Self::Iter {
Drain {
orig_len: self.len(),
range: simplify_range(range, self.len()),
deque: self,
}
}
}
impl<T: Send> ParallelIterator for Drain<'_, T> {
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<T: Send> IndexedParallelIterator for Drain<'_, T> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn len(&self) -> usize {
self.range.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
// NOTE: requires data movement if the deque doesn't start at offset 0.
super::DrainGuard::new(self.deque)
.par_drain(self.range.clone())
.with_producer(callback)
}
}
impl<T> Drop for Drain<'_, T> {
fn drop(&mut self) {
if self.deque.len() != self.orig_len - self.range.len() {
// We must not have produced, so just call a normal drain to remove the items.
assert_eq!(self.deque.len(), self.orig_len);
self.deque.drain(self.range.clone());
}
}
}
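A minimal usage sketch of the range drain (an editorial note, not part of the vendored file; it assumes `ParallelDrainRange` is re-exported by the prelude):

use rayon::prelude::*;
use std::collections::VecDeque;

fn main() {
    let mut dq: VecDeque<i32> = (0..10).collect();
    // Drain a sub-range in parallel; the rest of the deque stays intact.
    let drained: Vec<i32> = dq.par_drain(2..5).collect();
    assert_eq!(drained, vec![2, 3, 4]);
    assert_eq!(Vec::from(dq), vec![0, 1, 5, 6, 7, 8, 9]);
}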

14
vendor/rayon/src/compile_fail/cannot_collect_filtermap_data.rs vendored Normal file

@@ -0,0 +1,14 @@
/*! ```compile_fail,E0599
use rayon::prelude::*;
// collect_into_vec requires data of exact size, but filter_map yields only
// bounded size, so check that we cannot apply it.
let a: Vec<usize> = (0..1024).collect();
let mut v = vec![];
a.par_iter()
.filter_map(|&x| Some(x as f32))
.collect_into_vec(&mut v); //~ ERROR no method
``` */

14
vendor/rayon/src/compile_fail/cannot_zip_filtered_data.rs vendored Normal file

@@ -0,0 +1,14 @@
/*! ```compile_fail,E0277
use rayon::prelude::*;
// zip requires data of exact size, but filter yields only bounded
// size, so check that we cannot apply it.
let mut a: Vec<usize> = (0..1024).rev().collect();
let b: Vec<usize> = (0..1024).collect();
a.par_iter()
.zip(b.par_iter().filter(|&&x| x > 3)); //~ ERROR
``` */

13
vendor/rayon/src/compile_fail/cell_par_iter.rs vendored Normal file

@@ -0,0 +1,13 @@
/*! ```compile_fail,E0277
// Check that we can't use the par-iter API to access contents of a `Cell`.
use rayon::prelude::*;
use std::cell::Cell;
let c = Cell::new(42_i32);
(0_i32..1024).into_par_iter()
.map(|_| c.get()) //~ ERROR E0277
.min();
``` */

7
vendor/rayon/src/compile_fail/mod.rs vendored Normal file

@@ -0,0 +1,7 @@
// These modules contain `compile_fail` doc tests.
mod cannot_collect_filtermap_data;
mod cannot_zip_filtered_data;
mod cell_par_iter;
mod must_use;
mod no_send_par_iter;
mod rc_par_iter;

71
vendor/rayon/src/compile_fail/must_use.rs vendored Normal file

@@ -0,0 +1,71 @@
// Check that we are flagged for ignoring `must_use` parallel adaptors.
// (unfortunately there's no error code for `unused_must_use`)
macro_rules! must_use {
($( $name:ident #[$expr:meta] )*) => {$(
/// First sanity check that the expression is OK.
///
/// ```
/// #![deny(unused_must_use)]
///
/// use rayon::prelude::*;
///
/// let v: Vec<_> = (0..100).map(Some).collect();
/// let _ =
#[$expr]
/// ```
///
/// Now trigger the `must_use`.
///
/// ```compile_fail
/// #![deny(unused_must_use)]
///
/// use rayon::prelude::*;
///
/// let v: Vec<_> = (0..100).map(Some).collect();
#[$expr]
/// ```
mod $name {}
)*}
}
must_use! {
by_exponential_blocks /** v.par_iter().by_exponential_blocks(); */
by_uniform_blocks /** v.par_iter().by_uniform_blocks(2); */
step_by /** v.par_iter().step_by(2); */
chain /** v.par_iter().chain(&v); */
chunks /** v.par_iter().chunks(2); */
fold_chunks /** v.par_iter().fold_chunks(2, || 0, |x, _| x); */
fold_chunks_with /** v.par_iter().fold_chunks_with(2, 0, |x, _| x); */
cloned /** v.par_iter().cloned(); */
copied /** v.par_iter().copied(); */
enumerate /** v.par_iter().enumerate(); */
filter /** v.par_iter().filter(|_| true); */
filter_map /** v.par_iter().filter_map(|x| *x); */
flat_map /** v.par_iter().flat_map(|x| *x); */
flat_map_iter /** v.par_iter().flat_map_iter(|x| *x); */
flatten /** v.par_iter().flatten(); */
flatten_iter /** v.par_iter().flatten_iter(); */
fold /** v.par_iter().fold(|| 0, |x, _| x); */
fold_with /** v.par_iter().fold_with(0, |x, _| x); */
try_fold /** v.par_iter().try_fold(|| 0, |x, _| Some(x)); */
try_fold_with /** v.par_iter().try_fold_with(0, |x, _| Some(x)); */
inspect /** v.par_iter().inspect(|_| {}); */
interleave /** v.par_iter().interleave(&v); */
interleave_shortest /** v.par_iter().interleave_shortest(&v); */
intersperse /** v.par_iter().intersperse(&None); */
map /** v.par_iter().map(|x| x); */
map_with /** v.par_iter().map_with(0, |_, x| x); */
map_init /** v.par_iter().map_init(|| 0, |_, x| x); */
panic_fuse /** v.par_iter().panic_fuse(); */
positions /** v.par_iter().positions(|_| true); */
rev /** v.par_iter().rev(); */
skip /** v.par_iter().skip(1); */
take /** v.par_iter().take(1); */
update /** v.par_iter().update(|_| {}); */
while_some /** v.par_iter().cloned().while_some(); */
with_max_len /** v.par_iter().with_max_len(1); */
with_min_len /** v.par_iter().with_min_len(1); */
zip /** v.par_iter().zip(&v); */
zip_eq /** v.par_iter().zip_eq(&v); */
}

58
vendor/rayon/src/compile_fail/no_send_par_iter.rs vendored Normal file

@@ -0,0 +1,58 @@
// Check that `!Send` types fail early.
/** ```compile_fail,E0277
use rayon::prelude::*;
use std::ptr::null;
#[derive(Copy, Clone)]
struct NoSend(*const ());
unsafe impl Sync for NoSend {}
let x = Some(NoSend(null()));
x.par_iter()
.map(|&x| x) //~ ERROR
.count(); //~ ERROR
``` */
mod map {}
/** ```compile_fail,E0277
use rayon::prelude::*;
use std::ptr::null;
#[derive(Copy, Clone)]
struct NoSend(*const ());
unsafe impl Sync for NoSend {}
let x = Some(NoSend(null()));
x.par_iter()
.filter_map(|&x| Some(x)) //~ ERROR
.count(); //~ ERROR
``` */
mod filter_map {}
/** ```compile_fail,E0277
use rayon::prelude::*;
use std::ptr::null;
#[derive(Copy, Clone)]
struct NoSend(*const ());
unsafe impl Sync for NoSend {}
let x = Some(NoSend(null()));
x.par_iter()
.cloned() //~ ERROR
.count(); //~ ERROR
``` */
mod cloned {}

15
vendor/rayon/src/compile_fail/rc_par_iter.rs vendored Normal file

@@ -0,0 +1,15 @@
/*! ```compile_fail,E0599
// Check that we can't use the par-iter API to access contents of an
// `Rc`.
use rayon::prelude::*;
use std::rc::Rc;
let x = vec![Rc::new(22), Rc::new(23)];
let mut y = vec![];
x.into_par_iter() //~ ERROR no method named `into_par_iter`
.map(|rc| *rc)
.collect_into_vec(&mut y);
``` */

109
vendor/rayon/src/delegate.rs vendored Normal file
View File

@@ -0,0 +1,109 @@
//! Macros for delegating newtype iterators to inner types.
// Note: these place `impl` bounds at the end, as token gobbling is the only way
// I know how to consume an arbitrary list of constraints, with `$($args:tt)*`.
/// Creates a parallel iterator implementation which simply wraps an inner type
/// and delegates all methods inward. The actual struct must already be
/// declared with an `inner` field.
///
/// The implementation of `IntoParallelIterator` should be added separately.
macro_rules! delegate_iterator {
($iter:ty => $item:ty ,
impl $( $args:tt )*
) => {
impl $( $args )* ParallelIterator for $iter {
type Item = $item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where C: UnindexedConsumer<Self::Item>
{
self.inner.drive_unindexed(consumer)
}
fn opt_len(&self) -> Option<usize> {
self.inner.opt_len()
}
}
}
}
/// Creates an indexed parallel iterator implementation which simply wraps an
/// inner type and delegates all methods inward. The actual struct must already
/// be declared with an `inner` field.
macro_rules! delegate_indexed_iterator {
($iter:ty => $item:ty ,
impl $( $args:tt )*
) => {
delegate_iterator!{
$iter => $item ,
impl $( $args )*
}
impl $( $args )* IndexedParallelIterator for $iter {
fn drive<C>(self, consumer: C) -> C::Result
where C: Consumer<Self::Item>
{
self.inner.drive(consumer)
}
fn len(&self) -> usize {
self.inner.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where CB: ProducerCallback<Self::Item>
{
self.inner.with_producer(callback)
}
}
}
}
#[test]
fn unindexed_example() {
use crate::collections::btree_map::IntoIter;
use crate::iter::plumbing::*;
use crate::prelude::*;
use std::collections::BTreeMap;
struct MyIntoIter<T: Ord + Send, U: Send> {
inner: IntoIter<T, U>,
}
delegate_iterator! {
MyIntoIter<T, U> => (T, U),
impl<T: Ord + Send, U: Send>
}
let map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
let iter = MyIntoIter {
inner: map.into_par_iter(),
};
let vec: Vec<_> = iter.map(|(k, _)| k).collect();
assert_eq!(vec, &[1, 2, 3]);
}
#[test]
fn indexed_example() {
use crate::iter::plumbing::*;
use crate::prelude::*;
use crate::vec::IntoIter;
struct MyIntoIter<T: Send> {
inner: IntoIter<T>,
}
delegate_indexed_iterator! {
MyIntoIter<T> => T,
impl<T: Send>
}
let iter = MyIntoIter {
inner: vec![1, 2, 3].into_par_iter(),
};
let mut vec = vec![];
iter.collect_into_vec(&mut vec);
assert_eq!(vec, &[1, 2, 3]);
}

129
vendor/rayon/src/iter/blocks.rs vendored Normal file

@@ -0,0 +1,129 @@
use super::plumbing::*;
use super::*;
struct BlocksCallback<S, C> {
sizes: S,
consumer: C,
len: usize,
}
impl<T, S, C> ProducerCallback<T> for BlocksCallback<S, C>
where
C: UnindexedConsumer<T>,
S: Iterator<Item = usize>,
{
type Output = C::Result;
fn callback<P: Producer<Item = T>>(mut self, mut producer: P) -> Self::Output {
let mut remaining_len = self.len;
let mut consumer = self.consumer;
// we need a local variable for the accumulated results
// we call the reducer's identity by splitting at 0
let (left_consumer, right_consumer, _) = consumer.split_at(0);
let mut leftmost_res = left_consumer.into_folder().complete();
consumer = right_consumer;
// now we loop on each block size
while remaining_len > 0 && !consumer.full() {
// we compute the next block's size
let size = self.sizes.next().unwrap_or(usize::MAX);
let capped_size = remaining_len.min(size);
remaining_len -= capped_size;
// split the producer
let (left_producer, right_producer) = producer.split_at(capped_size);
producer = right_producer;
// split the consumer
let (left_consumer, right_consumer, _) = consumer.split_at(capped_size);
consumer = right_consumer;
leftmost_res = consumer.to_reducer().reduce(
leftmost_res,
bridge_producer_consumer(capped_size, left_producer, left_consumer),
);
}
leftmost_res
}
}
/// `ExponentialBlocks` is a parallel iterator that consumes itself as a sequence
/// of parallel blocks of exponentially increasing size.
///
/// This struct is created by the [`by_exponential_blocks()`] method on [`IndexedParallelIterator`]
///
/// [`by_exponential_blocks()`]: IndexedParallelIterator::by_exponential_blocks()
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct ExponentialBlocks<I> {
base: I,
}
impl<I> ExponentialBlocks<I> {
pub(super) fn new(base: I) -> Self {
Self { base }
}
}
impl<I> ParallelIterator for ExponentialBlocks<I>
where
I: IndexedParallelIterator,
{
type Item = I::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let first = crate::current_num_threads();
let callback = BlocksCallback {
consumer,
sizes: std::iter::successors(Some(first), exponential_size),
len: self.base.len(),
};
self.base.with_producer(callback)
}
}
fn exponential_size(size: &usize) -> Option<usize> {
Some(size.saturating_mul(2))
}
/// `UniformBlocks` is a parallel iterator that consumes itself as a sequence
/// of parallel blocks of constant size.
///
/// This struct is created by the [`by_uniform_blocks()`] method on [`IndexedParallelIterator`]
///
/// [`by_uniform_blocks()`]: IndexedParallelIterator::by_uniform_blocks()
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct UniformBlocks<I> {
base: I,
block_size: usize,
}
impl<I> UniformBlocks<I> {
pub(super) fn new(base: I, block_size: usize) -> Self {
Self { base, block_size }
}
}
impl<I> ParallelIterator for UniformBlocks<I>
where
I: IndexedParallelIterator,
{
type Item = I::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let callback = BlocksCallback {
consumer,
sizes: std::iter::repeat(self.block_size),
len: self.base.len(),
};
self.base.with_producer(callback)
}
}
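A minimal usage sketch of the two block adaptors (an editorial note, not part of the vendored file):

use rayon::prelude::*;

fn main() {
    // Exponential blocks start at current_num_threads() items and double,
    // which suits early-exit searches like find_first.
    let first = (0..1_000_000)
        .into_par_iter()
        .by_exponential_blocks()
        .find_first(|&x| x > 0 && x % 999 == 0);
    assert_eq!(first, Some(999));
    // Uniform blocks process a fixed number of items per block.
    let sum: u64 = (0..1_000u64).into_par_iter().by_uniform_blocks(128).sum();
    assert_eq!(sum, 499_500);
}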

258
vendor/rayon/src/iter/chain.rs vendored Normal file

@@ -0,0 +1,258 @@
use super::plumbing::*;
use super::*;
use rayon_core::join;
use std::iter;
/// `Chain` is an iterator that joins `b` after `a` in one continuous iterator.
/// This struct is created by the [`chain()`] method on [`ParallelIterator`]
///
/// [`chain()`]: ParallelIterator::chain()
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct Chain<A, B> {
a: A,
b: B,
}
impl<A, B> Chain<A, B> {
/// Creates a new `Chain` iterator.
pub(super) fn new(a: A, b: B) -> Self {
Chain { a, b }
}
}
impl<A, B> ParallelIterator for Chain<A, B>
where
A: ParallelIterator,
B: ParallelIterator<Item = A::Item>,
{
type Item = A::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let Chain { a, b } = self;
// If we returned a value from our own `opt_len`, then the collect consumer in particular
// will balk at being treated like an actual `UnindexedConsumer`. But when we do know the
// length, we can use `Consumer::split_at` instead, and this is still harmless for other
// truly-unindexed consumers too.
let (left, right, reducer) = if let Some(len) = a.opt_len() {
consumer.split_at(len)
} else {
let reducer = consumer.to_reducer();
(consumer.split_off_left(), consumer, reducer)
};
let (a, b) = join(|| a.drive_unindexed(left), || b.drive_unindexed(right));
reducer.reduce(a, b)
}
fn opt_len(&self) -> Option<usize> {
self.a.opt_len()?.checked_add(self.b.opt_len()?)
}
}
impl<A, B> IndexedParallelIterator for Chain<A, B>
where
A: IndexedParallelIterator,
B: IndexedParallelIterator<Item = A::Item>,
{
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
let Chain { a, b } = self;
let (left, right, reducer) = consumer.split_at(a.len());
let (a, b) = join(|| a.drive(left), || b.drive(right));
reducer.reduce(a, b)
}
fn len(&self) -> usize {
self.a.len().checked_add(self.b.len()).expect("overflow")
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
let a_len = self.a.len();
return self.a.with_producer(CallbackA {
callback,
a_len,
b: self.b,
});
struct CallbackA<CB, B> {
callback: CB,
a_len: usize,
b: B,
}
impl<CB, B> ProducerCallback<B::Item> for CallbackA<CB, B>
where
B: IndexedParallelIterator,
CB: ProducerCallback<B::Item>,
{
type Output = CB::Output;
fn callback<A>(self, a_producer: A) -> Self::Output
where
A: Producer<Item = B::Item>,
{
self.b.with_producer(CallbackB {
callback: self.callback,
a_len: self.a_len,
a_producer,
})
}
}
struct CallbackB<CB, A> {
callback: CB,
a_len: usize,
a_producer: A,
}
impl<CB, A> ProducerCallback<A::Item> for CallbackB<CB, A>
where
A: Producer,
CB: ProducerCallback<A::Item>,
{
type Output = CB::Output;
fn callback<B>(self, b_producer: B) -> Self::Output
where
B: Producer<Item = A::Item>,
{
let producer = ChainProducer::new(self.a_len, self.a_producer, b_producer);
self.callback.callback(producer)
}
}
}
}
// ////////////////////////////////////////////////////////////////////////
struct ChainProducer<A, B>
where
A: Producer,
B: Producer<Item = A::Item>,
{
a_len: usize,
a: A,
b: B,
}
impl<A, B> ChainProducer<A, B>
where
A: Producer,
B: Producer<Item = A::Item>,
{
fn new(a_len: usize, a: A, b: B) -> Self {
ChainProducer { a_len, a, b }
}
}
impl<A, B> Producer for ChainProducer<A, B>
where
A: Producer,
B: Producer<Item = A::Item>,
{
type Item = A::Item;
type IntoIter = ChainSeq<A::IntoIter, B::IntoIter>;
fn into_iter(self) -> Self::IntoIter {
ChainSeq::new(self.a.into_iter(), self.b.into_iter())
}
fn min_len(&self) -> usize {
Ord::max(self.a.min_len(), self.b.min_len())
}
fn max_len(&self) -> usize {
Ord::min(self.a.max_len(), self.b.max_len())
}
fn split_at(self, index: usize) -> (Self, Self) {
if index <= self.a_len {
let a_rem = self.a_len - index;
let (a_left, a_right) = self.a.split_at(index);
let (b_left, b_right) = self.b.split_at(0);
(
ChainProducer::new(index, a_left, b_left),
ChainProducer::new(a_rem, a_right, b_right),
)
} else {
let (a_left, a_right) = self.a.split_at(self.a_len);
let (b_left, b_right) = self.b.split_at(index - self.a_len);
(
ChainProducer::new(self.a_len, a_left, b_left),
ChainProducer::new(0, a_right, b_right),
)
}
}
fn fold_with<F>(self, mut folder: F) -> F
where
F: Folder<A::Item>,
{
folder = self.a.fold_with(folder);
if folder.full() {
folder
} else {
self.b.fold_with(folder)
}
}
}
// ////////////////////////////////////////////////////////////////////////
/// Wrapper for `Chain` to implement `ExactSizeIterator`
struct ChainSeq<A, B> {
chain: iter::Chain<A, B>,
}
impl<A, B> ChainSeq<A, B> {
fn new(a: A, b: B) -> ChainSeq<A, B>
where
A: ExactSizeIterator,
B: ExactSizeIterator<Item = A::Item>,
{
ChainSeq { chain: a.chain(b) }
}
}
impl<A, B> Iterator for ChainSeq<A, B>
where
A: Iterator,
B: Iterator<Item = A::Item>,
{
type Item = A::Item;
fn next(&mut self) -> Option<Self::Item> {
self.chain.next()
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.chain.size_hint()
}
}
impl<A, B> ExactSizeIterator for ChainSeq<A, B>
where
A: ExactSizeIterator,
B: ExactSizeIterator<Item = A::Item>,
{
}
impl<A, B> DoubleEndedIterator for ChainSeq<A, B>
where
A: DoubleEndedIterator,
B: DoubleEndedIterator<Item = A::Item>,
{
fn next_back(&mut self) -> Option<Self::Item> {
self.chain.next_back()
}
}
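A minimal usage sketch of `chain()` (an editorial note, not part of the vendored file):

use rayon::prelude::*;

fn main() {
    let a = vec![1, 2, 3];
    let b = vec![4, 5, 6];
    // Both sides are indexed, so the chain splits via Consumer::split_at
    // and collect preserves order across the seam.
    let all: Vec<i32> = a.par_iter().chain(b.par_iter()).copied().collect();
    assert_eq!(all, vec![1, 2, 3, 4, 5, 6]);
}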

216
vendor/rayon/src/iter/chunks.rs vendored Normal file

@@ -0,0 +1,216 @@
use super::plumbing::*;
use super::*;
/// `Chunks` is an iterator that groups elements of an underlying iterator.
///
/// This struct is created by the [`chunks()`] method on [`IndexedParallelIterator`]
///
/// [`chunks()`]: IndexedParallelIterator::chunks()
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct Chunks<I> {
size: usize,
i: I,
}
impl<I> Chunks<I> {
/// Creates a new `Chunks` iterator
pub(super) fn new(i: I, size: usize) -> Self {
Chunks { i, size }
}
}
impl<I> ParallelIterator for Chunks<I>
where
I: IndexedParallelIterator,
{
type Item = Vec<I::Item>;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: Consumer<Vec<I::Item>>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<I> IndexedParallelIterator for Chunks<I>
where
I: IndexedParallelIterator,
{
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn len(&self) -> usize {
self.i.len().div_ceil(self.size)
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
let len = self.i.len();
return self.i.with_producer(Callback {
size: self.size,
len,
callback,
});
struct Callback<CB> {
size: usize,
len: usize,
callback: CB,
}
impl<T, CB> ProducerCallback<T> for Callback<CB>
where
CB: ProducerCallback<Vec<T>>,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = T>,
{
let producer = ChunkProducer::new(self.size, self.len, base, Vec::from_iter);
self.callback.callback(producer)
}
}
}
}
pub(super) struct ChunkProducer<P, F> {
chunk_size: usize,
len: usize,
base: P,
map: F,
}
impl<P, F> ChunkProducer<P, F> {
pub(super) fn new(chunk_size: usize, len: usize, base: P, map: F) -> Self {
Self {
chunk_size,
len,
base,
map,
}
}
}
impl<P, F, T> Producer for ChunkProducer<P, F>
where
P: Producer,
F: Fn(P::IntoIter) -> T + Send + Clone,
{
type Item = T;
type IntoIter = std::iter::Map<ChunkSeq<P>, F>;
fn into_iter(self) -> Self::IntoIter {
let chunks = ChunkSeq {
chunk_size: self.chunk_size,
len: self.len,
inner: if self.len > 0 { Some(self.base) } else { None },
};
chunks.map(self.map)
}
fn split_at(self, index: usize) -> (Self, Self) {
let elem_index = Ord::min(index * self.chunk_size, self.len);
let (left, right) = self.base.split_at(elem_index);
(
ChunkProducer {
chunk_size: self.chunk_size,
len: elem_index,
base: left,
map: self.map.clone(),
},
ChunkProducer {
chunk_size: self.chunk_size,
len: self.len - elem_index,
base: right,
map: self.map,
},
)
}
fn min_len(&self) -> usize {
self.base.min_len().div_ceil(self.chunk_size)
}
fn max_len(&self) -> usize {
self.base.max_len() / self.chunk_size
}
}
pub(super) struct ChunkSeq<P> {
chunk_size: usize,
len: usize,
inner: Option<P>,
}
impl<P> Iterator for ChunkSeq<P>
where
P: Producer,
{
type Item = P::IntoIter;
fn next(&mut self) -> Option<Self::Item> {
let producer = self.inner.take()?;
if self.len > self.chunk_size {
let (left, right) = producer.split_at(self.chunk_size);
self.inner = Some(right);
self.len -= self.chunk_size;
Some(left.into_iter())
} else {
debug_assert!(self.len > 0);
self.len = 0;
Some(producer.into_iter())
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.len();
(len, Some(len))
}
}
impl<P> ExactSizeIterator for ChunkSeq<P>
where
P: Producer,
{
#[inline]
fn len(&self) -> usize {
self.len.div_ceil(self.chunk_size)
}
}
impl<P> DoubleEndedIterator for ChunkSeq<P>
where
P: Producer,
{
fn next_back(&mut self) -> Option<Self::Item> {
let producer = self.inner.take()?;
if self.len > self.chunk_size {
let mut size = self.len % self.chunk_size;
if size == 0 {
size = self.chunk_size;
}
let (left, right) = producer.split_at(self.len - size);
self.inner = Some(left);
self.len -= size;
Some(right.into_iter())
} else {
debug_assert!(self.len > 0);
self.len = 0;
Some(producer.into_iter())
}
}
}
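A minimal usage sketch of `chunks()` (an editorial note, not part of the vendored file):

use rayon::prelude::*;

fn main() {
    let v: Vec<i32> = (1..=7).collect();
    // Each chunk is materialized as a Vec; the last chunk may be shorter.
    let chunks: Vec<Vec<i32>> = v.into_par_iter().chunks(3).collect();
    assert_eq!(chunks, vec![vec![1, 2, 3], vec![4, 5, 6], vec![7]]);
}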

219
vendor/rayon/src/iter/cloned.rs vendored Normal file

@@ -0,0 +1,219 @@
use super::plumbing::*;
use super::*;
use std::iter;
/// `Cloned` is an iterator that clones the elements of an underlying iterator.
///
/// This struct is created by the [`cloned()`] method on [`ParallelIterator`]
///
/// [`cloned()`]: ParallelIterator::cloned()
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct Cloned<I> {
base: I,
}
impl<I> Cloned<I> {
/// Creates a new `Cloned` iterator.
pub(super) fn new(base: I) -> Self {
Cloned { base }
}
}
impl<'a, T, I> ParallelIterator for Cloned<I>
where
I: ParallelIterator<Item = &'a T>,
T: 'a + Clone + Send + Sync,
{
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer1 = ClonedConsumer::new(consumer);
self.base.drive_unindexed(consumer1)
}
fn opt_len(&self) -> Option<usize> {
self.base.opt_len()
}
}
impl<'a, T, I> IndexedParallelIterator for Cloned<I>
where
I: IndexedParallelIterator<Item = &'a T>,
T: 'a + Clone + Send + Sync,
{
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
let consumer1 = ClonedConsumer::new(consumer);
self.base.drive(consumer1)
}
fn len(&self) -> usize {
self.base.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
return self.base.with_producer(Callback { callback });
struct Callback<CB> {
callback: CB,
}
impl<'a, T, CB> ProducerCallback<&'a T> for Callback<CB>
where
CB: ProducerCallback<T>,
T: 'a + Clone + Send,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = &'a T>,
{
let producer = ClonedProducer { base };
self.callback.callback(producer)
}
}
}
}
// ////////////////////////////////////////////////////////////////////////
struct ClonedProducer<P> {
base: P,
}
impl<'a, T, P> Producer for ClonedProducer<P>
where
P: Producer<Item = &'a T>,
T: 'a + Clone,
{
type Item = T;
type IntoIter = iter::Cloned<P::IntoIter>;
fn into_iter(self) -> Self::IntoIter {
self.base.into_iter().cloned()
}
fn min_len(&self) -> usize {
self.base.min_len()
}
fn max_len(&self) -> usize {
self.base.max_len()
}
fn split_at(self, index: usize) -> (Self, Self) {
let (left, right) = self.base.split_at(index);
(
ClonedProducer { base: left },
ClonedProducer { base: right },
)
}
fn fold_with<F>(self, folder: F) -> F
where
F: Folder<Self::Item>,
{
self.base.fold_with(ClonedFolder { base: folder }).base
}
}
// ////////////////////////////////////////////////////////////////////////
// Consumer implementation
struct ClonedConsumer<C> {
base: C,
}
impl<C> ClonedConsumer<C> {
fn new(base: C) -> Self {
ClonedConsumer { base }
}
}
impl<'a, T, C> Consumer<&'a T> for ClonedConsumer<C>
where
C: Consumer<T>,
T: 'a + Clone,
{
type Folder = ClonedFolder<C::Folder>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
ClonedConsumer::new(left),
ClonedConsumer::new(right),
reducer,
)
}
fn into_folder(self) -> Self::Folder {
ClonedFolder {
base: self.base.into_folder(),
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<'a, T, C> UnindexedConsumer<&'a T> for ClonedConsumer<C>
where
C: UnindexedConsumer<T>,
T: 'a + Clone,
{
fn split_off_left(&self) -> Self {
ClonedConsumer::new(self.base.split_off_left())
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct ClonedFolder<F> {
base: F,
}
impl<'a, T, F> Folder<&'a T> for ClonedFolder<F>
where
F: Folder<T>,
T: 'a + Clone,
{
type Result = F::Result;
fn consume(self, item: &'a T) -> Self {
ClonedFolder {
base: self.base.consume(item.clone()),
}
}
fn consume_iter<I>(mut self, iter: I) -> Self
where
I: IntoIterator<Item = &'a T>,
{
self.base = self.base.consume_iter(iter.into_iter().cloned());
self
}
fn complete(self) -> F::Result {
self.base.complete()
}
fn full(&self) -> bool {
self.base.full()
}
}
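A minimal usage sketch of `cloned()` (an editorial note, not part of the vendored file):

use rayon::prelude::*;

fn main() {
    let words = vec!["alpha".to_string(), "beta".to_string()];
    // cloned() maps &String to an owned String without an explicit closure.
    let owned: Vec<String> = words.par_iter().cloned().collect();
    assert_eq!(owned, words);
}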

186
vendor/rayon/src/iter/collect/consumer.rs vendored Normal file

@@ -0,0 +1,186 @@
use super::super::plumbing::*;
use crate::SendPtr;
use std::marker::PhantomData;
use std::ptr;
use std::slice;
pub(super) struct CollectConsumer<'c, T: Send> {
/// See `CollectResult` for explanation of why this is not a slice
start: SendPtr<T>,
len: usize,
marker: PhantomData<&'c mut T>,
}
impl<T: Send> CollectConsumer<'_, T> {
/// Create a collector for `len` items in the unused capacity of the vector.
pub(super) fn appender(vec: &mut Vec<T>, len: usize) -> CollectConsumer<'_, T> {
let start = vec.len();
assert!(vec.capacity() - start >= len);
// SAFETY: We already made sure to have the additional space allocated.
// The pointer is derived from `Vec` directly, not through a `Deref`,
// so it has provenance over the whole allocation.
unsafe { CollectConsumer::new(vec.as_mut_ptr().add(start), len) }
}
}
impl<'c, T: Send + 'c> CollectConsumer<'c, T> {
/// The target memory is considered uninitialized, and will be
/// overwritten without reading or dropping existing values.
unsafe fn new(start: *mut T, len: usize) -> Self {
CollectConsumer {
start: SendPtr(start),
len,
marker: PhantomData,
}
}
}
/// CollectResult represents an initialized part of the target slice.
///
/// This is a proxy owner of the elements in the slice; when it drops,
/// the elements will be dropped, unless its ownership is released before then.
#[must_use]
pub(super) struct CollectResult<'c, T> {
/// This pointer and length has the same representation as a slice,
/// but retains the provenance of the entire array so that we can merge
/// these regions together in `CollectReducer`.
start: SendPtr<T>,
total_len: usize,
/// The current initialized length after `start`
initialized_len: usize,
/// Lifetime invariance guarantees that the data flows from consumer to result,
/// especially for the `scope_fn` callback in `Collect::with_consumer`.
invariant_lifetime: PhantomData<&'c mut &'c mut [T]>,
}
unsafe impl<'c, T> Send for CollectResult<'c, T> where T: Send {}
impl<'c, T> CollectResult<'c, T> {
/// The current length of the collect result
pub(super) fn len(&self) -> usize {
self.initialized_len
}
/// Release ownership of the slice of elements, and return the length
pub(super) fn release_ownership(mut self) -> usize {
let ret = self.initialized_len;
self.initialized_len = 0;
ret
}
}
impl<'c, T> Drop for CollectResult<'c, T> {
fn drop(&mut self) {
// Drop the first `self.initialized_len` elements, which have been recorded
// to be initialized by the folder.
unsafe {
ptr::drop_in_place(slice::from_raw_parts_mut(
self.start.0,
self.initialized_len,
));
}
}
}
impl<'c, T: Send + 'c> Consumer<T> for CollectConsumer<'c, T> {
type Folder = CollectResult<'c, T>;
type Reducer = CollectReducer;
type Result = CollectResult<'c, T>;
fn split_at(self, index: usize) -> (Self, Self, CollectReducer) {
let CollectConsumer { start, len, .. } = self;
// Produce new consumers.
// SAFETY: This assert checks that `index` is a valid offset for `start`
unsafe {
assert!(index <= len);
(
CollectConsumer::new(start.0, index),
CollectConsumer::new(start.0.add(index), len - index),
CollectReducer,
)
}
}
fn into_folder(self) -> Self::Folder {
// Create a result/folder that consumes values and writes them
// into the region after start. The initial result has length 0.
CollectResult {
start: self.start,
total_len: self.len,
initialized_len: 0,
invariant_lifetime: PhantomData,
}
}
fn full(&self) -> bool {
false
}
}
impl<'c, T: Send + 'c> Folder<T> for CollectResult<'c, T> {
type Result = Self;
fn consume(mut self, item: T) -> Self {
assert!(
self.initialized_len < self.total_len,
"too many values pushed to consumer"
);
// SAFETY: The assert above is a bounds check for this write, and we
// avoid assignment here so we do not drop an uninitialized T.
unsafe {
// Write item and increase the initialized length
self.start.0.add(self.initialized_len).write(item);
self.initialized_len += 1;
}
self
}
fn complete(self) -> Self::Result {
// NB: We don't explicitly check that the local writes were complete,
// but Collect will assert the total result length in the end.
self
}
fn full(&self) -> bool {
false
}
}
/// Pretend to be unindexed for `special_extend`,
/// but we should never actually get used that way...
impl<'c, T: Send + 'c> UnindexedConsumer<T> for CollectConsumer<'c, T> {
fn split_off_left(&self) -> Self {
unreachable!("CollectConsumer must be indexed!")
}
fn to_reducer(&self) -> Self::Reducer {
CollectReducer
}
}
/// CollectReducer combines adjacent chunks; the result must always
/// be contiguous so that it is one combined slice.
pub(super) struct CollectReducer;
impl<'c, T> Reducer<CollectResult<'c, T>> for CollectReducer {
fn reduce(
self,
mut left: CollectResult<'c, T>,
right: CollectResult<'c, T>,
) -> CollectResult<'c, T> {
// Merge if the CollectResults are adjacent and in left-to-right order;
// otherwise, drop the right piece now, and the total length will come up
// short when the correctness of the collected result is asserted.
unsafe {
let left_end = left.start.0.add(left.initialized_len);
if left_end == right.start.0 {
left.total_len += right.total_len;
left.initialized_len += right.release_ownership();
}
left
}
}
}

114
vendor/rayon/src/iter/collect/mod.rs vendored Normal file

@@ -0,0 +1,114 @@
use super::{IndexedParallelIterator, ParallelIterator};
mod consumer;
use self::consumer::CollectConsumer;
use self::consumer::CollectResult;
use super::unzip::unzip_indexed;
mod test;
/// Collects the results of the exact iterator into the specified vector.
///
/// This is called by `IndexedParallelIterator::collect_into_vec`.
pub(super) fn collect_into_vec<I, T>(pi: I, v: &mut Vec<T>)
where
I: IndexedParallelIterator<Item = T>,
T: Send,
{
v.truncate(0); // clear any old data
let len = pi.len();
collect_with_consumer(v, len, |consumer| pi.drive(consumer));
}
/// Collects the results of the iterator into the specified vector.
///
/// Technically, this only works for `IndexedParallelIterator`, but we're faking a
/// bit of specialization here until Rust can do that natively. Callers are
/// using `opt_len` to find the length before calling this, and only exact
/// iterators will return anything but `None` there.
///
/// Since the type system doesn't understand that contract, we have to allow
/// *any* `ParallelIterator` here, and `CollectConsumer` has to also implement
/// `UnindexedConsumer`. That implementation panics `unreachable!` in case
/// there's a bug where we actually do try to use this unindexed.
pub(super) fn special_extend<I, T>(pi: I, len: usize, v: &mut Vec<T>)
where
I: ParallelIterator<Item = T>,
T: Send,
{
collect_with_consumer(v, len, |consumer| pi.drive_unindexed(consumer));
}
/// Unzips the results of the exact iterator into the specified vectors.
///
/// This is called by `IndexedParallelIterator::unzip_into_vecs`.
pub(super) fn unzip_into_vecs<I, A, B>(pi: I, left: &mut Vec<A>, right: &mut Vec<B>)
where
I: IndexedParallelIterator<Item = (A, B)>,
A: Send,
B: Send,
{
// clear any old data
left.truncate(0);
right.truncate(0);
let len = pi.len();
collect_with_consumer(right, len, |right_consumer| {
let mut right_result = None;
collect_with_consumer(left, len, |left_consumer| {
let (left_r, right_r) = unzip_indexed(pi, left_consumer, right_consumer);
right_result = Some(right_r);
left_r
});
right_result.unwrap()
});
}
/// Create a consumer on the slice of memory we are collecting into.
///
/// The consumer needs to be used inside the scope function, and the
/// complete collect result passed back.
///
/// This method will verify the collect result, and panic if the slice
/// was not fully written into. Otherwise, in the successful case,
/// the vector is complete with the collected result.
fn collect_with_consumer<T, F>(vec: &mut Vec<T>, len: usize, scope_fn: F)
where
T: Send,
F: FnOnce(CollectConsumer<'_, T>) -> CollectResult<'_, T>,
{
// Reserve space for `len` more elements in the vector,
vec.reserve(len);
// Create the consumer and run the callback for collection.
let result = scope_fn(CollectConsumer::appender(vec, len));
// The `CollectResult` represents a contiguous part of the slice, that has
// been written to. On unwind here, the `CollectResult` will be dropped. If
// some producers on the way did not produce enough elements, partial
// `CollectResult`s may have been dropped without being reduced to the final
// result, and we will see that as the length coming up short.
//
// Here, we assert that the added length is fully initialized. This is
// checked by the following assert, which verifies that a complete
// `CollectResult` was produced; if the length is correct, it necessarily covers the
// target slice. Since we know that the consumer cannot have escaped from
// `drive` (by parametricity, essentially), we know that any stores that
// will happen, have happened. Unless some code is buggy, that means we
// should have seen `len` total writes.
let actual_writes = result.len();
assert!(
actual_writes == len,
"expected {len} total writes, but got {actual_writes}"
);
// Release the result's mutable borrow and "proxy ownership"
// of the elements, before the vector takes it over.
result.release_ownership();
let new_len = vec.len() + len;
unsafe {
vec.set_len(new_len);
}
}
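A minimal usage sketch of the public entry point served by this module (an editorial note, not part of the vendored file):

use rayon::prelude::*;

fn main() {
    let mut out = Vec::new();
    // The exact length is known up front, so collect_into_vec reserves once
    // and each split writes its own disjoint region of the buffer.
    (0..1_000).into_par_iter().map(|i| i * 2).collect_into_vec(&mut out);
    assert_eq!(out.len(), 1_000);
    assert_eq!(&out[..4], &[0, 2, 4, 6]);
}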

368
vendor/rayon/src/iter/collect/test.rs vendored Normal file

@@ -0,0 +1,368 @@
#![cfg(test)]
#![allow(unused_assignments)]
// These tests are primarily targeting "abusive" producers that will
// try to drive the "collect consumer" incorrectly. These should
// result in panics.
use super::collect_with_consumer;
use crate::iter::plumbing::*;
use rayon_core::join;
use std::fmt;
use std::panic;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread::Result as ThreadResult;
/// Promises to produce 2 items, but then produces 3. Does not do any
/// splits at all.
#[test]
#[should_panic(expected = "too many values")]
fn produce_too_many_items() {
let mut v = vec![];
collect_with_consumer(&mut v, 2, |consumer| {
let mut folder = consumer.into_folder();
folder = folder.consume(22);
folder = folder.consume(23);
folder = folder.consume(24);
unreachable!("folder does not complete")
});
}
/// Produces fewer items than promised. Does not do any
/// splits at all.
#[test]
#[should_panic(expected = "expected 5 total writes, but got 2")]
fn produce_fewer_items() {
let mut v = vec![];
collect_with_consumer(&mut v, 5, |consumer| {
let mut folder = consumer.into_folder();
folder = folder.consume(22);
folder = folder.consume(23);
folder.complete()
});
}
// Complete is not called by the consumer. Hence, the collection vector is not fully initialized.
#[test]
#[should_panic(expected = "expected 4 total writes, but got 2")]
fn left_produces_items_with_no_complete() {
let mut v = vec![];
collect_with_consumer(&mut v, 4, |consumer| {
let (left_consumer, right_consumer, _) = consumer.split_at(2);
let mut left_folder = left_consumer.into_folder();
let mut right_folder = right_consumer.into_folder();
left_folder = left_folder.consume(0).consume(1);
right_folder = right_folder.consume(2).consume(3);
right_folder.complete()
});
}
// Complete is not called by the right consumer. Hence, the
// collection vector is not fully initialized.
#[test]
#[should_panic(expected = "expected 4 total writes, but got 2")]
fn right_produces_items_with_no_complete() {
let mut v = vec![];
collect_with_consumer(&mut v, 4, |consumer| {
let (left_consumer, right_consumer, _) = consumer.split_at(2);
let mut left_folder = left_consumer.into_folder();
let mut right_folder = right_consumer.into_folder();
left_folder = left_folder.consume(0).consume(1);
right_folder = right_folder.consume(2).consume(3);
left_folder.complete()
});
}
// Complete is not called by the consumer. Hence, the collection vector is not fully initialized.
#[test]
#[cfg_attr(not(panic = "unwind"), ignore)]
fn produces_items_with_no_complete() {
let counter = DropCounter::default();
let mut v = vec![];
let panic_result = panic::catch_unwind(panic::AssertUnwindSafe(|| {
collect_with_consumer(&mut v, 2, |consumer| {
let mut folder = consumer.into_folder();
folder = folder.consume(counter.element());
folder = folder.consume(counter.element());
panic!("folder does not complete");
});
}));
assert!(v.is_empty());
assert_is_panic_with_message(&panic_result, "folder does not complete");
counter.assert_drop_count();
}
// The left consumer produces too many items while the right
// consumer produces correct number.
#[test]
#[should_panic(expected = "too many values")]
fn left_produces_too_many_items() {
let mut v = vec![];
collect_with_consumer(&mut v, 4, |consumer| {
let (left_consumer, right_consumer, _) = consumer.split_at(2);
let mut left_folder = left_consumer.into_folder();
let mut right_folder = right_consumer.into_folder();
left_folder = left_folder.consume(0).consume(1).consume(2);
right_folder = right_folder.consume(2).consume(3);
let _ = right_folder.complete();
unreachable!("folder does not complete");
});
}
// The right consumer produces too many items while the left
// consumer produces correct number.
#[test]
#[should_panic(expected = "too many values")]
fn right_produces_too_many_items() {
let mut v = vec![];
collect_with_consumer(&mut v, 4, |consumer| {
let (left_consumer, right_consumer, _) = consumer.split_at(2);
let mut left_folder = left_consumer.into_folder();
let mut right_folder = right_consumer.into_folder();
left_folder = left_folder.consume(0).consume(1);
right_folder = right_folder.consume(2).consume(3).consume(4);
let _ = left_folder.complete();
unreachable!("folder does not complete");
});
}
// The left consumer produces fewer items while the right
// consumer produces correct number.
#[test]
#[should_panic(expected = "expected 4 total writes, but got 1")]
fn left_produces_fewer_items() {
let mut v = vec![];
collect_with_consumer(&mut v, 4, |consumer| {
let reducer = consumer.to_reducer();
let (left_consumer, right_consumer, _) = consumer.split_at(2);
let mut left_folder = left_consumer.into_folder();
let mut right_folder = right_consumer.into_folder();
left_folder = left_folder.consume(0);
right_folder = right_folder.consume(2).consume(3);
let left_result = left_folder.complete();
let right_result = right_folder.complete();
reducer.reduce(left_result, right_result)
});
}
// The left and right consumer produce the correct number but
// only left result is returned
#[test]
#[should_panic(expected = "expected 4 total writes, but got 2")]
fn only_left_result() {
let mut v = vec![];
collect_with_consumer(&mut v, 4, |consumer| {
let (left_consumer, right_consumer, _) = consumer.split_at(2);
let mut left_folder = left_consumer.into_folder();
let mut right_folder = right_consumer.into_folder();
left_folder = left_folder.consume(0).consume(1);
right_folder = right_folder.consume(2).consume(3);
let left_result = left_folder.complete();
let _ = right_folder.complete();
left_result
});
}
// The left and right consumer produce the correct number but
// only right result is returned
#[test]
#[should_panic(expected = "expected 4 total writes, but got 2")]
fn only_right_result() {
let mut v = vec![];
collect_with_consumer(&mut v, 4, |consumer| {
let (left_consumer, right_consumer, _) = consumer.split_at(2);
let mut left_folder = left_consumer.into_folder();
let mut right_folder = right_consumer.into_folder();
left_folder = left_folder.consume(0).consume(1);
right_folder = right_folder.consume(2).consume(3);
let _ = left_folder.complete();
right_folder.complete()
});
}
// The left and right consumers produce the correct number, but reduce
// in the wrong order.
#[test]
#[should_panic(expected = "expected 4 total writes, but got 2")]
fn reducer_does_not_preserve_order() {
let mut v = vec![];
collect_with_consumer(&mut v, 4, |consumer| {
let reducer = consumer.to_reducer();
let (left_consumer, right_consumer, _) = consumer.split_at(2);
let mut left_folder = left_consumer.into_folder();
let mut right_folder = right_consumer.into_folder();
left_folder = left_folder.consume(0).consume(1);
right_folder = right_folder.consume(2).consume(3);
let left_result = left_folder.complete();
let right_result = right_folder.complete();
reducer.reduce(right_result, left_result)
});
}
// The right consumer produces fewer items while the left
// consumer produces the correct number.
#[test]
#[should_panic(expected = "expected 4 total writes, but got 3")]
fn right_produces_fewer_items() {
let mut v = vec![];
collect_with_consumer(&mut v, 4, |consumer| {
let reducer = consumer.to_reducer();
let (left_consumer, right_consumer, _) = consumer.split_at(2);
let mut left_folder = left_consumer.into_folder();
let mut right_folder = right_consumer.into_folder();
left_folder = left_folder.consume(0).consume(1);
right_folder = right_folder.consume(2);
let left_result = left_folder.complete();
let right_result = right_folder.complete();
reducer.reduce(left_result, right_result)
});
}
// The left consumer panics and the right stops short, like `panic_fuse()`.
// We should get the left panic without finishing `collect_with_consumer`.
#[test]
#[should_panic(expected = "left consumer panic")]
fn left_panics() {
let mut v = vec![];
collect_with_consumer(&mut v, 4, |consumer| {
let reducer = consumer.to_reducer();
let (left_consumer, right_consumer, _) = consumer.split_at(2);
let (left_result, right_result) = join(
|| {
let mut left_folder = left_consumer.into_folder();
left_folder = left_folder.consume(0);
panic!("left consumer panic");
},
|| {
let mut right_folder = right_consumer.into_folder();
right_folder = right_folder.consume(2);
right_folder.complete() // early return
},
);
reducer.reduce(left_result, right_result)
});
unreachable!();
}
// The right consumer panics and the left stops short, like `panic_fuse()`.
// We should get the right panic without finishing `collect_with_consumer`.
#[test]
#[should_panic(expected = "right consumer panic")]
fn right_panics() {
let mut v = vec![];
collect_with_consumer(&mut v, 4, |consumer| {
let reducer = consumer.to_reducer();
let (left_consumer, right_consumer, _) = consumer.split_at(2);
let (left_result, right_result) = join(
|| {
let mut left_folder = left_consumer.into_folder();
left_folder = left_folder.consume(0);
left_folder.complete() // early return
},
|| {
let mut right_folder = right_consumer.into_folder();
right_folder = right_folder.consume(2);
panic!("right consumer panic");
},
);
reducer.reduce(left_result, right_result)
});
unreachable!();
}
// The left consumer produces fewer items while the right
// consumer produces the correct number; check that created elements are dropped.
#[test]
#[cfg_attr(not(panic = "unwind"), ignore)]
fn left_produces_fewer_items_drops() {
let counter = DropCounter::default();
let mut v = vec![];
let panic_result = panic::catch_unwind(panic::AssertUnwindSafe(|| {
collect_with_consumer(&mut v, 4, |consumer| {
let reducer = consumer.to_reducer();
let (left_consumer, right_consumer, _) = consumer.split_at(2);
let mut left_folder = left_consumer.into_folder();
let mut right_folder = right_consumer.into_folder();
left_folder = left_folder.consume(counter.element());
right_folder = right_folder
.consume(counter.element())
.consume(counter.element());
let left_result = left_folder.complete();
let right_result = right_folder.complete();
reducer.reduce(left_result, right_result)
});
}));
assert!(v.is_empty());
assert_is_panic_with_message(&panic_result, "expected 4 total writes, but got 1");
counter.assert_drop_count();
}
/// This counter can create elements, and then count and verify
/// how many of them have actually been dropped again.
#[derive(Default)]
struct DropCounter {
created: AtomicUsize,
dropped: AtomicUsize,
}
struct Element<'a>(&'a AtomicUsize);
impl DropCounter {
fn created(&self) -> usize {
self.created.load(Ordering::SeqCst)
}
fn dropped(&self) -> usize {
self.dropped.load(Ordering::SeqCst)
}
fn element(&self) -> Element<'_> {
self.created.fetch_add(1, Ordering::SeqCst);
Element(&self.dropped)
}
fn assert_drop_count(&self) {
assert_eq!(
self.created(),
self.dropped(),
"Expected {} dropped elements, but found {}",
self.created(),
self.dropped()
);
}
}
impl<'a> Drop for Element<'a> {
fn drop(&mut self) {
self.0.fetch_add(1, Ordering::SeqCst);
}
}
/// Assert that the result from `catch_unwind` is a panic that contains the expected message.
fn assert_is_panic_with_message<T>(result: &ThreadResult<T>, expected: &str)
where
T: fmt::Debug,
{
match result {
Ok(value) => {
panic!("assertion failure: Expected panic, got successful {value:?}");
}
Err(error) => {
let message_str = error.downcast_ref::<&'static str>().cloned();
let message_string = error.downcast_ref::<String>().map(String::as_str);
if let Some(message) = message_str.or(message_string) {
if !message.contains(expected) {
panic!(
"assertion failure: Expected {expected:?}, but found panic with {message:?}"
);
}
// assertion passes
} else {
panic!(
"assertion failure: Expected {expected:?}, but found panic with unknown value"
);
}
}
}
}

219
vendor/rayon/src/iter/copied.rs vendored Normal file

@@ -0,0 +1,219 @@
use super::plumbing::*;
use super::*;
use std::iter;
/// `Copied` is an iterator that copies the elements of an underlying iterator.
///
/// This struct is created by the [`copied()`] method on [`ParallelIterator`].
///
/// [`copied()`]: ParallelIterator::copied()
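///
/// # Examples
///
/// A minimal usage sketch (added illustration; not part of the original file):
///
/// ```
/// use rayon::prelude::*;
///
/// let v = [1, 2, 3];
/// let doubled: Vec<i32> = v.par_iter().copied().map(|x| x * 2).collect();
/// assert_eq!(doubled, vec![2, 4, 6]);
/// ```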
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct Copied<I> {
base: I,
}
impl<I> Copied<I> {
/// Creates a new `Copied` iterator.
pub(super) fn new(base: I) -> Self {
Copied { base }
}
}
impl<'a, T, I> ParallelIterator for Copied<I>
where
I: ParallelIterator<Item = &'a T>,
T: 'a + Copy + Send + Sync,
{
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer1 = CopiedConsumer::new(consumer);
self.base.drive_unindexed(consumer1)
}
fn opt_len(&self) -> Option<usize> {
self.base.opt_len()
}
}
impl<'a, T, I> IndexedParallelIterator for Copied<I>
where
I: IndexedParallelIterator<Item = &'a T>,
T: 'a + Copy + Send + Sync,
{
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
let consumer1 = CopiedConsumer::new(consumer);
self.base.drive(consumer1)
}
fn len(&self) -> usize {
self.base.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
return self.base.with_producer(Callback { callback });
struct Callback<CB> {
callback: CB,
}
impl<'a, T, CB> ProducerCallback<&'a T> for Callback<CB>
where
CB: ProducerCallback<T>,
T: 'a + Copy + Send,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = &'a T>,
{
let producer = CopiedProducer { base };
self.callback.callback(producer)
}
}
}
}
// ////////////////////////////////////////////////////////////////////////
struct CopiedProducer<P> {
base: P,
}
impl<'a, T, P> Producer for CopiedProducer<P>
where
P: Producer<Item = &'a T>,
T: 'a + Copy,
{
type Item = T;
type IntoIter = iter::Copied<P::IntoIter>;
fn into_iter(self) -> Self::IntoIter {
self.base.into_iter().copied()
}
fn min_len(&self) -> usize {
self.base.min_len()
}
fn max_len(&self) -> usize {
self.base.max_len()
}
fn split_at(self, index: usize) -> (Self, Self) {
let (left, right) = self.base.split_at(index);
(
CopiedProducer { base: left },
CopiedProducer { base: right },
)
}
fn fold_with<F>(self, folder: F) -> F
where
F: Folder<Self::Item>,
{
self.base.fold_with(CopiedFolder { base: folder }).base
}
}
// ////////////////////////////////////////////////////////////////////////
// Consumer implementation
struct CopiedConsumer<C> {
base: C,
}
impl<C> CopiedConsumer<C> {
fn new(base: C) -> Self {
CopiedConsumer { base }
}
}
impl<'a, T, C> Consumer<&'a T> for CopiedConsumer<C>
where
C: Consumer<T>,
T: 'a + Copy,
{
type Folder = CopiedFolder<C::Folder>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
CopiedConsumer::new(left),
CopiedConsumer::new(right),
reducer,
)
}
fn into_folder(self) -> Self::Folder {
CopiedFolder {
base: self.base.into_folder(),
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<'a, T, C> UnindexedConsumer<&'a T> for CopiedConsumer<C>
where
C: UnindexedConsumer<T>,
T: 'a + Copy,
{
fn split_off_left(&self) -> Self {
CopiedConsumer::new(self.base.split_off_left())
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct CopiedFolder<F> {
base: F,
}
impl<'a, T, F> Folder<&'a T> for CopiedFolder<F>
where
F: Folder<T>,
T: 'a + Copy,
{
type Result = F::Result;
fn consume(self, &item: &'a T) -> Self {
CopiedFolder {
base: self.base.consume(item),
}
}
fn consume_iter<I>(mut self, iter: I) -> Self
where
I: IntoIterator<Item = &'a T>,
{
self.base = self.base.consume_iter(iter.into_iter().copied());
self
}
fn complete(self) -> F::Result {
self.base.complete()
}
fn full(&self) -> bool {
self.base.full()
}
}

108
vendor/rayon/src/iter/empty.rs vendored Normal file

@@ -0,0 +1,108 @@
use crate::iter::plumbing::*;
use crate::iter::*;
use std::fmt;
use std::marker::PhantomData;
/// Creates a parallel iterator that produces nothing.
///
/// This admits no parallelism on its own, but it could be used for code that
/// deals with generic parallel iterators.
///
/// # Examples
///
/// ```
/// use rayon::prelude::*;
/// use rayon::iter::empty;
///
/// let pi = (0..1234).into_par_iter()
/// .chain(empty())
/// .chain(1234..10_000);
///
/// assert_eq!(pi.count(), 10_000);
/// ```
pub fn empty<T: Send>() -> Empty<T> {
Empty {
marker: PhantomData,
}
}
/// Iterator adaptor for [the `empty()` function].
///
/// [the `empty()` function]: empty()
pub struct Empty<T> {
marker: PhantomData<T>,
}
impl<T> Clone for Empty<T> {
fn clone(&self) -> Self {
Empty {
marker: PhantomData,
}
}
}
impl<T: Send> fmt::Debug for Empty<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.pad("Empty")
}
}
impl<T: Send> ParallelIterator for Empty<T> {
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.drive(consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(0)
}
}
impl<T: Send> IndexedParallelIterator for Empty<T> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
consumer.into_folder().complete()
}
fn len(&self) -> usize {
0
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
callback.callback(EmptyProducer(PhantomData))
}
}
/// Private empty producer
struct EmptyProducer<T: Send>(PhantomData<T>);
impl<T: Send> Producer for EmptyProducer<T> {
type Item = T;
type IntoIter = std::iter::Empty<T>;
fn into_iter(self) -> Self::IntoIter {
std::iter::empty()
}
fn split_at(self, index: usize) -> (Self, Self) {
debug_assert_eq!(index, 0);
(self, EmptyProducer(PhantomData))
}
fn fold_with<F>(self, folder: F) -> F
where
F: Folder<Self::Item>,
{
folder
}
}

128
vendor/rayon/src/iter/enumerate.rs vendored Normal file

@@ -0,0 +1,128 @@
use super::plumbing::*;
use super::*;
use std::iter;
use std::ops::Range;
/// `Enumerate` is an iterator that returns the current count along with the element.
/// This struct is created by the [`enumerate()`] method on [`IndexedParallelIterator`].
///
/// [`enumerate()`]: IndexedParallelIterator::enumerate()
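///
/// # Examples
///
/// A minimal usage sketch (added illustration; not part of the original file):
///
/// ```
/// use rayon::prelude::*;
///
/// let chars: Vec<(usize, char)> = ['a', 'b', 'c'].into_par_iter().enumerate().collect();
/// assert_eq!(chars, vec![(0, 'a'), (1, 'b'), (2, 'c')]);
/// ```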
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct Enumerate<I> {
base: I,
}
impl<I> Enumerate<I> {
/// Creates a new `Enumerate` iterator.
pub(super) fn new(base: I) -> Self {
Enumerate { base }
}
}
impl<I> ParallelIterator for Enumerate<I>
where
I: IndexedParallelIterator,
{
type Item = (usize, I::Item);
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<I> IndexedParallelIterator for Enumerate<I>
where
I: IndexedParallelIterator,
{
fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
bridge(self, consumer)
}
fn len(&self) -> usize {
self.base.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
return self.base.with_producer(Callback { callback });
struct Callback<CB> {
callback: CB,
}
impl<I, CB> ProducerCallback<I> for Callback<CB>
where
CB: ProducerCallback<(usize, I)>,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = I>,
{
let producer = EnumerateProducer { base, offset: 0 };
self.callback.callback(producer)
}
}
}
}
// ////////////////////////////////////////////////////////////////////////
// Producer implementation
struct EnumerateProducer<P> {
base: P,
offset: usize,
}
impl<P> Producer for EnumerateProducer<P>
where
P: Producer,
{
type Item = (usize, P::Item);
type IntoIter = iter::Zip<Range<usize>, P::IntoIter>;
fn into_iter(self) -> Self::IntoIter {
// Enumerate only works for IndexedParallelIterators. Since those
// have a max length of usize::MAX, their max index is
// usize::MAX - 1, so the range 0..usize::MAX includes all
// possible indices.
//
// However, we should use a precise end to the range; otherwise,
// reversing the iterator may have to walk back a long way before
// `Zip::next_back` can produce anything.
let base = self.base.into_iter();
let end = self.offset + base.len();
(self.offset..end).zip(base)
}
fn min_len(&self) -> usize {
self.base.min_len()
}
fn max_len(&self) -> usize {
self.base.max_len()
}
fn split_at(self, index: usize) -> (Self, Self) {
let (left, right) = self.base.split_at(index);
(
EnumerateProducer {
base: left,
offset: self.offset,
},
EnumerateProducer {
base: right,
offset: self.offset + index,
},
)
}
}

619
vendor/rayon/src/iter/extend.rs vendored Normal file

@@ -0,0 +1,619 @@
use super::noop::NoopConsumer;
use super::plumbing::{Consumer, Folder, Reducer, UnindexedConsumer};
use super::{IntoParallelIterator, ParallelExtend, ParallelIterator};
use either::Either;
use std::borrow::Cow;
use std::collections::LinkedList;
use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};
use std::collections::{BinaryHeap, VecDeque};
use std::ffi::{OsStr, OsString};
use std::hash::{BuildHasher, Hash};
/// Performs a generic `par_extend` by collecting to a `LinkedList<Vec<_>>` in
/// parallel, then extending the collection sequentially.
macro_rules! extend {
($self:ident, $par_iter:ident) => {
extend!($self <- fast_collect($par_iter))
};
($self:ident <- $vecs:expr) => {
match $vecs {
Either::Left(vec) => $self.extend(vec),
Either::Right(list) => {
for vec in list {
$self.extend(vec);
}
}
}
};
}
macro_rules! extend_reserved {
($self:ident, $par_iter:ident, $len:ident) => {
let vecs = fast_collect($par_iter);
$self.reserve($len(&vecs));
extend!($self <- vecs)
};
($self:ident, $par_iter:ident) => {
extend_reserved!($self, $par_iter, len)
};
}
/// Computes the total length of a `fast_collect` result.
fn len<T>(vecs: &Either<Vec<T>, LinkedList<Vec<T>>>) -> usize {
match vecs {
Either::Left(vec) => vec.len(),
Either::Right(list) => list.iter().map(Vec::len).sum(),
}
}
/// Computes the total string length of a `fast_collect` result.
fn string_len<T: AsRef<str>>(vecs: &Either<Vec<T>, LinkedList<Vec<T>>>) -> usize {
let strs = match vecs {
Either::Left(vec) => Either::Left(vec.iter()),
Either::Right(list) => Either::Right(list.iter().flatten()),
};
strs.map(AsRef::as_ref).map(str::len).sum()
}
/// Computes the total OS-string length of a `fast_collect` result.
fn osstring_len<T: AsRef<OsStr>>(vecs: &Either<Vec<T>, LinkedList<Vec<T>>>) -> usize {
let osstrs = match vecs {
Either::Left(vec) => Either::Left(vec.iter()),
Either::Right(list) => Either::Right(list.iter().flatten()),
};
osstrs.map(AsRef::as_ref).map(OsStr::len).sum()
}
pub(super) fn fast_collect<I, T>(pi: I) -> Either<Vec<T>, LinkedList<Vec<T>>>
where
I: IntoParallelIterator<Item = T>,
T: Send,
{
let par_iter = pi.into_par_iter();
match par_iter.opt_len() {
Some(len) => {
// Pseudo-specialization. See impl of ParallelExtend for Vec for more details.
let mut vec = Vec::new();
super::collect::special_extend(par_iter, len, &mut vec);
Either::Left(vec)
}
None => Either::Right(par_iter.drive_unindexed(ListVecConsumer)),
}
}
struct ListVecConsumer;
struct ListVecFolder<T> {
vec: Vec<T>,
}
impl<T: Send> Consumer<T> for ListVecConsumer {
type Folder = ListVecFolder<T>;
type Reducer = ListReducer;
type Result = LinkedList<Vec<T>>;
fn split_at(self, _index: usize) -> (Self, Self, Self::Reducer) {
(Self, Self, ListReducer)
}
fn into_folder(self) -> Self::Folder {
ListVecFolder { vec: Vec::new() }
}
fn full(&self) -> bool {
false
}
}
impl<T: Send> UnindexedConsumer<T> for ListVecConsumer {
fn split_off_left(&self) -> Self {
Self
}
fn to_reducer(&self) -> Self::Reducer {
ListReducer
}
}
impl<T> Folder<T> for ListVecFolder<T> {
type Result = LinkedList<Vec<T>>;
fn consume(mut self, item: T) -> Self {
self.vec.push(item);
self
}
fn consume_iter<I>(mut self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
self.vec.extend(iter);
self
}
fn complete(self) -> Self::Result {
let mut list = LinkedList::new();
if !self.vec.is_empty() {
list.push_back(self.vec);
}
list
}
fn full(&self) -> bool {
false
}
}
/// Extends a binary heap with items from a parallel iterator.
impl<T> ParallelExtend<T> for BinaryHeap<T>
where
T: Ord + Send,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = T>,
{
extend_reserved!(self, par_iter);
}
}
/// Extends a binary heap with copied items from a parallel iterator.
impl<'a, T> ParallelExtend<&'a T> for BinaryHeap<T>
where
T: 'a + Copy + Ord + Send + Sync,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = &'a T>,
{
extend_reserved!(self, par_iter);
}
}
/// Extends a B-tree map with items from a parallel iterator.
impl<K, V> ParallelExtend<(K, V)> for BTreeMap<K, V>
where
K: Ord + Send,
V: Send,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = (K, V)>,
{
extend!(self, par_iter);
}
}
/// Extends a B-tree map with copied items from a parallel iterator.
impl<'a, K: 'a, V: 'a> ParallelExtend<(&'a K, &'a V)> for BTreeMap<K, V>
where
K: Copy + Ord + Send + Sync,
V: Copy + Send + Sync,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = (&'a K, &'a V)>,
{
extend!(self, par_iter);
}
}
/// Extends a B-tree set with items from a parallel iterator.
impl<T> ParallelExtend<T> for BTreeSet<T>
where
T: Ord + Send,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = T>,
{
extend!(self, par_iter);
}
}
/// Extends a B-tree set with copied items from a parallel iterator.
impl<'a, T> ParallelExtend<&'a T> for BTreeSet<T>
where
T: 'a + Copy + Ord + Send + Sync,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = &'a T>,
{
extend!(self, par_iter);
}
}
/// Extends a hash map with items from a parallel iterator.
impl<K, V, S> ParallelExtend<(K, V)> for HashMap<K, V, S>
where
K: Eq + Hash + Send,
V: Send,
S: BuildHasher + Send,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = (K, V)>,
{
// See the map_collect benchmarks in rayon-demo for different strategies.
extend_reserved!(self, par_iter);
}
}
/// Extends a hash map with copied items from a parallel iterator.
impl<'a, K: 'a, V: 'a, S> ParallelExtend<(&'a K, &'a V)> for HashMap<K, V, S>
where
K: Copy + Eq + Hash + Send + Sync,
V: Copy + Send + Sync,
S: BuildHasher + Send,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = (&'a K, &'a V)>,
{
extend_reserved!(self, par_iter);
}
}
/// Extends a hash set with items from a parallel iterator.
impl<T, S> ParallelExtend<T> for HashSet<T, S>
where
T: Eq + Hash + Send,
S: BuildHasher + Send,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = T>,
{
extend_reserved!(self, par_iter);
}
}
/// Extends a hash set with copied items from a parallel iterator.
impl<'a, T, S> ParallelExtend<&'a T> for HashSet<T, S>
where
T: 'a + Copy + Eq + Hash + Send + Sync,
S: BuildHasher + Send,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = &'a T>,
{
extend_reserved!(self, par_iter);
}
}
/// Extends a linked list with items from a parallel iterator.
impl<T> ParallelExtend<T> for LinkedList<T>
where
T: Send,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = T>,
{
let mut list = par_iter.into_par_iter().drive_unindexed(ListConsumer);
self.append(&mut list);
}
}
/// Extends a linked list with copied items from a parallel iterator.
impl<'a, T> ParallelExtend<&'a T> for LinkedList<T>
where
T: 'a + Copy + Send + Sync,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = &'a T>,
{
self.par_extend(par_iter.into_par_iter().copied())
}
}
struct ListConsumer;
struct ListFolder<T> {
list: LinkedList<T>,
}
struct ListReducer;
impl<T: Send> Consumer<T> for ListConsumer {
type Folder = ListFolder<T>;
type Reducer = ListReducer;
type Result = LinkedList<T>;
fn split_at(self, _index: usize) -> (Self, Self, Self::Reducer) {
(Self, Self, ListReducer)
}
fn into_folder(self) -> Self::Folder {
ListFolder {
list: LinkedList::new(),
}
}
fn full(&self) -> bool {
false
}
}
impl<T: Send> UnindexedConsumer<T> for ListConsumer {
fn split_off_left(&self) -> Self {
Self
}
fn to_reducer(&self) -> Self::Reducer {
ListReducer
}
}
impl<T> Folder<T> for ListFolder<T> {
type Result = LinkedList<T>;
fn consume(mut self, item: T) -> Self {
self.list.push_back(item);
self
}
fn consume_iter<I>(mut self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
self.list.extend(iter);
self
}
fn complete(self) -> Self::Result {
self.list
}
fn full(&self) -> bool {
false
}
}
impl<T> Reducer<LinkedList<T>> for ListReducer {
fn reduce(self, mut left: LinkedList<T>, mut right: LinkedList<T>) -> LinkedList<T> {
left.append(&mut right);
left
}
}
/// Extends an OS-string with string slices from a parallel iterator.
impl<'a> ParallelExtend<&'a OsStr> for OsString {
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = &'a OsStr>,
{
extend_reserved!(self, par_iter, osstring_len);
}
}
/// Extends an OS-string with strings from a parallel iterator.
impl ParallelExtend<OsString> for OsString {
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = OsString>,
{
extend_reserved!(self, par_iter, osstring_len);
}
}
/// Extends an OS-string with string slices from a parallel iterator.
impl<'a> ParallelExtend<Cow<'a, OsStr>> for OsString {
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = Cow<'a, OsStr>>,
{
extend_reserved!(self, par_iter, osstring_len);
}
}
/// Extends a string with characters from a parallel iterator.
impl ParallelExtend<char> for String {
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = char>,
{
// This is like `extend`, but `Vec<char>` is less efficient to deal
// with than `String`, so instead collect to `LinkedList<String>`.
let list = par_iter.into_par_iter().drive_unindexed(ListStringConsumer);
self.reserve(list.iter().map(String::len).sum());
self.extend(list);
}
}
/// Extends a string with copied characters from a parallel iterator.
impl<'a> ParallelExtend<&'a char> for String {
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = &'a char>,
{
self.par_extend(par_iter.into_par_iter().copied())
}
}
struct ListStringConsumer;
struct ListStringFolder {
string: String,
}
impl Consumer<char> for ListStringConsumer {
type Folder = ListStringFolder;
type Reducer = ListReducer;
type Result = LinkedList<String>;
fn split_at(self, _index: usize) -> (Self, Self, Self::Reducer) {
(Self, Self, ListReducer)
}
fn into_folder(self) -> Self::Folder {
ListStringFolder {
string: String::new(),
}
}
fn full(&self) -> bool {
false
}
}
impl UnindexedConsumer<char> for ListStringConsumer {
fn split_off_left(&self) -> Self {
Self
}
fn to_reducer(&self) -> Self::Reducer {
ListReducer
}
}
impl Folder<char> for ListStringFolder {
type Result = LinkedList<String>;
fn consume(mut self, item: char) -> Self {
self.string.push(item);
self
}
fn consume_iter<I>(mut self, iter: I) -> Self
where
I: IntoIterator<Item = char>,
{
self.string.extend(iter);
self
}
fn complete(self) -> Self::Result {
let mut list = LinkedList::new();
if !self.string.is_empty() {
list.push_back(self.string);
}
list
}
fn full(&self) -> bool {
false
}
}
/// Extends a string with string slices from a parallel iterator.
impl<'a> ParallelExtend<&'a str> for String {
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = &'a str>,
{
extend_reserved!(self, par_iter, string_len);
}
}
/// Extends a string with strings from a parallel iterator.
impl ParallelExtend<String> for String {
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = String>,
{
extend_reserved!(self, par_iter, string_len);
}
}
/// Extends a string with boxed strings from a parallel iterator.
impl ParallelExtend<Box<str>> for String {
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = Box<str>>,
{
extend_reserved!(self, par_iter, string_len);
}
}
/// Extends a string with string slices from a parallel iterator.
impl<'a> ParallelExtend<Cow<'a, str>> for String {
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = Cow<'a, str>>,
{
extend_reserved!(self, par_iter, string_len);
}
}
/// Extends a deque with items from a parallel iterator.
impl<T> ParallelExtend<T> for VecDeque<T>
where
T: Send,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = T>,
{
extend_reserved!(self, par_iter);
}
}
/// Extends a deque with copied items from a parallel iterator.
impl<'a, T> ParallelExtend<&'a T> for VecDeque<T>
where
T: 'a + Copy + Send + Sync,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = &'a T>,
{
extend_reserved!(self, par_iter);
}
}
/// Extends a vector with items from a parallel iterator.
impl<T> ParallelExtend<T> for Vec<T>
where
T: Send,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = T>,
{
// See the vec_collect benchmarks in rayon-demo for different strategies.
let par_iter = par_iter.into_par_iter();
match par_iter.opt_len() {
Some(len) => {
// When Rust gets specialization, we can get here for indexed iterators
// without relying on `opt_len`. Until then, `special_extend()` fakes
// an unindexed mode on the promise that `opt_len()` is accurate.
super::collect::special_extend(par_iter, len, self);
}
None => {
// This works like `extend`, but `Vec::append` is more efficient.
let list = par_iter.drive_unindexed(ListVecConsumer);
self.reserve(list.iter().map(Vec::len).sum());
for mut other in list {
self.append(&mut other);
}
}
}
}
}
/// Extends a vector with copied items from a parallel iterator.
impl<'a, T> ParallelExtend<&'a T> for Vec<T>
where
T: 'a + Copy + Send + Sync,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = &'a T>,
{
self.par_extend(par_iter.into_par_iter().copied())
}
}
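// Illustrative use of `par_extend` on a `Vec` (an added sketch, not part of
// the original file):
//
// use rayon::prelude::*;
// let mut v = vec![0];
// v.par_extend((1..4).into_par_iter());
// assert_eq!(v, vec![0, 1, 2, 3]);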
/// Collapses all unit items from a parallel iterator into one.
impl ParallelExtend<()> for () {
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = ()>,
{
par_iter.into_par_iter().drive_unindexed(NoopConsumer)
}
}

137
vendor/rayon/src/iter/filter.rs vendored Normal file

@@ -0,0 +1,137 @@
use super::plumbing::*;
use super::*;
use std::fmt::{self, Debug};
/// `Filter` takes a predicate `filter_op` and yields only the elements that match.
/// This struct is created by the [`filter()`] method on [`ParallelIterator`].
///
/// [`filter()`]: ParallelIterator::filter()
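///
/// # Examples
///
/// A minimal usage sketch (added illustration; not part of the original file):
///
/// ```
/// use rayon::prelude::*;
///
/// let evens: Vec<i32> = (0..10).into_par_iter().filter(|x| x % 2 == 0).collect();
/// assert_eq!(evens, vec![0, 2, 4, 6, 8]);
/// ```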
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct Filter<I, P> {
base: I,
filter_op: P,
}
impl<I: Debug, P> Debug for Filter<I, P> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Filter").field("base", &self.base).finish()
}
}
impl<I, P> Filter<I, P> {
/// Creates a new `Filter` iterator.
pub(super) fn new(base: I, filter_op: P) -> Self {
Filter { base, filter_op }
}
}
impl<I, P> ParallelIterator for Filter<I, P>
where
I: ParallelIterator,
P: Fn(&I::Item) -> bool + Sync + Send,
{
type Item = I::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer1 = FilterConsumer::new(consumer, &self.filter_op);
self.base.drive_unindexed(consumer1)
}
}
// ////////////////////////////////////////////////////////////////////////
// Consumer implementation
struct FilterConsumer<'p, C, P> {
base: C,
filter_op: &'p P,
}
impl<'p, C, P> FilterConsumer<'p, C, P> {
fn new(base: C, filter_op: &'p P) -> Self {
FilterConsumer { base, filter_op }
}
}
impl<'p, T, C, P: 'p> Consumer<T> for FilterConsumer<'p, C, P>
where
C: Consumer<T>,
P: Fn(&T) -> bool + Sync,
{
type Folder = FilterFolder<'p, C::Folder, P>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, C::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
FilterConsumer::new(left, self.filter_op),
FilterConsumer::new(right, self.filter_op),
reducer,
)
}
fn into_folder(self) -> Self::Folder {
FilterFolder {
base: self.base.into_folder(),
filter_op: self.filter_op,
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<'p, T, C, P: 'p> UnindexedConsumer<T> for FilterConsumer<'p, C, P>
where
C: UnindexedConsumer<T>,
P: Fn(&T) -> bool + Sync,
{
fn split_off_left(&self) -> Self {
FilterConsumer::new(self.base.split_off_left(), self.filter_op)
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct FilterFolder<'p, C, P> {
base: C,
filter_op: &'p P,
}
impl<'p, C, P, T> Folder<T> for FilterFolder<'p, C, P>
where
C: Folder<T>,
P: Fn(&T) -> bool + 'p,
{
type Result = C::Result;
fn consume(self, item: T) -> Self {
let filter_op = self.filter_op;
if filter_op(&item) {
let base = self.base.consume(item);
FilterFolder { base, filter_op }
} else {
self
}
}
// This cannot easily specialize `consume_iter` to be better than
// the default, because that requires checking `self.base.full()`
// during a call to `self.base.consume_iter()`. (#632)
fn complete(self) -> Self::Result {
self.base.complete()
}
fn full(&self) -> bool {
self.base.full()
}
}

141
vendor/rayon/src/iter/filter_map.rs vendored Normal file

@@ -0,0 +1,141 @@
use super::plumbing::*;
use super::*;
use std::fmt::{self, Debug};
/// `FilterMap` creates an iterator that uses `filter_op` to both filter and map elements.
/// This struct is created by the [`filter_map()`] method on [`ParallelIterator`].
///
/// [`filter_map()`]: ParallelIterator::filter_map()
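///
/// # Examples
///
/// A minimal usage sketch (added illustration; not part of the original file):
///
/// ```
/// use rayon::prelude::*;
///
/// let parsed: Vec<i32> = ["1", "two", "3"].into_par_iter()
/// .filter_map(|s| s.parse().ok())
/// .collect();
/// assert_eq!(parsed, vec![1, 3]);
/// ```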
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct FilterMap<I, P> {
base: I,
filter_op: P,
}
impl<I: Debug, P> Debug for FilterMap<I, P> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("FilterMap")
.field("base", &self.base)
.finish()
}
}
impl<I, P> FilterMap<I, P> {
/// Creates a new `FilterMap` iterator.
pub(super) fn new(base: I, filter_op: P) -> Self {
FilterMap { base, filter_op }
}
}
impl<I, P, R> ParallelIterator for FilterMap<I, P>
where
I: ParallelIterator,
P: Fn(I::Item) -> Option<R> + Sync + Send,
R: Send,
{
type Item = R;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer = FilterMapConsumer::new(consumer, &self.filter_op);
self.base.drive_unindexed(consumer)
}
}
// ////////////////////////////////////////////////////////////////////////
// Consumer implementation
struct FilterMapConsumer<'p, C, P> {
base: C,
filter_op: &'p P,
}
impl<'p, C, P: 'p> FilterMapConsumer<'p, C, P> {
fn new(base: C, filter_op: &'p P) -> Self {
FilterMapConsumer { base, filter_op }
}
}
impl<'p, T, U, C, P> Consumer<T> for FilterMapConsumer<'p, C, P>
where
C: Consumer<U>,
P: Fn(T) -> Option<U> + Sync + 'p,
{
type Folder = FilterMapFolder<'p, C::Folder, P>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
FilterMapConsumer::new(left, self.filter_op),
FilterMapConsumer::new(right, self.filter_op),
reducer,
)
}
fn into_folder(self) -> Self::Folder {
let base = self.base.into_folder();
FilterMapFolder {
base,
filter_op: self.filter_op,
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<'p, T, U, C, P> UnindexedConsumer<T> for FilterMapConsumer<'p, C, P>
where
C: UnindexedConsumer<U>,
P: Fn(T) -> Option<U> + Sync + 'p,
{
fn split_off_left(&self) -> Self {
FilterMapConsumer::new(self.base.split_off_left(), self.filter_op)
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct FilterMapFolder<'p, C, P> {
base: C,
filter_op: &'p P,
}
impl<'p, T, U, C, P> Folder<T> for FilterMapFolder<'p, C, P>
where
C: Folder<U>,
P: Fn(T) -> Option<U> + Sync + 'p,
{
type Result = C::Result;
fn consume(self, item: T) -> Self {
let filter_op = self.filter_op;
if let Some(mapped_item) = filter_op(item) {
let base = self.base.consume(mapped_item);
FilterMapFolder { base, filter_op }
} else {
self
}
}
// This cannot easily specialize `consume_iter` to be better than
// the default, because that requires checking `self.base.full()`
// during a call to `self.base.consume_iter()`. (#632)
fn complete(self) -> C::Result {
self.base.complete()
}
fn full(&self) -> bool {
self.base.full()
}
}

120
vendor/rayon/src/iter/find.rs vendored Normal file

@@ -0,0 +1,120 @@
use super::plumbing::*;
use super::*;
use std::sync::atomic::{AtomicBool, Ordering};
pub(super) fn find<I, P>(pi: I, find_op: P) -> Option<I::Item>
where
I: ParallelIterator,
P: Fn(&I::Item) -> bool + Sync,
{
let found = AtomicBool::new(false);
let consumer = FindConsumer::new(&find_op, &found);
pi.drive_unindexed(consumer)
}
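// Added note: this is the plumbing behind `ParallelIterator::find_any()`.
// For example, `(0..100).into_par_iter().find_any(|&x| x % 7 == 6)` drives a
// `FindConsumer`; the first thread to match stores `true` into `found`, after
// which every other folder reports `full()` and stops consuming early.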
struct FindConsumer<'p, P> {
find_op: &'p P,
found: &'p AtomicBool,
}
impl<'p, P> FindConsumer<'p, P> {
fn new(find_op: &'p P, found: &'p AtomicBool) -> Self {
FindConsumer { find_op, found }
}
}
impl<'p, T, P: 'p> Consumer<T> for FindConsumer<'p, P>
where
T: Send,
P: Fn(&T) -> bool + Sync,
{
type Folder = FindFolder<'p, T, P>;
type Reducer = FindReducer;
type Result = Option<T>;
fn split_at(self, _index: usize) -> (Self, Self, Self::Reducer) {
(self.split_off_left(), self, FindReducer)
}
fn into_folder(self) -> Self::Folder {
FindFolder {
find_op: self.find_op,
found: self.found,
item: None,
}
}
fn full(&self) -> bool {
self.found.load(Ordering::Relaxed)
}
}
impl<'p, T, P: 'p> UnindexedConsumer<T> for FindConsumer<'p, P>
where
T: Send,
P: Fn(&T) -> bool + Sync,
{
fn split_off_left(&self) -> Self {
FindConsumer::new(self.find_op, self.found)
}
fn to_reducer(&self) -> Self::Reducer {
FindReducer
}
}
struct FindFolder<'p, T, P> {
find_op: &'p P,
found: &'p AtomicBool,
item: Option<T>,
}
impl<'p, T, P> Folder<T> for FindFolder<'p, T, P>
where
P: Fn(&T) -> bool + 'p,
{
type Result = Option<T>;
fn consume(mut self, item: T) -> Self {
if (self.find_op)(&item) {
self.found.store(true, Ordering::Relaxed);
self.item = Some(item);
}
self
}
fn consume_iter<I>(mut self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
fn not_full<T>(found: &AtomicBool) -> impl Fn(&T) -> bool + '_ {
move |_| !found.load(Ordering::Relaxed)
}
self.item = iter
.into_iter()
// stop iterating if another thread has found something
.take_while(not_full(self.found))
.find(self.find_op);
if self.item.is_some() {
self.found.store(true, Ordering::Relaxed)
}
self
}
fn complete(self) -> Self::Result {
self.item
}
fn full(&self) -> bool {
self.found.load(Ordering::Relaxed)
}
}
struct FindReducer;
impl<T> Reducer<Option<T>> for FindReducer {
fn reduce(self, left: Option<T>, right: Option<T>) -> Option<T> {
left.or(right)
}
}

230
vendor/rayon/src/iter/find_first_last/mod.rs vendored Normal file

@@ -0,0 +1,230 @@
use super::plumbing::*;
use super::*;
use std::cell::Cell;
use std::sync::atomic::{AtomicUsize, Ordering};
#[cfg(test)]
mod test;
// The key optimization for find_first is that a consumer can stop its search if
// some consumer to its left already found a match (and similarly for consumers
// to the right for find_last). To make this work, all consumers need some
// notion of their position in the data relative to other consumers, including
// unindexed consumers that have no built-in notion of position.
//
// To solve this, we assign each consumer a lower and upper bound for an
// imaginary "range" of data that it consumes. The initial consumer starts with
// the range 0..usize::MAX. The split divides this range in half so that
// one resulting consumer has the range 0..(usize::MAX / 2), and the
// other has (usize::MAX / 2)..usize::max_value(). Every subsequent
// split divides the range in half again until it cannot be split anymore
// (i.e. its length is 1), in which case the split returns two consumers with
// the same range. In that case both consumers will continue to consume all
// their data regardless of whether a better match is found, but the reducer
// will still return the correct answer.
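//
// Worked example (added note): the first `split_off_left` of the initial
// 0..usize::MAX range yields 0..(usize::MAX / 2) on the left and
// (usize::MAX / 2)..usize::MAX on the right. In a leftmost search, a match
// recorded at boundary 0 is strictly better than one at usize::MAX / 2, so
// the right-hand consumer's `full()` becomes true and it can stop early.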
#[derive(Copy, Clone)]
enum MatchPosition {
Leftmost,
Rightmost,
}
/// Returns `true` if `pos1` is a better match than `pos2` according to `MatchPosition`.
#[inline]
fn better_position(pos1: usize, pos2: usize, mp: MatchPosition) -> bool {
match mp {
MatchPosition::Leftmost => pos1 < pos2,
MatchPosition::Rightmost => pos1 > pos2,
}
}
pub(super) fn find_first<I, P>(pi: I, find_op: P) -> Option<I::Item>
where
I: ParallelIterator,
P: Fn(&I::Item) -> bool + Sync,
{
let best_found = AtomicUsize::new(usize::MAX);
let consumer = FindConsumer::new(&find_op, MatchPosition::Leftmost, &best_found);
pi.drive_unindexed(consumer)
}
pub(super) fn find_last<I, P>(pi: I, find_op: P) -> Option<I::Item>
where
I: ParallelIterator,
P: Fn(&I::Item) -> bool + Sync,
{
let best_found = AtomicUsize::new(0);
let consumer = FindConsumer::new(&find_op, MatchPosition::Rightmost, &best_found);
pi.drive_unindexed(consumer)
}
struct FindConsumer<'p, P> {
find_op: &'p P,
lower_bound: Cell<usize>,
upper_bound: usize,
match_position: MatchPosition,
best_found: &'p AtomicUsize,
}
impl<'p, P> FindConsumer<'p, P> {
fn new(find_op: &'p P, match_position: MatchPosition, best_found: &'p AtomicUsize) -> Self {
FindConsumer {
find_op,
lower_bound: Cell::new(0),
upper_bound: usize::MAX,
match_position,
best_found,
}
}
fn current_index(&self) -> usize {
match self.match_position {
MatchPosition::Leftmost => self.lower_bound.get(),
MatchPosition::Rightmost => self.upper_bound,
}
}
}
impl<'p, T, P> Consumer<T> for FindConsumer<'p, P>
where
T: Send,
P: Fn(&T) -> bool + Sync,
{
type Folder = FindFolder<'p, T, P>;
type Reducer = FindReducer;
type Result = Option<T>;
fn split_at(self, _index: usize) -> (Self, Self, Self::Reducer) {
let dir = self.match_position;
(
self.split_off_left(),
self,
FindReducer {
match_position: dir,
},
)
}
fn into_folder(self) -> Self::Folder {
FindFolder {
find_op: self.find_op,
boundary: self.current_index(),
match_position: self.match_position,
best_found: self.best_found,
item: None,
}
}
fn full(&self) -> bool {
// can stop consuming if the best found index so far is *strictly*
// better than anything this consumer will find
better_position(
self.best_found.load(Ordering::Relaxed),
self.current_index(),
self.match_position,
)
}
}
impl<'p, T, P> UnindexedConsumer<T> for FindConsumer<'p, P>
where
T: Send,
P: Fn(&T) -> bool + Sync,
{
fn split_off_left(&self) -> Self {
// Upper bound for one consumer will be lower bound for the other. This
// overlap is okay, because only one of the bounds will be used for
// comparing against best_found; the other is kept only to be able to
// divide the range in half.
//
// When the resolution of usize has been exhausted (i.e. when
// upper_bound = lower_bound), both results of this split will have the
// same range. When that happens, we lose the ability to tell one
// consumer to stop working when the other finds a better match, but the
// reducer ensures that the best answer is still returned (see the test
// above).
let old_lower_bound = self.lower_bound.get();
let median = old_lower_bound + ((self.upper_bound - old_lower_bound) / 2);
self.lower_bound.set(median);
FindConsumer {
find_op: self.find_op,
lower_bound: Cell::new(old_lower_bound),
upper_bound: median,
match_position: self.match_position,
best_found: self.best_found,
}
}
fn to_reducer(&self) -> Self::Reducer {
FindReducer {
match_position: self.match_position,
}
}
}
struct FindFolder<'p, T, P> {
find_op: &'p P,
boundary: usize,
match_position: MatchPosition,
best_found: &'p AtomicUsize,
item: Option<T>,
}
impl<'p, P: 'p + Fn(&T) -> bool, T> Folder<T> for FindFolder<'p, T, P> {
type Result = Option<T>;
fn consume(mut self, item: T) -> Self {
let found_best_in_range = match self.match_position {
MatchPosition::Leftmost => self.item.is_some(),
MatchPosition::Rightmost => false,
};
if !found_best_in_range && (self.find_op)(&item) {
// Update the best found index if ours is better.
let update =
self.best_found
.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |current| {
better_position(self.boundary, current, self.match_position)
.then_some(self.boundary)
});
// Save this item if our index was better or equal.
if update.is_ok() || update == Err(self.boundary) {
self.item = Some(item);
}
}
self
}
fn complete(self) -> Self::Result {
self.item
}
fn full(&self) -> bool {
let found_best_in_range = match self.match_position {
MatchPosition::Leftmost => self.item.is_some(),
MatchPosition::Rightmost => false,
};
found_best_in_range
|| better_position(
self.best_found.load(Ordering::Relaxed),
self.boundary,
self.match_position,
)
}
}
struct FindReducer {
match_position: MatchPosition,
}
impl<T> Reducer<Option<T>> for FindReducer {
fn reduce(self, left: Option<T>, right: Option<T>) -> Option<T> {
match self.match_position {
MatchPosition::Leftmost => left.or(right),
MatchPosition::Rightmost => right.or(left),
}
}
}

102
vendor/rayon/src/iter/find_first_last/test.rs vendored Normal file

@@ -0,0 +1,102 @@
use super::*;
#[test]
fn same_range_first_consumers_return_correct_answer() {
let find_op = |x: &i32| x % 2 == 0;
let first_found = AtomicUsize::new(usize::MAX);
let far_right_consumer = FindConsumer::new(&find_op, MatchPosition::Leftmost, &first_found);
// We save a consumer that will be far to the right of the main consumer (and therefore not
// sharing an index range with that consumer) for fullness testing.
let consumer = far_right_consumer.split_off_left();
// split until we have an indivisible range
for _ in 0..usize::BITS {
consumer.split_off_left();
}
let reducer = consumer.to_reducer();
// the left and right folders should now have the same range, having
// exhausted the resolution of usize
let left_folder = consumer.split_off_left().into_folder();
let right_folder = consumer.into_folder();
let left_folder = left_folder.consume(0).consume(1);
assert_eq!(left_folder.boundary, right_folder.boundary);
// expect not full even though a better match has been found because the
// ranges are the same
assert!(!right_folder.full());
assert!(far_right_consumer.full());
let right_folder = right_folder.consume(2).consume(3);
assert_eq!(
reducer.reduce(left_folder.complete(), right_folder.complete()),
Some(0)
);
}
#[test]
fn same_range_last_consumers_return_correct_answer() {
let find_op = |x: &i32| x % 2 == 0;
let last_found = AtomicUsize::new(0);
let consumer = FindConsumer::new(&find_op, MatchPosition::Rightmost, &last_found);
// We save a consumer that will be far to the left of the main consumer (and therefore not
// sharing an index range with that consumer) for fullness testing.
let far_left_consumer = consumer.split_off_left();
// split until we have an indivisible range
for _ in 0..usize::BITS {
consumer.split_off_left();
}
let reducer = consumer.to_reducer();
// due to the exact calculation in split_off_left, the very last consumer has a
// range of width 2, so we use the second-to-last consumer instead to get
// the same boundary on both folders
let consumer = consumer.split_off_left();
let left_folder = consumer.split_off_left().into_folder();
let right_folder = consumer.into_folder();
let right_folder = right_folder.consume(2).consume(3);
assert_eq!(left_folder.boundary, right_folder.boundary);
// expect not full even though a better match has been found because the
// ranges are the same
assert!(!left_folder.full());
assert!(far_left_consumer.full());
let left_folder = left_folder.consume(0).consume(1);
assert_eq!(
reducer.reduce(left_folder.complete(), right_folder.complete()),
Some(2)
);
}
// These tests require that a folder be assigned to an iterator with more than
// one element. We can't necessarily determine when that will happen for a given
// input to find_first/find_last, so we test the folder directly here instead.
#[test]
fn find_first_folder_does_not_clobber_first_found() {
let best_found = AtomicUsize::new(usize::MAX);
let f = FindFolder {
find_op: &(|&_: &i32| -> bool { true }),
boundary: 0,
match_position: MatchPosition::Leftmost,
best_found: &best_found,
item: None,
};
let f = f.consume(0_i32).consume(1_i32).consume(2_i32);
assert!(f.full());
assert_eq!(f.complete(), Some(0_i32));
}
#[test]
fn find_last_folder_yields_last_match() {
let best_found = AtomicUsize::new(0);
let f = FindFolder {
find_op: &(|&_: &i32| -> bool { true }),
boundary: 0,
match_position: MatchPosition::Rightmost,
best_found: &best_found,
item: None,
};
let f = f.consume(0_i32).consume(1_i32).consume(2_i32);
assert_eq!(f.complete(), Some(2_i32));
}

153
vendor/rayon/src/iter/flat_map.rs vendored Normal file
View File

@@ -0,0 +1,153 @@
use super::plumbing::*;
use super::*;
use std::fmt::{self, Debug};
/// `FlatMap` maps each element to a parallel iterator, then flattens these iterators together.
/// This struct is created by the [`flat_map()`] method on [`ParallelIterator`].
///
/// [`flat_map()`]: ParallelIterator::flat_map()
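///
/// # Examples
///
/// A minimal usage sketch (added illustration; not part of the original file):
///
/// ```
/// use rayon::prelude::*;
///
/// let scaled: Vec<i32> = vec![1, 2, 3]
/// .into_par_iter()
/// .flat_map(|x| vec![x, x * 10])
/// .collect();
/// assert_eq!(scaled, vec![1, 10, 2, 20, 3, 30]);
/// ```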
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct FlatMap<I, F> {
base: I,
map_op: F,
}
impl<I: Debug, F> Debug for FlatMap<I, F> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("FlatMap").field("base", &self.base).finish()
}
}
impl<I, F> FlatMap<I, F> {
/// Creates a new `FlatMap` iterator.
pub(super) fn new(base: I, map_op: F) -> Self {
FlatMap { base, map_op }
}
}
impl<I, F, PI> ParallelIterator for FlatMap<I, F>
where
I: ParallelIterator,
F: Fn(I::Item) -> PI + Sync + Send,
PI: IntoParallelIterator,
{
type Item = PI::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer = FlatMapConsumer::new(consumer, &self.map_op);
self.base.drive_unindexed(consumer)
}
}
// ////////////////////////////////////////////////////////////////////////
// Consumer implementation
struct FlatMapConsumer<'f, C, F> {
base: C,
map_op: &'f F,
}
impl<'f, C, F> FlatMapConsumer<'f, C, F> {
fn new(base: C, map_op: &'f F) -> Self {
FlatMapConsumer { base, map_op }
}
}
impl<'f, T, U, C, F> Consumer<T> for FlatMapConsumer<'f, C, F>
where
C: UnindexedConsumer<U::Item>,
F: Fn(T) -> U + Sync,
U: IntoParallelIterator,
{
type Folder = FlatMapFolder<'f, C, F, C::Result>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, C::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
FlatMapConsumer::new(left, self.map_op),
FlatMapConsumer::new(right, self.map_op),
reducer,
)
}
fn into_folder(self) -> Self::Folder {
FlatMapFolder {
base: self.base,
map_op: self.map_op,
previous: None,
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<'f, T, U, C, F> UnindexedConsumer<T> for FlatMapConsumer<'f, C, F>
where
C: UnindexedConsumer<U::Item>,
F: Fn(T) -> U + Sync,
U: IntoParallelIterator,
{
fn split_off_left(&self) -> Self {
FlatMapConsumer::new(self.base.split_off_left(), self.map_op)
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct FlatMapFolder<'f, C, F, R> {
base: C,
map_op: &'f F,
previous: Option<R>,
}
impl<'f, T, U, C, F> Folder<T> for FlatMapFolder<'f, C, F, C::Result>
where
C: UnindexedConsumer<U::Item>,
F: Fn(T) -> U + Sync,
U: IntoParallelIterator,
{
type Result = C::Result;
fn consume(self, item: T) -> Self {
let map_op = self.map_op;
let par_iter = map_op(item).into_par_iter();
let consumer = self.base.split_off_left();
let result = par_iter.drive_unindexed(consumer);
let previous = match self.previous {
None => Some(result),
Some(previous) => {
let reducer = self.base.to_reducer();
Some(reducer.reduce(previous, result))
}
};
FlatMapFolder {
base: self.base,
map_op,
previous,
}
}
fn complete(self) -> Self::Result {
match self.previous {
Some(previous) => previous,
None => self.base.into_folder().complete(),
}
}
fn full(&self) -> bool {
self.base.full()
}
}

145
vendor/rayon/src/iter/flat_map_iter.rs vendored Normal file

@@ -0,0 +1,145 @@
use super::plumbing::*;
use super::*;
use std::fmt::{self, Debug};
/// `FlatMapIter` maps each element to a serial iterator, then flattens these iterators together.
/// This struct is created by the [`flat_map_iter()`] method on [`ParallelIterator`].
///
/// [`flat_map_iter()`]: ParallelIterator::flat_map_iter()
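///
/// # Examples
///
/// A minimal usage sketch (added illustration; not part of the original file):
///
/// ```
/// use rayon::prelude::*;
///
/// let repeated: Vec<i32> = vec![1, 2, 3]
/// .into_par_iter()
/// .flat_map_iter(|x| std::iter::repeat(x).take(2))
/// .collect();
/// assert_eq!(repeated, vec![1, 1, 2, 2, 3, 3]);
/// ```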
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct FlatMapIter<I, F> {
base: I,
map_op: F,
}
impl<I: Debug, F> Debug for FlatMapIter<I, F> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("FlatMapIter")
.field("base", &self.base)
.finish()
}
}
impl<I, F> FlatMapIter<I, F> {
/// Creates a new `FlatMapIter` iterator.
pub(super) fn new(base: I, map_op: F) -> Self {
FlatMapIter { base, map_op }
}
}
impl<I, F, SI> ParallelIterator for FlatMapIter<I, F>
where
I: ParallelIterator,
F: Fn(I::Item) -> SI + Sync + Send,
SI: IntoIterator<Item: Send>,
{
type Item = SI::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer = FlatMapIterConsumer::new(consumer, &self.map_op);
self.base.drive_unindexed(consumer)
}
}
// ////////////////////////////////////////////////////////////////////////
// Consumer implementation
struct FlatMapIterConsumer<'f, C, F> {
base: C,
map_op: &'f F,
}
impl<'f, C, F> FlatMapIterConsumer<'f, C, F> {
fn new(base: C, map_op: &'f F) -> Self {
FlatMapIterConsumer { base, map_op }
}
}
impl<'f, T, U, C, F> Consumer<T> for FlatMapIterConsumer<'f, C, F>
where
C: UnindexedConsumer<U::Item>,
F: Fn(T) -> U + Sync,
U: IntoIterator,
{
type Folder = FlatMapIterFolder<'f, C::Folder, F>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, C::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
FlatMapIterConsumer::new(left, self.map_op),
FlatMapIterConsumer::new(right, self.map_op),
reducer,
)
}
fn into_folder(self) -> Self::Folder {
FlatMapIterFolder {
base: self.base.into_folder(),
map_op: self.map_op,
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<'f, T, U, C, F> UnindexedConsumer<T> for FlatMapIterConsumer<'f, C, F>
where
C: UnindexedConsumer<U::Item>,
F: Fn(T) -> U + Sync,
U: IntoIterator,
{
fn split_off_left(&self) -> Self {
FlatMapIterConsumer::new(self.base.split_off_left(), self.map_op)
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct FlatMapIterFolder<'f, C, F> {
base: C,
map_op: &'f F,
}
impl<'f, T, U, C, F> Folder<T> for FlatMapIterFolder<'f, C, F>
where
C: Folder<U::Item>,
F: Fn(T) -> U,
U: IntoIterator,
{
type Result = C::Result;
fn consume(self, item: T) -> Self {
let map_op = self.map_op;
let base = self.base.consume_iter(map_op(item));
FlatMapIterFolder { base, map_op }
}
fn consume_iter<I>(self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
let map_op = self.map_op;
let iter = iter.into_iter().flat_map(map_op);
let base = self.base.consume_iter(iter);
FlatMapIterFolder { base, map_op }
}
fn complete(self) -> Self::Result {
self.base.complete()
}
fn full(&self) -> bool {
self.base.full()
}
}

134
vendor/rayon/src/iter/flatten.rs vendored Normal file

@@ -0,0 +1,134 @@
use super::plumbing::*;
use super::*;
/// `Flatten` turns each element into a parallel iterator, then flattens these iterators
/// together. This struct is created by the [`flatten()`] method on [`ParallelIterator`].
///
/// [`flatten()`]: ParallelIterator::flatten()
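///
/// # Examples
///
/// A minimal usage sketch (added illustration; not part of the original file):
///
/// ```
/// use rayon::prelude::*;
///
/// let nested = vec![vec![1, 2], vec![3, 4]];
/// let flat: Vec<i32> = nested.into_par_iter().flatten().collect();
/// assert_eq!(flat, vec![1, 2, 3, 4]);
/// ```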
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct Flatten<I> {
base: I,
}
impl<I> Flatten<I> {
/// Creates a new `Flatten` iterator.
pub(super) fn new(base: I) -> Self {
Flatten { base }
}
}
impl<I> ParallelIterator for Flatten<I>
where
I: ParallelIterator<Item: IntoParallelIterator>,
{
type Item = <I::Item as IntoParallelIterator>::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer = FlattenConsumer::new(consumer);
self.base.drive_unindexed(consumer)
}
}
// ////////////////////////////////////////////////////////////////////////
// Consumer implementation
struct FlattenConsumer<C> {
base: C,
}
impl<C> FlattenConsumer<C> {
fn new(base: C) -> Self {
FlattenConsumer { base }
}
}
impl<T, C> Consumer<T> for FlattenConsumer<C>
where
C: UnindexedConsumer<T::Item>,
T: IntoParallelIterator,
{
type Folder = FlattenFolder<C, C::Result>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, C::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
FlattenConsumer::new(left),
FlattenConsumer::new(right),
reducer,
)
}
fn into_folder(self) -> Self::Folder {
FlattenFolder {
base: self.base,
previous: None,
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<T, C> UnindexedConsumer<T> for FlattenConsumer<C>
where
C: UnindexedConsumer<T::Item>,
T: IntoParallelIterator,
{
fn split_off_left(&self) -> Self {
FlattenConsumer::new(self.base.split_off_left())
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct FlattenFolder<C, R> {
base: C,
previous: Option<R>,
}
impl<T, C> Folder<T> for FlattenFolder<C, C::Result>
where
C: UnindexedConsumer<T::Item>,
T: IntoParallelIterator,
{
type Result = C::Result;
fn consume(self, item: T) -> Self {
let par_iter = item.into_par_iter();
let consumer = self.base.split_off_left();
let result = par_iter.drive_unindexed(consumer);
let previous = match self.previous {
None => Some(result),
Some(previous) => {
let reducer = self.base.to_reducer();
Some(reducer.reduce(previous, result))
}
};
FlattenFolder {
base: self.base,
previous,
}
}
fn complete(self) -> Self::Result {
match self.previous {
Some(previous) => previous,
None => self.base.into_folder().complete(),
}
}
fn full(&self) -> bool {
self.base.full()
}
}

124
vendor/rayon/src/iter/flatten_iter.rs vendored Normal file

@@ -0,0 +1,124 @@
use super::plumbing::*;
use super::*;
/// `FlattenIter` turns each element into a serial iterator, then flattens these iterators
/// together. This struct is created by the [`flatten_iter()`] method on [`ParallelIterator`].
///
/// [`flatten_iter()`]: ParallelIterator::flatten_iter()
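///
/// # Examples
///
/// A minimal usage sketch (added illustration; not part of the original file):
///
/// ```
/// use rayon::prelude::*;
///
/// let ranges = vec![0..2, 2..4];
/// let flat: Vec<i32> = ranges.into_par_iter().flatten_iter().collect();
/// assert_eq!(flat, vec![0, 1, 2, 3]);
/// ```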
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct FlattenIter<I> {
base: I,
}
impl<I> FlattenIter<I> {
/// Creates a new `FlattenIter` iterator.
pub(super) fn new(base: I) -> Self {
FlattenIter { base }
}
}
impl<I> ParallelIterator for FlattenIter<I>
where
I: ParallelIterator<Item: IntoIterator<Item: Send>>,
{
type Item = <I::Item as IntoIterator>::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer = FlattenIterConsumer::new(consumer);
self.base.drive_unindexed(consumer)
}
}
// ////////////////////////////////////////////////////////////////////////
// Consumer implementation
struct FlattenIterConsumer<C> {
base: C,
}
impl<C> FlattenIterConsumer<C> {
fn new(base: C) -> Self {
FlattenIterConsumer { base }
}
}
impl<T, C> Consumer<T> for FlattenIterConsumer<C>
where
C: UnindexedConsumer<T::Item>,
T: IntoIterator,
{
type Folder = FlattenIterFolder<C::Folder>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, C::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
FlattenIterConsumer::new(left),
FlattenIterConsumer::new(right),
reducer,
)
}
fn into_folder(self) -> Self::Folder {
FlattenIterFolder {
base: self.base.into_folder(),
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<T, C> UnindexedConsumer<T> for FlattenIterConsumer<C>
where
C: UnindexedConsumer<T::Item>,
T: IntoIterator,
{
fn split_off_left(&self) -> Self {
FlattenIterConsumer::new(self.base.split_off_left())
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct FlattenIterFolder<C> {
base: C,
}
impl<T, C> Folder<T> for FlattenIterFolder<C>
where
C: Folder<T::Item>,
T: IntoIterator,
{
type Result = C::Result;
fn consume(self, item: T) -> Self {
let base = self.base.consume_iter(item);
FlattenIterFolder { base }
}
fn consume_iter<I>(self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
let iter = iter.into_iter().flatten();
let base = self.base.consume_iter(iter);
FlattenIterFolder { base }
}
fn complete(self) -> Self::Result {
self.base.complete()
}
fn full(&self) -> bool {
self.base.full()
}
}
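
For contrast with `Flatten`, a small sketch of `flatten_iter()` (illustrative, not part of the vendored file): the folder's `consume_iter` above feeds elements through a serial `.flatten()`, so inner collections are iterated sequentially within whatever split the outer iterator produced, which is cheaper when they are small.

use rayon::prelude::*;

fn main() {
    let nested = vec![vec![1, 2], vec![3], vec![4, 5, 6]];
    // Inner Vecs are walked serially inside each parallel job.
    let sum: i32 = nested.into_par_iter().flatten_iter().sum();
    assert_eq!(sum, 21);
}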

289
vendor/rayon/src/iter/fold.rs vendored Normal file
View File

@@ -0,0 +1,289 @@
use super::plumbing::*;
use super::*;
use std::fmt::{self, Debug};
impl<I, ID, F> Fold<I, ID, F> {
pub(super) fn new(base: I, identity: ID, fold_op: F) -> Self {
Fold {
base,
identity,
fold_op,
}
}
}
/// `Fold` is an iterator that applies a function over an iterator, producing a single value.
/// This struct is created by the [`fold()`] method on [`ParallelIterator`]
///
/// [`fold()`]: ParallelIterator::fold()
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct Fold<I, ID, F> {
base: I,
identity: ID,
fold_op: F,
}
impl<I: Debug, ID, F> Debug for Fold<I, ID, F> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Fold").field("base", &self.base).finish()
}
}
impl<U, I, ID, F> ParallelIterator for Fold<I, ID, F>
where
I: ParallelIterator,
F: Fn(U, I::Item) -> U + Sync + Send,
ID: Fn() -> U + Sync + Send,
U: Send,
{
type Item = U;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer1 = FoldConsumer {
base: consumer,
fold_op: &self.fold_op,
identity: &self.identity,
};
self.base.drive_unindexed(consumer1)
}
}
struct FoldConsumer<'c, C, ID, F> {
base: C,
fold_op: &'c F,
identity: &'c ID,
}
impl<'r, U, T, C, ID, F> Consumer<T> for FoldConsumer<'r, C, ID, F>
where
C: Consumer<U>,
F: Fn(U, T) -> U + Sync,
ID: Fn() -> U + Sync,
U: Send,
{
type Folder = FoldFolder<'r, C::Folder, U, F>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
FoldConsumer { base: left, ..self },
FoldConsumer {
base: right,
..self
},
reducer,
)
}
fn into_folder(self) -> Self::Folder {
FoldFolder {
base: self.base.into_folder(),
item: (self.identity)(),
fold_op: self.fold_op,
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<'r, U, T, C, ID, F> UnindexedConsumer<T> for FoldConsumer<'r, C, ID, F>
where
C: UnindexedConsumer<U>,
F: Fn(U, T) -> U + Sync,
ID: Fn() -> U + Sync,
U: Send,
{
fn split_off_left(&self) -> Self {
FoldConsumer {
base: self.base.split_off_left(),
..*self
}
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct FoldFolder<'r, C, ID, F> {
base: C,
fold_op: &'r F,
item: ID,
}
impl<'r, C, ID, F, T> Folder<T> for FoldFolder<'r, C, ID, F>
where
C: Folder<ID>,
F: Fn(ID, T) -> ID + Sync,
{
type Result = C::Result;
fn consume(self, item: T) -> Self {
let item = (self.fold_op)(self.item, item);
FoldFolder {
base: self.base,
fold_op: self.fold_op,
item,
}
}
fn consume_iter<I>(self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
fn not_full<C, ID, T>(base: &C) -> impl Fn(&T) -> bool + '_
where
C: Folder<ID>,
{
move |_| !base.full()
}
let base = self.base;
let item = iter
.into_iter()
// stop iterating if another thread has finished
.take_while(not_full(&base))
.fold(self.item, self.fold_op);
FoldFolder {
base,
item,
fold_op: self.fold_op,
}
}
fn complete(self) -> C::Result {
self.base.consume(self.item).complete()
}
fn full(&self) -> bool {
self.base.full()
}
}
// ///////////////////////////////////////////////////////////////////////////
impl<I, U, F> FoldWith<I, U, F> {
pub(super) fn new(base: I, item: U, fold_op: F) -> Self {
FoldWith {
base,
item,
fold_op,
}
}
}
/// `FoldWith` is an iterator that applies a function over an iterator, producing a single value.
/// This struct is created by the [`fold_with()`] method on [`ParallelIterator`]
///
/// [`fold_with()`]: ParallelIterator::fold_with()
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct FoldWith<I, U, F> {
base: I,
item: U,
fold_op: F,
}
impl<I: Debug, U: Debug, F> Debug for FoldWith<I, U, F> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("FoldWith")
.field("base", &self.base)
.field("item", &self.item)
.finish()
}
}
impl<U, I, F> ParallelIterator for FoldWith<I, U, F>
where
I: ParallelIterator,
F: Fn(U, I::Item) -> U + Sync + Send,
U: Send + Clone,
{
type Item = U;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer1 = FoldWithConsumer {
base: consumer,
item: self.item,
fold_op: &self.fold_op,
};
self.base.drive_unindexed(consumer1)
}
}
struct FoldWithConsumer<'c, C, U, F> {
base: C,
item: U,
fold_op: &'c F,
}
impl<'r, U, T, C, F> Consumer<T> for FoldWithConsumer<'r, C, U, F>
where
C: Consumer<U>,
F: Fn(U, T) -> U + Sync,
U: Send + Clone,
{
type Folder = FoldFolder<'r, C::Folder, U, F>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
FoldWithConsumer {
base: left,
item: self.item.clone(),
..self
},
FoldWithConsumer {
base: right,
..self
},
reducer,
)
}
fn into_folder(self) -> Self::Folder {
FoldFolder {
base: self.base.into_folder(),
item: self.item,
fold_op: self.fold_op,
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<'r, U, T, C, F> UnindexedConsumer<T> for FoldWithConsumer<'r, C, U, F>
where
C: UnindexedConsumer<U>,
F: Fn(U, T) -> U + Sync,
U: Send + Clone,
{
fn split_off_left(&self) -> Self {
FoldWithConsumer {
base: self.base.split_off_left(),
item: self.item.clone(),
..*self
}
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
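
A short usage sketch for the `Fold`/`FoldWith` adapters above (illustrative, not part of the vendored file): as the `FoldFolder` logic shows, `fold()` yields one partial accumulator per split, so it is typically followed by a reduction such as `sum()` or `reduce()`.

use rayon::prelude::*;

fn main() {
    // Count set bits across a range: one u32 accumulator per split...
    let total: u32 = (1..=100u32)
        .into_par_iter()
        .fold(|| 0u32, |acc, x| acc + x.count_ones())
        // ...then combine the per-split partial counts.
        .sum();
    assert_eq!(total, (1..=100u32).map(u32::count_ones).sum());
}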

224
vendor/rayon/src/iter/fold_chunks.rs vendored Normal file
View File

@@ -0,0 +1,224 @@
use std::fmt::{self, Debug};
use super::chunks::ChunkProducer;
use super::plumbing::*;
use super::*;
/// `FoldChunks` is an iterator that groups elements of an underlying iterator and applies a
/// function over them, producing a single value for each group.
///
/// This struct is created by the [`fold_chunks()`] method on [`IndexedParallelIterator`]
///
/// [`fold_chunks()`]: IndexedParallelIterator::fold_chunks()
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct FoldChunks<I, ID, F> {
base: I,
chunk_size: usize,
fold_op: F,
identity: ID,
}
impl<I: Debug, ID, F> Debug for FoldChunks<I, ID, F> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Fold")
.field("base", &self.base)
.field("chunk_size", &self.chunk_size)
.finish()
}
}
impl<I, ID, F> FoldChunks<I, ID, F> {
/// Creates a new `FoldChunks` iterator
pub(super) fn new(base: I, chunk_size: usize, identity: ID, fold_op: F) -> Self {
FoldChunks {
base,
chunk_size,
identity,
fold_op,
}
}
}
impl<I, ID, U, F> ParallelIterator for FoldChunks<I, ID, F>
where
I: IndexedParallelIterator,
ID: Fn() -> U + Send + Sync,
F: Fn(U, I::Item) -> U + Send + Sync,
U: Send,
{
type Item = U;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: Consumer<U>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<I, ID, U, F> IndexedParallelIterator for FoldChunks<I, ID, F>
where
I: IndexedParallelIterator,
ID: Fn() -> U + Send + Sync,
F: Fn(U, I::Item) -> U + Send + Sync,
U: Send,
{
fn len(&self) -> usize {
self.base.len().div_ceil(self.chunk_size)
}
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
let len = self.base.len();
return self.base.with_producer(Callback {
chunk_size: self.chunk_size,
len,
identity: self.identity,
fold_op: self.fold_op,
callback,
});
struct Callback<CB, ID, F> {
chunk_size: usize,
len: usize,
identity: ID,
fold_op: F,
callback: CB,
}
impl<T, CB, ID, U, F> ProducerCallback<T> for Callback<CB, ID, F>
where
CB: ProducerCallback<U>,
ID: Fn() -> U + Send + Sync,
F: Fn(U, T) -> U + Send + Sync,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = T>,
{
let identity = &self.identity;
let fold_op = &self.fold_op;
let fold_iter = move |iter: P::IntoIter| iter.fold(identity(), fold_op);
let producer = ChunkProducer::new(self.chunk_size, self.len, base, fold_iter);
self.callback.callback(producer)
}
}
}
}
#[cfg(test)]
mod test {
use super::*;
use std::ops::Add;
#[test]
fn check_fold_chunks() {
let words = "bishbashbosh!"
.chars()
.collect::<Vec<_>>()
.into_par_iter()
.fold_chunks(4, String::new, |mut s, c| {
s.push(c);
s
})
.collect::<Vec<_>>();
assert_eq!(words, vec!["bish", "bash", "bosh", "!"]);
}
// 'closure' values for tests below
fn id() -> i32 {
0
}
fn sum<T, U>(x: T, y: U) -> T
where
T: Add<U, Output = T>,
{
x + y
}
#[test]
#[should_panic(expected = "chunk_size must not be zero")]
fn check_fold_chunks_zero_size() {
let _: Vec<i32> = vec![1, 2, 3]
.into_par_iter()
.fold_chunks(0, id, sum)
.collect();
}
#[test]
fn check_fold_chunks_even_size() {
assert_eq!(
vec![1 + 2 + 3, 4 + 5 + 6, 7 + 8 + 9],
(1..10)
.into_par_iter()
.fold_chunks(3, id, sum)
.collect::<Vec<i32>>()
);
}
#[test]
fn check_fold_chunks_empty() {
let v: Vec<i32> = vec![];
let expected: Vec<i32> = vec![];
assert_eq!(
expected,
v.into_par_iter()
.fold_chunks(2, id, sum)
.collect::<Vec<i32>>()
);
}
#[test]
fn check_fold_chunks_len() {
assert_eq!(4, (0..8).into_par_iter().fold_chunks(2, id, sum).len());
assert_eq!(3, (0..9).into_par_iter().fold_chunks(3, id, sum).len());
assert_eq!(3, (0..8).into_par_iter().fold_chunks(3, id, sum).len());
assert_eq!(1, [1].par_iter().fold_chunks(3, id, sum).len());
assert_eq!(0, (0..0).into_par_iter().fold_chunks(3, id, sum).len());
}
#[test]
fn check_fold_chunks_uneven() {
let cases: Vec<(Vec<u32>, usize, Vec<u32>)> = vec![
((0..5).collect(), 3, vec![1 + 2, 3 + 4]),
(vec![1], 5, vec![1]),
((0..4).collect(), 3, vec![1 + 2, 3]),
];
for (i, (v, n, expected)) in cases.into_iter().enumerate() {
let mut res: Vec<u32> = vec![];
v.par_iter()
.fold_chunks(n, || 0, sum)
.collect_into_vec(&mut res);
assert_eq!(expected, res, "Case {i} failed");
res.truncate(0);
v.into_par_iter()
.fold_chunks(n, || 0, sum)
.rev()
.collect_into_vec(&mut res);
assert_eq!(
expected.into_iter().rev().collect::<Vec<u32>>(),
res,
"Case {i} reversed failed"
);
}
}
}

220
vendor/rayon/src/iter/fold_chunks_with.rs vendored Normal file
View File

@@ -0,0 +1,220 @@
use std::fmt::{self, Debug};
use super::chunks::ChunkProducer;
use super::plumbing::*;
use super::*;
/// `FoldChunksWith` is an iterator that groups elements of an underlying iterator and applies a
/// function over them, producing a single value for each group.
///
/// This struct is created by the [`fold_chunks_with()`] method on [`IndexedParallelIterator`]
///
/// [`fold_chunks_with()`]: IndexedParallelIterator::fold_chunks_with()
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct FoldChunksWith<I, U, F> {
base: I,
chunk_size: usize,
item: U,
fold_op: F,
}
impl<I: Debug, U: Debug, F> Debug for FoldChunksWith<I, U, F> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Fold")
.field("base", &self.base)
.field("chunk_size", &self.chunk_size)
.field("item", &self.item)
.finish()
}
}
impl<I, U, F> FoldChunksWith<I, U, F> {
/// Creates a new `FoldChunksWith` iterator
pub(super) fn new(base: I, chunk_size: usize, item: U, fold_op: F) -> Self {
FoldChunksWith {
base,
chunk_size,
item,
fold_op,
}
}
}
impl<I, U, F> ParallelIterator for FoldChunksWith<I, U, F>
where
I: IndexedParallelIterator,
U: Send + Clone,
F: Fn(U, I::Item) -> U + Send + Sync,
{
type Item = U;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: Consumer<U>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<I, U, F> IndexedParallelIterator for FoldChunksWith<I, U, F>
where
I: IndexedParallelIterator,
U: Send + Clone,
F: Fn(U, I::Item) -> U + Send + Sync,
{
fn len(&self) -> usize {
self.base.len().div_ceil(self.chunk_size)
}
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
let len = self.base.len();
return self.base.with_producer(Callback {
chunk_size: self.chunk_size,
len,
item: self.item,
fold_op: self.fold_op,
callback,
});
struct Callback<CB, T, F> {
chunk_size: usize,
len: usize,
item: T,
fold_op: F,
callback: CB,
}
impl<T, U, F, CB> ProducerCallback<T> for Callback<CB, U, F>
where
CB: ProducerCallback<U>,
U: Send + Clone,
F: Fn(U, T) -> U + Send + Sync,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = T>,
{
let item = self.item;
let fold_op = &self.fold_op;
let fold_iter = move |iter: P::IntoIter| iter.fold(item.clone(), fold_op);
let producer = ChunkProducer::new(self.chunk_size, self.len, base, fold_iter);
self.callback.callback(producer)
}
}
}
}
#[cfg(test)]
mod test {
use super::*;
use std::ops::Add;
#[test]
fn check_fold_chunks_with() {
let words = "bishbashbosh!"
.chars()
.collect::<Vec<_>>()
.into_par_iter()
.fold_chunks_with(4, String::new(), |mut s, c| {
s.push(c);
s
})
.collect::<Vec<_>>();
assert_eq!(words, vec!["bish", "bash", "bosh", "!"]);
}
// 'closure' value for tests below
fn sum<T, U>(x: T, y: U) -> T
where
T: Add<U, Output = T>,
{
x + y
}
#[test]
#[should_panic(expected = "chunk_size must not be zero")]
fn check_fold_chunks_zero_size() {
let _: Vec<i32> = vec![1, 2, 3]
.into_par_iter()
.fold_chunks_with(0, 0, sum)
.collect();
}
#[test]
fn check_fold_chunks_even_size() {
assert_eq!(
vec![1 + 2 + 3, 4 + 5 + 6, 7 + 8 + 9],
(1..10)
.into_par_iter()
.fold_chunks_with(3, 0, sum)
.collect::<Vec<i32>>()
);
}
#[test]
fn check_fold_chunks_with_empty() {
let v: Vec<i32> = vec![];
let expected: Vec<i32> = vec![];
assert_eq!(
expected,
v.into_par_iter()
.fold_chunks_with(2, 0, sum)
.collect::<Vec<i32>>()
);
}
#[test]
fn check_fold_chunks_len() {
assert_eq!(4, (0..8).into_par_iter().fold_chunks_with(2, 0, sum).len());
assert_eq!(3, (0..9).into_par_iter().fold_chunks_with(3, 0, sum).len());
assert_eq!(3, (0..8).into_par_iter().fold_chunks_with(3, 0, sum).len());
assert_eq!(1, [1].par_iter().fold_chunks_with(3, 0, sum).len());
assert_eq!(0, (0..0).into_par_iter().fold_chunks_with(3, 0, sum).len());
}
#[test]
fn check_fold_chunks_uneven() {
let cases: Vec<(Vec<u32>, usize, Vec<u32>)> = vec![
((0..5).collect(), 3, vec![1 + 2, 3 + 4]),
(vec![1], 5, vec![1]),
((0..4).collect(), 3, vec![1 + 2, 3]),
];
for (i, (v, n, expected)) in cases.into_iter().enumerate() {
let mut res: Vec<u32> = vec![];
v.par_iter()
.fold_chunks_with(n, 0, sum)
.collect_into_vec(&mut res);
assert_eq!(expected, res, "Case {i} failed");
res.truncate(0);
v.into_par_iter()
.fold_chunks_with(n, 0, sum)
.rev()
.collect_into_vec(&mut res);
assert_eq!(
expected.into_iter().rev().collect::<Vec<u32>>(),
res,
"Case {i} reversed failed"
);
}
}
}

77
vendor/rayon/src/iter/for_each.rs vendored Normal file
View File

@@ -0,0 +1,77 @@
use super::noop::*;
use super::plumbing::*;
use super::ParallelIterator;
pub(super) fn for_each<I, F, T>(pi: I, op: &F)
where
I: ParallelIterator<Item = T>,
F: Fn(T) + Sync,
T: Send,
{
let consumer = ForEachConsumer { op };
pi.drive_unindexed(consumer)
}
struct ForEachConsumer<'f, F> {
op: &'f F,
}
impl<'f, F, T> Consumer<T> for ForEachConsumer<'f, F>
where
F: Fn(T) + Sync,
{
type Folder = ForEachConsumer<'f, F>;
type Reducer = NoopReducer;
type Result = ();
fn split_at(self, _index: usize) -> (Self, Self, NoopReducer) {
(self.split_off_left(), self, NoopReducer)
}
fn into_folder(self) -> Self {
self
}
fn full(&self) -> bool {
false
}
}
impl<'f, F, T> Folder<T> for ForEachConsumer<'f, F>
where
F: Fn(T) + Sync,
{
type Result = ();
fn consume(self, item: T) -> Self {
(self.op)(item);
self
}
fn consume_iter<I>(self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
iter.into_iter().for_each(self.op);
self
}
fn complete(self) {}
fn full(&self) -> bool {
false
}
}
impl<'f, F, T> UnindexedConsumer<T> for ForEachConsumer<'f, F>
where
F: Fn(T) + Sync,
{
fn split_off_left(&self) -> Self {
ForEachConsumer { op: self.op }
}
fn to_reducer(&self) -> NoopReducer {
NoopReducer
}
}
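
A usage sketch for `for_each` (illustrative, not part of the vendored file): the consumer above requires only `Fn(T) + Sync`, so any shared mutable state must be synchronized, e.g. with atomics.

use rayon::prelude::*;
use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    let counter = AtomicUsize::new(0);
    // Each item is handed to the closure on some worker thread.
    (0..1000).into_par_iter().for_each(|_| {
        counter.fetch_add(1, Ordering::Relaxed);
    });
    assert_eq!(counter.load(Ordering::Relaxed), 1000);
}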

280
vendor/rayon/src/iter/from_par_iter.rs vendored Normal file
View File

@@ -0,0 +1,280 @@
use super::noop::NoopConsumer;
use super::{FromParallelIterator, IntoParallelIterator, ParallelExtend, ParallelIterator};
use std::borrow::Cow;
use std::collections::LinkedList;
use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};
use std::collections::{BinaryHeap, VecDeque};
use std::ffi::{OsStr, OsString};
use std::hash::{BuildHasher, Hash};
use std::rc::Rc;
use std::sync::Arc;
/// Creates an empty default collection and extends it.
fn collect_extended<C, I>(par_iter: I) -> C
where
I: IntoParallelIterator,
C: ParallelExtend<I::Item> + Default,
{
let mut collection = C::default();
collection.par_extend(par_iter);
collection
}
/// Collects items from a parallel iterator into a vector.
impl<T> FromParallelIterator<T> for Vec<T>
where
T: Send,
{
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = T>,
{
collect_extended(par_iter)
}
}
/// Collects items from a parallel iterator into a boxed slice.
impl<T> FromParallelIterator<T> for Box<[T]>
where
T: Send,
{
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = T>,
{
Vec::from_par_iter(par_iter).into()
}
}
/// Collects items from a parallel iterator into a reference-counted slice.
impl<T> FromParallelIterator<T> for Rc<[T]>
where
T: Send,
{
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = T>,
{
Vec::from_par_iter(par_iter).into()
}
}
/// Collects items from a parallel iterator into an atomically-reference-counted slice.
impl<T> FromParallelIterator<T> for Arc<[T]>
where
T: Send,
{
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = T>,
{
Vec::from_par_iter(par_iter).into()
}
}
/// Collects items from a parallel iterator into a `VecDeque`.
impl<T> FromParallelIterator<T> for VecDeque<T>
where
T: Send,
{
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = T>,
{
Vec::from_par_iter(par_iter).into()
}
}
/// Collects items from a parallel iterator into a `BinaryHeap`.
/// The heap ordering is established serially after all items are collected.
impl<T> FromParallelIterator<T> for BinaryHeap<T>
where
T: Ord + Send,
{
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = T>,
{
Vec::from_par_iter(par_iter).into()
}
}
/// Collects items from a parallel iterator into a freshly allocated
/// linked list.
impl<T> FromParallelIterator<T> for LinkedList<T>
where
T: Send,
{
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = T>,
{
collect_extended(par_iter)
}
}
/// Collects (key, value) pairs from a parallel iterator into a
/// `HashMap`. If multiple pairs correspond to the same key, then the
/// ones produced earlier in the parallel iterator will be
/// overwritten, just as with a sequential iterator.
impl<K, V, S> FromParallelIterator<(K, V)> for HashMap<K, V, S>
where
K: Eq + Hash + Send,
V: Send,
S: BuildHasher + Default + Send,
{
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = (K, V)>,
{
collect_extended(par_iter)
}
}
/// Collects (key, value) pairs from a parallel iterator into a
/// `BTreeMap`. If multiple pairs correspond to the same key, then the
/// ones produced earlier in the parallel iterator will be
/// overwritten, just as with a sequential iterator.
impl<K, V> FromParallelIterator<(K, V)> for BTreeMap<K, V>
where
K: Ord + Send,
V: Send,
{
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = (K, V)>,
{
collect_extended(par_iter)
}
}
/// Collects values from a parallel iterator into a `HashSet`.
impl<V, S> FromParallelIterator<V> for HashSet<V, S>
where
V: Eq + Hash + Send,
S: BuildHasher + Default + Send,
{
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = V>,
{
collect_extended(par_iter)
}
}
/// Collects values from a parallel iterator into a `BTreeSet`.
impl<V> FromParallelIterator<V> for BTreeSet<V>
where
V: Send + Ord,
{
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = V>,
{
collect_extended(par_iter)
}
}
macro_rules! collect_string {
($desc:literal, $item:ty $(, $a:lifetime)?) => {
#[doc = concat!("Collects ", $desc, " from a parallel iterator into a string.")]
impl$(<$a>)? FromParallelIterator<$item> for String {
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = $item>,
{
collect_extended(par_iter)
}
}
#[doc = concat!("Collects ", $desc, " from a parallel iterator into a boxed string.")]
impl$(<$a>)? FromParallelIterator<$item> for Box<str> {
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = $item>,
{
String::from_par_iter(par_iter).into_boxed_str()
}
}
}
}
collect_string!("characters", char);
collect_string!("characters", &'a char, 'a);
collect_string!("string slices", &'a str, 'a);
collect_string!("string slices", Cow<'a, str>, 'a);
collect_string!("boxed strings", Box<str>);
collect_string!("strings", String);
/// Collects OS-string slices from a parallel iterator into an OS-string.
impl<'a> FromParallelIterator<&'a OsStr> for OsString {
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = &'a OsStr>,
{
collect_extended(par_iter)
}
}
/// Collects OS-strings from a parallel iterator into one large OS-string.
impl FromParallelIterator<OsString> for OsString {
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = OsString>,
{
collect_extended(par_iter)
}
}
/// Collects OS-string slices from a parallel iterator into an OS-string.
impl<'a> FromParallelIterator<Cow<'a, OsStr>> for OsString {
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = Cow<'a, OsStr>>,
{
collect_extended(par_iter)
}
}
/// Collects an arbitrary `Cow` collection.
///
/// Note, the standard library only has `FromIterator` for `Cow<'a, str>` and
/// `Cow<'a, [T]>`, because no one thought to add a blanket implementation
/// before it was stabilized.
impl<'a, C, T> FromParallelIterator<T> for Cow<'a, C>
where
C: ToOwned<Owned: FromParallelIterator<T>> + ?Sized,
T: Send,
{
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = T>,
{
Cow::Owned(C::Owned::from_par_iter(par_iter))
}
}
/// Collapses all unit items from a parallel iterator into one.
///
/// This is more useful when combined with higher-level abstractions, like
/// collecting to a `Result<(), E>` where you only care about errors:
///
/// ```
/// use std::io::*;
/// use rayon::prelude::*;
///
/// let data = vec![1, 2, 3, 4, 5];
/// let res: Result<()> = data.par_iter()
/// .map(|x| writeln!(stdout(), "{}", x))
/// .collect();
/// assert!(res.is_ok());
/// ```
impl FromParallelIterator<()> for () {
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = ()>,
{
par_iter.into_par_iter().drive_unindexed(NoopConsumer)
}
}
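
A worked example of the duplicate-key rule documented for the map impls above (illustrative, not part of the vendored file): pairs produced later in the iterator overwrite earlier ones, matching sequential `collect` semantics.

use rayon::prelude::*;
use std::collections::HashMap;

fn main() {
    let map: HashMap<u32, &str> = vec![(1, "first"), (2, "two"), (1, "second")]
        .into_par_iter()
        .collect();
    // The later pair for key 1 wins.
    assert_eq!(map[&1], "second");
    assert_eq!(map.len(), 2);
}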

253
vendor/rayon/src/iter/inspect.rs vendored Normal file
View File

@@ -0,0 +1,253 @@
use super::plumbing::*;
use super::*;
use std::fmt::{self, Debug};
use std::iter;
/// `Inspect` is an iterator that calls a function with a reference to each
/// element before yielding it.
///
/// This struct is created by the [`inspect()`] method on [`ParallelIterator`]
///
/// [`inspect()`]: ParallelIterator::inspect()
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct Inspect<I, F> {
base: I,
inspect_op: F,
}
impl<I: Debug, F> Debug for Inspect<I, F> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Inspect").field("base", &self.base).finish()
}
}
impl<I, F> Inspect<I, F> {
/// Creates a new `Inspect` iterator.
pub(super) fn new(base: I, inspect_op: F) -> Self {
Inspect { base, inspect_op }
}
}
impl<I, F> ParallelIterator for Inspect<I, F>
where
I: ParallelIterator,
F: Fn(&I::Item) + Sync + Send,
{
type Item = I::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer1 = InspectConsumer::new(consumer, &self.inspect_op);
self.base.drive_unindexed(consumer1)
}
fn opt_len(&self) -> Option<usize> {
self.base.opt_len()
}
}
impl<I, F> IndexedParallelIterator for Inspect<I, F>
where
I: IndexedParallelIterator,
F: Fn(&I::Item) + Sync + Send,
{
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
let consumer1 = InspectConsumer::new(consumer, &self.inspect_op);
self.base.drive(consumer1)
}
fn len(&self) -> usize {
self.base.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
return self.base.with_producer(Callback {
callback,
inspect_op: self.inspect_op,
});
struct Callback<CB, F> {
callback: CB,
inspect_op: F,
}
impl<T, F, CB> ProducerCallback<T> for Callback<CB, F>
where
CB: ProducerCallback<T>,
F: Fn(&T) + Sync,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = T>,
{
let producer = InspectProducer {
base,
inspect_op: &self.inspect_op,
};
self.callback.callback(producer)
}
}
}
}
// ////////////////////////////////////////////////////////////////////////
struct InspectProducer<'f, P, F> {
base: P,
inspect_op: &'f F,
}
impl<'f, P, F> Producer for InspectProducer<'f, P, F>
where
P: Producer,
F: Fn(&P::Item) + Sync,
{
type Item = P::Item;
type IntoIter = iter::Inspect<P::IntoIter, &'f F>;
fn into_iter(self) -> Self::IntoIter {
self.base.into_iter().inspect(self.inspect_op)
}
fn min_len(&self) -> usize {
self.base.min_len()
}
fn max_len(&self) -> usize {
self.base.max_len()
}
fn split_at(self, index: usize) -> (Self, Self) {
let (left, right) = self.base.split_at(index);
(
InspectProducer {
base: left,
inspect_op: self.inspect_op,
},
InspectProducer {
base: right,
inspect_op: self.inspect_op,
},
)
}
fn fold_with<G>(self, folder: G) -> G
where
G: Folder<Self::Item>,
{
let folder1 = InspectFolder {
base: folder,
inspect_op: self.inspect_op,
};
self.base.fold_with(folder1).base
}
}
// ////////////////////////////////////////////////////////////////////////
// Consumer implementation
struct InspectConsumer<'f, C, F> {
base: C,
inspect_op: &'f F,
}
impl<'f, C, F> InspectConsumer<'f, C, F> {
fn new(base: C, inspect_op: &'f F) -> Self {
InspectConsumer { base, inspect_op }
}
}
impl<'f, T, C, F> Consumer<T> for InspectConsumer<'f, C, F>
where
C: Consumer<T>,
F: Fn(&T) + Sync,
{
type Folder = InspectFolder<'f, C::Folder, F>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
InspectConsumer::new(left, self.inspect_op),
InspectConsumer::new(right, self.inspect_op),
reducer,
)
}
fn into_folder(self) -> Self::Folder {
InspectFolder {
base: self.base.into_folder(),
inspect_op: self.inspect_op,
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<'f, T, C, F> UnindexedConsumer<T> for InspectConsumer<'f, C, F>
where
C: UnindexedConsumer<T>,
F: Fn(&T) + Sync,
{
fn split_off_left(&self) -> Self {
InspectConsumer::new(self.base.split_off_left(), self.inspect_op)
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct InspectFolder<'f, C, F> {
base: C,
inspect_op: &'f F,
}
impl<'f, T, C, F> Folder<T> for InspectFolder<'f, C, F>
where
C: Folder<T>,
F: Fn(&T),
{
type Result = C::Result;
fn consume(self, item: T) -> Self {
(self.inspect_op)(&item);
InspectFolder {
base: self.base.consume(item),
inspect_op: self.inspect_op,
}
}
fn consume_iter<I>(mut self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
self.base = self
.base
.consume_iter(iter.into_iter().inspect(self.inspect_op));
self
}
fn complete(self) -> C::Result {
self.base.complete()
}
fn full(&self) -> bool {
self.base.full()
}
}
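
A usage sketch for `inspect` (illustrative, not part of the vendored file): the closure receives a shared reference to each element as it flows through, which is handy for debugging; across threads the print order is nondeterministic.

use rayon::prelude::*;

fn main() {
    let sum: i32 = vec![1, 2, 3]
        .into_par_iter()
        .inspect(|x| eprintln!("about to sum: {x}"))
        .sum();
    assert_eq!(sum, 6);
}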

326
vendor/rayon/src/iter/interleave.rs vendored Normal file
View File

@@ -0,0 +1,326 @@
use super::plumbing::*;
use super::*;
use std::cmp::Ordering;
use std::iter::Fuse;
/// `Interleave` is an iterator that interleaves elements of iterators
/// `i` and `j` in one continuous iterator. This struct is created by
/// the [`interleave()`] method on [`IndexedParallelIterator`]
///
/// [`interleave()`]: IndexedParallelIterator::interleave()
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct Interleave<I, J> {
i: I,
j: J,
}
impl<I, J> Interleave<I, J> {
/// Creates a new `Interleave` iterator
pub(super) fn new(i: I, j: J) -> Self {
Interleave { i, j }
}
}
impl<I, J> ParallelIterator for Interleave<I, J>
where
I: IndexedParallelIterator,
J: IndexedParallelIterator<Item = I::Item>,
{
type Item = I::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: Consumer<I::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<I, J> IndexedParallelIterator for Interleave<I, J>
where
I: IndexedParallelIterator,
J: IndexedParallelIterator<Item = I::Item>,
{
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn len(&self) -> usize {
self.i.len().checked_add(self.j.len()).expect("overflow")
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
let (i_len, j_len) = (self.i.len(), self.j.len());
return self.i.with_producer(CallbackI {
callback,
i_len,
j_len,
i_next: false,
j: self.j,
});
struct CallbackI<CB, J> {
callback: CB,
i_len: usize,
j_len: usize,
i_next: bool,
j: J,
}
impl<CB, J> ProducerCallback<J::Item> for CallbackI<CB, J>
where
J: IndexedParallelIterator,
CB: ProducerCallback<J::Item>,
{
type Output = CB::Output;
fn callback<I>(self, i_producer: I) -> Self::Output
where
I: Producer<Item = J::Item>,
{
self.j.with_producer(CallbackJ {
i_producer,
i_len: self.i_len,
j_len: self.j_len,
i_next: self.i_next,
callback: self.callback,
})
}
}
struct CallbackJ<CB, I> {
callback: CB,
i_len: usize,
j_len: usize,
i_next: bool,
i_producer: I,
}
impl<CB, I> ProducerCallback<I::Item> for CallbackJ<CB, I>
where
I: Producer,
CB: ProducerCallback<I::Item>,
{
type Output = CB::Output;
fn callback<J>(self, j_producer: J) -> Self::Output
where
J: Producer<Item = I::Item>,
{
let producer = InterleaveProducer::new(
self.i_producer,
j_producer,
self.i_len,
self.j_len,
self.i_next,
);
self.callback.callback(producer)
}
}
}
}
struct InterleaveProducer<I, J>
where
I: Producer,
J: Producer<Item = I::Item>,
{
i: I,
j: J,
i_len: usize,
j_len: usize,
i_next: bool,
}
impl<I, J> InterleaveProducer<I, J>
where
I: Producer,
J: Producer<Item = I::Item>,
{
fn new(i: I, j: J, i_len: usize, j_len: usize, i_next: bool) -> InterleaveProducer<I, J> {
InterleaveProducer {
i,
j,
i_len,
j_len,
i_next,
}
}
}
impl<I, J> Producer for InterleaveProducer<I, J>
where
I: Producer,
J: Producer<Item = I::Item>,
{
type Item = I::Item;
type IntoIter = InterleaveSeq<I::IntoIter, J::IntoIter>;
fn into_iter(self) -> Self::IntoIter {
InterleaveSeq {
i: self.i.into_iter().fuse(),
j: self.j.into_iter().fuse(),
i_next: self.i_next,
}
}
fn min_len(&self) -> usize {
Ord::max(self.i.min_len(), self.j.min_len())
}
fn max_len(&self) -> usize {
Ord::min(self.i.max_len(), self.j.max_len())
}
/// We know 0 < index <= self.i_len + self.j_len
///
/// Find a, b satisfying:
///
/// (1) 0 < a <= self.i_len
/// (2) 0 < b <= self.j_len
/// (3) a + b == index
///
    /// For even splits, set a = b = index/2.
    /// For odd splits, if `i` should yield the next element, set
    /// a = (index/2)+1 and b = index/2; otherwise set a = index/2
    /// and b = (index/2)+1.
fn split_at(self, index: usize) -> (Self, Self) {
#[inline]
fn odd_offset(flag: bool) -> usize {
(!flag) as usize
}
let even = index % 2 == 0;
let idx = index >> 1;
// desired split
let (i_idx, j_idx) = (
idx + odd_offset(even || self.i_next),
idx + odd_offset(even || !self.i_next),
);
let (i_split, j_split) = if self.i_len >= i_idx && self.j_len >= j_idx {
(i_idx, j_idx)
} else if self.i_len >= i_idx {
// j too short
(index - self.j_len, self.j_len)
} else {
// i too short
(self.i_len, index - self.i_len)
};
let trailing_i_next = even == self.i_next;
let (i_left, i_right) = self.i.split_at(i_split);
let (j_left, j_right) = self.j.split_at(j_split);
(
InterleaveProducer::new(i_left, j_left, i_split, j_split, self.i_next),
InterleaveProducer::new(
i_right,
j_right,
self.i_len - i_split,
self.j_len - j_split,
trailing_i_next,
),
)
}
}
/// Wrapper for Interleave to implement DoubleEndedIterator and
/// ExactSizeIterator.
///
/// This iterator is fused.
struct InterleaveSeq<I, J> {
i: Fuse<I>,
j: Fuse<J>,
    /// Flag controlling which iterator provides the next element: when
    /// `false`, `i` produces the next element; otherwise `j` does.
i_next: bool,
}
/// Iterator implementation for InterleaveSeq. This implementation is
/// taken more or less verbatim from itertools. It is replicated here
/// (instead of calling itertools directly) because we also need to
/// implement `DoubleEndedIterator` and `ExactSizeIterator`.
impl<I, J> Iterator for InterleaveSeq<I, J>
where
I: Iterator,
J: Iterator<Item = I::Item>,
{
type Item = I::Item;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
self.i_next = !self.i_next;
if self.i_next {
match self.i.next() {
None => self.j.next(),
r => r,
}
} else {
match self.j.next() {
None => self.i.next(),
r => r,
}
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let (ih, jh) = (self.i.size_hint(), self.j.size_hint());
let min = ih.0.saturating_add(jh.0);
let max = match (ih.1, jh.1) {
(Some(x), Some(y)) => x.checked_add(y),
_ => None,
};
(min, max)
}
}
// The implementation for DoubleEndedIterator requires
// ExactSizeIterator to provide `next_back()`. The last element will
// come from the iterator that runs out last (i.e., has the most elements
// in it). If the iterators have the same number of elements, then the
// last iterator will provide the last element.
impl<I, J> DoubleEndedIterator for InterleaveSeq<I, J>
where
I: DoubleEndedIterator + ExactSizeIterator,
J: DoubleEndedIterator<Item = I::Item> + ExactSizeIterator<Item = I::Item>,
{
#[inline]
fn next_back(&mut self) -> Option<I::Item> {
match self.i.len().cmp(&self.j.len()) {
Ordering::Less => self.j.next_back(),
Ordering::Equal => {
if self.i_next {
self.i.next_back()
} else {
self.j.next_back()
}
}
Ordering::Greater => self.i.next_back(),
}
}
}
impl<I, J> ExactSizeIterator for InterleaveSeq<I, J>
where
I: ExactSizeIterator,
J: ExactSizeIterator<Item = I::Item>,
{
#[inline]
fn len(&self) -> usize {
self.i.len() + self.j.len()
}
}
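
A usage sketch for `interleave` (illustrative, not part of the vendored file): elements alternate starting from the left iterator, and once one side is exhausted the other supplies the remaining tail, as the fused `InterleaveSeq::next` above implements.

use rayon::prelude::*;

fn main() {
    let x = vec![1, 3, 5];
    let y = vec![2, 4, 6, 8, 10];
    let v: Vec<i32> = x.into_par_iter().interleave(y).collect();
    // After `x` runs out, the rest of `y` follows in order.
    assert_eq!(v, vec![1, 2, 3, 4, 5, 6, 8, 10]);
}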

80
vendor/rayon/src/iter/interleave_shortest.rs vendored Normal file
View File

@@ -0,0 +1,80 @@
use super::plumbing::*;
use super::*;
/// `InterleaveShortest` is an iterator that works similarly to
/// `Interleave`, but this version stops returning elements once one
/// of the iterators runs out.
///
/// This struct is created by the [`interleave_shortest()`] method on
/// [`IndexedParallelIterator`].
///
/// [`interleave_shortest()`]: IndexedParallelIterator::interleave_shortest()
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct InterleaveShortest<I, J> {
interleave: Interleave<Take<I>, Take<J>>,
}
impl<I, J> InterleaveShortest<I, J>
where
I: IndexedParallelIterator,
J: IndexedParallelIterator<Item = I::Item>,
{
/// Creates a new `InterleaveShortest` iterator
pub(super) fn new(i: I, j: J) -> Self {
InterleaveShortest {
interleave: if i.len() <= j.len() {
// take equal lengths from both iterators
let n = i.len();
i.take(n).interleave(j.take(n))
} else {
// take one extra item from the first iterator
let n = j.len();
i.take(n + 1).interleave(j.take(n))
},
}
}
}
impl<I, J> ParallelIterator for InterleaveShortest<I, J>
where
I: IndexedParallelIterator,
J: IndexedParallelIterator<Item = I::Item>,
{
type Item = I::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: Consumer<I::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<I, J> IndexedParallelIterator for InterleaveShortest<I, J>
where
I: IndexedParallelIterator,
J: IndexedParallelIterator<Item = I::Item>,
{
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn len(&self) -> usize {
self.interleave.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
self.interleave.with_producer(callback)
}
}
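
A usage sketch for `interleave_shortest` (illustrative, not part of the vendored file): per the constructor above, when the left side is longer it contributes exactly one extra element before iteration stops.

use rayon::prelude::*;

fn main() {
    let x = vec![1, 3, 5, 7];
    let y = vec![2, 4];
    let v: Vec<i32> = x.into_par_iter().interleave_shortest(y).collect();
    // `y` is exhausted after two items; `x` gets one extra slot.
    assert_eq!(v, vec![1, 2, 3, 4, 5]);
}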

401
vendor/rayon/src/iter/intersperse.rs vendored Normal file
View File

@@ -0,0 +1,401 @@
use super::plumbing::*;
use super::*;
use std::cell::Cell;
use std::iter::{self, Fuse};
/// `Intersperse` is an iterator that inserts a particular item between each
/// pair of adjacent items of the adapted iterator. This struct is created by the
/// [`intersperse()`] method on [`ParallelIterator`]
///
/// [`intersperse()`]: ParallelIterator::intersperse()
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone, Debug)]
pub struct Intersperse<I>
where
I: ParallelIterator,
{
base: I,
item: I::Item,
}
impl<I> Intersperse<I>
where
I: ParallelIterator<Item: Clone>,
{
/// Creates a new `Intersperse` iterator
pub(super) fn new(base: I, item: I::Item) -> Self {
Intersperse { base, item }
}
}
impl<I> ParallelIterator for Intersperse<I>
where
I: ParallelIterator<Item: Clone>,
{
type Item = I::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<I::Item>,
{
let consumer1 = IntersperseConsumer::new(consumer, self.item);
self.base.drive_unindexed(consumer1)
}
fn opt_len(&self) -> Option<usize> {
match self.base.opt_len()? {
0 => Some(0),
len => len.checked_add(len - 1),
}
}
}
impl<I> IndexedParallelIterator for Intersperse<I>
where
I: IndexedParallelIterator<Item: Clone>,
{
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
let consumer1 = IntersperseConsumer::new(consumer, self.item);
self.base.drive(consumer1)
}
fn len(&self) -> usize {
let len = self.base.len();
if len > 0 {
len.checked_add(len - 1).expect("overflow")
} else {
0
}
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
let len = self.len();
return self.base.with_producer(Callback {
callback,
item: self.item,
len,
});
struct Callback<CB, T> {
callback: CB,
item: T,
len: usize,
}
impl<T, CB> ProducerCallback<T> for Callback<CB, T>
where
CB: ProducerCallback<T>,
T: Clone + Send,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = T>,
{
let producer = IntersperseProducer::new(base, self.item, self.len);
self.callback.callback(producer)
}
}
}
}
struct IntersperseProducer<P>
where
P: Producer,
{
base: P,
item: P::Item,
len: usize,
clone_first: bool,
}
impl<P> IntersperseProducer<P>
where
P: Producer,
{
fn new(base: P, item: P::Item, len: usize) -> Self {
IntersperseProducer {
base,
item,
len,
clone_first: false,
}
}
}
impl<P> Producer for IntersperseProducer<P>
where
P: Producer<Item: Clone + Send>,
{
type Item = P::Item;
type IntoIter = IntersperseIter<P::IntoIter>;
fn into_iter(self) -> Self::IntoIter {
IntersperseIter {
base: self.base.into_iter().fuse(),
item: self.item,
clone_first: self.len > 0 && self.clone_first,
// If there's more than one item, then even lengths end the opposite
// of how they started with respect to interspersed clones.
clone_last: self.len > 1 && ((self.len & 1 == 0) ^ self.clone_first),
}
}
fn min_len(&self) -> usize {
self.base.min_len()
}
fn max_len(&self) -> usize {
self.base.max_len()
}
fn split_at(self, index: usize) -> (Self, Self) {
debug_assert!(index <= self.len);
// The left needs half of the items from the base producer, and the
// other half will be our interspersed item. If we're not leading with
// a cloned item, then we need to round up the base number of items,
// otherwise round down.
let base_index = (index + !self.clone_first as usize) / 2;
let (left_base, right_base) = self.base.split_at(base_index);
let left = IntersperseProducer {
base: left_base,
item: self.item.clone(),
len: index,
clone_first: self.clone_first,
};
let right = IntersperseProducer {
base: right_base,
item: self.item,
len: self.len - index,
// If the index is odd, the right side toggles `clone_first`.
clone_first: (index & 1 == 1) ^ self.clone_first,
};
(left, right)
}
fn fold_with<F>(self, folder: F) -> F
where
F: Folder<Self::Item>,
{
let folder1 = IntersperseFolder {
base: folder,
item: self.item,
clone_first: self.clone_first,
};
self.base.fold_with(folder1).base
}
}
struct IntersperseIter<I>
where
I: Iterator,
{
base: Fuse<I>,
item: I::Item,
clone_first: bool,
clone_last: bool,
}
impl<I> Iterator for IntersperseIter<I>
where
I: DoubleEndedIterator<Item: Clone> + ExactSizeIterator,
{
type Item = I::Item;
fn next(&mut self) -> Option<Self::Item> {
if self.clone_first {
self.clone_first = false;
Some(self.item.clone())
} else if let next @ Some(_) = self.base.next() {
// If there are any items left, we'll need another clone in front.
self.clone_first = self.base.len() != 0;
next
} else if self.clone_last {
self.clone_last = false;
Some(self.item.clone())
} else {
None
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.len();
(len, Some(len))
}
}
impl<I> DoubleEndedIterator for IntersperseIter<I>
where
I: DoubleEndedIterator<Item: Clone> + ExactSizeIterator,
{
fn next_back(&mut self) -> Option<Self::Item> {
if self.clone_last {
self.clone_last = false;
Some(self.item.clone())
} else if let next_back @ Some(_) = self.base.next_back() {
// If there are any items left, we'll need another clone in back.
self.clone_last = self.base.len() != 0;
next_back
} else if self.clone_first {
self.clone_first = false;
Some(self.item.clone())
} else {
None
}
}
}
impl<I> ExactSizeIterator for IntersperseIter<I>
where
I: DoubleEndedIterator<Item: Clone> + ExactSizeIterator,
{
fn len(&self) -> usize {
let len = self.base.len();
len + len.saturating_sub(1) + self.clone_first as usize + self.clone_last as usize
}
}
struct IntersperseConsumer<C, T> {
base: C,
item: T,
clone_first: Cell<bool>,
}
impl<C, T> IntersperseConsumer<C, T>
where
C: Consumer<T>,
{
fn new(base: C, item: T) -> Self {
IntersperseConsumer {
base,
item,
clone_first: false.into(),
}
}
}
impl<C, T> Consumer<T> for IntersperseConsumer<C, T>
where
C: Consumer<T>,
T: Clone + Send,
{
type Folder = IntersperseFolder<C::Folder, T>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(mut self, index: usize) -> (Self, Self, Self::Reducer) {
// We'll feed twice as many items to the base consumer, except if we're
// not currently leading with a cloned item, then it's one less.
let base_index = index + index.saturating_sub(!self.clone_first.get() as usize);
let (left, right, reducer) = self.base.split_at(base_index);
let right = IntersperseConsumer {
base: right,
item: self.item.clone(),
clone_first: true.into(),
};
self.base = left;
(self, right, reducer)
}
fn into_folder(self) -> Self::Folder {
IntersperseFolder {
base: self.base.into_folder(),
item: self.item,
clone_first: self.clone_first.get(),
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<C, T> UnindexedConsumer<T> for IntersperseConsumer<C, T>
where
C: UnindexedConsumer<T>,
T: Clone + Send,
{
fn split_off_left(&self) -> Self {
let left = IntersperseConsumer {
base: self.base.split_off_left(),
item: self.item.clone(),
clone_first: self.clone_first.clone(),
};
self.clone_first.set(true);
left
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct IntersperseFolder<C, T> {
base: C,
item: T,
clone_first: bool,
}
impl<C, T> Folder<T> for IntersperseFolder<C, T>
where
C: Folder<T>,
T: Clone,
{
type Result = C::Result;
fn consume(mut self, item: T) -> Self {
if self.clone_first {
self.base = self.base.consume(self.item.clone());
if self.base.full() {
return self;
}
} else {
self.clone_first = true;
}
self.base = self.base.consume(item);
self
}
fn consume_iter<I>(self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
let mut clone_first = self.clone_first;
let between_item = self.item;
let base = self.base.consume_iter(iter.into_iter().flat_map(|item| {
let first = if clone_first {
Some(between_item.clone())
} else {
clone_first = true;
None
};
first.into_iter().chain(iter::once(item))
}));
IntersperseFolder {
base,
item: between_item,
clone_first,
}
}
fn complete(self) -> C::Result {
self.base.complete()
}
fn full(&self) -> bool {
self.base.full()
}
}
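
A usage sketch for `intersperse` (illustrative, not part of the vendored file): the separator is cloned between every pair of adjacent elements, which is why the adapter requires `Item: Clone`.

use rayon::prelude::*;

fn main() {
    let v: Vec<i32> = vec![1, 2, 3].into_par_iter().intersperse(-1).collect();
    assert_eq!(v, vec![1, -1, 2, -1, 3]);
}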

262
vendor/rayon/src/iter/len.rs vendored Normal file
View File

@@ -0,0 +1,262 @@
use super::plumbing::*;
use super::*;
/// `MinLen` is an iterator that imposes a minimum length on iterator splits.
/// This struct is created by the [`with_min_len()`] method on [`IndexedParallelIterator`]
///
/// [`with_min_len()`]: IndexedParallelIterator::with_min_len()
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct MinLen<I> {
base: I,
min: usize,
}
impl<I> MinLen<I> {
/// Creates a new `MinLen` iterator.
pub(super) fn new(base: I, min: usize) -> Self {
MinLen { base, min }
}
}
impl<I> ParallelIterator for MinLen<I>
where
I: IndexedParallelIterator,
{
type Item = I::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<I> IndexedParallelIterator for MinLen<I>
where
I: IndexedParallelIterator,
{
fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
bridge(self, consumer)
}
fn len(&self) -> usize {
self.base.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
return self.base.with_producer(Callback {
callback,
min: self.min,
});
struct Callback<CB> {
callback: CB,
min: usize,
}
impl<T, CB> ProducerCallback<T> for Callback<CB>
where
CB: ProducerCallback<T>,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = T>,
{
let producer = MinLenProducer {
base,
min: self.min,
};
self.callback.callback(producer)
}
}
}
}
// ////////////////////////////////////////////////////////////////////////
// `MinLenProducer` implementation
struct MinLenProducer<P> {
base: P,
min: usize,
}
impl<P> Producer for MinLenProducer<P>
where
P: Producer,
{
type Item = P::Item;
type IntoIter = P::IntoIter;
fn into_iter(self) -> Self::IntoIter {
self.base.into_iter()
}
fn min_len(&self) -> usize {
Ord::max(self.min, self.base.min_len())
}
fn max_len(&self) -> usize {
self.base.max_len()
}
fn split_at(self, index: usize) -> (Self, Self) {
let (left, right) = self.base.split_at(index);
(
MinLenProducer {
base: left,
min: self.min,
},
MinLenProducer {
base: right,
min: self.min,
},
)
}
fn fold_with<F>(self, folder: F) -> F
where
F: Folder<Self::Item>,
{
self.base.fold_with(folder)
}
}
/// `MaxLen` is an iterator that imposes a maximum length on iterator splits.
/// This struct is created by the [`with_max_len()`] method on [`IndexedParallelIterator`]
///
/// [`with_max_len()`]: IndexedParallelIterator::with_max_len()
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct MaxLen<I> {
base: I,
max: usize,
}
impl<I> MaxLen<I> {
/// Creates a new `MaxLen` iterator.
pub(super) fn new(base: I, max: usize) -> Self {
MaxLen { base, max }
}
}
impl<I> ParallelIterator for MaxLen<I>
where
I: IndexedParallelIterator,
{
type Item = I::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<I> IndexedParallelIterator for MaxLen<I>
where
I: IndexedParallelIterator,
{
fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
bridge(self, consumer)
}
fn len(&self) -> usize {
self.base.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
return self.base.with_producer(Callback {
callback,
max: self.max,
});
struct Callback<CB> {
callback: CB,
max: usize,
}
impl<T, CB> ProducerCallback<T> for Callback<CB>
where
CB: ProducerCallback<T>,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = T>,
{
let producer = MaxLenProducer {
base,
max: self.max,
};
self.callback.callback(producer)
}
}
}
}
// ////////////////////////////////////////////////////////////////////////
// `MaxLenProducer` implementation
struct MaxLenProducer<P> {
base: P,
max: usize,
}
impl<P> Producer for MaxLenProducer<P>
where
P: Producer,
{
type Item = P::Item;
type IntoIter = P::IntoIter;
fn into_iter(self) -> Self::IntoIter {
self.base.into_iter()
}
fn min_len(&self) -> usize {
self.base.min_len()
}
fn max_len(&self) -> usize {
Ord::min(self.max, self.base.max_len())
}
fn split_at(self, index: usize) -> (Self, Self) {
let (left, right) = self.base.split_at(index);
(
MaxLenProducer {
base: left,
max: self.max,
},
MaxLenProducer {
base: right,
max: self.max,
},
)
}
fn fold_with<F>(self, folder: F) -> F
where
F: Folder<Self::Item>,
{
self.base.fold_with(folder)
}
}
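
A usage sketch for the length-bounding adapters above (illustrative, not part of the vendored file): `with_min_len` keeps rayon from splitting work into pieces smaller than the given size, trading parallelism for lower per-task overhead; `with_max_len` caps piece size the same way via `MaxLenProducer`.

use rayon::prelude::*;

fn main() {
    let sum: u64 = (0u64..1024)
        .into_par_iter()
        // Never split below 64 items per piece.
        .with_min_len(64)
        .sum();
    assert_eq!(sum, 1023 * 1024 / 2);
}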

255
vendor/rayon/src/iter/map.rs vendored Normal file
View File

@@ -0,0 +1,255 @@
use super::plumbing::*;
use super::*;
use std::fmt::{self, Debug};
use std::iter;
/// `Map` is an iterator that transforms the elements of an underlying iterator.
///
/// This struct is created by the [`map()`] method on [`ParallelIterator`]
///
/// [`map()`]: ParallelIterator::map()
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct Map<I, F> {
base: I,
map_op: F,
}
impl<I: Debug, F> Debug for Map<I, F> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Map").field("base", &self.base).finish()
}
}
impl<I, F> Map<I, F> {
/// Creates a new `Map` iterator.
pub(super) fn new(base: I, map_op: F) -> Self {
Map { base, map_op }
}
}
impl<I, F, R> ParallelIterator for Map<I, F>
where
I: ParallelIterator,
F: Fn(I::Item) -> R + Sync + Send,
R: Send,
{
type Item = F::Output;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer1 = MapConsumer::new(consumer, &self.map_op);
self.base.drive_unindexed(consumer1)
}
fn opt_len(&self) -> Option<usize> {
self.base.opt_len()
}
}
impl<I, F, R> IndexedParallelIterator for Map<I, F>
where
I: IndexedParallelIterator,
F: Fn(I::Item) -> R + Sync + Send,
R: Send,
{
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
let consumer1 = MapConsumer::new(consumer, &self.map_op);
self.base.drive(consumer1)
}
fn len(&self) -> usize {
self.base.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
return self.base.with_producer(Callback {
callback,
map_op: self.map_op,
});
struct Callback<CB, F> {
callback: CB,
map_op: F,
}
impl<T, F, R, CB> ProducerCallback<T> for Callback<CB, F>
where
CB: ProducerCallback<R>,
F: Fn(T) -> R + Sync,
R: Send,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = T>,
{
let producer = MapProducer {
base,
map_op: &self.map_op,
};
self.callback.callback(producer)
}
}
}
}
// ////////////////////////////////////////////////////////////////////////
struct MapProducer<'f, P, F> {
base: P,
map_op: &'f F,
}
impl<'f, P, F, R> Producer for MapProducer<'f, P, F>
where
P: Producer,
F: Fn(P::Item) -> R + Sync,
R: Send,
{
type Item = F::Output;
type IntoIter = iter::Map<P::IntoIter, &'f F>;
fn into_iter(self) -> Self::IntoIter {
self.base.into_iter().map(self.map_op)
}
fn min_len(&self) -> usize {
self.base.min_len()
}
fn max_len(&self) -> usize {
self.base.max_len()
}
fn split_at(self, index: usize) -> (Self, Self) {
let (left, right) = self.base.split_at(index);
(
MapProducer {
base: left,
map_op: self.map_op,
},
MapProducer {
base: right,
map_op: self.map_op,
},
)
}
fn fold_with<G>(self, folder: G) -> G
where
G: Folder<Self::Item>,
{
let folder1 = MapFolder {
base: folder,
map_op: self.map_op,
};
self.base.fold_with(folder1).base
}
}
// ////////////////////////////////////////////////////////////////////////
// Consumer implementation
struct MapConsumer<'f, C, F> {
base: C,
map_op: &'f F,
}
impl<'f, C, F> MapConsumer<'f, C, F> {
fn new(base: C, map_op: &'f F) -> Self {
MapConsumer { base, map_op }
}
}
impl<'f, T, R, C, F> Consumer<T> for MapConsumer<'f, C, F>
where
C: Consumer<F::Output>,
F: Fn(T) -> R + Sync,
R: Send,
{
type Folder = MapFolder<'f, C::Folder, F>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
MapConsumer::new(left, self.map_op),
MapConsumer::new(right, self.map_op),
reducer,
)
}
fn into_folder(self) -> Self::Folder {
MapFolder {
base: self.base.into_folder(),
map_op: self.map_op,
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<'f, T, R, C, F> UnindexedConsumer<T> for MapConsumer<'f, C, F>
where
C: UnindexedConsumer<F::Output>,
F: Fn(T) -> R + Sync,
R: Send,
{
fn split_off_left(&self) -> Self {
MapConsumer::new(self.base.split_off_left(), self.map_op)
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct MapFolder<'f, C, F> {
base: C,
map_op: &'f F,
}
impl<'f, T, R, C, F> Folder<T> for MapFolder<'f, C, F>
where
C: Folder<F::Output>,
F: Fn(T) -> R,
{
type Result = C::Result;
fn consume(self, item: T) -> Self {
let mapped_item = (self.map_op)(item);
MapFolder {
base: self.base.consume(mapped_item),
map_op: self.map_op,
}
}
fn consume_iter<I>(mut self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
self.base = self.base.consume_iter(iter.into_iter().map(self.map_op));
self
}
fn complete(self) -> C::Result {
self.base.complete()
}
fn full(&self) -> bool {
self.base.full()
}
}
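
A usage sketch for `map` (illustrative, not part of the vendored file): because `Map` forwards `len()` and `with_producer`, it stays indexed, so order-preserving collection into a `Vec` is cheap.

use rayon::prelude::*;

fn main() {
    let squares: Vec<i32> = (1..=5).into_par_iter().map(|x| x * x).collect();
    assert_eq!(squares, vec![1, 4, 9, 16, 25]);
}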

565
vendor/rayon/src/iter/map_with.rs vendored Normal file
View File

@@ -0,0 +1,565 @@
use super::plumbing::*;
use super::*;
use std::fmt::{self, Debug};
/// `MapWith` is an iterator that transforms the elements of an underlying iterator.
///
/// This struct is created by the [`map_with()`] method on [`ParallelIterator`]
///
/// [`map_with()`]: ParallelIterator::map_with()
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct MapWith<I, T, F> {
base: I,
item: T,
map_op: F,
}
impl<I: Debug, T: Debug, F> Debug for MapWith<I, T, F> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("MapWith")
.field("base", &self.base)
.field("item", &self.item)
.finish()
}
}
impl<I, T, F> MapWith<I, T, F> {
/// Creates a new `MapWith` iterator.
pub(super) fn new(base: I, item: T, map_op: F) -> Self {
MapWith { base, item, map_op }
}
}
impl<I, T, F, R> ParallelIterator for MapWith<I, T, F>
where
I: ParallelIterator,
T: Send + Clone,
F: Fn(&mut T, I::Item) -> R + Sync + Send,
R: Send,
{
type Item = R;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer1 = MapWithConsumer::new(consumer, self.item, &self.map_op);
self.base.drive_unindexed(consumer1)
}
fn opt_len(&self) -> Option<usize> {
self.base.opt_len()
}
}
impl<I, T, F, R> IndexedParallelIterator for MapWith<I, T, F>
where
I: IndexedParallelIterator,
T: Send + Clone,
F: Fn(&mut T, I::Item) -> R + Sync + Send,
R: Send,
{
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
let consumer1 = MapWithConsumer::new(consumer, self.item, &self.map_op);
self.base.drive(consumer1)
}
fn len(&self) -> usize {
self.base.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
return self.base.with_producer(Callback {
callback,
item: self.item,
map_op: self.map_op,
});
struct Callback<CB, U, F> {
callback: CB,
item: U,
map_op: F,
}
impl<T, U, F, R, CB> ProducerCallback<T> for Callback<CB, U, F>
where
CB: ProducerCallback<R>,
U: Send + Clone,
F: Fn(&mut U, T) -> R + Sync,
R: Send,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = T>,
{
let producer = MapWithProducer {
base,
item: self.item,
map_op: &self.map_op,
};
self.callback.callback(producer)
}
}
}
}
// ////////////////////////////////////////////////////////////////////////
struct MapWithProducer<'f, P, U, F> {
base: P,
item: U,
map_op: &'f F,
}
impl<'f, P, U, F, R> Producer for MapWithProducer<'f, P, U, F>
where
P: Producer,
U: Send + Clone,
F: Fn(&mut U, P::Item) -> R + Sync,
R: Send,
{
type Item = R;
type IntoIter = MapWithIter<'f, P::IntoIter, U, F>;
fn into_iter(self) -> Self::IntoIter {
MapWithIter {
base: self.base.into_iter(),
item: self.item,
map_op: self.map_op,
}
}
fn min_len(&self) -> usize {
self.base.min_len()
}
fn max_len(&self) -> usize {
self.base.max_len()
}
fn split_at(self, index: usize) -> (Self, Self) {
let (left, right) = self.base.split_at(index);
(
MapWithProducer {
base: left,
item: self.item.clone(),
map_op: self.map_op,
},
MapWithProducer {
base: right,
item: self.item,
map_op: self.map_op,
},
)
}
fn fold_with<G>(self, folder: G) -> G
where
G: Folder<Self::Item>,
{
let folder1 = MapWithFolder {
base: folder,
item: self.item,
map_op: self.map_op,
};
self.base.fold_with(folder1).base
}
}
struct MapWithIter<'f, I, U, F> {
base: I,
item: U,
map_op: &'f F,
}
impl<'f, I, U, F, R> Iterator for MapWithIter<'f, I, U, F>
where
I: Iterator,
F: Fn(&mut U, I::Item) -> R + Sync,
R: Send,
{
type Item = R;
fn next(&mut self) -> Option<R> {
let item = self.base.next()?;
Some((self.map_op)(&mut self.item, item))
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.base.size_hint()
}
}
impl<'f, I, U, F, R> DoubleEndedIterator for MapWithIter<'f, I, U, F>
where
I: DoubleEndedIterator,
F: Fn(&mut U, I::Item) -> R + Sync,
R: Send,
{
fn next_back(&mut self) -> Option<R> {
let item = self.base.next_back()?;
Some((self.map_op)(&mut self.item, item))
}
}
impl<'f, I, U, F, R> ExactSizeIterator for MapWithIter<'f, I, U, F>
where
I: ExactSizeIterator,
F: Fn(&mut U, I::Item) -> R + Sync,
R: Send,
{
}
// ////////////////////////////////////////////////////////////////////////
// Consumer implementation
struct MapWithConsumer<'f, C, U, F> {
base: C,
item: U,
map_op: &'f F,
}
impl<'f, C, U, F> MapWithConsumer<'f, C, U, F> {
fn new(base: C, item: U, map_op: &'f F) -> Self {
MapWithConsumer { base, item, map_op }
}
}
impl<'f, T, U, R, C, F> Consumer<T> for MapWithConsumer<'f, C, U, F>
where
C: Consumer<R>,
U: Send + Clone,
F: Fn(&mut U, T) -> R + Sync,
R: Send,
{
type Folder = MapWithFolder<'f, C::Folder, U, F>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
MapWithConsumer::new(left, self.item.clone(), self.map_op),
MapWithConsumer::new(right, self.item, self.map_op),
reducer,
)
}
fn into_folder(self) -> Self::Folder {
MapWithFolder {
base: self.base.into_folder(),
item: self.item,
map_op: self.map_op,
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<'f, T, U, R, C, F> UnindexedConsumer<T> for MapWithConsumer<'f, C, U, F>
where
C: UnindexedConsumer<R>,
U: Send + Clone,
F: Fn(&mut U, T) -> R + Sync,
R: Send,
{
fn split_off_left(&self) -> Self {
MapWithConsumer::new(self.base.split_off_left(), self.item.clone(), self.map_op)
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct MapWithFolder<'f, C, U, F> {
base: C,
item: U,
map_op: &'f F,
}
impl<'f, T, U, R, C, F> Folder<T> for MapWithFolder<'f, C, U, F>
where
C: Folder<R>,
F: Fn(&mut U, T) -> R,
{
type Result = C::Result;
fn consume(mut self, item: T) -> Self {
let mapped_item = (self.map_op)(&mut self.item, item);
self.base = self.base.consume(mapped_item);
self
}
fn consume_iter<I>(mut self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
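// Helper that mutably borrows only the state, so the `map` closure below
// doesn't capture `self` while `self.base` is simultaneously replaced.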
fn with<'f, T, U, R>(
item: &'f mut U,
map_op: impl Fn(&mut U, T) -> R + 'f,
) -> impl FnMut(T) -> R + 'f {
move |x| map_op(item, x)
}
{
let mapped_iter = iter.into_iter().map(with(&mut self.item, self.map_op));
self.base = self.base.consume_iter(mapped_iter);
}
self
}
fn complete(self) -> C::Result {
self.base.complete()
}
fn full(&self) -> bool {
self.base.full()
}
}
// ------------------------------------------------------------------------------------------------
/// `MapInit` is an iterator that transforms the elements of an underlying
/// iterator, passing each element to the closure along with a mutable
/// reference to state freshly created for each parallel split by `init`.
///
/// This struct is created by the [`map_init()`] method on [`ParallelIterator`]
///
/// [`map_init()`]: ParallelIterator::map_init()
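///
/// A minimal usage sketch (an illustration, not from the original docs).
/// Unlike `map_with`, the state is not cloned; `init` is called afresh for
/// each parallel split:
///
/// ```
/// use rayon::prelude::*;
///
/// let sums: Vec<i32> = (1..5).into_par_iter()
///     .map_init(|| 100, |base, x| *base + x)
///     .collect();
/// assert_eq!(sums, [101, 102, 103, 104]);
/// ```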
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct MapInit<I, INIT, F> {
base: I,
init: INIT,
map_op: F,
}
impl<I: Debug, INIT, F> Debug for MapInit<I, INIT, F> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("MapInit").field("base", &self.base).finish()
}
}
impl<I, INIT, F> MapInit<I, INIT, F> {
/// Creates a new `MapInit` iterator.
pub(super) fn new(base: I, init: INIT, map_op: F) -> Self {
MapInit { base, init, map_op }
}
}
impl<I, INIT, T, F, R> ParallelIterator for MapInit<I, INIT, F>
where
I: ParallelIterator,
INIT: Fn() -> T + Sync + Send,
F: Fn(&mut T, I::Item) -> R + Sync + Send,
R: Send,
{
type Item = R;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer1 = MapInitConsumer::new(consumer, &self.init, &self.map_op);
self.base.drive_unindexed(consumer1)
}
fn opt_len(&self) -> Option<usize> {
self.base.opt_len()
}
}
impl<I, INIT, T, F, R> IndexedParallelIterator for MapInit<I, INIT, F>
where
I: IndexedParallelIterator,
INIT: Fn() -> T + Sync + Send,
F: Fn(&mut T, I::Item) -> R + Sync + Send,
R: Send,
{
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
let consumer1 = MapInitConsumer::new(consumer, &self.init, &self.map_op);
self.base.drive(consumer1)
}
fn len(&self) -> usize {
self.base.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
return self.base.with_producer(Callback {
callback,
init: self.init,
map_op: self.map_op,
});
struct Callback<CB, INIT, F> {
callback: CB,
init: INIT,
map_op: F,
}
impl<T, INIT, U, F, R, CB> ProducerCallback<T> for Callback<CB, INIT, F>
where
CB: ProducerCallback<R>,
INIT: Fn() -> U + Sync,
F: Fn(&mut U, T) -> R + Sync,
R: Send,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = T>,
{
let producer = MapInitProducer {
base,
init: &self.init,
map_op: &self.map_op,
};
self.callback.callback(producer)
}
}
}
}
// ////////////////////////////////////////////////////////////////////////
struct MapInitProducer<'f, P, INIT, F> {
base: P,
init: &'f INIT,
map_op: &'f F,
}
impl<'f, P, INIT, U, F, R> Producer for MapInitProducer<'f, P, INIT, F>
where
P: Producer,
INIT: Fn() -> U + Sync,
F: Fn(&mut U, P::Item) -> R + Sync,
R: Send,
{
type Item = R;
type IntoIter = MapWithIter<'f, P::IntoIter, U, F>;
fn into_iter(self) -> Self::IntoIter {
MapWithIter {
base: self.base.into_iter(),
item: (self.init)(),
map_op: self.map_op,
}
}
fn min_len(&self) -> usize {
self.base.min_len()
}
fn max_len(&self) -> usize {
self.base.max_len()
}
fn split_at(self, index: usize) -> (Self, Self) {
let (left, right) = self.base.split_at(index);
(
MapInitProducer {
base: left,
init: self.init,
map_op: self.map_op,
},
MapInitProducer {
base: right,
init: self.init,
map_op: self.map_op,
},
)
}
fn fold_with<G>(self, folder: G) -> G
where
G: Folder<Self::Item>,
{
let folder1 = MapWithFolder {
base: folder,
item: (self.init)(),
map_op: self.map_op,
};
self.base.fold_with(folder1).base
}
}
// ////////////////////////////////////////////////////////////////////////
// Consumer implementation
struct MapInitConsumer<'f, C, INIT, F> {
base: C,
init: &'f INIT,
map_op: &'f F,
}
impl<'f, C, INIT, F> MapInitConsumer<'f, C, INIT, F> {
fn new(base: C, init: &'f INIT, map_op: &'f F) -> Self {
MapInitConsumer { base, init, map_op }
}
}
impl<'f, T, INIT, U, R, C, F> Consumer<T> for MapInitConsumer<'f, C, INIT, F>
where
C: Consumer<R>,
INIT: Fn() -> U + Sync,
F: Fn(&mut U, T) -> R + Sync,
R: Send,
{
type Folder = MapWithFolder<'f, C::Folder, U, F>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
MapInitConsumer::new(left, self.init, self.map_op),
MapInitConsumer::new(right, self.init, self.map_op),
reducer,
)
}
fn into_folder(self) -> Self::Folder {
MapWithFolder {
base: self.base.into_folder(),
item: (self.init)(),
map_op: self.map_op,
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<'f, T, INIT, U, R, C, F> UnindexedConsumer<T> for MapInitConsumer<'f, C, INIT, F>
where
C: UnindexedConsumer<R>,
INIT: Fn() -> U + Sync,
F: Fn(&mut U, T) -> R + Sync,
R: Send,
{
fn split_off_left(&self) -> Self {
MapInitConsumer::new(self.base.split_off_left(), self.init, self.map_op)
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}

3627
vendor/rayon/src/iter/mod.rs vendored Normal file

File diff suppressed because it is too large

335
vendor/rayon/src/iter/multizip.rs vendored Normal file

@@ -0,0 +1,335 @@
use super::plumbing::*;
use super::*;
/// `MultiZip` is an iterator that zips up a tuple of parallel iterators to
/// produce tuples of their items.
///
/// It is created by calling `into_par_iter()` on a tuple of types that
/// implement `IntoParallelIterator`, or `par_iter()`/`par_iter_mut()` with
/// types that are iterable by reference.
///
/// The implementation currently supports tuples up to length 12.
///
/// # Examples
///
/// ```
/// use rayon::prelude::*;
///
/// // This will iterate `r` by mutable reference, like `par_iter_mut()`, while
/// // ranges are all iterated by value like `into_par_iter()`.
/// // Note that the zipped iterator is only as long as the shortest input.
/// let mut r = vec![0; 3];
/// (&mut r, 1..10, 10..100, 100..1000).into_par_iter()
/// .for_each(|(r, x, y, z)| *r = x * y + z);
///
/// assert_eq!(&r, &[1 * 10 + 100, 2 * 11 + 101, 3 * 12 + 102]);
/// ```
///
/// For a group that should all be iterated by reference, you can use a tuple reference.
///
/// ```
/// use rayon::prelude::*;
///
/// let xs: Vec<_> = (1..10).collect();
/// let ys: Vec<_> = (10..100).collect();
/// let zs: Vec<_> = (100..1000).collect();
///
/// // Reference each input separately with `IntoParallelIterator`:
/// let r1: Vec<_> = (&xs, &ys, &zs).into_par_iter()
/// .map(|(x, y, z)| x * y + z)
/// .collect();
///
/// // Reference them all together with `IntoParallelRefIterator`:
/// let r2: Vec<_> = (xs, ys, zs).par_iter()
/// .map(|(x, y, z)| x * y + z)
/// .collect();
///
/// assert_eq!(r1, r2);
/// ```
///
/// Mutable references to a tuple will work similarly.
///
/// ```
/// use rayon::prelude::*;
///
/// let mut xs: Vec<_> = (1..4).collect();
/// let mut ys: Vec<_> = (-4..-1).collect();
/// let mut zs = vec![0; 3];
///
/// // Mutably reference each input separately with `IntoParallelIterator`:
/// (&mut xs, &mut ys, &mut zs).into_par_iter().for_each(|(x, y, z)| {
/// *z += *x + *y;
/// std::mem::swap(x, y);
/// });
///
/// assert_eq!(xs, (vec![-4, -3, -2]));
/// assert_eq!(ys, (vec![1, 2, 3]));
/// assert_eq!(zs, (vec![-3, -1, 1]));
///
/// // Mutably reference them all together with `IntoParallelRefMutIterator`:
/// let mut tuple = (xs, ys, zs);
/// tuple.par_iter_mut().for_each(|(x, y, z)| {
/// *z += *x + *y;
/// std::mem::swap(x, y);
/// });
///
/// assert_eq!(tuple, (vec![1, 2, 3], vec![-4, -3, -2], vec![-6, -2, 2]));
/// ```
#[derive(Debug, Clone)]
pub struct MultiZip<T> {
tuple: T,
}
// These macros greedily consume 4 or 2 items first to achieve log2 nesting depth.
// For example, 5 => 4,1 => (2,2),1.
//
// The tuples go up to 12, so we might want to greedily consume 8 too, but
// the depth works out the same if we let that expand on the right:
// 9 => 4,5 => (2,2),(4,1) => (2,2),((2,2),1)
// 12 => 4,8 => (2,2),(4,4) => (2,2),((2,2),(2,2))
//
// But if we ever increase to 13, we would want to split 8,5 rather than 4,9.
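// As a concrete example, `reduce!(a, b, c, d, e => f)` expands to
// `f(f(f(a, b), f(c, d)), e)`, matching the 5 => 4,1 => (2,2),1 shape above.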
macro_rules! reduce {
($a:expr, $b:expr, $c:expr, $d:expr, $( $x:expr ),+ => $fn:path) => {
reduce!(reduce!($a, $b, $c, $d => $fn),
reduce!($( $x ),+ => $fn)
=> $fn)
};
($a:expr, $b:expr, $( $x:expr ),+ => $fn:path) => {
reduce!(reduce!($a, $b => $fn),
reduce!($( $x ),+ => $fn)
=> $fn)
};
($a:expr, $b:expr => $fn:path) => { $fn($a, $b) };
($a:expr => $fn:path) => { $a };
}
macro_rules! nest {
($A:tt, $B:tt, $C:tt, $D:tt, $( $X:tt ),+) => {
(nest!($A, $B, $C, $D), nest!($( $X ),+))
};
($A:tt, $B:tt, $( $X:tt ),+) => {
(($A, $B), nest!($( $X ),+))
};
($A:tt, $B:tt) => { ($A, $B) };
($A:tt) => { $A };
}
macro_rules! flatten {
($( $T:ident ),+) => {{
#[allow(non_snake_case)]
fn flatten<$( $T ),+>(nest!($( $T ),+) : nest!($( $T ),+)) -> ($( $T, )+) {
($( $T, )+)
}
flatten
}};
}
macro_rules! multizip_impls {
($(
$Tuple:ident {
$(($idx:tt) -> $T:ident)+
}
)+) => {
$(
impl<$( $T, )+> IntoParallelIterator for ($( $T, )+)
where
$(
$T: IntoParallelIterator<Iter: IndexedParallelIterator>,
)+
{
type Item = ($( $T::Item, )+);
type Iter = MultiZip<($( $T::Iter, )+)>;
fn into_par_iter(self) -> Self::Iter {
MultiZip {
tuple: ( $( self.$idx.into_par_iter(), )+ ),
}
}
}
impl<'a, $( $T, )+> IntoParallelIterator for &'a ($( $T, )+)
where
$(
$T: IntoParallelRefIterator<'a, Iter: IndexedParallelIterator>,
)+
{
type Item = ($( $T::Item, )+);
type Iter = MultiZip<($( $T::Iter, )+)>;
fn into_par_iter(self) -> Self::Iter {
MultiZip {
tuple: ( $( self.$idx.par_iter(), )+ ),
}
}
}
impl<'a, $( $T, )+> IntoParallelIterator for &'a mut ($( $T, )+)
where
$(
$T: IntoParallelRefMutIterator<'a, Iter: IndexedParallelIterator>,
)+
{
type Item = ($( $T::Item, )+);
type Iter = MultiZip<($( $T::Iter, )+)>;
fn into_par_iter(self) -> Self::Iter {
MultiZip {
tuple: ( $( self.$idx.par_iter_mut(), )+ ),
}
}
}
impl<$( $T, )+> ParallelIterator for MultiZip<($( $T, )+)>
where
$( $T: IndexedParallelIterator, )+
{
type Item = ($( $T::Item, )+);
fn drive_unindexed<CONSUMER>(self, consumer: CONSUMER) -> CONSUMER::Result
where
CONSUMER: UnindexedConsumer<Self::Item>,
{
self.drive(consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<$( $T, )+> IndexedParallelIterator for MultiZip<($( $T, )+)>
where
$( $T: IndexedParallelIterator, )+
{
fn drive<CONSUMER>(self, consumer: CONSUMER) -> CONSUMER::Result
where
CONSUMER: Consumer<Self::Item>,
{
reduce!($( self.tuple.$idx ),+ => IndexedParallelIterator::zip)
.map(flatten!($( $T ),+))
.drive(consumer)
}
fn len(&self) -> usize {
reduce!($( self.tuple.$idx.len() ),+ => Ord::min)
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
reduce!($( self.tuple.$idx ),+ => IndexedParallelIterator::zip)
.map(flatten!($( $T ),+))
.with_producer(callback)
}
}
)+
}
}
multizip_impls! {
Tuple1 {
(0) -> A
}
Tuple2 {
(0) -> A
(1) -> B
}
Tuple3 {
(0) -> A
(1) -> B
(2) -> C
}
Tuple4 {
(0) -> A
(1) -> B
(2) -> C
(3) -> D
}
Tuple5 {
(0) -> A
(1) -> B
(2) -> C
(3) -> D
(4) -> E
}
Tuple6 {
(0) -> A
(1) -> B
(2) -> C
(3) -> D
(4) -> E
(5) -> F
}
Tuple7 {
(0) -> A
(1) -> B
(2) -> C
(3) -> D
(4) -> E
(5) -> F
(6) -> G
}
Tuple8 {
(0) -> A
(1) -> B
(2) -> C
(3) -> D
(4) -> E
(5) -> F
(6) -> G
(7) -> H
}
Tuple9 {
(0) -> A
(1) -> B
(2) -> C
(3) -> D
(4) -> E
(5) -> F
(6) -> G
(7) -> H
(8) -> I
}
Tuple10 {
(0) -> A
(1) -> B
(2) -> C
(3) -> D
(4) -> E
(5) -> F
(6) -> G
(7) -> H
(8) -> I
(9) -> J
}
Tuple11 {
(0) -> A
(1) -> B
(2) -> C
(3) -> D
(4) -> E
(5) -> F
(6) -> G
(7) -> H
(8) -> I
(9) -> J
(10) -> K
}
Tuple12 {
(0) -> A
(1) -> B
(2) -> C
(3) -> D
(4) -> E
(5) -> F
(6) -> G
(7) -> H
(8) -> I
(9) -> J
(10) -> K
(11) -> L
}
}

59
vendor/rayon/src/iter/noop.rs vendored Normal file

@@ -0,0 +1,59 @@
use super::plumbing::*;
pub(super) struct NoopConsumer;
impl<T> Consumer<T> for NoopConsumer {
type Folder = NoopConsumer;
type Reducer = NoopReducer;
type Result = ();
fn split_at(self, _index: usize) -> (Self, Self, NoopReducer) {
(NoopConsumer, NoopConsumer, NoopReducer)
}
fn into_folder(self) -> Self {
self
}
fn full(&self) -> bool {
false
}
}
impl<T> Folder<T> for NoopConsumer {
type Result = ();
fn consume(self, _item: T) -> Self {
self
}
fn consume_iter<I>(self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
iter.into_iter().for_each(drop);
self
}
fn complete(self) {}
fn full(&self) -> bool {
false
}
}
impl<T> UnindexedConsumer<T> for NoopConsumer {
fn split_off_left(&self) -> Self {
NoopConsumer
}
fn to_reducer(&self) -> NoopReducer {
NoopReducer
}
}
pub(super) struct NoopReducer;
impl Reducer<()> for NoopReducer {
fn reduce(self, _left: (), _right: ()) {}
}

70
vendor/rayon/src/iter/once.rs vendored Normal file

@@ -0,0 +1,70 @@
use crate::iter::plumbing::*;
use crate::iter::*;
/// Creates a parallel iterator that produces an element exactly once.
///
/// This admits no parallelism on its own, but it could be chained to existing
/// parallel iterators to extend their contents, or otherwise used for any code
/// that deals with generic parallel iterators.
///
/// # Examples
///
/// ```
/// use rayon::prelude::*;
/// use rayon::iter::once;
///
/// let pi = (0..1234).into_par_iter()
/// .chain(once(-1))
/// .chain(1234..10_000);
///
/// assert_eq!(pi.clone().count(), 10_001);
/// assert_eq!(pi.clone().filter(|&x| x < 0).count(), 1);
/// assert_eq!(pi.position_any(|x| x < 0), Some(1234));
/// ```
pub fn once<T: Send>(item: T) -> Once<T> {
Once { item }
}
/// Iterator adaptor for [the `once()` function].
///
/// [the `once()` function]: once()
#[derive(Clone, Debug)]
pub struct Once<T> {
item: T,
}
impl<T: Send> ParallelIterator for Once<T> {
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.drive(consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(1)
}
}
impl<T: Send> IndexedParallelIterator for Once<T> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
consumer.into_folder().consume(self.item).complete()
}
fn len(&self) -> usize {
1
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
// Let `OptionProducer` handle it.
Some(self.item).into_par_iter().with_producer(callback)
}
}

338
vendor/rayon/src/iter/panic_fuse.rs vendored Normal file

@@ -0,0 +1,338 @@
use super::plumbing::*;
use super::*;
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;
/// `PanicFuse` is an adaptor that wraps an iterator with a fuse in case
/// of panics, to halt all threads as soon as possible.
///
/// This struct is created by the [`panic_fuse()`] method on [`ParallelIterator`]
///
/// [`panic_fuse()`]: ParallelIterator::panic_fuse()
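///
/// A minimal sketch of its effect (an illustration, not from the original
/// docs): the panic still propagates to the caller, but `panic_fuse()` lets
/// the other worker threads stop early instead of draining the rest of the
/// iterator.
///
/// ```should_panic
/// use rayon::prelude::*;
///
/// (0..1_000_000).into_par_iter()
///     .panic_fuse()
///     .for_each(|i| if i == 1 { panic!("boom") });
/// ```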
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct PanicFuse<I> {
base: I,
}
/// Helper that sets a bool to `true` if dropped while unwinding.
#[derive(Clone)]
struct Fuse<'a>(&'a AtomicBool);
impl<'a> Drop for Fuse<'a> {
#[inline]
fn drop(&mut self) {
if thread::panicking() {
self.0.store(true, Ordering::Relaxed);
}
}
}
impl<'a> Fuse<'a> {
#[inline]
fn panicked(&self) -> bool {
self.0.load(Ordering::Relaxed)
}
}
impl<I> PanicFuse<I> {
/// Creates a new `PanicFuse` iterator.
pub(super) fn new(base: I) -> PanicFuse<I> {
PanicFuse { base }
}
}
impl<I> ParallelIterator for PanicFuse<I>
where
I: ParallelIterator,
{
type Item = I::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let panicked = AtomicBool::new(false);
let consumer1 = PanicFuseConsumer {
base: consumer,
fuse: Fuse(&panicked),
};
self.base.drive_unindexed(consumer1)
}
fn opt_len(&self) -> Option<usize> {
self.base.opt_len()
}
}
impl<I> IndexedParallelIterator for PanicFuse<I>
where
I: IndexedParallelIterator,
{
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
let panicked = AtomicBool::new(false);
let consumer1 = PanicFuseConsumer {
base: consumer,
fuse: Fuse(&panicked),
};
self.base.drive(consumer1)
}
fn len(&self) -> usize {
self.base.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
return self.base.with_producer(Callback { callback });
struct Callback<CB> {
callback: CB,
}
impl<T, CB> ProducerCallback<T> for Callback<CB>
where
CB: ProducerCallback<T>,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = T>,
{
let panicked = AtomicBool::new(false);
let producer = PanicFuseProducer {
base,
fuse: Fuse(&panicked),
};
self.callback.callback(producer)
}
}
}
}
// ////////////////////////////////////////////////////////////////////////
// Producer implementation
struct PanicFuseProducer<'a, P> {
base: P,
fuse: Fuse<'a>,
}
impl<'a, P> Producer for PanicFuseProducer<'a, P>
where
P: Producer,
{
type Item = P::Item;
type IntoIter = PanicFuseIter<'a, P::IntoIter>;
fn into_iter(self) -> Self::IntoIter {
PanicFuseIter {
base: self.base.into_iter(),
fuse: self.fuse,
}
}
fn min_len(&self) -> usize {
self.base.min_len()
}
fn max_len(&self) -> usize {
self.base.max_len()
}
fn split_at(self, index: usize) -> (Self, Self) {
let (left, right) = self.base.split_at(index);
(
PanicFuseProducer {
base: left,
fuse: self.fuse.clone(),
},
PanicFuseProducer {
base: right,
fuse: self.fuse,
},
)
}
fn fold_with<G>(self, folder: G) -> G
where
G: Folder<Self::Item>,
{
let folder1 = PanicFuseFolder {
base: folder,
fuse: self.fuse,
};
self.base.fold_with(folder1).base
}
}
struct PanicFuseIter<'a, I> {
base: I,
fuse: Fuse<'a>,
}
impl<'a, I> Iterator for PanicFuseIter<'a, I>
where
I: Iterator,
{
type Item = I::Item;
fn next(&mut self) -> Option<Self::Item> {
if self.fuse.panicked() {
None
} else {
self.base.next()
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.base.size_hint()
}
}
impl<'a, I> DoubleEndedIterator for PanicFuseIter<'a, I>
where
I: DoubleEndedIterator,
{
fn next_back(&mut self) -> Option<Self::Item> {
if self.fuse.panicked() {
None
} else {
self.base.next_back()
}
}
}
impl<'a, I> ExactSizeIterator for PanicFuseIter<'a, I>
where
I: ExactSizeIterator,
{
fn len(&self) -> usize {
self.base.len()
}
}
// ////////////////////////////////////////////////////////////////////////
// Consumer implementation
struct PanicFuseConsumer<'a, C> {
base: C,
fuse: Fuse<'a>,
}
impl<'a, T, C> Consumer<T> for PanicFuseConsumer<'a, C>
where
C: Consumer<T>,
{
type Folder = PanicFuseFolder<'a, C::Folder>;
type Reducer = PanicFuseReducer<'a, C::Reducer>;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
PanicFuseConsumer {
base: left,
fuse: self.fuse.clone(),
},
PanicFuseConsumer {
base: right,
fuse: self.fuse.clone(),
},
PanicFuseReducer {
base: reducer,
_fuse: self.fuse,
},
)
}
fn into_folder(self) -> Self::Folder {
PanicFuseFolder {
base: self.base.into_folder(),
fuse: self.fuse,
}
}
fn full(&self) -> bool {
self.fuse.panicked() || self.base.full()
}
}
impl<'a, T, C> UnindexedConsumer<T> for PanicFuseConsumer<'a, C>
where
C: UnindexedConsumer<T>,
{
fn split_off_left(&self) -> Self {
PanicFuseConsumer {
base: self.base.split_off_left(),
fuse: self.fuse.clone(),
}
}
fn to_reducer(&self) -> Self::Reducer {
PanicFuseReducer {
base: self.base.to_reducer(),
_fuse: self.fuse.clone(),
}
}
}
struct PanicFuseFolder<'a, C> {
base: C,
fuse: Fuse<'a>,
}
impl<'a, T, C> Folder<T> for PanicFuseFolder<'a, C>
where
C: Folder<T>,
{
type Result = C::Result;
fn consume(mut self, item: T) -> Self {
self.base = self.base.consume(item);
self
}
fn consume_iter<I>(mut self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
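// Helper returning a predicate that passes items through only while no
// thread has panicked; `take_while` then stops this folder early.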
fn cool<'a, T>(fuse: &'a Fuse<'_>) -> impl Fn(&T) -> bool + 'a {
move |_| !fuse.panicked()
}
self.base = {
let fuse = &self.fuse;
let iter = iter.into_iter().take_while(cool(fuse));
self.base.consume_iter(iter)
};
self
}
fn complete(self) -> C::Result {
self.base.complete()
}
fn full(&self) -> bool {
self.fuse.panicked() || self.base.full()
}
}
struct PanicFuseReducer<'a, C> {
base: C,
_fuse: Fuse<'a>,
}
impl<'a, T, C> Reducer<T> for PanicFuseReducer<'a, C>
where
C: Reducer<T>,
{
fn reduce(self, left: T, right: T) -> T {
self.base.reduce(left, right)
}
}

157
vendor/rayon/src/iter/par_bridge.rs vendored Normal file

@@ -0,0 +1,157 @@
#[cfg(not(feature = "web_spin_lock"))]
use std::sync::Mutex;
#[cfg(feature = "web_spin_lock")]
use wasm_sync::Mutex;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use crate::iter::plumbing::{bridge_unindexed, Folder, UnindexedConsumer, UnindexedProducer};
use crate::iter::ParallelIterator;
use crate::{current_num_threads, current_thread_index};
/// Conversion trait to convert an `Iterator` to a `ParallelIterator`.
///
/// This creates a "bridge" from a sequential iterator to a parallel one, by distributing its items
/// across the Rayon thread pool. This has the advantage of being able to parallelize just about
/// anything, but the resulting `ParallelIterator` can be less efficient than if you started with
/// `par_iter` instead. However, it can still be useful for iterators that are difficult to
/// parallelize by other means, like channels or file or network I/O.
///
/// Iterator items are pulled by `next()` one at a time, synchronized from each thread that is
/// ready for work, so this may become a bottleneck if the serial iterator can't keep up with the
/// parallel demand. The items are not buffered by `IterBridge`, so it's fine to use this with
/// large or even unbounded iterators.
///
/// The resulting iterator is not guaranteed to keep the order of the original iterator.
///
/// # Examples
///
/// To use this trait, take an existing `Iterator` and call `par_bridge` on it. After that, you can
/// use any of the `ParallelIterator` methods:
///
/// ```
/// use rayon::iter::ParallelBridge;
/// use rayon::prelude::ParallelIterator;
/// use std::sync::mpsc::channel;
///
/// let rx = {
/// let (tx, rx) = channel();
///
/// tx.send("one!");
/// tx.send("two!");
/// tx.send("three!");
///
/// rx
/// };
///
/// let mut output: Vec<&'static str> = rx.into_iter().par_bridge().collect();
/// output.sort_unstable();
///
/// assert_eq!(&*output, &["one!", "three!", "two!"]);
/// ```
pub trait ParallelBridge: Sized {
/// Creates a bridge from this type to a `ParallelIterator`.
fn par_bridge(self) -> IterBridge<Self>;
}
impl<T> ParallelBridge for T
where
T: Iterator<Item: Send> + Send,
{
fn par_bridge(self) -> IterBridge<Self> {
IterBridge { iter: self }
}
}
/// `IterBridge` is a parallel iterator that wraps a sequential iterator.
///
/// This type is created when using the `par_bridge` method on `ParallelBridge`. See the
/// [`ParallelBridge`] documentation for details.
#[derive(Debug, Clone)]
pub struct IterBridge<Iter> {
iter: Iter,
}
impl<Iter> ParallelIterator for IterBridge<Iter>
where
Iter: Iterator<Item: Send> + Send,
{
type Item = Iter::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let num_threads = current_num_threads();
let threads_started: Vec<_> = (0..num_threads).map(|_| AtomicBool::new(false)).collect();
bridge_unindexed(
&IterParallelProducer {
split_count: AtomicUsize::new(num_threads),
iter: Mutex::new(self.iter.fuse()),
threads_started: &threads_started,
},
consumer,
)
}
}
struct IterParallelProducer<'a, Iter> {
split_count: AtomicUsize,
iter: Mutex<std::iter::Fuse<Iter>>,
threads_started: &'a [AtomicBool],
}
impl<Iter: Iterator + Send> UnindexedProducer for &IterParallelProducer<'_, Iter> {
type Item = Iter::Item;
fn split(self) -> (Self, Option<Self>) {
// Try to claim one of the remaining splits; once the budget is exhausted,
// stop producing new halves.
let update = self
.split_count
.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |c| c.checked_sub(1));
(self, update.is_ok().then_some(self))
}
fn fold_with<F>(self, mut folder: F) -> F
where
F: Folder<Self::Item>,
{
// Guard against work-stealing-induced recursion, in case `Iter::next()`
// calls rayon internally, so we don't deadlock our mutex. We might also
// be recursing via `folder` methods, which doesn't present a mutex hazard,
// but it's lower overhead for us to just check this once, rather than
// updating additional shared state on every mutex lock/unlock.
// (If this isn't a rayon thread, then there's no work-stealing anyway...)
if let Some(i) = current_thread_index() {
// Note: If the number of threads in the pool ever grows dynamically, then
// we'll end up sharing flags and may falsely detect recursion -- that's
// still fine for overall correctness, just not optimal for parallelism.
let thread_started = &self.threads_started[i % self.threads_started.len()];
if thread_started.swap(true, Ordering::Relaxed) {
// We can't make progress with a nested mutex, so just return and let
// the outermost loop continue with the rest of the iterator items.
return folder;
}
}
loop {
if let Ok(mut iter) = self.iter.lock() {
if let Some(it) = iter.next() {
drop(iter);
folder = folder.consume(it);
if folder.full() {
return folder;
}
} else {
return folder;
}
} else {
// any panics from other threads will have been caught by the pool,
// and will be re-thrown when joined - just exit
return folder;
}
}
}
}

315
vendor/rayon/src/iter/plumbing/README.md vendored Normal file

@@ -0,0 +1,315 @@
# Parallel Iterators
These are some notes on the design of the parallel iterator traits.
This file does not describe how to **use** parallel iterators.
## The challenge
Parallel iterators are more complicated than sequential iterators.
The reason is that they have to be able to split themselves up and
operate in parallel across the two halves.
The current design for parallel iterators has two distinct modes in
which they can be used; as we will see, not all iterators support both
modes (which is why there are two):
- **Pull mode** (the `Producer` and `UnindexedProducer` traits): in this mode,
the iterator is asked to produce the next item using a call to `next`. This
is basically like a normal iterator, but with a twist: you can split the
iterator in half to produce disjoint items in separate threads.
- in the `Producer` trait, splitting is done with `split_at`, which accepts
an index where the split should be performed. Only indexed iterators can
work in this mode, as they know exactly how much data they will produce,
and how to locate the requested index.
- in the `UnindexedProducer` trait, splitting is done with `split`, which
simply requests that the producer divide itself *approximately* in half.
This is useful when the exact length and/or layout is unknown, as with
`String` characters, or when the length might exceed `usize`, as with
`Range<u64>` on 32-bit platforms.
- In theory, any `Producer` could act unindexed, but we don't currently
use that possibility. When you know the exact length, a `split` can
simply be implemented as `split_at(length/2)`.
- **Push mode** (the `Consumer` and `UnindexedConsumer` traits): in
this mode, the iterator instead is *given* each item in turn, which
is then processed. This is the opposite of a normal iterator. It's
more like a `for_each` call: each time a new item is produced, the
`consume` method is called with that item. (The traits themselves are
a bit more complex, as they support state that can be threaded
through and ultimately reduced.) Like producers, there are two
variants of consumers which differ in how the split is performed:
- in the `Consumer` trait, splitting is done with `split_at`, which
accepts an index where the split should be performed. All
iterators can work in this mode. The resulting halves thus have an
idea about how much data they expect to consume.
- in the `UnindexedConsumer` trait, splitting is done with
`split_off_left`. There is no index: the resulting halves must be
prepared to process any amount of data, and they don't know where that
data falls in the overall stream.
- Not all consumers can operate in this mode. It works for
`for_each` and `reduce`, for example, but it does not work for
`collect_into_vec`, since in that case the position of each item is
important for knowing where it ends up in the target collection.
## How iterator execution proceeds
We'll walk through this example iterator chain to start. This chain
demonstrates more-or-less the full complexity of what can happen.
```rust
vec1.par_iter()
.zip(vec2.par_iter())
.flat_map(some_function)
.for_each(some_other_function)
```
To handle an iterator chain, we start by creating consumers. This
works from the end. So in this case, the call to `for_each` is the
final step, so it will create a `ForEachConsumer` that, given an item,
just calls `some_other_function` with that item. (`ForEachConsumer` is
a very simple consumer because it doesn't need to thread any state
between items at all.)
Now, the `for_each` call will pass this consumer to the base iterator,
which is the `flat_map`. It will do this by calling the `drive_unindexed`
method on the `ParallelIterator` trait. `drive_unindexed` basically
says "produce items for this iterator and feed them to this consumer";
it only works for unindexed consumers.
(As an aside, it is interesting that only some consumers can work in
unindexed mode, but all producers can *drive* an unindexed consumer.
In contrast, only some producers can drive an *indexed* consumer, but
all consumers can be supplied indexes. Isn't variance neat.)
As it happens, `FlatMap` only works with unindexed consumers anyway.
This is because flat-map basically has no idea how many items it will
produce. If you ask flat-map to produce the 22nd item, it can't do it,
at least not without some intermediate state. It doesn't know whether
processing the first item will create 1 item, 3 items, or 100;
therefore, to produce an arbitrary item, it would basically just have
to start at the beginning and execute sequentially, which is not what
we want. But for unindexed consumers, this doesn't matter, since they
don't need to know how much data they will get.
Therefore, `FlatMap` can wrap the `ForEachConsumer` with a
`FlatMapConsumer` that feeds to it. This `FlatMapConsumer` will be
given one item. It will then invoke `some_function` to get a parallel
iterator out. It will then ask this new parallel iterator to drive the
`ForEachConsumer`. The `drive_unindexed` method on `flat_map` can then
pass the `FlatMapConsumer` up the chain to the previous item, which is
`zip`. At this point, something interesting happens.
## Switching from push to pull mode
If you think about `zip`, it can't really be implemented as a
consumer, at least not without an intermediate thread and some
channels or something (or maybe coroutines). The problem is that it
has to walk two iterators *in lockstep*. Basically, it can't call two
`drive` methods simultaneously, it can only call one at a time. So at
this point, the `zip` iterator needs to switch from *push mode* into
*pull mode*.
You'll note that `Zip` is only usable if its inputs implement
`IndexedParallelIterator`, meaning that they can produce data starting
at random points in the stream. This need to switch to push mode is
exactly why. If we want to split a zip iterator at position 22, we
need to be able to start zipping items from index 22 right away,
without having to start from index 0.
Anyway, so at this point, the `drive_unindexed` method for `Zip` stops
creating consumers. Instead, it creates a *producer*, a `ZipProducer`,
to be exact, and calls the `bridge` function in the `internals`
module. Creating a `ZipProducer` will in turn create producers for
the two iterators being zipped. This is possible because they both
implement `IndexedParallelIterator`.
The `bridge` function will then connect the consumer, which is
handling the `flat_map` and `for_each`, with the producer, which is
handling the `zip` and its predecessors. It will split down until the
chunks seem reasonably small, then pull items from the producer and
feed them to the consumer.
## The base case
The other time that `bridge` gets used is when we bottom out in an
indexed producer, such as a slice or range. There is also a
`bridge_unindexed` equivalent for - you guessed it - unindexed producers,
such as string characters.
<a name="producer-callback"></a>
## What on earth is `ProducerCallback`?
We saw that when you call a parallel action method like
`par_iter.reduce()`, that will create a "reducing" consumer and then
invoke `par_iter.drive_unindexed()` (or `par_iter.drive()`) as
appropriate. This may create yet more consumers as we proceed up the
parallel iterator chain. But at some point we're going to get to the
start of the chain, or to a parallel iterator (like `zip()`) that has
to coordinate multiple inputs. At that point, we need to start
converting parallel iterators into producers.
The way we do this is by invoking the method `with_producer()`, defined on
`IndexedParallelIterator`. This is a callback scheme. In an ideal world,
it would work like this:
```rust
base_iter.with_producer(|base_producer| {
// here, `base_producer` is the producer for `base_iter`
});
```
In that case, we could implement a combinator like `map()` by getting
the producer for the base iterator, wrapping it to make our own
`MapProducer`, and then passing that to the callback. Something like
this:
```rust
struct MapProducer<'f, P, F: 'f> {
base: P,
map_op: &'f F,
}
impl<I, F> IndexedParallelIterator for Map<I, F>
where I: IndexedParallelIterator,
F: MapOp<I::Item>,
{
fn with_producer<CB>(self, callback: CB) -> CB::Output {
let map_op = &self.map_op;
self.base_iter.with_producer(|base_producer| {
// Here `producer` is the producer for `self.base_iter`.
// Wrap that to make a `MapProducer`
let map_producer = MapProducer {
base: base_producer,
map_op: map_op
};
// invoke the callback with the wrapped version
callback(map_producer)
})
}
}
```
This example demonstrates some of the power of the callback scheme.
It winds up being a very flexible setup. For one thing, it means we
can take ownership of `par_iter`; we can then in turn give ownership
away of its bits and pieces into the producer (this is very useful if
the iterator owns an `&mut` slice, for example), or create shared
references and put *those* in the producer. In the case of map, for
example, the parallel iterator owns the `map_op`, and we borrow
references to it which we then put into the `MapProducer` (this means
the `MapProducer` can easily split itself and share those references).
The `with_producer` method can also create resources that are needed
during the parallel execution, since the producer does not have to be
returned.
Unfortunately there is a catch. We can't actually use closures the way
I showed you. To see why, think about the type that `map_producer`
would have to have. If we were going to write the `with_producer`
method using a closure, it would have to look something like this:
```rust
pub trait IndexedParallelIterator: ParallelIterator {
type Producer;
fn with_producer<CB, R>(self, callback: CB) -> R
where CB: FnOnce(Self::Producer) -> R;
...
}
```
Note that we had to add this associated type `Producer` so that
we could specify the argument of the callback to be `Self::Producer`.
Now, imagine trying to write that `MapProducer` impl using this style:
```rust
impl<I, F> IndexedParallelIterator for Map<I, F>
where I: IndexedParallelIterator,
F: MapOp<I::Item>,
{
type Producer = MapProducer<'f, I::Producer, F>;
// ^^ wait, what is this `'f`?
fn with_producer<CB, R>(self, callback: CB) -> R
where CB: FnOnce(Self::Producer) -> R
{
let map_op = &self.map_op;
// ^^^^^^ `'f` is (conceptually) the lifetime of this reference,
// so it will be different for each call to `with_producer`!
}
}
```
This may look familiar to you: it's the same problem that we have
trying to define an `Iterable` trait. Basically, the producer type
needs to include a lifetime (here, `'f`) that refers to the body of
`with_producer` and hence is not in scope at the impl level.
If we had [associated type constructors][1598], we could solve this
problem that way. But there is another solution. We can use a
dedicated callback trait like `ProducerCallback`, instead of `FnOnce`:
[1598]: https://github.com/rust-lang/rfcs/pull/1598
```rust
pub trait ProducerCallback<T> {
type Output;
fn callback<P>(self, producer: P) -> Self::Output
where P: Producer<Item=T>;
}
```
Using this trait, the signature of `with_producer()` looks like this:
```rust
fn with_producer<CB: ProducerCallback<Self::Item>>(self, callback: CB) -> CB::Output;
```
Notice that this signature **never has to name the producer type** --
there is no associated type `Producer` anymore. This is because the
`callback()` method is generic over **all** producers `P`.
The problem is that now the `||` sugar doesn't work anymore. So we
have to manually create the callback struct, which is a mite tedious.
So our `MapProducer` code looks like this:
```rust
impl<I, F> IndexedParallelIterator for Map<I, F>
where I: IndexedParallelIterator,
F: MapOp<I::Item>,
{
fn with_producer<CB>(self, callback: CB) -> CB::Output
where CB: ProducerCallback<Self::Item>
{
return self.base.with_producer(Callback { callback: callback, map_op: self.map_op });
// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
// Manual version of the closure sugar: create an instance
// of a struct that implements `ProducerCallback`.
// The struct declaration. Each field is something that we need to capture from the
// creating scope.
struct Callback<CB, F> {
callback: CB,
map_op: F,
}
// Implement the `ProducerCallback` trait. This is pure boilerplate.
impl<T, F, CB> ProducerCallback<T> for Callback<CB, F>
where F: MapOp<T>,
CB: ProducerCallback<F::Output>
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where P: Producer<Item=T>
{
// The body of the closure is here:
let producer = MapProducer { base: base,
map_op: &self.map_op };
self.callback.callback(producer)
}
}
}
}
```
OK, a bit tedious, but it works!

476
vendor/rayon/src/iter/plumbing/mod.rs vendored Normal file

@@ -0,0 +1,476 @@
//! Traits and functions used to implement parallel iteration. These are
//! low-level details -- users of parallel iterators should not need to
//! interact with them directly. See [the `plumbing` README][r] for a general overview.
//!
//! [r]: https://github.com/rayon-rs/rayon/blob/main/src/iter/plumbing/README.md
use crate::join_context;
use super::IndexedParallelIterator;
/// The `ProducerCallback` trait is a kind of generic closure,
/// [analogous to `FnOnce`][FnOnce]. See [the corresponding section in
/// the plumbing README][r] for more details.
///
/// [r]: https://github.com/rayon-rs/rayon/blob/main/src/iter/plumbing/README.md#producer-callback
/// [FnOnce]: std::ops::FnOnce
pub trait ProducerCallback<T> {
/// The type of value returned by this callback. Analogous to
/// [`Output` from the `FnOnce` trait][Output].
///
/// [Output]: std::ops::FnOnce::Output
type Output;
/// Invokes the callback with the given producer as argument. The
/// key point of this trait is that this method is generic over
/// `P`, and hence implementors must be defined for any producer.
fn callback<P>(self, producer: P) -> Self::Output
where
P: Producer<Item = T>;
}
/// A `Producer` is effectively a "splittable `IntoIterator`". That
/// is, a producer is a value which can be converted into an iterator
/// at any time: at that point, it simply produces items on demand,
/// like any iterator. But what makes a `Producer` special is that,
/// *before* we convert to an iterator, we can also **split** it at a
/// particular point using the `split_at` method. This will yield up
/// two producers, one producing the items before that point, and one
/// producing the items after that point (these two producers can then
/// independently be split further, or be converted into iterators).
/// In Rayon, this splitting is used to divide between threads.
/// See [the `plumbing` README][r] for further details.
///
/// Note that each producer will always produce a fixed number of
/// items N. However, this number N is not queryable through the API;
/// the consumer is expected to track it.
///
/// NB. You might expect `Producer` to extend the `IntoIterator`
/// trait. However, [rust-lang/rust#20671][20671] prevents us from
/// declaring the DoubleEndedIterator and ExactSizeIterator
/// constraints on a required IntoIterator trait, so we inline
/// IntoIterator here until that issue is fixed.
///
/// [r]: https://github.com/rayon-rs/rayon/blob/main/src/iter/plumbing/README.md
/// [20671]: https://github.com/rust-lang/rust/issues/20671
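///
/// In pseudocode, a producer's lifecycle looks like this (a sketch, not a
/// compilable example):
///
/// ```ignore
/// let (left, right) = producer.split_at(mid); // items 0..mid and mid..N
/// // each half may be split again, or drained sequentially:
/// for item in left.into_iter() { /* ... */ }
/// ```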
pub trait Producer: Send + Sized {
/// The type of item that will be produced by this producer once
/// it is converted into an iterator.
type Item;
/// The type of iterator we will become.
type IntoIter: Iterator<Item = Self::Item> + DoubleEndedIterator + ExactSizeIterator;
/// Convert `self` into an iterator; at this point, no more parallel splits
/// are possible.
fn into_iter(self) -> Self::IntoIter;
/// The minimum number of items that we will process
/// sequentially. Defaults to 1, which means that we will split
/// all the way down to a single item. This can be raised higher
/// using the [`with_min_len`] method, which will force us to
/// create sequential tasks at a larger granularity. Note that
/// Rayon normally adjusts the size of parallel splits automatically
/// to reduce overhead, so this should not be needed.
///
/// [`with_min_len`]: super::IndexedParallelIterator::with_min_len()
fn min_len(&self) -> usize {
1
}
/// The maximum number of items that we will process
/// sequentially. Defaults to MAX, which means that we can choose
/// not to split at all. This can be lowered using the
/// [`with_max_len`] method, which will force us to create more
/// parallel tasks. Note that Rayon normally adjusts the size of
/// parallel splits automatically to reduce overhead, so this
/// should not be needed.
///
/// [`with_max_len`]: super::IndexedParallelIterator::with_max_len()
fn max_len(&self) -> usize {
usize::MAX
}
/// Split into two producers; one produces items `0..index`, the
/// other `index..N`. Index must be less than or equal to `N`.
fn split_at(self, index: usize) -> (Self, Self);
/// Iterate the producer, feeding each element to `folder`, and
/// stop when the folder is full (or all elements have been consumed).
///
/// The provided implementation is sufficient for most iterables.
fn fold_with<F>(self, folder: F) -> F
where
F: Folder<Self::Item>,
{
folder.consume_iter(self.into_iter())
}
}
/// A consumer is effectively a [generalized "fold" operation][fold],
/// and in fact each consumer will eventually be converted into a
/// [`Folder`]. What makes a consumer special is that, like a
/// [`Producer`], it can be **split** into multiple consumers using
/// the `split_at` method. When a consumer is split, it produces two
/// consumers, as well as a **reducer**. The two consumers can be fed
/// items independently, and when they are done the reducer is used to
/// combine their two results into one. See [the `plumbing`
/// README][r] for further details.
///
/// [r]: https://github.com/rayon-rs/rayon/blob/main/src/iter/plumbing/README.md
/// [fold]: Iterator::fold()
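///
/// In pseudocode, splitting and reducing look like this, where `feed` stands
/// in for folding a run of items into each half (a sketch, not a compilable
/// example):
///
/// ```ignore
/// let (left, right, reducer) = consumer.split_at(index);
/// let a = feed(left, items_before_index);
/// let b = feed(right, items_from_index);
/// let result = reducer.reduce(a, b);
/// ```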
pub trait Consumer<Item>: Send + Sized {
/// The type of folder that this consumer can be converted into.
type Folder: Folder<Item, Result = Self::Result>;
/// The type of reducer that is produced if this consumer is split.
type Reducer: Reducer<Self::Result>;
/// The type of result that this consumer will ultimately produce.
type Result: Send;
/// Divide the consumer into two consumers, one processing items
/// `0..index` and one processing items from `index..`. Also
/// produces a reducer that can be used to reduce the results at
/// the end.
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer);
/// Convert the consumer into a folder that can consume items
/// sequentially, eventually producing a final result.
fn into_folder(self) -> Self::Folder;
/// Hint whether this `Consumer` would like to stop processing
/// further items, e.g. if a search has been completed.
fn full(&self) -> bool;
}
/// The `Folder` trait encapsulates [the standard fold
/// operation][fold]. It can be fed many items using the `consume`
/// method. At the end, once all items have been consumed, it can then
/// be converted (using `complete`) into a final value.
///
/// [fold]: Iterator::fold()
pub trait Folder<Item>: Sized {
/// The type of result that will ultimately be produced by the folder.
type Result;
/// Consume next item and return new sequential state.
fn consume(self, item: Item) -> Self;
/// Consume items from the iterator until full, and return new sequential state.
///
/// This method is **optional**. The default simply iterates over
/// `iter`, invoking `consume` and checking after each iteration
/// whether `full` returns false.
///
/// The main reason to override it is if you can provide a more
/// specialized, efficient implementation.
fn consume_iter<I>(mut self, iter: I) -> Self
where
I: IntoIterator<Item = Item>,
{
for item in iter {
self = self.consume(item);
if self.full() {
break;
}
}
self
}
/// Finish consuming items, produce final result.
fn complete(self) -> Self::Result;
/// Hint whether this `Folder` would like to stop processing
/// further items, e.g. if a search has been completed.
fn full(&self) -> bool;
}
/// The reducer is the final step of a `Consumer` -- after a consumer
/// has been split into two parts, and each of those parts has been
/// fully processed, we are left with two results. The reducer is then
/// used to combine those two results into one. See [the `plumbing`
/// README][r] for further details.
///
/// [r]: https://github.com/rayon-rs/rayon/blob/main/src/iter/plumbing/README.md
pub trait Reducer<Result> {
/// Reduce two final results into one; this is executed after a
/// split.
fn reduce(self, left: Result, right: Result) -> Result;
}
/// A stateless consumer can be freely copied. These consumers can be
/// used like regular consumers, but they also support a
/// `split_off_left` method that does not take an index to split, but
/// simply splits at some arbitrary point (`for_each`, for example,
/// produces an unindexed consumer).
pub trait UnindexedConsumer<I>: Consumer<I> {
/// Splits off a "left" consumer and returns it. The `self`
/// consumer should then be used to consume the "right" portion of
/// the data. (The ordering matters for methods like `find_first` --
/// values produced by the returned value are given precedence
/// over values produced by `self`.) Once the left and right
/// halves have been fully consumed, you should reduce the results
/// with the result of `to_reducer`.
fn split_off_left(&self) -> Self;
/// Creates a reducer that can be used to combine the results from
/// a split consumer.
fn to_reducer(&self) -> Self::Reducer;
}
/// A variant on `Producer` which does not know its exact length or
/// cannot represent it in a `usize`. These producers act like
/// ordinary producers except that they cannot be told to split at a
/// particular point. Instead, you just ask them to split 'somewhere'.
///
/// (In principle, `Producer` could extend this trait; however, it
/// does not because to do so would require producers to carry their
/// own length with them.)
pub trait UnindexedProducer: Send + Sized {
/// The type of item returned by this producer.
type Item;
/// Split midway into a new producer if possible, otherwise return `None`.
fn split(self) -> (Self, Option<Self>);
/// Iterate the producer, feeding each element to `folder`, and
/// stop when the folder is full (or all elements have been consumed).
fn fold_with<F>(self, folder: F) -> F
where
F: Folder<Self::Item>;
}
/// A splitter controls the policy for splitting into smaller work items.
///
/// Thief-splitting is an adaptive policy that starts by splitting into
/// enough jobs for every worker thread, and then resets itself whenever a
/// job is actually stolen into a different thread.
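///
/// For example, a fresh splitter in a 6-thread pool starts with `splits = 6`,
/// which yields `6_usize.next_power_of_two() = 8` leaf pieces if nothing is
/// stolen; each steal bumps the budget back up toward the thread count.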
#[derive(Clone, Copy)]
struct Splitter {
/// The `splits` tell us approximately how many remaining times we'd
/// like to split this job. We always just divide it by two though, so
/// the effective number of pieces will be `next_power_of_two()`.
splits: usize,
}
impl Splitter {
#[inline]
fn new() -> Splitter {
Splitter {
splits: crate::current_num_threads(),
}
}
#[inline]
fn try_split(&mut self, stolen: bool) -> bool {
let Splitter { splits } = *self;
if stolen {
// This job was stolen! Reset the number of desired splits to the
// thread count, if that's more than we had remaining anyway.
self.splits = Ord::max(crate::current_num_threads(), self.splits / 2);
true
} else if splits > 0 {
// We have splits remaining, make it so.
self.splits /= 2;
true
} else {
// Not stolen, and no more splits -- we're done!
false
}
}
}
/// The length splitter is built on thief-splitting, but additionally takes
/// into account the remaining length of the iterator.
#[derive(Clone, Copy)]
struct LengthSplitter {
inner: Splitter,
/// The smallest we're willing to divide into. Usually this is just 1,
/// but you can choose a larger working size with `with_min_len()`.
min: usize,
}
impl LengthSplitter {
/// Creates a new splitter based on lengths.
///
/// The `min` is a hard lower bound. We'll never split below that, but
/// of course an iterator might start out smaller already.
///
/// The `max` is an upper bound on the working size, used to determine
/// the minimum number of times we need to split to get under that limit.
/// The adaptive algorithm may very well split even further, but never
/// smaller than the `min`.
#[inline]
fn new(min: usize, max: usize, len: usize) -> LengthSplitter {
let mut splitter = LengthSplitter {
inner: Splitter::new(),
min: Ord::max(min, 1),
};
// Divide the given length by the max working length to get the minimum
// number of splits we need to get under that max. This rounds down,
// but the splitter actually gives `next_power_of_two()` pieces anyway.
// e.g. len 12345 / max 100 = 123 min_splits -> 128 pieces.
let min_splits = len / Ord::max(max, 1);
// Only update the value if it's not splitting enough already.
if min_splits > splitter.inner.splits {
splitter.inner.splits = min_splits;
}
splitter
}
#[inline]
fn try_split(&mut self, len: usize, stolen: bool) -> bool {
// If splitting wouldn't make us too small, try the inner splitter.
len / 2 >= self.min && self.inner.try_split(stolen)
}
}
/// This helper function is used to "connect" a parallel iterator to a
/// consumer. It will convert the `par_iter` into a producer P and
/// then pull items from P and feed them to `consumer`, splitting and
/// creating parallel threads as needed.
///
/// This is useful when you are implementing your own parallel
/// iterators: it is often used as the definition of the
/// [`drive_unindexed`] or [`drive`] methods.
///
/// [`drive_unindexed`]: super::ParallelIterator::drive_unindexed()
/// [`drive`]: super::IndexedParallelIterator::drive()
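///
/// A typical `drive` in a custom indexed iterator is little more than a
/// call to this function (a sketch, with the surrounding impl elided):
///
/// ```ignore
/// fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
///     bridge(self, consumer)
/// }
/// ```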
pub fn bridge<I, C>(par_iter: I, consumer: C) -> C::Result
where
I: IndexedParallelIterator,
C: Consumer<I::Item>,
{
let len = par_iter.len();
return par_iter.with_producer(Callback { len, consumer });
struct Callback<C> {
len: usize,
consumer: C,
}
impl<C, I> ProducerCallback<I> for Callback<C>
where
C: Consumer<I>,
{
type Output = C::Result;
fn callback<P>(self, producer: P) -> C::Result
where
P: Producer<Item = I>,
{
bridge_producer_consumer(self.len, producer, self.consumer)
}
}
}
/// This helper function is used to "connect" a producer and a
/// consumer. You may prefer to call [`bridge()`], which wraps this
/// function. This function will draw items from `producer` and feed
/// them to `consumer`, splitting and creating parallel tasks when
/// needed.
///
/// This is useful when you are implementing your own parallel
/// iterators: it is often used as the definition of the
/// [`drive_unindexed`] or [`drive`] methods.
///
/// [`drive_unindexed`]: super::ParallelIterator::drive_unindexed()
/// [`drive`]: super::IndexedParallelIterator::drive()
pub fn bridge_producer_consumer<P, C>(len: usize, producer: P, consumer: C) -> C::Result
where
P: Producer,
C: Consumer<P::Item>,
{
let splitter = LengthSplitter::new(producer.min_len(), producer.max_len(), len);
return helper(len, false, splitter, producer, consumer);
fn helper<P, C>(
len: usize,
migrated: bool,
mut splitter: LengthSplitter,
producer: P,
consumer: C,
) -> C::Result
where
P: Producer,
C: Consumer<P::Item>,
{
if consumer.full() {
consumer.into_folder().complete()
} else if splitter.try_split(len, migrated) {
let mid = len / 2;
let (left_producer, right_producer) = producer.split_at(mid);
let (left_consumer, right_consumer, reducer) = consumer.split_at(mid);
let (left_result, right_result) = join_context(
|context| {
helper(
mid,
context.migrated(),
splitter,
left_producer,
left_consumer,
)
},
|context| {
helper(
len - mid,
context.migrated(),
splitter,
right_producer,
right_consumer,
)
},
);
reducer.reduce(left_result, right_result)
} else {
producer.fold_with(consumer.into_folder()).complete()
}
}
}
/// A variant of [`bridge_producer_consumer()`] where the producer is an unindexed producer.
pub fn bridge_unindexed<P, C>(producer: P, consumer: C) -> C::Result
where
P: UnindexedProducer,
C: UnindexedConsumer<P::Item>,
{
let splitter = Splitter::new();
bridge_unindexed_producer_consumer(false, splitter, producer, consumer)
}
fn bridge_unindexed_producer_consumer<P, C>(
migrated: bool,
mut splitter: Splitter,
producer: P,
consumer: C,
) -> C::Result
where
P: UnindexedProducer,
C: UnindexedConsumer<P::Item>,
{
if consumer.full() {
consumer.into_folder().complete()
} else if splitter.try_split(migrated) {
match producer.split() {
(left_producer, Some(right_producer)) => {
let (reducer, left_consumer, right_consumer) =
(consumer.to_reducer(), consumer.split_off_left(), consumer);
let bridge = bridge_unindexed_producer_consumer;
let (left_result, right_result) = join_context(
|context| bridge(context.migrated(), splitter, left_producer, left_consumer),
|context| bridge(context.migrated(), splitter, right_producer, right_consumer),
);
reducer.reduce(left_result, right_result)
}
(producer, None) => producer.fold_with(consumer.into_folder()).complete(),
}
} else {
producer.fold_with(consumer.into_folder()).complete()
}
}

133
vendor/rayon/src/iter/positions.rs vendored Normal file
View File

@@ -0,0 +1,133 @@
use super::plumbing::*;
use super::*;
use std::fmt::{self, Debug};
/// `Positions` takes a predicate `predicate`, keeps only the elements that
/// match it, and yields their indices.
///
/// This struct is created by the [`positions()`] method on [`IndexedParallelIterator`]
///
/// [`positions()`]: IndexedParallelIterator::positions()
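///
/// # Examples
///
/// A small sketch of the observable behavior (assuming the `rayon` prelude):
///
/// ```
/// use rayon::prelude::*;
/// let data = [1, 2, 3, 4, 6];
/// let idx: Vec<usize> = data.par_iter().positions(|&x| x % 2 == 0).collect();
/// assert_eq!(idx, vec![1, 3, 4]);
/// ```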
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct Positions<I, P> {
base: I,
predicate: P,
}
impl<I: Debug, P> Debug for Positions<I, P> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Positions")
.field("base", &self.base)
.finish()
}
}
impl<I, P> Positions<I, P> {
/// Create a new `Positions` iterator.
pub(super) fn new(base: I, predicate: P) -> Self {
Positions { base, predicate }
}
}
impl<I, P> ParallelIterator for Positions<I, P>
where
I: IndexedParallelIterator,
P: Fn(I::Item) -> bool + Sync + Send,
{
type Item = usize;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer1 = PositionsConsumer::new(consumer, &self.predicate, 0);
self.base.drive(consumer1)
}
}
// ////////////////////////////////////////////////////////////////////////
// Consumer implementation
struct PositionsConsumer<'p, C, P> {
base: C,
predicate: &'p P,
offset: usize,
}
impl<'p, C, P> PositionsConsumer<'p, C, P> {
fn new(base: C, predicate: &'p P, offset: usize) -> Self {
PositionsConsumer {
base,
predicate,
offset,
}
}
}
impl<'p, T, C, P> Consumer<T> for PositionsConsumer<'p, C, P>
where
C: Consumer<usize>,
P: Fn(T) -> bool + Sync,
{
type Folder = PositionsFolder<'p, C::Folder, P>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, C::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
PositionsConsumer::new(left, self.predicate, self.offset),
PositionsConsumer::new(right, self.predicate, self.offset + index),
reducer,
)
}
fn into_folder(self) -> Self::Folder {
PositionsFolder {
base: self.base.into_folder(),
predicate: self.predicate,
offset: self.offset,
}
}
fn full(&self) -> bool {
self.base.full()
}
}
struct PositionsFolder<'p, F, P> {
base: F,
predicate: &'p P,
offset: usize,
}
impl<F, P, T> Folder<T> for PositionsFolder<'_, F, P>
where
F: Folder<usize>,
P: Fn(T) -> bool,
{
type Result = F::Result;
fn consume(mut self, item: T) -> Self {
let index = self.offset;
self.offset += 1;
if (self.predicate)(item) {
self.base = self.base.consume(index);
}
self
}
// This cannot easily specialize `consume_iter` to be better than
// the default, because that requires checking `self.base.full()`
// during a call to `self.base.consume_iter()`. (#632)
fn complete(self) -> Self::Result {
self.base.complete()
}
fn full(&self) -> bool {
self.base.full()
}
}

114
vendor/rayon/src/iter/product.rs vendored Normal file
View File

@@ -0,0 +1,114 @@
use super::plumbing::*;
use super::ParallelIterator;
use std::iter::{self, Product};
use std::marker::PhantomData;
pub(super) fn product<PI, P>(pi: PI) -> P
where
PI: ParallelIterator,
P: Send + Product<PI::Item> + Product,
{
pi.drive_unindexed(ProductConsumer::new())
}
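// Combines two partial products by funneling them back through `Product`,
// avoiding any extra `Mul` bound on `T`.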
fn mul<T: Product>(left: T, right: T) -> T {
[left, right].into_iter().product()
}
struct ProductConsumer<P: Send> {
_marker: PhantomData<*const P>,
}
unsafe impl<P: Send> Send for ProductConsumer<P> {}
impl<P: Send> ProductConsumer<P> {
fn new() -> ProductConsumer<P> {
ProductConsumer {
_marker: PhantomData,
}
}
}
impl<P, T> Consumer<T> for ProductConsumer<P>
where
P: Send + Product<T> + Product,
{
type Folder = ProductFolder<P>;
type Reducer = Self;
type Result = P;
fn split_at(self, _index: usize) -> (Self, Self, Self) {
(
ProductConsumer::new(),
ProductConsumer::new(),
ProductConsumer::new(),
)
}
fn into_folder(self) -> Self::Folder {
ProductFolder {
product: iter::empty::<T>().product(),
}
}
fn full(&self) -> bool {
false
}
}
impl<P, T> UnindexedConsumer<T> for ProductConsumer<P>
where
P: Send + Product<T> + Product,
{
fn split_off_left(&self) -> Self {
ProductConsumer::new()
}
fn to_reducer(&self) -> Self::Reducer {
ProductConsumer::new()
}
}
impl<P> Reducer<P> for ProductConsumer<P>
where
P: Send + Product,
{
fn reduce(self, left: P, right: P) -> P {
mul(left, right)
}
}
struct ProductFolder<P> {
product: P,
}
impl<P, T> Folder<T> for ProductFolder<P>
where
P: Product<T> + Product,
{
type Result = P;
fn consume(self, item: T) -> Self {
ProductFolder {
product: mul(self.product, iter::once(item).product()),
}
}
fn consume_iter<I>(self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
ProductFolder {
product: mul(self.product, iter.into_iter().product()),
}
}
fn complete(self) -> P {
self.product
}
fn full(&self) -> bool {
false
}
}

116
vendor/rayon/src/iter/reduce.rs vendored Normal file
View File

@@ -0,0 +1,116 @@
use super::plumbing::*;
use super::ParallelIterator;
pub(super) fn reduce<PI, R, ID, T>(pi: PI, identity: ID, reduce_op: R) -> T
where
PI: ParallelIterator<Item = T>,
R: Fn(T, T) -> T + Sync,
ID: Fn() -> T + Sync,
T: Send,
{
let consumer = ReduceConsumer {
identity: &identity,
reduce_op: &reduce_op,
};
pi.drive_unindexed(consumer)
}
struct ReduceConsumer<'r, R, ID> {
identity: &'r ID,
reduce_op: &'r R,
}
impl<'r, R, ID> Copy for ReduceConsumer<'r, R, ID> {}
impl<'r, R, ID> Clone for ReduceConsumer<'r, R, ID> {
fn clone(&self) -> Self {
*self
}
}
impl<'r, R, ID, T> Consumer<T> for ReduceConsumer<'r, R, ID>
where
R: Fn(T, T) -> T + Sync,
ID: Fn() -> T + Sync,
T: Send,
{
type Folder = ReduceFolder<'r, R, T>;
type Reducer = Self;
type Result = T;
fn split_at(self, _index: usize) -> (Self, Self, Self) {
(self, self, self)
}
fn into_folder(self) -> Self::Folder {
ReduceFolder {
reduce_op: self.reduce_op,
item: (self.identity)(),
}
}
fn full(&self) -> bool {
false
}
}
impl<'r, R, ID, T> UnindexedConsumer<T> for ReduceConsumer<'r, R, ID>
where
R: Fn(T, T) -> T + Sync,
ID: Fn() -> T + Sync,
T: Send,
{
fn split_off_left(&self) -> Self {
*self
}
fn to_reducer(&self) -> Self::Reducer {
*self
}
}
impl<'r, R, ID, T> Reducer<T> for ReduceConsumer<'r, R, ID>
where
R: Fn(T, T) -> T + Sync,
{
fn reduce(self, left: T, right: T) -> T {
(self.reduce_op)(left, right)
}
}
struct ReduceFolder<'r, R, T> {
reduce_op: &'r R,
item: T,
}
impl<'r, R, T> Folder<T> for ReduceFolder<'r, R, T>
where
R: Fn(T, T) -> T,
{
type Result = T;
fn consume(self, item: T) -> Self {
ReduceFolder {
reduce_op: self.reduce_op,
item: (self.reduce_op)(self.item, item),
}
}
fn consume_iter<I>(self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
ReduceFolder {
reduce_op: self.reduce_op,
item: iter.into_iter().fold(self.item, self.reduce_op),
}
}
fn complete(self) -> T {
self.item
}
fn full(&self) -> bool {
false
}
}

295
vendor/rayon/src/iter/repeat.rs vendored Normal file
View File

@@ -0,0 +1,295 @@
use super::plumbing::*;
use super::*;
use std::num::NonZeroUsize;
use std::{fmt, iter, mem};
/// Iterator adaptor for [the `repeat()` function].
///
/// [the `repeat()` function]: repeat()
#[derive(Debug, Clone)]
pub struct Repeat<T> {
element: T,
}
/// Creates a parallel iterator that endlessly repeats `element` (by
/// cloning it). Note that this iterator has "infinite" length, so
/// typically you would want to use `zip` or `take` or some other
/// means to shorten it, or consider using
/// [the `repeat_n()` function] instead.
///
/// [the `repeat_n()` function]: repeat_n()
///
/// # Examples
///
/// ```
/// use rayon::prelude::*;
/// use rayon::iter::repeat;
/// let x: Vec<(i32, i32)> = repeat(22).zip(0..3).collect();
/// assert_eq!(x, vec![(22, 0), (22, 1), (22, 2)]);
/// ```
pub fn repeat<T: Clone + Send>(element: T) -> Repeat<T> {
Repeat { element }
}
impl<T> Repeat<T>
where
T: Clone + Send,
{
/// Takes only `n` repeats of the element, similar to the general
/// [`take()`].
///
/// The resulting `RepeatN` is an `IndexedParallelIterator`, allowing
/// more functionality than `Repeat` alone.
///
/// [`take()`]: IndexedParallelIterator::take()
pub fn take(self, n: usize) -> RepeatN<T> {
repeat_n(self.element, n)
}
/// Iterates tuples, repeating the element with items from another
/// iterator, similar to the general [`zip()`].
///
/// [`zip()`]: IndexedParallelIterator::zip()
pub fn zip<Z>(self, zip_op: Z) -> Zip<RepeatN<T>, Z::Iter>
where
Z: IntoParallelIterator<Iter: IndexedParallelIterator>,
{
let z = zip_op.into_par_iter();
let n = z.len();
self.take(n).zip(z)
}
}
impl<T> ParallelIterator for Repeat<T>
where
T: Clone + Send,
{
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let producer = RepeatProducer {
element: self.element,
};
bridge_unindexed(producer, consumer)
}
}
/// Unindexed producer for `Repeat`.
struct RepeatProducer<T: Clone + Send> {
element: T,
}
impl<T: Clone + Send> UnindexedProducer for RepeatProducer<T> {
type Item = T;
fn split(self) -> (Self, Option<Self>) {
(
RepeatProducer {
element: self.element.clone(),
},
Some(RepeatProducer {
element: self.element,
}),
)
}
fn fold_with<F>(self, folder: F) -> F
where
F: Folder<T>,
{
folder.consume_iter(iter::repeat(self.element))
}
}
/// Iterator adaptor for [the `repeat_n()` function].
///
/// [the `repeat_n()` function]: repeat_n()
#[derive(Clone)]
pub struct RepeatN<T> {
inner: RepeatNProducer<T>,
}
/// Creates a parallel iterator that produces `n` repeats of `element`
/// (by cloning it).
///
/// # Examples
///
/// ```
/// use rayon::prelude::*;
/// use rayon::iter::repeat_n;
/// let x: Vec<(i32, i32)> = repeat_n(22, 3).zip(0..3).collect();
/// assert_eq!(x, vec![(22, 0), (22, 1), (22, 2)]);
/// ```
pub fn repeat_n<T: Clone + Send>(element: T, n: usize) -> RepeatN<T> {
let inner = match NonZeroUsize::new(n) {
Some(count) => RepeatNProducer::Repeats(element, count),
None => RepeatNProducer::Empty,
};
RepeatN { inner }
}
/// Creates a parallel iterator that produces `n` repeats of `element`
/// (by cloning it).
///
/// Deprecated in favor of [`repeat_n`] for consistency with the standard library.
#[deprecated(note = "use `repeat_n`")]
pub fn repeatn<T: Clone + Send>(element: T, n: usize) -> RepeatN<T> {
repeat_n(element, n)
}
impl<T: fmt::Debug> fmt::Debug for RepeatN<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut dbg = f.debug_struct("RepeatN");
if let RepeatNProducer::Repeats(element, count) = &self.inner {
dbg.field("count", &count.get())
.field("element", element)
.finish()
} else {
dbg.field("count", &0usize).finish_non_exhaustive()
}
}
}
impl<T> ParallelIterator for RepeatN<T>
where
T: Clone + Send,
{
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.inner.len())
}
}
impl<T> IndexedParallelIterator for RepeatN<T>
where
T: Clone + Send,
{
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
callback.callback(self.inner)
}
fn len(&self) -> usize {
self.inner.len()
}
}
/// Producer for `RepeatN`.
#[derive(Clone)]
enum RepeatNProducer<T> {
Repeats(T, NonZeroUsize),
Empty,
}
impl<T: Clone + Send> Producer for RepeatNProducer<T> {
type Item = T;
type IntoIter = Self;
fn into_iter(self) -> Self::IntoIter {
// We could potentially use `std::iter::RepeatN` with MSRV 1.82, but we have no way to
// create an empty instance without a value in hand, like `repeat_n(value, 0)`.
self
}
fn split_at(self, index: usize) -> (Self, Self) {
if let Self::Repeats(element, count) = self {
assert!(index <= count.get());
match (
NonZeroUsize::new(index),
NonZeroUsize::new(count.get() - index),
) {
(Some(left), Some(right)) => (
Self::Repeats(element.clone(), left),
Self::Repeats(element, right),
),
(Some(left), None) => (Self::Repeats(element, left), Self::Empty),
(None, Some(right)) => (Self::Empty, Self::Repeats(element, right)),
(None, None) => unreachable!(),
}
} else {
assert!(index == 0);
(Self::Empty, Self::Empty)
}
}
}
impl<T: Clone> Iterator for RepeatNProducer<T> {
type Item = T;
#[inline]
fn next(&mut self) -> Option<T> {
if let Self::Repeats(element, count) = self {
if let Some(rem) = NonZeroUsize::new(count.get() - 1) {
*count = rem;
Some(element.clone())
} else {
match mem::replace(self, Self::Empty) {
Self::Repeats(element, _) => Some(element),
Self::Empty => unreachable!(),
}
}
} else {
None
}
}
#[inline]
fn nth(&mut self, n: usize) -> Option<T> {
if let Self::Repeats(_, count) = self {
if let Some(rem) = NonZeroUsize::new(count.get().saturating_sub(n)) {
*count = rem;
return self.next();
}
*self = Self::Empty;
}
None
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.len();
(len, Some(len))
}
}
impl<T: Clone> DoubleEndedIterator for RepeatNProducer<T> {
#[inline]
fn next_back(&mut self) -> Option<T> {
self.next()
}
#[inline]
fn nth_back(&mut self, n: usize) -> Option<T> {
self.nth(n)
}
}
impl<T: Clone> ExactSizeIterator for RepeatNProducer<T> {
#[inline]
fn len(&self) -> usize {
match self {
Self::Repeats(_, count) => count.get(),
Self::Empty => 0,
}
}
}

119
vendor/rayon/src/iter/rev.rs vendored Normal file
View File

@@ -0,0 +1,119 @@
use super::plumbing::*;
use super::*;
use std::iter;
/// `Rev` is an iterator that produces elements in reverse order. This struct
/// is created by the [`rev()`] method on [`IndexedParallelIterator`]
///
/// [`rev()`]: IndexedParallelIterator::rev()
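///
/// # Examples
///
/// A small sketch of the observable behavior (assuming the `rayon` prelude):
///
/// ```
/// use rayon::prelude::*;
/// let v: Vec<i32> = (0..5).into_par_iter().rev().collect();
/// assert_eq!(v, vec![4, 3, 2, 1, 0]);
/// ```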
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct Rev<I> {
base: I,
}
impl<I> Rev<I> {
/// Creates a new `Rev` iterator.
pub(super) fn new(base: I) -> Self {
Rev { base }
}
}
impl<I> ParallelIterator for Rev<I>
where
I: IndexedParallelIterator,
{
type Item = I::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<I> IndexedParallelIterator for Rev<I>
where
I: IndexedParallelIterator,
{
fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
bridge(self, consumer)
}
fn len(&self) -> usize {
self.base.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
let len = self.base.len();
return self.base.with_producer(Callback { callback, len });
struct Callback<CB> {
callback: CB,
len: usize,
}
impl<T, CB> ProducerCallback<T> for Callback<CB>
where
CB: ProducerCallback<T>,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = T>,
{
let producer = RevProducer {
base,
len: self.len,
};
self.callback.callback(producer)
}
}
}
}
struct RevProducer<P> {
base: P,
len: usize,
}
impl<P> Producer for RevProducer<P>
where
P: Producer,
{
type Item = P::Item;
type IntoIter = iter::Rev<P::IntoIter>;
fn into_iter(self) -> Self::IntoIter {
self.base.into_iter().rev()
}
fn min_len(&self) -> usize {
self.base.min_len()
}
fn max_len(&self) -> usize {
self.base.max_len()
}
fn split_at(self, index: usize) -> (Self, Self) {
let (left, right) = self.base.split_at(self.len - index);
(
RevProducer {
base: right,
len: index,
},
RevProducer {
base: left,
len: self.len - index,
},
)
}
}

93
vendor/rayon/src/iter/skip.rs vendored Normal file
View File

@@ -0,0 +1,93 @@
use super::noop::NoopConsumer;
use super::plumbing::*;
use super::*;
/// `Skip` is an iterator that skips over the first `n` elements.
/// This struct is created by the [`skip()`] method on [`IndexedParallelIterator`]
///
/// [`skip()`]: IndexedParallelIterator::skip()
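///
/// # Examples
///
/// A small sketch of the observable behavior (assuming the `rayon` prelude):
///
/// ```
/// use rayon::prelude::*;
/// let v: Vec<i32> = (0..10).into_par_iter().skip(7).collect();
/// assert_eq!(v, vec![7, 8, 9]);
/// ```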
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct Skip<I> {
base: I,
n: usize,
}
impl<I> Skip<I>
where
I: IndexedParallelIterator,
{
/// Creates a new `Skip` iterator.
pub(super) fn new(base: I, n: usize) -> Self {
let n = Ord::min(base.len(), n);
Skip { base, n }
}
}
impl<I> ParallelIterator for Skip<I>
where
I: IndexedParallelIterator,
{
type Item = I::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<I> IndexedParallelIterator for Skip<I>
where
I: IndexedParallelIterator,
{
fn len(&self) -> usize {
self.base.len() - self.n
}
fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
bridge(self, consumer)
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
return self.base.with_producer(Callback {
callback,
n: self.n,
});
struct Callback<CB> {
callback: CB,
n: usize,
}
impl<T, CB> ProducerCallback<T> for Callback<CB>
where
CB: ProducerCallback<T>,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = T>,
{
crate::in_place_scope(|scope| {
let Self { callback, n } = self;
let (before_skip, after_skip) = base.split_at(n);
// Run the skipped part separately for side effects.
// We'll still get any panics propagated back by the scope.
scope.spawn(move |_| bridge_producer_consumer(n, before_skip, NoopConsumer));
callback.callback(after_skip)
})
}
}
}
}

140
vendor/rayon/src/iter/skip_any.rs vendored Normal file
View File

@@ -0,0 +1,140 @@
use super::plumbing::*;
use super::*;
use std::sync::atomic::{AtomicUsize, Ordering};
/// `SkipAny` is an iterator that skips over `n` elements from anywhere in `I`.
/// This struct is created by the [`skip_any()`] method on [`ParallelIterator`]
///
/// [`skip_any()`]: ParallelIterator::skip_any()
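///
/// # Examples
///
/// Unlike `skip()`, there is no guarantee *which* `n` elements are skipped,
/// only how many. A small sketch (assuming the `rayon` prelude):
///
/// ```
/// use rayon::prelude::*;
/// let v: Vec<i32> = (0..100).into_par_iter().skip_any(30).collect();
/// assert_eq!(v.len(), 70);
/// ```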
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone, Debug)]
pub struct SkipAny<I> {
base: I,
count: usize,
}
impl<I> SkipAny<I> {
/// Creates a new `SkipAny` iterator.
pub(super) fn new(base: I, count: usize) -> Self {
SkipAny { base, count }
}
}
impl<I> ParallelIterator for SkipAny<I>
where
I: ParallelIterator,
{
type Item = I::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer1 = SkipAnyConsumer {
base: consumer,
count: &AtomicUsize::new(self.count),
};
self.base.drive_unindexed(consumer1)
}
}
// ////////////////////////////////////////////////////////////////////////
// Consumer implementation
struct SkipAnyConsumer<'f, C> {
base: C,
count: &'f AtomicUsize,
}
impl<'f, T, C> Consumer<T> for SkipAnyConsumer<'f, C>
where
C: Consumer<T>,
T: Send,
{
type Folder = SkipAnyFolder<'f, C::Folder>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
SkipAnyConsumer { base: left, ..self },
SkipAnyConsumer {
base: right,
..self
},
reducer,
)
}
fn into_folder(self) -> Self::Folder {
SkipAnyFolder {
base: self.base.into_folder(),
count: self.count,
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<'f, T, C> UnindexedConsumer<T> for SkipAnyConsumer<'f, C>
where
C: UnindexedConsumer<T>,
T: Send,
{
fn split_off_left(&self) -> Self {
SkipAnyConsumer {
base: self.base.split_off_left(),
..*self
}
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct SkipAnyFolder<'f, C> {
base: C,
count: &'f AtomicUsize,
}
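// Attempts to atomically claim one unit from the shared counter; returns
// `false` once the counter has already reached zero and nothing is left.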
fn checked_decrement(u: &AtomicUsize) -> bool {
u.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |u| u.checked_sub(1))
.is_ok()
}
impl<'f, T, C> Folder<T> for SkipAnyFolder<'f, C>
where
C: Folder<T>,
{
type Result = C::Result;
fn consume(mut self, item: T) -> Self {
if !checked_decrement(self.count) {
self.base = self.base.consume(item);
}
self
}
fn consume_iter<I>(mut self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
self.base = self.base.consume_iter(
iter.into_iter()
.skip_while(move |_| checked_decrement(self.count)),
);
self
}
fn complete(self) -> C::Result {
self.base.complete()
}
fn full(&self) -> bool {
self.base.full()
}
}

162
vendor/rayon/src/iter/skip_any_while.rs vendored Normal file
View File

@@ -0,0 +1,162 @@
use super::plumbing::*;
use super::*;
use std::fmt;
use std::sync::atomic::{AtomicBool, Ordering};
/// `SkipAnyWhile` is an iterator that skips over elements from anywhere in `I`
/// until the callback returns `false`.
/// This struct is created by the [`skip_any_while()`] method on [`ParallelIterator`]
///
/// [`skip_any_while()`]: ParallelIterator::skip_any_while()
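///
/// # Examples
///
/// Every element that fails the predicate is kept, while matching elements
/// may or may not be skipped depending on thread timing. A small sketch
/// (assuming the `rayon` prelude):
///
/// ```
/// use rayon::prelude::*;
/// let v: Vec<i32> = (0..100).into_par_iter().skip_any_while(|&x| x < 50).collect();
/// assert!(v.len() >= 50);
/// ```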
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct SkipAnyWhile<I, P> {
base: I,
predicate: P,
}
impl<I: fmt::Debug, P> fmt::Debug for SkipAnyWhile<I, P> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("SkipAnyWhile")
.field("base", &self.base)
.finish()
}
}
impl<I, P> SkipAnyWhile<I, P> {
/// Creates a new `SkipAnyWhile` iterator.
pub(super) fn new(base: I, predicate: P) -> Self {
SkipAnyWhile { base, predicate }
}
}
impl<I, P> ParallelIterator for SkipAnyWhile<I, P>
where
I: ParallelIterator,
P: Fn(&I::Item) -> bool + Sync + Send,
{
type Item = I::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer1 = SkipAnyWhileConsumer {
base: consumer,
predicate: &self.predicate,
skipping: &AtomicBool::new(true),
};
self.base.drive_unindexed(consumer1)
}
}
// ////////////////////////////////////////////////////////////////////////
// Consumer implementation
struct SkipAnyWhileConsumer<'p, C, P> {
base: C,
predicate: &'p P,
skipping: &'p AtomicBool,
}
impl<'p, T, C, P> Consumer<T> for SkipAnyWhileConsumer<'p, C, P>
where
C: Consumer<T>,
P: Fn(&T) -> bool + Sync,
{
type Folder = SkipAnyWhileFolder<'p, C::Folder, P>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
SkipAnyWhileConsumer { base: left, ..self },
SkipAnyWhileConsumer {
base: right,
..self
},
reducer,
)
}
fn into_folder(self) -> Self::Folder {
SkipAnyWhileFolder {
base: self.base.into_folder(),
predicate: self.predicate,
skipping: self.skipping,
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<'p, T, C, P> UnindexedConsumer<T> for SkipAnyWhileConsumer<'p, C, P>
where
C: UnindexedConsumer<T>,
P: Fn(&T) -> bool + Sync,
{
fn split_off_left(&self) -> Self {
SkipAnyWhileConsumer {
base: self.base.split_off_left(),
..*self
}
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct SkipAnyWhileFolder<'p, C, P> {
base: C,
predicate: &'p P,
skipping: &'p AtomicBool,
}
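// Returns `true` while the shared `skipping` flag is set and the predicate
// still matches; the first non-matching item clears the flag for all threads.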
fn skip<T>(item: &T, skipping: &AtomicBool, predicate: &impl Fn(&T) -> bool) -> bool {
if !skipping.load(Ordering::Relaxed) {
return false;
}
if predicate(item) {
return true;
}
skipping.store(false, Ordering::Relaxed);
false
}
impl<'p, T, C, P> Folder<T> for SkipAnyWhileFolder<'p, C, P>
where
C: Folder<T>,
P: Fn(&T) -> bool + 'p,
{
type Result = C::Result;
fn consume(mut self, item: T) -> Self {
if !skip(&item, self.skipping, self.predicate) {
self.base = self.base.consume(item);
}
self
}
fn consume_iter<I>(mut self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
self.base = self.base.consume_iter(
iter.into_iter()
.skip_while(move |x| skip(x, self.skipping, self.predicate)),
);
self
}
fn complete(self) -> C::Result {
self.base.complete()
}
fn full(&self) -> bool {
self.base.full()
}
}

172
vendor/rayon/src/iter/splitter.rs vendored Normal file
View File

@@ -0,0 +1,172 @@
use super::plumbing::*;
use super::*;
use std::fmt::{self, Debug};
/// The `split` function takes arbitrary data and a closure that knows how to
/// split it, and turns this into a `ParallelIterator`.
///
/// # Examples
///
/// As a simple example, Rayon can recursively split ranges of indices
///
/// ```
/// use rayon::iter;
/// use rayon::prelude::*;
/// use std::ops::Range;
///
///
/// // We define a range of indices as follows
/// type Range1D = Range<usize>;
///
/// // Splitting it in two can be done like this
/// fn split_range1(r: Range1D) -> (Range1D, Option<Range1D>) {
/// // We are mathematically unable to split the range if there is only
/// // one point inside of it, but we could stop splitting before that.
/// if r.end - r.start <= 1 { return (r, None); }
///
/// // Here, our range is considered large enough to be splittable
/// let midpoint = r.start + (r.end - r.start) / 2;
/// (r.start..midpoint, Some(midpoint..r.end))
/// }
///
/// // By using iter::split, Rayon will split the range until it has enough work
/// // to feed the CPU cores, then give us the resulting sub-ranges
/// iter::split(0..4096, split_range1).for_each(|sub_range| {
/// // As our initial range had a power-of-two size, the final sub-ranges
/// // should have power-of-two sizes too
/// assert!((sub_range.end - sub_range.start).is_power_of_two());
/// });
/// ```
///
/// This recursive splitting can be extended to two or three dimensions,
/// to reproduce a classic "block-wise" parallelization scheme of graphics and
/// numerical simulations:
///
/// ```
/// # use rayon::iter;
/// # use rayon::prelude::*;
/// # use std::ops::Range;
/// # type Range1D = Range<usize>;
/// # fn split_range1(r: Range1D) -> (Range1D, Option<Range1D>) {
/// # if r.end - r.start <= 1 { return (r, None); }
/// # let midpoint = r.start + (r.end - r.start) / 2;
/// # (r.start..midpoint, Some(midpoint..r.end))
/// # }
/// #
/// // A two-dimensional range of indices can be built out of two 1D ones
/// struct Range2D {
/// // Range of horizontal indices
/// pub rx: Range1D,
///
/// // Range of vertical indices
/// pub ry: Range1D,
/// }
///
/// // We want to recursively split them by the largest dimension until we have
/// // enough sub-ranges to feed our mighty multi-core CPU. This function
/// // carries out one such split.
/// fn split_range2(r2: Range2D) -> (Range2D, Option<Range2D>) {
/// // Decide on which axis (horizontal/vertical) the range should be split
/// let width = r2.rx.end - r2.rx.start;
/// let height = r2.ry.end - r2.ry.start;
/// if width >= height {
/// // This is a wide range, split it on the horizontal axis
/// let (split_rx, ry) = (split_range1(r2.rx), r2.ry);
/// let out1 = Range2D {
/// rx: split_rx.0,
/// ry: ry.clone(),
/// };
/// let out2 = split_rx.1.map(|rx| Range2D { rx, ry });
/// (out1, out2)
/// } else {
/// // This is a tall range, split it on the vertical axis
/// let (rx, split_ry) = (r2.rx, split_range1(r2.ry));
/// let out1 = Range2D {
/// rx: rx.clone(),
/// ry: split_ry.0,
/// };
/// let out2 = split_ry.1.map(|ry| Range2D { rx, ry, });
/// (out1, out2)
/// }
/// }
///
/// // Again, rayon can handle the recursive splitting for us
/// let range = Range2D { rx: 0..800, ry: 0..600 };
/// iter::split(range, split_range2).for_each(|sub_range| {
/// // If the sub-ranges were indeed split by the largest dimension, then
/// // if no dimension was twice larger than the other initially, this
/// // property will remain true in the final sub-ranges.
/// let width = sub_range.rx.end - sub_range.rx.start;
/// let height = sub_range.ry.end - sub_range.ry.start;
/// assert!((width / 2 <= height) && (height / 2 <= width));
/// });
/// ```
///
pub fn split<D, S>(data: D, splitter: S) -> Split<D, S>
where
D: Send,
S: Fn(D) -> (D, Option<D>) + Sync,
{
Split { data, splitter }
}
/// `Split` is a parallel iterator using arbitrary data and a splitting function.
/// This struct is created by the [`split()`] function.
#[derive(Clone)]
pub struct Split<D, S> {
data: D,
splitter: S,
}
impl<D: Debug, S> Debug for Split<D, S> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Split").field("data", &self.data).finish()
}
}
impl<D, S> ParallelIterator for Split<D, S>
where
D: Send,
S: Fn(D) -> (D, Option<D>) + Sync + Send,
{
type Item = D;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let producer = SplitProducer {
data: self.data,
splitter: &self.splitter,
};
bridge_unindexed(producer, consumer)
}
}
struct SplitProducer<'a, D, S> {
data: D,
splitter: &'a S,
}
impl<'a, D, S> UnindexedProducer for SplitProducer<'a, D, S>
where
D: Send,
S: Fn(D) -> (D, Option<D>) + Sync,
{
type Item = D;
fn split(mut self) -> (Self, Option<Self>) {
let splitter = self.splitter;
let (left, right) = splitter(self.data);
self.data = left;
(self, right.map(|data| SplitProducer { data, splitter }))
}
fn fold_with<F>(self, folder: F) -> F
where
F: Folder<Self::Item>,
{
folder.consume(self.data)
}
}

135
vendor/rayon/src/iter/step_by.rs vendored Normal file
View File

@@ -0,0 +1,135 @@
use super::plumbing::*;
use super::*;
use std::iter;
/// `StepBy` is an iterator that skips `n` elements between each yield, where `n` is the given step.
/// This struct is created by the [`step_by()`] method on [`IndexedParallelIterator`]
///
/// [`step_by()`]: IndexedParallelIterator::step_by()
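///
/// # Examples
///
/// A small sketch of the observable behavior (assuming the `rayon` prelude):
///
/// ```
/// use rayon::prelude::*;
/// let v: Vec<i32> = (0..10).into_par_iter().step_by(3).collect();
/// assert_eq!(v, vec![0, 3, 6, 9]);
/// ```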
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct StepBy<I> {
base: I,
step: usize,
}
impl<I> StepBy<I> {
/// Creates a new `StepBy` iterator.
pub(super) fn new(base: I, step: usize) -> Self {
StepBy { base, step }
}
}
impl<I> ParallelIterator for StepBy<I>
where
I: IndexedParallelIterator,
{
type Item = I::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<I> IndexedParallelIterator for StepBy<I>
where
I: IndexedParallelIterator,
{
fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
bridge(self, consumer)
}
fn len(&self) -> usize {
self.base.len().div_ceil(self.step)
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
let len = self.base.len();
return self.base.with_producer(Callback {
callback,
step: self.step,
len,
});
struct Callback<CB> {
callback: CB,
step: usize,
len: usize,
}
impl<T, CB> ProducerCallback<T> for Callback<CB>
where
CB: ProducerCallback<T>,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = T>,
{
let producer = StepByProducer {
base,
step: self.step,
len: self.len,
};
self.callback.callback(producer)
}
}
}
}
// ////////////////////////////////////////////////////////////////////////
// Producer implementation
struct StepByProducer<P> {
base: P,
step: usize,
len: usize,
}
impl<P> Producer for StepByProducer<P>
where
P: Producer,
{
type Item = P::Item;
type IntoIter = iter::StepBy<P::IntoIter>;
fn into_iter(self) -> Self::IntoIter {
self.base.into_iter().step_by(self.step)
}
fn split_at(self, index: usize) -> (Self, Self) {
let elem_index = Ord::min(index * self.step, self.len);
let (left, right) = self.base.split_at(elem_index);
(
StepByProducer {
base: left,
step: self.step,
len: elem_index,
},
StepByProducer {
base: right,
step: self.step,
len: self.len - elem_index,
},
)
}
fn min_len(&self) -> usize {
self.base.min_len().div_ceil(self.step)
}
fn max_len(&self) -> usize {
self.base.max_len() / self.step
}
}

110
vendor/rayon/src/iter/sum.rs vendored Normal file
View File

@@ -0,0 +1,110 @@
use super::plumbing::*;
use super::ParallelIterator;
use std::iter::{self, Sum};
use std::marker::PhantomData;
pub(super) fn sum<PI, S>(pi: PI) -> S
where
PI: ParallelIterator,
S: Send + Sum<PI::Item> + Sum,
{
pi.drive_unindexed(SumConsumer::new())
}
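// Combines two partial sums by funneling them back through `Sum`, avoiding
// any extra `Add` bound on `T`.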
fn add<T: Sum>(left: T, right: T) -> T {
[left, right].into_iter().sum()
}
struct SumConsumer<S: Send> {
_marker: PhantomData<*const S>,
}
unsafe impl<S: Send> Send for SumConsumer<S> {}
impl<S: Send> SumConsumer<S> {
fn new() -> SumConsumer<S> {
SumConsumer {
_marker: PhantomData,
}
}
}
impl<S, T> Consumer<T> for SumConsumer<S>
where
S: Send + Sum<T> + Sum,
{
type Folder = SumFolder<S>;
type Reducer = Self;
type Result = S;
fn split_at(self, _index: usize) -> (Self, Self, Self) {
(SumConsumer::new(), SumConsumer::new(), SumConsumer::new())
}
fn into_folder(self) -> Self::Folder {
SumFolder {
sum: iter::empty::<T>().sum(),
}
}
fn full(&self) -> bool {
false
}
}
impl<S, T> UnindexedConsumer<T> for SumConsumer<S>
where
S: Send + Sum<T> + Sum,
{
fn split_off_left(&self) -> Self {
SumConsumer::new()
}
fn to_reducer(&self) -> Self::Reducer {
SumConsumer::new()
}
}
impl<S> Reducer<S> for SumConsumer<S>
where
S: Send + Sum,
{
fn reduce(self, left: S, right: S) -> S {
add(left, right)
}
}
struct SumFolder<S> {
sum: S,
}
impl<S, T> Folder<T> for SumFolder<S>
where
S: Sum<T> + Sum,
{
type Result = S;
fn consume(self, item: T) -> Self {
SumFolder {
sum: add(self.sum, iter::once(item).sum()),
}
}
fn consume_iter<I>(self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
SumFolder {
sum: add(self.sum, iter.into_iter().sum()),
}
}
fn complete(self) -> S {
self.sum
}
fn full(&self) -> bool {
false
}
}

84
vendor/rayon/src/iter/take.rs vendored Normal file
View File

@@ -0,0 +1,84 @@
use super::plumbing::*;
use super::*;
/// `Take` is an iterator that iterates over the first `n` elements.
/// This struct is created by the [`take()`] method on [`IndexedParallelIterator`]
///
/// [`take()`]: IndexedParallelIterator::take()
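///
/// # Examples
///
/// A small sketch of the observable behavior (assuming the `rayon` prelude):
///
/// ```
/// use rayon::prelude::*;
/// let v: Vec<i32> = (0..100).into_par_iter().take(5).collect();
/// assert_eq!(v, vec![0, 1, 2, 3, 4]);
/// ```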
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct Take<I> {
base: I,
n: usize,
}
impl<I> Take<I>
where
I: IndexedParallelIterator,
{
/// Creates a new `Take` iterator.
pub(super) fn new(base: I, n: usize) -> Self {
let n = Ord::min(base.len(), n);
Take { base, n }
}
}
impl<I> ParallelIterator for Take<I>
where
I: IndexedParallelIterator,
{
type Item = I::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<I> IndexedParallelIterator for Take<I>
where
I: IndexedParallelIterator,
{
fn len(&self) -> usize {
self.n
}
fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
bridge(self, consumer)
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
return self.base.with_producer(Callback {
callback,
n: self.n,
});
struct Callback<CB> {
callback: CB,
n: usize,
}
impl<T, CB> ProducerCallback<T> for Callback<CB>
where
CB: ProducerCallback<T>,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = T>,
{
let (producer, _) = base.split_at(self.n);
self.callback.callback(producer)
}
}
}
}

140
vendor/rayon/src/iter/take_any.rs vendored Normal file
View File

@@ -0,0 +1,140 @@
use super::plumbing::*;
use super::*;
use std::sync::atomic::{AtomicUsize, Ordering};
/// `TakeAny` is an iterator that iterates over `n` elements from anywhere in `I`.
/// This struct is created by the [`take_any()`] method on [`ParallelIterator`]
///
/// [`take_any()`]: ParallelIterator::take_any()
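///
/// # Examples
///
/// Unlike `take()`, there is no guarantee *which* `n` elements are taken,
/// only how many. A small sketch (assuming the `rayon` prelude):
///
/// ```
/// use rayon::prelude::*;
/// let v: Vec<i32> = (0..100).into_par_iter().take_any(5).collect();
/// assert_eq!(v.len(), 5);
/// assert!(v.iter().all(|&x| (0..100).contains(&x)));
/// ```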
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone, Debug)]
pub struct TakeAny<I> {
base: I,
count: usize,
}
impl<I> TakeAny<I> {
/// Creates a new `TakeAny` iterator.
pub(super) fn new(base: I, count: usize) -> Self {
TakeAny { base, count }
}
}
impl<I> ParallelIterator for TakeAny<I>
where
I: ParallelIterator,
{
type Item = I::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer1 = TakeAnyConsumer {
base: consumer,
count: &AtomicUsize::new(self.count),
};
self.base.drive_unindexed(consumer1)
}
}
// ////////////////////////////////////////////////////////////////////////
// Consumer implementation
struct TakeAnyConsumer<'f, C> {
base: C,
count: &'f AtomicUsize,
}
impl<'f, T, C> Consumer<T> for TakeAnyConsumer<'f, C>
where
C: Consumer<T>,
T: Send,
{
type Folder = TakeAnyFolder<'f, C::Folder>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
TakeAnyConsumer { base: left, ..self },
TakeAnyConsumer {
base: right,
..self
},
reducer,
)
}
fn into_folder(self) -> Self::Folder {
TakeAnyFolder {
base: self.base.into_folder(),
count: self.count,
}
}
fn full(&self) -> bool {
self.count.load(Ordering::Relaxed) == 0 || self.base.full()
}
}
impl<'f, T, C> UnindexedConsumer<T> for TakeAnyConsumer<'f, C>
where
C: UnindexedConsumer<T>,
T: Send,
{
fn split_off_left(&self) -> Self {
TakeAnyConsumer {
base: self.base.split_off_left(),
..*self
}
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct TakeAnyFolder<'f, C> {
base: C,
count: &'f AtomicUsize,
}
fn checked_decrement(u: &AtomicUsize) -> bool {
u.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |u| u.checked_sub(1))
.is_ok()
}
impl<'f, T, C> Folder<T> for TakeAnyFolder<'f, C>
where
C: Folder<T>,
{
type Result = C::Result;
fn consume(mut self, item: T) -> Self {
if checked_decrement(self.count) {
self.base = self.base.consume(item);
}
self
}
fn consume_iter<I>(mut self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
self.base = self.base.consume_iter(
iter.into_iter()
.take_while(move |_| checked_decrement(self.count)),
);
self
}
fn complete(self) -> C::Result {
self.base.complete()
}
fn full(&self) -> bool {
self.count.load(Ordering::Relaxed) == 0 || self.base.full()
}
}

162
vendor/rayon/src/iter/take_any_while.rs vendored Normal file
View File

@@ -0,0 +1,162 @@
use super::plumbing::*;
use super::*;
use std::fmt;
use std::sync::atomic::{AtomicBool, Ordering};
/// `TakeAnyWhile` is an iterator that iterates over elements from anywhere in `I`
/// until the callback returns `false`.
/// This struct is created by the [`take_any_while()`] method on [`ParallelIterator`]
///
/// [`take_any_while()`]: ParallelIterator::take_any_while()
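///
/// # Examples
///
/// Only elements matching the predicate are taken, but how many are taken
/// before the first failure depends on thread timing. A small sketch
/// (assuming the `rayon` prelude):
///
/// ```
/// use rayon::prelude::*;
/// let v: Vec<i32> = (0..100).into_par_iter().take_any_while(|&x| x < 50).collect();
/// assert!(v.len() <= 50);
/// assert!(v.iter().all(|&x| x < 50));
/// ```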
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct TakeAnyWhile<I, P> {
base: I,
predicate: P,
}
impl<I: fmt::Debug, P> fmt::Debug for TakeAnyWhile<I, P> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("TakeAnyWhile")
.field("base", &self.base)
.finish()
}
}
impl<I, P> TakeAnyWhile<I, P> {
/// Creates a new `TakeAnyWhile` iterator.
pub(super) fn new(base: I, predicate: P) -> Self {
TakeAnyWhile { base, predicate }
}
}
impl<I, P> ParallelIterator for TakeAnyWhile<I, P>
where
I: ParallelIterator,
P: Fn(&I::Item) -> bool + Sync + Send,
{
type Item = I::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer1 = TakeAnyWhileConsumer {
base: consumer,
predicate: &self.predicate,
taking: &AtomicBool::new(true),
};
self.base.drive_unindexed(consumer1)
}
}
// ////////////////////////////////////////////////////////////////////////
// Consumer implementation
struct TakeAnyWhileConsumer<'p, C, P> {
base: C,
predicate: &'p P,
taking: &'p AtomicBool,
}
impl<'p, T, C, P> Consumer<T> for TakeAnyWhileConsumer<'p, C, P>
where
C: Consumer<T>,
P: Fn(&T) -> bool + Sync,
{
type Folder = TakeAnyWhileFolder<'p, C::Folder, P>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
TakeAnyWhileConsumer { base: left, ..self },
TakeAnyWhileConsumer {
base: right,
..self
},
reducer,
)
}
fn into_folder(self) -> Self::Folder {
TakeAnyWhileFolder {
base: self.base.into_folder(),
predicate: self.predicate,
taking: self.taking,
}
}
fn full(&self) -> bool {
!self.taking.load(Ordering::Relaxed) || self.base.full()
}
}
impl<'p, T, C, P> UnindexedConsumer<T> for TakeAnyWhileConsumer<'p, C, P>
where
C: UnindexedConsumer<T>,
P: Fn(&T) -> bool + Sync,
{
fn split_off_left(&self) -> Self {
TakeAnyWhileConsumer {
base: self.base.split_off_left(),
..*self
}
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct TakeAnyWhileFolder<'p, C, P> {
base: C,
predicate: &'p P,
taking: &'p AtomicBool,
}
fn take<T>(item: &T, taking: &AtomicBool, predicate: &impl Fn(&T) -> bool) -> bool {
if !taking.load(Ordering::Relaxed) {
return false;
}
if predicate(item) {
return true;
}
taking.store(false, Ordering::Relaxed);
false
}
impl<'p, T, C, P> Folder<T> for TakeAnyWhileFolder<'p, C, P>
where
C: Folder<T>,
P: Fn(&T) -> bool + 'p,
{
type Result = C::Result;
fn consume(mut self, item: T) -> Self {
if take(&item, self.taking, self.predicate) {
self.base = self.base.consume(item);
}
self
}
fn consume_iter<I>(mut self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
self.base = self.base.consume_iter(
iter.into_iter()
.take_while(move |x| take(x, self.taking, self.predicate)),
);
self
}
fn complete(self) -> C::Result {
self.base.complete()
}
fn full(&self) -> bool {
!self.taking.load(Ordering::Relaxed) || self.base.full()
}
}

2392
vendor/rayon/src/iter/test.rs vendored Normal file

File diff suppressed because it is too large

282
vendor/rayon/src/iter/try_fold.rs vendored Normal file
View File

@@ -0,0 +1,282 @@
use super::plumbing::*;
use super::ParallelIterator;
use super::Try;
use std::fmt::{self, Debug};
use std::marker::PhantomData;
use std::ops::ControlFlow::{self, Break, Continue};
impl<I, U, ID, F> TryFold<I, U, ID, F> {
pub(super) fn new(base: I, identity: ID, fold_op: F) -> Self {
TryFold {
base,
identity,
fold_op,
marker: PhantomData,
}
}
}
/// `TryFold` is an iterator that applies a function over an iterator producing a single value.
/// This struct is created by the [`try_fold()`] method on [`ParallelIterator`]
///
/// [`try_fold()`]: ParallelIterator::try_fold()
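///
/// # Examples
///
/// A small sketch of short-circuiting arithmetic (assuming the `rayon`
/// prelude):
///
/// ```
/// use rayon::prelude::*;
/// let sum = (1..=10).into_par_iter()
///     .try_fold(|| 0i32, |acc, x| acc.checked_add(x))
///     .try_reduce(|| 0, i32::checked_add);
/// assert_eq!(sum, Some(55));
/// ```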
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct TryFold<I, U, ID, F> {
base: I,
identity: ID,
fold_op: F,
marker: PhantomData<U>,
}
impl<U, I: ParallelIterator + Debug, ID, F> Debug for TryFold<I, U, ID, F> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("TryFold").field("base", &self.base).finish()
}
}
impl<U, I, ID, F> ParallelIterator for TryFold<I, U, ID, F>
where
I: ParallelIterator,
F: Fn(U::Output, I::Item) -> U + Sync + Send,
ID: Fn() -> U::Output + Sync + Send,
U: Try + Send,
{
type Item = U;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer1 = TryFoldConsumer {
base: consumer,
identity: &self.identity,
fold_op: &self.fold_op,
marker: PhantomData,
};
self.base.drive_unindexed(consumer1)
}
}
struct TryFoldConsumer<'c, U, C, ID, F> {
base: C,
identity: &'c ID,
fold_op: &'c F,
marker: PhantomData<U>,
}
impl<'r, U, T, C, ID, F> Consumer<T> for TryFoldConsumer<'r, U, C, ID, F>
where
C: Consumer<U>,
F: Fn(U::Output, T) -> U + Sync,
ID: Fn() -> U::Output + Sync,
U: Try + Send,
{
type Folder = TryFoldFolder<'r, C::Folder, U, F>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
TryFoldConsumer { base: left, ..self },
TryFoldConsumer {
base: right,
..self
},
reducer,
)
}
fn into_folder(self) -> Self::Folder {
TryFoldFolder {
base: self.base.into_folder(),
control: Continue((self.identity)()),
fold_op: self.fold_op,
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<'r, U, T, C, ID, F> UnindexedConsumer<T> for TryFoldConsumer<'r, U, C, ID, F>
where
C: UnindexedConsumer<U>,
F: Fn(U::Output, T) -> U + Sync,
ID: Fn() -> U::Output + Sync,
U: Try + Send,
{
fn split_off_left(&self) -> Self {
TryFoldConsumer {
base: self.base.split_off_left(),
..*self
}
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct TryFoldFolder<'r, C, U: Try, F> {
base: C,
fold_op: &'r F,
control: ControlFlow<U::Residual, U::Output>,
}
impl<'r, C, U, F, T> Folder<T> for TryFoldFolder<'r, C, U, F>
where
C: Folder<U>,
F: Fn(U::Output, T) -> U + Sync,
U: Try,
{
type Result = C::Result;
fn consume(mut self, item: T) -> Self {
let fold_op = self.fold_op;
if let Continue(acc) = self.control {
self.control = fold_op(acc, item).branch();
}
self
}
fn complete(self) -> C::Result {
let item = match self.control {
Continue(c) => U::from_output(c),
Break(r) => U::from_residual(r),
};
self.base.consume(item).complete()
}
fn full(&self) -> bool {
match self.control {
Break(_) => true,
_ => self.base.full(),
}
}
}
// ///////////////////////////////////////////////////////////////////////////
impl<I, U: Try, F> TryFoldWith<I, U, F> {
pub(super) fn new(base: I, item: U::Output, fold_op: F) -> Self {
TryFoldWith {
base,
item,
fold_op,
}
}
}
/// `TryFoldWith` is an iterator that applies a function over an iterator producing a single value.
/// This struct is created by the [`try_fold_with()`] method on [`ParallelIterator`]
///
/// [`try_fold_with()`]: ParallelIterator::try_fold_with()
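///
/// # Examples
///
/// Like `try_fold()`, but seeded with a cloneable starting value instead of
/// an identity closure. A small sketch (assuming the `rayon` prelude):
///
/// ```
/// use rayon::prelude::*;
/// let sum = (1..=10).into_par_iter()
///     .try_fold_with(0i32, |acc, x| acc.checked_add(x))
///     .try_reduce(|| 0, i32::checked_add);
/// assert_eq!(sum, Some(55));
/// ```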
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct TryFoldWith<I, U: Try, F> {
base: I,
item: U::Output,
fold_op: F,
}
impl<I, U, F> Debug for TryFoldWith<I, U, F>
where
I: Debug,
U: Try<Output: Debug>,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("TryFoldWith")
.field("base", &self.base)
.field("item", &self.item)
.finish()
}
}
impl<U, I, F> ParallelIterator for TryFoldWith<I, U, F>
where
I: ParallelIterator,
F: Fn(U::Output, I::Item) -> U + Sync + Send,
U: Try<Output: Clone + Send> + Send,
{
type Item = U;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer1 = TryFoldWithConsumer {
base: consumer,
item: self.item,
fold_op: &self.fold_op,
};
self.base.drive_unindexed(consumer1)
}
}
struct TryFoldWithConsumer<'c, C, U: Try, F> {
base: C,
item: U::Output,
fold_op: &'c F,
}
impl<'r, U, T, C, F> Consumer<T> for TryFoldWithConsumer<'r, C, U, F>
where
C: Consumer<U>,
F: Fn(U::Output, T) -> U + Sync,
U: Try<Output: Clone + Send> + Send,
{
type Folder = TryFoldFolder<'r, C::Folder, U, F>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
TryFoldWithConsumer {
base: left,
item: self.item.clone(),
..self
},
TryFoldWithConsumer {
base: right,
..self
},
reducer,
)
}
fn into_folder(self) -> Self::Folder {
TryFoldFolder {
base: self.base.into_folder(),
control: Continue(self.item),
fold_op: self.fold_op,
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<'r, U, T, C, F> UnindexedConsumer<T> for TryFoldWithConsumer<'r, C, U, F>
where
C: UnindexedConsumer<U>,
F: Fn(U::Output, T) -> U + Sync,
U: Try<Output: Clone + Send> + Send,
{
fn split_off_left(&self) -> Self {
TryFoldWithConsumer {
base: self.base.split_off_left(),
item: self.item.clone(),
..*self
}
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}

131
vendor/rayon/src/iter/try_reduce.rs vendored Normal file
View File

@@ -0,0 +1,131 @@
use super::plumbing::*;
use super::ParallelIterator;
use super::Try;
use std::ops::ControlFlow::{self, Break, Continue};
use std::sync::atomic::{AtomicBool, Ordering};
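// Short-circuiting reduction: the shared `full` flag is set as soon as any
// folder observes a `Break` residual, so other splits stop consuming early.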
pub(super) fn try_reduce<PI, R, ID, T>(pi: PI, identity: ID, reduce_op: R) -> T
where
PI: ParallelIterator<Item = T>,
R: Fn(T::Output, T::Output) -> T + Sync,
ID: Fn() -> T::Output + Sync,
T: Try + Send,
{
let full = AtomicBool::new(false);
let consumer = TryReduceConsumer {
identity: &identity,
reduce_op: &reduce_op,
full: &full,
};
pi.drive_unindexed(consumer)
}
struct TryReduceConsumer<'r, R, ID> {
identity: &'r ID,
reduce_op: &'r R,
full: &'r AtomicBool,
}
impl<'r, R, ID> Copy for TryReduceConsumer<'r, R, ID> {}
impl<'r, R, ID> Clone for TryReduceConsumer<'r, R, ID> {
fn clone(&self) -> Self {
*self
}
}
impl<'r, R, ID, T> Consumer<T> for TryReduceConsumer<'r, R, ID>
where
R: Fn(T::Output, T::Output) -> T + Sync,
ID: Fn() -> T::Output + Sync,
T: Try + Send,
{
type Folder = TryReduceFolder<'r, R, T>;
type Reducer = Self;
type Result = T;
fn split_at(self, _index: usize) -> (Self, Self, Self) {
(self, self, self)
}
fn into_folder(self) -> Self::Folder {
TryReduceFolder {
reduce_op: self.reduce_op,
control: Continue((self.identity)()),
full: self.full,
}
}
fn full(&self) -> bool {
self.full.load(Ordering::Relaxed)
}
}
impl<'r, R, ID, T> UnindexedConsumer<T> for TryReduceConsumer<'r, R, ID>
where
R: Fn(T::Output, T::Output) -> T + Sync,
ID: Fn() -> T::Output + Sync,
T: Try + Send,
{
fn split_off_left(&self) -> Self {
*self
}
fn to_reducer(&self) -> Self::Reducer {
*self
}
}
impl<'r, R, ID, T> Reducer<T> for TryReduceConsumer<'r, R, ID>
where
R: Fn(T::Output, T::Output) -> T + Sync,
T: Try,
{
fn reduce(self, left: T, right: T) -> T {
match (left.branch(), right.branch()) {
(Continue(left), Continue(right)) => (self.reduce_op)(left, right),
(Break(r), _) | (_, Break(r)) => T::from_residual(r),
}
}
}
struct TryReduceFolder<'r, R, T: Try> {
reduce_op: &'r R,
control: ControlFlow<T::Residual, T::Output>,
full: &'r AtomicBool,
}
impl<'r, R, T> Folder<T> for TryReduceFolder<'r, R, T>
where
R: Fn(T::Output, T::Output) -> T,
T: Try,
{
type Result = T;
fn consume(mut self, item: T) -> Self {
let reduce_op = self.reduce_op;
self.control = match (self.control, item.branch()) {
(Continue(left), Continue(right)) => reduce_op(left, right).branch(),
(control @ Break(_), _) | (_, control @ Break(_)) => control,
};
if let Break(_) = self.control {
self.full.store(true, Ordering::Relaxed);
}
self
}
fn complete(self) -> T {
match self.control {
Continue(c) => T::from_output(c),
Break(r) => T::from_residual(r),
}
}
fn full(&self) -> bool {
match self.control {
Break(_) => true,
_ => self.full.load(Ordering::Relaxed),
}
}
}

132
vendor/rayon/src/iter/try_reduce_with.rs vendored Normal file
View File

@@ -0,0 +1,132 @@
use super::plumbing::*;
use super::ParallelIterator;
use super::Try;
use std::ops::ControlFlow::{self, Break, Continue};
use std::sync::atomic::{AtomicBool, Ordering};
pub(super) fn try_reduce_with<PI, R, T>(pi: PI, reduce_op: R) -> Option<T>
where
PI: ParallelIterator<Item = T>,
R: Fn(T::Output, T::Output) -> T + Sync,
T: Try + Send,
{
let full = AtomicBool::new(false);
let consumer = TryReduceWithConsumer {
reduce_op: &reduce_op,
full: &full,
};
pi.drive_unindexed(consumer)
}
struct TryReduceWithConsumer<'r, R> {
reduce_op: &'r R,
full: &'r AtomicBool,
}
impl<'r, R> Copy for TryReduceWithConsumer<'r, R> {}
impl<'r, R> Clone for TryReduceWithConsumer<'r, R> {
fn clone(&self) -> Self {
*self
}
}
impl<'r, R, T> Consumer<T> for TryReduceWithConsumer<'r, R>
where
R: Fn(T::Output, T::Output) -> T + Sync,
T: Try + Send,
{
type Folder = TryReduceWithFolder<'r, R, T>;
type Reducer = Self;
type Result = Option<T>;
fn split_at(self, _index: usize) -> (Self, Self, Self) {
(self, self, self)
}
fn into_folder(self) -> Self::Folder {
TryReduceWithFolder {
reduce_op: self.reduce_op,
opt_control: None,
full: self.full,
}
}
fn full(&self) -> bool {
self.full.load(Ordering::Relaxed)
}
}
impl<'r, R, T> UnindexedConsumer<T> for TryReduceWithConsumer<'r, R>
where
R: Fn(T::Output, T::Output) -> T + Sync,
T: Try + Send,
{
fn split_off_left(&self) -> Self {
*self
}
fn to_reducer(&self) -> Self::Reducer {
*self
}
}
impl<'r, R, T> Reducer<Option<T>> for TryReduceWithConsumer<'r, R>
where
R: Fn(T::Output, T::Output) -> T + Sync,
T: Try,
{
fn reduce(self, left: Option<T>, right: Option<T>) -> Option<T> {
let reduce_op = self.reduce_op;
match (left, right) {
(Some(left), Some(right)) => match (left.branch(), right.branch()) {
(Continue(left), Continue(right)) => Some(reduce_op(left, right)),
(Break(r), _) | (_, Break(r)) => Some(T::from_residual(r)),
},
(None, x) | (x, None) => x,
}
}
}
struct TryReduceWithFolder<'r, R, T: Try> {
reduce_op: &'r R,
opt_control: Option<ControlFlow<T::Residual, T::Output>>,
full: &'r AtomicBool,
}
impl<'r, R, T> Folder<T> for TryReduceWithFolder<'r, R, T>
where
R: Fn(T::Output, T::Output) -> T,
T: Try,
{
type Result = Option<T>;
fn consume(mut self, item: T) -> Self {
let reduce_op = self.reduce_op;
let control = match (self.opt_control, item.branch()) {
(Some(Continue(left)), Continue(right)) => reduce_op(left, right).branch(),
(Some(control @ Break(_)), _) | (_, control) => control,
};
if let Break(_) = control {
self.full.store(true, Ordering::Relaxed)
}
self.opt_control = Some(control);
self
}
fn complete(self) -> Option<T> {
match self.opt_control {
Some(Continue(c)) => Some(T::from_output(c)),
Some(Break(r)) => Some(T::from_residual(r)),
None => None,
}
}
fn full(&self) -> bool {
match self.opt_control {
Some(Break(_)) => true,
_ => self.full.load(Ordering::Relaxed),
}
}
}

524
vendor/rayon/src/iter/unzip.rs vendored Normal file
View File

@@ -0,0 +1,524 @@
use super::plumbing::*;
use super::*;
/// This trait abstracts the different ways we can "unzip" one parallel
/// iterator into two distinct consumers, which we can handle almost
/// identically apart from how to process the individual items.
trait UnzipOp<T>: Sync + Send {
/// The type of item expected by the left consumer.
type Left: Send;
/// The type of item expected by the right consumer.
type Right: Send;
/// Consumes one item and feeds it to one or both of the underlying folders.
fn consume<FA, FB>(&self, item: T, left: FA, right: FB) -> (FA, FB)
where
FA: Folder<Self::Left>,
FB: Folder<Self::Right>;
/// Reports whether this op may support indexed consumers.
/// - e.g. true for `unzip`, where the item count is passed through directly.
/// - e.g. false for `partition`, where the split between the two sides is
///   not known in advance.
fn indexable() -> bool {
false
}
}
/// Runs an unzip-like operation into default `ParallelExtend` collections.
fn execute<I, OP, FromA, FromB>(pi: I, op: OP) -> (FromA, FromB)
where
I: ParallelIterator,
OP: UnzipOp<I::Item>,
FromA: Default + Send + ParallelExtend<OP::Left>,
FromB: Default + Send + ParallelExtend<OP::Right>,
{
let mut a = FromA::default();
let mut b = FromB::default();
execute_into(&mut a, &mut b, pi, op);
(a, b)
}
/// Runs an unzip-like operation into `ParallelExtend` collections.
fn execute_into<I, OP, FromA, FromB>(a: &mut FromA, b: &mut FromB, pi: I, op: OP)
where
I: ParallelIterator,
OP: UnzipOp<I::Item>,
FromA: Send + ParallelExtend<OP::Left>,
FromB: Send + ParallelExtend<OP::Right>,
{
// We have no idea what the consumers will look like for these
// collections' `par_extend`, but we can intercept them in our own
// `drive_unindexed`. Start with the left side, type `A`:
let iter = UnzipA { base: pi, op, b };
a.par_extend(iter);
}
/// Unzips the items of a parallel iterator into a pair of arbitrary
/// `ParallelExtend` containers.
///
/// This is called by `ParallelIterator::unzip`.
pub(super) fn unzip<I, A, B, FromA, FromB>(pi: I) -> (FromA, FromB)
where
I: ParallelIterator<Item = (A, B)>,
FromA: Default + Send + ParallelExtend<A>,
FromB: Default + Send + ParallelExtend<B>,
A: Send,
B: Send,
{
execute(pi, Unzip)
}
/// Unzips an `IndexedParallelIterator` into two arbitrary `Consumer`s.
///
/// This is called by `super::collect::unzip_into_vecs`.
pub(super) fn unzip_indexed<I, A, B, CA, CB>(pi: I, left: CA, right: CB) -> (CA::Result, CB::Result)
where
I: IndexedParallelIterator<Item = (A, B)>,
CA: Consumer<A>,
CB: Consumer<B>,
A: Send,
B: Send,
{
let consumer = UnzipConsumer {
op: &Unzip,
left,
right,
};
pi.drive(consumer)
}
/// An `UnzipOp` that splits a tuple directly into the two consumers.
struct Unzip;
impl<A: Send, B: Send> UnzipOp<(A, B)> for Unzip {
type Left = A;
type Right = B;
fn consume<FA, FB>(&self, item: (A, B), left: FA, right: FB) -> (FA, FB)
where
FA: Folder<A>,
FB: Folder<B>,
{
(left.consume(item.0), right.consume(item.1))
}
fn indexable() -> bool {
true
}
}
/// Partitions the items of a parallel iterator into a pair of arbitrary
/// `ParallelExtend` containers.
///
/// This is called by `ParallelIterator::partition`.
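///
/// A small sketch of the user-facing behavior (assuming the `rayon` prelude):
///
/// ```
/// use rayon::prelude::*;
/// let (evens, odds): (Vec<i32>, Vec<i32>) =
///     (0..8).into_par_iter().partition(|&x| x % 2 == 0);
/// assert_eq!(evens, vec![0, 2, 4, 6]);
/// assert_eq!(odds, vec![1, 3, 5, 7]);
/// ```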
pub(super) fn partition<I, A, B, P>(pi: I, predicate: P) -> (A, B)
where
I: ParallelIterator,
A: Default + Send + ParallelExtend<I::Item>,
B: Default + Send + ParallelExtend<I::Item>,
P: Fn(&I::Item) -> bool + Sync + Send,
{
execute(pi, Partition { predicate })
}
/// An `UnzipOp` that routes items depending on a predicate function.
struct Partition<P> {
predicate: P,
}
impl<P, T> UnzipOp<T> for Partition<P>
where
P: Fn(&T) -> bool + Sync + Send,
T: Send,
{
type Left = T;
type Right = T;
fn consume<FA, FB>(&self, item: T, left: FA, right: FB) -> (FA, FB)
where
FA: Folder<T>,
FB: Folder<T>,
{
if (self.predicate)(&item) {
(left.consume(item), right)
} else {
(left, right.consume(item))
}
}
}
/// Partitions and maps the items of a parallel iterator into a pair of
/// arbitrary `ParallelExtend` containers.
///
/// This is called by `ParallelIterator::partition_map`.
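///
/// A minimal sketch of the public call, using the re-exported `Either`:
///
/// ```
/// use rayon::iter::Either;
/// use rayon::prelude::*;
///
/// let (squares, cubes): (Vec<i32>, Vec<i32>) = (1..5)
///     .into_par_iter()
///     .partition_map(|x| {
///         if x % 2 == 0 {
///             Either::Left(x * x)
///         } else {
///             Either::Right(x * x * x)
///         }
///     });
/// assert_eq!(squares, vec![4, 16]);
/// assert_eq!(cubes, vec![1, 27]);
/// ```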
pub(super) fn partition_map<I, A, B, P, L, R>(pi: I, predicate: P) -> (A, B)
where
I: ParallelIterator,
A: Default + Send + ParallelExtend<L>,
B: Default + Send + ParallelExtend<R>,
P: Fn(I::Item) -> Either<L, R> + Sync + Send,
L: Send,
R: Send,
{
execute(pi, PartitionMap { predicate })
}
/// An `UnzipOp` that routes items depending on how they map into `Either`.
struct PartitionMap<P> {
predicate: P,
}
impl<P, L, R, T> UnzipOp<T> for PartitionMap<P>
where
P: Fn(T) -> Either<L, R> + Sync + Send,
L: Send,
R: Send,
{
type Left = L;
type Right = R;
fn consume<FA, FB>(&self, item: T, left: FA, right: FB) -> (FA, FB)
where
FA: Folder<L>,
FB: Folder<R>,
{
match (self.predicate)(item) {
Either::Left(item) => (left.consume(item), right),
Either::Right(item) => (left, right.consume(item)),
}
}
}
/// A fake iterator to intercept the `Consumer` for type `A`.
struct UnzipA<'b, I, OP, FromB> {
base: I,
op: OP,
b: &'b mut FromB,
}
impl<'b, I, OP, FromB> ParallelIterator for UnzipA<'b, I, OP, FromB>
where
I: ParallelIterator,
OP: UnzipOp<I::Item>,
FromB: Send + ParallelExtend<OP::Right>,
{
type Item = OP::Left;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let mut result = None;
{
// Now it's time to find the consumer for type `B`
let iter = UnzipB {
base: self.base,
op: self.op,
left_consumer: consumer,
left_result: &mut result,
};
self.b.par_extend(iter);
}
// NB: If for some reason `b.par_extend` doesn't actually drive the
// iterator, then we won't have a result for the left side to return
// at all. We can't fake an arbitrary consumer's result, so panic.
result.expect("unzip consumers didn't execute!")
}
fn opt_len(&self) -> Option<usize> {
if OP::indexable() {
self.base.opt_len()
} else {
None
}
}
}
/// A fake iterator to intercept the `Consumer` for type `B`.
struct UnzipB<'r, I, OP, CA>
where
I: ParallelIterator,
OP: UnzipOp<I::Item>,
CA: UnindexedConsumer<OP::Left>,
{
base: I,
op: OP,
left_consumer: CA,
left_result: &'r mut Option<CA::Result>,
}
impl<'r, I, OP, CA> ParallelIterator for UnzipB<'r, I, OP, CA>
where
I: ParallelIterator,
OP: UnzipOp<I::Item>,
CA: UnindexedConsumer<OP::Left>,
{
type Item = OP::Right;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
// Now that we have two consumers, we can unzip the real iterator.
let consumer = UnzipConsumer {
op: &self.op,
left: self.left_consumer,
right: consumer,
};
let result = self.base.drive_unindexed(consumer);
*self.left_result = Some(result.0);
result.1
}
fn opt_len(&self) -> Option<usize> {
if OP::indexable() {
self.base.opt_len()
} else {
None
}
}
}
/// `Consumer` that unzips into two other `Consumer`s
struct UnzipConsumer<'a, OP, CA, CB> {
op: &'a OP,
left: CA,
right: CB,
}
impl<'a, T, OP, CA, CB> Consumer<T> for UnzipConsumer<'a, OP, CA, CB>
where
OP: UnzipOp<T>,
CA: Consumer<OP::Left>,
CB: Consumer<OP::Right>,
{
type Folder = UnzipFolder<'a, OP, CA::Folder, CB::Folder>;
type Reducer = UnzipReducer<CA::Reducer, CB::Reducer>;
type Result = (CA::Result, CB::Result);
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left1, left2, left_reducer) = self.left.split_at(index);
let (right1, right2, right_reducer) = self.right.split_at(index);
(
UnzipConsumer {
op: self.op,
left: left1,
right: right1,
},
UnzipConsumer {
op: self.op,
left: left2,
right: right2,
},
UnzipReducer {
left: left_reducer,
right: right_reducer,
},
)
}
fn into_folder(self) -> Self::Folder {
UnzipFolder {
op: self.op,
left: self.left.into_folder(),
right: self.right.into_folder(),
}
}
fn full(&self) -> bool {
// don't stop until everyone is full
self.left.full() && self.right.full()
}
}
impl<'a, T, OP, CA, CB> UnindexedConsumer<T> for UnzipConsumer<'a, OP, CA, CB>
where
OP: UnzipOp<T>,
CA: UnindexedConsumer<OP::Left>,
CB: UnindexedConsumer<OP::Right>,
{
fn split_off_left(&self) -> Self {
UnzipConsumer {
op: self.op,
left: self.left.split_off_left(),
right: self.right.split_off_left(),
}
}
fn to_reducer(&self) -> Self::Reducer {
UnzipReducer {
left: self.left.to_reducer(),
right: self.right.to_reducer(),
}
}
}
/// `Folder` that unzips into two other `Folder`s
struct UnzipFolder<'a, OP, FA, FB> {
op: &'a OP,
left: FA,
right: FB,
}
impl<'a, T, OP, FA, FB> Folder<T> for UnzipFolder<'a, OP, FA, FB>
where
OP: UnzipOp<T>,
FA: Folder<OP::Left>,
FB: Folder<OP::Right>,
{
type Result = (FA::Result, FB::Result);
fn consume(self, item: T) -> Self {
let (left, right) = self.op.consume(item, self.left, self.right);
UnzipFolder {
op: self.op,
left,
right,
}
}
fn complete(self) -> Self::Result {
(self.left.complete(), self.right.complete())
}
fn full(&self) -> bool {
// don't stop until everyone is full
self.left.full() && self.right.full()
}
}
/// `Reducer` that unzips into two other `Reducer`s
struct UnzipReducer<RA, RB> {
left: RA,
right: RB,
}
impl<A, B, RA, RB> Reducer<(A, B)> for UnzipReducer<RA, RB>
where
RA: Reducer<A>,
RB: Reducer<B>,
{
fn reduce(self, left: (A, B), right: (A, B)) -> (A, B) {
(
self.left.reduce(left.0, right.0),
self.right.reduce(left.1, right.1),
)
}
}
impl<A, B, FromA, FromB> ParallelExtend<(A, B)> for (FromA, FromB)
where
A: Send,
B: Send,
FromA: Send + ParallelExtend<A>,
FromB: Send + ParallelExtend<B>,
{
fn par_extend<I>(&mut self, pi: I)
where
I: IntoParallelIterator<Item = (A, B)>,
{
execute_into(&mut self.0, &mut self.1, pi.into_par_iter(), Unzip);
}
}
impl<L, R, A, B> ParallelExtend<Either<L, R>> for (A, B)
where
L: Send,
R: Send,
A: Send + ParallelExtend<L>,
B: Send + ParallelExtend<R>,
{
fn par_extend<I>(&mut self, pi: I)
where
I: IntoParallelIterator<Item = Either<L, R>>,
{
execute_into(&mut self.0, &mut self.1, pi.into_par_iter(), UnEither);
}
}
/// An `UnzipOp` that routes items depending on their `Either` variant.
struct UnEither;
impl<L, R> UnzipOp<Either<L, R>> for UnEither
where
L: Send,
R: Send,
{
type Left = L;
type Right = R;
fn consume<FL, FR>(&self, item: Either<L, R>, left: FL, right: FR) -> (FL, FR)
where
FL: Folder<L>,
FR: Folder<R>,
{
match item {
Either::Left(item) => (left.consume(item), right),
Either::Right(item) => (left, right.consume(item)),
}
}
}
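/// Collecting a parallel iterator of pairs into a pair of collections
/// unzips the items; a short sketch:
///
/// ```
/// use rayon::prelude::*;
///
/// let (a, b): (Vec<u32>, Vec<u32>) =
///     (0..4u32).into_par_iter().map(|x| (x, x * 10)).collect();
/// assert_eq!(a, vec![0, 1, 2, 3]);
/// assert_eq!(b, vec![0, 10, 20, 30]);
/// ```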
impl<A, B, FromA, FromB> FromParallelIterator<(A, B)> for (FromA, FromB)
where
A: Send,
B: Send,
FromA: Send + FromParallelIterator<A>,
FromB: Send + FromParallelIterator<B>,
{
fn from_par_iter<I>(pi: I) -> Self
where
I: IntoParallelIterator<Item = (A, B)>,
{
let (a, b): (Collector<FromA>, Collector<FromB>) = pi.into_par_iter().unzip();
(a.result.unwrap(), b.result.unwrap())
}
}
impl<L, R, A, B> FromParallelIterator<Either<L, R>> for (A, B)
where
L: Send,
R: Send,
A: Send + FromParallelIterator<L>,
B: Send + FromParallelIterator<R>,
{
fn from_par_iter<I>(pi: I) -> Self
where
I: IntoParallelIterator<Item = Either<L, R>>,
{
fn identity<T>(x: T) -> T {
x
}
let (a, b): (Collector<A>, Collector<B>) = pi.into_par_iter().partition_map(identity);
(a.result.unwrap(), b.result.unwrap())
}
}
/// Shim to implement a one-time `ParallelExtend` using `FromParallelIterator`.
struct Collector<FromT> {
result: Option<FromT>,
}
impl<FromT> Default for Collector<FromT> {
fn default() -> Self {
Collector { result: None }
}
}
impl<T, FromT> ParallelExtend<T> for Collector<FromT>
where
T: Send,
FromT: Send + FromParallelIterator<T>,
{
fn par_extend<I>(&mut self, pi: I)
where
I: IntoParallelIterator<Item = T>,
{
debug_assert!(self.result.is_none());
self.result = Some(pi.into_par_iter().collect());
}
}

323
vendor/rayon/src/iter/update.rs vendored Normal file
View File

@@ -0,0 +1,323 @@
use super::plumbing::*;
use super::*;
use std::fmt::{self, Debug};
/// `Update` is an iterator that mutates the elements of an
/// underlying iterator before they are yielded.
///
/// This struct is created by the [`update()`] method on [`ParallelIterator`]
///
/// [`update()`]: ParallelIterator::update()
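///
/// A minimal sketch:
///
/// ```
/// use rayon::prelude::*;
///
/// let doubled: Vec<i32> = (0..4).into_par_iter().update(|x| *x *= 2).collect();
/// assert_eq!(doubled, vec![0, 2, 4, 6]);
/// ```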
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone)]
pub struct Update<I, F> {
base: I,
update_op: F,
}
impl<I: Debug, F> Debug for Update<I, F> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Update").field("base", &self.base).finish()
}
}
impl<I, F> Update<I, F> {
/// Creates a new `Update` iterator.
pub(super) fn new(base: I, update_op: F) -> Self {
Update { base, update_op }
}
}
impl<I, F> ParallelIterator for Update<I, F>
where
I: ParallelIterator,
F: Fn(&mut I::Item) + Send + Sync,
{
type Item = I::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let consumer1 = UpdateConsumer::new(consumer, &self.update_op);
self.base.drive_unindexed(consumer1)
}
fn opt_len(&self) -> Option<usize> {
self.base.opt_len()
}
}
impl<I, F> IndexedParallelIterator for Update<I, F>
where
I: IndexedParallelIterator,
F: Fn(&mut I::Item) + Send + Sync,
{
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
let consumer1 = UpdateConsumer::new(consumer, &self.update_op);
self.base.drive(consumer1)
}
fn len(&self) -> usize {
self.base.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
return self.base.with_producer(Callback {
callback,
update_op: self.update_op,
});
struct Callback<CB, F> {
callback: CB,
update_op: F,
}
impl<T, F, CB> ProducerCallback<T> for Callback<CB, F>
where
CB: ProducerCallback<T>,
F: Fn(&mut T) + Send + Sync,
{
type Output = CB::Output;
fn callback<P>(self, base: P) -> CB::Output
where
P: Producer<Item = T>,
{
let producer = UpdateProducer {
base,
update_op: &self.update_op,
};
self.callback.callback(producer)
}
}
}
}
// ////////////////////////////////////////////////////////////////////////
struct UpdateProducer<'f, P, F> {
base: P,
update_op: &'f F,
}
impl<'f, P, F> Producer for UpdateProducer<'f, P, F>
where
P: Producer,
F: Fn(&mut P::Item) + Send + Sync,
{
type Item = P::Item;
type IntoIter = UpdateSeq<P::IntoIter, &'f F>;
fn into_iter(self) -> Self::IntoIter {
UpdateSeq {
base: self.base.into_iter(),
update_op: self.update_op,
}
}
fn min_len(&self) -> usize {
self.base.min_len()
}
fn max_len(&self) -> usize {
self.base.max_len()
}
fn split_at(self, index: usize) -> (Self, Self) {
let (left, right) = self.base.split_at(index);
(
UpdateProducer {
base: left,
update_op: self.update_op,
},
UpdateProducer {
base: right,
update_op: self.update_op,
},
)
}
fn fold_with<G>(self, folder: G) -> G
where
G: Folder<Self::Item>,
{
let folder1 = UpdateFolder {
base: folder,
update_op: self.update_op,
};
self.base.fold_with(folder1).base
}
}
// ////////////////////////////////////////////////////////////////////////
// Consumer implementation
struct UpdateConsumer<'f, C, F> {
base: C,
update_op: &'f F,
}
impl<'f, C, F> UpdateConsumer<'f, C, F> {
fn new(base: C, update_op: &'f F) -> Self {
UpdateConsumer { base, update_op }
}
}
impl<'f, T, C, F> Consumer<T> for UpdateConsumer<'f, C, F>
where
C: Consumer<T>,
F: Fn(&mut T) + Send + Sync,
{
type Folder = UpdateFolder<'f, C::Folder, F>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
UpdateConsumer::new(left, self.update_op),
UpdateConsumer::new(right, self.update_op),
reducer,
)
}
fn into_folder(self) -> Self::Folder {
UpdateFolder {
base: self.base.into_folder(),
update_op: self.update_op,
}
}
fn full(&self) -> bool {
self.base.full()
}
}
impl<'f, T, C, F> UnindexedConsumer<T> for UpdateConsumer<'f, C, F>
where
C: UnindexedConsumer<T>,
F: Fn(&mut T) + Send + Sync,
{
fn split_off_left(&self) -> Self {
UpdateConsumer::new(self.base.split_off_left(), self.update_op)
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct UpdateFolder<'f, C, F> {
base: C,
update_op: &'f F,
}
fn apply<T>(update_op: impl Fn(&mut T)) -> impl Fn(T) -> T {
move |mut item| {
update_op(&mut item);
item
}
}
impl<'f, T, C, F> Folder<T> for UpdateFolder<'f, C, F>
where
C: Folder<T>,
F: Fn(&mut T),
{
type Result = C::Result;
fn consume(self, mut item: T) -> Self {
(self.update_op)(&mut item);
UpdateFolder {
base: self.base.consume(item),
update_op: self.update_op,
}
}
fn consume_iter<I>(mut self, iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
let update_op = self.update_op;
self.base = self
.base
.consume_iter(iter.into_iter().map(apply(update_op)));
self
}
fn complete(self) -> C::Result {
self.base.complete()
}
fn full(&self) -> bool {
self.base.full()
}
}
/// Standard Update adaptor, based on `itertools::adaptors::Update`
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
struct UpdateSeq<I, F> {
base: I,
update_op: F,
}
impl<I, F> Iterator for UpdateSeq<I, F>
where
I: Iterator,
F: Fn(&mut I::Item),
{
type Item = I::Item;
fn next(&mut self) -> Option<Self::Item> {
let mut v = self.base.next()?;
(self.update_op)(&mut v);
Some(v)
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.base.size_hint()
}
fn fold<Acc, G>(self, init: Acc, g: G) -> Acc
where
G: FnMut(Acc, Self::Item) -> Acc,
{
self.base.map(apply(self.update_op)).fold(init, g)
}
// if possible, re-use inner iterator specializations in collect
fn collect<C>(self) -> C
where
C: ::std::iter::FromIterator<Self::Item>,
{
self.base.map(apply(self.update_op)).collect()
}
}
impl<I, F> ExactSizeIterator for UpdateSeq<I, F>
where
I: ExactSizeIterator,
F: Fn(&mut I::Item),
{
}
impl<I, F> DoubleEndedIterator for UpdateSeq<I, F>
where
I: DoubleEndedIterator,
F: Fn(&mut I::Item),
{
fn next_back(&mut self) -> Option<Self::Item> {
let mut v = self.base.next_back()?;
(self.update_op)(&mut v);
Some(v)
}
}

524
vendor/rayon/src/iter/walk_tree.rs vendored Normal file
View File

@@ -0,0 +1,524 @@
use crate::iter::plumbing::{bridge_unindexed, Folder, UnindexedConsumer, UnindexedProducer};
use crate::prelude::*;
use std::iter::once;
#[derive(Debug)]
struct WalkTreePrefixProducer<'b, S, B> {
to_explore: Vec<S>, // nodes (and subtrees) we have to process
seen: Vec<S>, // nodes which have already been explored
children_of: &'b B, // function generating children
}
impl<S, B, I> UnindexedProducer for WalkTreePrefixProducer<'_, S, B>
where
S: Send,
B: Fn(&S) -> I + Send + Sync,
I: IntoIterator<Item = S, IntoIter: DoubleEndedIterator>,
{
type Item = S;
fn split(mut self) -> (Self, Option<Self>) {
// explore while front is of size one.
while self.to_explore.len() == 1 {
let front_node = self.to_explore.pop().unwrap();
self.to_explore
.extend((self.children_of)(&front_node).into_iter().rev());
self.seen.push(front_node);
}
// now take half of the front.
let right_children = split_vec(&mut self.to_explore);
let right = right_children
.map(|mut c| {
std::mem::swap(&mut c, &mut self.to_explore);
WalkTreePrefixProducer {
to_explore: c,
seen: Vec::new(),
children_of: self.children_of,
}
})
.or_else(|| {
// we can still try to divide 'seen'
let right_seen = split_vec(&mut self.seen);
right_seen.map(|s| WalkTreePrefixProducer {
to_explore: Default::default(),
seen: s,
children_of: self.children_of,
})
});
(self, right)
}
fn fold_with<F>(mut self, mut folder: F) -> F
where
F: Folder<Self::Item>,
{
// start by consuming everything seen
folder = folder.consume_iter(self.seen);
if folder.full() {
return folder;
}
// now do all remaining explorations
while let Some(e) = self.to_explore.pop() {
self.to_explore
.extend((self.children_of)(&e).into_iter().rev());
folder = folder.consume(e);
if folder.full() {
return folder;
}
}
folder
}
}
/// A `ParallelIterator` for arbitrary tree-shaped patterns.
/// Returned by the [`walk_tree_prefix()`] function.
#[derive(Debug)]
pub struct WalkTreePrefix<S, B> {
initial_state: S,
children_of: B,
}
impl<S, B, I> ParallelIterator for WalkTreePrefix<S, B>
where
S: Send,
B: Fn(&S) -> I + Send + Sync,
I: IntoIterator<Item = S, IntoIter: DoubleEndedIterator>,
{
type Item = S;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let producer = WalkTreePrefixProducer {
to_explore: once(self.initial_state).collect(),
seen: Vec::new(),
children_of: &self.children_of,
};
bridge_unindexed(producer, consumer)
}
}
/// Create a tree-like prefix parallel iterator from an initial root node.
/// The `children_of` function should take a node and return an iterator over its child nodes.
/// The best parallelization is obtained when the tree is balanced,
/// but harder cases are handled as well.
///
/// # Ordering
///
/// This function guarantees a prefix ordering. See also [`walk_tree_postfix`],
/// which guarantees a postfix order.
/// If you don't care about ordering, you should use [`walk_tree`],
/// which will use whatever is believed to be fastest.
/// For example, a perfect binary tree of 7 nodes will be reduced in the following order:
///
/// ```text
/// a
/// / \
/// / \
/// b c
/// / \ / \
/// d e f g
///
/// reduced as a,b,d,e,c,f,g
///
/// ```
///
/// # Example
///
/// ```text
/// 4
/// / \
/// / \
/// 2 3
/// / \
/// 1 2
/// ```
///
/// ```
/// use rayon::iter::walk_tree_prefix;
/// use rayon::prelude::*;
///
/// let par_iter = walk_tree_prefix(4, |&e| {
/// if e <= 2 {
/// Vec::new()
/// } else {
/// vec![e / 2, e / 2 + 1]
/// }
/// });
/// assert_eq!(par_iter.sum::<u32>(), 12);
/// ```
///
/// # Example
///
/// ```
/// use rayon::prelude::*;
/// use rayon::iter::walk_tree_prefix;
///
/// struct Node {
/// content: u32,
/// left: Option<Box<Node>>,
/// right: Option<Box<Node>>,
/// }
///
/// // Here we loop on the following tree:
/// //
/// // 10
/// // / \
/// // / \
/// // 3 14
/// // \
/// // \
/// // 18
///
/// let root = Node {
/// content: 10,
/// left: Some(Box::new(Node {
/// content: 3,
/// left: None,
/// right: None,
/// })),
/// right: Some(Box::new(Node {
/// content: 14,
/// left: None,
/// right: Some(Box::new(Node {
/// content: 18,
/// left: None,
/// right: None,
/// })),
/// })),
/// };
///
/// let mut v: Vec<u32> = walk_tree_prefix(&root, |r| {
/// r.left
/// .as_ref()
/// .into_iter()
/// .chain(r.right.as_ref())
/// .map(|n| &**n)
/// })
/// .map(|node| node.content)
/// .collect();
/// assert_eq!(v, vec![10, 3, 14, 18]);
/// ```
///
pub fn walk_tree_prefix<S, B, I>(root: S, children_of: B) -> WalkTreePrefix<S, B>
where
S: Send,
B: Fn(&S) -> I + Send + Sync,
I: IntoIterator<Item = S, IntoIter: DoubleEndedIterator>,
{
WalkTreePrefix {
initial_state: root,
children_of,
}
}
// postfix
#[derive(Debug)]
struct WalkTreePostfixProducer<'b, S, B> {
to_explore: Vec<S>, // nodes (and subtrees) we have to process
seen: Vec<S>, // nodes which have already been explored
children_of: &'b B, // function generating children
}
impl<S, B, I> UnindexedProducer for WalkTreePostfixProducer<'_, S, B>
where
S: Send,
B: Fn(&S) -> I + Send + Sync,
I: IntoIterator<Item = S>,
{
type Item = S;
fn split(mut self) -> (Self, Option<Self>) {
// explore while front is of size one.
while self.to_explore.len() == 1 {
let front_node = self.to_explore.pop().unwrap();
self.to_explore
.extend((self.children_of)(&front_node).into_iter());
self.seen.push(front_node);
}
// now take half of the front.
let right_children = split_vec(&mut self.to_explore);
let right = right_children
.map(|c| {
let right_seen = std::mem::take(&mut self.seen); // postfix -> upper nodes are processed last
WalkTreePostfixProducer {
to_explore: c,
seen: right_seen,
children_of: self.children_of,
}
})
.or_else(|| {
// we can still try to divide 'seen'
let right_seen = split_vec(&mut self.seen);
right_seen.map(|mut s| {
std::mem::swap(&mut self.seen, &mut s);
WalkTreePostfixProducer {
to_explore: Default::default(),
seen: s,
children_of: self.children_of,
}
})
});
(self, right)
}
fn fold_with<F>(self, mut folder: F) -> F
where
F: Folder<Self::Item>,
{
// now do all remaining explorations
for e in self.to_explore {
folder = consume_rec_postfix(&self.children_of, e, folder);
if folder.full() {
return folder;
}
}
// end by consuming everything seen
folder.consume_iter(self.seen.into_iter().rev())
}
}
fn consume_rec_postfix<F, S, B, I>(children_of: &B, s: S, mut folder: F) -> F
where
F: Folder<S>,
B: Fn(&S) -> I,
I: IntoIterator<Item = S>,
{
let children = (children_of)(&s).into_iter();
for child in children {
folder = consume_rec_postfix(children_of, child, folder);
if folder.full() {
return folder;
}
}
folder.consume(s)
}
/// A `ParallelIterator` for arbitrary tree-shaped patterns.
/// Returned by the [`walk_tree_postfix()`] function.
#[derive(Debug)]
pub struct WalkTreePostfix<S, B> {
initial_state: S,
children_of: B,
}
impl<S, B, I> ParallelIterator for WalkTreePostfix<S, B>
where
S: Send,
B: Fn(&S) -> I + Send + Sync,
I: IntoIterator<Item = S>,
{
type Item = S;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let producer = WalkTreePostfixProducer {
to_explore: once(self.initial_state).collect(),
seen: Vec::new(),
children_of: &self.children_of,
};
bridge_unindexed(producer, consumer)
}
}
/// Divide the given vector into two roughly equal halves.
/// Returns `None` if the initial length is <= 1.
/// We return the second half and keep the first half in `v`.
fn split_vec<T>(v: &mut Vec<T>) -> Option<Vec<T>> {
if v.len() <= 1 {
None
} else {
let n = v.len() / 2;
Some(v.split_off(n))
}
}
/// Create a tree-like postfix parallel iterator from an initial root node.
/// The `children_of` function should take a node and return an iterator over its child nodes.
/// The best parallelization is obtained when the tree is balanced,
/// but harder cases are handled as well.
///
/// # Ordering
///
/// This function guarantees a postfix ordering. See also [`walk_tree_prefix`] which guarantees a
/// prefix order. If you don't care about ordering, you should use [`walk_tree`], which will use
/// whatever is believed to be fastest.
///
/// Between siblings, children are reduced in order; that is, earlier children are reduced first.
///
/// For example, a perfect binary tree of 7 nodes will be reduced in the following order:
///
/// ```text
/// a
/// / \
/// / \
/// b c
/// / \ / \
/// d e f g
///
/// reduced as d,e,b,f,g,c,a
///
/// ```
///
/// # Example
///
/// ```text
/// 4
/// / \
/// / \
/// 2 3
/// / \
/// 1 2
/// ```
///
/// ```
/// use rayon::iter::walk_tree_postfix;
/// use rayon::prelude::*;
///
/// let par_iter = walk_tree_postfix(4, |&e| {
/// if e <= 2 {
/// Vec::new()
/// } else {
/// vec![e / 2, e / 2 + 1]
/// }
/// });
/// assert_eq!(par_iter.sum::<u32>(), 12);
/// ```
///
/// # Example
///
/// ```
/// use rayon::prelude::*;
/// use rayon::iter::walk_tree_postfix;
///
/// struct Node {
/// content: u32,
/// left: Option<Box<Node>>,
/// right: Option<Box<Node>>,
/// }
///
/// // Here we loop on the following tree:
/// //
/// // 10
/// // / \
/// // / \
/// // 3 14
/// // \
/// // \
/// // 18
///
/// let root = Node {
/// content: 10,
/// left: Some(Box::new(Node {
/// content: 3,
/// left: None,
/// right: None,
/// })),
/// right: Some(Box::new(Node {
/// content: 14,
/// left: None,
/// right: Some(Box::new(Node {
/// content: 18,
/// left: None,
/// right: None,
/// })),
/// })),
/// };
///
/// let mut v: Vec<u32> = walk_tree_postfix(&root, |r| {
/// r.left
/// .as_ref()
/// .into_iter()
/// .chain(r.right.as_ref())
/// .map(|n| &**n)
/// })
/// .map(|node| node.content)
/// .collect();
/// assert_eq!(v, vec![3, 18, 14, 10]);
/// ```
///
pub fn walk_tree_postfix<S, B, I>(root: S, children_of: B) -> WalkTreePostfix<S, B>
where
S: Send,
B: Fn(&S) -> I + Send + Sync,
I: IntoIterator<Item = S>,
{
WalkTreePostfix {
initial_state: root,
children_of,
}
}
/// A `ParallelIterator` for arbitrary tree-shaped patterns.
/// Returned by the [`walk_tree()`] function.
#[derive(Debug)]
pub struct WalkTree<S, B>(WalkTreePostfix<S, B>);
/// Create a tree-like parallel iterator from an initial root node.
/// The `children_of` function should take a node and return an iterator over its child nodes.
/// The best parallelization is obtained when the tree is balanced,
/// but harder cases are handled as well.
///
/// # Ordering
///
/// This function does not guarantee any ordering but will
/// use whatever algorithm is thought to achieve the fastest traversal.
/// See also [`walk_tree_prefix`] which guarantees a
/// prefix order and [`walk_tree_postfix`] which guarantees a postfix order.
///
/// # Example
///
/// ```text
/// 4
/// / \
/// / \
/// 2 3
/// / \
/// 1 2
/// ```
///
/// ```
/// use rayon::iter::walk_tree;
/// use rayon::prelude::*;
///
/// let par_iter = walk_tree(4, |&e| {
/// if e <= 2 {
/// Vec::new()
/// } else {
/// vec![e / 2, e / 2 + 1]
/// }
/// });
/// assert_eq!(par_iter.sum::<u32>(), 12);
/// ```
pub fn walk_tree<S, B, I>(root: S, children_of: B) -> WalkTree<S, B>
where
S: Send,
B: Fn(&S) -> I + Send + Sync,
I: IntoIterator<Item = S, IntoIter: DoubleEndedIterator>,
{
let walker = WalkTreePostfix {
initial_state: root,
children_of,
};
WalkTree(walker)
}
impl<S, B, I> ParallelIterator for WalkTree<S, B>
where
S: Send,
B: Fn(&S) -> I + Send + Sync,
I: IntoIterator<Item = S, IntoIter: DoubleEndedIterator> + Send,
{
type Item = S;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.0.drive_unindexed(consumer)
}
}

150
vendor/rayon/src/iter/while_some.rs vendored Normal file
View File

@@ -0,0 +1,150 @@
use super::plumbing::*;
use super::*;
use std::sync::atomic::{AtomicBool, Ordering};
/// `WhileSome` is an iterator that yields the `Some` elements of an iterator,
/// halting as soon as any `None` is produced.
///
/// This struct is created by the [`while_some()`] method on [`ParallelIterator`]
///
/// [`while_some()`]: ParallelIterator::while_some()
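///
/// A minimal sketch; only values produced before the first `None` is
/// observed can appear in the result:
///
/// ```
/// use rayon::prelude::*;
///
/// let v: Vec<u32> = (0..100u32)
///     .into_par_iter()
///     .map(|x| if x < 10 { Some(x) } else { None })
///     .while_some()
///     .collect();
/// assert!(v.iter().all(|&x| x < 10));
/// ```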
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct WhileSome<I> {
base: I,
}
impl<I> WhileSome<I> {
/// Creates a new `WhileSome` iterator.
pub(super) fn new(base: I) -> Self {
WhileSome { base }
}
}
impl<I, T> ParallelIterator for WhileSome<I>
where
I: ParallelIterator<Item = Option<T>>,
T: Send,
{
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let full = AtomicBool::new(false);
let consumer1 = WhileSomeConsumer {
base: consumer,
full: &full,
};
self.base.drive_unindexed(consumer1)
}
}
// ////////////////////////////////////////////////////////////////////////
// Consumer implementation
struct WhileSomeConsumer<'f, C> {
base: C,
full: &'f AtomicBool,
}
impl<'f, T, C> Consumer<Option<T>> for WhileSomeConsumer<'f, C>
where
C: Consumer<T>,
T: Send,
{
type Folder = WhileSomeFolder<'f, C::Folder>;
type Reducer = C::Reducer;
type Result = C::Result;
fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
let (left, right, reducer) = self.base.split_at(index);
(
WhileSomeConsumer { base: left, ..self },
WhileSomeConsumer {
base: right,
..self
},
reducer,
)
}
fn into_folder(self) -> Self::Folder {
WhileSomeFolder {
base: self.base.into_folder(),
full: self.full,
}
}
fn full(&self) -> bool {
self.full.load(Ordering::Relaxed) || self.base.full()
}
}
impl<'f, T, C> UnindexedConsumer<Option<T>> for WhileSomeConsumer<'f, C>
where
C: UnindexedConsumer<T>,
T: Send,
{
fn split_off_left(&self) -> Self {
WhileSomeConsumer {
base: self.base.split_off_left(),
..*self
}
}
fn to_reducer(&self) -> Self::Reducer {
self.base.to_reducer()
}
}
struct WhileSomeFolder<'f, C> {
base: C,
full: &'f AtomicBool,
}
impl<'f, T, C> Folder<Option<T>> for WhileSomeFolder<'f, C>
where
C: Folder<T>,
{
type Result = C::Result;
fn consume(mut self, item: Option<T>) -> Self {
match item {
Some(item) => self.base = self.base.consume(item),
None => self.full.store(true, Ordering::Relaxed),
}
self
}
fn consume_iter<I>(mut self, iter: I) -> Self
where
I: IntoIterator<Item = Option<T>>,
{
fn some<T>(full: &AtomicBool) -> impl Fn(&Option<T>) -> bool + '_ {
move |x| match *x {
Some(_) => !full.load(Ordering::Relaxed),
None => {
full.store(true, Ordering::Relaxed);
false
}
}
}
self.base = self.base.consume_iter(
iter.into_iter()
.take_while(some(self.full))
.map(Option::unwrap),
);
self
}
fn complete(self) -> C::Result {
self.base.complete()
}
fn full(&self) -> bool {
self.full.load(Ordering::Relaxed) || self.base.full()
}
}

153
vendor/rayon/src/iter/zip.rs vendored Normal file
View File

@@ -0,0 +1,153 @@
use super::plumbing::*;
use super::*;
use std::iter;
/// `Zip` is an iterator that zips up `a` and `b` into a single iterator
/// of pairs. This struct is created by the [`zip()`] method on
/// [`IndexedParallelIterator`]
///
/// [`zip()`]: IndexedParallelIterator::zip()
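///
/// A short sketch:
///
/// ```
/// use rayon::prelude::*;
///
/// let sums: Vec<i32> = (0..4).into_par_iter()
///     .zip(vec![10, 20, 30, 40])
///     .map(|(i, x)| i + x)
///     .collect();
/// assert_eq!(sums, vec![10, 21, 32, 43]);
/// ```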
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct Zip<A, B> {
a: A,
b: B,
}
impl<A, B> Zip<A, B> {
/// Creates a new `Zip` iterator.
pub(super) fn new(a: A, b: B) -> Self {
Zip { a, b }
}
}
impl<A, B> ParallelIterator for Zip<A, B>
where
A: IndexedParallelIterator,
B: IndexedParallelIterator,
{
type Item = (A::Item, B::Item);
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<A, B> IndexedParallelIterator for Zip<A, B>
where
A: IndexedParallelIterator,
B: IndexedParallelIterator,
{
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn len(&self) -> usize {
Ord::min(self.a.len(), self.b.len())
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
return self.a.with_producer(CallbackA {
callback,
b: self.b,
});
struct CallbackA<CB, B> {
callback: CB,
b: B,
}
impl<CB, ITEM, B> ProducerCallback<ITEM> for CallbackA<CB, B>
where
B: IndexedParallelIterator,
CB: ProducerCallback<(ITEM, B::Item)>,
{
type Output = CB::Output;
fn callback<A>(self, a_producer: A) -> Self::Output
where
A: Producer<Item = ITEM>,
{
self.b.with_producer(CallbackB {
a_producer,
callback: self.callback,
})
}
}
struct CallbackB<CB, A> {
a_producer: A,
callback: CB,
}
impl<CB, A, ITEM> ProducerCallback<ITEM> for CallbackB<CB, A>
where
A: Producer,
CB: ProducerCallback<(A::Item, ITEM)>,
{
type Output = CB::Output;
fn callback<B>(self, b_producer: B) -> Self::Output
where
B: Producer<Item = ITEM>,
{
self.callback.callback(ZipProducer {
a: self.a_producer,
b: b_producer,
})
}
}
}
}
// ////////////////////////////////////////////////////////////////////////
struct ZipProducer<A: Producer, B: Producer> {
a: A,
b: B,
}
impl<A: Producer, B: Producer> Producer for ZipProducer<A, B> {
type Item = (A::Item, B::Item);
type IntoIter = iter::Zip<A::IntoIter, B::IntoIter>;
fn into_iter(self) -> Self::IntoIter {
self.a.into_iter().zip(self.b.into_iter())
}
fn min_len(&self) -> usize {
Ord::max(self.a.min_len(), self.b.min_len())
}
fn max_len(&self) -> usize {
Ord::min(self.a.max_len(), self.b.max_len())
}
fn split_at(self, index: usize) -> (Self, Self) {
let (a_left, a_right) = self.a.split_at(index);
let (b_left, b_right) = self.b.split_at(index);
(
ZipProducer {
a: a_left,
b: b_left,
},
ZipProducer {
a: a_right,
b: b_right,
},
)
}
}

67
vendor/rayon/src/iter/zip_eq.rs vendored Normal file
View File

@@ -0,0 +1,67 @@
use super::plumbing::*;
use super::*;
/// An [`IndexedParallelIterator`] that iterates over two parallel iterators of equal
/// length simultaneously.
///
/// This struct is created by the [`zip_eq`] method on [`IndexedParallelIterator`],
/// see its documentation for more information.
///
/// [`zip_eq`]: IndexedParallelIterator::zip_eq()
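///
/// A short sketch; unlike `zip`, mismatched lengths panic:
///
/// ```
/// use rayon::prelude::*;
///
/// let pairs: Vec<(i32, char)> = (0..3).into_par_iter()
///     .zip_eq(vec!['a', 'b', 'c'])
///     .collect();
/// assert_eq!(pairs, vec![(0, 'a'), (1, 'b'), (2, 'c')]);
/// ```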
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Debug, Clone)]
pub struct ZipEq<A, B> {
zip: Zip<A, B>,
}
impl<A, B> ZipEq<A, B> {
/// Creates a new `ZipEq` iterator.
pub(super) fn new(a: A, b: B) -> Self {
ZipEq {
zip: super::Zip::new(a, b),
}
}
}
impl<A, B> ParallelIterator for ZipEq<A, B>
where
A: IndexedParallelIterator,
B: IndexedParallelIterator,
{
type Item = (A::Item, B::Item);
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self.zip, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.zip.len())
}
}
impl<A, B> IndexedParallelIterator for ZipEq<A, B>
where
A: IndexedParallelIterator,
B: IndexedParallelIterator,
{
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self.zip, consumer)
}
fn len(&self) -> usize {
self.zip.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
self.zip.with_producer(callback)
}
}

156
vendor/rayon/src/lib.rs vendored Normal file
View File

@@ -0,0 +1,156 @@
//! Rayon is a data-parallelism library that makes it easy to convert sequential
//! computations into parallel ones.
//!
//! It is lightweight and convenient for introducing parallelism into existing
//! code. It guarantees data-race-free execution and takes advantage of
//! parallelism when sensible, based on the workload at runtime.
//!
//! # How to use Rayon
//!
//! There are two ways to use Rayon:
//!
//! - **High-level parallel constructs** are the simplest way to use Rayon and also
//! typically the most efficient.
//! - [Parallel iterators] make it easy to convert a sequential iterator to
//! execute in parallel.
//! - The [`ParallelIterator`] trait defines general methods for all parallel iterators.
//! - The [`IndexedParallelIterator`] trait adds methods for iterators that support random
//! access.
//! - The [`par_sort`] method sorts `&mut [T]` slices (or vectors) in parallel.
//! - [`par_extend`] can be used to efficiently grow collections with items produced
//! by a parallel iterator.
//! - **Custom tasks** let you divide your work into parallel tasks yourself.
//! - [`join`] is used to subdivide a task into two pieces.
//! - [`scope`] creates a scope within which you can create any number of parallel tasks.
//! - [`ThreadPoolBuilder`] can be used to create your own thread pools or customize
//! the global one.
//!
//! [Parallel iterators]: iter
//! [`par_sort`]: slice::ParallelSliceMut::par_sort
//! [`par_extend`]: iter::ParallelExtend::par_extend
//! [`ParallelIterator`]: iter::ParallelIterator
//! [`IndexedParallelIterator`]: iter::IndexedParallelIterator
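//!
//! For the custom-task route, a minimal `join` sketch:
//!
//! ```
//! let (a, b) = rayon::join(|| 1 + 1, || 2 + 2);
//! assert_eq!((a, b), (2, 4));
//! ```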
//!
//! # Basic usage and the Rayon prelude
//!
//! First, you will need to add `rayon` to your `Cargo.toml`.
//!
//! Next, to use parallel iterators or the other high-level methods,
//! you need to import several traits. Those traits are bundled into
//! the module [`rayon::prelude`]. It is recommended that you import
//! all of these traits at once by adding `use rayon::prelude::*` at
//! the top of each module that uses Rayon methods.
//!
//! These traits give you access to the `par_iter` method which provides
//! parallel implementations of many iterative functions such as [`map`],
//! [`for_each`], [`filter`], [`fold`], and [more].
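//!
//! For example, a minimal sketch (the function name is illustrative):
//!
//! ```
//! use rayon::prelude::*;
//!
//! fn sum_of_squares(input: &[i32]) -> i32 {
//!     input.par_iter().map(|&i| i * i).sum()
//! }
//!
//! assert_eq!(sum_of_squares(&[1, 2, 3]), 14);
//! ```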
//!
//! [`rayon::prelude`]: prelude
//! [`map`]: iter::ParallelIterator::map
//! [`for_each`]: iter::ParallelIterator::for_each
//! [`filter`]: iter::ParallelIterator::filter
//! [`fold`]: iter::ParallelIterator::fold
//! [more]: iter::ParallelIterator#provided-methods
//!
//! # Crate Layout
//!
//! Rayon extends many of the types found in the standard library with
//! parallel iterator implementations. The modules in the `rayon`
//! crate mirror [`std`] itself: so, e.g., the `option` module in
//! Rayon contains parallel iterators for the `Option` type, which is
//! found in [the `option` module of `std`]. Similarly, the
//! `collections` module in Rayon offers parallel iterator types for
//! [the `collections` from `std`]. You will rarely need to access
//! these submodules unless you need to name iterator types
//! explicitly.
//!
//! [the `option` module of `std`]: std::option
//! [the `collections` from `std`]: std::collections
//!
//! # Targets without threading
//!
//! Rayon has limited support for targets without `std` threading implementations.
//! See the [`rayon_core`] documentation for more information about its global fallback.
//!
//! # Other questions?
//!
//! See [the Rayon FAQ][faq].
//!
//! [faq]: https://github.com/rayon-rs/rayon/blob/main/FAQ.md
#![deny(missing_debug_implementations)]
#![deny(missing_docs)]
#![deny(unreachable_pub)]
#![warn(rust_2018_idioms)]
#[macro_use]
mod delegate;
#[macro_use]
mod private;
mod split_producer;
pub mod array;
pub mod collections;
pub mod iter;
pub mod option;
pub mod prelude;
pub mod range;
pub mod range_inclusive;
pub mod result;
pub mod slice;
pub mod str;
pub mod string;
pub mod vec;
mod math;
mod par_either;
mod compile_fail;
pub use rayon_core::FnContext;
pub use rayon_core::ThreadBuilder;
pub use rayon_core::ThreadPool;
pub use rayon_core::ThreadPoolBuildError;
pub use rayon_core::ThreadPoolBuilder;
pub use rayon_core::{broadcast, spawn_broadcast, BroadcastContext};
pub use rayon_core::{current_num_threads, current_thread_index, max_num_threads};
pub use rayon_core::{in_place_scope, scope, Scope};
pub use rayon_core::{in_place_scope_fifo, scope_fifo, ScopeFifo};
pub use rayon_core::{join, join_context};
pub use rayon_core::{spawn, spawn_fifo};
pub use rayon_core::{yield_local, yield_now, Yield};
/// We need to transmit raw pointers across threads. It is possible to do this
/// without any unsafe code by converting pointers to `usize` or to `AtomicPtr<T>`
/// then back to a raw pointer for use. We prefer this approach because code
/// that uses this type is more explicit.
///
/// Unsafe code is still required to dereference the pointer, so this type is
/// not unsound on its own, although it does partly lift the unconditional
/// !Send and !Sync on raw pointers. As always, dereference with care.
struct SendPtr<T>(*mut T);
// SAFETY: the !Send bound on raw pointers is a lint, not a safety requirement
unsafe impl<T: Send> Send for SendPtr<T> {}
// SAFETY: the !Sync bound on raw pointers is a lint, not a safety requirement
unsafe impl<T: Send> Sync for SendPtr<T> {}
impl<T> SendPtr<T> {
// Helper to avoid disjoint captures of `send_ptr.0`
fn get(self) -> *mut T {
self.0
}
}
// Implement Clone without the T: Clone bound from the derive
impl<T> Clone for SendPtr<T> {
fn clone(&self) -> Self {
*self
}
}
// Implement Copy without the T: Copy bound from the derive
impl<T> Copy for SendPtr<T> {}

25
vendor/rayon/src/math.rs vendored Normal file
View File

@@ -0,0 +1,25 @@
use std::ops::{Bound, Range, RangeBounds};
/// Normalize arbitrary `RangeBounds` to a `Range`
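///
/// For example, with `len == 5`: `..` normalizes to `0..5`, `1..=3` to
/// `1..4`, and bounds past the length panic.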
pub(super) fn simplify_range(range: impl RangeBounds<usize>, len: usize) -> Range<usize> {
let start = match range.start_bound() {
Bound::Unbounded => 0,
Bound::Included(&i) if i <= len => i,
Bound::Excluded(&i) if i < len => i + 1,
bound => panic!("range start {bound:?} should be <= length {len}"),
};
let end = match range.end_bound() {
Bound::Unbounded => len,
Bound::Excluded(&i) if i <= len => i,
Bound::Included(&i) if i < len => i + 1,
bound => panic!("range end {bound:?} should be <= length {len}"),
};
if start > end {
panic!(
"range start {:?} should be <= range end {:?}",
range.start_bound(),
range.end_bound()
);
}
start..end
}

197
vendor/rayon/src/option.rs vendored Normal file
View File

@@ -0,0 +1,197 @@
//! Parallel iterator types for [options]
//!
//! You will rarely need to interact with this module directly unless you need
//! to name one of the iterator types.
//!
//! [options]: std::option
use crate::iter::plumbing::*;
use crate::iter::*;
use std::sync::atomic::{AtomicBool, Ordering};
/// A parallel iterator over the value in the [`Some`] variant of an [`Option`].
///
/// The iterator yields one value if the [`Option`] is a [`Some`], otherwise none.
///
/// This `struct` is created by the [`into_par_iter`] function.
///
/// [`into_par_iter`]: IntoParallelIterator::into_par_iter()
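///
/// A small sketch:
///
/// ```
/// use rayon::prelude::*;
///
/// let some: Vec<i32> = Some(5).into_par_iter().collect();
/// assert_eq!(some, vec![5]);
///
/// let none: Vec<i32> = None::<i32>.into_par_iter().collect();
/// assert!(none.is_empty());
/// ```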
#[derive(Debug, Clone)]
pub struct IntoIter<T> {
opt: Option<T>,
}
impl<T: Send> IntoParallelIterator for Option<T> {
type Item = T;
type Iter = IntoIter<T>;
fn into_par_iter(self) -> Self::Iter {
IntoIter { opt: self }
}
}
impl<T: Send> ParallelIterator for IntoIter<T> {
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.drive(consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<T: Send> IndexedParallelIterator for IntoIter<T> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
let mut folder = consumer.into_folder();
if let Some(item) = self.opt {
folder = folder.consume(item);
}
folder.complete()
}
fn len(&self) -> usize {
match self.opt {
Some(_) => 1,
None => 0,
}
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
callback.callback(OptionProducer { opt: self.opt })
}
}
/// A parallel iterator over a reference to the [`Some`] variant of an [`Option`].
///
/// The iterator yields one value if the [`Option`] is a [`Some`], otherwise none.
///
/// This `struct` is created by the [`par_iter`] function.
///
/// [`par_iter`]: IntoParallelRefIterator::par_iter()
#[derive(Debug)]
pub struct Iter<'a, T> {
inner: IntoIter<&'a T>,
}
impl<T> Clone for Iter<'_, T> {
fn clone(&self) -> Self {
Iter {
inner: self.inner.clone(),
}
}
}
impl<'a, T: Sync> IntoParallelIterator for &'a Option<T> {
type Item = &'a T;
type Iter = Iter<'a, T>;
fn into_par_iter(self) -> Self::Iter {
Iter {
inner: self.as_ref().into_par_iter(),
}
}
}
delegate_indexed_iterator! {
Iter<'a, T> => &'a T,
impl<'a, T: Sync>
}
/// A parallel iterator over a mutable reference to the [`Some`] variant of an [`Option`].
///
/// The iterator yields one value if the [`Option`] is a [`Some`], otherwise none.
///
/// This `struct` is created by the [`par_iter_mut`] function.
///
/// [`par_iter_mut`]: IntoParallelRefMutIterator::par_iter_mut()
#[derive(Debug)]
pub struct IterMut<'a, T> {
inner: IntoIter<&'a mut T>,
}
impl<'a, T: Send> IntoParallelIterator for &'a mut Option<T> {
type Item = &'a mut T;
type Iter = IterMut<'a, T>;
fn into_par_iter(self) -> Self::Iter {
IterMut {
inner: self.as_mut().into_par_iter(),
}
}
}
delegate_indexed_iterator! {
IterMut<'a, T> => &'a mut T,
impl<'a, T: Send>
}
/// Private producer for an option
struct OptionProducer<T: Send> {
opt: Option<T>,
}
impl<T: Send> Producer for OptionProducer<T> {
type Item = T;
type IntoIter = std::option::IntoIter<T>;
fn into_iter(self) -> Self::IntoIter {
self.opt.into_iter()
}
fn split_at(self, index: usize) -> (Self, Self) {
debug_assert!(index <= 1);
let none = OptionProducer { opt: None };
if index == 0 {
(none, self)
} else {
(self, none)
}
}
}
/// Collect an arbitrary `Option`-wrapped collection.
///
/// If any item is `None`, then all previous items collected are discarded,
/// and it returns only `None`.
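///
/// A short sketch:
///
/// ```
/// use rayon::prelude::*;
///
/// let all: Option<Vec<i32>> = (0..4).into_par_iter().map(Some).collect();
/// assert_eq!(all, Some(vec![0, 1, 2, 3]));
///
/// let any_none: Option<Vec<i32>> = (0..4)
///     .into_par_iter()
///     .map(|x| if x == 2 { None } else { Some(x) })
///     .collect();
/// assert_eq!(any_none, None);
/// ```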
impl<C, T> FromParallelIterator<Option<T>> for Option<C>
where
C: FromParallelIterator<T>,
T: Send,
{
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = Option<T>>,
{
fn check<T>(found_none: &AtomicBool) -> impl Fn(&Option<T>) + '_ {
move |item| {
if item.is_none() {
found_none.store(true, Ordering::Relaxed);
}
}
}
let found_none = AtomicBool::new(false);
let collection = par_iter
.into_par_iter()
.inspect(check(&found_none))
.while_some()
.collect();
if found_none.load(Ordering::Relaxed) {
None
} else {
Some(collection)
}
}
}

74
vendor/rayon/src/par_either.rs vendored Normal file
View File

@@ -0,0 +1,74 @@
use crate::iter::plumbing::*;
use crate::iter::Either::{Left, Right};
use crate::iter::*;
/// `Either<L, R>` is a parallel iterator if both `L` and `R` are parallel iterators.
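///
/// A minimal sketch choosing between two iterator shapes at runtime:
///
/// ```
/// use rayon::iter::Either;
/// use rayon::prelude::*;
///
/// let use_range = true;
/// let iter = if use_range {
///     Either::Left((0..3).into_par_iter())
/// } else {
///     Either::Right(vec![10, 20].into_par_iter())
/// };
/// assert_eq!(iter.sum::<i32>(), 3);
/// ```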
impl<L, R> ParallelIterator for Either<L, R>
where
L: ParallelIterator,
R: ParallelIterator<Item = L::Item>,
{
type Item = L::Item;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
match self {
Left(iter) => iter.drive_unindexed(consumer),
Right(iter) => iter.drive_unindexed(consumer),
}
}
fn opt_len(&self) -> Option<usize> {
self.as_ref().either(L::opt_len, R::opt_len)
}
}
impl<L, R> IndexedParallelIterator for Either<L, R>
where
L: IndexedParallelIterator,
R: IndexedParallelIterator<Item = L::Item>,
{
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
match self {
Left(iter) => iter.drive(consumer),
Right(iter) => iter.drive(consumer),
}
}
fn len(&self) -> usize {
self.as_ref().either(L::len, R::len)
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
match self {
Left(iter) => iter.with_producer(callback),
Right(iter) => iter.with_producer(callback),
}
}
}
/// `Either<L, R>` can be extended if both `L` and `R` are parallel extendable.
impl<L, R, T> ParallelExtend<T> for Either<L, R>
where
L: ParallelExtend<T>,
R: ParallelExtend<T>,
T: Send,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = T>,
{
match self.as_mut() {
Left(collection) => collection.par_extend(par_iter),
Right(collection) => collection.par_extend(par_iter),
}
}
}

17
vendor/rayon/src/prelude.rs vendored Normal file
View File

@@ -0,0 +1,17 @@
//! The rayon prelude imports the various `ParallelIterator` traits.
//! The intention is that you can include `use rayon::prelude::*` and
//! have easy access to the various traits and methods you will need.
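//!
//! A quick sketch of what the prelude enables:
//!
//! ```
//! use rayon::prelude::*;
//!
//! let total: i32 = vec![1, 2, 3].par_iter().sum();
//! assert_eq!(total, 6);
//! ```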
pub use crate::iter::FromParallelIterator;
pub use crate::iter::IndexedParallelIterator;
pub use crate::iter::IntoParallelIterator;
pub use crate::iter::IntoParallelRefIterator;
pub use crate::iter::IntoParallelRefMutIterator;
pub use crate::iter::ParallelBridge;
pub use crate::iter::ParallelDrainFull;
pub use crate::iter::ParallelDrainRange;
pub use crate::iter::ParallelExtend;
pub use crate::iter::ParallelIterator;
pub use crate::slice::ParallelSlice;
pub use crate::slice::ParallelSliceMut;
pub use crate::str::ParallelString;

26
vendor/rayon/src/private.rs vendored Normal file
View File

@@ -0,0 +1,26 @@
//! The public parts of this private module are used to create traits
//! that cannot be implemented outside of our own crate. This way we
//! can feel free to extend those traits without worrying about it
//! being a breaking change for other implementations.
/// If this type is pub but not publicly reachable, third parties
/// can't name it and can't implement traits using it.
#[allow(missing_debug_implementations)]
pub struct PrivateMarker;
macro_rules! private_decl {
() => {
/// This trait is private; this method exists to make it
/// impossible to implement outside the crate.
#[doc(hidden)]
fn __rayon_private__(&self) -> crate::private::PrivateMarker;
};
}
macro_rules! private_impl {
() => {
fn __rayon_private__(&self) -> crate::private::PrivateMarker {
crate::private::PrivateMarker
}
};
}
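// A sketch of the sealed-trait pattern these macros implement (`Sealed` and
// `MyType` are hypothetical names for illustration):
//
//     pub trait Sealed {
//         private_decl! {} // declares `__rayon_private__`
//     }
//     struct MyType;
//     impl Sealed for MyType {
//         private_impl! {} // only possible inside this crate
//     }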

457
vendor/rayon/src/range.rs vendored Normal file
View File

@@ -0,0 +1,457 @@
//! Parallel iterator types for [ranges],
//! the type for values created by `a..b` expressions
//!
//! You will rarely need to interact with this module directly unless you have
//! need to name one of the iterator types.
//!
//! ```
//! use rayon::prelude::*;
//!
//! let r = (0..100u64).into_par_iter()
//! .sum();
//!
//! // compare result with sequential calculation
//! assert_eq!((0..100).sum::<u64>(), r);
//! ```
//!
//! [ranges]: std::ops::Range
use crate::iter::plumbing::*;
use crate::iter::*;
use std::ops::Range;
/// Parallel iterator over a range, implemented for all integer types and `char`.
///
/// **Note:** The `zip` operation requires `IndexedParallelIterator`
/// which is not implemented for `u64`, `i64`, `u128`, or `i128`.
///
/// ```
/// use rayon::prelude::*;
///
/// let p = (0..25usize).into_par_iter()
/// .zip(0..25usize)
/// .filter(|&(x, y)| x % 5 == 0 || y % 5 == 0)
/// .map(|(x, y)| x * y)
/// .sum::<usize>();
///
/// let s = (0..25usize).zip(0..25)
/// .filter(|&(x, y)| x % 5 == 0 || y % 5 == 0)
/// .map(|(x, y)| x * y)
/// .sum();
///
/// assert_eq!(p, s);
/// ```
#[derive(Debug, Clone)]
pub struct Iter<T> {
range: Range<T>,
}
/// Implemented for ranges of all primitive integer types and `char`.
impl<T> IntoParallelIterator for Range<T>
where
Iter<T>: ParallelIterator,
{
type Item = <Iter<T> as ParallelIterator>::Item;
type Iter = Iter<T>;
fn into_par_iter(self) -> Self::Iter {
Iter { range: self }
}
}
struct IterProducer<T> {
range: Range<T>,
}
impl<T> IntoIterator for IterProducer<T>
where
Range<T>: Iterator,
{
type Item = <Range<T> as Iterator>::Item;
type IntoIter = Range<T>;
fn into_iter(self) -> Self::IntoIter {
self.range
}
}
/// These traits help drive integer type inference. Without them, an unknown `{integer}` type only
/// has constraints on `Iter<{integer}>`, which will probably give up and use `i32`. By adding
/// these traits on the item type, the compiler can see a more direct constraint to infer like
/// `{integer}: RangeInteger`, which works better. See `test_issue_833` for an example.
///
/// They have to be `pub` since they're seen in the public `impl ParallelIterator` constraints, but
/// we put them in a private module so they're not actually reachable in our public API.
mod private {
use super::*;
/// Implementation details of `ParallelIterator for Iter<Self>`
pub trait RangeInteger: Sized + Send {
private_decl! {}
fn drive_unindexed<C>(iter: Iter<Self>, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self>;
fn opt_len(iter: &Iter<Self>) -> Option<usize>;
}
/// Implementation details of `IndexedParallelIterator for Iter<Self>`
pub trait IndexedRangeInteger: RangeInteger {
private_decl! {}
fn drive<C>(iter: Iter<Self>, consumer: C) -> C::Result
where
C: Consumer<Self>;
fn len(iter: &Iter<Self>) -> usize;
fn with_producer<CB>(iter: Iter<Self>, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self>;
}
}
use private::{IndexedRangeInteger, RangeInteger};
impl<T: RangeInteger> ParallelIterator for Iter<T> {
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<T>,
{
T::drive_unindexed(self, consumer)
}
#[inline]
fn opt_len(&self) -> Option<usize> {
T::opt_len(self)
}
}
impl<T: IndexedRangeInteger> IndexedParallelIterator for Iter<T> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<T>,
{
T::drive(self, consumer)
}
#[inline]
fn len(&self) -> usize {
T::len(self)
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<T>,
{
T::with_producer(self, callback)
}
}
macro_rules! indexed_range_impl {
( $t:ty ) => {
impl RangeInteger for $t {
private_impl! {}
fn drive_unindexed<C>(iter: Iter<$t>, consumer: C) -> C::Result
where
C: UnindexedConsumer<$t>,
{
bridge(iter, consumer)
}
fn opt_len(iter: &Iter<$t>) -> Option<usize> {
Some(iter.range.len())
}
}
impl IndexedRangeInteger for $t {
private_impl! {}
fn drive<C>(iter: Iter<$t>, consumer: C) -> C::Result
where
C: Consumer<$t>,
{
bridge(iter, consumer)
}
fn len(iter: &Iter<$t>) -> usize {
iter.range.len()
}
fn with_producer<CB>(iter: Iter<$t>, callback: CB) -> CB::Output
where
CB: ProducerCallback<$t>,
{
callback.callback(IterProducer { range: iter.range })
}
}
impl Producer for IterProducer<$t> {
type Item = <Range<$t> as Iterator>::Item;
type IntoIter = Range<$t>;
fn into_iter(self) -> Self::IntoIter {
self.range
}
fn split_at(self, index: usize) -> (Self, Self) {
assert!(index <= self.range.len());
// For signed $t, the length and requested index could be greater than $t::MAX, and
// then `index as $t` could wrap to negative, so wrapping_add is necessary.
let mid = self.range.start.wrapping_add(index as $t);
let left = self.range.start..mid;
let right = mid..self.range.end;
(IterProducer { range: left }, IterProducer { range: right })
}
}
};
}
trait UnindexedRangeLen<L> {
fn unindexed_len(&self) -> L;
}
macro_rules! unindexed_range_impl {
( $t:ty, $len_t:ty ) => {
impl UnindexedRangeLen<$len_t> for Range<$t> {
fn unindexed_len(&self) -> $len_t {
let &Range { start, end } = self;
if end > start {
end.wrapping_sub(start) as $len_t
} else {
0
}
}
}
impl RangeInteger for $t {
private_impl! {}
fn drive_unindexed<C>(iter: Iter<$t>, consumer: C) -> C::Result
where
C: UnindexedConsumer<$t>,
{
#[inline]
fn offset(start: $t) -> impl Fn(usize) -> $t {
move |i| start.wrapping_add(i as $t)
}
if let Some(len) = iter.opt_len() {
// Drive this in indexed mode for better `collect`.
(0..len)
.into_par_iter()
.map(offset(iter.range.start))
.drive(consumer)
} else {
bridge_unindexed(IterProducer { range: iter.range }, consumer)
}
}
fn opt_len(iter: &Iter<$t>) -> Option<usize> {
usize::try_from(iter.range.unindexed_len()).ok()
}
}
impl UnindexedProducer for IterProducer<$t> {
type Item = $t;
fn split(mut self) -> (Self, Option<Self>) {
let index = self.range.unindexed_len() / 2;
if index > 0 {
let mid = self.range.start.wrapping_add(index as $t);
let right = mid..self.range.end;
self.range.end = mid;
(self, Some(IterProducer { range: right }))
} else {
(self, None)
}
}
fn fold_with<F>(self, folder: F) -> F
where
F: Folder<Self::Item>,
{
folder.consume_iter(self)
}
}
};
}
// all Range<T> with ExactSizeIterator
indexed_range_impl! {u8}
indexed_range_impl! {u16}
indexed_range_impl! {u32}
indexed_range_impl! {usize}
indexed_range_impl! {i8}
indexed_range_impl! {i16}
indexed_range_impl! {i32}
indexed_range_impl! {isize}
// other Range<T> with just Iterator
unindexed_range_impl! {u64, u64}
unindexed_range_impl! {i64, u64}
unindexed_range_impl! {u128, u128}
unindexed_range_impl! {i128, u128}
// char is special because of the surrogate range hole
macro_rules! convert_char {
( $self:ident . $method:ident ( $( $arg:expr ),* ) ) => {{
let start = $self.range.start as u32;
let end = $self.range.end as u32;
if start < 0xD800 && 0xE000 < end {
// chain the before and after surrogate range fragments
(start..0xD800)
.into_par_iter()
.chain(0xE000..end)
.map(|codepoint| unsafe { char::from_u32_unchecked(codepoint) })
.$method($( $arg ),*)
} else {
// no surrogate range to worry about
(start..end)
.into_par_iter()
.map(|codepoint| unsafe { char::from_u32_unchecked(codepoint) })
.$method($( $arg ),*)
}
}};
}
impl ParallelIterator for Iter<char> {
type Item = char;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
convert_char!(self.drive(consumer))
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl IndexedParallelIterator for Iter<char> {
// Split at the surrogate range first if we're allowed to
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
convert_char!(self.drive(consumer))
}
fn len(&self) -> usize {
// Taken from <char as Step>::steps_between
let start = self.range.start as u32;
let end = self.range.end as u32;
if start < end {
let mut count = end - start;
if start < 0xD800 && 0xE000 <= end {
count -= 0x800
}
count as usize
} else {
0
}
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
convert_char!(self.with_producer(callback))
}
}
#[test]
fn check_range_split_at_overflow() {
// Note, this split index overflows i8!
let producer = IterProducer { range: -100i8..100 };
let (left, right) = producer.split_at(150);
let r1: i32 = left.range.map(i32::from).sum();
let r2: i32 = right.range.map(i32::from).sum();
assert_eq!(r1 + r2, -100);
}
#[test]
fn test_i128_len_doesnt_overflow() {
// Using parse because some versions of rust don't allow long literals
let octillion: i128 = "1000000000000000000000000000".parse().unwrap();
let producer = IterProducer {
range: 0..octillion,
};
assert_eq!(octillion as u128, producer.range.unindexed_len());
assert_eq!(octillion as u128, (0..octillion).unindexed_len());
assert_eq!(
2 * octillion as u128,
(-octillion..octillion).unindexed_len()
);
assert_eq!(u128::MAX, (i128::MIN..i128::MAX).unindexed_len());
}
#[test]
fn test_u64_opt_len() {
assert_eq!(Some(100), (0..100u64).into_par_iter().opt_len());
assert_eq!(
Some(usize::MAX),
(0..usize::MAX as u64).into_par_iter().opt_len()
);
if (usize::MAX as u64) < u64::MAX {
assert_eq!(
None,
(0..(usize::MAX as u64).wrapping_add(1))
.into_par_iter()
.opt_len()
);
assert_eq!(None, (0..u64::MAX).into_par_iter().opt_len());
}
}
#[test]
fn test_u128_opt_len() {
assert_eq!(Some(100), (0..100u128).into_par_iter().opt_len());
assert_eq!(
Some(usize::MAX),
(0..usize::MAX as u128).into_par_iter().opt_len()
);
assert_eq!(None, (0..1 + usize::MAX as u128).into_par_iter().opt_len());
assert_eq!(None, (0..u128::MAX).into_par_iter().opt_len());
}
// `usize as i64` can overflow, so make sure to wrap it appropriately
// when using the `opt_len` "indexed" mode.
#[test]
#[cfg(target_pointer_width = "64")]
fn test_usize_i64_overflow() {
use crate::ThreadPoolBuilder;
let iter = (-2..i64::MAX).into_par_iter();
assert_eq!(iter.opt_len(), Some(i64::MAX as usize + 2));
// always run with multiple threads to split into, or this will take forever...
let pool = ThreadPoolBuilder::new().num_threads(8).build().unwrap();
pool.install(|| assert_eq!(iter.find_last(|_| true), Some(i64::MAX - 1)));
}
#[test]
fn test_issue_833() {
fn is_even(n: i64) -> bool {
n % 2 == 0
}
// The integer type should be inferred from `is_even`
let v: Vec<_> = (1..100).into_par_iter().filter(|&x| is_even(x)).collect();
assert!(v.into_iter().eq((2..100).step_by(2)));
// Try examples with indexed iterators too
let pos = (0..100).into_par_iter().position_any(|x| x == 50i16);
assert_eq!(pos, Some(50usize));
assert!((0..100)
.into_par_iter()
.zip(0..100)
.all(|(a, b)| i16::eq(&a, &b)));
}
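The surrogate-hole handling above is visible through the public API: a parallel `Range<char>` simply skips `0xD800..0xE000`, and `len()` subtracts those `0x800` codepoints. A minimal sketch, assuming `rayon` is available as a dependency:

```
use rayon::prelude::*;

fn main() {
    // '\u{D000}'..'\u{F000}' numerically spans 0x2000 codepoints, but the
    // 0x800 surrogates in the middle are not chars; the parallel iterator
    // chains the two valid fragments around the hole.
    let range = '\u{D000}'..'\u{F000}';
    assert_eq!(range.clone().into_par_iter().count(), 0x2000 - 0x800);
    // Agrees with sequential iteration over the same range.
    assert_eq!(range.count(), 0x2000 - 0x800);
}
```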

vendor/rayon/src/range_inclusive.rs vendored Normal file

@@ -0,0 +1,381 @@
//! Parallel iterator types for [inclusive ranges],
//! the type for values created by `a..=b` expressions
//!
//! You will rarely need to interact with this module directly unless you have
//! need to name one of the iterator types.
//!
//! ```
//! use rayon::prelude::*;
//!
//! let r = (0..=100u64).into_par_iter()
//!                      .sum();
//!
//! // compare result with sequential calculation
//! assert_eq!((0..=100).sum::<u64>(), r);
//! ```
//!
//! [inclusive ranges]: std::ops::RangeInclusive
use crate::iter::plumbing::*;
use crate::iter::*;
use std::ops::RangeInclusive;
/// Parallel iterator over an inclusive range, implemented for all integer types and `char`.
///
/// **Note:** The `zip` operation requires `IndexedParallelIterator`
/// which is only implemented for `u8`, `i8`, `u16`, `i16`, and `char`.
///
/// ```
/// use rayon::prelude::*;
///
/// let p = (0..=25u16).into_par_iter()
///                    .zip(0..=25u16)
///                    .filter(|&(x, y)| x % 5 == 0 || y % 5 == 0)
///                    .map(|(x, y)| x * y)
///                    .sum::<u16>();
///
/// let s = (0..=25u16).zip(0..=25u16)
///                    .filter(|&(x, y)| x % 5 == 0 || y % 5 == 0)
///                    .map(|(x, y)| x * y)
///                    .sum();
///
/// assert_eq!(p, s);
/// ```
#[derive(Debug, Clone)]
pub struct Iter<T> {
range: RangeInclusive<T>,
}
impl<T> Iter<T>
where
RangeInclusive<T>: Eq,
T: Ord + Copy,
{
/// Returns `Some((start, end))` for `start..=end`, or `None` if it is exhausted.
///
/// Note that `RangeInclusive` does not specify the bounds of an exhausted iterator,
/// so this is a way for us to figure out what we've got. Thankfully, all of the
/// integer types we care about can be trivially cloned.
fn bounds(&self) -> Option<(T, T)> {
let start = *self.range.start();
let end = *self.range.end();
if start <= end && self.range == (start..=end) {
// If the range is still nonempty, this is obviously true
// If the range is exhausted, either start > end or
// the range does not equal start..=end.
Some((start, end))
} else {
None
}
}
}
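The invariant `bounds()` relies on can be checked with plain `std` types: once a `RangeInclusive` is exhausted, either `start > end` or the range no longer compares equal to a freshly built `start..=end`. A small standalone sketch:

```
fn main() {
    let mut r = 0u8..=1;
    r.by_ref().for_each(drop); // exhaust the range
    let (start, end) = (*r.start(), *r.end());
    // Exactly the disjunction that makes `bounds()` return `None` here.
    assert!(start > end || r != (start..=end));
}
```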
/// Implemented for ranges of all primitive integer types and `char`.
impl<T> IntoParallelIterator for RangeInclusive<T>
where
Iter<T>: ParallelIterator,
{
type Item = <Iter<T> as ParallelIterator>::Item;
type Iter = Iter<T>;
fn into_par_iter(self) -> Self::Iter {
Iter { range: self }
}
}
/// These traits help drive integer type inference. Without them, an unknown `{integer}` type only
/// has constraints on `Iter<{integer}>`, which will probably give up and use `i32`. By adding
/// these traits on the item type, the compiler can see a more direct constraint to infer like
/// `{integer}: RangeInteger`, which works better. See `test_issue_833` for an example.
///
/// They have to be `pub` since they're seen in the public `impl ParallelIterator` constraints, but
/// we put them in a private module so they're not actually reachable in our public API.
mod private {
use super::*;
/// Implementation details of `ParallelIterator for Iter<Self>`
pub trait RangeInteger: Sized + Send {
private_decl! {}
fn drive_unindexed<C>(iter: Iter<Self>, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self>;
fn opt_len(iter: &Iter<Self>) -> Option<usize>;
}
/// Implementation details of `IndexedParallelIterator for Iter<Self>`
pub trait IndexedRangeInteger: RangeInteger {
private_decl! {}
fn drive<C>(iter: Iter<Self>, consumer: C) -> C::Result
where
C: Consumer<Self>;
fn len(iter: &Iter<Self>) -> usize;
fn with_producer<CB>(iter: Iter<Self>, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self>;
}
}
use private::{IndexedRangeInteger, RangeInteger};
impl<T: RangeInteger> ParallelIterator for Iter<T> {
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<T>,
{
T::drive_unindexed(self, consumer)
}
#[inline]
fn opt_len(&self) -> Option<usize> {
T::opt_len(self)
}
}
impl<T: IndexedRangeInteger> IndexedParallelIterator for Iter<T> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<T>,
{
T::drive(self, consumer)
}
#[inline]
fn len(&self) -> usize {
T::len(self)
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<T>,
{
T::with_producer(self, callback)
}
}
macro_rules! convert {
( $iter:ident . $method:ident ( $( $arg:expr ),* ) ) => {
if let Some((start, end)) = $iter.bounds() {
if let Some(end) = end.checked_add(1) {
(start..end).into_par_iter().$method($( $arg ),*)
} else {
(start..end).into_par_iter().chain(once(end)).$method($( $arg ),*)
}
} else {
empty::<Self>().$method($( $arg ),*)
}
};
}
macro_rules! parallel_range_impl {
( $t:ty ) => {
impl RangeInteger for $t {
private_impl! {}
fn drive_unindexed<C>(iter: Iter<$t>, consumer: C) -> C::Result
where
C: UnindexedConsumer<$t>,
{
convert!(iter.drive_unindexed(consumer))
}
fn opt_len(iter: &Iter<$t>) -> Option<usize> {
convert!(iter.opt_len())
}
}
};
}
macro_rules! indexed_range_impl {
( $t:ty ) => {
parallel_range_impl! { $t }
impl IndexedRangeInteger for $t {
private_impl! {}
fn drive<C>(iter: Iter<$t>, consumer: C) -> C::Result
where
C: Consumer<$t>,
{
convert!(iter.drive(consumer))
}
fn len(iter: &Iter<$t>) -> usize {
iter.range.len()
}
fn with_producer<CB>(iter: Iter<$t>, callback: CB) -> CB::Output
where
CB: ProducerCallback<$t>,
{
convert!(iter.with_producer(callback))
}
}
};
}
// all RangeInclusive<T> with ExactSizeIterator
indexed_range_impl! {u8}
indexed_range_impl! {u16}
indexed_range_impl! {i8}
indexed_range_impl! {i16}
// other RangeInclusive<T> with just Iterator
parallel_range_impl! {usize}
parallel_range_impl! {isize}
parallel_range_impl! {u32}
parallel_range_impl! {i32}
parallel_range_impl! {u64}
parallel_range_impl! {i64}
parallel_range_impl! {u128}
parallel_range_impl! {i128}
// char is special
macro_rules! convert_char {
( $self:ident . $method:ident ( $( $arg:expr ),* ) ) => {
if let Some((start, end)) = $self.bounds() {
let start = start as u32;
let end = end as u32;
if start < 0xD800 && 0xE000 <= end {
// chain the before and after surrogate range fragments
(start..0xD800)
.into_par_iter()
.chain(0xE000..end + 1) // cannot use RangeInclusive, so add one to end
.map(|codepoint| unsafe { char::from_u32_unchecked(codepoint) })
.$method($( $arg ),*)
} else {
// no surrogate range to worry about
(start..end + 1) // cannot use RangeInclusive, so add one to end
.into_par_iter()
.map(|codepoint| unsafe { char::from_u32_unchecked(codepoint) })
.$method($( $arg ),*)
}
} else {
empty::<char>().$method($( $arg ),*)
}
};
}
impl ParallelIterator for Iter<char> {
type Item = char;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
convert_char!(self.drive(consumer))
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
// `Range<u32>` is already indexed despite being broken on 16-bit platforms (its length can overflow `usize`), so `char` may as well benefit from indexing too.
impl IndexedParallelIterator for Iter<char> {
// Split at the surrogate range first if we're allowed to
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
convert_char!(self.drive(consumer))
}
fn len(&self) -> usize {
if let Some((start, end)) = self.bounds() {
// Taken from <char as Step>::steps_between
let start = start as u32;
let end = end as u32;
let mut count = end - start;
if start < 0xD800 && 0xE000 <= end {
count -= 0x800
}
(count + 1) as usize // add one for inclusive
} else {
0
}
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
convert_char!(self.with_producer(callback))
}
}
#[test]
#[cfg(target_pointer_width = "64")]
fn test_u32_opt_len() {
assert_eq!(Some(101), (0..=100u32).into_par_iter().opt_len());
assert_eq!(
Some(u32::MAX as usize),
(0..=u32::MAX - 1).into_par_iter().opt_len()
);
assert_eq!(
Some(u32::MAX as usize + 1),
(0..=u32::MAX).into_par_iter().opt_len()
);
}
#[test]
fn test_u64_opt_len() {
assert_eq!(Some(101), (0..=100u64).into_par_iter().opt_len());
assert_eq!(
Some(usize::MAX),
(0..=usize::MAX as u64 - 1).into_par_iter().opt_len()
);
assert_eq!(None, (0..=usize::MAX as u64).into_par_iter().opt_len());
assert_eq!(None, (0..=u64::MAX).into_par_iter().opt_len());
}
#[test]
fn test_u128_opt_len() {
assert_eq!(Some(101), (0..=100u128).into_par_iter().opt_len());
assert_eq!(
Some(usize::MAX),
(0..=usize::MAX as u128 - 1).into_par_iter().opt_len()
);
assert_eq!(None, (0..=usize::MAX as u128).into_par_iter().opt_len());
assert_eq!(None, (0..=u128::MAX).into_par_iter().opt_len());
}
// `usize as i64` can overflow, so make sure to wrap it appropriately
// when using the `opt_len` "indexed" mode.
#[test]
#[cfg(target_pointer_width = "64")]
fn test_usize_i64_overflow() {
use crate::ThreadPoolBuilder;
let iter = (-2..=i64::MAX).into_par_iter();
assert_eq!(iter.opt_len(), Some(i64::MAX as usize + 3));
// always run with multiple threads to split into, or this will take forever...
let pool = ThreadPoolBuilder::new().num_threads(8).build().unwrap();
pool.install(|| assert_eq!(iter.find_last(|_| true), Some(i64::MAX)));
}
#[test]
fn test_issue_833() {
fn is_even(n: i64) -> bool {
n % 2 == 0
}
// The integer type should be inferred from `is_even`
let v: Vec<_> = (1..=100).into_par_iter().filter(|&x| is_even(x)).collect();
assert!(v.into_iter().eq((2..=100).step_by(2)));
// Try examples with indexed iterators too
let pos = (0..=100).into_par_iter().position_any(|x| x == 50i16);
assert_eq!(pos, Some(50usize));
assert!((0..=100)
.into_par_iter()
.zip(0..=100)
.all(|(a, b)| i16::eq(&a, &b)));
}
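Because `convert!` falls back to chaining `once(end)` when `end.checked_add(1)` overflows, even full-domain ranges work. A quick sketch, assuming `rayon` as a dependency:

```
use rayon::prelude::*;

fn main() {
    // 0..=255 covers all of u8, so `end + 1` would overflow; per the
    // `convert!` macro this iterates 0..255 and chains a final `once(255)`.
    let sum: u32 = (0..=u8::MAX).into_par_iter().map(u32::from).sum();
    assert_eq!(sum, (0..=255u32).sum::<u32>());
}
```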

vendor/rayon/src/result.rs vendored Normal file

@@ -0,0 +1,132 @@
//! Parallel iterator types for [results]
//!
//! You will rarely need to interact with this module directly unless you need
//! to name one of the iterator types.
//!
//! [results]: std::result
use crate::iter::plumbing::*;
use crate::iter::*;
use crate::option;
use std::sync::Mutex;
/// Parallel iterator over a result
#[derive(Debug, Clone)]
pub struct IntoIter<T> {
inner: option::IntoIter<T>,
}
impl<T: Send, E> IntoParallelIterator for Result<T, E> {
type Item = T;
type Iter = IntoIter<T>;
fn into_par_iter(self) -> Self::Iter {
IntoIter {
inner: self.ok().into_par_iter(),
}
}
}
delegate_indexed_iterator! {
IntoIter<T> => T,
impl<T: Send>
}
/// Parallel iterator over an immutable reference to a result
#[derive(Debug)]
pub struct Iter<'a, T> {
inner: option::IntoIter<&'a T>,
}
impl<T> Clone for Iter<'_, T> {
fn clone(&self) -> Self {
Iter {
inner: self.inner.clone(),
}
}
}
impl<'a, T: Sync, E> IntoParallelIterator for &'a Result<T, E> {
type Item = &'a T;
type Iter = Iter<'a, T>;
fn into_par_iter(self) -> Self::Iter {
Iter {
inner: self.as_ref().ok().into_par_iter(),
}
}
}
delegate_indexed_iterator! {
Iter<'a, T> => &'a T,
impl<'a, T: Sync>
}
/// Parallel iterator over a mutable reference to a result
#[derive(Debug)]
pub struct IterMut<'a, T> {
inner: option::IntoIter<&'a mut T>,
}
impl<'a, T: Send, E> IntoParallelIterator for &'a mut Result<T, E> {
type Item = &'a mut T;
type Iter = IterMut<'a, T>;
fn into_par_iter(self) -> Self::Iter {
IterMut {
inner: self.as_mut().ok().into_par_iter(),
}
}
}
delegate_indexed_iterator! {
IterMut<'a, T> => &'a mut T,
impl<'a, T: Send>
}
/// Collect an arbitrary `Result`-wrapped collection.
///
/// If any item is `Err`, then all previous `Ok` items collected are
/// discarded, and it returns that error. If there are multiple errors, the
/// one returned is not deterministic.
impl<C, T, E> FromParallelIterator<Result<T, E>> for Result<C, E>
where
C: FromParallelIterator<T>,
T: Send,
E: Send,
{
fn from_par_iter<I>(par_iter: I) -> Self
where
I: IntoParallelIterator<Item = Result<T, E>>,
{
fn ok<T, E>(saved: &Mutex<Option<E>>) -> impl Fn(Result<T, E>) -> Option<T> + '_ {
move |item| match item {
Ok(item) => Some(item),
Err(error) => {
// We don't need a blocking `lock()`, as anybody
// else holding the lock will also be writing
// `Some(error)`, and then ours is irrelevant.
if let Ok(mut guard) = saved.try_lock() {
if guard.is_none() {
*guard = Some(error);
}
}
None
}
}
}
let saved_error = Mutex::new(None);
let collection = par_iter
.into_par_iter()
.map(ok(&saved_error))
.while_some()
.collect();
match saved_error.into_inner().unwrap() {
Some(error) => Err(error),
None => Ok(collection),
}
}
}
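A usage sketch for the impl above: map fallibly and collect into `Result<Vec<_>, _>`; the first observed `Err` (nondeterministic if there are several) discards all `Ok` items.

```
use rayon::prelude::*;

fn main() {
    let ok: Result<Vec<i32>, String> = (1..=5).into_par_iter().map(Ok).collect();
    assert_eq!(ok, Ok(vec![1, 2, 3, 4, 5]));

    // A single failing item poisons the whole collection.
    let err: Result<Vec<i32>, String> = (1..=5)
        .into_par_iter()
        .map(|n| if n == 3 { Err(format!("bad {n}")) } else { Ok(n) })
        .collect();
    assert_eq!(err, Err("bad 3".to_string()));
}
```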

vendor/rayon/src/slice/chunk_by.rs vendored Normal file

@@ -0,0 +1,239 @@
use crate::iter::plumbing::*;
use crate::iter::*;
use std::fmt;
use std::marker::PhantomData;
trait ChunkBySlice<T>: AsRef<[T]> + Default + Send {
fn split(self, index: usize) -> (Self, Self);
fn chunk_by(self, pred: &impl Fn(&T, &T) -> bool) -> impl Iterator<Item = Self>;
fn find(&self, pred: &impl Fn(&T, &T) -> bool, start: usize, end: usize) -> Option<usize> {
self.as_ref()[start..end]
.windows(2)
.position(move |w| !pred(&w[0], &w[1]))
.map(|i| i + 1)
}
fn rfind(&self, pred: &impl Fn(&T, &T) -> bool, end: usize) -> Option<usize> {
self.as_ref()[..end]
.windows(2)
.rposition(move |w| !pred(&w[0], &w[1]))
.map(|i| i + 1)
}
}
impl<T: Sync> ChunkBySlice<T> for &[T] {
fn split(self, index: usize) -> (Self, Self) {
self.split_at(index)
}
fn chunk_by(self, pred: &impl Fn(&T, &T) -> bool) -> impl Iterator<Item = Self> {
self.chunk_by(pred)
}
}
impl<T: Send> ChunkBySlice<T> for &mut [T] {
fn split(self, index: usize) -> (Self, Self) {
self.split_at_mut(index)
}
fn chunk_by(self, pred: &impl Fn(&T, &T) -> bool) -> impl Iterator<Item = Self> {
self.chunk_by_mut(pred)
}
}
struct ChunkByProducer<'p, T, Slice, Pred> {
slice: Slice,
pred: &'p Pred,
tail: usize,
marker: PhantomData<fn(&T)>,
}
// Note: this implementation is very similar to `SplitProducer`.
impl<T, Slice, Pred> UnindexedProducer for ChunkByProducer<'_, T, Slice, Pred>
where
Slice: ChunkBySlice<T>,
Pred: Fn(&T, &T) -> bool + Send + Sync,
{
type Item = Slice;
fn split(self) -> (Self, Option<Self>) {
if self.tail < 2 {
return (Self { tail: 0, ..self }, None);
}
// Look forward for the separator, and failing that look backward.
let mid = self.tail / 2;
let index = match self.slice.find(self.pred, mid, self.tail) {
Some(i) => Some(mid + i),
None => self.slice.rfind(self.pred, mid + 1),
};
if let Some(index) = index {
let (left, right) = self.slice.split(index);
let (left_tail, right_tail) = if index <= mid {
// If we scanned backwards to find the separator, everything in
// the right side is exhausted, with no separators left to find.
(index, 0)
} else {
(mid + 1, self.tail - index)
};
// Create the left split before the separator.
let left = Self {
slice: left,
tail: left_tail,
..self
};
// Create the right split following the separator.
let right = Self {
slice: right,
tail: right_tail,
..self
};
(left, Some(right))
} else {
// The search is exhausted, no more separators...
(Self { tail: 0, ..self }, None)
}
}
fn fold_with<F>(self, mut folder: F) -> F
where
F: Folder<Self::Item>,
{
let Self {
slice, pred, tail, ..
} = self;
let (slice, tail) = if tail == slice.as_ref().len() {
// No tail section, so just let `consume_iter` do it all.
(Some(slice), None)
} else if let Some(index) = slice.rfind(pred, tail) {
// We found the last separator to complete the tail, so
// end with that slice after `consume_iter` finds the rest.
let (left, right) = slice.split(index);
(Some(left), Some(right))
} else {
// We know there are no separators at all, so it's all "tail".
(None, Some(slice))
};
if let Some(slice) = slice {
folder = folder.consume_iter(slice.chunk_by(pred));
}
if let Some(tail) = tail {
folder = folder.consume(tail);
}
folder
}
}
/// Parallel iterator over slice in (non-overlapping) chunks separated by a predicate.
///
/// This struct is created by the [`par_chunk_by`] method on `&[T]`.
///
/// [`par_chunk_by`]: super::ParallelSlice::par_chunk_by()
pub struct ChunkBy<'data, T, P> {
pred: P,
slice: &'data [T],
}
impl<T, P: Clone> Clone for ChunkBy<'_, T, P> {
fn clone(&self) -> Self {
ChunkBy {
pred: self.pred.clone(),
slice: self.slice,
}
}
}
impl<T: fmt::Debug, P> fmt::Debug for ChunkBy<'_, T, P> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("ChunkBy")
.field("slice", &self.slice)
.finish()
}
}
impl<'data, T, P> ChunkBy<'data, T, P> {
pub(super) fn new(slice: &'data [T], pred: P) -> Self {
Self { pred, slice }
}
}
impl<'data, T, P> ParallelIterator for ChunkBy<'data, T, P>
where
T: Sync,
P: Fn(&T, &T) -> bool + Send + Sync,
{
type Item = &'data [T];
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge_unindexed(
ChunkByProducer {
tail: self.slice.len(),
slice: self.slice,
pred: &self.pred,
marker: PhantomData,
},
consumer,
)
}
}
/// Parallel iterator over slice in (non-overlapping) mutable chunks
/// separated by a predicate.
///
/// This struct is created by the [`par_chunk_by_mut`] method on `&mut [T]`.
///
/// [`par_chunk_by_mut`]: super::ParallelSliceMut::par_chunk_by_mut()
pub struct ChunkByMut<'data, T, P> {
pred: P,
slice: &'data mut [T],
}
impl<T: fmt::Debug, P> fmt::Debug for ChunkByMut<'_, T, P> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("ChunkByMut")
.field("slice", &self.slice)
.finish()
}
}
impl<'data, T, P> ChunkByMut<'data, T, P> {
pub(super) fn new(slice: &'data mut [T], pred: P) -> Self {
Self { pred, slice }
}
}
impl<'data, T, P> ParallelIterator for ChunkByMut<'data, T, P>
where
T: Send,
P: Fn(&T, &T) -> bool + Send + Sync,
{
type Item = &'data mut [T];
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge_unindexed(
ChunkByProducer {
tail: self.slice.len(),
slice: self.slice,
pred: &self.pred,
marker: PhantomData,
},
consumer,
)
}
}
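A usage sketch for `par_chunk_by`: it yields maximal runs in which the predicate holds between adjacent elements, matching the sequential `chunk_by`.

```
use rayon::prelude::*;

fn main() {
    let data = [1, 1, 2, 2, 2, 3, 1];
    // Runs of equal neighbors: [1, 1], [2, 2, 2], [3], [1]
    let runs: Vec<&[i32]> = data.par_chunk_by(|a, b| a == b).collect();
    assert_eq!(runs, [&[1, 1][..], &[2, 2, 2], &[3], &[1]]);
}
```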

vendor/rayon/src/slice/chunks.rs vendored Normal file

@@ -0,0 +1,387 @@
use crate::iter::plumbing::*;
use crate::iter::*;
/// Parallel iterator over immutable non-overlapping chunks of a slice
#[derive(Debug)]
pub struct Chunks<'data, T> {
chunk_size: usize,
slice: &'data [T],
}
impl<'data, T> Chunks<'data, T> {
pub(super) fn new(chunk_size: usize, slice: &'data [T]) -> Self {
Self { chunk_size, slice }
}
}
impl<T> Clone for Chunks<'_, T> {
fn clone(&self) -> Self {
Chunks { ..*self }
}
}
impl<'data, T: Sync> ParallelIterator for Chunks<'data, T> {
type Item = &'data [T];
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<T: Sync> IndexedParallelIterator for Chunks<'_, T> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn len(&self) -> usize {
self.slice.len().div_ceil(self.chunk_size)
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
callback.callback(ChunksProducer {
chunk_size: self.chunk_size,
slice: self.slice,
})
}
}
struct ChunksProducer<'data, T: Sync> {
chunk_size: usize,
slice: &'data [T],
}
impl<'data, T: 'data + Sync> Producer for ChunksProducer<'data, T> {
type Item = &'data [T];
type IntoIter = ::std::slice::Chunks<'data, T>;
fn into_iter(self) -> Self::IntoIter {
self.slice.chunks(self.chunk_size)
}
fn split_at(self, index: usize) -> (Self, Self) {
let elem_index = Ord::min(index * self.chunk_size, self.slice.len());
let (left, right) = self.slice.split_at(elem_index);
(
ChunksProducer {
chunk_size: self.chunk_size,
slice: left,
},
ChunksProducer {
chunk_size: self.chunk_size,
slice: right,
},
)
}
}
/// Parallel iterator over immutable non-overlapping chunks of a slice
#[derive(Debug)]
pub struct ChunksExact<'data, T> {
chunk_size: usize,
slice: &'data [T],
rem: &'data [T],
}
impl<'data, T> ChunksExact<'data, T> {
pub(super) fn new(chunk_size: usize, slice: &'data [T]) -> Self {
let rem_len = slice.len() % chunk_size;
let len = slice.len() - rem_len;
let (slice, rem) = slice.split_at(len);
Self {
chunk_size,
slice,
rem,
}
}
/// Return the remainder of the original slice that is not going to be
/// returned by the iterator. The returned slice has at most `chunk_size-1`
/// elements.
pub fn remainder(&self) -> &'data [T] {
self.rem
}
}
impl<T> Clone for ChunksExact<'_, T> {
fn clone(&self) -> Self {
ChunksExact { ..*self }
}
}
impl<'data, T: Sync> ParallelIterator for ChunksExact<'data, T> {
type Item = &'data [T];
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<T: Sync> IndexedParallelIterator for ChunksExact<'_, T> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn len(&self) -> usize {
self.slice.len() / self.chunk_size
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
callback.callback(ChunksExactProducer {
chunk_size: self.chunk_size,
slice: self.slice,
})
}
}
struct ChunksExactProducer<'data, T: Sync> {
chunk_size: usize,
slice: &'data [T],
}
impl<'data, T: 'data + Sync> Producer for ChunksExactProducer<'data, T> {
type Item = &'data [T];
type IntoIter = ::std::slice::ChunksExact<'data, T>;
fn into_iter(self) -> Self::IntoIter {
self.slice.chunks_exact(self.chunk_size)
}
fn split_at(self, index: usize) -> (Self, Self) {
let elem_index = index * self.chunk_size;
let (left, right) = self.slice.split_at(elem_index);
(
ChunksExactProducer {
chunk_size: self.chunk_size,
slice: left,
},
ChunksExactProducer {
chunk_size: self.chunk_size,
slice: right,
},
)
}
}
/// Parallel iterator over mutable non-overlapping chunks of a slice
#[derive(Debug)]
pub struct ChunksMut<'data, T> {
chunk_size: usize,
slice: &'data mut [T],
}
impl<'data, T> ChunksMut<'data, T> {
pub(super) fn new(chunk_size: usize, slice: &'data mut [T]) -> Self {
Self { chunk_size, slice }
}
}
impl<'data, T: Send> ParallelIterator for ChunksMut<'data, T> {
type Item = &'data mut [T];
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<T: Send> IndexedParallelIterator for ChunksMut<'_, T> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn len(&self) -> usize {
self.slice.len().div_ceil(self.chunk_size)
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
callback.callback(ChunksMutProducer {
chunk_size: self.chunk_size,
slice: self.slice,
})
}
}
struct ChunksMutProducer<'data, T: Send> {
chunk_size: usize,
slice: &'data mut [T],
}
impl<'data, T: 'data + Send> Producer for ChunksMutProducer<'data, T> {
type Item = &'data mut [T];
type IntoIter = ::std::slice::ChunksMut<'data, T>;
fn into_iter(self) -> Self::IntoIter {
self.slice.chunks_mut(self.chunk_size)
}
fn split_at(self, index: usize) -> (Self, Self) {
let elem_index = Ord::min(index * self.chunk_size, self.slice.len());
let (left, right) = self.slice.split_at_mut(elem_index);
(
ChunksMutProducer {
chunk_size: self.chunk_size,
slice: left,
},
ChunksMutProducer {
chunk_size: self.chunk_size,
slice: right,
},
)
}
}
/// Parallel iterator over mutable non-overlapping chunks of a slice
#[derive(Debug)]
pub struct ChunksExactMut<'data, T> {
chunk_size: usize,
slice: &'data mut [T],
rem: &'data mut [T],
}
impl<'data, T> ChunksExactMut<'data, T> {
pub(super) fn new(chunk_size: usize, slice: &'data mut [T]) -> Self {
let rem_len = slice.len() % chunk_size;
let len = slice.len() - rem_len;
let (slice, rem) = slice.split_at_mut(len);
Self {
chunk_size,
slice,
rem,
}
}
/// Return the remainder of the original slice that is not going to be
/// returned by the iterator. The returned slice has at most `chunk_size-1`
/// elements.
///
/// Note that this has to consume `self` to return the original lifetime of
/// the data, which prevents this from actually being used as a parallel
/// iterator since that also consumes. This method is provided for parity
    /// with `std::slice::ChunksExactMut`, but consider calling `remainder()` or
/// `take_remainder()` as alternatives.
pub fn into_remainder(self) -> &'data mut [T] {
self.rem
}
/// Return the remainder of the original slice that is not going to be
/// returned by the iterator. The returned slice has at most `chunk_size-1`
/// elements.
///
/// Consider `take_remainder()` if you need access to the data with its
/// original lifetime, rather than borrowing through `&mut self` here.
pub fn remainder(&mut self) -> &mut [T] {
self.rem
}
/// Return the remainder of the original slice that is not going to be
/// returned by the iterator. The returned slice has at most `chunk_size-1`
/// elements. Subsequent calls will return an empty slice.
pub fn take_remainder(&mut self) -> &'data mut [T] {
std::mem::take(&mut self.rem)
}
}
impl<'data, T: Send> ParallelIterator for ChunksExactMut<'data, T> {
type Item = &'data mut [T];
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<T: Send> IndexedParallelIterator for ChunksExactMut<'_, T> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn len(&self) -> usize {
self.slice.len() / self.chunk_size
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
callback.callback(ChunksExactMutProducer {
chunk_size: self.chunk_size,
slice: self.slice,
})
}
}
struct ChunksExactMutProducer<'data, T: Send> {
chunk_size: usize,
slice: &'data mut [T],
}
impl<'data, T: 'data + Send> Producer for ChunksExactMutProducer<'data, T> {
type Item = &'data mut [T];
type IntoIter = ::std::slice::ChunksExactMut<'data, T>;
fn into_iter(self) -> Self::IntoIter {
self.slice.chunks_exact_mut(self.chunk_size)
}
fn split_at(self, index: usize) -> (Self, Self) {
let elem_index = index * self.chunk_size;
let (left, right) = self.slice.split_at_mut(elem_index);
(
ChunksExactMutProducer {
chunk_size: self.chunk_size,
slice: left,
},
ChunksExactMutProducer {
chunk_size: self.chunk_size,
slice: right,
},
)
}
}
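A usage sketch contrasting the two immutable variants above: `par_chunks` yields a short final chunk, while `par_chunks_exact` withholds it as `remainder()`.

```
use rayon::prelude::*;

fn main() {
    let v = [0, 1, 2, 3, 4];
    // [0, 1], [2, 3], and a short trailing [4]
    assert_eq!(v.par_chunks(2).count(), 3);

    // Only full chunks are yielded; [4] is left as the remainder.
    let exact = v.par_chunks_exact(2);
    assert_eq!(exact.remainder(), &[4]);
    let sums: Vec<i32> = exact.map(|c| c.iter().sum()).collect();
    assert_eq!(sums, [1, 5]);
}
```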

vendor/rayon/src/slice/mod.rs vendored Normal file

File diff suppressed because it is too large

vendor/rayon/src/slice/rchunks.rs vendored Normal file

@@ -0,0 +1,385 @@
use crate::iter::plumbing::*;
use crate::iter::*;
/// Parallel iterator over immutable non-overlapping chunks of a slice, starting at the end.
#[derive(Debug)]
pub struct RChunks<'data, T> {
chunk_size: usize,
slice: &'data [T],
}
impl<'data, T> RChunks<'data, T> {
pub(super) fn new(chunk_size: usize, slice: &'data [T]) -> Self {
Self { chunk_size, slice }
}
}
impl<T> Clone for RChunks<'_, T> {
fn clone(&self) -> Self {
RChunks { ..*self }
}
}
impl<'data, T: Sync> ParallelIterator for RChunks<'data, T> {
type Item = &'data [T];
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<T: Sync> IndexedParallelIterator for RChunks<'_, T> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn len(&self) -> usize {
self.slice.len().div_ceil(self.chunk_size)
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
callback.callback(RChunksProducer {
chunk_size: self.chunk_size,
slice: self.slice,
})
}
}
struct RChunksProducer<'data, T: Sync> {
chunk_size: usize,
slice: &'data [T],
}
impl<'data, T: 'data + Sync> Producer for RChunksProducer<'data, T> {
type Item = &'data [T];
type IntoIter = ::std::slice::RChunks<'data, T>;
fn into_iter(self) -> Self::IntoIter {
self.slice.rchunks(self.chunk_size)
}
fn split_at(self, index: usize) -> (Self, Self) {
let elem_index = self.slice.len().saturating_sub(index * self.chunk_size);
let (left, right) = self.slice.split_at(elem_index);
(
RChunksProducer {
chunk_size: self.chunk_size,
slice: right,
},
RChunksProducer {
chunk_size: self.chunk_size,
slice: left,
},
)
}
}
/// Parallel iterator over immutable non-overlapping chunks of a slice, starting at the end.
#[derive(Debug)]
pub struct RChunksExact<'data, T> {
chunk_size: usize,
slice: &'data [T],
rem: &'data [T],
}
impl<'data, T> RChunksExact<'data, T> {
pub(super) fn new(chunk_size: usize, slice: &'data [T]) -> Self {
let rem_len = slice.len() % chunk_size;
let (rem, slice) = slice.split_at(rem_len);
Self {
chunk_size,
slice,
rem,
}
}
/// Return the remainder of the original slice that is not going to be
/// returned by the iterator. The returned slice has at most `chunk_size-1`
/// elements.
pub fn remainder(&self) -> &'data [T] {
self.rem
}
}
impl<T> Clone for RChunksExact<'_, T> {
fn clone(&self) -> Self {
RChunksExact { ..*self }
}
}
impl<'data, T: Sync> ParallelIterator for RChunksExact<'data, T> {
type Item = &'data [T];
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<T: Sync> IndexedParallelIterator for RChunksExact<'_, T> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn len(&self) -> usize {
self.slice.len() / self.chunk_size
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
callback.callback(RChunksExactProducer {
chunk_size: self.chunk_size,
slice: self.slice,
})
}
}
struct RChunksExactProducer<'data, T: Sync> {
chunk_size: usize,
slice: &'data [T],
}
impl<'data, T: 'data + Sync> Producer for RChunksExactProducer<'data, T> {
type Item = &'data [T];
type IntoIter = ::std::slice::RChunksExact<'data, T>;
fn into_iter(self) -> Self::IntoIter {
self.slice.rchunks_exact(self.chunk_size)
}
fn split_at(self, index: usize) -> (Self, Self) {
let elem_index = self.slice.len() - index * self.chunk_size;
let (left, right) = self.slice.split_at(elem_index);
(
RChunksExactProducer {
chunk_size: self.chunk_size,
slice: right,
},
RChunksExactProducer {
chunk_size: self.chunk_size,
slice: left,
},
)
}
}
/// Parallel iterator over mutable non-overlapping chunks of a slice, starting at the end.
#[derive(Debug)]
pub struct RChunksMut<'data, T> {
chunk_size: usize,
slice: &'data mut [T],
}
impl<'data, T> RChunksMut<'data, T> {
pub(super) fn new(chunk_size: usize, slice: &'data mut [T]) -> Self {
Self { chunk_size, slice }
}
}
impl<'data, T: Send> ParallelIterator for RChunksMut<'data, T> {
type Item = &'data mut [T];
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<T: Send> IndexedParallelIterator for RChunksMut<'_, T> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn len(&self) -> usize {
self.slice.len().div_ceil(self.chunk_size)
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
callback.callback(RChunksMutProducer {
chunk_size: self.chunk_size,
slice: self.slice,
})
}
}
struct RChunksMutProducer<'data, T: Send> {
chunk_size: usize,
slice: &'data mut [T],
}
impl<'data, T: 'data + Send> Producer for RChunksMutProducer<'data, T> {
type Item = &'data mut [T];
type IntoIter = ::std::slice::RChunksMut<'data, T>;
fn into_iter(self) -> Self::IntoIter {
self.slice.rchunks_mut(self.chunk_size)
}
fn split_at(self, index: usize) -> (Self, Self) {
let elem_index = self.slice.len().saturating_sub(index * self.chunk_size);
let (left, right) = self.slice.split_at_mut(elem_index);
(
RChunksMutProducer {
chunk_size: self.chunk_size,
slice: right,
},
RChunksMutProducer {
chunk_size: self.chunk_size,
slice: left,
},
)
}
}
/// Parallel iterator over mutable non-overlapping chunks of a slice, starting at the end.
#[derive(Debug)]
pub struct RChunksExactMut<'data, T: Send> {
chunk_size: usize,
slice: &'data mut [T],
rem: &'data mut [T],
}
impl<'data, T: Send> RChunksExactMut<'data, T> {
pub(super) fn new(chunk_size: usize, slice: &'data mut [T]) -> Self {
let rem_len = slice.len() % chunk_size;
let (rem, slice) = slice.split_at_mut(rem_len);
Self {
chunk_size,
slice,
rem,
}
}
/// Return the remainder of the original slice that is not going to be
/// returned by the iterator. The returned slice has at most `chunk_size-1`
/// elements.
///
/// Note that this has to consume `self` to return the original lifetime of
/// the data, which prevents this from actually being used as a parallel
/// iterator since that also consumes. This method is provided for parity
    /// with `std::slice::RChunksExactMut`, but consider calling `remainder()` or
/// `take_remainder()` as alternatives.
pub fn into_remainder(self) -> &'data mut [T] {
self.rem
}
/// Return the remainder of the original slice that is not going to be
/// returned by the iterator. The returned slice has at most `chunk_size-1`
/// elements.
///
/// Consider `take_remainder()` if you need access to the data with its
/// original lifetime, rather than borrowing through `&mut self` here.
pub fn remainder(&mut self) -> &mut [T] {
self.rem
}
/// Return the remainder of the original slice that is not going to be
/// returned by the iterator. The returned slice has at most `chunk_size-1`
/// elements. Subsequent calls will return an empty slice.
pub fn take_remainder(&mut self) -> &'data mut [T] {
std::mem::take(&mut self.rem)
}
}
impl<'data, T: Send + 'data> ParallelIterator for RChunksExactMut<'data, T> {
type Item = &'data mut [T];
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<'data, T: Send + 'data> IndexedParallelIterator for RChunksExactMut<'data, T> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn len(&self) -> usize {
self.slice.len() / self.chunk_size
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
callback.callback(RChunksExactMutProducer {
chunk_size: self.chunk_size,
slice: self.slice,
})
}
}
struct RChunksExactMutProducer<'data, T: Send> {
chunk_size: usize,
slice: &'data mut [T],
}
impl<'data, T: 'data + Send> Producer for RChunksExactMutProducer<'data, T> {
type Item = &'data mut [T];
type IntoIter = ::std::slice::RChunksExactMut<'data, T>;
fn into_iter(self) -> Self::IntoIter {
self.slice.rchunks_exact_mut(self.chunk_size)
}
fn split_at(self, index: usize) -> (Self, Self) {
let elem_index = self.slice.len() - index * self.chunk_size;
let (left, right) = self.slice.split_at_mut(elem_index);
(
RChunksExactMutProducer {
chunk_size: self.chunk_size,
slice: right,
},
RChunksExactMutProducer {
chunk_size: self.chunk_size,
slice: left,
},
)
}
}
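The reversed variants mirror `std`'s `rchunks` family: chunking starts from the back, so the short chunk, or the `rchunks_exact` remainder, lands at the front. A quick sketch:

```
use rayon::prelude::*;

fn main() {
    let v = [0, 1, 2, 3, 4];
    // Taken from the back: [3, 4], [1, 2], then the short [0]
    let chunks: Vec<&[i32]> = v.par_rchunks(2).collect();
    assert_eq!(chunks, [&[3, 4][..], &[1, 2], &[0]]);
    // The exact variant leaves the leading element as the remainder.
    assert_eq!(v.par_rchunks_exact(2).remainder(), &[0]);
}
```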

vendor/rayon/src/slice/sort.rs vendored Normal file

File diff suppressed because it is too large

vendor/rayon/src/slice/test.rs vendored Normal file

@@ -0,0 +1,216 @@
#![cfg(test)]
use crate::prelude::*;
use rand::distr::Uniform;
use rand::seq::IndexedRandom;
use rand::{rng, Rng};
use std::cmp::Ordering::{Equal, Greater, Less};
use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};
macro_rules! sort {
($f:ident, $name:ident) => {
#[test]
fn $name() {
let rng = &mut rng();
for len in (0..25).chain(500..501) {
for &modulus in &[5, 10, 100] {
let dist = Uniform::new(0, modulus).unwrap();
for _ in 0..100 {
let v: Vec<i32> = rng.sample_iter(&dist).take(len).collect();
// Test sort using `<` operator.
let mut tmp = v.clone();
tmp.$f(|a, b| a.cmp(b));
assert!(tmp.windows(2).all(|w| w[0] <= w[1]));
// Test sort using `>` operator.
let mut tmp = v.clone();
tmp.$f(|a, b| b.cmp(a));
assert!(tmp.windows(2).all(|w| w[0] >= w[1]));
}
}
}
// Test sort with many duplicates.
for &len in &[1_000, 10_000, 100_000] {
for &modulus in &[5, 10, 100, 10_000] {
let dist = Uniform::new(0, modulus).unwrap();
let mut v: Vec<i32> = rng.sample_iter(&dist).take(len).collect();
v.$f(|a, b| a.cmp(b));
assert!(v.windows(2).all(|w| w[0] <= w[1]));
}
}
// Test sort with many pre-sorted runs.
for &len in &[1_000, 10_000, 100_000] {
let len_dist = Uniform::new(0, len).unwrap();
for &modulus in &[5, 10, 1000, 50_000] {
let dist = Uniform::new(0, modulus).unwrap();
let mut v: Vec<i32> = rng.sample_iter(&dist).take(len).collect();
v.sort();
v.reverse();
for _ in 0..5 {
let a = rng.sample(&len_dist);
let b = rng.sample(&len_dist);
if a < b {
v[a..b].reverse();
} else {
v.swap(a, b);
}
}
v.$f(|a, b| a.cmp(b));
assert!(v.windows(2).all(|w| w[0] <= w[1]));
}
}
// Sort using a completely random comparison function.
// This will reorder the elements *somehow*, but won't panic.
let mut v: Vec<_> = (0..100).collect();
v.$f(|_, _| *[Less, Equal, Greater].choose(&mut rand::rng()).unwrap());
v.$f(|a, b| a.cmp(b));
for i in 0..v.len() {
assert_eq!(v[i], i);
}
// Should not panic.
[0i32; 0].$f(|a, b| a.cmp(b));
[(); 10].$f(|a, b| a.cmp(b));
[(); 100].$f(|a, b| a.cmp(b));
let mut v = [0xDEAD_BEEFu64];
v.$f(|a, b| a.cmp(b));
assert!(v == [0xDEAD_BEEF]);
}
};
}
sort!(par_sort_by, test_par_sort);
sort!(par_sort_unstable_by, test_par_sort_unstable);
#[test]
fn test_par_sort_stability() {
for len in (2..25).chain(500..510).chain(50_000..50_010) {
for _ in 0..10 {
let mut counts = [0; 10];
// Create a vector like [(6, 1), (5, 1), (6, 2), ...],
// where the first item of each tuple is random, but
// the second item represents which occurrence of that
// number this element is, i.e. the second elements
// will occur in sorted order.
let mut rng = rng();
let mut v: Vec<_> = (0..len)
.map(|_| {
let n: usize = rng.random_range(0..10);
counts[n] += 1;
(n, counts[n])
})
.collect();
// Only sort on the first element, so an unstable sort
// may mix up the counts.
v.par_sort_by(|&(a, _), &(b, _)| a.cmp(&b));
// This comparison includes the count (the second item
// of the tuple), so elements with equal first items
// will need to be ordered with increasing
// counts... i.e. exactly asserting that this sort is
// stable.
assert!(v.windows(2).all(|w| w[0] <= w[1]));
}
}
}
#[test]
fn test_par_chunks_exact_remainder() {
let v: &[i32] = &[0, 1, 2, 3, 4];
let c = v.par_chunks_exact(2);
assert_eq!(c.remainder(), &[4]);
assert_eq!(c.len(), 2);
}
#[test]
fn test_par_chunks_exact_mut_remainder() {
let v: &mut [i32] = &mut [0, 1, 2, 3, 4];
let mut c = v.par_chunks_exact_mut(2);
assert_eq!(c.remainder(), &[4]);
assert_eq!(c.len(), 2);
assert_eq!(c.into_remainder(), &[4]);
let mut c = v.par_chunks_exact_mut(2);
assert_eq!(c.take_remainder(), &[4]);
assert_eq!(c.take_remainder(), &[]);
assert_eq!(c.len(), 2);
}
#[test]
fn test_par_rchunks_exact_remainder() {
let v: &[i32] = &[0, 1, 2, 3, 4];
let c = v.par_rchunks_exact(2);
assert_eq!(c.remainder(), &[0]);
assert_eq!(c.len(), 2);
}
#[test]
fn test_par_rchunks_exact_mut_remainder() {
let v: &mut [i32] = &mut [0, 1, 2, 3, 4];
let mut c = v.par_rchunks_exact_mut(2);
assert_eq!(c.remainder(), &[0]);
assert_eq!(c.len(), 2);
assert_eq!(c.into_remainder(), &[0]);
let mut c = v.par_rchunks_exact_mut(2);
assert_eq!(c.take_remainder(), &[0]);
assert_eq!(c.take_remainder(), &[]);
assert_eq!(c.len(), 2);
}
#[test]
fn slice_chunk_by() {
let v: Vec<_> = (0..1000).collect();
assert_eq!(v[..0].par_chunk_by(|_, _| todo!()).count(), 0);
assert_eq!(v[..1].par_chunk_by(|_, _| todo!()).count(), 1);
assert_eq!(v[..2].par_chunk_by(|_, _| true).count(), 1);
assert_eq!(v[..2].par_chunk_by(|_, _| false).count(), 2);
let count = AtomicUsize::new(0);
let par: Vec<_> = v
.par_chunk_by(|x, y| {
count.fetch_add(1, Relaxed);
(x % 10 < 3) == (y % 10 < 3)
})
.collect();
assert_eq!(count.into_inner(), v.len() - 1);
let seq: Vec<_> = v.chunk_by(|x, y| (x % 10 < 3) == (y % 10 < 3)).collect();
assert_eq!(par, seq);
}
#[test]
fn slice_chunk_by_mut() {
let mut v: Vec<_> = (0..1000).collect();
assert_eq!(v[..0].par_chunk_by_mut(|_, _| todo!()).count(), 0);
assert_eq!(v[..1].par_chunk_by_mut(|_, _| todo!()).count(), 1);
assert_eq!(v[..2].par_chunk_by_mut(|_, _| true).count(), 1);
assert_eq!(v[..2].par_chunk_by_mut(|_, _| false).count(), 2);
let mut v2 = v.clone();
let count = AtomicUsize::new(0);
let par: Vec<_> = v
.par_chunk_by_mut(|x, y| {
count.fetch_add(1, Relaxed);
(x % 10 < 3) == (y % 10 < 3)
})
.collect();
assert_eq!(count.into_inner(), v2.len() - 1);
let seq: Vec<_> = v2
.chunk_by_mut(|x, y| (x % 10 < 3) == (y % 10 < 3))
.collect();
assert_eq!(par, seq);
}

vendor/rayon/src/split_producer.rs vendored Normal file

@@ -0,0 +1,152 @@
//! Common splitter for strings and slices
//!
//! This module is private, so these items are effectively `pub(super)`
use crate::iter::plumbing::{Folder, UnindexedProducer};
/// Common producer for splitting on a predicate.
pub(super) struct SplitProducer<'p, P, V, const INCL: bool = false> {
data: V,
separator: &'p P,
/// Marks the endpoint beyond which we've already found no separators.
tail: usize,
}
pub(super) type SplitInclusiveProducer<'p, P, V> = SplitProducer<'p, P, V, true>;
/// Helper trait so `&str`, `&[T]`, and `&mut [T]` can share `SplitProducer`.
pub(super) trait Fissile<P>: Sized {
fn length(&self) -> usize;
fn midpoint(&self, end: usize) -> usize;
fn find(&self, separator: &P, start: usize, end: usize) -> Option<usize>;
fn rfind(&self, separator: &P, end: usize) -> Option<usize>;
fn split_once<const INCL: bool>(self, index: usize) -> (Self, Self);
fn fold_splits<F, const INCL: bool>(self, separator: &P, folder: F, skip_last: bool) -> F
where
F: Folder<Self>,
Self: Send;
}
impl<'p, P, V> SplitProducer<'p, P, V>
where
V: Fissile<P> + Send,
{
pub(super) fn new(data: V, separator: &'p P) -> Self {
SplitProducer {
tail: data.length(),
data,
separator,
}
}
}
impl<'p, P, V> SplitInclusiveProducer<'p, P, V>
where
V: Fissile<P> + Send,
{
pub(super) fn new_incl(data: V, separator: &'p P) -> Self {
SplitProducer {
tail: data.length(),
data,
separator,
}
}
}
impl<'p, P, V, const INCL: bool> SplitProducer<'p, P, V, INCL>
where
V: Fissile<P> + Send,
{
/// Common `fold_with` implementation, integrating `SplitTerminator`'s
/// need to sometimes skip its final empty item.
pub(super) fn fold_with<F>(self, folder: F, skip_last: bool) -> F
where
F: Folder<V>,
{
let SplitProducer {
data,
separator,
tail,
} = self;
if tail == data.length() {
// No tail section, so just let `fold_splits` handle it.
data.fold_splits::<F, INCL>(separator, folder, skip_last)
} else if let Some(index) = data.rfind(separator, tail) {
// We found the last separator to complete the tail, so
// end with that slice after `fold_splits` finds the rest.
let (left, right) = data.split_once::<INCL>(index);
let folder = left.fold_splits::<F, INCL>(separator, folder, false);
if skip_last || folder.full() {
folder
} else {
folder.consume(right)
}
} else {
// We know there are no separators at all. Return our whole data.
if skip_last {
folder
} else {
folder.consume(data)
}
}
}
}
impl<'p, P, V, const INCL: bool> UnindexedProducer for SplitProducer<'p, P, V, INCL>
where
V: Fissile<P> + Send,
P: Sync,
{
type Item = V;
fn split(self) -> (Self, Option<Self>) {
// Look forward for the separator, and failing that look backward.
let mid = self.data.midpoint(self.tail);
let index = match self.data.find(self.separator, mid, self.tail) {
Some(i) => Some(mid + i),
None => self.data.rfind(self.separator, mid),
};
if let Some(index) = index {
let len = self.data.length();
let (left, right) = self.data.split_once::<INCL>(index);
let (left_tail, right_tail) = if index < mid {
// If we scanned backwards to find the separator, everything in
// the right side is exhausted, with no separators left to find.
(index, 0)
} else {
let right_index = len - right.length();
(mid, self.tail - right_index)
};
// Create the left split before the separator.
let left = SplitProducer {
data: left,
tail: left_tail,
..self
};
// Create the right split following the separator.
let right = SplitProducer {
data: right,
tail: right_tail,
..self
};
(left, Some(right))
} else {
// The search is exhausted, no more separators...
(SplitProducer { tail: 0, ..self }, None)
}
}
fn fold_with<F>(self, folder: F) -> F
where
F: Folder<Self::Item>,
{
self.fold_with(folder, false)
}
}
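From the public API this machinery backs the splitting iterators such as the slice `par_split`; the midpoint search in `split` above is invisible to callers. A usage sketch:

```
use rayon::prelude::*;

fn main() {
    let data = [1, 2, 0, 3, 4, 0, 5];
    // Split on zeros, like `slice::split`; separators are not yielded.
    let parts: Vec<&[i32]> = data.par_split(|&x| x == 0).collect();
    assert_eq!(parts, [&[1, 2][..], &[3, 4], &[5]]);
}
```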

vendor/rayon/src/str.rs vendored Normal file

File diff suppressed because it is too large

vendor/rayon/src/string.rs vendored Normal file

@@ -0,0 +1,48 @@
//! This module contains the parallel iterator types for owned strings
//! (`String`). You will rarely need to interact with it directly
//! unless you have need to name one of the iterator types.
use crate::iter::plumbing::*;
use crate::math::simplify_range;
use crate::prelude::*;
use std::ops::{Range, RangeBounds};
impl<'a> ParallelDrainRange<usize> for &'a mut String {
type Iter = Drain<'a>;
type Item = char;
fn par_drain<R: RangeBounds<usize>>(self, range: R) -> Self::Iter {
Drain {
range: simplify_range(range, self.len()),
string: self,
}
}
}
/// Draining parallel iterator that moves a range of characters out of a string,
/// but keeps the total capacity.
#[derive(Debug)]
pub struct Drain<'a> {
string: &'a mut String,
range: Range<usize>,
}
impl<'a> ParallelIterator for Drain<'a> {
type Item = char;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.string[self.range.clone()]
.par_chars()
.drive_unindexed(consumer)
}
}
impl<'a> Drop for Drain<'a> {
fn drop(&mut self) {
// Remove the drained range.
self.string.drain(self.range.clone());
}
}
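A usage sketch for `String::par_drain`: the range (in byte indices, as with `String::drain`) is yielded as parallel `char`s, and dropping the iterator removes it from the string while keeping capacity.

```
use rayon::prelude::*;

fn main() {
    let mut s = String::from("hello world");
    let cap = s.capacity();
    let drained: String = s.par_drain(..6).collect();
    assert_eq!(drained, "hello ");
    assert_eq!(s, "world");
    assert_eq!(s.capacity(), cap); // capacity is retained
}
```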

vendor/rayon/src/vec.rs vendored Normal file

@@ -0,0 +1,292 @@
//! Parallel iterator types for [vectors] (`Vec<T>`)
//!
//! You will rarely need to interact with this module directly unless you need
//! to name one of the iterator types.
//!
//! [vectors]: mod@std::vec
use crate::iter::plumbing::*;
use crate::iter::*;
use crate::math::simplify_range;
use crate::slice::{Iter, IterMut};
use std::iter;
use std::mem;
use std::ops::{Range, RangeBounds};
use std::ptr;
use std::slice;
impl<'data, T: Sync + 'data> IntoParallelIterator for &'data Vec<T> {
type Item = &'data T;
type Iter = Iter<'data, T>;
fn into_par_iter(self) -> Self::Iter {
<&[T]>::into_par_iter(self)
}
}
impl<'data, T: Send + 'data> IntoParallelIterator for &'data mut Vec<T> {
type Item = &'data mut T;
type Iter = IterMut<'data, T>;
fn into_par_iter(self) -> Self::Iter {
<&mut [T]>::into_par_iter(self)
}
}
/// Parallel iterator that moves out of a vector.
#[derive(Debug, Clone)]
pub struct IntoIter<T> {
vec: Vec<T>,
}
impl<T: Send> IntoParallelIterator for Vec<T> {
type Item = T;
type Iter = IntoIter<T>;
fn into_par_iter(self) -> Self::Iter {
IntoIter { vec: self }
}
}
impl<T: Send> IntoParallelIterator for Box<[T]> {
type Item = T;
type Iter = IntoIter<T>;
fn into_par_iter(self) -> Self::Iter {
IntoIter { vec: self.into() }
}
}
impl<T: Send> ParallelIterator for IntoIter<T> {
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<T: Send> IndexedParallelIterator for IntoIter<T> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn len(&self) -> usize {
self.vec.len()
}
fn with_producer<CB>(mut self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
// Drain every item, and then the vector only needs to free its buffer.
self.vec.par_drain(..).with_producer(callback)
}
}
impl<'data, T: Send> ParallelDrainRange<usize> for &'data mut Vec<T> {
type Iter = Drain<'data, T>;
type Item = T;
fn par_drain<R: RangeBounds<usize>>(self, range: R) -> Self::Iter {
Drain {
orig_len: self.len(),
range: simplify_range(range, self.len()),
vec: self,
}
}
}
/// Draining parallel iterator that moves a range out of a vector, but keeps the total capacity.
#[derive(Debug)]
pub struct Drain<'data, T: Send> {
vec: &'data mut Vec<T>,
range: Range<usize>,
orig_len: usize,
}
impl<'data, T: Send> ParallelIterator for Drain<'data, T> {
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
bridge(self, consumer)
}
fn opt_len(&self) -> Option<usize> {
Some(self.len())
}
}
impl<'data, T: Send> IndexedParallelIterator for Drain<'data, T> {
fn drive<C>(self, consumer: C) -> C::Result
where
C: Consumer<Self::Item>,
{
bridge(self, consumer)
}
fn len(&self) -> usize {
self.range.len()
}
fn with_producer<CB>(self, callback: CB) -> CB::Output
where
CB: ProducerCallback<Self::Item>,
{
unsafe {
// Make the vector forget about the drained items, and temporarily the tail too.
self.vec.set_len(self.range.start);
// Create the producer as the exclusive "owner" of the slice.
let producer = DrainProducer::from_vec(self.vec, self.range.len());
// The producer will move or drop each item from the drained range.
callback.callback(producer)
}
}
}
impl<'data, T: Send> Drop for Drain<'data, T> {
fn drop(&mut self) {
let Range { start, end } = self.range;
if self.vec.len() == self.orig_len {
// We must not have produced, so just call a normal drain to remove the items.
self.vec.drain(start..end);
} else if start == end {
// Empty range, so just restore the length to its original state
unsafe {
self.vec.set_len(self.orig_len);
}
} else if end < self.orig_len {
// The producer was responsible for consuming the drained items.
// Move the tail items to their new place, then set the length to include them.
unsafe {
let ptr = self.vec.as_mut_ptr().add(start);
let tail_ptr = self.vec.as_ptr().add(end);
let tail_len = self.orig_len - end;
ptr::copy(tail_ptr, ptr, tail_len);
self.vec.set_len(start + tail_len);
}
}
}
}
// ////////////////////////////////////////////////////////////////////////
pub(crate) struct DrainProducer<'data, T: Send> {
slice: &'data mut [T],
}
impl<T: Send> DrainProducer<'_, T> {
/// Creates a draining producer, which *moves* items from the slice.
///
/// Unsafe because `!Copy` data must not be read after the borrow is released.
pub(crate) unsafe fn new(slice: &mut [T]) -> DrainProducer<'_, T> {
DrainProducer { slice }
}
/// Creates a draining producer, which *moves* items from the tail of the vector.
///
/// Unsafe because we're moving from beyond `vec.len()`, so the caller must ensure
/// that data is initialized and not read after the borrow is released.
unsafe fn from_vec(vec: &mut Vec<T>, len: usize) -> DrainProducer<'_, T> {
let start = vec.len();
assert!(vec.capacity() - start >= len);
// The pointer is derived from `Vec` directly, not through a `Deref`,
// so it has provenance over the whole allocation.
let ptr = vec.as_mut_ptr().add(start);
DrainProducer::new(slice::from_raw_parts_mut(ptr, len))
}
}
impl<'data, T: 'data + Send> Producer for DrainProducer<'data, T> {
type Item = T;
type IntoIter = SliceDrain<'data, T>;
fn into_iter(mut self) -> Self::IntoIter {
// replace the slice so we don't drop it twice
let slice = mem::take(&mut self.slice);
SliceDrain {
iter: slice.iter_mut(),
}
}
fn split_at(mut self, index: usize) -> (Self, Self) {
// replace the slice so we don't drop it twice
let slice = mem::take(&mut self.slice);
let (left, right) = slice.split_at_mut(index);
unsafe { (DrainProducer::new(left), DrainProducer::new(right)) }
}
}
impl<'data, T: 'data + Send> Drop for DrainProducer<'data, T> {
fn drop(&mut self) {
// extract the slice so we can use `Drop for [T]`
let slice_ptr: *mut [T] = mem::take::<&'data mut [T]>(&mut self.slice);
unsafe { ptr::drop_in_place::<[T]>(slice_ptr) };
}
}
// ////////////////////////////////////////////////////////////////////////
// like std::vec::Drain, without updating a source Vec
pub(crate) struct SliceDrain<'data, T> {
iter: slice::IterMut<'data, T>,
}
impl<'data, T: 'data> Iterator for SliceDrain<'data, T> {
type Item = T;
fn next(&mut self) -> Option<T> {
// Coerce the pointer early, so we don't keep the
// reference that's about to be invalidated.
let ptr: *const T = self.iter.next()?;
Some(unsafe { ptr::read(ptr) })
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
fn count(self) -> usize {
self.iter.len()
}
}
impl<'data, T: 'data> DoubleEndedIterator for SliceDrain<'data, T> {
fn next_back(&mut self) -> Option<Self::Item> {
// Coerce the pointer early, so we don't keep the
// reference that's about to be invalidated.
let ptr: *const T = self.iter.next_back()?;
Some(unsafe { ptr::read(ptr) })
}
}
impl<'data, T: 'data> ExactSizeIterator for SliceDrain<'data, T> {
fn len(&self) -> usize {
self.iter.len()
}
}
impl<'data, T: 'data> iter::FusedIterator for SliceDrain<'data, T> {}
impl<'data, T: 'data> Drop for SliceDrain<'data, T> {
fn drop(&mut self) {
// extract the iterator so we can use `Drop for [T]`
let slice_ptr: *mut [T] = mem::replace(&mut self.iter, [].iter_mut()).into_slice();
unsafe { ptr::drop_in_place::<[T]>(slice_ptr) };
}
}
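Finally, a usage sketch for `Vec::par_drain`: the range is moved out in parallel, the tail slides down when the iterator is dropped, and the buffer's capacity is preserved.

```
use rayon::prelude::*;

fn main() {
    let mut v: Vec<i32> = (0..10).collect();
    let cap = v.capacity();
    let drained: Vec<i32> = v.par_drain(2..5).collect();
    assert_eq!(drained, [2, 3, 4]);
    assert_eq!(v, [0, 1, 5, 6, 7, 8, 9]); // tail shifted down
    assert_eq!(v.capacity(), cap); // buffer kept
}
```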