Vendor dependencies for 0.3.0 release

2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

vendor/hashbrown/src/control/bitmask.rs vendored Normal file

@@ -0,0 +1,117 @@
use super::group::{
BitMaskWord, NonZeroBitMaskWord, BITMASK_ITER_MASK, BITMASK_MASK, BITMASK_STRIDE,
};
/// A bit mask which contains the result of a `Match` operation on a `Group` and
/// allows iterating through the matches.
///
/// The bit mask is arranged so that low-order bits represent lower memory
/// addresses for group match results.
///
/// For implementation reasons, the bits in the set may be sparsely packed with
/// groups of 8 bits representing one element. If any of these bits are non-zero
/// then this element is considered to be set in the mask. If this is the
/// case, `BITMASK_STRIDE` will be 8 to indicate a divide-by-8 should be
/// performed on counts/indices to normalize this difference. `BITMASK_MASK` is
/// similarly a mask of all the actually-used bits.
///
/// To iterate over a bit mask, it must be converted to a form where only 1 bit
/// is set per element. This is done by applying `BITMASK_ITER_MASK` to the
/// mask bits.
#[derive(Copy, Clone)]
pub(crate) struct BitMask(pub(crate) BitMaskWord);
#[allow(clippy::use_self)]
impl BitMask {
/// Returns a new `BitMask` with all bits inverted.
#[inline]
#[must_use]
#[allow(dead_code)]
pub(crate) fn invert(self) -> Self {
BitMask(self.0 ^ BITMASK_MASK)
}
/// Returns a new `BitMask` with the lowest bit removed.
#[inline]
#[must_use]
fn remove_lowest_bit(self) -> Self {
BitMask(self.0 & (self.0 - 1))
}
/// Returns whether the `BitMask` has at least one set bit.
#[inline]
pub(crate) fn any_bit_set(self) -> bool {
self.0 != 0
}
/// Returns the first set bit in the `BitMask`, if there is one.
#[inline]
pub(crate) fn lowest_set_bit(self) -> Option<usize> {
if let Some(nonzero) = NonZeroBitMaskWord::new(self.0) {
Some(Self::nonzero_trailing_zeros(nonzero))
} else {
None
}
}
/// Returns the number of trailing zeroes in the `BitMask`.
#[inline]
pub(crate) fn trailing_zeros(self) -> usize {
// ARM doesn't have a trailing_zeroes instruction, and instead uses
// reverse_bits (RBIT) + leading_zeroes (CLZ). However older ARM
// versions (pre-ARMv7) don't have RBIT and need to emulate it
// instead. Since we only have 1 bit set in each byte on ARM, we can
// use swap_bytes (REV) + leading_zeroes instead.
if cfg!(target_arch = "arm") && BITMASK_STRIDE % 8 == 0 {
self.0.swap_bytes().leading_zeros() as usize / BITMASK_STRIDE
} else {
self.0.trailing_zeros() as usize / BITMASK_STRIDE
}
}
/// Same as above but takes a `NonZeroBitMaskWord`.
#[inline]
fn nonzero_trailing_zeros(nonzero: NonZeroBitMaskWord) -> usize {
if cfg!(target_arch = "arm") && BITMASK_STRIDE % 8 == 0 {
// SAFETY: A byte-swapped non-zero value is still non-zero.
let swapped = unsafe { NonZeroBitMaskWord::new_unchecked(nonzero.get().swap_bytes()) };
swapped.leading_zeros() as usize / BITMASK_STRIDE
} else {
nonzero.trailing_zeros() as usize / BITMASK_STRIDE
}
}
/// Returns the number of leading zeroes in the `BitMask`.
#[inline]
pub(crate) fn leading_zeros(self) -> usize {
self.0.leading_zeros() as usize / BITMASK_STRIDE
}
}
impl IntoIterator for BitMask {
type Item = usize;
type IntoIter = BitMaskIter;
#[inline]
fn into_iter(self) -> BitMaskIter {
// A BitMask only requires each element (group of bits) to be non-zero.
// However for iteration we need each element to only contain 1 bit.
BitMaskIter(BitMask(self.0 & BITMASK_ITER_MASK))
}
}
/// Iterator over the contents of a `BitMask`, returning the indices of set
/// bits.
#[derive(Clone)]
pub(crate) struct BitMaskIter(pub(crate) BitMask);
impl Iterator for BitMaskIter {
type Item = usize;
#[inline]
fn next(&mut self) -> Option<usize> {
let bit = self.0.lowest_set_bit()?;
self.0 = self.0.remove_lowest_bit();
Some(bit)
}
}
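
The stride handling described in the `BitMask` docs can be checked with a small standalone sketch. The constants are copied from the word-sized generic implementation for illustration; any non-zero byte marks a match, `BITMASK_ITER_MASK` keeps one bit per byte, and dividing bit positions by the stride of 8 recovers element indices.

fn main() {
    const BITMASK_STRIDE: usize = 8;
    const BITMASK_ITER_MASK: u64 = 0x8080_8080_8080_8080;
    // Elements 2 and 5 matched: their bytes are non-zero (0xff and 0x80).
    let raw: u64 = 0x0000_8000_00ff_0000;
    // Keep a single bit per element before iterating, as `into_iter` does.
    let mut bits = raw & BITMASK_ITER_MASK;
    let mut indices = Vec::new();
    while bits != 0 {
        indices.push(bits.trailing_zeros() as usize / BITMASK_STRIDE);
        bits &= bits - 1; // remove_lowest_bit
    }
    assert_eq!(indices, [2usize, 5]);
}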

@@ -0,0 +1,154 @@
use super::super::{BitMask, Tag};
use core::{mem, ptr};
// Use the native word size as the group size. Using a 64-bit group size on
// a 32-bit architecture will just end up being more expensive because
// shifts and multiplies will need to be emulated.
cfg_if! {
if #[cfg(any(
target_pointer_width = "64",
target_arch = "aarch64",
target_arch = "x86_64",
target_arch = "wasm32",
))] {
type GroupWord = u64;
type NonZeroGroupWord = core::num::NonZeroU64;
} else {
type GroupWord = u32;
type NonZeroGroupWord = core::num::NonZeroU32;
}
}
pub(crate) type BitMaskWord = GroupWord;
pub(crate) type NonZeroBitMaskWord = NonZeroGroupWord;
pub(crate) const BITMASK_STRIDE: usize = 8;
// We only care about the highest bit of each tag for the mask.
#[allow(clippy::cast_possible_truncation, clippy::unnecessary_cast)]
pub(crate) const BITMASK_MASK: BitMaskWord = u64::from_ne_bytes([Tag::DELETED.0; 8]) as GroupWord;
pub(crate) const BITMASK_ITER_MASK: BitMaskWord = !0;
/// Helper function to replicate a tag across a `GroupWord`.
#[inline]
fn repeat(tag: Tag) -> GroupWord {
GroupWord::from_ne_bytes([tag.0; Group::WIDTH])
}
/// Abstraction over a group of control tags which can be scanned in
/// parallel.
///
/// This implementation uses a word-sized integer.
#[derive(Copy, Clone)]
pub(crate) struct Group(GroupWord);
// We perform all operations in the native endianness, and convert to
// little-endian just before creating a BitMask. This can potentially
// enable the compiler to eliminate unnecessary byte swaps if we are
// only checking whether a BitMask is empty.
#[allow(clippy::use_self)]
impl Group {
/// Number of bytes in the group.
pub(crate) const WIDTH: usize = mem::size_of::<Self>();
/// Returns a full group of empty tags, suitable for use as the initial
/// value for an empty hash table.
///
/// This is guaranteed to be aligned to the group size.
#[inline]
pub(crate) const fn static_empty() -> &'static [Tag; Group::WIDTH] {
#[repr(C)]
struct AlignedTags {
_align: [Group; 0],
tags: [Tag; Group::WIDTH],
}
const ALIGNED_TAGS: AlignedTags = AlignedTags {
_align: [],
tags: [Tag::EMPTY; Group::WIDTH],
};
&ALIGNED_TAGS.tags
}
/// Loads a group of tags starting at the given address.
#[inline]
#[allow(clippy::cast_ptr_alignment)] // unaligned load
pub(crate) unsafe fn load(ptr: *const Tag) -> Self {
Group(ptr::read_unaligned(ptr.cast()))
}
/// Loads a group of tags starting at the given address, which must be
/// aligned to `mem::align_of::<Group>()`.
#[inline]
#[allow(clippy::cast_ptr_alignment)]
pub(crate) unsafe fn load_aligned(ptr: *const Tag) -> Self {
debug_assert_eq!(ptr.align_offset(mem::align_of::<Self>()), 0);
Group(ptr::read(ptr.cast()))
}
/// Stores the group of tags to the given address, which must be
/// aligned to `mem::align_of::<Group>()`.
#[inline]
#[allow(clippy::cast_ptr_alignment)]
pub(crate) unsafe fn store_aligned(self, ptr: *mut Tag) {
debug_assert_eq!(ptr.align_offset(mem::align_of::<Self>()), 0);
ptr::write(ptr.cast(), self.0);
}
/// Returns a `BitMask` indicating all tags in the group which *may*
/// have the given value.
///
/// This function may return a false positive in certain cases where
/// the tag in the group differs from the searched value only in its
/// lowest bit. This is fine because:
/// - This never happens for `EMPTY` and `DELETED`, only full entries.
/// - The check for key equality will catch these.
/// - This only happens if there is at least 1 true match.
/// - The chance of this happening is very low (< 1% chance per byte).
#[inline]
pub(crate) fn match_tag(self, tag: Tag) -> BitMask {
// This algorithm is derived from
// https://graphics.stanford.edu/~seander/bithacks.html#ValueInWord
let cmp = self.0 ^ repeat(tag);
BitMask((cmp.wrapping_sub(repeat(Tag(0x01))) & !cmp & repeat(Tag::DELETED)).to_le())
}
/// Returns a `BitMask` indicating all tags in the group which are
/// `EMPTY`.
#[inline]
pub(crate) fn match_empty(self) -> BitMask {
// If the high bit is set, then the tag must be either:
// 1111_1111 (EMPTY) or 1000_0000 (DELETED).
// So we can just check if the top two bits are 1 by ANDing them.
BitMask((self.0 & (self.0 << 1) & repeat(Tag::DELETED)).to_le())
}
/// Returns a `BitMask` indicating all tags in the group which are
/// `EMPTY` or `DELETED`.
#[inline]
pub(crate) fn match_empty_or_deleted(self) -> BitMask {
// A tag is EMPTY or DELETED iff the high bit is set
BitMask((self.0 & repeat(Tag::DELETED)).to_le())
}
/// Returns a `BitMask` indicating all tags in the group which are full.
#[inline]
pub(crate) fn match_full(self) -> BitMask {
self.match_empty_or_deleted().invert()
}
/// Performs the following transformation on all tags in the group:
/// - `EMPTY => EMPTY`
/// - `DELETED => EMPTY`
/// - `FULL => DELETED`
#[inline]
pub(crate) fn convert_special_to_empty_and_full_to_deleted(self) -> Self {
// Map high_bit = 1 (EMPTY or DELETED) to 1111_1111
// and high_bit = 0 (FULL) to 1000_0000
//
// Here's this logic expanded to concrete values:
// let full = 1000_0000 (true) or 0000_0000 (false)
// !1000_0000 + 1 = 0111_1111 + 1 = 1000_0000 (no carry)
// !0000_0000 + 0 = 1111_1111 + 0 = 1111_1111 (no carry)
let full = !self.0 & repeat(Tag::DELETED);
Group(!full + (full >> 7))
}
}
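
The two bit tricks above, the SWAR byte-equality search in `match_tag` and the top-two-bits check in `match_empty`, can be reproduced on plain `u64` values. The constants are copied for illustration, and this sketch uses little-endian byte order throughout for simplicity, whereas the vendored code works in native endianness and only calls `to_le` when building the `BitMask`.

fn main() {
    fn repeat(byte: u8) -> u64 {
        u64::from_le_bytes([byte; 8])
    }
    let group = u64::from_le_bytes([0x12, 0x34, 0x7f, 0x12, 0x00, 0x56, 0x12, 0x7e]);

    // match_tag: bytes equal to the tag become 0x00 after the XOR, and the
    // subtract/and trick then sets the high bit of exactly those zero bytes
    // (modulo the documented false positives, which key equality filters out).
    let cmp = group ^ repeat(0x12);
    let mask = cmp.wrapping_sub(repeat(0x01)) & !cmp & repeat(0x80);
    let hits: Vec<usize> = (0..8).filter(|&i| mask.to_le_bytes()[i] & 0x80 != 0).collect();
    assert_eq!(hits, [0usize, 3, 6]);

    // match_empty: EMPTY (1111_1111) is the only tag with both of its top two
    // bits set, so `x & (x << 1)` keeps the high bit only for EMPTY bytes.
    let ctrl = u64::from_le_bytes([0xff, 0x80, 0x12, 0xff, 0x00, 0x7f, 0x80, 0xff]);
    let empty = ctrl & (ctrl << 1) & repeat(0x80);
    let empties: Vec<usize> = (0..8).filter(|&i| empty.to_le_bytes()[i] & 0x80 != 0).collect();
    assert_eq!(empties, [0usize, 3, 7]);
}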

@@ -0,0 +1,128 @@
use super::super::{BitMask, Tag};
use core::mem;
use core::num::NonZeroU16;
use core::arch::loongarch64::*;
pub(crate) type BitMaskWord = u16;
pub(crate) type NonZeroBitMaskWord = NonZeroU16;
pub(crate) const BITMASK_STRIDE: usize = 1;
pub(crate) const BITMASK_MASK: BitMaskWord = 0xffff;
pub(crate) const BITMASK_ITER_MASK: BitMaskWord = !0;
/// Abstraction over a group of control tags which can be scanned in
/// parallel.
///
/// This implementation uses a 128-bit LSX value.
#[derive(Copy, Clone)]
pub(crate) struct Group(m128i);
// FIXME: https://github.com/rust-lang/rust-clippy/issues/3859
#[allow(clippy::use_self)]
impl Group {
/// Number of bytes in the group.
pub(crate) const WIDTH: usize = mem::size_of::<Self>();
/// Returns a full group of empty tags, suitable for use as the initial
/// value for an empty hash table.
///
/// This is guaranteed to be aligned to the group size.
#[inline]
#[allow(clippy::items_after_statements)]
pub(crate) const fn static_empty() -> &'static [Tag; Group::WIDTH] {
#[repr(C)]
struct AlignedTags {
_align: [Group; 0],
tags: [Tag; Group::WIDTH],
}
const ALIGNED_TAGS: AlignedTags = AlignedTags {
_align: [],
tags: [Tag::EMPTY; Group::WIDTH],
};
&ALIGNED_TAGS.tags
}
/// Loads a group of tags starting at the given address.
#[inline]
#[allow(clippy::cast_ptr_alignment)] // unaligned load
pub(crate) unsafe fn load(ptr: *const Tag) -> Self {
Group(lsx_vld::<0>(ptr.cast()))
}
/// Loads a group of tags starting at the given address, which must be
/// aligned to `mem::align_of::<Group>()`.
#[inline]
#[allow(clippy::cast_ptr_alignment)]
pub(crate) unsafe fn load_aligned(ptr: *const Tag) -> Self {
debug_assert_eq!(ptr.align_offset(mem::align_of::<Self>()), 0);
Group(lsx_vld::<0>(ptr.cast()))
}
/// Stores the group of tags to the given address, which must be
/// aligned to `mem::align_of::<Group>()`.
#[inline]
#[allow(clippy::cast_ptr_alignment)]
pub(crate) unsafe fn store_aligned(self, ptr: *mut Tag) {
debug_assert_eq!(ptr.align_offset(mem::align_of::<Self>()), 0);
lsx_vst::<0>(self.0, ptr.cast());
}
/// Returns a `BitMask` indicating all tags in the group which have
/// the given value.
#[inline]
pub(crate) fn match_tag(self, tag: Tag) -> BitMask {
unsafe {
let cmp = lsx_vseq_b(self.0, lsx_vreplgr2vr_b(tag.0 as i32));
BitMask(lsx_vpickve2gr_hu::<0>(lsx_vmskltz_b(cmp)) as u16)
}
}
/// Returns a `BitMask` indicating all tags in the group which are
/// `EMPTY`.
#[inline]
pub(crate) fn match_empty(self) -> BitMask {
unsafe {
let cmp = lsx_vseqi_b::<{ Tag::EMPTY.0 as i8 as i32 }>(self.0);
BitMask(lsx_vpickve2gr_hu::<0>(lsx_vmskltz_b(cmp)) as u16)
}
}
/// Returns a `BitMask` indicating all tags in the group which are
/// `EMPTY` or `DELETED`.
#[inline]
pub(crate) fn match_empty_or_deleted(self) -> BitMask {
unsafe {
// A tag is EMPTY or DELETED iff the high bit is set
BitMask(lsx_vpickve2gr_hu::<0>(lsx_vmskltz_b(self.0)) as u16)
}
}
/// Returns a `BitMask` indicating all tags in the group which are full.
#[inline]
pub(crate) fn match_full(&self) -> BitMask {
unsafe {
// A tag is EMPTY or DELETED iff the high bit is set
BitMask(lsx_vpickve2gr_hu::<0>(lsx_vmskgez_b(self.0)) as u16)
}
}
/// Performs the following transformation on all tags in the group:
/// - `EMPTY => EMPTY`
/// - `DELETED => EMPTY`
/// - `FULL => DELETED`
#[inline]
pub(crate) fn convert_special_to_empty_and_full_to_deleted(self) -> Self {
// Map high_bit = 1 (EMPTY or DELETED) to 1111_1111
// and high_bit = 0 (FULL) to 1000_0000
//
// Here's this logic expanded to concrete values:
// let special = 0 > tag = 1111_1111 (true) or 0000_0000 (false)
// 1111_1111 | 1000_0000 = 1111_1111
// 0000_0000 | 1000_0000 = 1000_0000
unsafe {
let zero = lsx_vreplgr2vr_b(0);
let special = lsx_vslt_b(self.0, zero);
Group(lsx_vor_v(special, lsx_vreplgr2vr_b(Tag::DELETED.0 as i32)))
}
}
}

@@ -0,0 +1,43 @@
cfg_if! {
// Use the SSE2 implementation if possible: it allows us to scan 16 buckets
// at once instead of 8. We don't bother with AVX since it would require
// runtime dispatch and wouldn't gain us much anyways: the probability of
// finding a match drops off drastically after the first few buckets.
//
// I attempted an implementation on ARM using NEON instructions, but it
// turns out that most NEON instructions have multi-cycle latency, which in
// the end outweighs any gains over the generic implementation.
if #[cfg(all(
target_feature = "sse2",
any(target_arch = "x86", target_arch = "x86_64"),
not(miri),
))] {
mod sse2;
use sse2 as imp;
} else if #[cfg(all(
target_arch = "aarch64",
target_feature = "neon",
// NEON intrinsics are currently broken on big-endian targets.
// See https://github.com/rust-lang/stdarch/issues/1484.
target_endian = "little",
not(miri),
))] {
mod neon;
use neon as imp;
} else if #[cfg(all(
feature = "nightly",
target_arch = "loongarch64",
target_feature = "lsx",
not(miri),
))] {
mod lsx;
use lsx as imp;
} else {
mod generic;
use generic as imp;
}
}
pub(crate) use self::imp::Group;
pub(super) use self::imp::{
BitMaskWord, NonZeroBitMaskWord, BITMASK_ITER_MASK, BITMASK_MASK, BITMASK_STRIDE,
};
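
The selection above relies on the `cfg_if` crate's `cfg_if!` macro, which compiles exactly one branch. A minimal standalone sketch of the same pattern, assuming `cfg-if` is available as a dependency (the `Word` alias is illustrative):

use cfg_if::cfg_if;

cfg_if! {
    if #[cfg(target_pointer_width = "64")] {
        type Word = u64;
    } else {
        type Word = u32;
    }
}

fn main() {
    // Only one of the branches above exists in the compiled crate.
    println!("selected word size: {} bits", 8 * core::mem::size_of::<Word>());
}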

@@ -0,0 +1,121 @@
use super::super::{BitMask, Tag};
use core::arch::aarch64 as neon;
use core::mem;
use core::num::NonZeroU64;
pub(crate) type BitMaskWord = u64;
pub(crate) type NonZeroBitMaskWord = NonZeroU64;
pub(crate) const BITMASK_STRIDE: usize = 8;
pub(crate) const BITMASK_MASK: BitMaskWord = !0;
pub(crate) const BITMASK_ITER_MASK: BitMaskWord = 0x8080_8080_8080_8080;
/// Abstraction over a group of control tags which can be scanned in
/// parallel.
///
/// This implementation uses a 64-bit NEON value.
#[derive(Copy, Clone)]
pub(crate) struct Group(neon::uint8x8_t);
#[allow(clippy::use_self)]
impl Group {
/// Number of bytes in the group.
pub(crate) const WIDTH: usize = mem::size_of::<Self>();
/// Returns a full group of empty tags, suitable for use as the initial
/// value for an empty hash table.
///
/// This is guaranteed to be aligned to the group size.
#[inline]
pub(crate) const fn static_empty() -> &'static [Tag; Group::WIDTH] {
#[repr(C)]
struct AlignedTags {
_align: [Group; 0],
tags: [Tag; Group::WIDTH],
}
const ALIGNED_TAGS: AlignedTags = AlignedTags {
_align: [],
tags: [Tag::EMPTY; Group::WIDTH],
};
&ALIGNED_TAGS.tags
}
/// Loads a group of tags starting at the given address.
#[inline]
#[allow(clippy::cast_ptr_alignment)] // unaligned load
pub(crate) unsafe fn load(ptr: *const Tag) -> Self {
Group(neon::vld1_u8(ptr.cast()))
}
/// Loads a group of tags starting at the given address, which must be
/// aligned to `mem::align_of::<Group>()`.
#[inline]
#[allow(clippy::cast_ptr_alignment)]
pub(crate) unsafe fn load_aligned(ptr: *const Tag) -> Self {
debug_assert_eq!(ptr.align_offset(mem::align_of::<Self>()), 0);
Group(neon::vld1_u8(ptr.cast()))
}
/// Stores the group of tags to the given address, which must be
/// aligned to `mem::align_of::<Group>()`.
#[inline]
#[allow(clippy::cast_ptr_alignment)]
pub(crate) unsafe fn store_aligned(self, ptr: *mut Tag) {
debug_assert_eq!(ptr.align_offset(mem::align_of::<Self>()), 0);
neon::vst1_u8(ptr.cast(), self.0);
}
/// Returns a `BitMask` indicating all tags in the group which *may*
/// have the given value.
#[inline]
pub(crate) fn match_tag(self, tag: Tag) -> BitMask {
unsafe {
let cmp = neon::vceq_u8(self.0, neon::vdup_n_u8(tag.0));
BitMask(neon::vget_lane_u64(neon::vreinterpret_u64_u8(cmp), 0))
}
}
/// Returns a `BitMask` indicating all tags in the group which are
/// `EMPTY`.
#[inline]
pub(crate) fn match_empty(self) -> BitMask {
self.match_tag(Tag::EMPTY)
}
/// Returns a `BitMask` indicating all tags in the group which are
/// `EMPTY` or `DELETED`.
#[inline]
pub(crate) fn match_empty_or_deleted(self) -> BitMask {
unsafe {
let cmp = neon::vcltz_s8(neon::vreinterpret_s8_u8(self.0));
BitMask(neon::vget_lane_u64(neon::vreinterpret_u64_u8(cmp), 0))
}
}
/// Returns a `BitMask` indicating all tags in the group which are full.
#[inline]
pub(crate) fn match_full(self) -> BitMask {
unsafe {
let cmp = neon::vcgez_s8(neon::vreinterpret_s8_u8(self.0));
BitMask(neon::vget_lane_u64(neon::vreinterpret_u64_u8(cmp), 0))
}
}
/// Performs the following transformation on all tags in the group:
/// - `EMPTY => EMPTY`
/// - `DELETED => EMPTY`
/// - `FULL => DELETED`
#[inline]
pub(crate) fn convert_special_to_empty_and_full_to_deleted(self) -> Self {
// Map high_bit = 1 (EMPTY or DELETED) to 1111_1111
// and high_bit = 0 (FULL) to 1000_0000
//
// Here's this logic expanded to concrete values:
// let special = 0 > tag = 1111_1111 (true) or 0000_0000 (false)
// 1111_1111 | 1000_0000 = 1111_1111
// 0000_0000 | 1000_0000 = 1000_0000
unsafe {
let special = neon::vcltz_s8(neon::vreinterpret_s8_u8(self.0));
Group(neon::vorr_u8(special, neon::vdup_n_u8(0x80)))
}
}
}
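
The `convert_special_to_empty_and_full_to_deleted` comment above describes a per-byte mapping; a scalar sketch of the same mapping that the NEON, SSE2 and LSX code applies lane-wise (tag values illustrative):

fn main() {
    // high bit set (EMPTY or DELETED) => 1111_1111, high bit clear (FULL) => 1000_0000
    fn convert(tag: u8) -> u8 {
        let special: u8 = if (tag as i8) < 0 { 0xff } else { 0x00 };
        special | 0x80
    }
    assert_eq!(convert(0b1111_1111), 0b1111_1111); // EMPTY   => EMPTY
    assert_eq!(convert(0b1000_0000), 0b1111_1111); // DELETED => EMPTY
    assert_eq!(convert(0b0011_1010), 0b1000_0000); // FULL    => DELETED
}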

@@ -0,0 +1,146 @@
use super::super::{BitMask, Tag};
use core::mem;
use core::num::NonZeroU16;
#[cfg(target_arch = "x86")]
use core::arch::x86;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64 as x86;
pub(crate) type BitMaskWord = u16;
pub(crate) type NonZeroBitMaskWord = NonZeroU16;
pub(crate) const BITMASK_STRIDE: usize = 1;
pub(crate) const BITMASK_MASK: BitMaskWord = 0xffff;
pub(crate) const BITMASK_ITER_MASK: BitMaskWord = !0;
/// Abstraction over a group of control tags which can be scanned in
/// parallel.
///
/// This implementation uses a 128-bit SSE value.
#[derive(Copy, Clone)]
pub(crate) struct Group(x86::__m128i);
// FIXME: https://github.com/rust-lang/rust-clippy/issues/3859
#[allow(clippy::use_self)]
impl Group {
/// Number of bytes in the group.
pub(crate) const WIDTH: usize = mem::size_of::<Self>();
/// Returns a full group of empty tags, suitable for use as the initial
/// value for an empty hash table.
///
/// This is guaranteed to be aligned to the group size.
#[inline]
#[allow(clippy::items_after_statements)]
pub(crate) const fn static_empty() -> &'static [Tag; Group::WIDTH] {
#[repr(C)]
struct AlignedTags {
_align: [Group; 0],
tags: [Tag; Group::WIDTH],
}
const ALIGNED_TAGS: AlignedTags = AlignedTags {
_align: [],
tags: [Tag::EMPTY; Group::WIDTH],
};
&ALIGNED_TAGS.tags
}
/// Loads a group of tags starting at the given address.
#[inline]
#[allow(clippy::cast_ptr_alignment)] // unaligned load
pub(crate) unsafe fn load(ptr: *const Tag) -> Self {
Group(x86::_mm_loadu_si128(ptr.cast()))
}
/// Loads a group of tags starting at the given address, which must be
/// aligned to `mem::align_of::<Group>()`.
#[inline]
#[allow(clippy::cast_ptr_alignment)]
pub(crate) unsafe fn load_aligned(ptr: *const Tag) -> Self {
debug_assert_eq!(ptr.align_offset(mem::align_of::<Self>()), 0);
Group(x86::_mm_load_si128(ptr.cast()))
}
/// Stores the group of tags to the given address, which must be
/// aligned to `mem::align_of::<Group>()`.
#[inline]
#[allow(clippy::cast_ptr_alignment)]
pub(crate) unsafe fn store_aligned(self, ptr: *mut Tag) {
debug_assert_eq!(ptr.align_offset(mem::align_of::<Self>()), 0);
x86::_mm_store_si128(ptr.cast(), self.0);
}
/// Returns a `BitMask` indicating all tags in the group which have
/// the given value.
#[inline]
pub(crate) fn match_tag(self, tag: Tag) -> BitMask {
#[allow(
clippy::cast_possible_wrap, // tag.0: Tag as i8
// tag: i32 as u16
// note: _mm_movemask_epi8 returns a 16-bit mask in a i32, the
// upper 16-bits of the i32 are zeroed:
clippy::cast_sign_loss,
clippy::cast_possible_truncation
)]
unsafe {
let cmp = x86::_mm_cmpeq_epi8(self.0, x86::_mm_set1_epi8(tag.0 as i8));
BitMask(x86::_mm_movemask_epi8(cmp) as u16)
}
}
/// Returns a `BitMask` indicating all tags in the group which are
/// `EMPTY`.
#[inline]
pub(crate) fn match_empty(self) -> BitMask {
self.match_tag(Tag::EMPTY)
}
/// Returns a `BitMask` indicating all tags in the group which are
/// `EMPTY` or `DELETED`.
#[inline]
pub(crate) fn match_empty_or_deleted(self) -> BitMask {
#[allow(
// tag: i32 as u16
// note: _mm_movemask_epi8 returns a 16-bit mask in a i32, the
// upper 16-bits of the i32 are zeroed:
clippy::cast_sign_loss,
clippy::cast_possible_truncation
)]
unsafe {
// A tag is EMPTY or DELETED iff the high bit is set
BitMask(x86::_mm_movemask_epi8(self.0) as u16)
}
}
/// Returns a `BitMask` indicating all tags in the group which are full.
#[inline]
pub(crate) fn match_full(&self) -> BitMask {
self.match_empty_or_deleted().invert()
}
/// Performs the following transformation on all tags in the group:
/// - `EMPTY => EMPTY`
/// - `DELETED => EMPTY`
/// - `FULL => DELETED`
#[inline]
pub(crate) fn convert_special_to_empty_and_full_to_deleted(self) -> Self {
// Map high_bit = 1 (EMPTY or DELETED) to 1111_1111
// and high_bit = 0 (FULL) to 1000_0000
//
// Here's this logic expanded to concrete values:
// let special = 0 > tag = 1111_1111 (true) or 0000_0000 (false)
// 1111_1111 | 1000_0000 = 1111_1111
// 0000_0000 | 1000_0000 = 1000_0000
#[allow(
clippy::cast_possible_wrap, // tag: Tag::DELETED.0 as i8
)]
unsafe {
let zero = x86::_mm_setzero_si128();
let special = x86::_mm_cmpgt_epi8(zero, self.0);
Group(x86::_mm_or_si128(
special,
x86::_mm_set1_epi8(Tag::DELETED.0 as i8),
))
}
}
}
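
A standalone sketch of the movemask-based matching above, runnable on an x86_64 host with the baseline SSE2 feature (the tag values are illustrative). Because `_mm_movemask_epi8` yields one bit per byte, `BITMASK_STRIDE` is 1 and bucket indices come straight from `trailing_zeros`.

fn main() {
    demo();
}

#[cfg(target_arch = "x86_64")]
fn demo() {
    use core::arch::x86_64::*;
    // 16 control bytes: EMPTY, DELETED, and a few full tags.
    let tags: [u8; 16] = [
        0xff, 0x80, 0x23, 0xff, 0x23, 0x11, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23,
    ];
    unsafe {
        let group = _mm_loadu_si128(tags.as_ptr().cast());
        let cmp = _mm_cmpeq_epi8(group, _mm_set1_epi8(0x23));
        let mask = _mm_movemask_epi8(cmp) as u16;
        assert_eq!(mask, (1u16 << 2) | (1 << 4) | (1 << 15));
        assert_eq!(mask.trailing_zeros(), 2); // first matching bucket
    }
}

#[cfg(not(target_arch = "x86_64"))]
fn demo() {}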

vendor/hashbrown/src/control/mod.rs vendored Normal file

@@ -0,0 +1,10 @@
mod bitmask;
mod group;
mod tag;
use self::bitmask::BitMask;
pub(crate) use self::{
bitmask::BitMaskIter,
group::Group,
tag::{Tag, TagSliceExt},
};

vendor/hashbrown/src/control/tag.rs vendored Normal file

@@ -0,0 +1,83 @@
use core::{fmt, mem};
/// Single tag in a control group.
#[derive(Copy, Clone, PartialEq, Eq)]
#[repr(transparent)]
pub(crate) struct Tag(pub(super) u8);
impl Tag {
/// Control tag value for an empty bucket.
pub(crate) const EMPTY: Tag = Tag(0b1111_1111);
/// Control tag value for a deleted bucket.
pub(crate) const DELETED: Tag = Tag(0b1000_0000);
/// Checks whether a control tag represents a full bucket (top bit is clear).
#[inline]
pub(crate) const fn is_full(self) -> bool {
self.0 & 0x80 == 0
}
/// Checks whether a control tag represents a special value (top bit is set).
#[inline]
pub(crate) const fn is_special(self) -> bool {
self.0 & 0x80 != 0
}
/// Checks whether a special control value is EMPTY (just check 1 bit).
#[inline]
pub(crate) const fn special_is_empty(self) -> bool {
debug_assert!(self.is_special());
self.0 & 0x01 != 0
}
/// Creates a control tag representing a full bucket with the given hash.
#[inline]
#[allow(clippy::cast_possible_truncation)]
pub(crate) const fn full(hash: u64) -> Tag {
// Constant for function that grabs the top 7 bits of the hash.
const MIN_HASH_LEN: usize = if mem::size_of::<usize>() < mem::size_of::<u64>() {
mem::size_of::<usize>()
} else {
mem::size_of::<u64>()
};
// Grab the top 7 bits of the hash. While the hash is normally a full 64-bit
// value, some hash functions (such as FxHash) produce a usize result
// instead, which means that the top 32 bits are 0 on 32-bit platforms.
// So we use MIN_HASH_LEN constant to handle this.
let top7 = hash >> (MIN_HASH_LEN * 8 - 7);
Tag((top7 & 0x7f) as u8) // truncation
}
}
impl fmt::Debug for Tag {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if self.is_special() {
if self.special_is_empty() {
f.pad("EMPTY")
} else {
f.pad("DELETED")
}
} else {
f.debug_tuple("full").field(&(self.0 & 0x7F)).finish()
}
}
}
/// Extension trait for slices of tags.
pub(crate) trait TagSliceExt {
/// Fills the control with the given tag.
fn fill_tag(&mut self, tag: Tag);
/// Clears out the control.
#[inline]
fn fill_empty(&mut self) {
self.fill_tag(Tag::EMPTY)
}
}
impl TagSliceExt for [Tag] {
#[inline]
fn fill_tag(&mut self, tag: Tag) {
// SAFETY: We have access to the entire slice, so we can write to the entire slice.
unsafe { self.as_mut_ptr().write_bytes(tag.0, self.len()) }
}
}
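
The encoding in `Tag` above can be summarized in a standalone sketch (constants copied for illustration): special tags keep the high bit set, EMPTY is the special tag with bit 0 also set, and `full` packs the top 7 bits of the hash so the high bit stays clear.

fn main() {
    const EMPTY: u8 = 0b1111_1111;
    const DELETED: u8 = 0b1000_0000;

    // is_special / special_is_empty, spelled out on the raw bytes.
    assert!(EMPTY & 0x80 != 0 && EMPTY & 0x01 != 0);
    assert!(DELETED & 0x80 != 0 && DELETED & 0x01 == 0);

    // Tag::full keeps only the top 7 bits of the (possibly usize-sized) hash,
    // so the result is always < 0x80 and reads as a full bucket.
    const MIN_HASH_LEN: usize = if core::mem::size_of::<usize>() < core::mem::size_of::<u64>() {
        core::mem::size_of::<usize>()
    } else {
        core::mem::size_of::<u64>()
    };
    let hash: u64 = 0xABCD_EF01_2345_6789;
    let tag = ((hash >> (MIN_HASH_LEN * 8 - 7)) & 0x7f) as u8;
    assert!(tag & 0x80 == 0);
    if cfg!(target_pointer_width = "64") {
        assert_eq!(tag, 0x55); // top 7 bits of 0xAB... = 0b101_0101
    }
}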

@@ -0,0 +1,4 @@
#[cfg(feature = "rayon")]
pub(crate) mod rayon;
#[cfg(feature = "serde")]
mod serde;

@@ -0,0 +1,27 @@
use alloc::collections::LinkedList;
use alloc::vec::Vec;
use rayon::iter::{IntoParallelIterator, ParallelIterator};
/// Helper for collecting parallel iterators into an intermediary list of vectors.
#[allow(clippy::linkedlist)] // yes, we need linked list here for efficient appending!
pub(super) fn collect<I: IntoParallelIterator>(iter: I) -> (LinkedList<Vec<I::Item>>, usize) {
let list = iter
.into_par_iter()
.fold(Vec::new, |mut vec, elem| {
vec.push(elem);
vec
})
.map(|vec| {
let mut list = LinkedList::new();
list.push_back(vec);
list
})
.reduce(LinkedList::new, |mut list1, mut list2| {
list1.append(&mut list2);
list1
});
let len = list.iter().map(Vec::len).sum();
(list, len)
}
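
The helper above builds one `Vec` per rayon job and stitches them into a `LinkedList` because `append` is O(1), so joining jobs never re-copies elements. A standalone usage sketch of the same shape, assuming the `rayon` crate is available as a dependency:

use rayon::iter::{IntoParallelIterator, ParallelIterator};
use std::collections::LinkedList;

fn main() {
    let list: LinkedList<Vec<u32>> = (0..10_000u32)
        .into_par_iter()
        .fold(Vec::new, |mut vec, elem| {
            vec.push(elem);
            vec
        })
        .map(|vec| {
            let mut list = LinkedList::new();
            list.push_back(vec);
            list
        })
        .reduce(LinkedList::new, |mut a, mut b| {
            a.append(&mut b); // O(1): relinks nodes, no element copies
            a
        });
    let len: usize = list.iter().map(Vec::len).sum();
    assert_eq!(len, 10_000);
}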

@@ -0,0 +1,721 @@
//! Rayon extensions for `HashMap`.
use super::raw::{RawIntoParIter, RawParDrain, RawParIter};
use crate::hash_map::HashMap;
use crate::raw::{Allocator, Global};
use core::fmt;
use core::hash::{BuildHasher, Hash};
use core::marker::PhantomData;
use rayon::iter::plumbing::UnindexedConsumer;
use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelExtend, ParallelIterator};
/// Parallel iterator over shared references to entries in a map.
///
/// This iterator is created by the [`par_iter`] method on [`HashMap`]
/// (provided by the [`IntoParallelRefIterator`] trait).
/// See its documentation for more.
///
/// [`par_iter`]: /hashbrown/struct.HashMap.html#method.par_iter
/// [`HashMap`]: /hashbrown/struct.HashMap.html
/// [`IntoParallelRefIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelRefIterator.html
pub struct ParIter<'a, K, V> {
inner: RawParIter<(K, V)>,
marker: PhantomData<(&'a K, &'a V)>,
}
impl<'a, K: Sync, V: Sync> ParallelIterator for ParIter<'a, K, V> {
type Item = (&'a K, &'a V);
#[cfg_attr(feature = "inline-more", inline)]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.inner
.map(|x| unsafe {
let r = x.as_ref();
(&r.0, &r.1)
})
.drive_unindexed(consumer)
}
}
impl<K, V> Clone for ParIter<'_, K, V> {
#[cfg_attr(feature = "inline-more", inline)]
fn clone(&self) -> Self {
Self {
inner: self.inner.clone(),
marker: PhantomData,
}
}
}
impl<K: fmt::Debug + Eq + Hash, V: fmt::Debug> fmt::Debug for ParIter<'_, K, V> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let iter = unsafe { self.inner.iter() }.map(|x| unsafe {
let r = x.as_ref();
(&r.0, &r.1)
});
f.debug_list().entries(iter).finish()
}
}
/// Parallel iterator over shared references to keys in a map.
///
/// This iterator is created by the [`par_keys`] method on [`HashMap`].
/// See its documentation for more.
///
/// [`par_keys`]: /hashbrown/struct.HashMap.html#method.par_keys
/// [`HashMap`]: /hashbrown/struct.HashMap.html
pub struct ParKeys<'a, K, V> {
inner: RawParIter<(K, V)>,
marker: PhantomData<(&'a K, &'a V)>,
}
impl<'a, K: Sync, V: Sync> ParallelIterator for ParKeys<'a, K, V> {
type Item = &'a K;
#[cfg_attr(feature = "inline-more", inline)]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.inner
.map(|x| unsafe { &x.as_ref().0 })
.drive_unindexed(consumer)
}
}
impl<K, V> Clone for ParKeys<'_, K, V> {
#[cfg_attr(feature = "inline-more", inline)]
fn clone(&self) -> Self {
Self {
inner: self.inner.clone(),
marker: PhantomData,
}
}
}
impl<K: fmt::Debug + Eq + Hash, V> fmt::Debug for ParKeys<'_, K, V> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let iter = unsafe { self.inner.iter() }.map(|x| unsafe { &x.as_ref().0 });
f.debug_list().entries(iter).finish()
}
}
/// Parallel iterator over shared references to values in a map.
///
/// This iterator is created by the [`par_values`] method on [`HashMap`].
/// See its documentation for more.
///
/// [`par_values`]: /hashbrown/struct.HashMap.html#method.par_values
/// [`HashMap`]: /hashbrown/struct.HashMap.html
pub struct ParValues<'a, K, V> {
inner: RawParIter<(K, V)>,
marker: PhantomData<(&'a K, &'a V)>,
}
impl<'a, K: Sync, V: Sync> ParallelIterator for ParValues<'a, K, V> {
type Item = &'a V;
#[cfg_attr(feature = "inline-more", inline)]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.inner
.map(|x| unsafe { &x.as_ref().1 })
.drive_unindexed(consumer)
}
}
impl<K, V> Clone for ParValues<'_, K, V> {
#[cfg_attr(feature = "inline-more", inline)]
fn clone(&self) -> Self {
Self {
inner: self.inner.clone(),
marker: PhantomData,
}
}
}
impl<K: Eq + Hash, V: fmt::Debug> fmt::Debug for ParValues<'_, K, V> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let iter = unsafe { self.inner.iter() }.map(|x| unsafe { &x.as_ref().1 });
f.debug_list().entries(iter).finish()
}
}
/// Parallel iterator over mutable references to entries in a map.
///
/// This iterator is created by the [`par_iter_mut`] method on [`HashMap`]
/// (provided by the [`IntoParallelRefMutIterator`] trait).
/// See its documentation for more.
///
/// [`par_iter_mut`]: /hashbrown/struct.HashMap.html#method.par_iter_mut
/// [`HashMap`]: /hashbrown/struct.HashMap.html
/// [`IntoParallelRefMutIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelRefMutIterator.html
pub struct ParIterMut<'a, K, V> {
inner: RawParIter<(K, V)>,
marker: PhantomData<(&'a K, &'a mut V)>,
}
impl<'a, K: Sync, V: Send> ParallelIterator for ParIterMut<'a, K, V> {
type Item = (&'a K, &'a mut V);
#[cfg_attr(feature = "inline-more", inline)]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.inner
.map(|x| unsafe {
let r = x.as_mut();
(&r.0, &mut r.1)
})
.drive_unindexed(consumer)
}
}
impl<K: fmt::Debug + Eq + Hash, V: fmt::Debug> fmt::Debug for ParIterMut<'_, K, V> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
ParIter {
inner: self.inner.clone(),
marker: PhantomData,
}
.fmt(f)
}
}
/// Parallel iterator over mutable references to values in a map.
///
/// This iterator is created by the [`par_values_mut`] method on [`HashMap`].
/// See its documentation for more.
///
/// [`par_values_mut`]: /hashbrown/struct.HashMap.html#method.par_values_mut
/// [`HashMap`]: /hashbrown/struct.HashMap.html
pub struct ParValuesMut<'a, K, V> {
inner: RawParIter<(K, V)>,
marker: PhantomData<(&'a K, &'a mut V)>,
}
impl<'a, K: Sync, V: Send> ParallelIterator for ParValuesMut<'a, K, V> {
type Item = &'a mut V;
#[cfg_attr(feature = "inline-more", inline)]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.inner
.map(|x| unsafe { &mut x.as_mut().1 })
.drive_unindexed(consumer)
}
}
impl<K: Eq + Hash, V: fmt::Debug> fmt::Debug for ParValuesMut<'_, K, V> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
ParValues {
inner: self.inner.clone(),
marker: PhantomData,
}
.fmt(f)
}
}
/// Parallel iterator over entries of a consumed map.
///
/// This iterator is created by the [`into_par_iter`] method on [`HashMap`]
/// (provided by the [`IntoParallelIterator`] trait).
/// See its documentation for more.
///
/// [`into_par_iter`]: /hashbrown/struct.HashMap.html#method.into_par_iter
/// [`HashMap`]: /hashbrown/struct.HashMap.html
/// [`IntoParallelIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelIterator.html
pub struct IntoParIter<K, V, A: Allocator = Global> {
inner: RawIntoParIter<(K, V), A>,
}
impl<K: Send, V: Send, A: Allocator + Send> ParallelIterator for IntoParIter<K, V, A> {
type Item = (K, V);
#[cfg_attr(feature = "inline-more", inline)]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.inner.drive_unindexed(consumer)
}
}
impl<K: fmt::Debug + Eq + Hash, V: fmt::Debug, A: Allocator> fmt::Debug for IntoParIter<K, V, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
ParIter {
inner: unsafe { self.inner.par_iter() },
marker: PhantomData,
}
.fmt(f)
}
}
/// Parallel draining iterator over entries of a map.
///
/// This iterator is created by the [`par_drain`] method on [`HashMap`].
/// See its documentation for more.
///
/// [`par_drain`]: /hashbrown/struct.HashMap.html#method.par_drain
/// [`HashMap`]: /hashbrown/struct.HashMap.html
pub struct ParDrain<'a, K, V, A: Allocator = Global> {
inner: RawParDrain<'a, (K, V), A>,
}
impl<K: Send, V: Send, A: Allocator + Sync> ParallelIterator for ParDrain<'_, K, V, A> {
type Item = (K, V);
#[cfg_attr(feature = "inline-more", inline)]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.inner.drive_unindexed(consumer)
}
}
impl<K: fmt::Debug + Eq + Hash, V: fmt::Debug, A: Allocator> fmt::Debug for ParDrain<'_, K, V, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
ParIter {
inner: unsafe { self.inner.par_iter() },
marker: PhantomData,
}
.fmt(f)
}
}
impl<K: Sync, V: Sync, S, A: Allocator> HashMap<K, V, S, A> {
/// Visits (potentially in parallel) immutably borrowed keys in an arbitrary order.
#[cfg_attr(feature = "inline-more", inline)]
pub fn par_keys(&self) -> ParKeys<'_, K, V> {
ParKeys {
inner: unsafe { self.table.par_iter() },
marker: PhantomData,
}
}
/// Visits (potentially in parallel) immutably borrowed values in an arbitrary order.
#[cfg_attr(feature = "inline-more", inline)]
pub fn par_values(&self) -> ParValues<'_, K, V> {
ParValues {
inner: unsafe { self.table.par_iter() },
marker: PhantomData,
}
}
}
impl<K: Send, V: Send, S, A: Allocator> HashMap<K, V, S, A> {
/// Visits (potentially in parallel) mutably borrowed values in an arbitrary order.
#[cfg_attr(feature = "inline-more", inline)]
pub fn par_values_mut(&mut self) -> ParValuesMut<'_, K, V> {
ParValuesMut {
inner: unsafe { self.table.par_iter() },
marker: PhantomData,
}
}
/// Consumes (potentially in parallel) all values in an arbitrary order,
/// while preserving the map's allocated memory for reuse.
#[cfg_attr(feature = "inline-more", inline)]
pub fn par_drain(&mut self) -> ParDrain<'_, K, V, A> {
ParDrain {
inner: self.table.par_drain(),
}
}
}
impl<K, V, S, A> HashMap<K, V, S, A>
where
K: Eq + Hash + Sync,
V: PartialEq + Sync,
S: BuildHasher + Sync,
A: Allocator + Sync,
{
/// Returns `true` if the map is equal to another,
/// i.e. both maps contain the same keys mapped to the same values.
///
/// This method runs in a potentially parallel fashion.
pub fn par_eq(&self, other: &Self) -> bool {
self.len() == other.len()
&& self
.into_par_iter()
.all(|(key, value)| other.get(key).map_or(false, |v| *value == *v))
}
}
impl<K: Send, V: Send, S, A: Allocator + Send> IntoParallelIterator for HashMap<K, V, S, A> {
type Item = (K, V);
type Iter = IntoParIter<K, V, A>;
#[cfg_attr(feature = "inline-more", inline)]
fn into_par_iter(self) -> Self::Iter {
IntoParIter {
inner: self.table.into_par_iter(),
}
}
}
impl<'a, K: Sync, V: Sync, S, A: Allocator> IntoParallelIterator for &'a HashMap<K, V, S, A> {
type Item = (&'a K, &'a V);
type Iter = ParIter<'a, K, V>;
#[cfg_attr(feature = "inline-more", inline)]
fn into_par_iter(self) -> Self::Iter {
ParIter {
inner: unsafe { self.table.par_iter() },
marker: PhantomData,
}
}
}
impl<'a, K: Sync, V: Send, S, A: Allocator> IntoParallelIterator for &'a mut HashMap<K, V, S, A> {
type Item = (&'a K, &'a mut V);
type Iter = ParIterMut<'a, K, V>;
#[cfg_attr(feature = "inline-more", inline)]
fn into_par_iter(self) -> Self::Iter {
ParIterMut {
inner: unsafe { self.table.par_iter() },
marker: PhantomData,
}
}
}
/// Collect (key, value) pairs from a parallel iterator into a
/// hashmap. If multiple pairs correspond to the same key, then the
/// ones produced earlier in the parallel iterator will be
/// overwritten, just as with a sequential iterator.
impl<K, V, S> FromParallelIterator<(K, V)> for HashMap<K, V, S, Global>
where
K: Eq + Hash + Send,
V: Send,
S: BuildHasher + Default,
{
fn from_par_iter<P>(par_iter: P) -> Self
where
P: IntoParallelIterator<Item = (K, V)>,
{
let mut map = HashMap::default();
map.par_extend(par_iter);
map
}
}
/// Extend a hash map with items from a parallel iterator.
impl<K, V, S, A> ParallelExtend<(K, V)> for HashMap<K, V, S, A>
where
K: Eq + Hash + Send,
V: Send,
S: BuildHasher,
A: Allocator,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = (K, V)>,
{
extend(self, par_iter);
}
}
/// Extend a hash map with copied items from a parallel iterator.
impl<'a, K, V, S, A> ParallelExtend<(&'a K, &'a V)> for HashMap<K, V, S, A>
where
K: Copy + Eq + Hash + Sync,
V: Copy + Sync,
S: BuildHasher,
A: Allocator,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = (&'a K, &'a V)>,
{
extend(self, par_iter);
}
}
// This is equal to the normal `HashMap` -- no custom advantage.
fn extend<K, V, S, A, I>(map: &mut HashMap<K, V, S, A>, par_iter: I)
where
K: Eq + Hash,
S: BuildHasher,
I: IntoParallelIterator,
A: Allocator,
HashMap<K, V, S, A>: Extend<I::Item>,
{
let (list, len) = super::helpers::collect(par_iter);
// Keys may be already present or show multiple times in the iterator.
// Reserve the entire length if the map is empty.
// Otherwise reserve half the length (rounded up), so the map
// will only resize twice in the worst case.
let reserve = if map.is_empty() { len } else { (len + 1) / 2 };
map.reserve(reserve);
for vec in list {
map.extend(vec);
}
}
#[cfg(test)]
mod test_par_map {
use alloc::vec::Vec;
use core::hash::{Hash, Hasher};
use core::sync::atomic::{AtomicUsize, Ordering};
use rayon::prelude::*;
use crate::hash_map::HashMap;
struct Droppable<'a> {
k: usize,
counter: &'a AtomicUsize,
}
impl Droppable<'_> {
fn new(k: usize, counter: &AtomicUsize) -> Droppable<'_> {
counter.fetch_add(1, Ordering::Relaxed);
Droppable { k, counter }
}
}
impl Drop for Droppable<'_> {
fn drop(&mut self) {
self.counter.fetch_sub(1, Ordering::Relaxed);
}
}
impl Clone for Droppable<'_> {
fn clone(&self) -> Self {
Droppable::new(self.k, self.counter)
}
}
impl Hash for Droppable<'_> {
fn hash<H>(&self, state: &mut H)
where
H: Hasher,
{
self.k.hash(state);
}
}
impl PartialEq for Droppable<'_> {
fn eq(&self, other: &Self) -> bool {
self.k == other.k
}
}
impl Eq for Droppable<'_> {}
#[test]
fn test_into_iter_drops() {
let key = AtomicUsize::new(0);
let value = AtomicUsize::new(0);
let hm = {
let mut hm = HashMap::new();
assert_eq!(key.load(Ordering::Relaxed), 0);
assert_eq!(value.load(Ordering::Relaxed), 0);
for i in 0..100 {
let d1 = Droppable::new(i, &key);
let d2 = Droppable::new(i + 100, &value);
hm.insert(d1, d2);
}
assert_eq!(key.load(Ordering::Relaxed), 100);
assert_eq!(value.load(Ordering::Relaxed), 100);
hm
};
// By the way, ensure that cloning doesn't screw up the dropping.
drop(hm.clone());
assert_eq!(key.load(Ordering::Relaxed), 100);
assert_eq!(value.load(Ordering::Relaxed), 100);
// Ensure that dropping the iterator does not leak anything.
drop(hm.clone().into_par_iter());
{
assert_eq!(key.load(Ordering::Relaxed), 100);
assert_eq!(value.load(Ordering::Relaxed), 100);
// retain only half
let _v: Vec<_> = hm.into_par_iter().filter(|(key, _)| key.k < 50).collect();
assert_eq!(key.load(Ordering::Relaxed), 50);
assert_eq!(value.load(Ordering::Relaxed), 50);
};
assert_eq!(key.load(Ordering::Relaxed), 0);
assert_eq!(value.load(Ordering::Relaxed), 0);
}
#[test]
fn test_drain_drops() {
let key = AtomicUsize::new(0);
let value = AtomicUsize::new(0);
let mut hm = {
let mut hm = HashMap::new();
assert_eq!(key.load(Ordering::Relaxed), 0);
assert_eq!(value.load(Ordering::Relaxed), 0);
for i in 0..100 {
let d1 = Droppable::new(i, &key);
let d2 = Droppable::new(i + 100, &value);
hm.insert(d1, d2);
}
assert_eq!(key.load(Ordering::Relaxed), 100);
assert_eq!(value.load(Ordering::Relaxed), 100);
hm
};
// By the way, ensure that cloning doesn't screw up the dropping.
drop(hm.clone());
assert_eq!(key.load(Ordering::Relaxed), 100);
assert_eq!(value.load(Ordering::Relaxed), 100);
// Ensure that dropping the drain iterator does not leak anything.
drop(hm.clone().par_drain());
{
assert_eq!(key.load(Ordering::Relaxed), 100);
assert_eq!(value.load(Ordering::Relaxed), 100);
// retain only half
let _v: Vec<_> = hm.drain().filter(|(key, _)| key.k < 50).collect();
assert!(hm.is_empty());
assert_eq!(key.load(Ordering::Relaxed), 50);
assert_eq!(value.load(Ordering::Relaxed), 50);
};
assert_eq!(key.load(Ordering::Relaxed), 0);
assert_eq!(value.load(Ordering::Relaxed), 0);
}
#[test]
fn test_empty_iter() {
let mut m: HashMap<isize, bool> = HashMap::new();
assert_eq!(m.par_drain().count(), 0);
assert_eq!(m.par_keys().count(), 0);
assert_eq!(m.par_values().count(), 0);
assert_eq!(m.par_values_mut().count(), 0);
assert_eq!(m.par_iter().count(), 0);
assert_eq!(m.par_iter_mut().count(), 0);
assert_eq!(m.len(), 0);
assert!(m.is_empty());
assert_eq!(m.into_par_iter().count(), 0);
}
#[test]
fn test_iterate() {
let mut m = HashMap::with_capacity(4);
for i in 0..32 {
assert!(m.insert(i, i * 2).is_none());
}
assert_eq!(m.len(), 32);
let observed = AtomicUsize::new(0);
m.par_iter().for_each(|(k, v)| {
assert_eq!(*v, *k * 2);
observed.fetch_or(1 << *k, Ordering::Relaxed);
});
assert_eq!(observed.into_inner(), 0xFFFF_FFFF);
}
#[test]
fn test_keys() {
let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
let map: HashMap<_, _> = vec.into_par_iter().collect();
let keys: Vec<_> = map.par_keys().cloned().collect();
assert_eq!(keys.len(), 3);
assert!(keys.contains(&1));
assert!(keys.contains(&2));
assert!(keys.contains(&3));
}
#[test]
fn test_values() {
let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
let map: HashMap<_, _> = vec.into_par_iter().collect();
let values: Vec<_> = map.par_values().cloned().collect();
assert_eq!(values.len(), 3);
assert!(values.contains(&'a'));
assert!(values.contains(&'b'));
assert!(values.contains(&'c'));
}
#[test]
fn test_values_mut() {
let vec = vec![(1, 1), (2, 2), (3, 3)];
let mut map: HashMap<_, _> = vec.into_par_iter().collect();
map.par_values_mut().for_each(|value| *value *= 2);
let values: Vec<_> = map.par_values().cloned().collect();
assert_eq!(values.len(), 3);
assert!(values.contains(&2));
assert!(values.contains(&4));
assert!(values.contains(&6));
}
#[test]
fn test_eq() {
let mut m1 = HashMap::new();
m1.insert(1, 2);
m1.insert(2, 3);
m1.insert(3, 4);
let mut m2 = HashMap::new();
m2.insert(1, 2);
m2.insert(2, 3);
assert!(!m1.par_eq(&m2));
m2.insert(3, 4);
assert!(m1.par_eq(&m2));
}
#[test]
fn test_from_iter() {
let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
let map: HashMap<_, _> = xs.par_iter().cloned().collect();
for &(k, v) in &xs {
assert_eq!(map.get(&k), Some(&v));
}
}
#[test]
fn test_extend_ref() {
let mut a = HashMap::new();
a.insert(1, "one");
let mut b = HashMap::new();
b.insert(2, "two");
b.insert(3, "three");
a.par_extend(&b);
assert_eq!(a.len(), 3);
assert_eq!(a[&1], "one");
assert_eq!(a[&2], "two");
assert_eq!(a[&3], "three");
}
}
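
A usage sketch for the map extensions above, assuming `hashbrown` is built with its `rayon` feature and `rayon` is a direct dependency (keys and values are illustrative):

use hashbrown::HashMap;
use rayon::prelude::*;

fn main() {
    // FromParallelIterator: duplicate keys collapse, as with sequential collect.
    let map: HashMap<u32, u32> = (0..100u32).into_par_iter().map(|k| (k % 10, k)).collect();
    assert_eq!(map.len(), 10);

    // ParallelExtend reserves up front (the full length for an empty map,
    // half of it otherwise) so the table resizes at most twice.
    let mut doubled: HashMap<u32, u32> = HashMap::new();
    doubled.par_extend((100..200u32).into_par_iter().map(|k| (k, k * 2)));
    assert_eq!(doubled.len(), 100);

    // par_keys, par_values and par_eq are all provided behind the rayon feature.
    assert!(doubled.par_keys().all(|&k| k >= 100));
    assert!(doubled.par_eq(&doubled.clone()));
}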

@@ -0,0 +1,5 @@
mod helpers;
pub(crate) mod map;
pub(crate) mod raw;
pub(crate) mod set;
pub(crate) mod table;

@@ -0,0 +1,230 @@
use crate::raw::Bucket;
use crate::raw::{Allocator, Global, RawIter, RawIterRange, RawTable};
use crate::scopeguard::guard;
use core::marker::PhantomData;
use core::mem;
use core::ptr::NonNull;
use rayon::iter::{
plumbing::{self, Folder, UnindexedConsumer, UnindexedProducer},
ParallelIterator,
};
/// Parallel iterator which returns a raw pointer to every full bucket in the table.
pub struct RawParIter<T> {
iter: RawIterRange<T>,
}
impl<T> RawParIter<T> {
#[cfg_attr(feature = "inline-more", inline)]
pub(super) unsafe fn iter(&self) -> RawIterRange<T> {
self.iter.clone()
}
}
impl<T> Clone for RawParIter<T> {
#[cfg_attr(feature = "inline-more", inline)]
fn clone(&self) -> Self {
Self {
iter: self.iter.clone(),
}
}
}
impl<T> From<RawIter<T>> for RawParIter<T> {
fn from(it: RawIter<T>) -> Self {
RawParIter { iter: it.iter }
}
}
impl<T> ParallelIterator for RawParIter<T> {
type Item = Bucket<T>;
#[cfg_attr(feature = "inline-more", inline)]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let producer = ParIterProducer { iter: self.iter };
plumbing::bridge_unindexed(producer, consumer)
}
}
/// Producer which returns a `Bucket<T>` for every element.
struct ParIterProducer<T> {
iter: RawIterRange<T>,
}
impl<T> UnindexedProducer for ParIterProducer<T> {
type Item = Bucket<T>;
#[cfg_attr(feature = "inline-more", inline)]
fn split(self) -> (Self, Option<Self>) {
let (left, right) = self.iter.split();
let left = ParIterProducer { iter: left };
let right = right.map(|right| ParIterProducer { iter: right });
(left, right)
}
#[cfg_attr(feature = "inline-more", inline)]
fn fold_with<F>(self, folder: F) -> F
where
F: Folder<Self::Item>,
{
folder.consume_iter(self.iter)
}
}
/// Parallel iterator which consumes a table and returns elements.
pub struct RawIntoParIter<T, A: Allocator = Global> {
table: RawTable<T, A>,
}
impl<T, A: Allocator> RawIntoParIter<T, A> {
#[cfg_attr(feature = "inline-more", inline)]
pub(super) unsafe fn par_iter(&self) -> RawParIter<T> {
self.table.par_iter()
}
}
impl<T: Send, A: Allocator + Send> ParallelIterator for RawIntoParIter<T, A> {
type Item = T;
#[cfg_attr(feature = "inline-more", inline)]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let iter = unsafe { self.table.iter().iter };
let _guard = guard(self.table.into_allocation(), |alloc| {
if let Some((ptr, layout, ref alloc)) = *alloc {
unsafe {
alloc.deallocate(ptr, layout);
}
}
});
let producer = ParDrainProducer { iter };
plumbing::bridge_unindexed(producer, consumer)
}
}
/// Parallel iterator which consumes elements without freeing the table storage.
pub struct RawParDrain<'a, T, A: Allocator = Global> {
// We don't use a &'a mut RawTable<T> because we want RawParDrain to be
// covariant over T.
table: NonNull<RawTable<T, A>>,
marker: PhantomData<&'a RawTable<T, A>>,
}
unsafe impl<T: Send, A: Allocator> Send for RawParDrain<'_, T, A> {}
impl<T, A: Allocator> RawParDrain<'_, T, A> {
#[cfg_attr(feature = "inline-more", inline)]
pub(super) unsafe fn par_iter(&self) -> RawParIter<T> {
self.table.as_ref().par_iter()
}
}
impl<T: Send, A: Allocator> ParallelIterator for RawParDrain<'_, T, A> {
type Item = T;
#[cfg_attr(feature = "inline-more", inline)]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let _guard = guard(self.table, |table| unsafe {
table.as_mut().clear_no_drop();
});
let iter = unsafe { self.table.as_ref().iter().iter };
mem::forget(self);
let producer = ParDrainProducer { iter };
plumbing::bridge_unindexed(producer, consumer)
}
}
impl<T, A: Allocator> Drop for RawParDrain<'_, T, A> {
fn drop(&mut self) {
// If drive_unindexed is not called then simply clear the table.
unsafe {
self.table.as_mut().clear();
}
}
}
/// Producer which will consume all elements in the range, even if it is dropped
/// halfway through.
struct ParDrainProducer<T> {
iter: RawIterRange<T>,
}
impl<T: Send> UnindexedProducer for ParDrainProducer<T> {
type Item = T;
#[cfg_attr(feature = "inline-more", inline)]
fn split(self) -> (Self, Option<Self>) {
let (left, right) = self.iter.clone().split();
mem::forget(self);
let left = ParDrainProducer { iter: left };
let right = right.map(|right| ParDrainProducer { iter: right });
(left, right)
}
#[cfg_attr(feature = "inline-more", inline)]
fn fold_with<F>(mut self, mut folder: F) -> F
where
F: Folder<Self::Item>,
{
// Make sure to modify the iterator in-place so that any remaining
// elements are processed in our Drop impl.
for item in &mut self.iter {
folder = folder.consume(unsafe { item.read() });
if folder.full() {
return folder;
}
}
// If we processed all elements then we don't need to run the drop.
mem::forget(self);
folder
}
}
impl<T> Drop for ParDrainProducer<T> {
#[cfg_attr(feature = "inline-more", inline)]
fn drop(&mut self) {
// Drop all remaining elements
if mem::needs_drop::<T>() {
for item in &mut self.iter {
unsafe {
item.drop();
}
}
}
}
}
impl<T, A: Allocator> RawTable<T, A> {
/// Returns a parallel iterator over the elements in a `RawTable`.
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn par_iter(&self) -> RawParIter<T> {
RawParIter {
iter: self.iter().iter,
}
}
/// Returns a parallel iterator over the elements in a `RawTable`.
#[cfg_attr(feature = "inline-more", inline)]
pub fn into_par_iter(self) -> RawIntoParIter<T, A> {
RawIntoParIter { table: self }
}
/// Returns a parallel iterator which consumes all elements of a `RawTable`
/// without freeing its memory allocation.
#[cfg_attr(feature = "inline-more", inline)]
pub fn par_drain(&mut self) -> RawParDrain<'_, T, A> {
RawParDrain {
table: NonNull::from(self),
marker: PhantomData,
}
}
}
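
The drivers above pair a drop guard with `mem::forget`: cleanup (deallocating the storage, or `clear_no_drop`) runs even if the consumer panics, and is skipped only once responsibility has been handed to a producer. A minimal standalone version of the pattern; `Cleanup` is an illustrative stand-in for the crate's internal `scopeguard::guard`:

struct Cleanup<F: FnMut()>(F);

impl<F: FnMut()> Drop for Cleanup<F> {
    fn drop(&mut self) {
        (self.0)();
    }
}

fn main() {
    let mut storage_freed = false;
    {
        let _guard = Cleanup(|| storage_freed = true);
        // ... drive the producer here; if this unwinds, the guard's Drop
        // still performs the cleanup.
    }
    assert!(storage_freed);
}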

@@ -0,0 +1,659 @@
//! Rayon extensions for `HashSet`.
use super::map;
use crate::hash_set::HashSet;
use crate::raw::{Allocator, Global};
use core::hash::{BuildHasher, Hash};
use rayon::iter::plumbing::UnindexedConsumer;
use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelExtend, ParallelIterator};
/// Parallel iterator over elements of a consumed set.
///
/// This iterator is created by the [`into_par_iter`] method on [`HashSet`]
/// (provided by the [`IntoParallelIterator`] trait).
/// See its documentation for more.
///
/// [`into_par_iter`]: /hashbrown/struct.HashSet.html#method.into_par_iter
/// [`HashSet`]: /hashbrown/struct.HashSet.html
/// [`IntoParallelIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelIterator.html
pub struct IntoParIter<T, A: Allocator = Global> {
inner: map::IntoParIter<T, (), A>,
}
impl<T: Send, A: Allocator + Send> ParallelIterator for IntoParIter<T, A> {
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.inner.map(|(k, _)| k).drive_unindexed(consumer)
}
}
/// Parallel draining iterator over entries of a set.
///
/// This iterator is created by the [`par_drain`] method on [`HashSet`].
/// See its documentation for more.
///
/// [`par_drain`]: /hashbrown/struct.HashSet.html#method.par_drain
/// [`HashSet`]: /hashbrown/struct.HashSet.html
pub struct ParDrain<'a, T, A: Allocator = Global> {
inner: map::ParDrain<'a, T, (), A>,
}
impl<T: Send, A: Allocator + Send + Sync> ParallelIterator for ParDrain<'_, T, A> {
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.inner.map(|(k, _)| k).drive_unindexed(consumer)
}
}
/// Parallel iterator over shared references to elements in a set.
///
/// This iterator is created by the [`par_iter`] method on [`HashSet`]
/// (provided by the [`IntoParallelRefIterator`] trait).
/// See its documentation for more.
///
/// [`par_iter`]: /hashbrown/struct.HashSet.html#method.par_iter
/// [`HashSet`]: /hashbrown/struct.HashSet.html
/// [`IntoParallelRefIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelRefIterator.html
pub struct ParIter<'a, T> {
inner: map::ParKeys<'a, T, ()>,
}
impl<'a, T: Sync> ParallelIterator for ParIter<'a, T> {
type Item = &'a T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.inner.drive_unindexed(consumer)
}
}
/// Parallel iterator over shared references to elements in the difference of
/// sets.
///
/// This iterator is created by the [`par_difference`] method on [`HashSet`].
/// See its documentation for more.
///
/// [`par_difference`]: /hashbrown/struct.HashSet.html#method.par_difference
/// [`HashSet`]: /hashbrown/struct.HashSet.html
pub struct ParDifference<'a, T, S, A: Allocator = Global> {
a: &'a HashSet<T, S, A>,
b: &'a HashSet<T, S, A>,
}
impl<'a, T, S, A> ParallelIterator for ParDifference<'a, T, S, A>
where
T: Eq + Hash + Sync,
S: BuildHasher + Sync,
A: Allocator + Sync,
{
type Item = &'a T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.a
.into_par_iter()
.filter(|&x| !self.b.contains(x))
.drive_unindexed(consumer)
}
}
/// Parallel iterator over shared references to elements in the symmetric
/// difference of sets.
///
/// This iterator is created by the [`par_symmetric_difference`] method on
/// [`HashSet`].
/// See its documentation for more.
///
/// [`par_symmetric_difference`]: /hashbrown/struct.HashSet.html#method.par_symmetric_difference
/// [`HashSet`]: /hashbrown/struct.HashSet.html
pub struct ParSymmetricDifference<'a, T, S, A: Allocator = Global> {
a: &'a HashSet<T, S, A>,
b: &'a HashSet<T, S, A>,
}
impl<'a, T, S, A> ParallelIterator for ParSymmetricDifference<'a, T, S, A>
where
T: Eq + Hash + Sync,
S: BuildHasher + Sync,
A: Allocator + Sync,
{
type Item = &'a T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.a
.par_difference(self.b)
.chain(self.b.par_difference(self.a))
.drive_unindexed(consumer)
}
}
/// Parallel iterator over shared references to elements in the intersection of
/// sets.
///
/// This iterator is created by the [`par_intersection`] method on [`HashSet`].
/// See its documentation for more.
///
/// [`par_intersection`]: /hashbrown/struct.HashSet.html#method.par_intersection
/// [`HashSet`]: /hashbrown/struct.HashSet.html
pub struct ParIntersection<'a, T, S, A: Allocator = Global> {
a: &'a HashSet<T, S, A>,
b: &'a HashSet<T, S, A>,
}
impl<'a, T, S, A> ParallelIterator for ParIntersection<'a, T, S, A>
where
T: Eq + Hash + Sync,
S: BuildHasher + Sync,
A: Allocator + Sync,
{
type Item = &'a T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.a
.into_par_iter()
.filter(|&x| self.b.contains(x))
.drive_unindexed(consumer)
}
}
/// Parallel iterator over shared references to elements in the union of sets.
///
/// This iterator is created by the [`par_union`] method on [`HashSet`].
/// See its documentation for more.
///
/// [`par_union`]: /hashbrown/struct.HashSet.html#method.par_union
/// [`HashSet`]: /hashbrown/struct.HashSet.html
pub struct ParUnion<'a, T, S, A: Allocator = Global> {
a: &'a HashSet<T, S, A>,
b: &'a HashSet<T, S, A>,
}
impl<'a, T, S, A> ParallelIterator for ParUnion<'a, T, S, A>
where
T: Eq + Hash + Sync,
S: BuildHasher + Sync,
A: Allocator + Sync,
{
type Item = &'a T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
// We'll iterate one set in full, and only the remaining difference from the other.
// Use the smaller set for the difference in order to reduce hash lookups.
let (smaller, larger) = if self.a.len() <= self.b.len() {
(self.a, self.b)
} else {
(self.b, self.a)
};
larger
.into_par_iter()
.chain(smaller.par_difference(larger))
.drive_unindexed(consumer)
}
}
impl<T, S, A> HashSet<T, S, A>
where
T: Eq + Hash + Sync,
S: BuildHasher + Sync,
A: Allocator + Sync,
{
/// Visits (potentially in parallel) the values representing the union,
/// i.e. all the values in `self` or `other`, without duplicates.
#[cfg_attr(feature = "inline-more", inline)]
pub fn par_union<'a>(&'a self, other: &'a Self) -> ParUnion<'a, T, S, A> {
ParUnion { a: self, b: other }
}
/// Visits (potentially in parallel) the values representing the difference,
/// i.e. the values that are in `self` but not in `other`.
#[cfg_attr(feature = "inline-more", inline)]
pub fn par_difference<'a>(&'a self, other: &'a Self) -> ParDifference<'a, T, S, A> {
ParDifference { a: self, b: other }
}
/// Visits (potentially in parallel) the values representing the symmetric
/// difference, i.e. the values that are in `self` or in `other` but not in both.
#[cfg_attr(feature = "inline-more", inline)]
pub fn par_symmetric_difference<'a>(
&'a self,
other: &'a Self,
) -> ParSymmetricDifference<'a, T, S, A> {
ParSymmetricDifference { a: self, b: other }
}
/// Visits (potentially in parallel) the values representing the
/// intersection, i.e. the values that are both in `self` and `other`.
#[cfg_attr(feature = "inline-more", inline)]
pub fn par_intersection<'a>(&'a self, other: &'a Self) -> ParIntersection<'a, T, S, A> {
ParIntersection { a: self, b: other }
}
/// Returns `true` if `self` has no elements in common with `other`.
/// This is equivalent to checking for an empty intersection.
///
/// This method runs in a potentially parallel fashion.
pub fn par_is_disjoint(&self, other: &Self) -> bool {
self.into_par_iter().all(|x| !other.contains(x))
}
/// Returns `true` if the set is a subset of another,
/// i.e. `other` contains at least all the values in `self`.
///
/// This method runs in a potentially parallel fashion.
pub fn par_is_subset(&self, other: &Self) -> bool {
if self.len() <= other.len() {
self.into_par_iter().all(|x| other.contains(x))
} else {
false
}
}
/// Returns `true` if the set is a superset of another,
/// i.e. `self` contains at least all the values in `other`.
///
/// This method runs in a potentially parallel fashion.
pub fn par_is_superset(&self, other: &Self) -> bool {
other.par_is_subset(self)
}
/// Returns `true` if the set is equal to another,
/// i.e. both sets contain the same values.
///
/// This method runs in a potentially parallel fashion.
pub fn par_eq(&self, other: &Self) -> bool {
self.len() == other.len() && self.par_is_subset(other)
}
}
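// Illustrative downstream usage of the parallel set operations above,
// assuming the `rayon` and (default) `default-hasher` features are enabled:
fn par_set_ops_sketch() {
    use hashbrown::HashSet;
    use rayon::prelude::*;

    let a: HashSet<i32> = (0..100).collect();
    let b: HashSet<i32> = (50..150).collect();

    // Each distinct value appears exactly once in the union.
    assert_eq!(a.par_union(&b).count(), 150);
    // Only the overlap survives the intersection.
    assert_eq!(a.par_intersection(&b).count(), 50);
    // Symmetric difference = union minus intersection.
    assert_eq!(a.par_symmetric_difference(&b).count(), 100);
    // The boolean helpers parallelize the membership probes.
    assert!(!a.par_is_subset(&b));
    assert!(a.par_eq(&a));
}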
impl<T, S, A> HashSet<T, S, A>
where
T: Eq + Hash + Send,
A: Allocator + Send,
{
/// Consumes (potentially in parallel) all values in an arbitrary order,
/// while preserving the set's allocated memory for reuse.
#[cfg_attr(feature = "inline-more", inline)]
pub fn par_drain(&mut self) -> ParDrain<'_, T, A> {
ParDrain {
inner: self.map.par_drain(),
}
}
}
impl<T: Send, S, A: Allocator + Send> IntoParallelIterator for HashSet<T, S, A> {
type Item = T;
type Iter = IntoParIter<T, A>;
#[cfg_attr(feature = "inline-more", inline)]
fn into_par_iter(self) -> Self::Iter {
IntoParIter {
inner: self.map.into_par_iter(),
}
}
}
impl<'a, T: Sync, S, A: Allocator> IntoParallelIterator for &'a HashSet<T, S, A> {
type Item = &'a T;
type Iter = ParIter<'a, T>;
#[cfg_attr(feature = "inline-more", inline)]
fn into_par_iter(self) -> Self::Iter {
ParIter {
inner: self.map.par_keys(),
}
}
}
/// Collect values from a parallel iterator into a hashset.
impl<T, S> FromParallelIterator<T> for HashSet<T, S, Global>
where
T: Eq + Hash + Send,
S: BuildHasher + Default,
{
fn from_par_iter<P>(par_iter: P) -> Self
where
P: IntoParallelIterator<Item = T>,
{
let mut set = HashSet::default();
set.par_extend(par_iter);
set
}
}
/// Extend a hash set with items from a parallel iterator.
impl<T, S> ParallelExtend<T> for HashSet<T, S, Global>
where
T: Eq + Hash + Send,
S: BuildHasher,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = T>,
{
extend(self, par_iter);
}
}
/// Extend a hash set with copied items from a parallel iterator.
impl<'a, T, S> ParallelExtend<&'a T> for HashSet<T, S, Global>
where
T: 'a + Copy + Eq + Hash + Sync,
S: BuildHasher,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = &'a T>,
{
extend(self, par_iter);
}
}
// This is the same as extending a normal `HashSet` -- no custom parallel advantage.
fn extend<T, S, I, A>(set: &mut HashSet<T, S, A>, par_iter: I)
where
T: Eq + Hash,
S: BuildHasher,
A: Allocator,
I: IntoParallelIterator,
HashSet<T, S, A>: Extend<I::Item>,
{
let (list, len) = super::helpers::collect(par_iter);
// Values may already be present or may appear multiple times in the iterator.
// Reserve the entire length if the set is empty.
// Otherwise reserve half the length (rounded up), so the set
// will only resize twice in the worst case.
let reserve = if set.is_empty() { len } else { (len + 1) / 2 };
set.reserve(reserve);
for vec in list {
set.extend(vec);
}
}
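// Illustrative downstream usage of the parallel collect/extend paths above,
// assuming the `rayon` feature is enabled:
fn par_collect_sketch() {
    use hashbrown::HashSet;
    use rayon::prelude::*;

    // `FromParallelIterator`: duplicates collapse while collecting.
    let set: HashSet<u32> = (0..1_000u32).into_par_iter().map(|x| x % 100).collect();
    assert_eq!(set.len(), 100);

    // `ParallelExtend`: an empty set reserves the full incoming length;
    // a non-empty one reserves about half (see the heuristic above).
    let mut other: HashSet<u32> = HashSet::new();
    other.par_extend(0..50u32);
    assert_eq!(other.len(), 50);
}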
#[cfg(test)]
mod test_par_set {
use alloc::vec::Vec;
use core::sync::atomic::{AtomicUsize, Ordering};
use rayon::prelude::*;
use crate::hash_set::HashSet;
#[test]
fn test_disjoint() {
let mut xs = HashSet::new();
let mut ys = HashSet::new();
assert!(xs.par_is_disjoint(&ys));
assert!(ys.par_is_disjoint(&xs));
assert!(xs.insert(5));
assert!(ys.insert(11));
assert!(xs.par_is_disjoint(&ys));
assert!(ys.par_is_disjoint(&xs));
assert!(xs.insert(7));
assert!(xs.insert(19));
assert!(xs.insert(4));
assert!(ys.insert(2));
assert!(ys.insert(-11));
assert!(xs.par_is_disjoint(&ys));
assert!(ys.par_is_disjoint(&xs));
assert!(ys.insert(7));
assert!(!xs.par_is_disjoint(&ys));
assert!(!ys.par_is_disjoint(&xs));
}
#[test]
fn test_subset_and_superset() {
let mut a = HashSet::new();
assert!(a.insert(0));
assert!(a.insert(5));
assert!(a.insert(11));
assert!(a.insert(7));
let mut b = HashSet::new();
assert!(b.insert(0));
assert!(b.insert(7));
assert!(b.insert(19));
assert!(b.insert(250));
assert!(b.insert(11));
assert!(b.insert(200));
assert!(!a.par_is_subset(&b));
assert!(!a.par_is_superset(&b));
assert!(!b.par_is_subset(&a));
assert!(!b.par_is_superset(&a));
assert!(b.insert(5));
assert!(a.par_is_subset(&b));
assert!(!a.par_is_superset(&b));
assert!(!b.par_is_subset(&a));
assert!(b.par_is_superset(&a));
}
#[test]
fn test_iterate() {
let mut a = HashSet::new();
for i in 0..32 {
assert!(a.insert(i));
}
let observed = AtomicUsize::new(0);
a.par_iter().for_each(|k| {
observed.fetch_or(1 << *k, Ordering::Relaxed);
});
assert_eq!(observed.into_inner(), 0xFFFF_FFFF);
}
#[test]
fn test_intersection() {
let mut a = HashSet::new();
let mut b = HashSet::new();
assert!(a.insert(11));
assert!(a.insert(1));
assert!(a.insert(3));
assert!(a.insert(77));
assert!(a.insert(103));
assert!(a.insert(5));
assert!(a.insert(-5));
assert!(b.insert(2));
assert!(b.insert(11));
assert!(b.insert(77));
assert!(b.insert(-9));
assert!(b.insert(-42));
assert!(b.insert(5));
assert!(b.insert(3));
let expected = [3, 5, 11, 77];
let i = a
.par_intersection(&b)
.map(|x| {
assert!(expected.contains(x));
1
})
.sum::<usize>();
assert_eq!(i, expected.len());
}
#[test]
fn test_difference() {
let mut a = HashSet::new();
let mut b = HashSet::new();
assert!(a.insert(1));
assert!(a.insert(3));
assert!(a.insert(5));
assert!(a.insert(9));
assert!(a.insert(11));
assert!(b.insert(3));
assert!(b.insert(9));
let expected = [1, 5, 11];
let i = a
.par_difference(&b)
.map(|x| {
assert!(expected.contains(x));
1
})
.sum::<usize>();
assert_eq!(i, expected.len());
}
#[test]
fn test_symmetric_difference() {
let mut a = HashSet::new();
let mut b = HashSet::new();
assert!(a.insert(1));
assert!(a.insert(3));
assert!(a.insert(5));
assert!(a.insert(9));
assert!(a.insert(11));
assert!(b.insert(-2));
assert!(b.insert(3));
assert!(b.insert(9));
assert!(b.insert(14));
assert!(b.insert(22));
let expected = [-2, 1, 5, 11, 14, 22];
let i = a
.par_symmetric_difference(&b)
.map(|x| {
assert!(expected.contains(x));
1
})
.sum::<usize>();
assert_eq!(i, expected.len());
}
#[test]
fn test_union() {
let mut a = HashSet::new();
let mut b = HashSet::new();
assert!(a.insert(1));
assert!(a.insert(3));
assert!(a.insert(5));
assert!(a.insert(9));
assert!(a.insert(11));
assert!(a.insert(16));
assert!(a.insert(19));
assert!(a.insert(24));
assert!(b.insert(-2));
assert!(b.insert(1));
assert!(b.insert(5));
assert!(b.insert(9));
assert!(b.insert(13));
assert!(b.insert(19));
let expected = [-2, 1, 3, 5, 9, 11, 13, 16, 19, 24];
let i = a
.par_union(&b)
.map(|x| {
assert!(expected.contains(x));
1
})
.sum::<usize>();
assert_eq!(i, expected.len());
}
#[test]
fn test_from_iter() {
let xs = [1, 2, 3, 4, 5, 6, 7, 8, 9];
let set: HashSet<_> = xs.par_iter().cloned().collect();
for x in &xs {
assert!(set.contains(x));
}
}
#[test]
fn test_move_iter() {
let hs = {
let mut hs = HashSet::new();
hs.insert('a');
hs.insert('b');
hs
};
let v = hs.into_par_iter().collect::<Vec<char>>();
assert!(v == ['a', 'b'] || v == ['b', 'a']);
}
#[test]
fn test_eq() {
// These constants once happened to expose a bug in insert().
// I'm keeping them around to prevent a regression.
let mut s1 = HashSet::new();
s1.insert(1);
s1.insert(2);
s1.insert(3);
let mut s2 = HashSet::new();
s2.insert(1);
s2.insert(2);
assert!(!s1.par_eq(&s2));
s2.insert(3);
assert!(s1.par_eq(&s2));
}
#[test]
fn test_extend_ref() {
let mut a = HashSet::new();
a.insert(1);
a.par_extend(&[2, 3, 4][..]);
assert_eq!(a.len(), 4);
assert!(a.contains(&1));
assert!(a.contains(&2));
assert!(a.contains(&3));
assert!(a.contains(&4));
let mut b = HashSet::new();
b.insert(5);
b.insert(6);
a.par_extend(&b);
assert_eq!(a.len(), 6);
assert!(a.contains(&1));
assert!(a.contains(&2));
assert!(a.contains(&3));
assert!(a.contains(&4));
assert!(a.contains(&5));
assert!(a.contains(&6));
}
}


@@ -0,0 +1,249 @@
//! Rayon extensions for `HashTable`.
use super::raw::{RawIntoParIter, RawParDrain, RawParIter};
use crate::hash_table::HashTable;
use crate::raw::{Allocator, Global};
use core::fmt;
use core::marker::PhantomData;
use rayon::iter::plumbing::UnindexedConsumer;
use rayon::iter::{IntoParallelIterator, ParallelIterator};
/// Parallel iterator over shared references to entries in a table.
///
/// This iterator is created by the [`par_iter`] method on [`HashTable`]
/// (provided by the [`IntoParallelRefIterator`] trait).
/// See its documentation for more.
///
/// [`par_iter`]: /hashbrown/struct.HashTable.html#method.par_iter
/// [`HashTable`]: /hashbrown/struct.HashTable.html
/// [`IntoParallelRefIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelRefIterator.html
pub struct ParIter<'a, T> {
inner: RawParIter<T>,
marker: PhantomData<&'a T>,
}
impl<'a, T: Sync> ParallelIterator for ParIter<'a, T> {
type Item = &'a T;
#[cfg_attr(feature = "inline-more", inline)]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.inner
.map(|x| unsafe { x.as_ref() })
.drive_unindexed(consumer)
}
}
impl<T> Clone for ParIter<'_, T> {
#[cfg_attr(feature = "inline-more", inline)]
fn clone(&self) -> Self {
Self {
inner: self.inner.clone(),
marker: PhantomData,
}
}
}
impl<T: fmt::Debug> fmt::Debug for ParIter<'_, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let iter = unsafe { self.inner.iter() }.map(|x| unsafe { x.as_ref() });
f.debug_list().entries(iter).finish()
}
}
/// Parallel iterator over mutable references to entries in a table.
///
/// This iterator is created by the [`par_iter_mut`] method on [`HashTable`]
/// (provided by the [`IntoParallelRefMutIterator`] trait).
/// See its documentation for more.
///
/// [`par_iter_mut`]: /hashbrown/struct.HashTable.html#method.par_iter_mut
/// [`HashTable`]: /hashbrown/struct.HashTable.html
/// [`IntoParallelRefMutIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelRefMutIterator.html
pub struct ParIterMut<'a, T> {
inner: RawParIter<T>,
marker: PhantomData<&'a mut T>,
}
impl<'a, T: Send> ParallelIterator for ParIterMut<'a, T> {
type Item = &'a mut T;
#[cfg_attr(feature = "inline-more", inline)]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.inner
.map(|x| unsafe { x.as_mut() })
.drive_unindexed(consumer)
}
}
impl<T: fmt::Debug> fmt::Debug for ParIterMut<'_, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
ParIter {
inner: self.inner.clone(),
marker: PhantomData,
}
.fmt(f)
}
}
/// Parallel iterator over entries of a consumed table.
///
/// This iterator is created by the [`into_par_iter`] method on [`HashTable`]
/// (provided by the [`IntoParallelIterator`] trait).
/// See its documentation for more.
///
/// [`into_par_iter`]: /hashbrown/struct.HashTable.html#method.into_par_iter
/// [`HashTable`]: /hashbrown/struct.HashTable.html
/// [`IntoParallelIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelIterator.html
pub struct IntoParIter<T, A: Allocator = Global> {
inner: RawIntoParIter<T, A>,
}
impl<T: Send, A: Allocator + Send> ParallelIterator for IntoParIter<T, A> {
type Item = T;
#[cfg_attr(feature = "inline-more", inline)]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.inner.drive_unindexed(consumer)
}
}
impl<T: fmt::Debug, A: Allocator> fmt::Debug for IntoParIter<T, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
ParIter {
inner: unsafe { self.inner.par_iter() },
marker: PhantomData,
}
.fmt(f)
}
}
/// Parallel draining iterator over entries of a table.
///
/// This iterator is created by the [`par_drain`] method on [`HashTable`].
/// See its documentation for more.
///
/// [`par_drain`]: /hashbrown/struct.HashTable.html#method.par_drain
/// [`HashTable`]: /hashbrown/struct.HashTable.html
pub struct ParDrain<'a, T, A: Allocator = Global> {
inner: RawParDrain<'a, T, A>,
}
impl<T: Send, A: Allocator + Sync> ParallelIterator for ParDrain<'_, T, A> {
type Item = T;
#[cfg_attr(feature = "inline-more", inline)]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.inner.drive_unindexed(consumer)
}
}
impl<T: fmt::Debug, A: Allocator> fmt::Debug for ParDrain<'_, T, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
ParIter {
inner: unsafe { self.inner.par_iter() },
marker: PhantomData,
}
.fmt(f)
}
}
impl<T: Send, A: Allocator> HashTable<T, A> {
/// Consumes (potentially in parallel) all values in an arbitrary order,
/// while preserving the table's allocated memory for reuse.
#[cfg_attr(feature = "inline-more", inline)]
pub fn par_drain(&mut self) -> ParDrain<'_, T, A> {
ParDrain {
inner: self.raw.par_drain(),
}
}
}
impl<T: Send, A: Allocator + Send> IntoParallelIterator for HashTable<T, A> {
type Item = T;
type Iter = IntoParIter<T, A>;
#[cfg_attr(feature = "inline-more", inline)]
fn into_par_iter(self) -> Self::Iter {
IntoParIter {
inner: self.raw.into_par_iter(),
}
}
}
impl<'a, T: Sync, A: Allocator> IntoParallelIterator for &'a HashTable<T, A> {
type Item = &'a T;
type Iter = ParIter<'a, T>;
#[cfg_attr(feature = "inline-more", inline)]
fn into_par_iter(self) -> Self::Iter {
ParIter {
inner: unsafe { self.raw.par_iter() },
marker: PhantomData,
}
}
}
impl<'a, T: Send, A: Allocator> IntoParallelIterator for &'a mut HashTable<T, A> {
type Item = &'a mut T;
type Iter = ParIterMut<'a, T>;
#[cfg_attr(feature = "inline-more", inline)]
fn into_par_iter(self) -> Self::Iter {
ParIterMut {
inner: unsafe { self.raw.par_iter() },
marker: PhantomData,
}
}
}
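// Illustrative downstream usage of the parallel iterators above, assuming the
// `rayon` and (default) `default-hasher` features are enabled:
fn par_table_sketch() {
    use core::hash::BuildHasher;
    use hashbrown::{DefaultHashBuilder, HashTable};
    use rayon::prelude::*;

    let bh = DefaultHashBuilder::default();
    let mut table: HashTable<u64> = HashTable::new();
    for i in 0..100u64 {
        table.insert_unique(bh.hash_one(i), i, |v| bh.hash_one(*v));
    }

    // Shared parallel iteration over `&u64`.
    let sum: u64 = table.par_iter().copied().sum();
    assert_eq!(sum, 4950);

    // Mutable parallel iteration.
    table.par_iter_mut().for_each(|v| *v += 1);

    // Parallel draining empties the table but keeps its allocation.
    assert_eq!(table.par_drain().count(), 100);
    assert!(table.is_empty());
}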
#[cfg(test)]
mod test_par_table {
use alloc::vec::Vec;
use core::sync::atomic::{AtomicUsize, Ordering};
use rayon::prelude::*;
use crate::{hash_map::make_hash, hash_table::HashTable, DefaultHashBuilder};
#[test]
fn test_iterate() {
let hasher = DefaultHashBuilder::default();
let mut a = HashTable::new();
for i in 0..32 {
a.insert_unique(make_hash(&hasher, &i), i, |x| make_hash(&hasher, x));
}
let observed = AtomicUsize::new(0);
a.par_iter().for_each(|k| {
observed.fetch_or(1 << *k, Ordering::Relaxed);
});
assert_eq!(observed.into_inner(), 0xFFFF_FFFF);
}
#[test]
fn test_move_iter() {
let hasher = DefaultHashBuilder::default();
let hs = {
let mut hs = HashTable::new();
hs.insert_unique(make_hash(&hasher, &'a'), 'a', |x| make_hash(&hasher, x));
hs.insert_unique(make_hash(&hasher, &'b'), 'b', |x| make_hash(&hasher, x));
hs
};
let v = hs.into_par_iter().collect::<Vec<char>>();
assert!(v == ['a', 'b'] || v == ['b', 'a']);
}
}


@@ -0,0 +1,220 @@
mod size_hint {
use core::cmp;
/// This presumably exists to prevent denial of service attacks.
///
/// Original discussion: https://github.com/serde-rs/serde/issues/1114.
#[cfg_attr(feature = "inline-more", inline)]
pub(super) fn cautious(hint: Option<usize>) -> usize {
cmp::min(hint.unwrap_or(0), 4096)
}
}
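// Illustrative in-crate check of the cap above: even a hostile or over-eager
// size hint only ever pre-reserves 4096 entries.
#[cfg(test)]
mod size_hint_sketch {
    #[test]
    fn cautious_caps_preallocation() {
        assert_eq!(super::size_hint::cautious(Some(usize::MAX)), 4096);
        assert_eq!(super::size_hint::cautious(Some(10)), 10);
        assert_eq!(super::size_hint::cautious(None), 0);
    }
}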
mod map {
use crate::raw::Allocator;
use core::fmt;
use core::hash::{BuildHasher, Hash};
use core::marker::PhantomData;
use serde::de::{Deserialize, Deserializer, MapAccess, Visitor};
use serde::ser::{Serialize, Serializer};
use crate::hash_map::HashMap;
use super::size_hint;
impl<K, V, H, A> Serialize for HashMap<K, V, H, A>
where
K: Serialize + Eq + Hash,
V: Serialize,
H: BuildHasher,
A: Allocator,
{
#[cfg_attr(feature = "inline-more", inline)]
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.collect_map(self)
}
}
impl<'de, K, V, S, A> Deserialize<'de> for HashMap<K, V, S, A>
where
K: Deserialize<'de> + Eq + Hash,
V: Deserialize<'de>,
S: BuildHasher + Default,
A: Allocator + Default,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct MapVisitor<K, V, S, A>
where
A: Allocator,
{
marker: PhantomData<HashMap<K, V, S, A>>,
}
impl<'de, K, V, S, A> Visitor<'de> for MapVisitor<K, V, S, A>
where
K: Deserialize<'de> + Eq + Hash,
V: Deserialize<'de>,
S: BuildHasher + Default,
A: Allocator + Default,
{
type Value = HashMap<K, V, S, A>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a map")
}
#[cfg_attr(feature = "inline-more", inline)]
fn visit_map<M>(self, mut map: M) -> Result<Self::Value, M::Error>
where
M: MapAccess<'de>,
{
let mut values = HashMap::with_capacity_and_hasher_in(
size_hint::cautious(map.size_hint()),
S::default(),
A::default(),
);
while let Some((key, value)) = map.next_entry()? {
values.insert(key, value);
}
Ok(values)
}
}
let visitor = MapVisitor {
marker: PhantomData,
};
deserializer.deserialize_map(visitor)
}
}
}
mod set {
use crate::raw::Allocator;
use core::fmt;
use core::hash::{BuildHasher, Hash};
use core::marker::PhantomData;
use serde::de::{Deserialize, Deserializer, SeqAccess, Visitor};
use serde::ser::{Serialize, Serializer};
use crate::hash_set::HashSet;
use super::size_hint;
impl<T, H, A> Serialize for HashSet<T, H, A>
where
T: Serialize + Eq + Hash,
H: BuildHasher,
A: Allocator,
{
#[cfg_attr(feature = "inline-more", inline)]
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.collect_seq(self)
}
}
impl<'de, T, S, A> Deserialize<'de> for HashSet<T, S, A>
where
T: Deserialize<'de> + Eq + Hash,
S: BuildHasher + Default,
A: Allocator + Default,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct SeqVisitor<T, S, A>
where
A: Allocator,
{
marker: PhantomData<HashSet<T, S, A>>,
}
impl<'de, T, S, A> Visitor<'de> for SeqVisitor<T, S, A>
where
T: Deserialize<'de> + Eq + Hash,
S: BuildHasher + Default,
A: Allocator + Default,
{
type Value = HashSet<T, S, A>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a sequence")
}
#[cfg_attr(feature = "inline-more", inline)]
fn visit_seq<M>(self, mut seq: M) -> Result<Self::Value, M::Error>
where
M: SeqAccess<'de>,
{
let mut values = HashSet::with_capacity_and_hasher_in(
size_hint::cautious(seq.size_hint()),
S::default(),
A::default(),
);
while let Some(value) = seq.next_element()? {
values.insert(value);
}
Ok(values)
}
}
let visitor = SeqVisitor {
marker: PhantomData,
};
deserializer.deserialize_seq(visitor)
}
#[allow(clippy::missing_errors_doc)]
fn deserialize_in_place<D>(deserializer: D, place: &mut Self) -> Result<(), D::Error>
where
D: Deserializer<'de>,
{
struct SeqInPlaceVisitor<'a, T, S, A>(&'a mut HashSet<T, S, A>)
where
A: Allocator;
impl<'de, T, S, A> Visitor<'de> for SeqInPlaceVisitor<'_, T, S, A>
where
T: Deserialize<'de> + Eq + Hash,
S: BuildHasher + Default,
A: Allocator,
{
type Value = ();
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a sequence")
}
#[cfg_attr(feature = "inline-more", inline)]
fn visit_seq<M>(self, mut seq: M) -> Result<Self::Value, M::Error>
where
M: SeqAccess<'de>,
{
self.0.clear();
self.0.reserve(size_hint::cautious(seq.size_hint()));
while let Some(value) = seq.next_element()? {
self.0.insert(value);
}
Ok(())
}
}
deserializer.deserialize_seq(SeqInPlaceVisitor(place))
}
}
}
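// Illustrative downstream round trip through the impls above, assuming the
// `serde` feature is enabled and that `serde_json` is available (an
// assumption of this sketch, not a dependency of the crate):
fn serde_roundtrip_sketch() -> Result<(), serde_json::Error> {
    use hashbrown::{HashMap, HashSet};

    let mut map: HashMap<String, u32> = HashMap::new();
    map.insert("a".to_string(), 1);
    let json = serde_json::to_string(&map)?;
    let back: HashMap<String, u32> = serde_json::from_str(&json)?;
    assert_eq!(back, map);

    // Duplicate elements collapse on the way into the set.
    let set: HashSet<u32> = serde_json::from_str("[1, 2, 2, 3]")?;
    assert_eq!(set.len(), 3);
    Ok(())
}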

77
vendor/hashbrown/src/hasher.rs vendored Normal file

@@ -0,0 +1,77 @@
#[cfg(feature = "default-hasher")]
use {
core::hash::{BuildHasher, Hasher},
foldhash::fast::RandomState,
};
/// Default hash builder for the `S` type parameter of
/// [`HashMap`](crate::HashMap) and [`HashSet`](crate::HashSet).
///
/// This only implements `BuildHasher` when the "default-hasher" crate feature
/// is enabled; otherwise it just serves as a placeholder, and a custom `S` type
/// must be used to have a fully functional `HashMap` or `HashSet`.
#[derive(Clone, Debug, Default)]
pub struct DefaultHashBuilder {
#[cfg(feature = "default-hasher")]
inner: RandomState,
}
#[cfg(feature = "default-hasher")]
impl BuildHasher for DefaultHashBuilder {
type Hasher = DefaultHasher;
#[inline(always)]
fn build_hasher(&self) -> Self::Hasher {
DefaultHasher {
inner: self.inner.build_hasher(),
}
}
}
/// Default hasher for [`HashMap`](crate::HashMap) and [`HashSet`](crate::HashSet).
#[cfg(feature = "default-hasher")]
#[derive(Clone)]
pub struct DefaultHasher {
inner: <RandomState as BuildHasher>::Hasher,
}
#[cfg(feature = "default-hasher")]
macro_rules! forward_writes {
($( $write:ident ( $ty:ty ) , )*) => {$(
#[inline(always)]
fn $write(&mut self, arg: $ty) {
self.inner.$write(arg);
}
)*}
}
#[cfg(feature = "default-hasher")]
impl Hasher for DefaultHasher {
forward_writes! {
write(&[u8]),
write_u8(u8),
write_u16(u16),
write_u32(u32),
write_u64(u64),
write_u128(u128),
write_usize(usize),
write_i8(i8),
write_i16(i16),
write_i32(i32),
write_i64(i64),
write_i128(i128),
write_isize(isize),
}
// feature(hasher_prefixfree_extras)
#[cfg(feature = "nightly")]
forward_writes! {
write_length_prefix(usize),
write_str(&str),
}
#[inline(always)]
fn finish(&self) -> u64 {
self.inner.finish()
}
}
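// Illustrative downstream usage, assuming the (default) `default-hasher`
// feature so that `DefaultHashBuilder` implements `BuildHasher`:
fn default_hasher_sketch() {
    use core::hash::{BuildHasher, Hasher};
    use hashbrown::DefaultHashBuilder;

    let builder = DefaultHashBuilder::default();
    let hash = |x: u64| {
        let mut h = builder.build_hasher();
        h.write_u64(x);
        h.finish()
    };
    // A single builder hashes equal inputs to equal outputs.
    assert_eq!(hash(42), hash(42));
}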

189
vendor/hashbrown/src/lib.rs vendored Normal file

@@ -0,0 +1,189 @@
//! This crate is a Rust port of Google's high-performance [SwissTable] hash
//! map, adapted to make it a drop-in replacement for Rust's standard `HashMap`
//! and `HashSet` types.
//!
//! The original C++ version of [SwissTable] can be found [here], and this
//! [CppCon talk] gives an overview of how the algorithm works.
//!
//! [SwissTable]: https://abseil.io/blog/20180927-swisstables
//! [here]: https://github.com/abseil/abseil-cpp/blob/master/absl/container/internal/raw_hash_set.h
//! [CppCon talk]: https://www.youtube.com/watch?v=ncHmEUmJZf4
#![no_std]
#![cfg_attr(
feature = "nightly",
feature(
test,
core_intrinsics,
dropck_eyepatch,
min_specialization,
extend_one,
allocator_api,
slice_ptr_get,
maybe_uninit_array_assume_init,
strict_provenance_lints
)
)]
#![cfg_attr(feature = "rustc-dep-of-std", feature(rustc_attrs))]
#![allow(
clippy::doc_markdown,
clippy::module_name_repetitions,
clippy::must_use_candidate,
clippy::option_if_let_else,
clippy::redundant_else,
clippy::manual_map,
clippy::missing_safety_doc,
clippy::missing_errors_doc
)]
#![warn(missing_docs)]
#![warn(rust_2018_idioms)]
#![cfg_attr(feature = "nightly", warn(fuzzy_provenance_casts))]
#![cfg_attr(
feature = "nightly",
allow(clippy::incompatible_msrv, internal_features)
)]
#![cfg_attr(
all(feature = "nightly", target_arch = "loongarch64"),
feature(stdarch_loongarch)
)]
#![cfg_attr(
all(feature = "nightly", feature = "default-hasher"),
feature(hasher_prefixfree_extras)
)]
#[cfg(test)]
#[macro_use]
extern crate std;
#[cfg_attr(test, macro_use)]
#[cfg_attr(feature = "rustc-dep-of-std", allow(unused_extern_crates))]
extern crate alloc;
#[cfg(feature = "nightly")]
#[cfg(doctest)]
doc_comment::doctest!("../README.md");
#[macro_use]
mod macros;
mod control;
mod hasher;
mod raw;
mod util;
mod external_trait_impls;
mod map;
#[cfg(feature = "raw-entry")]
mod raw_entry;
#[cfg(feature = "rustc-internal-api")]
mod rustc_entry;
mod scopeguard;
mod set;
mod table;
pub use crate::hasher::DefaultHashBuilder;
#[cfg(feature = "default-hasher")]
pub use crate::hasher::DefaultHasher;
pub mod hash_map {
//! A hash map implemented with quadratic probing and SIMD lookup.
pub use crate::map::*;
#[cfg(feature = "rustc-internal-api")]
pub use crate::rustc_entry::*;
#[cfg(feature = "rayon")]
/// [rayon]-based parallel iterator types for hash maps.
/// You will rarely need to interact with it directly unless you have need
/// to name one of the iterator types.
///
/// [rayon]: https://docs.rs/rayon/1.0/rayon
pub mod rayon {
pub use crate::external_trait_impls::rayon::map::*;
}
}
pub mod hash_set {
//! A hash set implemented as a `HashMap` where the value is `()`.
pub use crate::set::*;
#[cfg(feature = "rayon")]
/// [rayon]-based parallel iterator types for hash sets.
/// You will rarely need to interact with it directly unless you have need
/// to name one of the iterator types.
///
/// [rayon]: https://docs.rs/rayon/1.0/rayon
pub mod rayon {
pub use crate::external_trait_impls::rayon::set::*;
}
}
pub mod hash_table {
//! A hash table implemented with quadratic probing and SIMD lookup.
pub use crate::table::*;
#[cfg(feature = "rayon")]
/// [rayon]-based parallel iterator types for hash tables.
/// You will rarely need to interact with it directly unless you have need
/// to name one of the iterator types.
///
/// [rayon]: https://docs.rs/rayon/1.0/rayon
pub mod rayon {
pub use crate::external_trait_impls::rayon::table::*;
}
}
pub use crate::map::HashMap;
pub use crate::set::HashSet;
pub use crate::table::HashTable;
#[cfg(feature = "equivalent")]
pub use equivalent::Equivalent;
// This is only used as a fallback when building as part of `std`.
#[cfg(not(feature = "equivalent"))]
/// Key equivalence trait.
///
/// This trait defines the function used to compare the input value with the
/// map keys (or set values) during a lookup operation such as [`HashMap::get`]
/// or [`HashSet::contains`].
/// It is provided with a blanket implementation based on the
/// [`Borrow`](core::borrow::Borrow) trait.
///
/// # Correctness
///
/// Equivalent values must hash to the same value.
pub trait Equivalent<K: ?Sized> {
/// Checks if this value is equivalent to the given key.
///
/// Returns `true` if both values are equivalent, and `false` otherwise.
///
/// # Correctness
///
/// When this function returns `true`, both `self` and `key` must hash to
/// the same value.
fn equivalent(&self, key: &K) -> bool;
}
#[cfg(not(feature = "equivalent"))]
impl<Q: ?Sized, K: ?Sized> Equivalent<K> for Q
where
Q: Eq,
K: core::borrow::Borrow<Q>,
{
fn equivalent(&self, key: &K) -> bool {
self == key.borrow()
}
}
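// Illustrative downstream lookup relying on an `Equivalent` blanket impl like
// the one above: a borrowed `&str` can probe a map keyed by `String` because
// `String: Borrow<str>`, so no owned key is allocated for the lookup.
fn equivalent_lookup_sketch() {
    use hashbrown::HashMap;

    let mut map: HashMap<String, u32> = HashMap::new();
    map.insert("key".to_string(), 7);
    assert_eq!(map.get("key"), Some(&7));
}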
/// The error type for `try_reserve` methods.
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum TryReserveError {
/// Error due to the computed capacity exceeding the collection's maximum
/// (usually `isize::MAX` bytes).
CapacityOverflow,
/// The memory allocator returned an error
AllocError {
/// The layout of the allocation request that failed.
layout: alloc::alloc::Layout,
},
}
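// Illustrative downstream handling of a failed `try_reserve` call using the
// error type above:
fn try_reserve_sketch() {
    use hashbrown::{HashMap, TryReserveError};

    let mut map: HashMap<u64, u64> = HashMap::new();
    match map.try_reserve(usize::MAX) {
        Err(TryReserveError::CapacityOverflow) => { /* asked for too much */ }
        Err(TryReserveError::AllocError { layout }) => {
            let _ = layout; // the allocator refused this layout
        }
        Ok(()) => unreachable!("reserving usize::MAX entries cannot succeed"),
    }
}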

70
vendor/hashbrown/src/macros.rs vendored Normal file

@@ -0,0 +1,70 @@
// See the cfg-if crate.
#[allow(unused_macro_rules)]
macro_rules! cfg_if {
// match if/else chains with a final `else`
($(
if #[cfg($($meta:meta),*)] { $($it:item)* }
) else * else {
$($it2:item)*
}) => {
cfg_if! {
@__items
() ;
$( ( ($($meta),*) ($($it)*) ), )*
( () ($($it2)*) ),
}
};
// match if/else chains lacking a final `else`
(
if #[cfg($($i_met:meta),*)] { $($i_it:item)* }
$(
else if #[cfg($($e_met:meta),*)] { $($e_it:item)* }
)*
) => {
cfg_if! {
@__items
() ;
( ($($i_met),*) ($($i_it)*) ),
$( ( ($($e_met),*) ($($e_it)*) ), )*
( () () ),
}
};
// Internal and recursive macro to emit all the items
//
// Collects all the negated cfgs in a list at the beginning and after the
// semicolon is all the remaining items
(@__items ($($not:meta,)*) ; ) => {};
(@__items ($($not:meta,)*) ; ( ($($m:meta),*) ($($it:item)*) ), $($rest:tt)*) => {
// Emit all items within one block, applying an appropriate #[cfg]. The
// #[cfg] will require all `$m` matchers specified and must also negate
// all previous matchers.
cfg_if! { @__apply cfg(all($($m,)* not(any($($not),*)))), $($it)* }
// Recurse to emit all other items in `$rest`, and when we do so add all
// our `$m` matchers to the list of `$not` matchers as future emissions
// will have to negate everything we just matched as well.
cfg_if! { @__items ($($not,)* $($m,)*) ; $($rest)* }
};
// Internal macro to apply a cfg attribute to a list of items
(@__apply $m:meta, $($it:item)*) => {
$(#[$m] $it)*
};
}
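// Illustrative use of the macro above: emit one of two items depending on a
// cfg predicate (the constant name is made up for this sketch).
cfg_if! {
    if #[cfg(target_pointer_width = "64")] {
        #[allow(dead_code)]
        const SKETCH_WORD_BITS: u32 = 64;
    } else {
        #[allow(dead_code)]
        const SKETCH_WORD_BITS: u32 = 32;
    }
}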
// Helper macro for specialization. This also helps avoid parse errors if the
// default fn syntax for specialization changes in the future.
#[cfg(feature = "nightly")]
macro_rules! default_fn {
(#[$($a:tt)*] $($tt:tt)*) => {
#[$($a)*] default $($tt)*
}
}
#[cfg(not(feature = "nightly"))]
macro_rules! default_fn {
($($tt:tt)*) => {
$($tt)*
}
}

6581
vendor/hashbrown/src/map.rs vendored Normal file

File diff suppressed because it is too large

92
vendor/hashbrown/src/raw/alloc.rs vendored Normal file

@@ -0,0 +1,92 @@
#[cfg(test)]
pub(crate) use self::inner::AllocError;
pub(crate) use self::inner::{do_alloc, Allocator, Global};
// Nightly case.
// This uses the unstable `allocator_api` feature.
// It is compatible with `allocator-api2`, which can be enabled or not.
// This is used when building for `std`.
#[cfg(feature = "nightly")]
mod inner {
#[cfg(test)]
pub use crate::alloc::alloc::AllocError;
use crate::alloc::alloc::Layout;
pub use crate::alloc::alloc::{Allocator, Global};
use core::ptr::NonNull;
#[allow(clippy::map_err_ignore)]
pub(crate) fn do_alloc<A: Allocator>(alloc: &A, layout: Layout) -> Result<NonNull<u8>, ()> {
match alloc.allocate(layout) {
Ok(ptr) => Ok(ptr.as_non_null_ptr()),
Err(_) => Err(()),
}
}
}
// Basic non-nightly case.
// This uses `allocator-api2`, which is enabled by default.
// If any crate enables "nightly" in `allocator-api2`,
// this will be equivalent to the nightly case,
// since `allocator_api2::alloc::Allocator` would be a re-export of
// `core::alloc::Allocator`.
#[cfg(all(not(feature = "nightly"), feature = "allocator-api2"))]
mod inner {
use crate::alloc::alloc::Layout;
#[cfg(test)]
pub use allocator_api2::alloc::AllocError;
pub use allocator_api2::alloc::{Allocator, Global};
use core::ptr::NonNull;
#[allow(clippy::map_err_ignore)]
pub(crate) fn do_alloc<A: Allocator>(alloc: &A, layout: Layout) -> Result<NonNull<u8>, ()> {
match alloc.allocate(layout) {
Ok(ptr) => Ok(ptr.cast()),
Err(_) => Err(()),
}
}
}
// No-defaults case.
// This is used when building with default features turned off and
// neither `nightly` nor `allocator-api2` is enabled,
// making it impossible to use any custom allocator with the collections
// defined in this crate.
// Any crate in the build tree can enable `allocator-api2` or `nightly`
// without disturbing users that don't want to use it.
#[cfg(not(any(feature = "nightly", feature = "allocator-api2")))]
mod inner {
use crate::alloc::alloc::{alloc, dealloc, Layout};
use core::ptr::NonNull;
#[allow(clippy::missing_safety_doc)] // not exposed outside of this crate
pub unsafe trait Allocator {
fn allocate(&self, layout: Layout) -> Result<NonNull<u8>, ()>;
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout);
}
#[derive(Copy, Clone)]
pub struct Global;
unsafe impl Allocator for Global {
#[inline]
fn allocate(&self, layout: Layout) -> Result<NonNull<u8>, ()> {
unsafe { NonNull::new(alloc(layout)).ok_or(()) }
}
#[inline]
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
dealloc(ptr.as_ptr(), layout);
}
}
impl Default for Global {
#[inline]
fn default() -> Self {
Global
}
}
pub(crate) fn do_alloc<A: Allocator>(alloc: &A, layout: Layout) -> Result<NonNull<u8>, ()> {
alloc.allocate(layout)
}
}
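// Illustrative in-crate sketch (test-only) of the allocation path above:
// allocate a small block through `do_alloc` and hand it straight back.
#[cfg(test)]
mod alloc_sketch {
    use super::{do_alloc, Allocator, Global};
    use core::alloc::Layout;

    #[test]
    fn allocate_and_free_roundtrip() {
        // 64 bytes, 8-byte aligned: a small, non-zero-sized request.
        let layout = Layout::from_size_align(64, 8).unwrap();
        let ptr = do_alloc(&Global, layout).expect("allocation failed");
        // SAFETY: `ptr` was just allocated by `Global` with this exact layout.
        unsafe { Global.deallocate(ptr, layout) };
    }
}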

4436
vendor/hashbrown/src/raw/mod.rs vendored Normal file

File diff suppressed because it is too large

1740
vendor/hashbrown/src/raw_entry.rs vendored Normal file

File diff suppressed because it is too large

567
vendor/hashbrown/src/rustc_entry.rs vendored Normal file

@@ -0,0 +1,567 @@
use self::RustcEntry::*;
use crate::map::{make_hash, Drain, HashMap, IntoIter, Iter, IterMut};
use crate::raw::{Allocator, Bucket, Global, RawTable};
use core::fmt::{self, Debug};
use core::hash::{BuildHasher, Hash};
use core::mem;
impl<K, V, S, A> HashMap<K, V, S, A>
where
K: Eq + Hash,
S: BuildHasher,
A: Allocator,
{
/// Gets the given key's corresponding entry in the map for in-place manipulation.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
///
/// let mut letters = HashMap::new();
///
/// for ch in "a short treatise on fungi".chars() {
/// let counter = letters.rustc_entry(ch).or_insert(0);
/// *counter += 1;
/// }
///
/// assert_eq!(letters[&'s'], 2);
/// assert_eq!(letters[&'t'], 3);
/// assert_eq!(letters[&'u'], 1);
/// assert_eq!(letters.get(&'y'), None);
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn rustc_entry(&mut self, key: K) -> RustcEntry<'_, K, V, A> {
let hash = make_hash(&self.hash_builder, &key);
if let Some(elem) = self.table.find(hash, |q| q.0.eq(&key)) {
RustcEntry::Occupied(RustcOccupiedEntry {
elem,
table: &mut self.table,
})
} else {
// Ideally we would put this in VacantEntry::insert, but Entry is not
// generic over the BuildHasher and adding a generic parameter would be
// a breaking change.
self.reserve(1);
RustcEntry::Vacant(RustcVacantEntry {
hash,
key,
table: &mut self.table,
})
}
}
}
/// A view into a single entry in a map, which may either be vacant or occupied.
///
/// This `enum` is constructed from the [`rustc_entry`] method on [`HashMap`].
///
/// [`HashMap`]: struct.HashMap.html
/// [`rustc_entry`]: struct.HashMap.html#method.rustc_entry
pub enum RustcEntry<'a, K, V, A = Global>
where
A: Allocator,
{
/// An occupied entry.
Occupied(RustcOccupiedEntry<'a, K, V, A>),
/// A vacant entry.
Vacant(RustcVacantEntry<'a, K, V, A>),
}
impl<K: Debug, V: Debug, A: Allocator> Debug for RustcEntry<'_, K, V, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(),
Occupied(ref o) => f.debug_tuple("Entry").field(o).finish(),
}
}
}
/// A view into an occupied entry in a `HashMap`.
/// It is part of the [`RustcEntry`] enum.
///
/// [`RustcEntry`]: enum.RustcEntry.html
pub struct RustcOccupiedEntry<'a, K, V, A = Global>
where
A: Allocator,
{
elem: Bucket<(K, V)>,
table: &'a mut RawTable<(K, V), A>,
}
unsafe impl<K, V, A> Send for RustcOccupiedEntry<'_, K, V, A>
where
K: Send,
V: Send,
A: Allocator + Send,
{
}
unsafe impl<K, V, A> Sync for RustcOccupiedEntry<'_, K, V, A>
where
K: Sync,
V: Sync,
A: Allocator + Sync,
{
}
impl<K: Debug, V: Debug, A: Allocator> Debug for RustcOccupiedEntry<'_, K, V, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("OccupiedEntry")
.field("key", self.key())
.field("value", self.get())
.finish()
}
}
/// A view into a vacant entry in a `HashMap`.
/// It is part of the [`RustcEntry`] enum.
///
/// [`RustcEntry`]: enum.RustcEntry.html
pub struct RustcVacantEntry<'a, K, V, A = Global>
where
A: Allocator,
{
hash: u64,
key: K,
table: &'a mut RawTable<(K, V), A>,
}
impl<K: Debug, V, A: Allocator> Debug for RustcVacantEntry<'_, K, V, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("VacantEntry").field(self.key()).finish()
}
}
impl<'a, K, V, A: Allocator> RustcEntry<'a, K, V, A> {
/// Sets the value of the entry, and returns a RustcOccupiedEntry.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
/// let entry = map.rustc_entry("horseyland").insert(37);
///
/// assert_eq!(entry.key(), &"horseyland");
/// ```
pub fn insert(self, value: V) -> RustcOccupiedEntry<'a, K, V, A> {
match self {
Vacant(entry) => entry.insert_entry(value),
Occupied(mut entry) => {
entry.insert(value);
entry
}
}
}
/// Ensures a value is in the entry by inserting the default if empty, and returns
/// a mutable reference to the value in the entry.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
///
/// map.rustc_entry("poneyland").or_insert(3);
/// assert_eq!(map["poneyland"], 3);
///
/// *map.rustc_entry("poneyland").or_insert(10) *= 2;
/// assert_eq!(map["poneyland"], 6);
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn or_insert(self, default: V) -> &'a mut V
where
K: Hash,
{
match self {
Occupied(entry) => entry.into_mut(),
Vacant(entry) => entry.insert(default),
}
}
/// Ensures a value is in the entry by inserting the result of the default function if empty,
/// and returns a mutable reference to the value in the entry.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
///
/// let mut map: HashMap<&str, String> = HashMap::new();
/// let s = "hoho".to_string();
///
/// map.rustc_entry("poneyland").or_insert_with(|| s);
///
/// assert_eq!(map["poneyland"], "hoho".to_string());
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn or_insert_with<F: FnOnce() -> V>(self, default: F) -> &'a mut V
where
K: Hash,
{
match self {
Occupied(entry) => entry.into_mut(),
Vacant(entry) => entry.insert(default()),
}
}
/// Returns a reference to this entry's key.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
/// assert_eq!(map.rustc_entry("poneyland").key(), &"poneyland");
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn key(&self) -> &K {
match *self {
Occupied(ref entry) => entry.key(),
Vacant(ref entry) => entry.key(),
}
}
/// Provides in-place mutable access to an occupied entry before any
/// potential inserts into the map.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
///
/// map.rustc_entry("poneyland")
/// .and_modify(|e| { *e += 1 })
/// .or_insert(42);
/// assert_eq!(map["poneyland"], 42);
///
/// map.rustc_entry("poneyland")
/// .and_modify(|e| { *e += 1 })
/// .or_insert(42);
/// assert_eq!(map["poneyland"], 43);
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn and_modify<F>(self, f: F) -> Self
where
F: FnOnce(&mut V),
{
match self {
Occupied(mut entry) => {
f(entry.get_mut());
Occupied(entry)
}
Vacant(entry) => Vacant(entry),
}
}
}
impl<'a, K, V: Default, A: Allocator> RustcEntry<'a, K, V, A> {
/// Ensures a value is in the entry by inserting the default value if empty,
/// and returns a mutable reference to the value in the entry.
///
/// # Examples
///
/// ```
/// # fn main() {
/// use hashbrown::HashMap;
///
/// let mut map: HashMap<&str, Option<u32>> = HashMap::new();
/// map.rustc_entry("poneyland").or_default();
///
/// assert_eq!(map["poneyland"], None);
/// # }
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn or_default(self) -> &'a mut V
where
K: Hash,
{
match self {
Occupied(entry) => entry.into_mut(),
Vacant(entry) => entry.insert(Default::default()),
}
}
}
impl<'a, K, V, A: Allocator> RustcOccupiedEntry<'a, K, V, A> {
/// Gets a reference to the key in the entry.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
/// map.rustc_entry("poneyland").or_insert(12);
/// assert_eq!(map.rustc_entry("poneyland").key(), &"poneyland");
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn key(&self) -> &K {
unsafe { &self.elem.as_ref().0 }
}
/// Take the ownership of the key and value from the map.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
/// use hashbrown::hash_map::RustcEntry;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
/// map.rustc_entry("poneyland").or_insert(12);
///
/// if let RustcEntry::Occupied(o) = map.rustc_entry("poneyland") {
/// // We delete the entry from the map.
/// o.remove_entry();
/// }
///
/// assert_eq!(map.contains_key("poneyland"), false);
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn remove_entry(self) -> (K, V) {
unsafe { self.table.remove(self.elem).0 }
}
/// Gets a reference to the value in the entry.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
/// use hashbrown::hash_map::RustcEntry;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
/// map.rustc_entry("poneyland").or_insert(12);
///
/// if let RustcEntry::Occupied(o) = map.rustc_entry("poneyland") {
/// assert_eq!(o.get(), &12);
/// }
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn get(&self) -> &V {
unsafe { &self.elem.as_ref().1 }
}
/// Gets a mutable reference to the value in the entry.
///
/// If you need a reference to the `RustcOccupiedEntry` which may outlive the
/// destruction of the `RustcEntry` value, see [`into_mut`].
///
/// [`into_mut`]: #method.into_mut
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
/// use hashbrown::hash_map::RustcEntry;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
/// map.rustc_entry("poneyland").or_insert(12);
///
/// assert_eq!(map["poneyland"], 12);
/// if let RustcEntry::Occupied(mut o) = map.rustc_entry("poneyland") {
/// *o.get_mut() += 10;
/// assert_eq!(*o.get(), 22);
///
/// // We can use the same RustcEntry multiple times.
/// *o.get_mut() += 2;
/// }
///
/// assert_eq!(map["poneyland"], 24);
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn get_mut(&mut self) -> &mut V {
unsafe { &mut self.elem.as_mut().1 }
}
/// Converts the RustcOccupiedEntry into a mutable reference to the value in the entry
/// with a lifetime bound to the map itself.
///
/// If you need multiple references to the `RustcOccupiedEntry`, see [`get_mut`].
///
/// [`get_mut`]: #method.get_mut
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
/// use hashbrown::hash_map::RustcEntry;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
/// map.rustc_entry("poneyland").or_insert(12);
///
/// assert_eq!(map["poneyland"], 12);
/// if let RustcEntry::Occupied(o) = map.rustc_entry("poneyland") {
/// *o.into_mut() += 10;
/// }
///
/// assert_eq!(map["poneyland"], 22);
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn into_mut(self) -> &'a mut V {
unsafe { &mut self.elem.as_mut().1 }
}
/// Sets the value of the entry, and returns the entry's old value.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
/// use hashbrown::hash_map::RustcEntry;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
/// map.rustc_entry("poneyland").or_insert(12);
///
/// if let RustcEntry::Occupied(mut o) = map.rustc_entry("poneyland") {
/// assert_eq!(o.insert(15), 12);
/// }
///
/// assert_eq!(map["poneyland"], 15);
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn insert(&mut self, value: V) -> V {
mem::replace(self.get_mut(), value)
}
/// Takes the value out of the entry, and returns it.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
/// use hashbrown::hash_map::RustcEntry;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
/// map.rustc_entry("poneyland").or_insert(12);
///
/// if let RustcEntry::Occupied(o) = map.rustc_entry("poneyland") {
/// assert_eq!(o.remove(), 12);
/// }
///
/// assert_eq!(map.contains_key("poneyland"), false);
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn remove(self) -> V {
self.remove_entry().1
}
}
impl<'a, K, V, A: Allocator> RustcVacantEntry<'a, K, V, A> {
/// Gets a reference to the key that would be used when inserting a value
/// through the `RustcVacantEntry`.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
/// assert_eq!(map.rustc_entry("poneyland").key(), &"poneyland");
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn key(&self) -> &K {
&self.key
}
/// Take ownership of the key.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
/// use hashbrown::hash_map::RustcEntry;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
///
/// if let RustcEntry::Vacant(v) = map.rustc_entry("poneyland") {
/// v.into_key();
/// }
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn into_key(self) -> K {
self.key
}
/// Sets the value of the entry with the RustcVacantEntry's key,
/// and returns a mutable reference to it.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
/// use hashbrown::hash_map::RustcEntry;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
///
/// if let RustcEntry::Vacant(o) = map.rustc_entry("poneyland") {
/// o.insert(37);
/// }
/// assert_eq!(map["poneyland"], 37);
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn insert(self, value: V) -> &'a mut V {
unsafe {
let bucket = self.table.insert_no_grow(self.hash, (self.key, value));
&mut bucket.as_mut().1
}
}
/// Sets the value of the entry with the RustcVacantEntry's key,
/// and returns a RustcOccupiedEntry.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
/// use hashbrown::hash_map::RustcEntry;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
///
/// if let RustcEntry::Vacant(v) = map.rustc_entry("poneyland") {
/// let o = v.insert_entry(37);
/// assert_eq!(o.get(), &37);
/// }
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn insert_entry(self, value: V) -> RustcOccupiedEntry<'a, K, V, A> {
let bucket = unsafe { self.table.insert_no_grow(self.hash, (self.key, value)) };
RustcOccupiedEntry {
elem: bucket,
table: self.table,
}
}
}
impl<K, V> IterMut<'_, K, V> {
/// Returns an iterator of references over the remaining items.
#[cfg_attr(feature = "inline-more", inline)]
pub fn rustc_iter(&self) -> Iter<'_, K, V> {
self.iter()
}
}
impl<K, V, A: Allocator> IntoIter<K, V, A> {
/// Returns an iterator of references over the remaining items.
#[cfg_attr(feature = "inline-more", inline)]
pub fn rustc_iter(&self) -> Iter<'_, K, V> {
self.iter()
}
}
impl<K, V, A: Allocator> Drain<'_, K, V, A> {
/// Returns an iterator of references over the remaining items.
#[cfg_attr(feature = "inline-more", inline)]
pub fn rustc_iter(&self) -> Iter<'_, K, V> {
self.iter()
}
}

72
vendor/hashbrown/src/scopeguard.rs vendored Normal file

@@ -0,0 +1,72 @@
// Extracted from the scopeguard crate
use core::{
mem::ManuallyDrop,
ops::{Deref, DerefMut},
ptr,
};
pub struct ScopeGuard<T, F>
where
F: FnMut(&mut T),
{
dropfn: F,
value: T,
}
#[inline]
pub fn guard<T, F>(value: T, dropfn: F) -> ScopeGuard<T, F>
where
F: FnMut(&mut T),
{
ScopeGuard { dropfn, value }
}
impl<T, F> ScopeGuard<T, F>
where
F: FnMut(&mut T),
{
#[inline]
pub fn into_inner(guard: Self) -> T {
// Cannot move out of Drop-implementing types, so
// ptr::read the value out of a ManuallyDrop<Self>.
// Don't use mem::forget as that might invalidate the value.
let guard = ManuallyDrop::new(guard);
unsafe {
let value = ptr::read(&guard.value);
// read the closure so that it is dropped
let _ = ptr::read(&guard.dropfn);
value
}
}
}
impl<T, F> Deref for ScopeGuard<T, F>
where
F: FnMut(&mut T),
{
type Target = T;
#[inline]
fn deref(&self) -> &T {
&self.value
}
}
impl<T, F> DerefMut for ScopeGuard<T, F>
where
F: FnMut(&mut T),
{
#[inline]
fn deref_mut(&mut self) -> &mut T {
&mut self.value
}
}
impl<T, F> Drop for ScopeGuard<T, F>
where
F: FnMut(&mut T),
{
#[inline]
fn drop(&mut self) {
(self.dropfn)(&mut self.value);
}
}
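// Illustrative in-crate sketch (test-only) of the guard above: the drop
// closure runs when the guard goes out of scope, unless the guard is defused
// with `into_inner`.
#[cfg(test)]
mod scopeguard_sketch {
    use super::{guard, ScopeGuard};

    #[test]
    fn dropfn_runs_unless_defused() {
        let mut cleaned = false;
        {
            let _g = guard(&mut cleaned, |c| **c = true);
            // `_g` is dropped here, running the closure.
        }
        assert!(cleaned);

        let mut skipped = false;
        let g = guard(&mut skipped, |c| **c = true);
        // `into_inner` takes the value back out; the closure never runs.
        let _ = ScopeGuard::into_inner(g);
        assert!(!skipped);
    }
}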

3121
vendor/hashbrown/src/set.rs vendored Normal file

File diff suppressed because it is too large

2380
vendor/hashbrown/src/table.rs vendored Normal file

File diff suppressed because it is too large

38
vendor/hashbrown/src/util.rs vendored Normal file

@@ -0,0 +1,38 @@
// FIXME: Replace with `core::hint::{likely, unlikely}` once they are stable.
#[cfg(feature = "nightly")]
pub(crate) use core::intrinsics::{likely, unlikely};
#[cfg(not(feature = "nightly"))]
#[inline(always)]
#[cold]
fn cold_path() {}
#[cfg(not(feature = "nightly"))]
#[inline(always)]
pub(crate) fn likely(b: bool) -> bool {
if b {
true
} else {
cold_path();
false
}
}
#[cfg(not(feature = "nightly"))]
#[inline(always)]
pub(crate) fn unlikely(b: bool) -> bool {
if b {
cold_path();
true
} else {
false
}
}
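// Illustrative sketch of how these hints are meant to be used: wrap the
// branch condition, keeping the rarely taken arm on the `unlikely` side.
#[cfg(test)]
#[allow(dead_code)]
fn needs_grow_sketch(len: usize, capacity: usize) -> bool {
    if unlikely(len >= capacity) {
        // Cold path: the caller would have to grow first.
        true
    } else {
        false
    }
}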
// FIXME: use strict provenance functions once they are stable.
// Implement it with a transmute for now.
#[inline(always)]
#[allow(clippy::useless_transmute)] // clippy is wrong, cast and transmute are different here
pub(crate) fn invalid_mut<T>(addr: usize) -> *mut T {
unsafe { core::mem::transmute(addr) }
}