Vendor dependencies for 0.3.0 release

2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

vendor/foldhash/src/convenience.rs vendored Normal file

@@ -0,0 +1,73 @@
use super::fast::{FixedState, RandomState};
/// Type alias for [`std::collections::HashMap<K, V, foldhash::fast::RandomState>`].
pub type HashMap<K, V> = std::collections::HashMap<K, V, RandomState>;
/// Type alias for [`std::collections::HashSet<T, foldhash::fast::RandomState>`].
pub type HashSet<T> = std::collections::HashSet<T, RandomState>;
/// A convenience extension trait to enable [`HashMap::new`] for hash maps that use `foldhash`.
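///
/// A minimal usage sketch (the key and value types are arbitrary examples):
///
/// ```rust
/// use foldhash::{HashMap, HashMapExt};
///
/// let mut map: HashMap<&str, i32> = HashMap::new();
/// map.insert("answer", 42);
/// assert_eq!(map.get("answer"), Some(&42));
/// ```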
pub trait HashMapExt {
/// Creates an empty `HashMap`.
fn new() -> Self;
/// Creates an empty `HashMap` with at least the specified capacity.
fn with_capacity(capacity: usize) -> Self;
}
impl<K, V> HashMapExt for std::collections::HashMap<K, V, RandomState> {
#[inline(always)]
fn new() -> Self {
Self::with_hasher(RandomState::default())
}
#[inline(always)]
fn with_capacity(capacity: usize) -> Self {
Self::with_capacity_and_hasher(capacity, RandomState::default())
}
}
impl<K, V> HashMapExt for std::collections::HashMap<K, V, FixedState> {
#[inline(always)]
fn new() -> Self {
Self::with_hasher(FixedState::default())
}
#[inline(always)]
fn with_capacity(capacity: usize) -> Self {
Self::with_capacity_and_hasher(capacity, FixedState::default())
}
}
/// A convenience extension trait to enable [`HashSet::new`] for hash sets that use `foldhash`.
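///
/// A minimal usage sketch, mirroring [`HashMapExt`]:
///
/// ```rust
/// use foldhash::{HashSet, HashSetExt};
///
/// let mut set: HashSet<u32> = HashSet::with_capacity(8);
/// set.insert(7);
/// assert!(set.contains(&7));
/// ```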
pub trait HashSetExt {
/// Creates an empty `HashSet`.
fn new() -> Self;
/// Creates an empty `HashSet` with at least the specified capacity.
fn with_capacity(capacity: usize) -> Self;
}
impl<T> HashSetExt for std::collections::HashSet<T, RandomState> {
#[inline(always)]
fn new() -> Self {
Self::with_hasher(RandomState::default())
}
#[inline(always)]
fn with_capacity(capacity: usize) -> Self {
Self::with_capacity_and_hasher(capacity, RandomState::default())
}
}
impl<T> HashSetExt for std::collections::HashSet<T, FixedState> {
#[inline(always)]
fn new() -> Self {
Self::with_hasher(FixedState::default())
}
#[inline(always)]
fn with_capacity(capacity: usize) -> Self {
Self::with_capacity_and_hasher(capacity, FixedState::default())
}
}

vendor/foldhash/src/fast.rs vendored Normal file

@@ -0,0 +1,270 @@
//! The foldhash implementation optimized for speed.
use core::hash::{BuildHasher, Hasher};
use crate::seed::{gen_per_hasher_seed, GlobalSeed, SharedSeed};
use crate::{folded_multiply, hash_bytes_long, hash_bytes_medium, rotate_right, ARBITRARY3};
/// A [`Hasher`] instance implementing foldhash, optimized for speed.
///
/// While you can create one directly with [`FoldHasher::with_seed`], you
/// most likely want to use [`RandomState`], [`SeedableRandomState`] or
/// [`FixedState`] to create [`FoldHasher`]s.
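///
/// A minimal sketch of hashing through [`RandomState`] (the input value is
/// an arbitrary example):
///
/// ```rust
/// use core::hash::BuildHasher;
/// use foldhash::fast::RandomState;
///
/// let state = RandomState::default();
/// // The same state hashes equal inputs to equal outputs.
/// assert_eq!(state.hash_one("hi"), state.hash_one("hi"));
/// ```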
#[derive(Clone)]
pub struct FoldHasher {
accumulator: u64,
sponge: u128,
sponge_len: u8,
fold_seed: u64,
expand_seed: u64,
expand_seed2: u64,
expand_seed3: u64,
}
impl FoldHasher {
/// Initializes this [`FoldHasher`] with the given per-hasher seed and
/// [`SharedSeed`].
#[inline]
pub fn with_seed(per_hasher_seed: u64, shared_seed: &SharedSeed) -> FoldHasher {
FoldHasher {
accumulator: per_hasher_seed,
sponge: 0,
sponge_len: 0,
fold_seed: shared_seed.seeds[0],
expand_seed: shared_seed.seeds[1],
expand_seed2: shared_seed.seeds[2],
expand_seed3: shared_seed.seeds[3],
}
}
#[inline(always)]
fn write_num<T: Into<u128>>(&mut self, x: T) {
let bits: usize = 8 * core::mem::size_of::<T>();
if self.sponge_len as usize + bits > 128 {
// Sponge is full: fold its two 64-bit halves into the accumulator,
// then restart the sponge with the new value.
let lo = self.sponge as u64;
let hi = (self.sponge >> 64) as u64;
self.accumulator = folded_multiply(lo ^ self.accumulator, hi ^ self.fold_seed);
self.sponge = x.into();
self.sponge_len = bits as u8;
} else {
// The sponge still has room: splice the new value's bits in above
// the bits already buffered.
self.sponge |= x.into() << self.sponge_len;
self.sponge_len += bits as u8;
}
}
}
impl Hasher for FoldHasher {
#[inline(always)]
fn write(&mut self, bytes: &[u8]) {
// We perform overlapping reads in the byte hash which could lead to
// trivial length-extension attacks. These should be defeated by
// adding a length-dependent rotation on our unpredictable seed
// which costs only a single cycle (or none if executed with
// instruction-level parallelism).
let len = bytes.len();
let base_seed = rotate_right(self.accumulator, len as u32);
if len <= 16 {
let mut s0 = base_seed;
let mut s1 = self.expand_seed;
// XOR the input into s0, s1, then multiply and fold.
if len >= 8 {
s0 ^= u64::from_ne_bytes(bytes[0..8].try_into().unwrap());
s1 ^= u64::from_ne_bytes(bytes[len - 8..].try_into().unwrap());
} else if len >= 4 {
s0 ^= u32::from_ne_bytes(bytes[0..4].try_into().unwrap()) as u64;
s1 ^= u32::from_ne_bytes(bytes[len - 4..].try_into().unwrap()) as u64;
} else if len > 0 {
let lo = bytes[0];
let mid = bytes[len / 2];
let hi = bytes[len - 1];
s0 ^= lo as u64;
s1 ^= ((hi as u64) << 8) | mid as u64;
}
self.accumulator = folded_multiply(s0, s1);
} else if len < 256 {
self.accumulator = hash_bytes_medium(
bytes,
base_seed,
base_seed.wrapping_add(self.expand_seed),
self.fold_seed,
);
} else {
self.accumulator = hash_bytes_long(
bytes,
base_seed,
base_seed.wrapping_add(self.expand_seed),
base_seed.wrapping_add(self.expand_seed2),
base_seed.wrapping_add(self.expand_seed3),
self.fold_seed,
);
}
}
#[inline(always)]
fn write_u8(&mut self, i: u8) {
self.write_num(i);
}
#[inline(always)]
fn write_u16(&mut self, i: u16) {
self.write_num(i);
}
#[inline(always)]
fn write_u32(&mut self, i: u32) {
self.write_num(i);
}
#[inline(always)]
fn write_u64(&mut self, i: u64) {
self.write_num(i);
}
#[inline(always)]
fn write_u128(&mut self, i: u128) {
let lo = i as u64;
let hi = (i >> 64) as u64;
self.accumulator = folded_multiply(lo ^ self.accumulator, hi ^ self.fold_seed);
}
#[inline(always)]
fn write_usize(&mut self, i: usize) {
// u128 doesn't implement From<usize>.
#[cfg(target_pointer_width = "32")]
self.write_num(i as u32);
#[cfg(target_pointer_width = "64")]
self.write_num(i as u64);
}
#[inline(always)]
fn finish(&self) -> u64 {
if self.sponge_len > 0 {
let lo = self.sponge as u64;
let hi = (self.sponge >> 64) as u64;
folded_multiply(lo ^ self.accumulator, hi ^ self.fold_seed)
} else {
self.accumulator
}
}
}
/// A [`BuildHasher`] for [`fast::FoldHasher`](FoldHasher) that is randomly initialized.
#[derive(Copy, Clone, Debug)]
pub struct RandomState {
per_hasher_seed: u64,
global_seed: GlobalSeed,
}
impl Default for RandomState {
#[inline(always)]
fn default() -> Self {
Self {
per_hasher_seed: gen_per_hasher_seed(),
global_seed: GlobalSeed::new(),
}
}
}
impl BuildHasher for RandomState {
type Hasher = FoldHasher;
#[inline(always)]
fn build_hasher(&self) -> FoldHasher {
FoldHasher::with_seed(self.per_hasher_seed, self.global_seed.get())
}
}
/// A [`BuildHasher`] for [`fast::FoldHasher`](FoldHasher) that is randomly
/// initialized by default, but can also be initialized with a specific seed.
///
/// This can be useful for e.g. testing, but the downside is that this type
/// has a size of 16 bytes rather than the 8 bytes of [`RandomState`].
#[derive(Copy, Clone, Debug)]
pub struct SeedableRandomState {
per_hasher_seed: u64,
shared_seed: &'static SharedSeed,
}
impl Default for SeedableRandomState {
#[inline(always)]
fn default() -> Self {
Self::random()
}
}
impl SeedableRandomState {
/// Generates a random [`SeedableRandomState`], similar to [`RandomState`].
#[inline(always)]
pub fn random() -> Self {
Self {
per_hasher_seed: gen_per_hasher_seed(),
shared_seed: SharedSeed::global_random(),
}
}
/// Generates a fixed [`SeedableRandomState`], similar to [`FixedState`].
#[inline(always)]
pub fn fixed() -> Self {
Self {
per_hasher_seed: ARBITRARY3,
shared_seed: SharedSeed::global_fixed(),
}
}
/// Generates a [`SeedableRandomState`] with the given per-hasher seed
/// and [`SharedSeed`].
#[inline(always)]
pub fn with_seed(per_hasher_seed: u64, shared_seed: &'static SharedSeed) -> Self {
// XOR with ARBITRARY3 such that with_seed(0) matches default.
Self {
per_hasher_seed: per_hasher_seed ^ ARBITRARY3,
shared_seed,
}
}
}
impl BuildHasher for SeedableRandomState {
type Hasher = FoldHasher;
#[inline(always)]
fn build_hasher(&self) -> FoldHasher {
FoldHasher::with_seed(self.per_hasher_seed, self.shared_seed)
}
}
/// A [`BuildHasher`] for [`fast::FoldHasher`](FoldHasher) that always has the same fixed seed.
///
/// Not recommended unless you absolutely need determinism.
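///
/// A minimal sketch (the seed value is an arbitrary example); hashes are
/// stable within one build on one platform, but not across foldhash versions:
///
/// ```rust
/// use core::hash::BuildHasher;
/// use foldhash::fast::FixedState;
///
/// let a = FixedState::with_seed(123);
/// let b = FixedState::with_seed(123);
/// assert_eq!(a.hash_one("x"), b.hash_one("x"));
/// ```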
#[derive(Copy, Clone, Debug)]
pub struct FixedState {
per_hasher_seed: u64,
}
impl FixedState {
/// Creates a [`FixedState`] with the given per-hasher seed.
#[inline(always)]
pub const fn with_seed(per_hasher_seed: u64) -> Self {
// XOR with ARBITRARY3 such that with_seed(0) matches default.
Self {
per_hasher_seed: per_hasher_seed ^ ARBITRARY3,
}
}
}
impl Default for FixedState {
#[inline(always)]
fn default() -> Self {
Self {
per_hasher_seed: ARBITRARY3,
}
}
}
impl BuildHasher for FixedState {
type Hasher = FoldHasher;
#[inline(always)]
fn build_hasher(&self) -> FoldHasher {
FoldHasher::with_seed(self.per_hasher_seed, SharedSeed::global_fixed())
}
}

vendor/foldhash/src/lib.rs vendored Normal file

@@ -0,0 +1,284 @@
//! This crate provides foldhash, a fast, non-cryptographic, minimally
//! DoS-resistant hashing algorithm designed for computational uses such as
//! hashmaps, bloom filters, count sketching, etc.
//!
//! When you should **not** use foldhash:
//!
//! - You are afraid of people studying your long-running program's behavior
//! to reverse engineer its internal random state and using this knowledge to
//! create many colliding inputs for computational complexity attacks.
//!
//! - You expect foldhash to have a consistent output across versions or
//! platforms, such as for persistent file formats or communication protocols.
//!
//! - You are relying on foldhash's properties for any kind of security.
//! Foldhash is **not appropriate for any cryptographic purpose**.
//!
//! Foldhash has two variants, one optimized for speed which is ideal for data
//! structures such as hash maps and bloom filters, and one optimized for
//! statistical quality which is ideal for algorithms such as
//! [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog) and
//! [MinHash](https://en.wikipedia.org/wiki/MinHash).
//!
//! Foldhash can be used in a `#![no_std]` environment by disabling its default
//! `"std"` feature.
//!
//! # Usage
//!
//! The easiest way to use this crate with the standard library [`HashMap`] or
//! [`HashSet`] is to import them from `foldhash` instead, along with the
//! extension traits to make [`HashMap::new`] and [`HashMap::with_capacity`]
//! work out-of-the-box:
//!
//! ```rust
//! use foldhash::{HashMap, HashMapExt};
//!
//! let mut hm = HashMap::new();
//! hm.insert(42, "hello");
//! ```
//!
//! You can also avoid the convenience types and do it manually by initializing
//! a [`RandomState`](fast::RandomState), for example if you are using a different hash map
//! implementation like [`hashbrown`](https://docs.rs/hashbrown/):
//!
//! ```rust
//! use hashbrown::HashMap;
//! use foldhash::fast::RandomState;
//!
//! let mut hm = HashMap::with_hasher(RandomState::default());
//! hm.insert("foo", "bar");
//! ```
//!
//! The above methods are the recommended way to use foldhash; they
//! automatically give you a randomly seeded hasher instance. If you
//! absolutely must have determinism you can use [`FixedState`](fast::FixedState)
//! instead, but note that this makes you trivially vulnerable to HashDoS
//! attacks and might lead to quadratic runtime when moving data from one
//! hashmap/set into another:
//!
//! ```rust
//! use std::collections::HashSet;
//! use foldhash::fast::FixedState;
//!
//! let mut hm = HashSet::with_hasher(FixedState::with_seed(42));
//! hm.insert([1, 10, 100]);
//! ```
//!
//! If you rely on statistical properties of the hash for the correctness of
//! your algorithm, such as in [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog),
//! it is suggested to use the [`RandomState`](quality::RandomState)
//! or [`FixedState`](quality::FixedState) from the [`quality`] module instead
//! of the [`fast`] module. The latter is optimized purely for speed in hash
//! tables and has known statistical imperfections.
//!
//! Finally, you can also directly use the [`RandomState`](quality::RandomState)
//! or [`FixedState`](quality::FixedState) to manually hash items using the
//! [`BuildHasher`](std::hash::BuildHasher) trait:
//! ```rust
//! use std::hash::BuildHasher;
//! use foldhash::quality::RandomState;
//!
//! let random_state = RandomState::default();
//! let hash = random_state.hash_one("hello world");
//! ```
//!
//! ## Seeding
//!
//! Foldhash relies on a single 8-byte per-hasher seed, which should ideally
//! differ from instance to instance, and on a larger [`SharedSeed`] which
//! may be shared by many different instances.
//!
//! To reduce overhead, this [`SharedSeed`] is typically initialized once and
//! stored. To avoid each hashmap unnecessarily carrying a reference to this
//! value, foldhash provides three kinds of [`BuildHasher`](core::hash::BuildHasher)s
//! (both for [`fast`] and [`quality`]):
//!
//! 1. [`RandomState`](fast::RandomState), which always generates a
//! random per-hasher seed and implicitly stores a reference to [`SharedSeed::global_random`].
//! 2. [`FixedState`](fast::FixedState), which by default uses a fixed
//! per-hasher seed and implicitly stores a reference to [`SharedSeed::global_fixed`].
//! 3. [`SeedableRandomState`](fast::SeedableRandomState), which works like
//! [`RandomState`](fast::RandomState) by default but can be seeded in any manner.
//! This state must include an explicit reference to a [`SharedSeed`], and thus
//! this struct is 16 bytes as opposed to just 8 bytes for the previous two.
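//!
//! A minimal sketch contrasting the three (the `fast` variants are shown;
//! the [`quality`] module mirrors them, and the seed values are arbitrary
//! examples):
//!
//! ```rust
//! use std::hash::BuildHasher;
//! use foldhash::fast::{FixedState, RandomState, SeedableRandomState};
//!
//! let random = RandomState::default(); // fresh random seed, 8 bytes
//! let fixed = FixedState::with_seed(7); // fully deterministic, 8 bytes
//! let seeded = SeedableRandomState::fixed(); // deterministic here, 16 bytes
//! let _ = (random.hash_one(1u8), fixed.hash_one(1u8), seeded.hash_one(1u8));
//! ```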
#![cfg_attr(all(not(test), not(feature = "std")), no_std)]
#![warn(missing_docs)]
pub mod fast;
pub mod quality;
mod seed;
pub use seed::SharedSeed;
#[cfg(feature = "std")]
mod convenience;
#[cfg(feature = "std")]
pub use convenience::*;
// Arbitrary constants with high entropy. Hexadecimal digits of pi were used.
const ARBITRARY0: u64 = 0x243f6a8885a308d3;
const ARBITRARY1: u64 = 0x13198a2e03707344;
const ARBITRARY2: u64 = 0xa4093822299f31d0;
const ARBITRARY3: u64 = 0x082efa98ec4e6c89;
const ARBITRARY4: u64 = 0x452821e638d01377;
const ARBITRARY5: u64 = 0xbe5466cf34e90c6c;
const ARBITRARY6: u64 = 0xc0ac29b7c97c50dd;
const ARBITRARY7: u64 = 0x3f84d5b5b5470917;
const ARBITRARY8: u64 = 0x9216d5d98979fb1b;
const ARBITRARY9: u64 = 0xd1310ba698dfb5ac;
#[inline(always)]
const fn folded_multiply(x: u64, y: u64) -> u64 {
// The following code path is only fast if 64-bit to 128-bit widening
// multiplication is supported by the architecture. Most 64-bit
// architectures except SPARC64 and Wasm64 support it. However, the target
// pointer width doesn't always indicate that we are dealing with a 64-bit
// architecture, as there are ABIs that reduce the pointer width, especially
// on AArch64 and x86-64. WebAssembly (regardless of pointer width) supports
// 64-bit to 128-bit widening multiplication with the `wide-arithmetic`
// proposal.
#[cfg(any(
all(
target_pointer_width = "64",
not(any(target_arch = "sparc64", target_arch = "wasm64")),
),
target_arch = "aarch64",
target_arch = "x86_64",
all(target_family = "wasm", target_feature = "wide-arithmetic"),
))]
{
// We compute the full u64 x u64 -> u128 product, this is a single mul
// instruction on x86-64, one mul plus one mulhi on ARM64.
let full = (x as u128).wrapping_mul(y as u128);
let lo = full as u64;
let hi = (full >> 64) as u64;
// The middle bits of the full product fluctuate the most with small
// changes in the input. This is the top bits of lo and the bottom bits
// of hi. We can thus make the entire output fluctuate with small
// changes to the input by XOR'ing these two halves.
lo ^ hi
}
#[cfg(not(any(
all(
target_pointer_width = "64",
not(any(target_arch = "sparc64", target_arch = "wasm64")),
),
target_arch = "aarch64",
target_arch = "x86_64",
all(target_family = "wasm", target_feature = "wide-arithmetic"),
)))]
{
// u64 x u64 -> u128 product is quite expensive on 32-bit.
// We approximate it by expanding the multiplication and eliminating
// carries by replacing additions with XORs:
// (2^32 hx + lx)*(2^32 hy + ly) =
// 2^64 hx*hy + 2^32 (hx*ly + lx*hy) + lx*ly ~=
// 2^64 hx*hy ^ 2^32 (hx*ly ^ lx*hy) ^ lx*ly
// Which when folded becomes:
// (hx*hy ^ lx*ly) ^ (hx*ly ^ lx*hy).rotate_right(32)
let lx = x as u32;
let ly = y as u32;
let hx = (x >> 32) as u32;
let hy = (y >> 32) as u32;
let ll = (lx as u64).wrapping_mul(ly as u64);
let lh = (lx as u64).wrapping_mul(hy as u64);
let hl = (hx as u64).wrapping_mul(ly as u64);
let hh = (hx as u64).wrapping_mul(hy as u64);
(hh ^ ll) ^ (hl ^ lh).rotate_right(32)
}
}
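// A hedged sanity sketch (not part of the vendored source): on targets that
// take the widening-multiply path above, the fold equals the XOR of the high
// and low halves of the full 128-bit product.
#[cfg(all(test, target_arch = "x86_64"))]
mod folded_multiply_sketch {
    #[test]
    fn fold_matches_wide_product() {
        let (x, y) = (0x0123_4567_89ab_cdef_u64, 0xfedc_ba98_7654_3210_u64);
        let full = (x as u128).wrapping_mul(y as u128);
        let expected = (full as u64) ^ ((full >> 64) as u64);
        assert_eq!(super::folded_multiply(x, y), expected);
    }
}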
#[inline(always)]
const fn rotate_right(x: u64, r: u32) -> u64 {
#[cfg(any(
target_pointer_width = "64",
target_arch = "aarch64",
target_arch = "x86_64",
target_family = "wasm",
))]
{
x.rotate_right(r)
}
#[cfg(not(any(
target_pointer_width = "64",
target_arch = "aarch64",
target_arch = "x86_64",
target_family = "wasm",
)))]
{
// On platforms without 64-bit arithmetic rotation can be slow, rotate
// each 32-bit half independently.
let lo = (x as u32).rotate_right(r);
let hi = ((x >> 32) as u32).rotate_right(r);
((hi as u64) << 32) | lo as u64
}
}
/// Hashes byte slices of length >= 16; behavior is unspecified when bytes.len() < 16.
fn hash_bytes_medium(bytes: &[u8], mut s0: u64, mut s1: u64, fold_seed: u64) -> u64 {
// Process 32 bytes per iteration, 16 bytes from the start, 16 bytes from
// the end. On the last iteration these two chunks can overlap, but that is
// perfectly fine.
let left_to_right = bytes.chunks_exact(16);
let mut right_to_left = bytes.rchunks_exact(16);
for lo in left_to_right {
let hi = right_to_left.next().unwrap();
let unconsumed_start = lo.as_ptr();
let unconsumed_end = hi.as_ptr_range().end;
// Stop once the two cursors have crossed; at that point all bytes have
// been consumed (the final pair of chunks may overlap, which is fine).
if unconsumed_start >= unconsumed_end {
break;
}
let a = u64::from_ne_bytes(lo[0..8].try_into().unwrap());
let b = u64::from_ne_bytes(lo[8..16].try_into().unwrap());
let c = u64::from_ne_bytes(hi[0..8].try_into().unwrap());
let d = u64::from_ne_bytes(hi[8..16].try_into().unwrap());
s0 = folded_multiply(a ^ s0, c ^ fold_seed);
s1 = folded_multiply(b ^ s1, d ^ fold_seed);
}
s0 ^ s1
}
/// Hashes byte slices of length >= 16; behavior is unspecified when bytes.len() < 16.
#[cold]
#[inline(never)]
fn hash_bytes_long(
bytes: &[u8],
mut s0: u64,
mut s1: u64,
mut s2: u64,
mut s3: u64,
fold_seed: u64,
) -> u64 {
let chunks = bytes.chunks_exact(64);
let remainder = chunks.remainder().len();
for chunk in chunks {
let a = u64::from_ne_bytes(chunk[0..8].try_into().unwrap());
let b = u64::from_ne_bytes(chunk[8..16].try_into().unwrap());
let c = u64::from_ne_bytes(chunk[16..24].try_into().unwrap());
let d = u64::from_ne_bytes(chunk[24..32].try_into().unwrap());
let e = u64::from_ne_bytes(chunk[32..40].try_into().unwrap());
let f = u64::from_ne_bytes(chunk[40..48].try_into().unwrap());
let g = u64::from_ne_bytes(chunk[48..56].try_into().unwrap());
let h = u64::from_ne_bytes(chunk[56..64].try_into().unwrap());
s0 = folded_multiply(a ^ s0, e ^ fold_seed);
s1 = folded_multiply(b ^ s1, f ^ fold_seed);
s2 = folded_multiply(c ^ s2, g ^ fold_seed);
s3 = folded_multiply(d ^ s3, h ^ fold_seed);
}
s0 ^= s2;
s1 ^= s3;
if remainder > 0 {
hash_bytes_medium(&bytes[bytes.len() - remainder.max(16)..], s0, s1, fold_seed)
} else {
s0 ^ s1
}
}

vendor/foldhash/src/quality.rs vendored Normal file

@@ -0,0 +1,174 @@
//! The foldhash implementation optimized for quality.
use core::hash::{BuildHasher, Hasher};
use crate::seed::SharedSeed;
use crate::{fast, folded_multiply, ARBITRARY0, ARBITRARY8};
/// A [`Hasher`] instance implementing foldhash, optimized for quality.
///
/// While you can create one directly with [`FoldHasher::with_seed`], you
/// most likely want to use [`RandomState`], [`SeedableRandomState`] or
/// [`FixedState`] to create [`FoldHasher`]s.
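///
/// A minimal sketch via [`RandomState`], appropriate when the hash bits feed
/// statistical algorithms (the input is an arbitrary example):
///
/// ```rust
/// use core::hash::BuildHasher;
/// use foldhash::quality::RandomState;
///
/// let state = RandomState::default();
/// let _bits = state.hash_one([1u8, 2, 3]);
/// ```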
#[derive(Clone)]
pub struct FoldHasher {
pub(crate) inner: fast::FoldHasher,
}
impl FoldHasher {
/// Initializes this [`FoldHasher`] with the given per-hasher seed and
/// [`SharedSeed`].
#[inline(always)]
pub fn with_seed(per_hasher_seed: u64, shared_seed: &SharedSeed) -> FoldHasher {
FoldHasher {
inner: fast::FoldHasher::with_seed(per_hasher_seed, shared_seed),
}
}
}
impl Hasher for FoldHasher {
#[inline(always)]
fn write(&mut self, bytes: &[u8]) {
self.inner.write(bytes);
}
#[inline(always)]
fn write_u8(&mut self, i: u8) {
self.inner.write_u8(i);
}
#[inline(always)]
fn write_u16(&mut self, i: u16) {
self.inner.write_u16(i);
}
#[inline(always)]
fn write_u32(&mut self, i: u32) {
self.inner.write_u32(i);
}
#[inline(always)]
fn write_u64(&mut self, i: u64) {
self.inner.write_u64(i);
}
#[inline(always)]
fn write_u128(&mut self, i: u128) {
self.inner.write_u128(i);
}
#[inline(always)]
fn write_usize(&mut self, i: usize) {
self.inner.write_usize(i);
}
#[inline(always)]
fn finish(&self) -> u64 {
folded_multiply(self.inner.finish(), ARBITRARY0)
}
}
/// A [`BuildHasher`] for [`quality::FoldHasher`](FoldHasher) that is randomly initialized.
#[derive(Copy, Clone, Default, Debug)]
pub struct RandomState {
inner: fast::RandomState,
}
impl BuildHasher for RandomState {
type Hasher = FoldHasher;
#[inline(always)]
fn build_hasher(&self) -> FoldHasher {
FoldHasher {
inner: self.inner.build_hasher(),
}
}
}
/// A [`BuildHasher`] for [`quality::FoldHasher`](FoldHasher) that is randomly
/// initialized by default, but can also be initialized with a specific seed.
///
/// This can be useful for e.g. testing, but the downside is that this type
/// has a size of 16 bytes rather than the 8 bytes of [`RandomState`].
#[derive(Copy, Clone, Default, Debug)]
pub struct SeedableRandomState {
inner: fast::SeedableRandomState,
}
impl SeedableRandomState {
/// Generates a random [`SeedableRandomState`], similar to [`RandomState`].
#[inline(always)]
pub fn random() -> Self {
Self {
inner: fast::SeedableRandomState::random(),
}
}
/// Generates a fixed [`SeedableRandomState`], similar to [`FixedState`].
#[inline(always)]
pub fn fixed() -> Self {
Self {
inner: fast::SeedableRandomState::fixed(),
}
}
/// Generates a [`SeedableRandomState`] with the given per-hasher seed
/// and [`SharedSeed`].
#[inline(always)]
pub fn with_seed(per_hasher_seed: u64, shared_seed: &'static SharedSeed) -> Self {
Self {
// We do an additional folded multiply with the seed here for
// the quality hash to ensure better independence between seed
// and hash.
inner: fast::SeedableRandomState::with_seed(
folded_multiply(per_hasher_seed, ARBITRARY8),
shared_seed,
),
}
}
}
impl BuildHasher for SeedableRandomState {
type Hasher = FoldHasher;
#[inline(always)]
fn build_hasher(&self) -> FoldHasher {
FoldHasher {
inner: self.inner.build_hasher(),
}
}
}
/// A [`BuildHasher`] for [`quality::FoldHasher`](FoldHasher) that always has the same fixed seed.
///
/// Not recommended unless you absolutely need determinism.
#[derive(Copy, Clone, Default, Debug)]
pub struct FixedState {
inner: fast::FixedState,
}
impl FixedState {
/// Creates a [`FixedState`] with the given per-hasher seed.
#[inline(always)]
pub const fn with_seed(per_hasher_seed: u64) -> Self {
Self {
// We do an additional folded multiply with the seed here for
// the quality hash to ensure better independence between seed
// and hash. If the seed is zero the folded multiply is zero,
// preserving with_seed(0) == default().
inner: fast::FixedState::with_seed(folded_multiply(per_hasher_seed, ARBITRARY8)),
}
}
}
impl BuildHasher for FixedState {
type Hasher = FoldHasher;
#[inline(always)]
fn build_hasher(&self) -> FoldHasher {
FoldHasher {
inner: self.inner.build_hasher(),
}
}
}

vendor/foldhash/src/seed.rs vendored Normal file

@@ -0,0 +1,267 @@
// These constants may end up unused depending on platform support.
#[allow(unused)]
use crate::{ARBITRARY1, ARBITRARY9};
use super::{folded_multiply, ARBITRARY2, ARBITRARY4, ARBITRARY5, ARBITRARY6, ARBITRARY7};
/// Used for FixedState, and RandomState if atomics for dynamic init are unavailable.
const FIXED_GLOBAL_SEED: SharedSeed = SharedSeed {
seeds: [ARBITRARY4, ARBITRARY5, ARBITRARY6, ARBITRARY7],
};
pub(crate) fn gen_per_hasher_seed() -> u64 {
// We initialize the per-hasher seed with the stack pointer to ensure
// different threads have different seeds, with the side benefit that
// stack address randomization gives us further non-determinism.
let mut per_hasher_seed = 0;
let stack_ptr = core::ptr::addr_of!(per_hasher_seed) as u64;
per_hasher_seed = stack_ptr;
// If we have the standard library available we use a thread-local
// state to ensure RandomStates are different with high probability,
// even if the call stack is the same.
#[cfg(feature = "std")]
{
use std::cell::Cell;
thread_local! {
static PER_HASHER_NONDETERMINISM: Cell<u64> = const { Cell::new(0) };
}
PER_HASHER_NONDETERMINISM.with(|cell| {
let nondeterminism = cell.get();
per_hasher_seed = folded_multiply(per_hasher_seed, ARBITRARY1 ^ nondeterminism);
cell.set(per_hasher_seed);
})
};
// If we don't have the standard library we use a global atomic
// rather than a thread-local state.
//
// PER_HASHER_NONDETERMINISM is loaded and updated in a racy manner,
// but this doesn't matter in practice - it is impossible that two
// different threads have the same stack location, so they'll almost
// surely generate different seeds, and provide a different possible
// update for PER_HASHER_NONDETERMINISM. If we used a proper
// fetch_add atomic update instead, there would be a larger chance of
// problematic contention.
//
// We use usize instead of 64-bit atomics for best platform support.
#[cfg(not(feature = "std"))]
{
use core::sync::atomic::{AtomicUsize, Ordering};
static PER_HASHER_NONDETERMINISM: AtomicUsize = AtomicUsize::new(0);
let nondeterminism = PER_HASHER_NONDETERMINISM.load(Ordering::Relaxed) as u64;
per_hasher_seed = folded_multiply(per_hasher_seed, ARBITRARY1 ^ nondeterminism);
PER_HASHER_NONDETERMINISM.store(per_hasher_seed as usize, Ordering::Relaxed);
}
// One extra mixing step to ensure good random bits.
folded_multiply(per_hasher_seed, ARBITRARY2)
}
/// A random seed intended to be shared by many different foldhash instances.
///
/// This seed is consumed by [`FoldHasher::with_seed`](crate::fast::FoldHasher::with_seed),
/// and [`SeedableRandomState::with_seed`](crate::fast::SeedableRandomState::with_seed).
#[derive(Clone, Debug)]
pub struct SharedSeed {
pub(crate) seeds: [u64; 4],
}
impl SharedSeed {
/// Returns the globally shared randomly initialized [`SharedSeed`] as used
/// by [`RandomState`](crate::fast::RandomState).
#[inline(always)]
pub fn global_random() -> &'static SharedSeed {
global::GlobalSeed::new().get()
}
/// Returns the globally shared fixed [`SharedSeed`] as used
/// by [`FixedState`](crate::fast::FixedState).
#[inline(always)]
pub const fn global_fixed() -> &'static SharedSeed {
&FIXED_GLOBAL_SEED
}
/// Generates a new [`SharedSeed`] from a single 64-bit seed.
///
/// Note that this is somewhat expensive, so it is suggested to re-use the
/// [`SharedSeed`] as much as possible, using the per-hasher seed to
/// differentiate between hash instances.
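///
/// A minimal sketch of the suggested reuse pattern (the seed values are
/// arbitrary examples):
///
/// ```rust
/// use core::hash::BuildHasher;
/// use foldhash::SharedSeed;
/// use foldhash::fast::SeedableRandomState;
///
/// // Derive the expensive SharedSeed once, at compile time...
/// static SEED: SharedSeed = SharedSeed::from_u64(0x1234);
///
/// // ...then differentiate hashers cheaply via the per-hasher seed.
/// let s1 = SeedableRandomState::with_seed(1, &SEED);
/// let s2 = SeedableRandomState::with_seed(2, &SEED);
/// let _ = (s1.hash_one("a"), s2.hash_one("a"));
/// ```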
pub const fn from_u64(seed: u64) -> Self {
macro_rules! mix {
($x: expr) => {
folded_multiply($x, ARBITRARY9)
};
}
let seed_a = mix!(mix!(mix!(seed)));
let seed_b = mix!(mix!(mix!(seed_a)));
let seed_c = mix!(mix!(mix!(seed_b)));
let seed_d = mix!(mix!(mix!(seed_c)));
// Zeroes form a weak point for the multiply-mix, and zeroes tend to be
// a common input. So we want our global seeds that are XOR'ed with the
// input to always be non-zero. To also ensure there is always a good spread
// of bits, we give up 3 bits of entropy and simply force some bits on.
const FORCED_ONES: u64 = (1 << 63) | (1 << 31) | 1;
Self {
seeds: [
seed_a | FORCED_ONES,
seed_b | FORCED_ONES,
seed_c | FORCED_ONES,
seed_d | FORCED_ONES,
],
}
}
}
#[cfg(target_has_atomic = "8")]
mod global {
use super::*;
use core::cell::UnsafeCell;
use core::sync::atomic::{AtomicU8, Ordering};
fn generate_global_seed() -> SharedSeed {
let mix = |seed: u64, x: u64| folded_multiply(seed ^ x, ARBITRARY9);
// Use address space layout randomization as our main randomness source.
// This isn't great, but we don't advertise HashDoS resistance in the first
// place. This is a whole lot better than nothing, at near zero cost with
// no dependencies.
let mut seed = 0;
let stack_ptr = &seed as *const _;
let func_ptr = generate_global_seed;
let static_ptr = &GLOBAL_SEED_STORAGE as *const _;
seed = mix(seed, stack_ptr as usize as u64);
seed = mix(seed, func_ptr as usize as u64);
seed = mix(seed, static_ptr as usize as u64);
// If we have the standard library available, augment entropy with the
// current time and an address from the allocator.
#[cfg(feature = "std")]
{
#[cfg(not(any(
miri,
all(target_family = "wasm", target_os = "unknown"),
target_os = "zkvm"
)))]
if let Ok(duration) = std::time::UNIX_EPOCH.elapsed() {
seed = mix(seed, duration.subsec_nanos() as u64);
seed = mix(seed, duration.as_secs());
}
let box_ptr = &*Box::new(0u8) as *const _;
seed = mix(seed, box_ptr as usize as u64);
}
SharedSeed::from_u64(seed)
}
// Now all the below code purely exists to cache the above seed as
// efficiently as possible. Even if we weren't a no_std crate and had access to
// OnceLock, we don't want to check whether the global is set each time we
// hash an object, so we hand-roll a global storage where type safety allows us
// to assume the storage is initialized after construction.
struct GlobalSeedStorage {
state: AtomicU8,
seed: UnsafeCell<SharedSeed>,
}
const UNINIT: u8 = 0;
const LOCKED: u8 = 1;
const INIT: u8 = 2;
// SAFETY: we only mutate the UnsafeCells when state is in the thread-exclusive
// LOCKED state, and only read the UnsafeCells when state is in the
// once-achieved-eternally-preserved state INIT.
unsafe impl Sync for GlobalSeedStorage {}
static GLOBAL_SEED_STORAGE: GlobalSeedStorage = GlobalSeedStorage {
state: AtomicU8::new(UNINIT),
seed: UnsafeCell::new(SharedSeed { seeds: [0; 4] }),
};
/// An object representing an initialized global seed.
///
/// It does not actually store the seed inside itself; it is a zero-sized type.
/// This prevents inflating the RandomState size and, in turn, HashMap's size.
#[derive(Copy, Clone, Debug)]
pub struct GlobalSeed {
// So we can't accidentally type GlobalSeed { } within this crate.
_no_accidental_unsafe_init: (),
}
impl GlobalSeed {
#[inline(always)]
pub fn new() -> Self {
if GLOBAL_SEED_STORAGE.state.load(Ordering::Acquire) != INIT {
Self::init_slow()
}
Self {
_no_accidental_unsafe_init: (),
}
}
#[cold]
#[inline(never)]
fn init_slow() {
// Generate seed outside of critical section.
let seed = generate_global_seed();
loop {
match GLOBAL_SEED_STORAGE.state.compare_exchange_weak(
UNINIT,
LOCKED,
Ordering::Acquire,
Ordering::Acquire,
) {
Ok(_) => unsafe {
// SAFETY: we just acquired an exclusive lock.
*GLOBAL_SEED_STORAGE.seed.get() = seed;
GLOBAL_SEED_STORAGE.state.store(INIT, Ordering::Release);
return;
},
Err(INIT) => return,
// Yes, it's a spin loop. We need to support no_std (so no easy
// access to proper locks), this is a one-time-per-program
// initialization, and the critical section is only a few
// store instructions, so it'll be fine.
_ => core::hint::spin_loop(),
}
}
}
#[inline(always)]
pub fn get(self) -> &'static SharedSeed {
// SAFETY: our constructor ensured we are in the INIT state and thus
// this raw read does not race with any write.
unsafe { &*GLOBAL_SEED_STORAGE.seed.get() }
}
}
}
#[cfg(not(target_has_atomic = "8"))]
mod global {
use super::*;
#[derive(Copy, Clone, Debug)]
pub struct GlobalSeed {}
impl GlobalSeed {
#[inline(always)]
pub fn new() -> Self {
Self {}
}
#[inline(always)]
pub fn get(self) -> &'static SharedSeed {
&super::FIXED_GLOBAL_SEED
}
}
}
pub(crate) use global::GlobalSeed;