Vendor dependencies for 0.3.0 release

This commit is contained in:
2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

37
vendor/hash32/src/fnv.rs vendored Normal file
View File

@@ -0,0 +1,37 @@
use crate::Hasher as _;
// FNV-1a 32-bit parameters: the offset basis and the FNV prime
// (see http://www.isthe.com/chongo/tech/comp/fnv/).
const BASIS: u32 = 0x811c9dc5;
const PRIME: u32 = 0x1000193;

/// 32-bit Fowler-Noll-Vo hasher
pub struct Hasher {
    // Running FNV-1a state; seeded with `BASIS`, updated per byte in `write`.
    state: u32,
}

impl Default for Hasher {
    /// Creates a hasher seeded with the FNV offset basis.
    fn default() -> Self {
        Hasher { state: BASIS }
    }
}
impl crate::Hasher for Hasher {
    #[inline]
    fn finish32(&self) -> u32 {
        // The running state is the hash itself; FNV-1a has no finalization step.
        self.state
    }
}
impl core::hash::Hasher for Hasher {
    /// Folds each byte into the state: XOR with the byte, then multiply by
    /// the FNV prime (the FNV-1a ordering).
    #[inline]
    fn write(&mut self, bytes: &[u8]) {
        self.state = bytes.iter().fold(self.state, |state, &byte| {
            (state ^ u32::from(byte)).wrapping_mul(PRIME)
        });
    }

    /// Zero-extends the 32-bit result, per the `hash32::Hasher` contract.
    #[inline]
    fn finish(&self) -> u64 {
        u64::from(self.finish32())
    }
}

141
vendor/hash32/src/lib.rs vendored Normal file
View File

@@ -0,0 +1,141 @@
//! 32-bit hashing algorithms
//!
//! # Why?
//!
//! Because 32-bit architectures are a thing (e.g. ARM Cortex-M) and you don't want your hashing
//! function to pull in a bunch of slow 64-bit compiler intrinsics (software implementations of
//! 64-bit operations).
//!
//! # Relationship to `core::hash`
//!
//! This crate extends [`core::hash`] with a 32-bit version of `Hasher`, which extends
//! `core::hash::Hasher`. It requires that the hasher only performs 32-bit operations when computing
//! the hash, and adds [`finish32`] to get the hasher's result as a `u32`. The standard `finish`
//! method should just zero-extend this result.
//!
//! Since it extends `core::hash::Hasher`, `Hasher` can be used with any type which implements the
//! standard `Hash` trait.
//!
//! This crate also adds a version of `BuildHasherDefault` with a const constructor, to work around
//! the `core` version's lack of one.
//!
//! [`core::hash`]: https://doc.rust-lang.org/std/hash/index.html
//! [`finish32`]: crate::Hasher::finish32
//!
//! # Hashers
//!
//! This crate provides implementations of the following 32-bit hashing algorithms:
//!
//! - [Fowler-Noll-Vo](struct.FnvHasher.html)
//! - [MurmurHash3](struct.Murmur3Hasher.html)
//!
//! # Generic code
//!
//! In generic code, the trait bound `H: core::hash::Hasher` accepts *both* 64-bit hashers like
//! `std::collections::hash_map::DefaultHasher` *and* 32-bit hashers like the ones defined in this
//! crate (`hash32::FnvHasher` and `hash32::Murmur3Hasher`).
//!
//! The trait bound `H: hash32::Hasher` is *more* restrictive as it only accepts 32-bit hashers.
//!
//! The `BuildHasherDefault<H>` type implements the `core::hash::BuildHasher` trait, so it can
//! construct both 32-bit and 64-bit hashers. To constrain the type to only produce 32-bit hashers,
//! you can add the trait bound `H::Hasher: hash32::Hasher`.
//!
//! # MSRV
//!
//! This crate is guaranteed to compile on latest stable Rust. It *might* compile on older
//! versions but that may change in any new patch release.
#![deny(missing_docs)]
#![deny(warnings)]
#![no_std]
extern crate byteorder;
use core::fmt;
use core::hash::BuildHasher;
use core::marker::PhantomData;
pub use fnv::Hasher as FnvHasher;
pub use murmur3::Hasher as Murmur3Hasher;
mod fnv;
mod murmur3;
/// A copy of [`core::hash::BuildHasherDefault`][0], but with a const constructor.
///
/// This will eventually be deprecated once the version in `core` becomes const-constructible
/// (presumably using `const Default`).
///
/// [0]: https://doc.rust-lang.org/core/hash/struct.BuildHasherDefault.html
pub struct BuildHasherDefault<H> {
    // Zero-sized: only carries the hasher type, never a value.
    _marker: PhantomData<H>,
}

impl<H> BuildHasherDefault<H> {
    /// `const` constructor
    pub const fn new() -> Self {
        Self {
            _marker: PhantomData,
        }
    }
}

// The impls below are written by hand rather than derived so that they apply
// for *every* `H`, without the `H: Trait` bounds `#[derive]` would add.

impl<H> Default for BuildHasherDefault<H> {
    fn default() -> Self {
        Self::new()
    }
}

impl<H> Clone for BuildHasherDefault<H> {
    fn clone(&self) -> Self {
        Self::new()
    }
}

impl<H> PartialEq for BuildHasherDefault<H> {
    fn eq(&self, _: &Self) -> bool {
        // All instances are identical (the type is zero-sized).
        true
    }
}

impl<H> Eq for BuildHasherDefault<H> {}

impl<H> fmt::Debug for BuildHasherDefault<H> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.pad("BuildHasherDefault")
    }
}

impl<H> BuildHasher for BuildHasherDefault<H>
where
    H: Default + core::hash::Hasher,
{
    type Hasher = H;

    fn build_hasher(&self) -> Self::Hasher {
        H::default()
    }
}
/// An extension of [core::hash::Hasher][0] for hashers which use 32 bits.
///
/// For hashers which implement this trait, the standard `finish` method should just return a
/// zero-extended version of the result of `finish32`.
///
/// See `FnvHasher` and `Murmur3Hasher` in this crate for implementations.
///
/// [0]: https://doc.rust-lang.org/core/hash/trait.Hasher.html
///
/// # Contract
///
/// Implementers of this trait must *not* perform any 64-bit (or 128-bit) operation while computing
/// the hash.
pub trait Hasher: core::hash::Hasher {
    /// The equivalent of [`core::hash::Hasher.finish`][0] for 32-bit hashers.
    ///
    /// This returns the hash directly; `finish` zero-extends it to 64 bits for compatibility.
    ///
    /// [0]: https://doc.rust-lang.org/std/hash/trait.Hasher.html#tymethod.finish
    fn finish32(&self) -> u32;
}

206
vendor/hash32/src/murmur3.rs vendored Normal file
View File

@@ -0,0 +1,206 @@
use core::slice;
use core::mem::MaybeUninit;
use byteorder::{ByteOrder, LE};
use crate::Hasher as _;
/// 32-bit MurmurHash3 hasher
pub struct Hasher {
    // Staging area for up to 3 bytes that do not yet form a complete block.
    buf: Buffer,
    // Number of valid bytes currently staged in `buf` (0..=3).
    index: Index,
    // Total number of bytes fed to the hasher, folded in during finalization.
    // NOTE(review): stored as u32, so it truncates for inputs >= 4 GiB.
    processed: u32,
    // Running hash state.
    state: State,
}
// Running 32-bit hash state, updated one block at a time by `process_block`.
struct State(u32);

// 4-byte staging buffer; only the first `index` bytes are initialized.
#[derive(Clone, Copy)]
#[repr(align(4))]
struct Buffer {
    bytes: MaybeUninit<[u8; 4]>,
}
// Fill level of the 4-byte staging buffer, restricted to 0..=3 by the type.
#[derive(Clone, Copy, PartialEq)]
enum Index {
    _0,
    _1,
    _2,
    _3,
}

impl Index {
    /// Returns the fill level as a plain `usize`.
    fn usize(&self) -> usize {
        // The variants carry no data, so the default discriminants 0..=3
        // match the values the original `match` produced.
        *self as usize
    }
}

impl From<usize> for Index {
    /// Maps `x` modulo 4 onto the corresponding variant.
    fn from(x: usize) -> Self {
        match x % 4 {
            0 => Index::_0,
            1 => Index::_1,
            2 => Index::_2,
            // `x % 4` can only be 0..=3, so the remaining case is 3.
            _ => Index::_3,
        }
    }
}
impl Hasher {
    /// Copies `buf` (0-3 bytes) into the staging buffer at the current fill
    /// position and advances `index`.
    ///
    /// Callers must ensure `self.index.usize() + buf.len() <= 4`; `write`
    /// upholds this by only pushing tails shorter than a full block.
    fn push(&mut self, buf: &[u8]) {
        let start = self.index.usize();
        let len = buf.len();
        // NOTE(unsafe) avoid calling `memcpy` on a 0-3 byte copy
        // self.buf.bytes[start..start+len].copy_from(buf);
        // NOTE(review): `assume_init_mut` on a partially-uninitialized buffer
        // is dubious under strict `MaybeUninit` rules — writing through
        // `as_mut_ptr()` would be cleaner; confirm against Miri.
        for i in 0..len {
            // SAFETY: start + len <= 4 (caller contract), so start + i is
            // within the 4-byte buffer and i is within `buf`.
            unsafe {
                *self.buf.bytes.assume_init_mut().get_unchecked_mut(start + i) = *buf.get_unchecked(i);
            }
        }
        self.index = Index::from(start + len);
    }
}
impl Default for Hasher {
    /// Creates an empty hasher: zeroed state, no buffered or processed bytes.
    #[allow(deprecated)]
    fn default() -> Self {
        Self {
            state: State(0),
            processed: 0,
            index: Index::_0,
            buf: Buffer {
                bytes: MaybeUninit::uninit(),
            },
        }
    }
}
impl crate::Hasher for Hasher {
    fn finish32(&self) -> u32 {
        // tail: fold in the 0-3 bytes still sitting in the staging buffer.
        // `index` is the number of bytes `push` wrote, so each arm only reads
        // initialized positions.
        // NOTE(review): `assume_init_ref` on a partially-initialized buffer is
        // dubious under strict `MaybeUninit` rules — confirm against Miri.
        let mut state = match self.index {
            Index::_3 => {
                let mut block = 0;
                // SAFETY: bytes [0..3) were initialized by `push` (index == 3).
                unsafe {
                    block ^= u32::from(self.buf.bytes.assume_init_ref()[2]) << 16;
                    block ^= u32::from(self.buf.bytes.assume_init_ref()[1]) << 8;
                    block ^= u32::from(self.buf.bytes.assume_init_ref()[0]);
                }
                self.state.0 ^ pre_mix(block)
            }
            Index::_2 => {
                let mut block = 0;
                // SAFETY: bytes [0..2) were initialized by `push` (index == 2).
                unsafe {
                    block ^= u32::from(self.buf.bytes.assume_init_ref()[1]) << 8;
                    block ^= u32::from(self.buf.bytes.assume_init_ref()[0]);
                }
                self.state.0 ^ pre_mix(block)
            }
            Index::_1 => {
                let mut block = 0;
                // SAFETY: byte 0 was initialized by `push` (index == 1).
                unsafe {
                    block ^= u32::from(self.buf.bytes.assume_init_ref()[0]);
                }
                self.state.0 ^ pre_mix(block)
            }
            // Empty buffer: nothing to fold in.
            Index::_0 => self.state.0,
        };
        // finalization mix: xor in the total length, then avalanche with the
        // standard MurmurHash3 fmix32 constants (0x85ebca6b, 0xc2b2ae35).
        state ^= self.processed;
        state ^= state >> 16;
        state = state.wrapping_mul(0x85ebca6b);
        state ^= state >> 13;
        state = state.wrapping_mul(0xc2b2ae35);
        state ^= state >> 16;
        state
    }
}
impl core::hash::Hasher for Hasher {
    #[inline]
    fn write(&mut self, bytes: &[u8]) {
        let len = bytes.len();
        // Total length is folded into the hash during finalization.
        // NOTE(review): `len as u32` truncates and `+=` can overflow (panic in
        // debug builds) for inputs >= 4 GiB — confirm inputs stay below that.
        self.processed += len as u32;
        // If the staging buffer holds a partial block, try to complete it with
        // the first bytes of `bytes` before block-processing the remainder.
        let body = if self.index == Index::_0 {
            bytes
        } else {
            let index = self.index.usize();
            if len + index >= 4 {
                // we can complete a block using the data left in the buffer
                // NOTE(unsafe) avoid panicking branch (`slice_index_len_fail`)
                // let (head, body) = bytes.split_at(4 - index);
                let mid = 4 - index;
                // SAFETY: len + index >= 4 implies mid <= len, so both raw
                // slices stay within `bytes`.
                let head = unsafe { slice::from_raw_parts(bytes.as_ptr(), mid) };
                let body = unsafe {
                    slice::from_raw_parts(bytes.as_ptr().offset(mid as isize), len - mid)
                };
                // NOTE(unsafe) avoid calling `memcpy` on a 0-3 byte copy
                // self.buf.bytes[index..].copy_from_slice(head);
                for i in 0..4 - index {
                    // SAFETY: index + i < 4, within the 4-byte buffer; i is
                    // within `head` (head.len() == 4 - index).
                    unsafe {
                        *self.buf.bytes.assume_init_mut().get_unchecked_mut(index + i) = *head.get_unchecked(i);
                    }
                }
                self.index = Index::_0;
                // The buffer now holds a complete, fully-initialized block.
                self.state.process_block(&self.buf.bytes);
                body
            } else {
                // Not enough data to complete a block; stash everything below.
                bytes
            }
        };
        // Process complete 4-byte blocks; stage the 0-3 byte tail for later.
        for block in body.chunks(4) {
            if block.len() == 4 {
                // SAFETY: the chunk is exactly 4 initialized bytes and
                // `MaybeUninit<[u8; 4]>` has alignment 1, so reinterpreting
                // the pointer is valid for this read.
                self.state
                    .process_block(unsafe { &*(block.as_ptr() as *const _) });
            } else {
                self.push(block);
            }
        }
        // XXX is this faster?
        // for block in body.exact_chunks(4) {
        //     self.state
        //         .process_block(unsafe { &*(block.as_ptr() as *const _) });
        // }
        // let tail = body.split_at(body.len() / 4 * 4).1;
        // self.push(tail);
    }

    /// Zero-extends the 32-bit hash, per the `hash32::Hasher` contract.
    #[inline]
    fn finish(&self) -> u64 {
        self.finish32().into()
    }
}
// MurmurHash3 (x86 32-bit variant) block-mix constants c1 and c2, as in the
// public reference implementation.
const C1: u32 = 0xcc9e2d51;
const C2: u32 = 0x1b873593;
// Rotation applied between the two multiplications in `pre_mix`.
const R1: u32 = 15;
impl State {
    /// Mixes one complete little-endian 4-byte block into the running state.
    ///
    /// Callers must guarantee all 4 bytes of `block` are initialized; `write`
    /// only calls this with a filled staging buffer or a full 4-byte chunk.
    fn process_block(&mut self, block: &MaybeUninit<[u8; 4]>) {
        // SAFETY: caller contract above — all 4 bytes are initialized.
        self.0 ^= pre_mix(LE::read_u32(unsafe { block.assume_init_ref() }));
        self.0 = self.0.rotate_left(13);
        self.0 = 5u32.wrapping_mul(self.0).wrapping_add(0xe6546b64);
    }
}
/// Per-block scramble applied before a word is folded into the state:
/// multiply by `C1`, rotate left by `R1`, multiply by `C2`.
fn pre_mix(block: u32) -> u32 {
    block.wrapping_mul(C1).rotate_left(R1).wrapping_mul(C2)
}