Vendor dependencies for 0.3.0 release

This commit is contained in:
2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

92
vendor/fixedbitset/src/block/avx.rs vendored Normal file
View File

@@ -0,0 +1,92 @@
#[cfg(target_arch = "x86")]
use core::arch::x86::*;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::*;
use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Not};
// NOTE: this backend is only compiled when the `avx` target feature is
// enabled (see the cfg gates in block/mod.rs), so the AVX intrinsics used
// in the unsafe blocks below are available at runtime.

/// A 256-bit block of bits for the AVX (pre-AVX2) backend.
///
/// Storage is the floating-point vector type `__m256d` and the bitwise
/// operations use the `_pd` logical intrinsics — AVX without AVX2 does not
/// provide 256-bit integer logical ops. The bit patterns are unaffected by
/// the floating-point interpretation.
#[derive(Copy, Clone, Debug)]
#[repr(transparent)]
pub struct Block(pub(super) __m256d);

impl Block {
    /// Returns `true` if no bit in the block is set.
    #[inline]
    pub fn is_empty(self) -> bool {
        unsafe {
            // `_mm256_testz_si256` works on the integer view, so bit-cast
            // (no-op reinterpretation) the vector first.
            let value = _mm256_castpd_si256(self.0);
            // Returns 1 iff `value & value` (i.e. `value`) is all zeros.
            _mm256_testz_si256(value, value) == 1
        }
    }

    /// Computes `self & !other` (AND-NOT).
    ///
    /// Note the operand order: `_mm256_andnot_pd(a, b)` computes `!a & b`,
    /// which is why `other` is passed first.
    #[inline]
    pub fn andnot(self, other: Self) -> Self {
        unsafe { Self(_mm256_andnot_pd(other.0, self.0)) }
    }
}

impl Not for Block {
    type Output = Block;

    /// Bitwise complement: XOR against the all-ones mask `Self::ALL`
    /// (defined in the parent module).
    #[inline]
    fn not(self) -> Self::Output {
        unsafe { Self(_mm256_xor_pd(self.0, Self::ALL.0)) }
    }
}

impl BitAnd for Block {
    type Output = Block;

    /// Lane-less bitwise AND of the two 256-bit blocks.
    #[inline]
    fn bitand(self, other: Self) -> Self::Output {
        unsafe { Self(_mm256_and_pd(self.0, other.0)) }
    }
}

impl BitAndAssign for Block {
    /// In-place bitwise AND.
    #[inline]
    fn bitand_assign(&mut self, other: Self) {
        unsafe {
            self.0 = _mm256_and_pd(self.0, other.0);
        }
    }
}

impl BitOr for Block {
    type Output = Block;

    /// Bitwise OR of the two 256-bit blocks.
    #[inline]
    fn bitor(self, other: Self) -> Self::Output {
        unsafe { Self(_mm256_or_pd(self.0, other.0)) }
    }
}

impl BitOrAssign for Block {
    /// In-place bitwise OR.
    #[inline]
    fn bitor_assign(&mut self, other: Self) {
        unsafe {
            self.0 = _mm256_or_pd(self.0, other.0);
        }
    }
}

impl BitXor for Block {
    type Output = Block;

    /// Bitwise XOR of the two 256-bit blocks.
    #[inline]
    fn bitxor(self, other: Self) -> Self::Output {
        unsafe { Self(_mm256_xor_pd(self.0, other.0)) }
    }
}

impl BitXorAssign for Block {
    /// In-place bitwise XOR.
    #[inline]
    fn bitxor_assign(&mut self, other: Self) {
        unsafe { self.0 = _mm256_xor_pd(self.0, other.0) }
    }
}

impl PartialEq for Block {
    /// Blocks are equal iff the XOR of the operands is all zeros.
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        unsafe {
            let new = _mm256_xor_pd(self.0, other.0);
            // Cast to the integer view for the zero test.
            let neq = _mm256_castpd_si256(new);
            _mm256_testz_si256(neq, neq) == 1
        }
    }
}

88
vendor/fixedbitset/src/block/avx2.rs vendored Normal file
View File

@@ -0,0 +1,88 @@
#[cfg(target_arch = "x86")]
use core::arch::x86::*;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::*;
use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Not};
// NOTE: this backend is only compiled when the `avx2` target feature is
// enabled (see the cfg gates in block/mod.rs), so the AVX2 intrinsics used
// in the unsafe blocks below are available at runtime.

/// A 256-bit block of bits for the AVX2 backend, stored as the integer
/// vector type `__m256i`.
#[derive(Copy, Clone, Debug)]
#[repr(transparent)]
pub struct Block(pub(super) __m256i);

impl Block {
    /// Returns `true` if no bit in the block is set.
    #[inline]
    pub fn is_empty(self) -> bool {
        // `_mm256_testz_si256(a, a)` returns 1 iff `a & a` (i.e. `a`) is
        // all zeros.
        unsafe { _mm256_testz_si256(self.0, self.0) == 1 }
    }

    /// Computes `self & !other` (AND-NOT).
    ///
    /// Note the operand order: `_mm256_andnot_si256(a, b)` computes
    /// `!a & b`, which is why `other` is passed first.
    #[inline]
    pub fn andnot(self, other: Self) -> Self {
        Self(unsafe { _mm256_andnot_si256(other.0, self.0) })
    }
}

impl Not for Block {
    type Output = Block;

    /// Bitwise complement: XOR against the all-ones mask `Self::ALL`
    /// (defined in the parent module).
    #[inline]
    fn not(self) -> Self::Output {
        unsafe { Self(_mm256_xor_si256(self.0, Self::ALL.0)) }
    }
}

impl BitAnd for Block {
    type Output = Block;

    /// Bitwise AND of the two 256-bit blocks.
    #[inline]
    fn bitand(self, other: Self) -> Self::Output {
        unsafe { Self(_mm256_and_si256(self.0, other.0)) }
    }
}

impl BitAndAssign for Block {
    /// In-place bitwise AND.
    #[inline]
    fn bitand_assign(&mut self, other: Self) {
        unsafe {
            self.0 = _mm256_and_si256(self.0, other.0);
        }
    }
}

impl BitOr for Block {
    type Output = Block;

    /// Bitwise OR of the two 256-bit blocks.
    #[inline]
    fn bitor(self, other: Self) -> Self::Output {
        unsafe { Self(_mm256_or_si256(self.0, other.0)) }
    }
}

impl BitOrAssign for Block {
    /// In-place bitwise OR.
    #[inline]
    fn bitor_assign(&mut self, other: Self) {
        unsafe {
            self.0 = _mm256_or_si256(self.0, other.0);
        }
    }
}

impl BitXor for Block {
    type Output = Block;

    /// Bitwise XOR of the two 256-bit blocks.
    #[inline]
    fn bitxor(self, other: Self) -> Self::Output {
        unsafe { Self(_mm256_xor_si256(self.0, other.0)) }
    }
}

impl BitXorAssign for Block {
    /// In-place bitwise XOR.
    #[inline]
    fn bitxor_assign(&mut self, other: Self) {
        unsafe { self.0 = _mm256_xor_si256(self.0, other.0) }
    }
}

impl PartialEq for Block {
    /// Blocks are equal iff the XOR of the operands is all zeros.
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        unsafe {
            let neq = _mm256_xor_si256(self.0, other.0);
            _mm256_testz_si256(neq, neq) == 1
        }
    }
}

70
vendor/fixedbitset/src/block/default.rs vendored Normal file
View File

@@ -0,0 +1,70 @@
use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Not};
/// Portable fallback block: one machine word (`usize`) of bits.
///
/// Selected when no SIMD backend (SSE2/AVX/AVX2/wasm `simd128`) is
/// available; see the cfg gates in block/mod.rs.
#[derive(Copy, Clone, PartialEq, Debug)]
#[repr(transparent)]
pub struct Block(pub(super) usize);

impl Block {
    /// Returns `true` if no bit in the block is set.
    #[inline]
    pub const fn is_empty(self) -> bool {
        self.0 == Self::NONE.0
    }

    /// Computes `self & !other` (AND-NOT).
    #[inline]
    pub fn andnot(self, other: Self) -> Self {
        Self(self.0 & !other.0)
    }
}

impl Not for Block {
    type Output = Block;

    /// Bitwise complement of the word.
    #[inline]
    fn not(self) -> Self::Output {
        Self(!self.0)
    }
}

impl BitAnd for Block {
    type Output = Block;

    /// Bitwise AND of the two words.
    #[inline]
    fn bitand(self, other: Self) -> Self::Output {
        Self(self.0 & other.0)
    }
}

impl BitAndAssign for Block {
    /// In-place bitwise AND.
    #[inline]
    fn bitand_assign(&mut self, other: Self) {
        self.0 &= other.0;
    }
}

impl BitOr for Block {
    type Output = Block;

    /// Bitwise OR of the two words.
    #[inline]
    fn bitor(self, other: Self) -> Self::Output {
        Self(self.0 | other.0)
    }
}

impl BitOrAssign for Block {
    /// In-place bitwise OR.
    #[inline]
    fn bitor_assign(&mut self, other: Self) {
        self.0 |= other.0
    }
}

impl BitXor for Block {
    type Output = Block;

    /// Bitwise XOR of the two words.
    #[inline]
    fn bitxor(self, other: Self) -> Self::Output {
        Self(self.0 ^ other.0)
    }
}

impl BitXorAssign for Block {
    /// In-place bitwise XOR.
    #[inline]
    fn bitxor_assign(&mut self, other: Self) {
        self.0 ^= other.0
    }
}

114
vendor/fixedbitset/src/block/mod.rs vendored Normal file
View File

@@ -0,0 +1,114 @@
#![allow(clippy::undocumented_unsafe_blocks)]
#![allow(dead_code)]
// TODO: Remove once the transmutes are fixed
#![allow(unknown_lints)]
#![allow(clippy::missing_transmute_annotations)]
use core::cmp::Ordering;
use core::hash::{Hash, Hasher};
#[cfg(all(
not(all(target_family = "wasm", target_feature = "simd128")),
not(target_feature = "sse2"),
not(target_feature = "avx"),
not(target_feature = "avx2"),
))]
mod default;
#[cfg(all(
not(all(target_family = "wasm", target_feature = "simd128")),
not(target_feature = "sse2"),
not(target_feature = "avx"),
not(target_feature = "avx2"),
))]
pub use self::default::*;
#[cfg(all(
any(target_arch = "x86", target_arch = "x86_64"),
target_feature = "sse2",
not(target_feature = "avx"),
not(target_feature = "avx2"),
))]
mod sse2;
#[cfg(all(
any(target_arch = "x86", target_arch = "x86_64"),
target_feature = "sse2",
not(target_feature = "avx"),
not(target_feature = "avx2"),
))]
pub use self::sse2::*;
#[cfg(all(
any(target_arch = "x86", target_arch = "x86_64"),
target_feature = "avx",
not(target_feature = "avx2")
))]
mod avx;
#[cfg(all(
any(target_arch = "x86", target_arch = "x86_64"),
target_feature = "avx",
not(target_feature = "avx2")
))]
pub use self::avx::*;
#[cfg(all(
any(target_arch = "x86", target_arch = "x86_64"),
target_feature = "avx2"
))]
mod avx2;
#[cfg(all(
any(target_arch = "x86", target_arch = "x86_64"),
target_feature = "avx2"
))]
pub use self::avx2::*;
#[cfg(all(target_family = "wasm", target_feature = "simd128"))]
mod wasm;
#[cfg(all(target_family = "wasm", target_feature = "simd128"))]
pub use self::wasm::*;
impl Block {
    /// Number of `usize` words making up one `Block` (backend-dependent).
    pub const USIZE_COUNT: usize = core::mem::size_of::<Self>() / core::mem::size_of::<usize>();
    /// Block with every bit cleared.
    pub const NONE: Self = Self::from_usize_array([0; Self::USIZE_COUNT]);
    /// Block with every bit set.
    pub const ALL: Self = Self::from_usize_array([usize::MAX; Self::USIZE_COUNT]);
    /// Number of bits in one `Block`.
    pub const BITS: usize = core::mem::size_of::<Self>() * 8;

    /// Reinterprets the block as an array of `usize` words.
    #[inline]
    pub fn into_usize_array(self) -> [usize; Self::USIZE_COUNT] {
        // SAFETY: every backend's `Block` is `#[repr(transparent)]` over its
        // storage, and `USIZE_COUNT` is computed from `size_of::<Self>()`,
        // so source and destination have the same size. (Relies on the
        // storage being plain bits — see the transmute TODO above.)
        unsafe { core::mem::transmute(self.0) }
    }

    /// Builds a block from an array of `usize` words (usable in consts).
    #[inline]
    pub const fn from_usize_array(array: [usize; Self::USIZE_COUNT]) -> Self {
        // SAFETY: same size/layout argument as `into_usize_array`, in the
        // opposite direction.
        Self(unsafe { core::mem::transmute(array) })
    }
}

// `PartialEq` is supplied by each backend; the equality it implements is
// total, so `Eq` can be asserted here.
impl Eq for Block {}

impl PartialOrd for Block {
    /// Defers to the total order below.
    #[inline]
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for Block {
    /// Orders blocks lexicographically by their `usize`-word view, so the
    /// ordering is the same regardless of which SIMD backend is compiled.
    #[inline]
    fn cmp(&self, other: &Self) -> Ordering {
        self.into_usize_array().cmp(&other.into_usize_array())
    }
}

impl Default for Block {
    /// The default block is empty (all bits cleared).
    #[inline]
    fn default() -> Self {
        Self::NONE
    }
}

impl Hash for Block {
    /// Hashes the `usize`-word view, keeping `Hash` consistent with the
    /// word-based `Ord`/`Eq` above.
    #[inline]
    fn hash<H: Hasher>(&self, hasher: &mut H) {
        Hash::hash_slice(&self.into_usize_array(), hasher);
    }
}

104
vendor/fixedbitset/src/block/sse2.rs vendored Normal file
View File

@@ -0,0 +1,104 @@
#![allow(clippy::undocumented_unsafe_blocks)]
#[cfg(target_arch = "x86")]
use core::arch::x86::*;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::*;
use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Not};
// NOTE: this backend is only compiled when the `sse2` target feature is
// enabled (see the cfg gates in block/mod.rs); some paths additionally use
// SSE4.1 intrinsics when that feature is also on.

/// A 128-bit block of bits for the SSE2 backend, stored as the integer
/// vector type `__m128i`.
#[derive(Copy, Clone, Debug)]
#[repr(transparent)]
pub struct Block(pub(super) __m128i);

impl Block {
    /// Returns `true` if no bit in the block is set.
    #[inline]
    pub fn is_empty(self) -> bool {
        #[cfg(not(target_feature = "sse4.1"))]
        {
            // Plain SSE2 has no test-all-zeros intrinsic; compare against
            // the all-zeros constant instead.
            self == Self::NONE
        }
        #[cfg(target_feature = "sse4.1")]
        {
            // `_mm_test_all_zeros(a, a)` returns 1 iff `a` is all zeros.
            unsafe { _mm_test_all_zeros(self.0, self.0) == 1 }
        }
    }

    /// Computes `self & !other` (AND-NOT).
    ///
    /// Note the operand order: `_mm_andnot_si128(a, b)` computes `!a & b`,
    /// which is why `other` is passed first.
    #[inline]
    pub fn andnot(self, other: Self) -> Self {
        Self(unsafe { _mm_andnot_si128(other.0, self.0) })
    }
}

impl Not for Block {
    type Output = Block;

    /// Bitwise complement: XOR against the all-ones mask `Self::ALL`
    /// (defined in the parent module).
    #[inline]
    fn not(self) -> Self::Output {
        unsafe { Self(_mm_xor_si128(self.0, Self::ALL.0)) }
    }
}

impl BitAnd for Block {
    type Output = Block;

    /// Bitwise AND of the two 128-bit blocks.
    #[inline]
    fn bitand(self, other: Self) -> Self::Output {
        unsafe { Self(_mm_and_si128(self.0, other.0)) }
    }
}

impl BitAndAssign for Block {
    /// In-place bitwise AND.
    #[inline]
    fn bitand_assign(&mut self, other: Self) {
        unsafe {
            self.0 = _mm_and_si128(self.0, other.0);
        }
    }
}

impl BitOr for Block {
    type Output = Block;

    /// Bitwise OR of the two 128-bit blocks.
    #[inline]
    fn bitor(self, other: Self) -> Self::Output {
        unsafe { Self(_mm_or_si128(self.0, other.0)) }
    }
}

impl BitOrAssign for Block {
    /// In-place bitwise OR.
    #[inline]
    fn bitor_assign(&mut self, other: Self) {
        unsafe {
            self.0 = _mm_or_si128(self.0, other.0);
        }
    }
}

impl BitXor for Block {
    type Output = Block;

    /// Bitwise XOR of the two 128-bit blocks.
    #[inline]
    fn bitxor(self, other: Self) -> Self::Output {
        unsafe { Self(_mm_xor_si128(self.0, other.0)) }
    }
}

impl BitXorAssign for Block {
    /// In-place bitwise XOR.
    #[inline]
    fn bitxor_assign(&mut self, other: Self) {
        unsafe { self.0 = _mm_xor_si128(self.0, other.0) }
    }
}

impl PartialEq for Block {
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        unsafe {
            #[cfg(not(target_feature = "sse4.1"))]
            {
                // Bytewise compare: the movemask is 0xffff iff all 16 byte
                // lanes compared equal.
                _mm_movemask_epi8(_mm_cmpeq_epi8(self.0, other.0)) == 0xffff
            }
            #[cfg(target_feature = "sse4.1")]
            {
                // Equal iff the XOR of the operands is all zeros.
                let neq = _mm_xor_si128(self.0, other.0);
                _mm_test_all_zeros(neq, neq) == 1
            }
        }
    }
}

80
vendor/fixedbitset/src/block/wasm.rs vendored Normal file
View File

@@ -0,0 +1,80 @@
use core::{
arch::wasm32::*,
ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Not},
};
/// A 128-bit block of bits for the wasm `simd128` backend, stored as `v128`.
///
/// The wasm SIMD intrinsics are safe functions, so no `unsafe` is needed
/// in this backend.
#[derive(Copy, Clone, Debug)]
#[repr(transparent)]
pub struct Block(pub(super) v128);

impl Block {
    /// Returns `true` if no bit in the block is set.
    #[inline]
    pub fn is_empty(self) -> bool {
        // `v128_any_true` is true iff at least one bit is set.
        !v128_any_true(self.0)
    }

    /// Computes `self & !other` (AND-NOT).
    ///
    /// wasm's `v128_andnot(a, b)` computes `a & !b`, so — unlike the x86
    /// backends — the operands are passed in natural order here.
    #[inline]
    pub fn andnot(self, other: Self) -> Self {
        Self(v128_andnot(self.0, other.0))
    }
}

impl Not for Block {
    type Output = Block;

    /// Bitwise complement: XOR against the all-ones mask `Self::ALL`
    /// (defined in the parent module).
    #[inline]
    fn not(self) -> Self::Output {
        Self(v128_xor(self.0, Self::ALL.0))
    }
}

impl BitAnd for Block {
    type Output = Block;

    /// Bitwise AND of the two 128-bit blocks.
    #[inline]
    fn bitand(self, other: Self) -> Self::Output {
        Self(v128_and(self.0, other.0))
    }
}

impl BitAndAssign for Block {
    /// In-place bitwise AND.
    #[inline]
    fn bitand_assign(&mut self, other: Self) {
        self.0 = v128_and(self.0, other.0);
    }
}

impl BitOr for Block {
    type Output = Block;

    /// Bitwise OR of the two 128-bit blocks.
    #[inline]
    fn bitor(self, other: Self) -> Self::Output {
        Self(v128_or(self.0, other.0))
    }
}

impl BitOrAssign for Block {
    /// In-place bitwise OR.
    #[inline]
    fn bitor_assign(&mut self, other: Self) {
        self.0 = v128_or(self.0, other.0);
    }
}

impl BitXor for Block {
    type Output = Block;

    /// Bitwise XOR of the two 128-bit blocks.
    #[inline]
    fn bitxor(self, other: Self) -> Self::Output {
        Self(v128_xor(self.0, other.0))
    }
}

impl BitXorAssign for Block {
    /// In-place bitwise XOR.
    #[inline]
    fn bitxor_assign(&mut self, other: Self) {
        self.0 = v128_xor(self.0, other.0)
    }
}

impl PartialEq for Block {
    /// Blocks are equal iff the XOR of the operands has no bit set.
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        !v128_any_true(v128_xor(self.0, other.0))
    }
}

1711
vendor/fixedbitset/src/lib.rs vendored Normal file

File diff suppressed because it is too large Load Diff

45
vendor/fixedbitset/src/range.rs vendored Normal file
View File

@@ -0,0 +1,45 @@
use core::ops::{Range, RangeFrom, RangeFull, RangeTo};
// Taken from https://github.com/bluss/odds/blob/master/src/range.rs.
/// **IndexRange** is implemented by Rust's built-in range types, produced
/// by range syntax like `..`, `a..`, `..b` or `c..d`.
///
/// Each accessor returns `None` when the corresponding bound is absent
/// from the range type; the trait's default methods encode "no bound".
pub trait IndexRange<T = usize> {
    /// Start index (inclusive)
    #[inline]
    fn start(&self) -> Option<T> {
        None
    }

    /// End index (exclusive)
    #[inline]
    fn end(&self) -> Option<T> {
        None
    }
}

/// `..` — both bounds absent; the defaults already return `None`.
impl<T> IndexRange<T> for RangeFull {}

/// `c..d` — both bounds present.
impl<T: Copy> IndexRange<T> for Range<T> {
    #[inline]
    fn start(&self) -> Option<T> {
        Some(self.start)
    }

    #[inline]
    fn end(&self) -> Option<T> {
        Some(self.end)
    }
}

/// `a..` — only the start bound is present.
impl<T: Copy> IndexRange<T> for RangeFrom<T> {
    #[inline]
    fn start(&self) -> Option<T> {
        Some(self.start)
    }
}

/// `..b` — only the end bound is present.
impl<T: Copy> IndexRange<T> for RangeTo<T> {
    #[inline]
    fn end(&self) -> Option<T> {
        Some(self.end)
    }
}

150
vendor/fixedbitset/src/serde_impl.rs vendored Normal file
View File

@@ -0,0 +1,150 @@
#[cfg(not(feature = "std"))]
use core as std;
use crate::{Block, FixedBitSet, BYTES};
use alloc::vec::Vec;
use core::{convert::TryFrom, fmt};
use serde::de::{self, Deserialize, Deserializer, MapAccess, SeqAccess, Visitor};
use serde::ser::{Serialize, SerializeStruct, Serializer};
/// Adapter that serializes a [`FixedBitSet`]'s block storage as one raw
/// little-endian byte string rather than one element per block.
struct BitSetByteSerializer<'a>(&'a FixedBitSet);

impl Serialize for FixedBitSet {
    /// Serializes as a two-field struct: `length` (the set's `length`
    /// field, widened to `u64` for portability) and `data` (the blocks as
    /// little-endian bytes via [`BitSetByteSerializer`]).
    ///
    /// NOTE: the on-the-wire struct name is spelled "FixedBitset"
    /// (lowercase "s"); keep it unchanged for format compatibility.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let mut struct_serializer = serializer.serialize_struct("FixedBitset", 2)?;
        struct_serializer.serialize_field("length", &(self.length as u64))?;
        struct_serializer.serialize_field("data", &BitSetByteSerializer(self))?;
        struct_serializer.end()
    }
}
impl<'a> Serialize for BitSetByteSerializer<'a> {
    /// Flattens the set's blocks into a single little-endian byte buffer
    /// and emits it with `serialize_bytes`.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let blocks = self.0.as_slice();
        // PERF: Figure out a way to do this without allocating.
        let mut temp = Vec::with_capacity(blocks.len() * BYTES);
        temp.extend(blocks.iter().flat_map(|block| block.to_le_bytes()));
        serializer.serialize_bytes(&temp)
    }
}
impl<'de> Deserialize<'de> for FixedBitSet {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
enum Field {
Length,
Data,
}
fn bytes_to_data(length: usize, input: &[u8]) -> Vec<Block> {
let block_len = length / BYTES + 1;
let mut data = Vec::with_capacity(block_len);
for chunk in input.chunks(BYTES) {
match <&[u8; BYTES]>::try_from(chunk) {
Ok(bytes) => data.push(usize::from_le_bytes(*bytes)),
Err(_) => {
let mut bytes = [0u8; BYTES];
bytes[0..BYTES].copy_from_slice(chunk);
data.push(usize::from_le_bytes(bytes));
}
}
}
data
}
impl<'de> Deserialize<'de> for Field {
fn deserialize<D>(deserializer: D) -> Result<Field, D::Error>
where
D: Deserializer<'de>,
{
struct FieldVisitor;
impl<'de> Visitor<'de> for FieldVisitor {
type Value = Field;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("`length` or `data`")
}
fn visit_str<E>(self, value: &str) -> Result<Field, E>
where
E: de::Error,
{
match value {
"length" => Ok(Field::Length),
"data" => Ok(Field::Data),
_ => Err(de::Error::unknown_field(value, FIELDS)),
}
}
}
deserializer.deserialize_identifier(FieldVisitor)
}
}
struct FixedBitSetVisitor;
impl<'de> Visitor<'de> for FixedBitSetVisitor {
type Value = FixedBitSet;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("struct Duration")
}
fn visit_seq<V>(self, mut seq: V) -> Result<FixedBitSet, V::Error>
where
V: SeqAccess<'de>,
{
let length = seq
.next_element()?
.ok_or_else(|| de::Error::invalid_length(0, &self))?;
let data: &[u8] = seq
.next_element()?
.ok_or_else(|| de::Error::invalid_length(1, &self))?;
let data = bytes_to_data(length, data);
Ok(FixedBitSet::with_capacity_and_blocks(length, data))
}
fn visit_map<V>(self, mut map: V) -> Result<FixedBitSet, V::Error>
where
V: MapAccess<'de>,
{
let mut length = None;
let mut temp: Option<&[u8]> = None;
while let Some(key) = map.next_key()? {
match key {
Field::Length => {
if length.is_some() {
return Err(de::Error::duplicate_field("length"));
}
length = Some(map.next_value()?);
}
Field::Data => {
if temp.is_some() {
return Err(de::Error::duplicate_field("data"));
}
temp = Some(map.next_value()?);
}
}
}
let length = length.ok_or_else(|| de::Error::missing_field("length"))?;
let data = temp.ok_or_else(|| de::Error::missing_field("data"))?;
let data = bytes_to_data(length, data);
Ok(FixedBitSet::with_capacity_and_blocks(length, data))
}
}
const FIELDS: &'static [&'static str] = &["length", "data"];
deserializer.deserialize_struct("Duration", FIELDS, FixedBitSetVisitor)
}
}