Vendor dependencies for 0.3.0 release

2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

vendor/encase/src/core/alignment_value.rs vendored Normal file

@@ -0,0 +1,152 @@
use super::SizeValue;
use core::num::NonZeroU64;
/// Helper type for alignment calculations
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct AlignmentValue(NonZeroU64);
impl AlignmentValue {
pub const fn new(val: u64) -> Self {
if !val.is_power_of_two() {
panic!("Alignment must be a power of 2!");
}
// SAFETY: This is safe since 0 is not a power of 2
Self(unsafe { NonZeroU64::new_unchecked(val) })
}
/// Returns an alignment that is the smallest power of two greater than or equal to the passed-in `size`
#[inline]
pub const fn from_next_power_of_two_size(size: SizeValue) -> Self {
match size.get().checked_next_power_of_two() {
None => panic!("Overflow occurred while getting the next power of 2!"),
Some(val) => {
// SAFETY: This is safe since we got the next_power_of_two
Self(unsafe { NonZeroU64::new_unchecked(val) })
}
}
}
#[inline]
pub const fn get(&self) -> u64 {
self.0.get()
}
/// Returns the max alignment from an array of alignments
pub const fn max<const N: usize>(input: [AlignmentValue; N]) -> AlignmentValue {
let mut max = input[0];
let mut i = 1;
while i < N {
if input[i].get() > max.get() {
max = input[i];
}
i += 1;
}
max
}
/// Returns true if `n` is a multiple of this alignment
#[inline]
pub const fn is_aligned(&self, n: u64) -> bool {
n % self.get() == 0
}
/// Returns the amount of padding needed so that `n + padding` will be a multiple of this alignment
#[inline]
pub const fn padding_needed_for(&self, n: u64) -> u64 {
let r = n % self.get();
if r > 0 {
self.get() - r
} else {
0
}
}
/// Will round up the given `n` so that the returned value will be a multiple of this alignment
#[inline]
pub const fn round_up(&self, n: u64) -> u64 {
n + self.padding_needed_for(n)
}
/// Will round up the given `n` so that the returned value will be a multiple of this alignment
#[inline]
pub const fn round_up_size(&self, n: SizeValue) -> SizeValue {
SizeValue::new(self.round_up(n.get()))
}
}
#[cfg(test)]
mod test {
use super::AlignmentValue;
#[test]
fn new() {
assert_eq!(4, AlignmentValue::new(4).get());
}
#[test]
#[should_panic]
fn new_panic() {
AlignmentValue::new(3);
}
#[test]
fn from_next_power_of_two_size() {
assert_eq!(
AlignmentValue::new(8),
AlignmentValue::from_next_power_of_two_size(super::SizeValue::new(7))
);
}
#[test]
#[should_panic]
fn from_next_power_of_two_size_panic() {
AlignmentValue::from_next_power_of_two_size(super::SizeValue::new(u64::MAX));
}
#[test]
fn max() {
assert_eq!(
AlignmentValue::new(32),
AlignmentValue::max([
AlignmentValue::new(2),
AlignmentValue::new(8),
AlignmentValue::new(32)
])
);
}
#[test]
fn is_aligned() {
assert!(AlignmentValue::new(8).is_aligned(32));
assert!(!AlignmentValue::new(8).is_aligned(9));
}
#[test]
fn padding_needed_for() {
assert_eq!(1, AlignmentValue::new(8).padding_needed_for(7));
assert_eq!(16 - 9, AlignmentValue::new(8).padding_needed_for(9));
}
#[test]
fn round_up() {
assert_eq!(24, AlignmentValue::new(8).round_up(20));
assert_eq!(
super::SizeValue::new(16),
AlignmentValue::new(16).round_up_size(super::SizeValue::new(7))
);
}
#[test]
fn derived_traits() {
let alignment = AlignmentValue::new(8);
#[allow(clippy::clone_on_copy)]
let alignment_clone = alignment.clone();
assert!(alignment == alignment_clone);
assert_eq!(format!("{alignment:?}"), "AlignmentValue(8)");
}
}
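// Illustrative sketch (not part of the vendored file): how the helpers above
// compose when padding an offset up to a 16-byte alignment.
#[allow(dead_code)]
fn alignment_sketch() {
    let align = AlignmentValue::new(16);
    // 20 is 4 bytes past a 16-byte boundary, so 12 bytes of padding are needed
    assert_eq!(align.padding_needed_for(20), 12);
    assert_eq!(align.round_up(20), 32);
    // already-aligned values need no padding and are returned unchanged
    assert!(align.is_aligned(32));
    assert_eq!(align.round_up(32), 32);
}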

vendor/encase/src/core/buffers.rs vendored Normal file

@@ -0,0 +1,317 @@
use super::{
AlignmentValue, BufferMut, BufferRef, CreateFrom, ReadFrom, Reader, Result, ShaderType,
WriteInto, Writer,
};
/// Storage buffer wrapper facilitating RW operations
pub struct StorageBuffer<B> {
inner: B,
}
impl<B> StorageBuffer<B> {
pub const fn new(buffer: B) -> Self {
Self { inner: buffer }
}
pub fn into_inner(self) -> B {
self.inner
}
}
impl<B> From<B> for StorageBuffer<B> {
fn from(buffer: B) -> Self {
Self::new(buffer)
}
}
impl<B> AsRef<B> for StorageBuffer<B> {
fn as_ref(&self) -> &B {
&self.inner
}
}
impl<B> AsMut<B> for StorageBuffer<B> {
fn as_mut(&mut self) -> &mut B {
&mut self.inner
}
}
impl<B: BufferMut> StorageBuffer<B> {
pub fn write<T>(&mut self, value: &T) -> Result<()>
where
T: ?Sized + ShaderType + WriteInto,
{
let mut writer = Writer::new(value, &mut self.inner, 0)?;
value.write_into(&mut writer);
Ok(())
}
}
impl<B: BufferRef> StorageBuffer<B> {
pub fn read<T>(&self, value: &mut T) -> Result<()>
where
T: ?Sized + ShaderType + ReadFrom,
{
let mut reader = Reader::new::<T>(&self.inner, 0)?;
value.read_from(&mut reader);
Ok(())
}
pub fn create<T>(&self) -> Result<T>
where
T: ShaderType + CreateFrom,
{
let mut reader = Reader::new::<T>(&self.inner, 0)?;
Ok(T::create_from(&mut reader))
}
}
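// Illustrative usage sketch (assumes the scalar ShaderType/WriteInto/CreateFrom
// impls provided elsewhere in the crate): round-trip a value through a growable Vec.
#[allow(dead_code)]
fn storage_buffer_roundtrip_sketch() -> Result<()> {
    let mut buffer = StorageBuffer::new(Vec::<u8>::new());
    buffer.write(&3.5f32)?; // serializes at offset 0, enlarging the Vec as needed
    let value: f32 = buffer.create()?; // deserializes from offset 0
    assert_eq!(value, 3.5);
    Ok(())
}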
/// Uniform buffer wrapper facilitating RW operations
pub struct UniformBuffer<B> {
inner: StorageBuffer<B>,
}
impl<B> UniformBuffer<B> {
pub const fn new(buffer: B) -> Self {
Self {
inner: StorageBuffer::new(buffer),
}
}
pub fn into_inner(self) -> B {
self.inner.inner
}
}
impl<B> From<B> for UniformBuffer<B> {
fn from(buffer: B) -> Self {
Self::new(buffer)
}
}
impl<B> AsRef<B> for UniformBuffer<B> {
fn as_ref(&self) -> &B {
&self.inner.inner
}
}
impl<B> AsMut<B> for UniformBuffer<B> {
fn as_mut(&mut self) -> &mut B {
&mut self.inner.inner
}
}
impl<B: BufferMut> UniformBuffer<B> {
pub fn write<T>(&mut self, value: &T) -> Result<()>
where
T: ?Sized + ShaderType + WriteInto,
{
T::assert_uniform_compat();
self.inner.write(value)
}
}
impl<B: BufferRef> UniformBuffer<B> {
pub fn read<T>(&self, value: &mut T) -> Result<()>
where
T: ?Sized + ShaderType + ReadFrom,
{
T::assert_uniform_compat();
self.inner.read(value)
}
pub fn create<T>(&self) -> Result<T>
where
T: ShaderType + CreateFrom,
{
T::assert_uniform_compat();
self.inner.create()
}
}
/// Dynamic storage buffer wrapper facilitating RW operations
pub struct DynamicStorageBuffer<B> {
inner: B,
alignment: AlignmentValue,
offset: usize,
}
impl<B> DynamicStorageBuffer<B> {
/// Creates a new dynamic storage buffer wrapper with an alignment of 256
/// (default alignment in the WebGPU spec).
pub const fn new(buffer: B) -> Self {
Self::new_with_alignment(buffer, 256)
}
/// Creates a new dynamic storage buffer wrapper with a given alignment.
/// # Panics
///
/// - if `alignment` is not a power of two.
/// - if `alignment` is less than 32 (min alignment imposed by the WebGPU spec).
pub const fn new_with_alignment(buffer: B, alignment: u64) -> Self {
if alignment < 32 {
panic!("Alignment must be at least 32!");
}
Self {
inner: buffer,
alignment: AlignmentValue::new(alignment),
offset: 0,
}
}
pub fn set_offset(&mut self, offset: u64) {
if !self.alignment.is_aligned(offset) {
panic!(
"offset of {} bytes is not aligned to alignment of {} bytes",
offset,
self.alignment.get()
);
}
self.offset = offset as usize;
}
pub fn into_inner(self) -> B {
self.inner
}
}
impl<B> From<B> for DynamicStorageBuffer<B> {
fn from(buffer: B) -> Self {
Self::new(buffer)
}
}
impl<B> AsRef<B> for DynamicStorageBuffer<B> {
fn as_ref(&self) -> &B {
&self.inner
}
}
impl<B> AsMut<B> for DynamicStorageBuffer<B> {
fn as_mut(&mut self) -> &mut B {
&mut self.inner
}
}
impl<B: BufferMut> DynamicStorageBuffer<B> {
pub fn write<T>(&mut self, value: &T) -> Result<u64>
where
T: ?Sized + ShaderType + WriteInto,
{
let offset = self.offset;
let mut writer = Writer::new(value, &mut self.inner, offset)?;
value.write_into(&mut writer);
self.offset += self.alignment.round_up(value.size().get()) as usize;
Ok(offset as u64)
}
}
impl<B: BufferRef> DynamicStorageBuffer<B> {
pub fn read<T>(&mut self, value: &mut T) -> Result<()>
where
T: ?Sized + ShaderType + ReadFrom,
{
let mut reader = Reader::new::<T>(&self.inner, self.offset)?;
value.read_from(&mut reader);
self.offset += self.alignment.round_up(value.size().get()) as usize;
Ok(())
}
pub fn create<T>(&mut self) -> Result<T>
where
T: ShaderType + CreateFrom,
{
let mut reader = Reader::new::<T>(&self.inner, self.offset)?;
let value = T::create_from(&mut reader);
self.offset += self.alignment.round_up(value.size().get()) as usize;
Ok(value)
}
}
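// Illustrative sketch (same assumptions as above): each write returns the dynamic
// offset it landed at, and the internal offset advances by the value's size
// rounded up to the 256-byte default alignment.
#[allow(dead_code)]
fn dynamic_offset_sketch() -> Result<()> {
    let mut buffer = DynamicStorageBuffer::new(Vec::<u8>::new());
    let first = buffer.write(&1.0f32)?; // written at offset 0
    let second = buffer.write(&2.0f32)?; // 4 bytes rounded up to 256
    assert_eq!((first, second), (0, 256));
    Ok(())
}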
/// Dynamic uniform buffer wrapper facilitating RW operations
pub struct DynamicUniformBuffer<B> {
inner: DynamicStorageBuffer<B>,
}
impl<B> DynamicUniformBuffer<B> {
/// Creates a new dynamic uniform buffer wrapper with an alignment of 256
/// (default alignment in the WebGPU spec).
pub const fn new(buffer: B) -> Self {
Self {
inner: DynamicStorageBuffer::new(buffer),
}
}
/// Creates a new dynamic uniform buffer wrapper with a given alignment.
/// # Panics
///
/// - if `alignment` is not a power of two.
/// - if `alignment` is less than 32 (min alignment imposed by the WebGPU spec).
pub const fn new_with_alignment(buffer: B, alignment: u64) -> Self {
Self {
inner: DynamicStorageBuffer::new_with_alignment(buffer, alignment),
}
}
pub fn set_offset(&mut self, offset: u64) {
self.inner.set_offset(offset);
}
pub fn into_inner(self) -> B {
self.inner.inner
}
}
impl<B> From<B> for DynamicUniformBuffer<B> {
fn from(buffer: B) -> Self {
Self::new(buffer)
}
}
impl<B> AsRef<B> for DynamicUniformBuffer<B> {
fn as_ref(&self) -> &B {
&self.inner.inner
}
}
impl<B> AsMut<B> for DynamicUniformBuffer<B> {
fn as_mut(&mut self) -> &mut B {
&mut self.inner.inner
}
}
impl<B: BufferMut> DynamicUniformBuffer<B> {
pub fn write<T>(&mut self, value: &T) -> Result<u64>
where
T: ?Sized + ShaderType + WriteInto,
{
T::assert_uniform_compat();
self.inner.write(value)
}
}
impl<B: BufferRef> DynamicUniformBuffer<B> {
pub fn read<T>(&mut self, value: &mut T) -> Result<()>
where
T: ?Sized + ShaderType + ReadFrom,
{
T::assert_uniform_compat();
self.inner.read(value)
}
pub fn create<T>(&mut self) -> Result<T>
where
T: ShaderType + CreateFrom,
{
T::assert_uniform_compat();
self.inner.create()
}
}
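// Illustrative sketch: set_offset only accepts offsets matching the configured
// alignment, so re-positioning a dynamic buffer cannot silently misalign later writes.
#[allow(dead_code)]
fn set_offset_sketch() {
    let mut buffer = DynamicUniformBuffer::new(Vec::<u8>::new());
    buffer.set_offset(512); // fine: a multiple of the default 256-byte alignment
    // buffer.set_offset(100) would panic: 100 is not a multiple of 256
}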

vendor/encase/src/core/mod.rs vendored Normal file

@@ -0,0 +1,11 @@
mod alignment_value;
mod buffers;
mod rw;
mod size_value;
mod traits;
pub use alignment_value::*;
pub use buffers::*;
pub use rw::*;
pub use size_value::*;
pub use traits::*;

vendor/encase/src/core/rw.rs vendored Normal file

@@ -0,0 +1,541 @@
use super::ShaderType;
use core::mem::MaybeUninit;
use thiserror::Error;
#[derive(Clone, Copy, Debug, Error)]
pub enum Error {
#[error("could not read/write {expected} bytes from/into {found} byte sized buffer")]
BufferTooSmall { expected: u64, found: u64 },
}
pub type Result<T> = core::result::Result<T, Error>;
pub struct WriteContext {
/// length of the contained runtime sized array
///
/// used by the derive macro
pub rts_array_length: Option<u32>,
}
pub struct Writer<B: BufferMut> {
pub ctx: WriteContext,
cursor: Cursor<B>,
}
impl<B: BufferMut> Writer<B> {
#[inline]
pub fn new<T: ?Sized + ShaderType>(data: &T, buffer: B, offset: usize) -> Result<Self> {
let mut cursor = Cursor::new(buffer, offset);
let size = data.size().get();
if cursor.try_enlarge(offset + size as usize).is_err() {
Err(Error::BufferTooSmall {
expected: size,
found: cursor.capacity() as u64,
})
} else {
Ok(Self {
ctx: WriteContext {
rts_array_length: None,
},
cursor,
})
}
}
#[inline]
pub fn advance(&mut self, amount: usize) {
self.cursor.advance(amount);
}
#[inline]
pub fn write<const N: usize>(&mut self, val: &[u8; N]) {
self.cursor.write(val);
}
#[inline]
pub fn write_slice(&mut self, val: &[u8]) {
self.cursor.write_slice(val)
}
}
pub struct ReadContext {
/// max elements to read into the contained runtime sized array
///
/// used by the derive macro
pub rts_array_max_el_to_read: Option<u32>,
}
pub struct Reader<B: BufferRef> {
pub ctx: ReadContext,
cursor: Cursor<B>,
}
impl<B: BufferRef> Reader<B> {
#[inline]
pub fn new<T: ?Sized + ShaderType>(buffer: B, offset: usize) -> Result<Self> {
let cursor = Cursor::new(buffer, offset);
if cursor.remaining() < T::min_size().get() as usize {
Err(Error::BufferTooSmall {
expected: T::min_size().get(),
found: cursor.remaining() as u64,
})
} else {
Ok(Self {
ctx: ReadContext {
rts_array_max_el_to_read: None,
},
cursor,
})
}
}
#[inline]
pub fn advance(&mut self, amount: usize) {
self.cursor.advance(amount);
}
#[inline]
pub fn read<const N: usize>(&mut self) -> &[u8; N] {
self.cursor.read()
}
#[inline]
pub fn read_slice(&mut self, val: &mut [u8]) {
self.cursor.read_slice(val)
}
#[inline]
pub fn remaining(&self) -> usize {
self.cursor.remaining()
}
}
struct Cursor<B> {
buffer: B,
pos: usize,
}
impl<B> Cursor<B> {
#[inline]
fn new(buffer: B, offset: usize) -> Self {
Self {
buffer,
pos: offset,
}
}
#[inline]
fn advance(&mut self, amount: usize) {
self.pos += amount;
}
}
impl<B: BufferRef> Cursor<B> {
#[inline]
fn remaining(&self) -> usize {
self.buffer.len().saturating_sub(self.pos)
}
#[inline]
fn read<const N: usize>(&mut self) -> &[u8; N] {
let res = self.buffer.read(self.pos);
self.pos += N;
res
}
#[inline]
fn read_slice(&mut self, val: &mut [u8]) {
self.buffer.read_slice(self.pos, val);
self.pos += val.len();
}
}
impl<B: BufferMut> Cursor<B> {
#[inline]
fn capacity(&self) -> usize {
self.buffer.capacity().saturating_sub(self.pos)
}
#[inline]
fn write<const N: usize>(&mut self, val: &[u8; N]) {
self.buffer.write(self.pos, val);
self.pos += N;
}
#[inline]
fn write_slice(&mut self, val: &[u8]) {
self.buffer.write_slice(self.pos, val);
self.pos += val.len();
}
#[inline]
fn try_enlarge(&mut self, wanted: usize) -> core::result::Result<(), EnlargeError> {
self.buffer.try_enlarge(wanted)
}
}
#[derive(Clone, Copy, Debug, Error)]
#[error("could not enlarge buffer")]
pub struct EnlargeError;
impl From<std::collections::TryReserveError> for EnlargeError {
fn from(_: std::collections::TryReserveError) -> Self {
Self
}
}
#[allow(clippy::len_without_is_empty)]
pub trait BufferRef {
fn len(&self) -> usize;
fn read<const N: usize>(&self, offset: usize) -> &[u8; N];
fn read_slice(&self, offset: usize, val: &mut [u8]);
}
pub trait BufferMut {
fn capacity(&self) -> usize;
fn write<const N: usize>(&mut self, offset: usize, val: &[u8; N]);
fn write_slice(&mut self, offset: usize, val: &[u8]);
#[inline]
fn try_enlarge(&mut self, wanted: usize) -> core::result::Result<(), EnlargeError> {
if wanted > self.capacity() {
Err(EnlargeError)
} else {
Ok(())
}
}
}
impl BufferRef for [u8] {
fn len(&self) -> usize {
self.len()
}
#[inline]
fn read<const N: usize>(&self, offset: usize) -> &[u8; N] {
use crate::utils::SliceExt;
self.array(offset)
}
#[inline]
fn read_slice(&self, offset: usize, val: &mut [u8]) {
val.copy_from_slice(&self[offset..offset + val.len()])
}
}
impl<const LEN: usize> BufferRef for [u8; LEN] {
#[inline]
fn len(&self) -> usize {
<[u8] as BufferRef>::len(self)
}
#[inline]
fn read<const N: usize>(&self, offset: usize) -> &[u8; N] {
<[u8] as BufferRef>::read(self, offset)
}
#[inline]
fn read_slice(&self, offset: usize, val: &mut [u8]) {
<[u8] as BufferRef>::read_slice(self, offset, val)
}
}
impl BufferRef for Vec<u8> {
#[inline]
fn len(&self) -> usize {
<[u8] as BufferRef>::len(self)
}
#[inline]
fn read<const N: usize>(&self, offset: usize) -> &[u8; N] {
<[u8] as BufferRef>::read(self, offset)
}
#[inline]
fn read_slice(&self, offset: usize, val: &mut [u8]) {
<[u8] as BufferRef>::read_slice(self, offset, val)
}
}
impl BufferMut for [u8] {
#[inline]
fn capacity(&self) -> usize {
self.len()
}
#[inline]
fn write<const N: usize>(&mut self, offset: usize, val: &[u8; N]) {
use crate::utils::SliceExt;
*self.array_mut(offset) = *val;
}
#[inline]
fn write_slice(&mut self, offset: usize, val: &[u8]) {
self[offset..offset + val.len()].copy_from_slice(val);
}
}
impl BufferMut for [MaybeUninit<u8>] {
#[inline]
fn capacity(&self) -> usize {
self.len()
}
#[inline]
fn write<const N: usize>(&mut self, offset: usize, val: &[u8; N]) {
use crate::utils::SliceExt;
// SAFETY: &[u8; N] and &[MaybeUninit<u8>; N] have the same layout
let val: &[MaybeUninit<u8>; N] = unsafe { core::mem::transmute(val) };
*self.array_mut(offset) = *val;
}
#[inline]
fn write_slice(&mut self, offset: usize, val: &[u8]) {
// SAFETY: &[u8] and &[MaybeUninit<u8>] have the same layout
let val: &[MaybeUninit<u8>] = unsafe { core::mem::transmute(val) };
self[offset..offset + val.len()].copy_from_slice(val);
}
}
impl<const LEN: usize> BufferMut for [u8; LEN] {
#[inline]
fn capacity(&self) -> usize {
<[u8] as BufferMut>::capacity(self)
}
#[inline]
fn write<const N: usize>(&mut self, offset: usize, val: &[u8; N]) {
<[u8] as BufferMut>::write(self, offset, val);
}
#[inline]
fn write_slice(&mut self, offset: usize, val: &[u8]) {
<[u8] as BufferMut>::write_slice(self, offset, val)
}
}
impl<const LEN: usize> BufferMut for [MaybeUninit<u8>; LEN] {
#[inline]
fn capacity(&self) -> usize {
<[MaybeUninit<u8>] as BufferMut>::capacity(self)
}
#[inline]
fn write<const N: usize>(&mut self, offset: usize, val: &[u8; N]) {
<[MaybeUninit<u8>] as BufferMut>::write(self, offset, val)
}
#[inline]
fn write_slice(&mut self, offset: usize, val: &[u8]) {
<[MaybeUninit<u8>] as BufferMut>::write_slice(self, offset, val)
}
}
impl BufferMut for Vec<u8> {
#[inline]
fn capacity(&self) -> usize {
self.capacity()
}
#[inline]
fn write<const N: usize>(&mut self, offset: usize, val: &[u8; N]) {
<[u8] as BufferMut>::write(self, offset, val);
}
#[inline]
fn write_slice(&mut self, offset: usize, val: &[u8]) {
<[u8] as BufferMut>::write_slice(self, offset, val)
}
#[inline]
fn try_enlarge(&mut self, wanted: usize) -> core::result::Result<(), EnlargeError> {
use crate::utils::ByteVecExt;
self.try_extend(wanted).map_err(EnlargeError::from)
}
}
impl BufferMut for Vec<MaybeUninit<u8>> {
#[inline]
fn capacity(&self) -> usize {
self.capacity()
}
#[inline]
fn write<const N: usize>(&mut self, offset: usize, val: &[u8; N]) {
<[MaybeUninit<u8>] as BufferMut>::write(self, offset, val)
}
#[inline]
fn write_slice(&mut self, offset: usize, val: &[u8]) {
<[MaybeUninit<u8>] as BufferMut>::write_slice(self, offset, val)
}
#[inline]
fn try_enlarge(&mut self, wanted: usize) -> core::result::Result<(), EnlargeError> {
use crate::utils::ByteVecExt;
self.try_extend(wanted).map_err(EnlargeError::from)
}
}
macro_rules! impl_buffer_ref_for_wrappers {
($($type:ty),*) => {$(
impl<T: ?Sized + BufferRef> BufferRef for $type {
#[inline]
fn len(&self) -> usize {
T::len(self)
}
#[inline]
fn read<const N: usize>(&self, offset: usize) -> &[u8; N] {
T::read(self, offset)
}
#[inline]
fn read_slice(&self, offset: usize, val: &mut [u8]) {
T::read_slice(self, offset, val)
}
}
)*};
}
impl_buffer_ref_for_wrappers!(&T, &mut T, Box<T>, std::rc::Rc<T>, std::sync::Arc<T>);
macro_rules! impl_buffer_mut_for_wrappers {
($($type:ty),*) => {$(
impl<T: ?Sized + BufferMut> BufferMut for $type {
#[inline]
fn capacity(&self) -> usize {
T::capacity(self)
}
#[inline]
fn write<const N: usize>(&mut self, offset: usize, val: &[u8; N]) {
T::write(self, offset, val)
}
#[inline]
fn write_slice(&mut self, offset: usize, val: &[u8]) {
T::write_slice(self, offset, val)
}
#[inline]
fn try_enlarge(&mut self, wanted: usize) -> core::result::Result<(), EnlargeError> {
T::try_enlarge(self, wanted)
}
}
)*};
}
impl_buffer_mut_for_wrappers!(&mut T, Box<T>);
#[cfg(test)]
mod buffer_ref {
use super::BufferRef;
#[test]
fn array() {
let arr = [0, 1, 2, 3, 4, 5];
assert_eq!(BufferRef::len(&arr), 6);
assert_eq!(BufferRef::read(&arr, 3), &[3, 4]);
}
#[test]
fn vec() {
let vec = Vec::from([0, 1, 2, 3, 4, 5]);
assert_eq!(BufferRef::len(&vec), 6);
assert_eq!(BufferRef::read(&vec, 3), &[3, 4]);
}
}
#[cfg(test)]
mod buffer_mut {
use super::BufferMut;
use crate::core::EnlargeError;
#[test]
fn array() {
let mut arr = [0, 1, 2, 3, 4, 5];
assert_eq!(BufferMut::capacity(&arr), 6);
BufferMut::write(&mut arr, 3, &[9, 1]);
assert_eq!(arr, [0, 1, 2, 9, 1, 5]);
assert!(matches!(BufferMut::try_enlarge(&mut arr, 6), Ok(())));
assert!(matches!(
BufferMut::try_enlarge(&mut arr, 7),
Err(EnlargeError)
));
}
#[test]
fn vec() {
let mut vec = Vec::from([0, 1, 2, 3, 4, 5]);
assert_eq!(BufferMut::capacity(&vec), vec.capacity());
BufferMut::write(&mut vec, 3, &[9, 1]);
assert_eq!(vec, Vec::from([0, 1, 2, 9, 1, 5]));
assert!(matches!(BufferMut::try_enlarge(&mut vec, 100), Ok(())));
assert!(matches!(
BufferMut::try_enlarge(&mut vec, usize::MAX),
Err(EnlargeError)
));
}
}
#[cfg(test)]
mod error {
use super::Error;
#[test]
fn derived_traits() {
let err = Error::BufferTooSmall {
expected: 4,
found: 2,
};
{
use std::error::Error;
assert!(err.source().is_none());
}
assert_eq!(
format!("{}", err.clone()),
"could not read/write 4 bytes from/into 2 byte sized buffer"
);
assert_eq!(
format!("{:?}", err.clone()),
"BufferTooSmall { expected: 4, found: 2 }"
);
}
}
#[cfg(test)]
mod enlarge_error {
use super::EnlargeError;
#[test]
fn derived_traits() {
// can't construct a TryReserveError directly since TryReserveErrorKind is unstable,
// so provoke one via try_reserve
let try_reserve_error = {
let mut vec = Vec::<u8>::new();
vec.try_reserve(usize::MAX).err().unwrap()
};
let err = EnlargeError::from(try_reserve_error);
use std::error::Error;
assert!(err.source().is_none());
assert_eq!(format!("{}", err.clone()), "could not enlarge buffer");
assert_eq!(format!("{:?}", err.clone()), "EnlargeError");
}
}
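// Illustrative sketch tying the Error type to the buffer traits above (assumes the
// array ShaderType impls provided elsewhere in the crate): a fixed 4-byte slice
// cannot hold a 16-byte [f32; 4], so the write fails instead of panicking.
#[allow(dead_code)]
fn buffer_too_small_sketch() {
    use super::StorageBuffer;
    let mut backing = [0u8; 4];
    let mut buffer = StorageBuffer::new(&mut backing[..]);
    let err = buffer.write(&[1.0f32; 4]).unwrap_err();
    assert!(matches!(err, Error::BufferTooSmall { expected: 16, found: 4 }));
}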

vendor/encase/src/core/size_value.rs vendored Normal file

@@ -0,0 +1,77 @@
use core::num::NonZeroU64;
/// Helper type for size calculations
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct SizeValue(pub NonZeroU64);
impl SizeValue {
#[inline]
pub const fn new(val: u64) -> Self {
match val {
0 => panic!("Size can't be 0!"),
val => {
// SAFETY: This is safe since we checked that the value is not 0
Self(unsafe { NonZeroU64::new_unchecked(val) })
}
}
}
#[inline]
pub const fn from(val: NonZeroU64) -> Self {
Self(val)
}
#[inline]
pub const fn get(&self) -> u64 {
self.0.get()
}
#[inline]
pub const fn mul(self, rhs: u64) -> Self {
match self.get().checked_mul(rhs) {
None => panic!("Overflow occurred while multiplying size values!"),
Some(val) => {
// SAFETY: This is safe since we checked for overflow
Self(unsafe { NonZeroU64::new_unchecked(val) })
}
}
}
}
#[cfg(test)]
mod test {
use super::SizeValue;
#[test]
fn new() {
assert_eq!(4, SizeValue::new(4).get());
}
#[test]
#[should_panic]
fn new_panic() {
SizeValue::new(0);
}
#[test]
fn mul() {
assert_eq!(SizeValue::new(64), SizeValue::new(8).mul(8));
}
#[test]
#[should_panic]
fn mul_panic() {
SizeValue::new(8).mul(u64::MAX);
}
#[test]
fn derived_traits() {
let size = SizeValue::new(8);
#[allow(clippy::clone_on_copy)]
let size_clone = size.clone();
assert!(size == size_clone);
assert_eq!(format!("{size:?}"), "SizeValue(8)");
}
}
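// Illustrative sketch (crate-internal): SizeValue and AlignmentValue combine when
// laying out arrays, e.g. a 12-byte vec3<f32> element aligned to 16 bytes has a
// 16-byte stride, so four such elements occupy 64 bytes.
#[allow(dead_code)]
fn array_layout_sketch() {
    use super::AlignmentValue;
    let element_size = SizeValue::new(12);
    let element_alignment = AlignmentValue::new(16);
    let stride = element_alignment.round_up_size(element_size);
    assert_eq!(stride, SizeValue::new(16));
    assert_eq!(stride.mul(4), SizeValue::new(64));
}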

vendor/encase/src/core/traits.rs vendored Normal file

@@ -0,0 +1,267 @@
use std::num::NonZeroU64;
use super::{AlignmentValue, BufferMut, BufferRef, Reader, SizeValue, Writer};
const UNIFORM_MIN_ALIGNMENT: AlignmentValue = AlignmentValue::new(16);
pub struct Metadata<E> {
pub alignment: AlignmentValue,
pub has_uniform_min_alignment: bool,
pub min_size: SizeValue,
pub is_pod: bool,
pub extra: E,
}
impl Metadata<()> {
pub const fn from_alignment_and_size(alignment: u64, size: u64) -> Self {
Self {
alignment: AlignmentValue::new(alignment),
has_uniform_min_alignment: false,
min_size: SizeValue::new(size),
is_pod: false,
extra: (),
}
}
}
// using forget() avoids "destructors cannot be evaluated at compile-time" error
// track #![feature(const_precise_live_drops)] (https://github.com/rust-lang/rust/issues/73255)
impl<E> Metadata<E> {
#[inline]
pub const fn alignment(self) -> AlignmentValue {
let value = self.alignment;
core::mem::forget(self);
value
}
#[inline]
pub const fn uniform_min_alignment(self) -> Option<AlignmentValue> {
let value = self.has_uniform_min_alignment;
core::mem::forget(self);
match value {
true => Some(UNIFORM_MIN_ALIGNMENT),
false => None,
}
}
#[inline]
pub const fn min_size(self) -> SizeValue {
let value = self.min_size;
core::mem::forget(self);
value
}
#[inline]
pub const fn is_pod(self) -> bool {
let value = self.is_pod;
core::mem::forget(self);
value
}
#[inline]
pub const fn pod(mut self) -> Self {
self.is_pod = true;
self
}
#[inline]
pub const fn no_pod(mut self) -> Self {
self.is_pod = false;
self
}
}
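// Illustrative sketch: how a concrete METADATA constant is typically assembled,
// here for a hypothetical 16-byte type with 16-byte alignment whose in-memory
// layout already matches the shader layout (hence `.pod()`).
#[allow(dead_code)]
const EXAMPLE_METADATA: Metadata<()> = Metadata::from_alignment_and_size(16, 16).pod();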
/// Base trait for all [WGSL host-shareable types](https://gpuweb.github.io/gpuweb/wgsl/#host-shareable-types)
pub trait ShaderType {
#[doc(hidden)]
type ExtraMetadata;
#[doc(hidden)]
const METADATA: Metadata<Self::ExtraMetadata>;
/// Represents the minimum size of `Self` (equivalent to [GPUBufferBindingLayout.minBindingSize](https://gpuweb.github.io/gpuweb/#dom-gpubufferbindinglayout-minbindingsize))
///
/// For [WGSL fixed-footprint types](https://gpuweb.github.io/gpuweb/wgsl/#fixed-footprint-types)
/// it represents [WGSL Size](https://gpuweb.github.io/gpuweb/wgsl/#alignment-and-size)
/// (equivalent to [`ShaderSize::SHADER_SIZE`])
///
/// For
/// [WGSL runtime-sized arrays](https://gpuweb.github.io/gpuweb/wgsl/#runtime-sized) and
/// [WGSL structs containing runtime-sized arrays](https://gpuweb.github.io/gpuweb/wgsl/#struct-types)
/// (non fixed-footprint types)
/// this will be calculated by assuming the array has one element
#[inline]
fn min_size() -> NonZeroU64 {
Self::METADATA.min_size().0
}
/// Returns the size of `Self` at runtime
///
/// For [WGSL fixed-footprint types](https://gpuweb.github.io/gpuweb/wgsl/#fixed-footprint-types)
/// it's equivalent to [`Self::min_size`] and [`ShaderSize::SHADER_SIZE`]
#[inline]
fn size(&self) -> NonZeroU64 {
Self::METADATA.min_size().0
}
#[doc(hidden)]
const UNIFORM_COMPAT_ASSERT: fn() = || {};
/// Asserts that `Self` meets the requirements of the
/// [uniform address space restrictions on stored values](https://gpuweb.github.io/gpuweb/wgsl/#address-spaces-uniform) and the
/// [uniform address space layout constraints](https://gpuweb.github.io/gpuweb/wgsl/#address-space-layout-constraints)
///
/// # Examples
///
/// ## Array
///
/// Will panic since runtime-sized arrays are not compatible with the
/// uniform address space restrictions on stored values
///
/// ```should_panic
/// # use crate::encase::ShaderType;
/// <Vec<mint::Vector4<f32>>>::assert_uniform_compat();
/// ```
///
/// Will panic since the stride is 4 bytes
///
/// ```should_panic
/// # use crate::encase::ShaderType;
/// <[f32; 2]>::assert_uniform_compat();
/// ```
///
/// Will not panic since the stride is 16 bytes
///
/// ```
/// # use crate::encase::ShaderType;
/// # use mint;
/// <[mint::Vector4<f32>; 2]>::assert_uniform_compat();
/// ```
///
/// ## Struct
///
/// Will panic since runtime-sized arrays are not compatible with the
/// uniform address space restrictions on stored values
///
/// ```should_panic
/// # use crate::encase::ShaderType;
/// # use mint;
/// #[derive(ShaderType)]
/// struct Invalid {
/// #[size(runtime)]
/// vec: Vec<mint::Vector4<f32>>
/// }
/// Invalid::assert_uniform_compat();
/// ```
///
/// Will panic since the inner struct's size must be a multiple of 16
///
/// ```should_panic
/// # use crate::encase::ShaderType;
/// #[derive(ShaderType)]
/// struct S {
/// x: f32,
/// }
///
/// #[derive(ShaderType)]
/// struct Invalid {
/// a: f32,
/// b: S, // offset between fields 'a' and 'b' must be at least 16 (currently: 4)
/// }
/// Invalid::assert_uniform_compat();
/// ```
///
/// Will not panic (fixed via align attribute)
///
/// ```
/// # use crate::encase::ShaderType;
/// # #[derive(ShaderType)]
/// # struct S {
/// # x: f32,
/// # }
/// #[derive(ShaderType)]
/// struct Valid {
/// a: f32,
/// #[align(16)]
/// b: S,
/// }
/// Valid::assert_uniform_compat();
/// ```
///
/// Will not panic (fixed via size attribute)
///
/// ```
/// # use crate::encase::ShaderType;
/// # #[derive(ShaderType)]
/// # struct S {
/// # x: f32,
/// # }
/// #[derive(ShaderType)]
/// struct Valid {
/// #[size(16)]
/// a: f32,
/// b: S,
/// }
/// Valid::assert_uniform_compat();
/// ```
#[inline]
fn assert_uniform_compat() {
Self::UNIFORM_COMPAT_ASSERT();
}
// fn assert_can_write_into()
// where
// Self: WriteInto,
// {
// }
// fn assert_can_read_from()
// where
// Self: ReadFrom,
// {
// }
// fn assert_can_create_from()
// where
// Self: CreateFrom,
// {
// }
}
/// Trait implemented for all [WGSL fixed-footprint types](https://gpuweb.github.io/gpuweb/wgsl/#fixed-footprint-types)
pub trait ShaderSize: ShaderType {
/// Represents [WGSL Size](https://gpuweb.github.io/gpuweb/wgsl/#alignment-and-size) (equivalent to [`ShaderType::min_size`])
const SHADER_SIZE: NonZeroU64 = Self::METADATA.min_size().0;
}
/// Trait implemented for
/// [WGSL runtime-sized arrays](https://gpuweb.github.io/gpuweb/wgsl/#runtime-sized) and
/// [WGSL structs containing runtime-sized arrays](https://gpuweb.github.io/gpuweb/wgsl/#struct-types)
/// (non fixed-footprint types)
pub trait CalculateSizeFor {
/// Returns the size of `Self` assuming the (contained) runtime-sized array has `nr_of_el` elements
fn calculate_size_for(nr_of_el: u64) -> NonZeroU64;
}
#[allow(clippy::len_without_is_empty)]
pub trait RuntimeSizedArray {
fn len(&self) -> usize;
}
pub trait WriteInto {
fn write_into<B>(&self, writer: &mut Writer<B>)
where
B: BufferMut;
}
pub trait ReadFrom {
fn read_from<B>(&mut self, reader: &mut Reader<B>)
where
B: BufferRef;
}
pub trait CreateFrom: Sized {
fn create_from<B>(reader: &mut Reader<B>) -> Self
where
B: BufferRef;
}
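// Illustrative sketch (not part of the vendored file): roughly what an impl of the
// core trait looks like for a simple 16-byte struct with a single vec4<f32>-like
// field. Real code would normally use #[derive(ShaderType)], which also generates
// the WriteInto/ReadFrom/CreateFrom impls.
#[allow(dead_code)]
struct ExampleParams {
    color: [f32; 4],
}

impl ShaderType for ExampleParams {
    type ExtraMetadata = ();
    // a single vec4<f32>-like field: 16-byte alignment, 16-byte size
    const METADATA: Metadata<()> = Metadata::from_alignment_and_size(16, 16);
}

impl ShaderSize for ExampleParams {}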