Vendor dependencies for 0.3.0 release

This commit is contained in:
2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

View File

@@ -0,0 +1,152 @@
use super::SizeValue;
use core::num::NonZeroU64;
/// Helper type for alignment calculations
///
/// Wraps a [`NonZeroU64`] that is guaranteed to be a power of 2.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct AlignmentValue(NonZeroU64);

impl AlignmentValue {
    /// Creates a new alignment from `val`.
    ///
    /// # Panics
    ///
    /// Panics if `val` is not a power of 2.
    pub const fn new(val: u64) -> Self {
        if !val.is_power_of_two() {
            panic!("Alignment must be a power of 2!");
        }
        // A power of 2 is never 0, so the `None` arm is unreachable; using
        // `NonZeroU64::new` here avoids an `unsafe` block entirely.
        match NonZeroU64::new(val) {
            Some(val) => Self(val),
            None => unreachable!(),
        }
    }

    /// Returns an alignment that is the smallest power of two greater than the passed in `size`
    ///
    /// # Panics
    ///
    /// Panics if the next power of 2 does not fit in a `u64`.
    #[inline]
    pub const fn from_next_power_of_two_size(size: SizeValue) -> Self {
        match size.get().checked_next_power_of_two() {
            None => panic!("Overflow occurred while getting the next power of 2!"),
            // `checked_next_power_of_two` never returns `Some(0)`, so the
            // inner `None` arm is unreachable (and no `unsafe` is needed).
            Some(val) => match NonZeroU64::new(val) {
                Some(val) => Self(val),
                None => unreachable!(),
            },
        }
    }

    /// Returns the alignment as a plain `u64`.
    #[inline]
    pub const fn get(&self) -> u64 {
        self.0.get()
    }

    /// Returns the max alignment from an array of alignments
    ///
    /// # Panics
    ///
    /// Panics if `N` is 0 (an empty set has no maximum).
    pub const fn max<const N: usize>(input: [AlignmentValue; N]) -> AlignmentValue {
        // Manual `while` loop: iterators are not usable in `const fn`s.
        let mut max = input[0];
        let mut i = 1;
        while i < N {
            if input[i].get() > max.get() {
                max = input[i];
            }
            i += 1;
        }
        max
    }

    /// Returns true if `n` is a multiple of this alignment
    #[inline]
    pub const fn is_aligned(&self, n: u64) -> bool {
        n % self.get() == 0
    }

    /// Returns the amount of padding needed so that `n + padding` will be a multiple of this alignment
    #[inline]
    pub const fn padding_needed_for(&self, n: u64) -> u64 {
        let r = n % self.get();
        if r > 0 {
            self.get() - r
        } else {
            0
        }
    }

    /// Will round up the given `n` so that the returned value will be a multiple of this alignment
    #[inline]
    pub const fn round_up(&self, n: u64) -> u64 {
        n + self.padding_needed_for(n)
    }

    /// Will round up the given `n` so that the returned value will be a multiple of this alignment
    #[inline]
    pub const fn round_up_size(&self, n: SizeValue) -> SizeValue {
        SizeValue::new(self.round_up(n.get()))
    }
}
#[cfg(test)]
mod test {
    use super::AlignmentValue;
    #[test]
    fn new() {
        assert_eq!(4, AlignmentValue::new(4).get());
    }
    // 3 is not a power of two, so construction must panic.
    #[test]
    #[should_panic]
    fn new_panic() {
        AlignmentValue::new(3);
    }
    #[test]
    fn from_next_power_of_two_size() {
        assert_eq!(
            AlignmentValue::new(8),
            AlignmentValue::from_next_power_of_two_size(super::SizeValue::new(7))
        );
    }
    // `u64::MAX` has no next power of two within `u64`.
    #[test]
    #[should_panic]
    fn from_next_power_of_two_size_panic() {
        AlignmentValue::from_next_power_of_two_size(super::SizeValue::new(u64::MAX));
    }
    #[test]
    fn max() {
        assert_eq!(
            AlignmentValue::new(32),
            AlignmentValue::max([
                AlignmentValue::new(2),
                AlignmentValue::new(8),
                AlignmentValue::new(32)
            ])
        );
    }
    #[test]
    fn is_aligned() {
        assert!(AlignmentValue::new(8).is_aligned(32));
        assert!(!AlignmentValue::new(8).is_aligned(9));
    }
    #[test]
    fn padding_needed_for() {
        assert_eq!(1, AlignmentValue::new(8).padding_needed_for(7));
        // 9 rounds up to 16 with an alignment of 8.
        assert_eq!(16 - 9, AlignmentValue::new(8).padding_needed_for(9));
    }
    #[test]
    fn round_up() {
        assert_eq!(24, AlignmentValue::new(8).round_up(20));
        assert_eq!(
            super::SizeValue::new(16),
            AlignmentValue::new(16).round_up_size(super::SizeValue::new(7))
        );
    }
    // Exercises the derived `Clone`, `PartialEq` and `Debug` impls.
    #[test]
    fn derived_traits() {
        let alignment = AlignmentValue::new(8);
        #[allow(clippy::clone_on_copy)]
        let alignment_clone = alignment.clone();
        assert!(alignment == alignment_clone);
        assert_eq!(format!("{alignment:?}"), "AlignmentValue(8)");
    }
}

317
vendor/encase/src/core/buffers.rs vendored Normal file
View File

@@ -0,0 +1,317 @@
use super::{
AlignmentValue, BufferMut, BufferRef, CreateFrom, ReadFrom, Reader, Result, ShaderType,
WriteInto, Writer,
};
/// Storage buffer wrapper facilitating RW operations
pub struct StorageBuffer<B> {
    inner: B,
}

impl<B> StorageBuffer<B> {
    /// Wraps the given buffer.
    pub const fn new(buf: B) -> Self {
        Self { inner: buf }
    }

    /// Consumes the wrapper and returns the underlying buffer.
    pub fn into_inner(self) -> B {
        self.inner
    }
}

impl<B> From<B> for StorageBuffer<B> {
    fn from(buf: B) -> Self {
        Self { inner: buf }
    }
}

impl<B> AsRef<B> for StorageBuffer<B> {
    fn as_ref(&self) -> &B {
        &self.inner
    }
}

impl<B> AsMut<B> for StorageBuffer<B> {
    fn as_mut(&mut self) -> &mut B {
        &mut self.inner
    }
}
impl<B: BufferMut> StorageBuffer<B> {
    /// Serializes `value` into the buffer at offset 0.
    pub fn write<T>(&mut self, value: &T) -> Result<()>
    where
        T: ?Sized + ShaderType + WriteInto,
    {
        value.write_into(&mut Writer::new(value, &mut self.inner, 0)?);
        Ok(())
    }
}

impl<B: BufferRef> StorageBuffer<B> {
    /// Deserializes from the buffer at offset 0 into `value`.
    pub fn read<T>(&self, value: &mut T) -> Result<()>
    where
        T: ?Sized + ShaderType + ReadFrom,
    {
        let mut reader = Reader::new::<T>(&self.inner, 0)?;
        value.read_from(&mut reader);
        Ok(())
    }

    /// Deserializes a new `T` from the buffer at offset 0.
    pub fn create<T>(&self) -> Result<T>
    where
        T: ShaderType + CreateFrom,
    {
        let mut reader = Reader::new::<T>(&self.inner, 0)?;
        Ok(T::create_from(&mut reader))
    }
}
/// Uniform buffer wrapper facilitating RW operations
pub struct UniformBuffer<B> {
    inner: StorageBuffer<B>,
}

impl<B> UniformBuffer<B> {
    /// Wraps the given buffer.
    pub const fn new(buf: B) -> Self {
        Self {
            inner: StorageBuffer::new(buf),
        }
    }

    /// Consumes the wrapper and returns the underlying buffer.
    pub fn into_inner(self) -> B {
        self.inner.into_inner()
    }
}

impl<B> From<B> for UniformBuffer<B> {
    fn from(buf: B) -> Self {
        Self::new(buf)
    }
}

impl<B> AsRef<B> for UniformBuffer<B> {
    fn as_ref(&self) -> &B {
        self.inner.as_ref()
    }
}

impl<B> AsMut<B> for UniformBuffer<B> {
    fn as_mut(&mut self) -> &mut B {
        self.inner.as_mut()
    }
}

impl<B: BufferMut> UniformBuffer<B> {
    /// Serializes `value` into the buffer at offset 0, after asserting that
    /// `T` satisfies the uniform address space layout constraints.
    pub fn write<T>(&mut self, value: &T) -> Result<()>
    where
        T: ?Sized + ShaderType + WriteInto,
    {
        T::assert_uniform_compat();
        self.inner.write(value)
    }
}

impl<B: BufferRef> UniformBuffer<B> {
    /// Deserializes from the buffer at offset 0 into `value`, after asserting
    /// uniform layout compatibility of `T`.
    pub fn read<T>(&self, value: &mut T) -> Result<()>
    where
        T: ?Sized + ShaderType + ReadFrom,
    {
        T::assert_uniform_compat();
        self.inner.read(value)
    }

    /// Deserializes a new `T` from the buffer at offset 0, after asserting
    /// uniform layout compatibility of `T`.
    pub fn create<T>(&self) -> Result<T>
    where
        T: ShaderType + CreateFrom,
    {
        T::assert_uniform_compat();
        self.inner.create()
    }
}
/// Dynamic storage buffer wrapper facilitating RW operations
pub struct DynamicStorageBuffer<B> {
    inner: B,
    alignment: AlignmentValue,
    offset: usize,
}

impl<B> DynamicStorageBuffer<B> {
    /// Creates a new dynamic storage buffer wrapper with an alignment of 256
    /// (default alignment in the WebGPU spec).
    pub const fn new(buf: B) -> Self {
        Self::new_with_alignment(buf, 256)
    }

    /// Creates a new dynamic storage buffer wrapper with a given alignment.
    /// # Panics
    ///
    /// - if `alignment` is not a power of two.
    /// - if `alignment` is less than 32 (min alignment imposed by the WebGPU spec).
    pub const fn new_with_alignment(buf: B, alignment: u64) -> Self {
        if alignment < 32 {
            panic!("Alignment must be at least 32!");
        }
        // `AlignmentValue::new` enforces the power-of-two requirement.
        Self {
            inner: buf,
            alignment: AlignmentValue::new(alignment),
            offset: 0,
        }
    }

    /// Moves the internal cursor to `offset`.
    ///
    /// # Panics
    ///
    /// Panics if `offset` is not a multiple of the configured alignment.
    pub fn set_offset(&mut self, offset: u64) {
        assert!(
            self.alignment.is_aligned(offset),
            "offset of {} bytes is not aligned to alignment of {} bytes",
            offset,
            self.alignment.get()
        );
        self.offset = offset as usize;
    }

    /// Consumes the wrapper and returns the underlying buffer.
    pub fn into_inner(self) -> B {
        self.inner
    }
}
impl<B> From<B> for DynamicStorageBuffer<B> {
    fn from(buf: B) -> Self {
        Self::new(buf)
    }
}

impl<B> AsRef<B> for DynamicStorageBuffer<B> {
    fn as_ref(&self) -> &B {
        &self.inner
    }
}

impl<B> AsMut<B> for DynamicStorageBuffer<B> {
    fn as_mut(&mut self) -> &mut B {
        &mut self.inner
    }
}

impl<B: BufferMut> DynamicStorageBuffer<B> {
    /// Serializes `value` at the current offset and advances the offset by the
    /// value's size rounded up to the dynamic-offset alignment. Returns the
    /// offset `value` was written at.
    pub fn write<T>(&mut self, value: &T) -> Result<u64>
    where
        T: ?Sized + ShaderType + WriteInto,
    {
        let start = self.offset;
        let mut writer = Writer::new(value, &mut self.inner, start)?;
        value.write_into(&mut writer);
        self.offset += self.alignment.round_up(value.size().get()) as usize;
        Ok(start as u64)
    }
}

impl<B: BufferRef> DynamicStorageBuffer<B> {
    /// Deserializes into `value` from the current offset, then advances the
    /// offset past the value (rounded up to the alignment).
    pub fn read<T>(&mut self, value: &mut T) -> Result<()>
    where
        T: ?Sized + ShaderType + ReadFrom,
    {
        let mut reader = Reader::new::<T>(&self.inner, self.offset)?;
        value.read_from(&mut reader);
        self.offset += self.alignment.round_up(value.size().get()) as usize;
        Ok(())
    }

    /// Deserializes a new `T` from the current offset, then advances the
    /// offset past the value (rounded up to the alignment).
    pub fn create<T>(&mut self) -> Result<T>
    where
        T: ShaderType + CreateFrom,
    {
        let mut reader = Reader::new::<T>(&self.inner, self.offset)?;
        let value = T::create_from(&mut reader);
        self.offset += self.alignment.round_up(value.size().get()) as usize;
        Ok(value)
    }
}
/// Dynamic uniform buffer wrapper facilitating RW operations
pub struct DynamicUniformBuffer<B> {
    inner: DynamicStorageBuffer<B>,
}

impl<B> DynamicUniformBuffer<B> {
    /// Creates a new dynamic uniform buffer wrapper with an alignment of 256
    /// (default alignment in the WebGPU spec).
    pub const fn new(buf: B) -> Self {
        Self {
            inner: DynamicStorageBuffer::new(buf),
        }
    }

    /// Creates a new dynamic uniform buffer wrapper with a given alignment.
    /// # Panics
    ///
    /// - if `alignment` is not a power of two.
    /// - if `alignment` is less than 32 (min alignment imposed by the WebGPU spec).
    pub const fn new_with_alignment(buf: B, alignment: u64) -> Self {
        Self {
            inner: DynamicStorageBuffer::new_with_alignment(buf, alignment),
        }
    }

    /// Moves the internal cursor to `offset` (must be aligned).
    pub fn set_offset(&mut self, offset: u64) {
        self.inner.set_offset(offset);
    }

    /// Consumes the wrapper and returns the underlying buffer.
    pub fn into_inner(self) -> B {
        self.inner.into_inner()
    }
}

impl<B> From<B> for DynamicUniformBuffer<B> {
    fn from(buf: B) -> Self {
        Self::new(buf)
    }
}

impl<B> AsRef<B> for DynamicUniformBuffer<B> {
    fn as_ref(&self) -> &B {
        self.inner.as_ref()
    }
}

impl<B> AsMut<B> for DynamicUniformBuffer<B> {
    fn as_mut(&mut self) -> &mut B {
        self.inner.as_mut()
    }
}

impl<B: BufferMut> DynamicUniformBuffer<B> {
    /// Serializes `value` at the current offset (asserting uniform layout
    /// compatibility first) and returns the offset it was written at.
    pub fn write<T>(&mut self, value: &T) -> Result<u64>
    where
        T: ?Sized + ShaderType + WriteInto,
    {
        T::assert_uniform_compat();
        self.inner.write(value)
    }
}

impl<B: BufferRef> DynamicUniformBuffer<B> {
    /// Deserializes into `value` from the current offset, asserting uniform
    /// layout compatibility first.
    pub fn read<T>(&mut self, value: &mut T) -> Result<()>
    where
        T: ?Sized + ShaderType + ReadFrom,
    {
        T::assert_uniform_compat();
        self.inner.read(value)
    }

    /// Deserializes a new `T` from the current offset, asserting uniform
    /// layout compatibility first.
    pub fn create<T>(&mut self) -> Result<T>
    where
        T: ShaderType + CreateFrom,
    {
        T::assert_uniform_compat();
        self.inner.create()
    }
}

11
vendor/encase/src/core/mod.rs vendored Normal file
View File

@@ -0,0 +1,11 @@
// Submodules making up the core (de)serialization machinery.
mod alignment_value; // `AlignmentValue`: power-of-two alignment helper
mod buffers; // storage/uniform buffer wrappers
mod rw; // `Reader`/`Writer` cursors plus `BufferRef`/`BufferMut`
mod size_value; // `SizeValue`: non-zero size helper
mod traits; // `ShaderType` and companion traits
// Re-export everything flat under this module.
pub use alignment_value::*;
pub use buffers::*;
pub use rw::*;
pub use size_value::*;
pub use traits::*;

541
vendor/encase/src/core/rw.rs vendored Normal file
View File

@@ -0,0 +1,541 @@
use super::ShaderType;
use core::mem::MaybeUninit;
use thiserror::Error;
/// Error returned when a buffer is too small for the requested read/write.
#[derive(Clone, Copy, Debug, Error)]
pub enum Error {
    #[error("could not read/write {expected} bytes from/into {found} byte sized buffer")]
    BufferTooSmall { expected: u64, found: u64 },
}
/// Convenience alias for results produced by this module.
pub type Result<T> = core::result::Result<T, Error>;
/// Mutable state shared with `WriteInto` implementations while writing.
pub struct WriteContext {
    /// length of the contained runtime sized array
    ///
    /// used by the derive macro
    pub rts_array_length: Option<u32>,
}
pub struct Writer<B: BufferMut> {
pub ctx: WriteContext,
cursor: Cursor<B>,
}
impl<B: BufferMut> Writer<B> {
#[inline]
pub fn new<T: ?Sized + ShaderType>(data: &T, buffer: B, offset: usize) -> Result<Self> {
let mut cursor = Cursor::new(buffer, offset);
let size = data.size().get();
if cursor.try_enlarge(offset + size as usize).is_err() {
Err(Error::BufferTooSmall {
expected: size,
found: cursor.capacity() as u64,
})
} else {
Ok(Self {
ctx: WriteContext {
rts_array_length: None,
},
cursor,
})
}
}
#[inline]
pub fn advance(&mut self, amount: usize) {
self.cursor.advance(amount);
}
#[inline]
pub fn write<const N: usize>(&mut self, val: &[u8; N]) {
self.cursor.write(val);
}
#[inline]
pub fn write_slice(&mut self, val: &[u8]) {
self.cursor.write_slice(val)
}
}
/// Mutable state shared with `ReadFrom`/`CreateFrom` implementations while reading.
pub struct ReadContext {
    /// max elements to read into the contained runtime sized array
    ///
    /// used by the derive macro
    pub rts_array_max_el_to_read: Option<u32>,
}
pub struct Reader<B: BufferRef> {
pub ctx: ReadContext,
cursor: Cursor<B>,
}
impl<B: BufferRef> Reader<B> {
#[inline]
pub fn new<T: ?Sized + ShaderType>(buffer: B, offset: usize) -> Result<Self> {
let cursor = Cursor::new(buffer, offset);
if cursor.remaining() < T::min_size().get() as usize {
Err(Error::BufferTooSmall {
expected: T::min_size().get(),
found: cursor.remaining() as u64,
})
} else {
Ok(Self {
ctx: ReadContext {
rts_array_max_el_to_read: None,
},
cursor,
})
}
}
#[inline]
pub fn advance(&mut self, amount: usize) {
self.cursor.advance(amount);
}
#[inline]
pub fn read<const N: usize>(&mut self) -> &[u8; N] {
self.cursor.read()
}
#[inline]
pub fn read_slice(&mut self, val: &mut [u8]) {
self.cursor.read_slice(val)
}
#[inline]
pub fn remaining(&self) -> usize {
self.cursor.remaining()
}
}
/// A position-tracking wrapper over a byte buffer.
struct Cursor<B> {
    buffer: B,
    pos: usize,
}

impl<B> Cursor<B> {
    /// Creates a cursor over `buffer`, positioned at `offset`.
    #[inline]
    fn new(buffer: B, offset: usize) -> Self {
        Self {
            pos: offset,
            buffer,
        }
    }

    /// Moves the position forward by `amount` bytes.
    #[inline]
    fn advance(&mut self, amount: usize) {
        self.pos += amount;
    }
}
impl<B: BufferRef> Cursor<B> {
    /// Bytes left between the current position and the end of the buffer.
    #[inline]
    fn remaining(&self) -> usize {
        self.buffer.len().saturating_sub(self.pos)
    }

    /// Reads `N` bytes at the current position, then advances past them.
    #[inline]
    fn read<const N: usize>(&mut self) -> &[u8; N] {
        let out = self.buffer.read(self.pos);
        self.pos += N;
        out
    }

    /// Fills `val` from the current position, then advances past it.
    #[inline]
    fn read_slice(&mut self, val: &mut [u8]) {
        self.buffer.read_slice(self.pos, val);
        self.pos += val.len();
    }
}

impl<B: BufferMut> Cursor<B> {
    /// Writable bytes between the current position and the buffer's capacity.
    #[inline]
    fn capacity(&self) -> usize {
        self.buffer.capacity().saturating_sub(self.pos)
    }

    /// Writes `val` at the current position, then advances past it.
    #[inline]
    fn write<const N: usize>(&mut self, val: &[u8; N]) {
        self.buffer.write(self.pos, val);
        self.pos += N;
    }

    /// Writes the slice `val` at the current position, then advances past it.
    #[inline]
    fn write_slice(&mut self, val: &[u8]) {
        self.buffer.write_slice(self.pos, val);
        self.pos += val.len();
    }

    /// Asks the underlying buffer to grow to at least `wanted` bytes.
    #[inline]
    fn try_enlarge(&mut self, wanted: usize) -> core::result::Result<(), EnlargeError> {
        self.buffer.try_enlarge(wanted)
    }
}
/// Error returned when a buffer could not be grown to the required size.
#[derive(Clone, Copy, Debug, Error)]
#[error("could not enlarge buffer")]
pub struct EnlargeError;
impl From<std::collections::TryReserveError> for EnlargeError {
    fn from(_: std::collections::TryReserveError) -> Self {
        Self
    }
}
/// Read access to an underlying byte buffer.
#[allow(clippy::len_without_is_empty)]
pub trait BufferRef {
    /// Total length of the buffer in bytes.
    fn len(&self) -> usize;
    /// Returns a reference to the `N` bytes starting at `offset`.
    fn read<const N: usize>(&self, offset: usize) -> &[u8; N];
    /// Copies `val.len()` bytes starting at `offset` into `val`.
    fn read_slice(&self, offset: usize, val: &mut [u8]);
}
/// Write access to an underlying byte buffer.
pub trait BufferMut {
    /// Number of bytes that can be written without growing the buffer.
    fn capacity(&self) -> usize;
    /// Writes the `N` bytes of `val` at `offset`.
    fn write<const N: usize>(&mut self, offset: usize, val: &[u8; N]);
    /// Writes all of `val` at `offset`.
    fn write_slice(&mut self, offset: usize, val: &[u8]);
    /// Tries to ensure the buffer can hold at least `wanted` bytes.
    ///
    /// The default implementation cannot grow anything; it merely checks the
    /// existing capacity. Growable buffers (e.g. `Vec`) override this.
    #[inline]
    fn try_enlarge(&mut self, wanted: usize) -> core::result::Result<(), EnlargeError> {
        if wanted > self.capacity() {
            Err(EnlargeError)
        } else {
            Ok(())
        }
    }
}
impl BufferRef for [u8] {
    fn len(&self) -> usize {
        // Resolves to the inherent `<[u8]>::len` (inherent methods take
        // precedence over trait methods), so this does not recurse.
        self.len()
    }
    #[inline]
    fn read<const N: usize>(&self, offset: usize) -> &[u8; N] {
        use crate::utils::SliceExt;
        self.array(offset)
    }
    #[inline]
    fn read_slice(&self, offset: usize, val: &mut [u8]) {
        val.copy_from_slice(&self[offset..offset + val.len()])
    }
}
// The impls below simply delegate to the `[u8]` impl above via deref/unsize.
impl<const LEN: usize> BufferRef for [u8; LEN] {
    #[inline]
    fn len(&self) -> usize {
        <[u8] as BufferRef>::len(self)
    }
    #[inline]
    fn read<const N: usize>(&self, offset: usize) -> &[u8; N] {
        <[u8] as BufferRef>::read(self, offset)
    }
    #[inline]
    fn read_slice(&self, offset: usize, val: &mut [u8]) {
        <[u8] as BufferRef>::read_slice(self, offset, val)
    }
}
impl BufferRef for Vec<u8> {
    #[inline]
    fn len(&self) -> usize {
        <[u8] as BufferRef>::len(self)
    }
    #[inline]
    fn read<const N: usize>(&self, offset: usize) -> &[u8; N] {
        <[u8] as BufferRef>::read(self, offset)
    }
    #[inline]
    fn read_slice(&self, offset: usize, val: &mut [u8]) {
        <[u8] as BufferRef>::read_slice(self, offset, val)
    }
}
impl BufferMut for [u8] {
    #[inline]
    fn capacity(&self) -> usize {
        // A plain slice cannot grow, so capacity == length.
        self.len()
    }
    #[inline]
    fn write<const N: usize>(&mut self, offset: usize, val: &[u8; N]) {
        use crate::utils::SliceExt;
        *self.array_mut(offset) = *val;
    }
    #[inline]
    fn write_slice(&mut self, offset: usize, val: &[u8]) {
        self[offset..offset + val.len()].copy_from_slice(val);
    }
}
// Writing initialized bytes into `MaybeUninit<u8>` storage is always sound;
// these impls allow serializing directly into uninitialized memory.
impl BufferMut for [MaybeUninit<u8>] {
    #[inline]
    fn capacity(&self) -> usize {
        self.len()
    }
    #[inline]
    fn write<const N: usize>(&mut self, offset: usize, val: &[u8; N]) {
        use crate::utils::SliceExt;
        // SAFETY: &[u8; N] and &[MaybeUninit<u8>; N] have the same layout
        let val: &[MaybeUninit<u8>; N] = unsafe { core::mem::transmute(val) };
        *self.array_mut(offset) = *val;
    }
    #[inline]
    fn write_slice(&mut self, offset: usize, val: &[u8]) {
        // SAFETY: &[u8] and &[MaybeUninit<u8>] have the same layout
        let val: &[MaybeUninit<u8>] = unsafe { core::mem::transmute(val) };
        self[offset..offset + val.len()].copy_from_slice(val);
    }
}
// Fixed-size arrays delegate to the unsized-slice impls above.
impl<const LEN: usize> BufferMut for [u8; LEN] {
    #[inline]
    fn capacity(&self) -> usize {
        <[u8] as BufferMut>::capacity(self)
    }
    #[inline]
    fn write<const N: usize>(&mut self, offset: usize, val: &[u8; N]) {
        <[u8] as BufferMut>::write(self, offset, val);
    }
    #[inline]
    fn write_slice(&mut self, offset: usize, val: &[u8]) {
        <[u8] as BufferMut>::write_slice(self, offset, val)
    }
}
impl<const LEN: usize> BufferMut for [MaybeUninit<u8>; LEN] {
    #[inline]
    fn capacity(&self) -> usize {
        <[MaybeUninit<u8>] as BufferMut>::capacity(self)
    }
    #[inline]
    fn write<const N: usize>(&mut self, offset: usize, val: &[u8; N]) {
        <[MaybeUninit<u8>] as BufferMut>::write(self, offset, val)
    }
    #[inline]
    fn write_slice(&mut self, offset: usize, val: &[u8]) {
        <[MaybeUninit<u8>] as BufferMut>::write_slice(self, offset, val)
    }
}
// `Vec`s can actually grow, so they additionally override `try_enlarge`.
impl BufferMut for Vec<u8> {
    #[inline]
    fn capacity(&self) -> usize {
        // Inherent `Vec::capacity` (allocated space), not the trait method.
        self.capacity()
    }
    #[inline]
    fn write<const N: usize>(&mut self, offset: usize, val: &[u8; N]) {
        <[u8] as BufferMut>::write(self, offset, val);
    }
    #[inline]
    fn write_slice(&mut self, offset: usize, val: &[u8]) {
        <[u8] as BufferMut>::write_slice(self, offset, val)
    }
    #[inline]
    fn try_enlarge(&mut self, wanted: usize) -> core::result::Result<(), EnlargeError> {
        use crate::utils::ByteVecExt;
        self.try_extend(wanted).map_err(EnlargeError::from)
    }
}
impl BufferMut for Vec<MaybeUninit<u8>> {
    #[inline]
    fn capacity(&self) -> usize {
        self.capacity()
    }
    #[inline]
    fn write<const N: usize>(&mut self, offset: usize, val: &[u8; N]) {
        <[MaybeUninit<u8>] as BufferMut>::write(self, offset, val)
    }
    #[inline]
    fn write_slice(&mut self, offset: usize, val: &[u8]) {
        <[MaybeUninit<u8>] as BufferMut>::write_slice(self, offset, val)
    }
    #[inline]
    fn try_enlarge(&mut self, wanted: usize) -> core::result::Result<(), EnlargeError> {
        use crate::utils::ByteVecExt;
        self.try_extend(wanted).map_err(EnlargeError::from)
    }
}
// Forwards `BufferRef` through reference and shared-pointer wrappers so that
// e.g. `&[u8]`, `Box<Vec<u8>>`, `Rc<...>` or `Arc<...>` can serve as buffers.
macro_rules! impl_buffer_ref_for_wrappers {
    ($($type:ty),*) => {$(
        impl<T: ?Sized + BufferRef> BufferRef for $type {
            #[inline]
            fn len(&self) -> usize {
                T::len(self)
            }
            #[inline]
            fn read<const N: usize>(&self, offset: usize) -> &[u8; N] {
                T::read(self, offset)
            }
            #[inline]
            fn read_slice(&self, offset: usize, val: &mut [u8]) {
                T::read_slice(self, offset, val)
            }
        }
    )*};
}
impl_buffer_ref_for_wrappers!(&T, &mut T, Box<T>, std::rc::Rc<T>, std::sync::Arc<T>);
// Same forwarding for `BufferMut`; only wrappers granting unique mutable
// access are listed since mutation is required.
macro_rules! impl_buffer_mut_for_wrappers {
    ($($type:ty),*) => {$(
        impl<T: ?Sized + BufferMut> BufferMut for $type {
            #[inline]
            fn capacity(&self) -> usize {
                T::capacity(self)
            }
            #[inline]
            fn write<const N: usize>(&mut self, offset: usize, val: &[u8; N]) {
                T::write(self, offset, val)
            }
            #[inline]
            fn write_slice(&mut self, offset: usize, val: &[u8]) {
                T::write_slice(self, offset, val)
            }
            #[inline]
            fn try_enlarge(&mut self, wanted: usize) -> core::result::Result<(), EnlargeError> {
                T::try_enlarge(self, wanted)
            }
        }
    )*};
}
impl_buffer_mut_for_wrappers!(&mut T, Box<T>);
#[cfg(test)]
mod buffer_ref {
    use super::BufferRef;
    #[test]
    fn array() {
        let arr = [0, 1, 2, 3, 4, 5];
        assert_eq!(BufferRef::len(&arr), 6);
        assert_eq!(BufferRef::read(&arr, 3), &[3, 4]);
    }
    #[test]
    fn vec() {
        let vec = Vec::from([0, 1, 2, 3, 4, 5]);
        assert_eq!(BufferRef::len(&vec), 6);
        assert_eq!(BufferRef::read(&vec, 3), &[3, 4]);
    }
}
#[cfg(test)]
mod buffer_mut {
    use super::BufferMut;
    use crate::core::EnlargeError;
    #[test]
    fn array() {
        let mut arr = [0, 1, 2, 3, 4, 5];
        assert_eq!(BufferMut::capacity(&arr), 6);
        BufferMut::write(&mut arr, 3, &[9, 1]);
        assert_eq!(arr, [0, 1, 2, 9, 1, 5]);
        // Fixed-size arrays can't grow: enlarging within bounds succeeds,
        // past the end fails.
        assert!(matches!(BufferMut::try_enlarge(&mut arr, 6), Ok(())));
        assert!(matches!(
            BufferMut::try_enlarge(&mut arr, 7),
            Err(EnlargeError)
        ));
    }
    #[test]
    fn vec() {
        let mut vec = Vec::from([0, 1, 2, 3, 4, 5]);
        assert_eq!(BufferMut::capacity(&vec), vec.capacity());
        BufferMut::write(&mut vec, 3, &[9, 1]);
        assert_eq!(vec, Vec::from([0, 1, 2, 9, 1, 5]));
        // `Vec` can grow on demand, but an absurd request must fail cleanly.
        assert!(matches!(BufferMut::try_enlarge(&mut vec, 100), Ok(())));
        assert!(matches!(
            BufferMut::try_enlarge(&mut vec, usize::MAX),
            Err(EnlargeError)
        ));
    }
}
#[cfg(test)]
mod error {
    use super::Error;
    #[test]
    fn derived_traits() {
        let err = Error::BufferTooSmall {
            expected: 4,
            found: 2,
        };
        {
            use std::error::Error;
            assert!(err.source().is_none());
        }
        assert_eq!(
            format!("{}", err.clone()),
            "could not read/write 4 bytes from/into 2 byte sized buffer"
        );
        assert_eq!(
            format!("{:?}", err.clone()),
            "BufferTooSmall { expected: 4, found: 2 }"
        );
    }
}
#[cfg(test)]
mod enlarge_error {
    use super::EnlargeError;
    #[test]
    fn derived_traits() {
        // can't construct a TryReserveError due to TryReserveErrorKind being unstable
        let try_reserve_error = {
            let mut vec = Vec::<u8>::new();
            vec.try_reserve(usize::MAX).err().unwrap()
        };
        let err = EnlargeError::from(try_reserve_error);
        use std::error::Error;
        assert!(err.source().is_none());
        assert_eq!(format!("{}", err.clone()), "could not enlarge buffer");
        assert_eq!(format!("{:?}", err.clone()), "EnlargeError");
    }
}

77
vendor/encase/src/core/size_value.rs vendored Normal file
View File

@@ -0,0 +1,77 @@
use core::num::NonZeroU64;
/// Helper type for size calculations
///
/// Wraps a [`NonZeroU64`], so a size of 0 is unrepresentable.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct SizeValue(pub NonZeroU64);

impl SizeValue {
    /// Creates a new size from `val`.
    ///
    /// # Panics
    ///
    /// Panics if `val` is 0.
    #[inline]
    pub const fn new(val: u64) -> Self {
        // `NonZeroU64::new` performs the zero check, avoiding `unsafe`.
        match NonZeroU64::new(val) {
            Some(val) => Self(val),
            None => panic!("Size can't be 0!"),
        }
    }

    /// Wraps an already-nonzero value.
    #[inline]
    pub const fn from(val: NonZeroU64) -> Self {
        Self(val)
    }

    /// Returns the size as a plain `u64`.
    #[inline]
    pub const fn get(&self) -> u64 {
        self.0.get()
    }

    /// Multiplies this size by `rhs`.
    ///
    /// # Panics
    ///
    /// Panics if the multiplication overflows `u64`, or if `rhs` is 0
    /// (a size of 0 is unrepresentable).
    #[inline]
    pub const fn mul(self, rhs: u64) -> Self {
        match self.get().checked_mul(rhs) {
            None => panic!("Overflow occurred while multiplying size values!"),
            // The previous implementation used `NonZeroU64::new_unchecked`
            // here, which is undefined behavior when `rhs == 0` (the SAFETY
            // comment only covered overflow, not a zero product). Using
            // `NonZeroU64::new` turns that case into a defined panic.
            Some(val) => match NonZeroU64::new(val) {
                Some(val) => Self(val),
                None => panic!("Size can't be 0!"),
            },
        }
    }
}
#[cfg(test)]
mod test {
    use super::SizeValue;
    #[test]
    fn new() {
        assert_eq!(4, SizeValue::new(4).get());
    }
    // Zero sizes are unrepresentable and must panic at construction.
    #[test]
    #[should_panic]
    fn new_panic() {
        SizeValue::new(0);
    }
    #[test]
    fn mul() {
        assert_eq!(SizeValue::new(64), SizeValue::new(8).mul(8));
    }
    // Multiplication overflowing `u64` must panic rather than wrap.
    #[test]
    #[should_panic]
    fn mul_panic() {
        SizeValue::new(8).mul(u64::MAX);
    }
    // Exercises the derived `Clone`, `PartialEq` and `Debug` impls.
    #[test]
    fn derived_traits() {
        let size = SizeValue::new(8);
        #[allow(clippy::clone_on_copy)]
        let size_clone = size.clone();
        assert!(size == size_clone);
        assert_eq!(format!("{size:?}"), "SizeValue(8)");
    }
}

267
vendor/encase/src/core/traits.rs vendored Normal file
View File

@@ -0,0 +1,267 @@
use std::num::NonZeroU64;
use super::{AlignmentValue, BufferMut, BufferRef, Reader, SizeValue, Writer};
/// Minimum alignment imposed on stored values in the uniform address space.
const UNIFORM_MIN_ALIGNMENT: AlignmentValue = AlignmentValue::new(16);
/// Layout metadata describing a [`ShaderType`] implementation.
pub struct Metadata<E> {
    /// WGSL alignment of the type.
    pub alignment: AlignmentValue,
    /// Whether the uniform address space imposes its 16-byte minimum alignment
    /// on this type.
    pub has_uniform_min_alignment: bool,
    /// WGSL minimum size of the type.
    pub min_size: SizeValue,
    /// Whether the type can be copied byte-for-byte (plain old data).
    pub is_pod: bool,
    /// Extra, implementation-specific metadata.
    pub extra: E,
}
impl Metadata<()> {
    /// Builds metadata with the given alignment and size and all flags off.
    pub const fn from_alignment_and_size(alignment: u64, size: u64) -> Self {
        Self {
            alignment: AlignmentValue::new(alignment),
            has_uniform_min_alignment: false,
            min_size: SizeValue::new(size),
            is_pod: false,
            extra: (),
        }
    }
}
// using forget() avoids "destructors cannot be evaluated at compile-time" error
// track #![feature(const_precise_live_drops)] (https://github.com/rust-lang/rust/issues/73255)
impl<E> Metadata<E> {
    /// Returns the stored alignment.
    #[inline]
    pub const fn alignment(self) -> AlignmentValue {
        let value = self.alignment;
        core::mem::forget(self);
        value
    }
    /// Returns the uniform minimum alignment, if this type is subject to it.
    #[inline]
    pub const fn uniform_min_alignment(self) -> Option<AlignmentValue> {
        let value = self.has_uniform_min_alignment;
        core::mem::forget(self);
        match value {
            true => Some(UNIFORM_MIN_ALIGNMENT),
            false => None,
        }
    }
    /// Returns the stored minimum size.
    #[inline]
    pub const fn min_size(self) -> SizeValue {
        let value = self.min_size;
        core::mem::forget(self);
        value
    }
    /// Returns whether the type is plain old data.
    #[inline]
    pub const fn is_pod(self) -> bool {
        let value = self.is_pod;
        core::mem::forget(self);
        value
    }
    /// Marks the type as plain old data (builder-style).
    #[inline]
    pub const fn pod(mut self) -> Self {
        self.is_pod = true;
        self
    }
    /// Marks the type as not plain old data (builder-style).
    #[inline]
    pub const fn no_pod(mut self) -> Self {
        self.is_pod = false;
        self
    }
}
/// Base trait for all [WGSL host-shareable types](https://gpuweb.github.io/gpuweb/wgsl/#host-shareable-types)
pub trait ShaderType {
    #[doc(hidden)]
    type ExtraMetadata;
    #[doc(hidden)]
    const METADATA: Metadata<Self::ExtraMetadata>;
    /// Represents the minimum size of `Self` (equivalent to [GPUBufferBindingLayout.minBindingSize](https://gpuweb.github.io/gpuweb/#dom-gpubufferbindinglayout-minbindingsize))
    ///
    /// For [WGSL fixed-footprint types](https://gpuweb.github.io/gpuweb/wgsl/#fixed-footprint-types)
    /// it represents [WGSL Size](https://gpuweb.github.io/gpuweb/wgsl/#alignment-and-size)
    /// (equivalent to [`ShaderSize::SHADER_SIZE`])
    ///
    /// For
    /// [WGSL runtime-sized arrays](https://gpuweb.github.io/gpuweb/wgsl/#runtime-sized) and
    /// [WGSL structs containing runtime-sized arrays](https://gpuweb.github.io/gpuweb/wgsl/#struct-types)
    /// (non fixed-footprint types)
    /// this will be calculated by assuming the array has one element
    #[inline]
    fn min_size() -> NonZeroU64 {
        Self::METADATA.min_size().0
    }
    /// Returns the size of `Self` at runtime
    ///
    /// For [WGSL fixed-footprint types](https://gpuweb.github.io/gpuweb/wgsl/#fixed-footprint-types)
    /// it's equivalent to [`Self::min_size`] and [`ShaderSize::SHADER_SIZE`]
    #[inline]
    fn size(&self) -> NonZeroU64 {
        // Default: same as `min_size`; runtime-sized types override this.
        Self::METADATA.min_size().0
    }
    // No-op by default; implementations with uniform-incompatible layouts
    // provide a closure that panics with a diagnostic (see the doc examples
    // on `assert_uniform_compat` below).
    #[doc(hidden)]
    const UNIFORM_COMPAT_ASSERT: fn() = || {};
    /// Asserts that `Self` meets the requirements of the
    /// [uniform address space restrictions on stored values](https://gpuweb.github.io/gpuweb/wgsl/#address-spaces-uniform) and the
    /// [uniform address space layout constraints](https://gpuweb.github.io/gpuweb/wgsl/#address-space-layout-constraints)
    ///
    /// # Examples
    ///
    /// ## Array
    ///
    /// Will panic since runtime-sized arrays are not compatible with the
    /// uniform address space restrictions on stored values
    ///
    /// ```should_panic
    /// # use crate::encase::ShaderType;
    /// <Vec<mint::Vector4<f32>>>::assert_uniform_compat();
    /// ```
    ///
    /// Will panic since the stride is 4 bytes
    ///
    /// ```should_panic
    /// # use crate::encase::ShaderType;
    /// <[f32; 2]>::assert_uniform_compat();
    /// ```
    ///
    /// Will not panic since the stride is 16 bytes
    ///
    /// ```
    /// # use crate::encase::ShaderType;
    /// # use mint;
    /// <[mint::Vector4<f32>; 2]>::assert_uniform_compat();
    /// ```
    ///
    /// ## Struct
    ///
    /// Will panic since runtime-sized arrays are not compatible with the
    /// uniform address space restrictions on stored values
    ///
    /// ```should_panic
    /// # use crate::encase::ShaderType;
    /// # use mint;
    /// #[derive(ShaderType)]
    /// struct Invalid {
    ///     #[size(runtime)]
    ///     vec: Vec<mint::Vector4<f32>>
    /// }
    /// Invalid::assert_uniform_compat();
    /// ```
    ///
    /// Will panic since the inner struct's size must be a multiple of 16
    ///
    /// ```should_panic
    /// # use crate::encase::ShaderType;
    /// #[derive(ShaderType)]
    /// struct S {
    ///     x: f32,
    /// }
    ///
    /// #[derive(ShaderType)]
    /// struct Invalid {
    ///     a: f32,
    ///     b: S, // offset between fields 'a' and 'b' must be at least 16 (currently: 4)
    /// }
    /// Invalid::assert_uniform_compat();
    /// ```
    ///
    /// Will not panic (fixed via align attribute)
    ///
    /// ```
    /// # use crate::encase::ShaderType;
    /// # #[derive(ShaderType)]
    /// # struct S {
    /// #     x: f32,
    /// # }
    /// #[derive(ShaderType)]
    /// struct Valid {
    ///     a: f32,
    ///     #[align(16)]
    ///     b: S,
    /// }
    /// Valid::assert_uniform_compat();
    /// ```
    ///
    /// Will not panic (fixed via size attribute)
    ///
    /// ```
    /// # use crate::encase::ShaderType;
    /// # #[derive(ShaderType)]
    /// # struct S {
    /// #     x: f32,
    /// # }
    /// #[derive(ShaderType)]
    /// struct Valid {
    ///     #[size(16)]
    ///     a: f32,
    ///     b: S,
    /// }
    /// Valid::assert_uniform_compat();
    /// ```
    #[inline]
    fn assert_uniform_compat() {
        Self::UNIFORM_COMPAT_ASSERT();
    }

    // fn assert_can_write_into()
    // where
    //     Self: WriteInto,
    // {
    // }

    // fn assert_can_read_from()
    // where
    //     Self: ReadFrom,
    // {
    // }

    // fn assert_can_create_from()
    // where
    //     Self: CreateFrom,
    // {
    // }
}
/// Trait implemented for all [WGSL fixed-footprint types](https://gpuweb.github.io/gpuweb/wgsl/#fixed-footprint-types)
pub trait ShaderSize: ShaderType {
    /// Represents [WGSL Size](https://gpuweb.github.io/gpuweb/wgsl/#alignment-and-size) (equivalent to [`ShaderType::min_size`])
    const SHADER_SIZE: NonZeroU64 = Self::METADATA.min_size().0;
}
/// Trait implemented for
/// [WGSL runtime-sized arrays](https://gpuweb.github.io/gpuweb/wgsl/#runtime-sized) and
/// [WGSL structs containing runtime-sized arrays](https://gpuweb.github.io/gpuweb/wgsl/#struct-types)
/// (non fixed-footprint types)
pub trait CalculateSizeFor {
    /// Returns the size of `Self` assuming the (contained) runtime-sized array has `nr_of_el` elements
    fn calculate_size_for(nr_of_el: u64) -> NonZeroU64;
}
/// Length access for runtime-sized array containers.
#[allow(clippy::len_without_is_empty)]
pub trait RuntimeSizedArray {
    /// Number of elements currently in the array.
    fn len(&self) -> usize;
}
/// Serialization into a [`Writer`].
pub trait WriteInto {
    /// Writes `self`'s bytes into `writer`.
    fn write_into<B>(&self, writer: &mut Writer<B>)
    where
        B: BufferMut;
}
/// In-place deserialization from a [`Reader`].
pub trait ReadFrom {
    /// Overwrites `self` with data read from `reader`.
    fn read_from<B>(&mut self, reader: &mut Reader<B>)
    where
        B: BufferRef;
}
/// By-value deserialization from a [`Reader`].
pub trait CreateFrom: Sized {
    /// Builds a new value from data read from `reader`.
    fn create_from<B>(reader: &mut Reader<B>) -> Self
    where
        B: BufferRef;
}

3
vendor/encase/src/impls/archery.rs vendored Normal file
View File

@@ -0,0 +1,3 @@
use crate::impl_wrapper;
// Treat `SharedPointer` transparently as its pointee for (de)serialization.
impl_wrapper!(archery::SharedPointer<T, P>; (T, P: archery::SharedPointerKind); using Ref{} From{ new });

4
vendor/encase/src/impls/arrayvec.rs vendored Normal file
View File

@@ -0,0 +1,4 @@
use crate::rts_array::impl_rts_array;
// `ArrayVec` has a hard capacity cap of `N`, so reads must `truncate` to fit.
impl_rts_array!(arrayvec::ArrayVec<T, N>; (T, const N: usize); using len truncate);

12
vendor/encase/src/impls/cgmath.rs vendored Executable file
View File

@@ -0,0 +1,12 @@
use crate::{matrix::impl_matrix, vector::impl_vector};
// Vector, point and square-matrix (de)serialization support for `cgmath`.
impl_vector!(2, cgmath::Vector2<T>; using AsRef AsMut From);
impl_vector!(3, cgmath::Vector3<T>; using AsRef AsMut From);
impl_vector!(4, cgmath::Vector4<T>; using AsRef AsMut From);
impl_vector!(2, cgmath::Point2<T>; using AsRef AsMut From);
impl_vector!(3, cgmath::Point3<T>; using AsRef AsMut From);
impl_matrix!(2, 2, cgmath::Matrix2<T>; using AsRef AsMut From);
impl_matrix!(3, 3, cgmath::Matrix3<T>; using AsRef AsMut From);
impl_matrix!(4, 4, cgmath::Matrix4<T>; using AsRef AsMut From);

54
vendor/encase/src/impls/glam.rs vendored Executable file
View File

@@ -0,0 +1,54 @@
use crate::{
matrix::{impl_matrix, AsMutMatrixParts, AsRefMatrixParts, FromMatrixParts, MatrixScalar},
vector::impl_vector,
};
// Vector (de)serialization support for `glam` (concrete-element types).
impl_vector!(2, glam::Vec2, f32; using AsRef AsMut From);
impl_vector!(2, glam::UVec2, u32; using AsRef AsMut From);
impl_vector!(2, glam::IVec2, i32; using AsRef AsMut From);
impl_vector!(3, glam::Vec3, f32; using AsRef AsMut From);
impl_vector!(3, glam::UVec3, u32; using AsRef AsMut From);
impl_vector!(3, glam::IVec3, i32; using AsRef AsMut From);
impl_vector!(4, glam::Vec4, f32; using AsRef AsMut From);
impl_vector!(4, glam::UVec4, u32; using AsRef AsMut From);
impl_vector!(4, glam::IVec4, i32; using AsRef AsMut From);
impl_matrix!(2, 2, glam::Mat2, f32);
impl_matrix!(3, 3, glam::Mat3, f32);
impl_matrix!(4, 4, glam::Mat4, f32);
// glam matrices expose flat `[f32; R * C]` views rather than 2D arrays, so
// the matrix part conversions are implemented by hand: reinterpret the flat
// array as columns, and rebuild via `from_cols_array_2d`.
macro_rules! impl_matrix_traits {
    ($c:literal, $r:literal, $type:ty, $el_ty:ty) => {
        impl AsRefMatrixParts<$el_ty, $c, $r> for $type
        where
            Self: AsRef<[$el_ty; $r * $c]>,
            $el_ty: MatrixScalar,
        {
            fn as_ref_parts(&self) -> &[[$el_ty; $r]; $c] {
                array_ref_to_2d_array_ref!(self.as_ref(), $el_ty, $c, $r)
            }
        }
        impl AsMutMatrixParts<$el_ty, $c, $r> for $type
        where
            Self: AsMut<[$el_ty; $r * $c]>,
            $el_ty: MatrixScalar,
        {
            fn as_mut_parts(&mut self) -> &mut [[$el_ty; $r]; $c] {
                array_mut_to_2d_array_mut!(self.as_mut(), $el_ty, $c, $r)
            }
        }
        impl FromMatrixParts<$el_ty, $c, $r> for $type {
            fn from_parts(parts: [[$el_ty; $r]; $c]) -> Self {
                Self::from_cols_array_2d(&parts)
            }
        }
    };
}
impl_matrix_traits!(2, 2, glam::Mat2, f32);
impl_matrix_traits!(3, 3, glam::Mat3, f32);
impl_matrix_traits!(4, 4, glam::Mat4, f32);

3
vendor/encase/src/impls/im.rs vendored Normal file
View File

@@ -0,0 +1,3 @@
use crate::rts_array::impl_rts_array;

// `im::Vector` is treated as a runtime-sized array; `T: Clone` is the
// container's own requirement, and `using len` derives the `Length` impl
// from its inherent `len()` method.
impl_rts_array!(im::Vector<T>; (T: Clone); using len);

3
vendor/encase/src/impls/im_rc.rs vendored Normal file
View File

@@ -0,0 +1,3 @@
use crate::rts_array::impl_rts_array;

// Same shape as the `im` integration, for the single-threaded (`Rc`-based)
// variant of the crate.
impl_rts_array!(im_rc::Vector<T>; (T: Clone); using len);

3
vendor/encase/src/impls/imbl.rs vendored Normal file
View File

@@ -0,0 +1,3 @@
use crate::rts_array::impl_rts_array;

// `imbl::Vector` (fork of `im`) registered as a runtime-sized array via its
// inherent `len()`.
impl_rts_array!(imbl::Vector<T>; (T: Clone); using len);

22
vendor/encase/src/impls/mint.rs vendored Executable file
View File

@@ -0,0 +1,22 @@
// `ShaderType` support for `mint` types; all derived from the crate's
// `AsRef`/`AsMut`/`From` array conversions.
use crate::{matrix::impl_matrix, vector::impl_vector};
impl_vector!(2, mint::Vector2<T>; using AsRef AsMut From);
impl_vector!(3, mint::Vector3<T>; using AsRef AsMut From);
impl_vector!(4, mint::Vector4<T>; using AsRef AsMut From);
impl_vector!(2, mint::Point2<T>; using AsRef AsMut From);
impl_vector!(3, mint::Point3<T>; using AsRef AsMut From);
// Note: `impl_matrix!`'s first argument is the COLUMN count and the second
// the ROW count, which is why e.g. `ColumnMatrix2x3` registers as `(3, 2)`.
impl_matrix!(2, 2, mint::ColumnMatrix2<T>; using AsRef AsMut From);
impl_matrix!(3, 2, mint::ColumnMatrix2x3<T>; using AsRef AsMut From);
impl_matrix!(4, 2, mint::ColumnMatrix2x4<T>; using AsRef AsMut From);
impl_matrix!(2, 3, mint::ColumnMatrix3x2<T>; using AsRef AsMut From);
impl_matrix!(3, 3, mint::ColumnMatrix3<T>; using AsRef AsMut From);
impl_matrix!(4, 3, mint::ColumnMatrix3x4<T>; using AsRef AsMut From);
impl_matrix!(2, 4, mint::ColumnMatrix4x2<T>; using AsRef AsMut From);
impl_matrix!(3, 4, mint::ColumnMatrix4x3<T>; using AsRef AsMut From);
impl_matrix!(4, 4, mint::ColumnMatrix4<T>; using AsRef AsMut From);

35
vendor/encase/src/impls/mod.rs vendored Normal file
View File

@@ -0,0 +1,35 @@
// Each third-party integration module is gated behind the matching crate
// feature so that optional dependencies stay optional.
#[cfg(feature = "archery")]
mod archery;
#[cfg(feature = "static-rc")]
mod static_rc;
// Math crates (vectors / matrices).
#[cfg(feature = "cgmath")]
mod cgmath;
#[cfg(feature = "glam")]
mod glam;
#[cfg(feature = "mint")]
mod mint;
#[cfg(feature = "nalgebra")]
mod nalgebra;
#[cfg(feature = "ultraviolet")]
mod ultraviolet;
#[cfg(feature = "vek")]
mod vek;
// Container crates (runtime-sized arrays).
#[cfg(feature = "arrayvec")]
mod arrayvec;
#[cfg(feature = "ndarray")]
mod ndarray;
#[cfg(feature = "smallvec")]
mod smallvec;
#[cfg(feature = "tinyvec")]
mod tinyvec;
#[cfg(feature = "im")]
mod im;
#[cfg(feature = "im-rc")]
mod im_rc;
#[cfg(feature = "imbl")]
mod imbl;
// rpds builds on archery's shared-pointer kinds, so it needs both features.
#[cfg(all(feature = "rpds", feature = "archery"))]
mod rpds;

102
vendor/encase/src/impls/nalgebra.rs vendored Normal file
View File

@@ -0,0 +1,102 @@
// `ShaderType` support for `nalgebra` vectors and matrices. Owned matrices,
// immutable views and mutable views are all registered.
use crate::{
    matrix::{impl_matrix, AsMutMatrixParts, AsRefMatrixParts, FromMatrixParts, MatrixScalar},
    vector::{impl_vector, AsMutVectorParts, AsRefVectorParts, FromVectorParts, VectorScalar},
};

impl_vector!(2, nalgebra::VectorView2<'_, T>);
impl_vector!(2, nalgebra::VectorViewMut2<'_, T>);
impl_vector!(2, nalgebra::Vector2<T>);
impl_vector!(3, nalgebra::VectorView3<'_, T>);
impl_vector!(3, nalgebra::VectorViewMut3<'_, T>);
impl_vector!(3, nalgebra::Vector3<T>);
impl_vector!(4, nalgebra::VectorView4<'_, T>);
impl_vector!(4, nalgebra::VectorViewMut4<'_, T>);
impl_vector!(4, nalgebra::Vector4<T>);

// NOTE: nalgebra names matrices RowsxCols while `impl_matrix!` takes
// (cols, rows) — hence e.g. `MatrixView2x3` (2 rows, 3 cols) registers as
// `(3, 2)`.
impl_matrix!(2, 2, nalgebra::MatrixView2<'_, T>);
impl_matrix!(2, 2, nalgebra::MatrixViewMut2<'_, T>);
impl_matrix!(2, 2, nalgebra::Matrix2<T>);
impl_matrix!(3, 2, nalgebra::MatrixView2x3<'_, T>);
impl_matrix!(4, 2, nalgebra::MatrixView2x4<'_, T>);
impl_matrix!(2, 3, nalgebra::MatrixView3x2<'_, T>);
impl_matrix!(3, 2, nalgebra::MatrixViewMut2x3<'_, T>);
impl_matrix!(4, 2, nalgebra::MatrixViewMut2x4<'_, T>);
impl_matrix!(2, 3, nalgebra::MatrixViewMut3x2<'_, T>);
impl_matrix!(3, 2, nalgebra::Matrix2x3<T>);
impl_matrix!(4, 2, nalgebra::Matrix2x4<T>);
impl_matrix!(2, 3, nalgebra::Matrix3x2<T>);
impl_matrix!(3, 3, nalgebra::MatrixView3<'_, T>);
impl_matrix!(3, 3, nalgebra::MatrixViewMut3<'_, T>);
impl_matrix!(3, 3, nalgebra::Matrix3<T>);
impl_matrix!(4, 3, nalgebra::MatrixView3x4<'_, T>);
impl_matrix!(2, 4, nalgebra::MatrixView4x2<'_, T>);
impl_matrix!(3, 4, nalgebra::MatrixView4x3<'_, T>);
impl_matrix!(4, 3, nalgebra::MatrixViewMut3x4<'_, T>);
impl_matrix!(2, 4, nalgebra::MatrixViewMut4x2<'_, T>);
impl_matrix!(3, 4, nalgebra::MatrixViewMut4x3<'_, T>);
impl_matrix!(4, 3, nalgebra::Matrix3x4<T>);
impl_matrix!(2, 4, nalgebra::Matrix4x2<T>);
impl_matrix!(3, 4, nalgebra::Matrix4x3<T>);
impl_matrix!(4, 4, nalgebra::MatrixView4<'_, T>);
impl_matrix!(4, 4, nalgebra::MatrixViewMut4<'_, T>);
impl_matrix!(4, 4, nalgebra::Matrix4<T>);
/// Borrows a nalgebra column vector (any storage `S` for which the matrix
/// implements `AsRef<[T; N]>`) as a plain `&[T; N]` so encase can read it.
impl<T: VectorScalar, S, const N: usize> AsRefVectorParts<T, N>
    for nalgebra::Matrix<T, nalgebra::Const<N>, nalgebra::Const<1>, S>
where
    Self: AsRef<[T; N]>,
{
    fn as_ref_parts(&self) -> &[T; N] {
        // Fully-qualified form of the delegation required by the bound above.
        <Self as AsRef<[T; N]>>::as_ref(self)
    }
}
/// Mutably borrows a nalgebra column vector as `&mut [T; N]` so encase can
/// write into it in place.
impl<T: VectorScalar, S, const N: usize> AsMutVectorParts<T, N>
    for nalgebra::Matrix<T, nalgebra::Const<N>, nalgebra::Const<1>, S>
where
    Self: AsMut<[T; N]>,
{
    fn as_mut_parts(&mut self) -> &mut [T; N] {
        // Fully-qualified form of the delegation required by the bound above.
        <Self as AsMut<[T; N]>>::as_mut(self)
    }
}
/// Builds an owned statically-sized nalgebra column vector from a plain
/// `[T; N]` array by wrapping it in the crate's `ArrayStorage`.
impl<T: VectorScalar, const N: usize> FromVectorParts<T, N> for nalgebra::SMatrix<T, N, 1> {
    fn from_parts(parts: [T; N]) -> Self {
        // `ArrayStorage` is column-major; a column vector is one column.
        let storage = nalgebra::ArrayStorage([parts]);
        Self::from_array_storage(storage)
    }
}
/// Borrows a nalgebra matrix as a column-array `&[[T; R]; C]` for any
/// storage `S` providing that `AsRef` view.
impl<T: MatrixScalar, S, const C: usize, const R: usize> AsRefMatrixParts<T, C, R>
    for nalgebra::Matrix<T, nalgebra::Const<R>, nalgebra::Const<C>, S>
where
    Self: AsRef<[[T; R]; C]>,
{
    fn as_ref_parts(&self) -> &[[T; R]; C] {
        // Fully-qualified form of the delegation required by the bound above.
        <Self as AsRef<[[T; R]; C]>>::as_ref(self)
    }
}
/// Mutably borrows a nalgebra matrix as `&mut [[T; R]; C]` so encase can
/// write columns in place.
impl<T: MatrixScalar, S, const C: usize, const R: usize> AsMutMatrixParts<T, C, R>
    for nalgebra::Matrix<T, nalgebra::Const<R>, nalgebra::Const<C>, S>
where
    Self: AsMut<[[T; R]; C]>,
{
    fn as_mut_parts(&mut self) -> &mut [[T; R]; C] {
        // Fully-qualified form of the delegation required by the bound above.
        <Self as AsMut<[[T; R]; C]>>::as_mut(self)
    }
}
/// Builds an owned statically-sized nalgebra matrix from its column arrays.
impl<T: MatrixScalar, const C: usize, const R: usize> FromMatrixParts<T, C, R>
    for nalgebra::SMatrix<T, R, C>
{
    fn from_parts(parts: [[T; R]; C]) -> Self {
        // `ArrayStorage` holds the columns directly.
        let storage = nalgebra::ArrayStorage(parts);
        Self::from_array_storage(storage)
    }
}

3
vendor/encase/src/impls/ndarray.rs vendored Normal file
View File

@@ -0,0 +1,3 @@
use crate::rts_array::impl_rts_array;

// Any `ndarray::ArrayBase` is registered as a runtime-sized array; `Length`
// is derived from its inherent `len()` (total element count).
impl_rts_array!(ndarray::ArrayBase<S, D>; (T, S: ndarray::RawData<Elem = T>, D: ndarray::Dimension); using len);

12
vendor/encase/src/impls/rpds.rs vendored Normal file
View File

@@ -0,0 +1,12 @@
use crate::rts_array::{impl_rts_array, Length};

// All rpds containers are generic over archery's pointer kind `P`.
impl_rts_array!(rpds::List<T, P>; (T, P: archery::SharedPointerKind); using len);
impl_rts_array!(rpds::Vector<T, P>; (T, P: archery::SharedPointerKind); using len);
// `Stack` has no `len()` method, so `Length` is implemented manually below
// (note the missing `using len`).
impl_rts_array!(rpds::Stack<T, P>; (T, P: archery::SharedPointerKind));
impl_rts_array!(rpds::Queue<T, P>; (T, P: archery::SharedPointerKind); using len);

impl<T, P: archery::SharedPointerKind> Length for rpds::Stack<T, P> {
    fn length(&self) -> usize {
        // `Stack` exposes its element count as `size()` rather than `len()`.
        self.size()
    }
}

4
vendor/encase/src/impls/smallvec.rs vendored Normal file
View File

@@ -0,0 +1,4 @@
use crate::rts_array::impl_rts_array;

// softcap: `SmallVec` can grow past its inline capacity onto the heap, so
// both `Length` and `Truncate` are derived (`using len truncate`).
impl_rts_array!(smallvec::SmallVec<A>; (T, A: smallvec::Array<Item = T>); using len truncate);

4
vendor/encase/src/impls/static_rc.rs vendored Normal file
View File

@@ -0,0 +1,4 @@
use crate::impl_wrapper;

// Any fraction `NUM/DEN` of a `StaticRc` grants shared (read) access.
impl_wrapper!(static_rc::StaticRc<T, NUM, DEN>; (T: ?Sized, const NUM: usize, const DEN: usize); using Ref{});
// Only a full `N/N` `StaticRc` grants exclusive (mutable) access and can be
// constructed via `From` (through `StaticRc::new`).
impl_wrapper!(static_rc::StaticRc<T, N, N>; (T: ?Sized, const N: usize); using Mut{} From{ new });

7
vendor/encase/src/impls/tinyvec.rs vendored Normal file
View File

@@ -0,0 +1,7 @@
use crate::rts_array::impl_rts_array;

// hardcap: `ArrayVec` is bounded by its backing array's capacity.
impl_rts_array!(tinyvec::ArrayVec<A>; (T, A: tinyvec::Array<Item = T>); using len truncate);
// softcap: `TinyVec` spills to the heap once the inline capacity is exceeded.
impl_rts_array!(tinyvec::TinyVec<A>; (T, A: tinyvec::Array<Item = T>); using len truncate);

67
vendor/encase/src/impls/ultraviolet.rs vendored Executable file
View File

@@ -0,0 +1,67 @@
// `ShaderType` support for `ultraviolet` vector and matrix types.
use crate::{
    matrix::{impl_matrix, AsMutMatrixParts, AsRefMatrixParts},
    vector::{impl_vector, AsMutVectorParts, AsRefVectorParts},
};

// Only `From` can be derived for ultraviolet; the `AsRef`/`AsMut` parts
// traits are implemented manually below via the crate's slice accessors.
impl_vector!(2, ultraviolet::Vec2, f32; using From);
impl_vector!(2, ultraviolet::UVec2, u32; using From);
impl_vector!(2, ultraviolet::IVec2, i32; using From);
impl_vector!(3, ultraviolet::Vec3, f32; using From);
impl_vector!(3, ultraviolet::UVec3, u32; using From);
impl_vector!(3, ultraviolet::IVec3, i32; using From);
impl_vector!(4, ultraviolet::Vec4, f32; using From);
impl_vector!(4, ultraviolet::UVec4, u32; using From);
impl_vector!(4, ultraviolet::IVec4, i32; using From);
impl_matrix!(2, 2, ultraviolet::Mat2, f32; using From);
impl_matrix!(3, 3, ultraviolet::Mat3, f32; using From);
impl_matrix!(4, 4, ultraviolet::Mat4, f32; using From);

// Derives the vector parts traits from ultraviolet's `as_slice` /
// `as_mut_slice`. The `try_into().unwrap()` cannot fail as long as the
// slice holds exactly `$n` elements — assumed true for these fixed-size
// vector types.
macro_rules! impl_vector_traits {
    ($n:literal, $type:ty, $el_ty:ty) => {
        impl AsRefVectorParts<$el_ty, $n> for $type {
            fn as_ref_parts(&self) -> &[$el_ty; $n] {
                self.as_slice().try_into().unwrap()
            }
        }
        impl AsMutVectorParts<$el_ty, $n> for $type {
            fn as_mut_parts(&mut self) -> &mut [$el_ty; $n] {
                self.as_mut_slice().try_into().unwrap()
            }
        }
    };
}
impl_vector_traits!(2, ultraviolet::Vec2, f32);
impl_vector_traits!(2, ultraviolet::UVec2, u32);
impl_vector_traits!(2, ultraviolet::IVec2, i32);
impl_vector_traits!(3, ultraviolet::Vec3, f32);
impl_vector_traits!(3, ultraviolet::UVec3, u32);
impl_vector_traits!(3, ultraviolet::IVec3, i32);
impl_vector_traits!(4, ultraviolet::Vec4, f32);
impl_vector_traits!(4, ultraviolet::UVec4, u32);
impl_vector_traits!(4, ultraviolet::IVec4, i32);

// Derives the matrix parts traits by reshaping the matrix's flat element
// view into a `[[T; R]; C]` 2D array.
macro_rules! impl_matrix_traits {
    ($c:literal, $r:literal, $type:ty, $el_ty:ty) => {
        impl AsRefMatrixParts<$el_ty, $c, $r> for $type {
            fn as_ref_parts(&self) -> &[[$el_ty; $r]; $c] {
                array_ref_to_2d_array_ref!(self.as_array(), $el_ty, $c, $r)
            }
        }
        impl AsMutMatrixParts<$el_ty, $c, $r> for $type {
            fn as_mut_parts(&mut self) -> &mut [[$el_ty; $r]; $c] {
                let array = self.as_mut_slice().try_into().unwrap();
                array_mut_to_2d_array_mut!(array, $el_ty, $c, $r)
            }
        }
    };
}
impl_matrix_traits!(2, 2, ultraviolet::Mat2, f32);
impl_matrix_traits!(3, 3, ultraviolet::Mat3, f32);
impl_matrix_traits!(4, 4, ultraviolet::Mat4, f32);

57
vendor/encase/src/impls/vek.rs vendored Executable file
View File

@@ -0,0 +1,57 @@
// `ShaderType` support for `vek` vector and matrix types.
use crate::{
    matrix::{impl_matrix, AsMutMatrixParts, AsRefMatrixParts, FromMatrixParts, MatrixScalar},
    vector::{impl_vector, AsMutVectorParts, AsRefVectorParts, VectorScalar},
};

// Only `From` is derived; the parts traits are implemented manually below
// via vek's slice accessors.
impl_vector!(2, vek::Vec2<T>; using From);
impl_vector!(3, vek::Vec3<T>; using From);
impl_vector!(4, vek::Vec4<T>; using From);
impl_matrix!(2, 2, vek::Mat2<T>);
impl_matrix!(3, 3, vek::Mat3<T>);
impl_matrix!(4, 4, vek::Mat4<T>);

// Derives the vector parts traits from vek's `as_slice`/`as_mut_slice`.
// The `try_into().unwrap()` cannot fail as long as the slice holds exactly
// `$n` elements — assumed true for these fixed-size vector types.
macro_rules! impl_vector_traits {
    ($n:literal, $type:ty) => {
        impl<T: VectorScalar> AsRefVectorParts<T, $n> for $type {
            fn as_ref_parts(&self) -> &[T; $n] {
                self.as_slice().try_into().unwrap()
            }
        }
        impl<T: VectorScalar> AsMutVectorParts<T, $n> for $type {
            fn as_mut_parts(&mut self) -> &mut [T; $n] {
                self.as_mut_slice().try_into().unwrap()
            }
        }
    };
}
impl_vector_traits!(2, vek::Vec2<T>);
impl_vector_traits!(3, vek::Vec3<T>);
impl_vector_traits!(4, vek::Vec4<T>);

// Derives the matrix parts traits from vek's column-slice accessors,
// reshaping the flat column-major slice into a `[[T; R]; C]` 2D array.
macro_rules! impl_matrix_traits {
    ($c:literal, $r:literal, $type:ty) => {
        impl<T: MatrixScalar> AsRefMatrixParts<T, $c, $r> for $type {
            fn as_ref_parts(&self) -> &[[T; $r]; $c] {
                let array = self.as_col_slice().try_into().unwrap();
                array_ref_to_2d_array_ref!(array, T, $c, $r)
            }
        }
        impl<T: MatrixScalar> AsMutMatrixParts<T, $c, $r> for $type {
            fn as_mut_parts(&mut self) -> &mut [[T; $r]; $c] {
                let array = self.as_mut_col_slice().try_into().unwrap();
                array_mut_to_2d_array_mut!(array, T, $c, $r)
            }
        }
        impl<T: MatrixScalar> FromMatrixParts<T, $c, $r> for $type {
            fn from_parts(parts: [[T; $r]; $c]) -> Self {
                // vek constructor taking per-column arrays.
                Self::from_col_arrays(parts)
            }
        }
    };
}
impl_matrix_traits!(2, 2, vek::Mat2<T>);
impl_matrix_traits!(3, 3, vek::Mat3<T>);
impl_matrix_traits!(4, 4, vek::Mat4<T>);

167
vendor/encase/src/lib.rs vendored Normal file
View File

@@ -0,0 +1,167 @@
// Crate root: lint configuration, README-sourced crate docs, and the
// re-export of the `ShaderType` derive macro.
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![deny(rustdoc::broken_intra_doc_links)]
#![warn(
    future_incompatible,
    nonstandard_style,
    rust_2018_idioms,
    rust_2021_compatibility,
    unused,
    // missing_docs,
    single_use_lifetimes,
    trivial_casts,
    trivial_numeric_casts,
    // unreachable_pub,
    unused_qualifications,
    variant_size_differences
)]
#![doc = include_str!("../README.md")]
#![doc(
    html_logo_url = "https://raw.githubusercontent.com/teoxoy/encase/3d6d2e4d7670863e97463a15ceeafac6d13ee73e/logo.svg"
)]

/// Used to implement `ShaderType` for structs
///
/// # Attributes
///
/// Field attributes
///
/// - `#[align(X)]` where `X` is a power of 2 [`u32`] literal (equivalent to [WGSL align attribute](https://gpuweb.github.io/gpuweb/wgsl/#attribute-align))
///
///     Used to increase the alignment of the field
///
/// - `#[size(X)]` where `X` is a [`u32`] literal (equivalent to [WGSL size attribute](https://gpuweb.github.io/gpuweb/wgsl/#attribute-size))
///
///     Used to increase the size of the field
///
/// - `#[size(runtime)]` can only be attached to the last field of the struct
///
///     Used to denote the fact that the field it is attached to is a runtime-sized array
///
/// # Note about generics
///
/// While structs using generic type parameters are supported by this derive macro
///
/// - the `#[align(X)]` and `#[size(X)]` attributes will only work
///   if they are attached to fields whose type contains no generic type parameters
///
/// # Examples
///
/// Simple
///
/// ```
/// # use mint;
/// # use crate::encase::ShaderType;
/// #[derive(ShaderType)]
/// struct AffineTransform2D {
///     matrix: mint::ColumnMatrix2<f32>,
///     translate: mint::Vector2<f32>
/// }
/// ```
///
/// Contains a runtime-sized array
///
/// _The [`ArrayLength`] type can be used to explicitly write or read the length of the contained runtime-sized array_
///
/// ```
/// # use mint;
/// # use crate::encase::ShaderType;
/// # use crate::encase::ArrayLength;
/// #[derive(ShaderType)]
/// struct Positions {
///     length: ArrayLength,
///     #[size(runtime)]
///     positions: Vec<mint::Point2<f32>>
/// }
/// ```
///
/// Complex
///
/// ```
/// # use crate::encase::{ShaderType, ShaderSize};
/// #[derive(ShaderType)]
/// struct Complex<
///     'a,
///     'b: 'a,
///     E: 'a + ShaderType + ShaderSize,
///     T: 'b + ShaderType + ShaderSize,
///     const N: usize,
/// > {
///     array: [&'a mut E; N],
///     #[size(runtime)]
///     rts_array: &'a mut Vec<&'b T>,
/// }
/// ```
///
pub use encase_derive::ShaderType;
// Internal module layout; `utils` first so its macros are visible crate-wide.
#[macro_use]
mod utils;
mod core;
mod types;
mod impls;

// Primary public API surface.
pub use crate::core::{
    CalculateSizeFor, DynamicStorageBuffer, DynamicUniformBuffer, ShaderSize, ShaderType,
    StorageBuffer, UniformBuffer,
};
pub use types::runtime_sized_array::ArrayLength;

// Lower-level items for advanced users (buffer traits, reader/writer, etc.).
pub mod internal {
    pub use super::core::{
        AlignmentValue, BufferMut, BufferRef, CreateFrom, EnlargeError, Error, ReadContext,
        ReadFrom, Reader, Result, SizeValue, WriteContext, WriteInto, Writer,
    };
}
/// Module containing items necessary to implement `ShaderType` for runtime-sized arrays
pub mod rts_array {
    #[doc(inline)]
    pub use super::impl_rts_array;
    pub use super::types::runtime_sized_array::{Length, Truncate};
}
/// Module containing items necessary to implement `ShaderType` for vectors
pub mod vector {
    #[doc(inline)]
    pub use super::impl_vector;
    pub use super::types::vector::{
        AsMutVectorParts, AsRefVectorParts, FromVectorParts, VectorScalar,
    };
}
/// Module containing items necessary to implement `ShaderType` for matrices
pub mod matrix {
    #[doc(inline)]
    pub use super::impl_matrix;
    pub use super::types::matrix::{
        AsMutMatrixParts, AsRefMatrixParts, FromMatrixParts, MatrixScalar,
    };
}
/// Private module used by macros
#[doc(hidden)]
pub mod private {
    pub use super::build_struct;
    pub use super::core::AlignmentValue;
    pub use super::core::BufferMut;
    pub use super::core::BufferRef;
    pub use super::core::CreateFrom;
    pub use super::core::Metadata;
    pub use super::core::ReadFrom;
    pub use super::core::Reader;
    pub use super::core::RuntimeSizedArray;
    pub use super::core::SizeValue;
    pub use super::core::WriteInto;
    pub use super::core::Writer;
    pub use super::types::array::ArrayMetadata;
    pub use super::types::matrix::*;
    pub use super::types::r#struct::StructMetadata;
    pub use super::types::runtime_sized_array::{ArrayLength, Length, Truncate};
    pub use super::types::vector::*;
    pub use super::utils::consume_zsts;
    pub use super::CalculateSizeFor;
    pub use super::ShaderSize;
    pub use super::ShaderType;
    // Re-exported so derive-generated code can use `concat_assert` without
    // the user depending on `const_panic` directly.
    pub use const_panic::concat_assert;
}

127
vendor/encase/src/types/array.rs vendored Normal file
View File

@@ -0,0 +1,127 @@
use crate::core::{
    BufferMut, BufferRef, CreateFrom, Metadata, ReadFrom, Reader, ShaderSize, ShaderType,
    SizeValue, WriteInto, Writer,
};
use core::mem::{size_of, MaybeUninit};

/// Extra layout metadata carried by array types.
pub struct ArrayMetadata {
    /// Distance in bytes between the starts of consecutive elements.
    pub stride: SizeValue,
    /// Padding in bytes after each element (stride minus element size).
    pub el_padding: u64,
}

impl Metadata<ArrayMetadata> {
    /// Byte stride between consecutive array elements.
    pub const fn stride(self) -> SizeValue {
        self.extra.stride
    }
    /// Padding in bytes written/skipped after each element.
    pub const fn el_padding(self) -> u64 {
        self.extra.el_padding
    }
}
impl<T: ShaderType + ShaderSize, const N: usize> ShaderType for [T; N] {
    type ExtraMetadata = ArrayMetadata;
    const METADATA: Metadata<Self::ExtraMetadata> = {
        let alignment = T::METADATA.alignment();
        let el_size = SizeValue::from(T::SHADER_SIZE);
        // Each element is rounded up to the element alignment.
        let stride = alignment.round_up_size(el_size);
        let el_padding = alignment.padding_needed_for(el_size.get());
        // Zero-length arrays are rejected at compile time (const eval panics).
        let size = match N {
            0 => panic!("0 sized arrays are not supported!"),
            val => stride.mul(val as u64),
        };
        Metadata {
            alignment,
            has_uniform_min_alignment: true,
            min_size: size,
            // The array can be byte-copied only if its elements can be AND
            // there is no inter-element padding.
            is_pod: T::METADATA.is_pod() && el_padding == 0,
            extra: ArrayMetadata { stride, el_padding },
        }
    };
    // Uniform-buffer compatibility: the element type must be compatible and
    // the stride must satisfy the uniform minimum alignment (const-checked).
    const UNIFORM_COMPAT_ASSERT: fn() = || {
        crate::utils::consume_zsts([
            <T as ShaderType>::UNIFORM_COMPAT_ASSERT(),
            if let Some(min_alignment) = Self::METADATA.uniform_min_alignment() {
                const_panic::concat_assert!(
                    min_alignment.is_aligned(Self::METADATA.stride().get()),
                    "array stride must be a multiple of ",
                    min_alignment.get(),
                    " (current stride: ",
                    Self::METADATA.stride().get(),
                    ")"
                );
            },
        ]);
    };
}
impl<T: ShaderSize, const N: usize> ShaderSize for [T; N] {}
impl<T: WriteInto, const N: usize> WriteInto for [T; N]
where
    Self: ShaderType<ExtraMetadata = ArrayMetadata>,
{
    #[inline]
    fn write_into<B: BufferMut>(&self, writer: &mut Writer<B>) {
        if_pod_and_little_endian!(if pod_and_little_endian {
            // Fast path: copy the whole array as raw bytes in one call.
            let ptr = self.as_ptr() as *const u8;
            // SAFETY: `self` is a valid `[T; N]`, so `size_of::<Self>()`
            // bytes starting at its address are readable; this branch is
            // only taken when `METADATA.is_pod()` holds (see the `is_pod`
            // computation above), so the bytes are fully initialized.
            let byte_slice: &[u8] = unsafe { core::slice::from_raw_parts(ptr, size_of::<Self>()) };
            writer.write_slice(byte_slice);
        } else {
            // Slow path: write each element, then skip its trailing padding.
            for elem in self {
                WriteInto::write_into(elem, writer);
                writer.advance(Self::METADATA.el_padding() as usize);
            }
        });
    }
}
impl<T: ReadFrom, const N: usize> ReadFrom for [T; N]
where
    Self: ShaderType<ExtraMetadata = ArrayMetadata>,
{
    #[inline]
    fn read_from<B: BufferRef>(&mut self, reader: &mut Reader<B>) {
        if_pod_and_little_endian!(if pod_and_little_endian {
            // Fast path: overwrite the array's bytes directly.
            let ptr = self.as_mut_ptr() as *mut u8;
            // SAFETY: `self` is a valid, exclusively borrowed `[T; N]`, so
            // `size_of::<Self>()` bytes at its address are writable; this
            // branch is only taken when `METADATA.is_pod()` holds, so any
            // bit pattern written is a valid value.
            let byte_slice: &mut [u8] =
                unsafe { core::slice::from_raw_parts_mut(ptr, size_of::<Self>()) };
            reader.read_slice(byte_slice);
        } else {
            // Slow path: read each element, then skip its trailing padding.
            for elem in self {
                ReadFrom::read_from(elem, reader);
                reader.advance(Self::METADATA.el_padding() as usize);
            }
        });
    }
}
impl<T: CreateFrom, const N: usize> CreateFrom for [T; N]
where
    Self: ShaderType<ExtraMetadata = ArrayMetadata>,
{
    #[inline]
    fn create_from<B: BufferRef>(reader: &mut Reader<B>) -> Self {
        if_pod_and_little_endian!(if pod_and_little_endian {
            // Fast path: build the array in place by reading raw bytes into
            // zeroed storage (zeroed is fine — it is fully overwritten below).
            let mut me = MaybeUninit::zeroed();
            let ptr: *mut MaybeUninit<Self> = &mut me;
            let ptr = ptr.cast::<u8>();
            // SAFETY: `me` provides `size_of::<Self>()` writable bytes.
            let byte_slice: &mut [u8] =
                unsafe { core::slice::from_raw_parts_mut(ptr, size_of::<Self>()) };
            reader.read_slice(byte_slice);
            // SAFETY: All values were properly initialized by reading the bytes.
            unsafe { me.assume_init() }
        } else {
            // Slow path: create each element, skipping inter-element padding.
            core::array::from_fn(|_| {
                let res = CreateFrom::create_from(reader);
                reader.advance(Self::METADATA.el_padding() as usize);
                res
            })
        })
    }
}

218
vendor/encase/src/types/matrix.rs vendored Normal file
View File

@@ -0,0 +1,218 @@
use crate::core::Metadata;

/// Marker trait for scalar types usable as matrix elements.
pub trait MatrixScalar: crate::ShaderSize {}
// Implemented via macro for the supported scalar set (see
// `impl_marker_trait_for_f32!` in `utils`).
impl_marker_trait_for_f32!(MatrixScalar);

/// Extra layout metadata carried by matrix types.
pub struct MatrixMetadata {
    /// Padding in bytes appended after each column.
    pub col_padding: u64,
}
impl Metadata<MatrixMetadata> {
    /// Padding in bytes written/skipped after each column.
    #[inline]
    pub const fn col_padding(self) -> u64 {
        self.extra.col_padding
    }
}
/// Enables reading from the matrix (via `&[[T; R]; C]`)
pub trait AsRefMatrixParts<T: MatrixScalar, const C: usize, const R: usize> {
    fn as_ref_parts(&self) -> &[[T; R]; C];
}
/// Enables writing to the matrix (via `&mut [[T; R]; C]`)
pub trait AsMutMatrixParts<T: MatrixScalar, const C: usize, const R: usize> {
    fn as_mut_parts(&mut self) -> &mut [[T; R]; C];
}
/// Enables the creation of a matrix (via `[[T; R]; C]`)
pub trait FromMatrixParts<T: MatrixScalar, const C: usize, const R: usize> {
    fn from_parts(parts: [[T; R]; C]) -> Self;
}
/// Used to implement `ShaderType` for the given matrix type
///
/// The given matrix type should implement any combination of
/// [`AsRefMatrixParts`], [`AsMutMatrixParts`], [`FromMatrixParts`]
/// depending on needed capability (they can also be derived via `$using`)
///
/// # Args
///
/// - `$c` nr of columns the given matrix contains
///
/// - `$r` nr of rows the given matrix contains
///
/// - `$type` the type (representing a matrix) for which `ShaderType` will be implemented for
///
/// - `$generics` \[optional\] generics that will be passed into the `impl< >`
///
/// - `$el_type` \[optional\] inner element type of the matrix (should implement [`MatrixScalar`])
///
/// - `$using` \[optional\] can be any combination of `AsRef AsMut From`
#[macro_export]
macro_rules! impl_matrix {
    // Generic element type: a single type parameter `T` is introduced.
    ($c:literal, $r:literal, $type:ty $( ; using $($using:tt)* )?) => {
        $crate::impl_matrix_inner!(__inner, ($c, $r, $type, T, (T)); $( $($using)* )?);
    };
    // Caller-supplied generics list (element type stays `T`).
    ($c:literal, $r:literal, $type:ty; ($($generics:tt)*) $( ; using $($using:tt)* )?) => {
        $crate::impl_matrix_inner!(__inner, ($c, $r, $type, T, ($($generics)*)); $( $($using)* )?);
    };
    // Concrete element type: no generics on the generated impls.
    ($c:literal, $r:literal, $type:ty, $el_ty:ty $( ; using $($using:tt)* )?) => {
        $crate::impl_matrix_inner!(__inner, ($c, $r, $type, $el_ty, ()); $( $($using)* )?);
    };
}
#[doc(hidden)]
#[macro_export]
macro_rules! impl_matrix_inner {
    // `__inner` rules peel one `using` token at a time, emit the matching
    // derived trait impl, then recurse on the remaining tokens.
    (__inner, ($($other:tt)*); AsRef $($using:tt)*) => {
        $crate::impl_matrix_inner!(__ref, $($other)*);
        $crate::impl_matrix_inner!(__inner, ($($other)*); $($using)*);
    };
    (__inner, ($($other:tt)*); AsMut $($using:tt)*) => {
        $crate::impl_matrix_inner!(__mut, $($other)*);
        $crate::impl_matrix_inner!(__inner, ($($other)*); $($using)*);
    };
    (__inner, ($($other:tt)*); From $($using:tt)*) => {
        $crate::impl_matrix_inner!(__from, $($other)*);
        $crate::impl_matrix_inner!(__inner, ($($other)*); $($using)*);
    };
    // Base case: all `using` tokens consumed — emit the core impls.
    (__inner, ($c:literal, $r:literal, $type:ty, $el_ty:ty, ($($generics:tt)*)); ) => {
        $crate::impl_matrix_inner!(__main, $c, $r, $type, $el_ty, ($($generics)*));
    };
    // Derive `AsRefMatrixParts` from a `AsRef<[[el; R]; C]>` bound.
    (__ref, $c:literal, $r:literal, $type:ty, $el_ty:ty, ($($generics:tt)*)) => {
        impl<$($generics)*> $crate::private::AsRefMatrixParts<$el_ty, $c, $r> for $type
        where
            Self: ::core::convert::AsRef<[[$el_ty; $r]; $c]>,
            $el_ty: $crate::private::MatrixScalar,
        {
            #[inline]
            fn as_ref_parts(&self) -> &[[$el_ty; $r]; $c] {
                ::core::convert::AsRef::as_ref(self)
            }
        }
    };
    // Derive `AsMutMatrixParts` from a `AsMut<[[el; R]; C]>` bound.
    (__mut, $c:literal, $r:literal, $type:ty, $el_ty:ty, ($($generics:tt)*)) => {
        impl<$($generics)*> $crate::private::AsMutMatrixParts<$el_ty, $c, $r> for $type
        where
            Self: ::core::convert::AsMut<[[$el_ty; $r]; $c]>,
            $el_ty: $crate::private::MatrixScalar,
        {
            #[inline]
            fn as_mut_parts(&mut self) -> &mut [[$el_ty; $r]; $c] {
                ::core::convert::AsMut::as_mut(self)
            }
        }
    };
    // Derive `FromMatrixParts` from a `From<[[el; R]; C]>` bound.
    (__from, $c:literal, $r:literal, $type:ty, $el_ty:ty, ($($generics:tt)*)) => {
        impl<$($generics)*> $crate::private::FromMatrixParts<$el_ty, $c, $r> for $type
        where
            Self: ::core::convert::From<[[$el_ty; $r]; $c]>,
            $el_ty: $crate::private::MatrixScalar,
        {
            #[inline]
            fn from_parts(parts: [[$el_ty; $r]; $c]) -> Self {
                ::core::convert::From::from(parts)
            }
        }
    };
    // Core impls: `ShaderType`, `ShaderSize`, `WriteInto`, `ReadFrom`,
    // `CreateFrom`, plus compile-time checks on the matrix dimensions.
    (__main, $c:literal, $r:literal, $type:ty, $el_ty:ty, ($($generics:tt)*)) => {
        const _: () = assert!(
            2 <= $c && $c <= 4,
            "Matrix should have at least 2 columns and at most 4!",
        );
        const _: () = assert!(
            2 <= $r && $r <= 4,
            "Matrix should have at least 2 rows and at most 4!",
        );
        impl<$($generics)*> $crate::private::ShaderType for $type
        where
            $el_ty: $crate::private::ShaderSize,
        {
            type ExtraMetadata = $crate::private::MatrixMetadata;
            const METADATA: $crate::private::Metadata<Self::ExtraMetadata> = {
                // A column is `R` scalars; its alignment is the next power
                // of two of its size, and each column is padded up to it.
                let col_size = $crate::private::SizeValue::from(<$el_ty as $crate::private::ShaderSize>::SHADER_SIZE).mul($r);
                let alignment = $crate::private::AlignmentValue::from_next_power_of_two_size(col_size);
                let size = alignment.round_up_size(col_size).mul($c);
                let col_padding = alignment.padding_needed_for(col_size.get());
                $crate::private::Metadata {
                    alignment,
                    has_uniform_min_alignment: false,
                    min_size: size,
                    is_pod: <[$el_ty; $r] as $crate::private::ShaderType>::METADATA.is_pod() && col_padding == 0,
                    extra: $crate::private::MatrixMetadata {
                        col_padding,
                    },
                }
            };
        }
        impl<$($generics)*> $crate::private::ShaderSize for $type
        where
            $el_ty: $crate::private::ShaderSize
        {}
        impl<$($generics)*> $crate::private::WriteInto for $type
        where
            Self: $crate::private::AsRefMatrixParts<$el_ty, $c, $r> + $crate::private::ShaderType<ExtraMetadata = $crate::private::MatrixMetadata>,
            $el_ty: $crate::private::MatrixScalar + $crate::private::WriteInto,
        {
            #[inline]
            fn write_into<B: $crate::private::BufferMut>(&self, writer: &mut $crate::private::Writer<B>) {
                let columns = $crate::private::AsRefMatrixParts::<$el_ty, $c, $r>::as_ref_parts(self);
                $crate::if_pod_and_little_endian!(if pod_and_little_endian {
                    // POD fast path: write all columns as one block.
                    $crate::private::WriteInto::write_into(columns, writer);
                } else {
                    // Column-by-column, skipping trailing column padding.
                    for col in columns {
                        $crate::private::WriteInto::write_into(col, writer);
                        writer.advance(<Self as $crate::private::ShaderType>::METADATA.col_padding() as ::core::primitive::usize);
                    }
                });
            }
        }
        impl<$($generics)*> $crate::private::ReadFrom for $type
        where
            Self: $crate::private::AsMutMatrixParts<$el_ty, $c, $r> + $crate::private::ShaderType<ExtraMetadata = $crate::private::MatrixMetadata>,
            $el_ty: $crate::private::MatrixScalar + $crate::private::ReadFrom,
        {
            #[inline]
            fn read_from<B: $crate::private::BufferRef>(&mut self, reader: &mut $crate::private::Reader<B>) {
                let columns = $crate::private::AsMutMatrixParts::<$el_ty, $c, $r>::as_mut_parts(self);
                $crate::if_pod_and_little_endian!(if pod_and_little_endian {
                    $crate::private::ReadFrom::read_from(columns, reader);
                } else {
                    for col in columns {
                        $crate::private::ReadFrom::read_from(col, reader);
                        reader.advance(<Self as $crate::private::ShaderType>::METADATA.col_padding() as ::core::primitive::usize);
                    }
                });
            }
        }
        impl<$($generics)*> $crate::private::CreateFrom for $type
        where
            Self: $crate::private::FromMatrixParts<$el_ty, $c, $r> + $crate::private::ShaderType<ExtraMetadata = $crate::private::MatrixMetadata>,
            $el_ty: $crate::private::MatrixScalar + $crate::private::CreateFrom,
        {
            #[inline]
            fn create_from<B: $crate::private::BufferRef>(reader: &mut $crate::private::Reader<B>) -> Self {
                let columns = $crate::if_pod_and_little_endian!(if pod_and_little_endian {
                    $crate::private::CreateFrom::create_from(reader)
                } else {
                    ::core::array::from_fn(|_| {
                        let col = $crate::private::CreateFrom::create_from(reader);
                        reader.advance(<Self as $crate::private::ShaderType>::METADATA.col_padding() as ::core::primitive::usize);
                        col
                    })
                });
                $crate::private::FromMatrixParts::<$el_ty, $c, $r>::from_parts(columns)
            }
        }
    };
}

14
vendor/encase/src/types/mod.rs vendored Executable file
View File

@@ -0,0 +1,14 @@
// Per-shape submodules; `scalar` first so its macros are visible to siblings.
#[macro_use]
pub mod scalar;
pub mod vector;
pub mod matrix;
pub mod array;
pub mod r#struct;
pub mod runtime_sized_array;
mod wrapper;

View File

@@ -0,0 +1,272 @@
use std::collections::{LinkedList, VecDeque};

use crate::core::{
    BufferMut, BufferRef, CreateFrom, Metadata, ReadFrom, Reader, RuntimeSizedArray, ShaderSize,
    WriteInto, Writer,
};
use crate::ShaderType;

/// Helper type meant to be used together with the [`derive@ShaderType`] derive macro
///
/// This type should be interpreted as an [`u32`] in the shader
///
/// # Problem
///
/// There are cases where the use of the WGSL function [`arrayLength()`](https://gpuweb.github.io/gpuweb/wgsl/#array-builtin-functions)
/// might be inadequate because of its return value
///
/// - being a minimum of 1 due to how [`minBindingSize` is calculated](https://gpuweb.github.io/gpuweb/#ref-for-dom-gpubufferbindinglayout-minbindingsize%E2%91%A7)
///
/// - possibly being higher than expected due to padding at the end of a struct or buffer being interpreted as array elements
///
/// - representing the capacity of the array for use cases that require oversized buffers
///
/// # Solution
///
/// Using this type on a field of a struct with the [`derive@ShaderType`] derive macro will automatically:
///
/// - on write, write the length of the contained runtime-sized array as an [`u32`] to the buffer
///
/// - on read, read the value as an [`u32`] from the buffer (rep as `LEN`) and when reading the elements of the contained runtime-sized array a max of `LEN` elements will be read
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
pub struct ArrayLength;

// Occupies a 4-byte, 4-aligned slot — the layout of a `u32`.
impl ShaderType for ArrayLength {
    type ExtraMetadata = ();
    const METADATA: Metadata<Self::ExtraMetadata> = Metadata::from_alignment_and_size(4, 4);
}
impl ShaderSize for ArrayLength {}

impl WriteInto for ArrayLength {
    fn write_into<B: BufferMut>(&self, writer: &mut Writer<B>) {
        // The runtime-sized array's length is recorded in the write context
        // before this field is written; it must be present here.
        let length = writer.ctx.rts_array_length.unwrap();
        WriteInto::write_into(&length, writer);
    }
}
impl ReadFrom for ArrayLength {
    fn read_from<B: BufferRef>(&mut self, reader: &mut Reader<B>) {
        // Read the stored u32 and cap subsequent runtime-sized array reads
        // to at most that many elements.
        let length = CreateFrom::create_from(reader);
        reader.ctx.rts_array_max_el_to_read = Some(length);
    }
}
impl CreateFrom for ArrayLength {
    fn create_from<B: BufferRef>(reader: &mut Reader<B>) -> Self {
        // Same as `ReadFrom`, but produces the (zero-sized) marker value.
        let length = CreateFrom::create_from(reader);
        reader.ctx.rts_array_max_el_to_read = Some(length);
        ArrayLength
    }
}
/// Containers that can report how many elements they currently hold.
pub trait Length {
    fn length(&self) -> usize;
}
/// Containers that can drop all elements past a given length.
pub trait Truncate {
    fn truncate(&mut self, _len: usize);
}
/// Used to implement `ShaderType` for the given runtime-sized array type
///
/// The given runtime-sized array type should implement [`Length`] and optionally [`Truncate`]
/// depending on needed capability (they can also be derived via `$using`)
///
/// # Args
///
/// - `$type` the type (representing a runtime-sized array) for which `ShaderType` will be implemented for
///
/// - `$generics` \[optional\] generics that will be passed into the `impl< >`
///
/// - `$using` \[optional\] can be any combination of `len truncate`
#[macro_export]
macro_rules! impl_rts_array {
    // No explicit generics: default to a single element parameter `T`.
    ($type:ty $( ; using $($using:tt)* )?) => {
        $crate::impl_rts_array_inner!(__inner, ($type, T); $( $($using)* )?);
    };
    // Caller-supplied generics list.
    ($type:ty; ($($generics:tt)*) $( ; using $($using:tt)* )?) => {
        $crate::impl_rts_array_inner!(__inner, ($type, $($generics)*); $( $($using)* )?);
    };
}
#[doc(hidden)]
#[macro_export]
// Token-munching helper behind `impl_rts_array!`. The `__inner` rules consume
// the `using …` flags one at a time (`len`, `truncate`), emitting the matching
// trait impl for each, and finish by emitting the main `ShaderType` machinery.
macro_rules! impl_rts_array_inner {
// Consume a `len` flag: derive `Length` from the type's inherent `len()`.
(__inner, ($($other:tt)*); len $($using:tt)*) => {
$crate::impl_rts_array_inner!(__len, $($other)*);
$crate::impl_rts_array_inner!(__inner, ($($other)*); $($using)*);
};
// Consume a `truncate` flag: derive `Truncate` from the inherent `truncate()`.
(__inner, ($($other:tt)*); truncate $($using:tt)*) => {
$crate::impl_rts_array_inner!(__truncate, $($other)*);
$crate::impl_rts_array_inner!(__inner, ($($other)*); $($using)*);
};
// All flags consumed: emit the main impls.
(__inner, ($type:ty, $($generics:tt)*); ) => {
$crate::impl_rts_array_inner!(__main, $type, $($generics)*);
};
(__len, $type:ty, $($generics:tt)*) => {
impl<$($generics)*> $crate::private::Length for $type {
fn length(&self) -> ::core::primitive::usize {
self.len()
}
}
};
(__truncate, $type:ty, $($generics:tt)*) => {
impl<$($generics)*> $crate::private::Truncate for $type {
fn truncate(&mut self, len: ::core::primitive::usize) {
self.truncate(len)
}
}
};
(__main, $type:ty, $($generics:tt)*) => {
impl<$($generics)*> $crate::private::ShaderType for $type
where
T: $crate::private::ShaderType + $crate::private::ShaderSize,
Self: $crate::private::Length,
{
type ExtraMetadata = $crate::private::ArrayMetadata;
const METADATA: $crate::private::Metadata<Self::ExtraMetadata> = {
// Stride is the element size rounded up to the element alignment;
// `el_padding` is the gap written/skipped after each element.
let alignment = T::METADATA.alignment();
let el_size = $crate::private::SizeValue::from(T::SHADER_SIZE);
let stride = alignment.round_up_size(el_size);
let el_padding = alignment.padding_needed_for(el_size.get());
$crate::private::Metadata {
alignment,
has_uniform_min_alignment: true,
min_size: el_size,
is_pod: false,
extra: $crate::private::ArrayMetadata { stride, el_padding },
}
};
// Runtime-sized arrays are only valid in storage buffers; trip a
// panic (at const-assert time) if used in a uniform buffer layout.
const UNIFORM_COMPAT_ASSERT: fn() = ||
::core::panic!("runtime-sized array can't be used in uniform buffers");
fn size(&self) -> ::core::num::NonZeroU64 {
use ::core::cmp::Ord;
// `.max(1)` keeps the size non-zero even for an empty array.
Self::METADATA.stride()
.mul($crate::private::Length::length(self).max(1) as ::core::primitive::u64)
.0
}
}
impl<$($generics)*> $crate::private::RuntimeSizedArray for $type
where
Self: $crate::private::Length,
{
fn len(&self) -> ::core::primitive::usize {
$crate::private::Length::length(self)
}
}
impl<$($generics)*> $crate::private::CalculateSizeFor for $type
where
Self: $crate::private::ShaderType<ExtraMetadata = $crate::private::ArrayMetadata>,
{
fn calculate_size_for(nr_of_el: ::core::primitive::u64) -> ::core::num::NonZeroU64 {
use ::core::cmp::Ord;
<Self as $crate::private::ShaderType>::METADATA.stride().mul(nr_of_el.max(1)).0
}
}
impl<$($generics)*> $crate::private::WriteInto for $type
where
T: $crate::private::WriteInto,
Self: $crate::private::ShaderType<ExtraMetadata = $crate::private::ArrayMetadata>,
for<'a> &'a Self: ::core::iter::IntoIterator<Item = &'a T>,
{
fn write_into<B: $crate::private::BufferMut>(&self, writer: &mut $crate::private::Writer<B>) {
use ::core::iter::IntoIterator;
// Write each element, then skip its trailing stride padding.
for item in self.into_iter() {
$crate::private::WriteInto::write_into(item, writer);
writer.advance(<Self as $crate::private::ShaderType>::METADATA.el_padding() as ::core::primitive::usize);
}
}
}
impl<$($generics)*> $crate::private::ReadFrom for $type
where
T: $crate::private::ReadFrom + $crate::private::CreateFrom,
Self: $crate::private::Truncate + $crate::private::Length + ::core::iter::Extend<T> + $crate::private::ShaderType<ExtraMetadata = $crate::private::ArrayMetadata>,
for<'a> &'a mut Self: ::core::iter::IntoIterator<Item = &'a mut T>,
{
fn read_from<B: $crate::private::BufferRef>(&mut self, reader: &mut $crate::private::Reader<B>) {
use ::core::cmp::Ord;
use ::core::iter::{IntoIterator, Extend, Iterator};
// Element count is bounded by both the reader's configured cap and
// how many whole strides remain in the source buffer.
let max = reader.ctx.rts_array_max_el_to_read.unwrap_or(::core::primitive::u32::MAX) as ::core::primitive::usize;
let count = max.min(reader.remaining() / <Self as $crate::private::ShaderType>::METADATA.stride().get() as ::core::primitive::usize);
// Drop surplus elements, read into the ones kept in place, then
// create-and-append any elements still missing.
$crate::private::Truncate::truncate(self, count);
for item in self.into_iter() {
$crate::private::ReadFrom::read_from(item, reader);
reader.advance(<Self as $crate::private::ShaderType>::METADATA.el_padding() as ::core::primitive::usize);
}
let remaining = count - $crate::private::Length::length(self);
self.extend(
::core::iter::repeat_with(|| {
let el = $crate::private::CreateFrom::create_from(reader);
reader.advance(<Self as $crate::private::ShaderType>::METADATA.el_padding() as ::core::primitive::usize);
el
})
.take(remaining),
);
}
}
impl<$($generics)*> $crate::private::CreateFrom for $type
where
T: $crate::private::CreateFrom,
Self: ::core::iter::FromIterator<T> + $crate::private::ShaderType<ExtraMetadata = $crate::private::ArrayMetadata>,
{
fn create_from<B: $crate::private::BufferRef>(reader: &mut $crate::private::Reader<B>) -> Self {
use ::core::cmp::Ord;
use ::core::iter::Iterator;
// Same element-count bound as `read_from` above.
let max = reader.ctx.rts_array_max_el_to_read.unwrap_or(::core::primitive::u32::MAX) as ::core::primitive::usize;
let count = max.min(reader.remaining() / <Self as $crate::private::ShaderType>::METADATA.stride().get() as ::core::primitive::usize);
::core::iter::FromIterator::from_iter(
::core::iter::repeat_with(|| {
let el = $crate::private::CreateFrom::create_from(reader);
reader.advance(<Self as $crate::private::ShaderType>::METADATA.el_padding() as ::core::primitive::usize);
el
})
.take(count),
)
}
}
};
}
// Runtime-sized-array support for the std collection types. `len` derives
// `Length` from the inherent `len()`; `truncate` also derives `Truncate`.
impl_rts_array!([T]; using len);
impl_rts_array!(Vec<T>; using len truncate);
impl_rts_array!(VecDeque<T>; using len truncate);
// `LinkedList` has no inherent `truncate`; a manual `Truncate` impl follows.
impl_rts_array!(LinkedList<T>; using len);
/// `LinkedList` lacks an inherent `truncate`, so the crate's `Truncate`
/// trait is implemented manually by splitting off and dropping the tail.
impl<T> Truncate for LinkedList<T> {
    fn truncate(&mut self, len: usize) {
        // `split_off` panics when `len > self.len()`, so only split when a
        // tail actually exists; the returned tail list is dropped here.
        if self.len() > len {
            drop(self.split_off(len));
        }
    }
}
#[cfg(test)]
mod array_length {
    use super::ArrayLength;

    /// Smoke-test the derived `Clone`, `PartialEq` and `Debug` impls.
    #[test]
    fn derived_traits() {
        let value = ArrayLength;
        let cloned = value.clone();
        assert_eq!(value, cloned);
        assert_eq!(format!("{value:?}"), "ArrayLength");
    }
}

170
vendor/encase/src/types/scalar.rs vendored Normal file
View File

@@ -0,0 +1,170 @@
use crate::core::{
BufferMut, BufferRef, CreateFrom, Metadata, ReadFrom, Reader, ShaderSize, ShaderType,
WriteInto, Writer,
};
use core::num::{NonZeroI32, NonZeroU32, Wrapping};
use core::sync::atomic::{AtomicI32, AtomicU32};
// Implements `ShaderType`/`ShaderSize` for a 32-bit scalar type: alignment 4,
// size 4. The `is_pod` variant additionally tags the metadata as plain old
// data via `.pod()`, enabling byte-wise fast paths.
macro_rules! impl_basic_traits {
($type:ty) => {
impl_basic_traits!(__main, $type, );
};
($type:ty, is_pod) => {
impl_basic_traits!(__main, $type, .pod());
};
(__main, $type:ty, $($tail:tt)*) => {
impl ShaderType for $type {
type ExtraMetadata = ();
const METADATA: Metadata<Self::ExtraMetadata> = Metadata::from_alignment_and_size(4, 4) $($tail)*;
}
impl ShaderSize for $type {}
};
}
// Full serialization support for primitive scalars, written/read in
// little-endian byte order (the layout GPU APIs expect).
macro_rules! impl_traits_for_pod {
($type:ty) => {
impl_basic_traits!($type, is_pod);
impl WriteInto for $type {
#[inline]
fn write_into<B: BufferMut>(&self, writer: &mut Writer<B>) {
writer.write(&<$type>::to_le_bytes(*self));
}
}
impl ReadFrom for $type {
#[inline]
fn read_from<B: BufferRef>(&mut self, reader: &mut Reader<B>) {
*self = <$type>::from_le_bytes(*reader.read());
}
}
impl CreateFrom for $type {
#[inline]
fn create_from<B: BufferRef>(reader: &mut Reader<B>) -> Self {
<$type>::from_le_bytes(*reader.read())
}
}
};
}
impl_traits_for_pod!(f32);
impl_traits_for_pod!(u32);
impl_traits_for_pod!(i32);
// Serialization for `Option<NonZero*>`: `None` is encoded as `0` on write,
// and a read `0` decodes back to `None` (via `NonZero*::new`), so the niche
// representation round-trips through the buffer.
macro_rules! impl_traits_for_non_zero_option {
($type:ty) => {
impl_basic_traits!(Option<$type>);
impl WriteInto for Option<$type> {
#[inline]
fn write_into<B: BufferMut>(&self, writer: &mut Writer<B>) {
let value = self.map(|num| num.get()).unwrap_or(0);
WriteInto::write_into(&value, writer);
}
}
impl ReadFrom for Option<$type> {
#[inline]
fn read_from<B: BufferRef>(&mut self, reader: &mut Reader<B>) {
*self = <$type>::new(CreateFrom::create_from(reader));
}
}
impl CreateFrom for Option<$type> {
#[inline]
fn create_from<B: BufferRef>(reader: &mut Reader<B>) -> Self {
<$type>::new(CreateFrom::create_from(reader))
}
}
};
}
impl_traits_for_non_zero_option!(NonZeroU32);
impl_traits_for_non_zero_option!(NonZeroI32);
// Serialization for `Wrapping<u32>`/`Wrapping<i32>`: simply delegates to the
// inner integer's impls, since `Wrapping` only changes arithmetic semantics,
// not representation.
macro_rules! impl_traits_for_wrapping {
($type:ty) => {
impl_basic_traits!($type);
impl WriteInto for $type {
#[inline]
fn write_into<B: BufferMut>(&self, writer: &mut Writer<B>) {
WriteInto::write_into(&self.0, writer);
}
}
impl ReadFrom for $type {
#[inline]
fn read_from<B: BufferRef>(&mut self, reader: &mut Reader<B>) {
ReadFrom::read_from(&mut self.0, reader);
}
}
impl CreateFrom for $type {
#[inline]
fn create_from<B: BufferRef>(reader: &mut Reader<B>) -> Self {
Wrapping(CreateFrom::create_from(reader))
}
}
};
}
impl_traits_for_wrapping!(Wrapping<u32>);
impl_traits_for_wrapping!(Wrapping<i32>);
macro_rules! impl_traits_for_atomic {
($type:ty) => {
impl_basic_traits!($type);
impl WriteInto for $type {
#[inline]
fn write_into<B: BufferMut>(&self, writer: &mut Writer<B>) {
let value = self.load(std::sync::atomic::Ordering::Relaxed);
WriteInto::write_into(&value, writer);
}
}
impl ReadFrom for $type {
#[inline]
fn read_from<B: BufferRef>(&mut self, reader: &mut Reader<B>) {
ReadFrom::read_from(self.get_mut(), reader);
}
}
impl CreateFrom for $type {
#[inline]
fn create_from<B: BufferRef>(reader: &mut Reader<B>) -> Self {
<$type>::new(CreateFrom::create_from(reader))
}
}
};
}
impl_traits_for_atomic!(AtomicU32);
impl_traits_for_atomic!(AtomicI32);
// Helper macros that blanket-implement a given marker trait for every scalar
// representation of the respective WGSL scalar type (plain primitive, plus —
// for the integer types — the `Option<NonZero*>`, `Wrapping<*>` and atomic
// forms implemented above).
macro_rules! impl_marker_trait_for_f32 {
($trait:path) => {
impl $trait for ::core::primitive::f32 {}
};
}
macro_rules! impl_marker_trait_for_u32 {
($trait:path) => {
impl $trait for ::core::primitive::u32 {}
impl $trait for ::core::option::Option<::core::num::NonZeroU32> {}
impl $trait for ::core::num::Wrapping<::core::primitive::u32> {}
impl $trait for ::core::sync::atomic::AtomicU32 {}
};
}
macro_rules! impl_marker_trait_for_i32 {
($trait:path) => {
impl $trait for ::core::primitive::i32 {}
impl $trait for ::core::option::Option<::core::num::NonZeroI32> {}
impl $trait for ::core::num::Wrapping<::core::primitive::i32> {}
impl $trait for ::core::sync::atomic::AtomicI32 {}
};
}

20
vendor/encase/src/types/struct.rs vendored Executable file
View File

@@ -0,0 +1,20 @@
use crate::core::Metadata;
/// Extra layout metadata for a shader struct with `N` fields:
/// per-field byte offsets and per-field trailing padding amounts.
pub struct StructMetadata<const N: usize> {
// Byte offset of each field from the start of the struct.
pub offsets: [u64; N],
// Padding bytes following each field.
pub paddings: [u64; N],
}
impl<const N: usize> Metadata<StructMetadata<N>> {
/// Byte offset of field `i`.
pub const fn offset(self, i: usize) -> u64 {
self.extra.offsets[i]
}
/// Byte offset of the last field (panics at const-eval if `N == 0`).
pub const fn last_offset(self) -> u64 {
self.extra.offsets[N - 1]
}
/// Padding bytes following field `i`.
pub const fn padding(self, i: usize) -> u64 {
self.extra.paddings[i]
}
}

173
vendor/encase/src/types/vector.rs vendored Normal file
View File

@@ -0,0 +1,173 @@
/// Marker trait for types usable as the element of a shader vector
/// (implemented below for all scalar representations of f32/u32/i32).
pub trait VectorScalar: crate::ShaderSize {}
impl_marker_trait_for_f32!(VectorScalar);
impl_marker_trait_for_u32!(VectorScalar);
impl_marker_trait_for_i32!(VectorScalar);
/// Enables reading from the vector (via `&[T; N]`)
pub trait AsRefVectorParts<T: VectorScalar, const N: usize> {
fn as_ref_parts(&self) -> &[T; N];
}
/// Enables writing to the vector (via `&mut [T; N]`)
pub trait AsMutVectorParts<T: VectorScalar, const N: usize> {
fn as_mut_parts(&mut self) -> &mut [T; N];
}
/// Enables the creation of a vector (via `[T; N]`)
pub trait FromVectorParts<T: VectorScalar, const N: usize> {
fn from_parts(parts: [T; N]) -> Self;
}
/// Used to implement `ShaderType` for the given vector type
///
/// The given vector type should implement any combination of
/// [`AsRefVectorParts`], [`AsMutVectorParts`], [`FromVectorParts`]
/// depending on needed capability (they can also be derived via `$using`)
///
/// # Args
///
/// - `$n` nr of elements the given vector contains
///
/// - `$type` the type (representing a vector) for which `ShaderType` will be implemented for
///
/// - `$generics` \[optional\] generics that will be passed into the `impl< >`
///
/// - `$el_type` \[optional\] inner element type of the vector (should implement [`VectorScalar`])
///
/// - `$using` \[optional\] can be any combination of `AsRef AsMut From`
#[macro_export]
macro_rules! impl_vector {
($n:literal, $type:ty $( ; using $($using:tt)* )?) => {
$crate::impl_vector_inner!(__inner, ($n, $type, T, (T)); $( $($using)* )?);
};
($n:literal, $type:ty; ($($generics:tt)*) $( ; using $($using:tt)* )?) => {
$crate::impl_vector_inner!(__inner, ($n, $type, T, ($($generics)*)); $( $($using)* )?);
};
($n:literal, $type:ty, $el_ty:ty $( ; using $($using:tt)* )?) => {
$crate::impl_vector_inner!(__inner, ($n, $type, $el_ty, ()); $( $($using)* )?);
};
}
#[doc(hidden)]
#[macro_export]
// Token-munching helper behind `impl_vector!`. The `__inner` rules consume the
// `using …` flags (`AsRef`, `AsMut`, `From`) one at a time, deriving the
// corresponding `*VectorParts` trait, then emit the main serialization impls.
macro_rules! impl_vector_inner {
(__inner, ($($other:tt)*); AsRef $($using:tt)*) => {
$crate::impl_vector_inner!(__ref, $($other)*);
$crate::impl_vector_inner!(__inner, ($($other)*); $($using)*);
};
(__inner, ($($other:tt)*); AsMut $($using:tt)*) => {
$crate::impl_vector_inner!(__mut, $($other)*);
$crate::impl_vector_inner!(__inner, ($($other)*); $($using)*);
};
(__inner, ($($other:tt)*); From $($using:tt)*) => {
$crate::impl_vector_inner!(__from, $($other)*);
$crate::impl_vector_inner!(__inner, ($($other)*); $($using)*);
};
// All flags consumed: emit the main impls.
(__inner, ($n:literal, $type:ty, $el_ty:ty, ($($generics:tt)*)); ) => {
$crate::impl_vector_inner!(__main, $n, $type, $el_ty, ($($generics)*));
};
// Derive `AsRefVectorParts` from the type's `AsRef<[T; N]>` impl.
(__ref, $n:literal, $type:ty, $el_ty:ty, ($($generics:tt)*)) => {
impl<$($generics)*> $crate::private::AsRefVectorParts<$el_ty, $n> for $type
where
Self: ::core::convert::AsRef<[$el_ty; $n]>,
$el_ty: $crate::private::VectorScalar,
{
#[inline]
fn as_ref_parts(&self) -> &[$el_ty; $n] {
::core::convert::AsRef::as_ref(self)
}
}
};
// Derive `AsMutVectorParts` from the type's `AsMut<[T; N]>` impl.
(__mut, $n:literal, $type:ty, $el_ty:ty, ($($generics:tt)*)) => {
impl<$($generics)*> $crate::private::AsMutVectorParts<$el_ty, $n> for $type
where
Self: ::core::convert::AsMut<[$el_ty; $n]>,
$el_ty: $crate::private::VectorScalar,
{
#[inline]
fn as_mut_parts(&mut self) -> &mut [$el_ty; $n] {
::core::convert::AsMut::as_mut(self)
}
}
};
// Derive `FromVectorParts` from the type's `From<[T; N]>` impl.
(__from, $n:literal, $type:ty, $el_ty:ty, ($($generics:tt)*)) => {
impl<$($generics)*> $crate::private::FromVectorParts<$el_ty, $n> for $type
where
Self: ::core::convert::From<[$el_ty; $n]>,
$el_ty: $crate::private::VectorScalar,
{
#[inline]
fn from_parts(parts: [$el_ty; $n]) -> Self {
::core::convert::From::from(parts)
}
}
};
(__main, $n:literal, $type:ty, $el_ty:ty, ($($generics:tt)*)) => {
// Shader vectors only exist with 2, 3 or 4 components.
const _: () = assert!(
2 <= $n && $n <= 4,
"Vector should have at least 2 elements and at most 4!",
);
impl<$($generics)*> $crate::private::ShaderType for $type
where
$el_ty: $crate::private::ShaderSize,
{
type ExtraMetadata = ();
const METADATA: $crate::private::Metadata<Self::ExtraMetadata> = {
// Size is `N * element size`; alignment is that size rounded up
// to the next power of two (e.g. vec3<f32> -> size 12, align 16).
let size = $crate::private::SizeValue::from(<$el_ty as $crate::private::ShaderSize>::SHADER_SIZE).mul($n);
let alignment = $crate::private::AlignmentValue::from_next_power_of_two_size(size);
$crate::private::Metadata {
alignment,
has_uniform_min_alignment: false,
min_size: size,
is_pod: <[$el_ty; $n] as $crate::private::ShaderType>::METADATA.is_pod(),
extra: ()
}
};
}
impl<$($generics)*> $crate::private::ShaderSize for $type
where
$el_ty: $crate::private::ShaderSize
{}
// Serialization delegates to the `[T; N]` impls through the
// `*VectorParts` accessors derived above.
impl<$($generics)*> $crate::private::WriteInto for $type
where
Self: $crate::private::AsRefVectorParts<$el_ty, $n>,
$el_ty: $crate::private::VectorScalar + $crate::private::WriteInto,
{
#[inline]
fn write_into<B: $crate::private::BufferMut>(&self, writer: &mut $crate::private::Writer<B>) {
let elements = $crate::private::AsRefVectorParts::<$el_ty, $n>::as_ref_parts(self);
$crate::private::WriteInto::write_into(elements, writer);
}
}
impl<$($generics)*> $crate::private::ReadFrom for $type
where
Self: $crate::private::AsMutVectorParts<$el_ty, $n>,
$el_ty: $crate::private::VectorScalar + $crate::private::ReadFrom,
{
#[inline]
fn read_from<B: $crate::private::BufferRef>(&mut self, reader: &mut $crate::private::Reader<B>) {
let elements = $crate::private::AsMutVectorParts::<$el_ty, $n>::as_mut_parts(self);
$crate::private::ReadFrom::read_from(elements, reader);
}
}
impl<$($generics)*> $crate::private::CreateFrom for $type
where
Self: $crate::private::FromVectorParts<$el_ty, $n>,
$el_ty: $crate::private::VectorScalar + $crate::private::CreateFrom,
{
#[inline]
fn create_from<B: $crate::private::BufferRef>(reader: &mut $crate::private::Reader<B>) -> Self {
let elements = $crate::private::CreateFrom::create_from(reader);
$crate::private::FromVectorParts::<$el_ty, $n>::from_parts(elements)
}
}
};
}

120
vendor/encase/src/types/wrapper.rs vendored Normal file
View File

@@ -0,0 +1,120 @@
/// Used to implement `ShaderType` for the given wrapper type
///
/// # Args
///
/// - `$type` the type (representing a wrapper) for which `ShaderType` will be implemented for
///
/// - `$generics` \[optional\] generics that will be passed into the `impl< >`
///
/// - `$using` \[optional\] can be any combination of `Ref{ X } Mut{ X } From{ X }`
/// (where `X` denotes a possible function call)
#[macro_export]
macro_rules! impl_wrapper {
// Default generics: a single (possibly unsized) type parameter `T`.
($type:ty; using $($using:tt)*) => {
$crate::impl_wrapper_inner!(__inner, ($type, T: ?Sized); $($using)*);
};
// Caller-supplied generics.
($type:ty; ($($generics:tt)*); using $($using:tt)*) => {
$crate::impl_wrapper_inner!(__inner, ($type, $($generics)*); $($using)*);
};
}
#[doc(hidden)]
#[macro_export]
// Token-munching helper behind `impl_wrapper!`. Consumes the `Ref{..}`,
// `Mut{..}` and `From{..}` flags one at a time; each carries the accessor
// tokens needed to reach (or build) the wrapped value.
macro_rules! impl_wrapper_inner {
(__inner, ($($other:tt)*); Ref{ $($get_ref:tt)* } $($using:tt)*) => {
$crate::impl_wrapper_inner!(__ref, ($($other)*); { $($get_ref)* });
$crate::impl_wrapper_inner!(__inner, ($($other)*); $($using)*);
};
(__inner, ($($other:tt)*); Mut{ $($get_mut:tt)* } $($using:tt)*) => {
$crate::impl_wrapper_inner!(__mut, ($($other)*); { $($get_mut)* });
$crate::impl_wrapper_inner!(__inner, ($($other)*); $($using)*);
};
(__inner, ($($other:tt)*); From{ $($from:tt)* } $($using:tt)*) => {
$crate::impl_wrapper_inner!(__from, ($($other)*); { $($from)* });
$crate::impl_wrapper_inner!(__inner, ($($other)*); $($using)*);
};
// All flags consumed: nothing more to emit.
(__inner, ($type:ty, $($generics:tt)*); ) => {};
// Shared-access impls: everything reachable through `&self$($get_ref)*`
// simply forwards to the wrapped `T`'s impl.
(__ref, ($type:ty, $($generics:tt)*); { $($get_ref:tt)* }) => {
impl<$($generics)*> $crate::private::ShaderType for $type
where
T: $crate::private::ShaderType
{
type ExtraMetadata = T::ExtraMetadata;
// `.no_pod()` because a wrapper's in-memory layout need not match
// the wrapped value's byte layout.
const METADATA: $crate::private::Metadata<Self::ExtraMetadata> = T::METADATA.no_pod();
const UNIFORM_COMPAT_ASSERT: fn() = T::UNIFORM_COMPAT_ASSERT;
#[inline]
fn size(&self) -> ::core::num::NonZeroU64 {
<T as $crate::private::ShaderType>::size(&self$($get_ref)*)
}
}
impl<$($generics)*> $crate::private::ShaderSize for $type
where
T: $crate::private::ShaderSize
{
const SHADER_SIZE: ::core::num::NonZeroU64 = T::SHADER_SIZE;
}
impl<$($generics)*> $crate::private::RuntimeSizedArray for $type
where
T: $crate::private::RuntimeSizedArray
{
#[inline]
fn len(&self) -> usize {
<T as $crate::private::RuntimeSizedArray>::len(&self$($get_ref)*)
}
}
impl<$($generics)*> $crate::private::CalculateSizeFor for $type
where
T: $crate::private::CalculateSizeFor
{
#[inline]
fn calculate_size_for(nr_of_el: u64) -> ::core::num::NonZeroU64 {
<T as $crate::private::CalculateSizeFor>::calculate_size_for(nr_of_el)
}
}
impl<$($generics)*> $crate::private::WriteInto for $type
where
T: $crate::private::WriteInto
{
#[inline]
fn write_into<B: $crate::private::BufferMut>(&self, writer: &mut $crate::private::Writer<B>) {
<T as $crate::private::WriteInto>::write_into(&self$($get_ref)*, writer)
}
}
};
// Exclusive-access impl: reading into the wrapper goes through the
// `$($get_mut)*` accessor tokens (e.g. `.get_mut()` for `Cell`).
(__mut, ($type:ty, $($generics:tt)*); { $($get_mut:tt)* }) => {
impl<$($generics)*> $crate::private::ReadFrom for $type
where
T: $crate::private::ReadFrom
{
#[inline]
fn read_from<B: $crate::private::BufferRef>(&mut self, reader: &mut $crate::private::Reader<B>) {
<T as $crate::private::ReadFrom>::read_from(self$($get_mut)*, reader)
}
}
};
// Construction impl: builds the wrapper with the `$($from)*` constructor
// tokens (e.g. `new` for `Box`, `Owned` for `Cow`).
(__from, ($type:ty, $($generics:tt)*); { $($from:tt)* }) => {
impl<$($generics)*> $crate::private::CreateFrom for $type
where
T: $crate::private::CreateFrom
{
#[inline]
fn create_from<B: $crate::private::BufferRef>(reader: &mut $crate::private::Reader<B>) -> Self {
<$type>::$($from)*(<T as $crate::private::CreateFrom>::create_from(reader))
}
}
};
}
// Wrapper support for the standard pointer/smart-pointer types. `Ref{..}`
// gives read/size support, `Mut{..}` read-back support, `From{..}` creation.
impl_wrapper!(&T; using Ref{});
impl_wrapper!(&mut T; using Ref{} Mut{});
impl_wrapper!(Box<T>; using Ref{} Mut{} From{ new });
impl_wrapper!(std::borrow::Cow<'_, T>; (T: ?Sized + ToOwned<Owned = T>); using Ref{} From{ Owned });
impl_wrapper!(std::rc::Rc<T>; using Ref{} From{ new });
impl_wrapper!(std::sync::Arc<T>; using Ref{} From{ new });
impl_wrapper!(core::cell::Cell<T>; (T: Copy); using Ref{ .get() } Mut{ .get_mut() } From{ new });

226
vendor/encase/src/utils.rs vendored Normal file
View File

@@ -0,0 +1,226 @@
use core::mem::MaybeUninit;
// Accepts (and discards) an array of `N` unit values. NOTE(review): callers
// presumably use this to force const evaluation of `N` unit-producing
// expressions; `#[track_caller]` keeps any resulting panic pointing at the
// call site — confirm against call sites.
#[track_caller]
pub const fn consume_zsts<const N: usize>(_: [(); N]) {}
#[doc(hidden)]
#[macro_export]
// Builds a value of `$type` field-by-field through raw pointers into a
// `MaybeUninit`, writing each named local into the same-named field. The
// caller must list every field of the struct, otherwise `assume_init` below
// would be unsound.
macro_rules! build_struct {
($type:ty, $( $field_idents:ident ),*) => {{
let mut uninit_struct = ::core::mem::MaybeUninit::<$type>::uninit();
let ptr = ::core::mem::MaybeUninit::as_mut_ptr(&mut uninit_struct);
$( $crate::build_struct!(__write_to_field; ptr, $field_idents, $field_idents); )*
// SAFETY: Everything has been initialized
unsafe { ::core::mem::MaybeUninit::assume_init(uninit_struct) }
}};
(__write_to_field; $ptr:ident, $field_name:ident, $data:expr) => {
// SAFETY: the pointer `ptr` returned by `as_mut_ptr` is a valid pointer,
// so it's safe to get a pointer to a field through `addr_of_mut!`
let field_ptr = unsafe { ::core::ptr::addr_of_mut!((*$ptr).$field_name) };
// SAFETY: writing to `field_ptr` is safe because it's a pointer
// to one of the struct's fields (therefore valid and aligned)
unsafe { field_ptr.write($data) };
};
}
#[doc(hidden)]
#[macro_export]
// Selects between a fast byte-copy path and a generic path: the `$true`
// branch runs only when the type is plain-old-data AND the target is
// little-endian (so the in-memory layout already matches the buffer layout);
// on big-endian targets the `$false` branch is compiled unconditionally.
macro_rules! if_pod_and_little_endian {
(if pod_and_little_endian $true:block else $false:block) => {{
#[cfg(target_endian = "little")]
// Const branch, should be eliminated at compile time.
if <Self as $crate::private::ShaderType>::METADATA.is_pod() {
$true
} else {
$false
}
#[cfg(not(target_endian = "little"))]
{
$false
}
}};
}
#[cfg(any(feature = "glam", feature = "ultraviolet", feature = "vek"))]
// Reinterprets a flat `&[T; R * C]` as a column-major-style `&[[T; R]; C]`
// without copying (used by the math-library integrations for matrix types).
macro_rules! array_ref_to_2d_array_ref {
($array:expr, $ty:ty, $c:literal, $r:literal) => {
// SAFETY:
// transmuting from &[T; R * C] to &[[T; R]; C] is sound since:
// the references have the same size
// size_of::<&[T; R * C]>() = size_of::<usize>()
// size_of::<&[[T; R]; C]>() = size_of::<usize>()
// the values behind the references have the same size and alignment
// size_of::<[T; R * C]>() = size_of::<T>() * R * C
// size_of::<[[T; R]; C]>() = size_of::<[T; R]>() * C = size_of::<T>() * R * C
// align_of::<[T; R * C]>() = align_of::<T>()
// align_of::<[[T; R]; C]>() = align_of::<[T; R]>() = align_of::<T>()
// ref: https://doc.rust-lang.org/reference/type-layout.html
unsafe { ::core::mem::transmute::<&[$ty; $r * $c], &[[$ty; $r]; $c]>($array) }
};
}
#[cfg(any(feature = "glam", feature = "ultraviolet", feature = "vek"))]
// Mutable counterpart of `array_ref_to_2d_array_ref!`: reinterprets a flat
// `&mut [T; R * C]` as `&mut [[T; R]; C]` without copying.
macro_rules! array_mut_to_2d_array_mut {
($array:expr, $ty:ty, $c:literal, $r:literal) => {
// SAFETY:
// transmuting from &mut [T; R * C] to &mut [[T; R]; C] is sound since:
// the references have the same size
// size_of::<&mut [T; R * C]>() = size_of::<usize>()
// size_of::<&mut [[T; R]; C]>() = size_of::<usize>()
// the values behind the references have the same size and alignment
// size_of::<[T; R * C]>() = size_of::<T>() * R * C
// size_of::<[[T; R]; C]>() = size_of::<[T; R]>() * C = size_of::<T>() * R * C
// align_of::<[T; R * C]>() = align_of::<T>()
// align_of::<[[T; R]; C]>() = align_of::<[T; R]>() = align_of::<T>()
// ref: https://doc.rust-lang.org/reference/type-layout.html
unsafe { ::core::mem::transmute::<&mut [$ty; $r * $c], &mut [[$ty; $r]; $c]>($array) }
};
}
// Fallible, zero-filling growth for byte-like vectors; `Err` means the
// allocation failed (instead of aborting the process).
pub(crate) trait ByteVecExt {
/// Tries to extend `self` with `0`s up to `new_len`, using memset.
fn try_extend(&mut self, new_len: usize) -> Result<(), std::collections::TryReserveError>;
}
impl ByteVecExt for Vec<u8> {
    /// Zero-extends the vector to `new_len` bytes; a no-op if already at
    /// least that long.
    ///
    /// # Errors
    ///
    /// Returns the error from [`Vec::try_reserve`] if growing the allocation
    /// fails; the vector is left unchanged in that case.
    #[inline]
    fn try_extend(&mut self, new_len: usize) -> Result<(), std::collections::TryReserveError> {
        let additional = new_len.saturating_sub(self.len());
        if additional > 0 {
            // Reserve fallibly first so the subsequent `resize` cannot
            // allocate (capacity is already sufficient) and therefore cannot
            // abort on OOM. `resize` then zero-fills `old_len..new_len`.
            // This replaces a hand-rolled `write_bytes`/`set_len` unsafe
            // block whose SAFETY comments referenced calls (`Vec::reserve`,
            // `as_mut_ptr_range`) the code did not actually make.
            self.try_reserve(additional)?;
            self.resize(new_len, 0);
        }
        Ok(())
    }
}
impl<T> ByteVecExt for Vec<MaybeUninit<T>> {
// Grows the vector to `new_len` WITHOUT initializing the new elements —
// valid only because the element type is `MaybeUninit<T>`.
#[inline]
fn try_extend(&mut self, new_len: usize) -> Result<(), std::collections::TryReserveError> {
let additional = new_len.saturating_sub(self.len());
if additional > 0 {
self.try_reserve(additional)?;
// It's OK to not initialize the extended elements as MaybeUninit allows
// uninitialized memory.
// SAFETY
// 1. new_len is less than or equal to Vec::capacity() since try_reserve reserved at least `additional` elements
// 2. The elements at old_len..new_len need no initialization: a
//    `MaybeUninit<T>` is always valid, even when uninitialized
unsafe { self.set_len(new_len) }
}
Ok(())
}
}
// Fixed-size, bounds-checked array "windows" into slices.
pub(crate) trait SliceExt<T> {
/// Returns a "window" (shared reference to an array of length `N`) into this slice.
///
/// # Panics
///
/// Panics if the range `offset..offset + N` is out of bounds.
fn array<const N: usize>(&self, offset: usize) -> &[T; N];
/// Returns a "window" (mutable reference to an array of length `N`) into this slice.
///
/// # Panics
///
/// Panics if the range `offset..offset + N` is out of bounds.
fn array_mut<const N: usize>(&mut self, offset: usize) -> &mut [T; N];
}
impl<T> SliceExt<T> for [T] {
    // Previously these were copied from the core lib and used raw pointer
    // casts (tracking #![feature(split_array)],
    // https://github.com/rust-lang/rust/issues/90091). The stable
    // `TryFrom<&[T]> for &[T; N]` conversions perform the same
    // length-checked reinterpretation with no `unsafe` at all.
    #[inline]
    fn array<const N: usize>(&self, offset: usize) -> &[T; N] {
        use core::convert::TryInto;
        // Slicing panics (as documented) when the range is out of bounds;
        // afterwards the sub-slice length is exactly `N`, so the conversion
        // below cannot fail.
        let src = &self[offset..offset + N];
        src.try_into().expect("sub-slice has length N")
    }
    #[inline]
    fn array_mut<const N: usize>(&mut self, offset: usize) -> &mut [T; N] {
        use core::convert::TryInto;
        // Same reasoning as `array` above, through `TryFrom<&mut [T]>`.
        let src = &mut self[offset..offset + N];
        src.try_into().expect("sub-slice has length N")
    }
}
#[cfg(test)]
mod byte_vec_ext {
    use crate::utils::ByteVecExt;

    /// Growing an empty vec zero-fills up to the requested length.
    #[test]
    fn try_extend() {
        let mut bytes: Vec<u8> = Vec::new();
        bytes.try_extend(10).unwrap();
        assert_eq!(bytes.len(), 10);
        for byte in &bytes {
            assert_eq!(*byte, 0);
        }
    }

    /// Extending to a length we already meet leaves the vec untouched.
    #[test]
    fn try_extend_noop() {
        let mut bytes = vec![0; 12];
        bytes.try_extend(10).unwrap();
        assert_eq!(bytes.len(), 12);
    }

    /// An absurd target length surfaces the allocation error.
    #[test]
    fn try_extend_err() {
        let mut bytes = vec![0; 12];
        assert!(bytes.try_extend(usize::MAX).is_err());
    }
}
#[cfg(test)]
mod slice_ext {
    use crate::utils::SliceExt;

    /// A shared window reflects the slice contents at the given offset.
    #[test]
    fn array() {
        let data = [1, 3, 7, 6, 9, 7];
        let view: &[i32] = &data;
        let window: &[i32; 2] = view.array(3);
        assert_eq!(window, &[6, 9]);
    }

    /// A mutable window works the same way.
    #[test]
    fn array_mut() {
        let mut data = [1, 3, 7, 6, 9, 7];
        let view: &mut [i32] = &mut data;
        let window: &mut [i32; 2] = view.array_mut(3);
        assert_eq!(window, &mut [6, 9]);
    }
}
}