Vendor dependencies for 0.3.0 release

This commit is contained in:
2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

1
vendor/encase/.cargo-checksum.json vendored Normal file

File diff suppressed because one or more lines are too long

95
vendor/encase/CHANGELOG.md vendored Normal file
View File

@@ -0,0 +1,95 @@
# Changelog
## v0.10.0 (2024-09-13)
- Updated `glam` to v0.29
- Updated `nalgebra` to v0.33
## v0.9.0 (2024-06-22)
- Updated `glam` to v0.28
- Updated `vek` to v0.17
- Updated `imbl` to v3
## v0.8.0 (2024-04-24)
- Internal optimization: faster copying of POD types
- Added support for writing to uninitialized memory
- Increased MSRV to 1.68.2
- Updated `glam` to v0.27
## v0.7.0 (2024-01-02)
- Allow buffer types to accept `?Sized` types
- Fix min `syn` version (v2.0.1)
- Updated `glam` to v0.25
- Updated `vek` to v0.16
- Updated `rpds` to v1
- Updated `archery` to v1
## v0.6.1 (2023-05-09)
- Fix erroring on attributes not owned by this crate
## v0.6.0 (2023-05-06)
- Inline potentially hot functions more aggressively
- Fix `clippy::extra_unused_type_parameters` warning
- Updated `syn` to v2
- Updated `glam` to v0.24
- Updated `rpds` to v0.13
- Updated `archery` to v0.5
## v0.5.0 (2023-03-04)
- Check dynamic buffer alignment is not less than 32
- Work around `trivial_bounds` error
- Increased MSRV to 1.63
- Updated `glam` to v0.23
- Updated `nalgebra` to v0.32
## v0.4.1 (2022-12-09)
- Renamed `coverage` cfg to `coverage_nightly`
## v0.4.0 (2022-11-06)
- Updated `glam` to v0.22
- Updated `rpds` to v0.12
- Updated `static-rc` to v0.6
## v0.3.0 (2022-07-03)
- Renamed `Size::SIZE` to `ShaderSize::SHADER_SIZE`
- Updated `glam` to v0.21
- Increased MSRV to 1.58
- Fix `clippy::missing_const_for_fn` warning
## v0.2.1 (2022-06-14)
- Fix padding not being generated for one field structs
## v0.2.0 (2022-05-05)
- Renamed `WgslType` to `ShaderType`
- Removed `assert_uniform_compat` derive macro helper attribute
- Fixed crate not compiling on latest rustc in some scenarios
- Added ability for other crates to wrap the derive macro implementation for re-export purposes
- Updated `nalgebra` to v0.31 and `imbl` to v2
## v0.1.3 (2022-03-16)
- Improved uniform address space doc examples
## v0.1.2 (2022-03-15)
- Fixed uniform address space alignment requirements
## v0.1.1 (2022-03-09)
- Added logo
- Fixed broken links in docs
## v0.1.0 (2022-03-06)
- Initial release

220
vendor/encase/Cargo.toml vendored Normal file
View File

@@ -0,0 +1,220 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2021"
rust-version = "1.68.2"
name = "encase"
version = "0.10.0"
build = false
autobins = false
autoexamples = false
autotests = false
autobenches = false
description = "Provides a mechanism to lay out data into GPU buffers ensuring WGSL's memory layout requirements are met"
documentation = "https://docs.rs/encase"
readme = "README.md"
keywords = [
"wgsl",
"wgpu",
]
categories = ["rendering"]
license = "MIT-0"
repository = "https://github.com/teoxoy/encase"
[package.metadata.docs.rs]
all-features = true
[profile.bench]
lto = "thin"
codegen-units = 1
[lib]
name = "encase"
path = "src/lib.rs"
[[test]]
name = "assert_uniform_compat_fail"
path = "tests/assert_uniform_compat_fail.rs"
[[test]]
name = "assert_uniform_compat_success"
path = "tests/assert_uniform_compat_success.rs"
[[test]]
name = "errors"
path = "tests/errors.rs"
[[test]]
name = "general"
path = "tests/general.rs"
[[test]]
name = "hygiene"
path = "tests/hygiene.rs"
[[test]]
name = "metadata"
path = "tests/metadata.rs"
[[test]]
name = "trybuild"
path = "tests/trybuild.rs"
[[test]]
name = "uniform"
path = "tests/uniform.rs"
[[test]]
name = "wgpu"
path = "tests/wgpu.rs"
[[bench]]
name = "throughput"
path = "benches/throughput.rs"
harness = false
[dependencies.archery]
version = "1"
optional = true
default-features = false
[dependencies.arrayvec]
version = "0.7"
optional = true
default-features = false
[dependencies.cgmath]
version = "0.18"
optional = true
default-features = false
[dependencies.const_panic]
version = "0.2"
default-features = false
[dependencies.encase_derive]
version = "=0.10.0"
[dependencies.glam]
version = "0.29"
features = ["std"]
optional = true
default-features = false
[dependencies.im]
version = "15"
optional = true
default-features = false
[dependencies.im-rc]
version = "15"
optional = true
default-features = false
[dependencies.imbl]
version = "3"
optional = true
default-features = false
[dependencies.mint]
version = "0.5.9"
optional = true
default-features = false
[dependencies.nalgebra]
version = "0.33"
optional = true
default-features = false
[dependencies.ndarray]
version = "0.15"
optional = true
default-features = false
[dependencies.rpds]
version = "1"
optional = true
default-features = false
[dependencies.smallvec]
version = "1.8.0"
features = ["const_generics"]
optional = true
default-features = false
[dependencies.static-rc]
version = "0.6"
features = ["alloc"]
optional = true
default-features = false
[dependencies.thiserror]
version = "1"
default-features = false
[dependencies.tinyvec]
version = "1.4"
features = [
"rustc_1_55",
"alloc",
]
optional = true
default-features = false
[dependencies.ultraviolet]
version = "0.9"
features = ["int"]
optional = true
default-features = false
[dependencies.vek]
version = "0.17"
optional = true
default-features = false
[dev-dependencies.criterion]
version = "0.4"
features = [
"cargo_bench_support",
"html_reports",
]
default-features = false
[dev-dependencies.futures]
version = "0.3"
features = ["executor"]
default-features = false
[dev-dependencies.mimalloc]
version = "0.1"
default-features = false
[dev-dependencies.pprof]
version = "0.11"
features = [
"criterion",
"flamegraph",
]
default-features = false
[dev-dependencies.rand]
version = "0.8"
features = ["std_rng"]
default-features = false
[dev-dependencies.trybuild]
version = "1"
default-features = false
[dev-dependencies.wgpu]
version = "22.0.0"
features = ["wgsl"]

16
vendor/encase/LICENSE vendored Executable file
View File

@@ -0,0 +1,16 @@
MIT No Attribution
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

188
vendor/encase/README.md vendored Normal file
View File

@@ -0,0 +1,188 @@
<p align="center"><img src="./logo.svg" width="320px" alt/></p>
Provides a mechanism to lay out data into GPU buffers ensuring WGSL's memory layout requirements are met.
## Features
- supports all WGSL [host-shareable types] + wrapper types (`&T`, `&mut T`, `Box<T>`, ...)
- supports data types from a multitude of crates as [features]
- covers a wide area of use cases (see [examples](#examples))
## Motivation
Having to manually lay out data into GPU buffers can become very tedious and error prone. How do you make sure the data in the buffer is laid out correctly? Enforce it so that future changes don't break this delicate balance?
`encase` gives you the ability to make sure at compile time that your types will be laid out correctly.
## Design
The core trait is [`ShaderType`] which mainly contains metadata about the given type.
The [`WriteInto`], [`ReadFrom`] and [`CreateFrom`] traits represent the ability of a type to be written into the buffer, read from the buffer and created from the buffer respectively.
Most data types can implement the above traits via their respective macros:
- [`impl_vector!`] for vectors
- [`impl_matrix!`] for matrices
- [`impl_rts_array!`] for runtime-sized arrays
- [`impl_wrapper!`] for wrappers
- [`ShaderType`][derive@ShaderType] for structs
The [`UniformBuffer`], [`StorageBuffer`], [`DynamicUniformBuffer`] and [`DynamicStorageBuffer`] structs are wrappers around an underlying raw buffer (a type implementing [`BufferRef`] and/or [`BufferMut`] depending on required capability). They facilitate the read/write/create operations.
## Examples
Write affine transform to uniform buffer
```rust
use encase::{ShaderType, UniformBuffer};
#[derive(ShaderType)]
struct AffineTransform2D {
matrix: glam::Mat2,
translate: glam::Vec2
}
let transform = AffineTransform2D {
matrix: glam::Mat2::IDENTITY,
translate: glam::Vec2::ZERO,
};
let mut buffer = UniformBuffer::new(Vec::<u8>::new());
buffer.write(&transform).unwrap();
let byte_buffer = buffer.into_inner();
// write byte_buffer to GPU
assert_eq!(&byte_buffer, &[0, 0, 128, 63, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 128, 63, 0, 0, 0, 0, 0, 0, 0, 0]);
```
Create vector instance by reading from dynamic uniform buffer at specific offset
```rust
use encase::DynamicUniformBuffer;
// read byte_buffer from GPU
let byte_buffer = [1u8; 256 + 8];
let mut buffer = DynamicUniformBuffer::new(&byte_buffer);
buffer.set_offset(256);
let vector: mint::Vector2<i32> = buffer.create().unwrap();
assert_eq!(vector, mint::Vector2 { x: 16843009, y: 16843009 });
```
Write and read back data from storage buffer
```rust
use encase::{ShaderType, ArrayLength, StorageBuffer};
#[derive(ShaderType)]
struct Positions {
length: ArrayLength,
#[size(runtime)]
positions: Vec<mint::Point2<f32>>
}
let mut positions = Positions {
length: ArrayLength,
positions: Vec::from([
mint::Point2 { x: 4.5, y: 3.4 },
mint::Point2 { x: 1.5, y: 7.4 },
mint::Point2 { x: 4.3, y: 1.9 },
])
};
let mut byte_buffer: Vec<u8> = Vec::new();
let mut buffer = StorageBuffer::new(&mut byte_buffer);
buffer.write(&positions).unwrap();
// write byte_buffer to GPU
// change length on GPU side
byte_buffer[0] = 2;
// read byte_buffer from GPU
let mut buffer = StorageBuffer::new(&mut byte_buffer);
buffer.read(&mut positions).unwrap();
assert_eq!(positions.positions.len(), 2);
```
Write different data types to dynamic storage buffer
```rust
use encase::{ShaderType, DynamicStorageBuffer};
let mut byte_buffer: Vec<u8> = Vec::new();
let mut buffer = DynamicStorageBuffer::new_with_alignment(&mut byte_buffer, 64);
let offsets = [
buffer.write(&[5.; 10]).unwrap(),
buffer.write(&vec![3u32; 20]).unwrap(),
buffer.write(&glam::Vec3::ONE).unwrap(),
];
// write byte_buffer to GPU
assert_eq!(offsets, [0, 64, 192]);
```
Supports writing to uninitialized memory as well.
```rust
use std::mem::MaybeUninit;
use encase::{ShaderType, DynamicStorageBuffer};
let mut uninit_buffer: Vec<MaybeUninit<u8>> = Vec::new();
let mut buffer = DynamicStorageBuffer::new_with_alignment(&mut uninit_buffer, 64);
let offsets = [
buffer.write(&[5.; 10]).unwrap(),
buffer.write(&vec![3u32; 20]).unwrap(),
buffer.write(&glam::Vec3::ONE).unwrap(),
];
// SAFETY: Vec<u8> and Vec<MaybeUninit<u8>> share the same layout.
let byte_buffer: Vec<u8> = unsafe {
Vec::from_raw_parts(
uninit_buffer.as_mut_ptr().cast(),
uninit_buffer.len(),
uninit_buffer.capacity()
)
};
std::mem::forget(uninit_buffer);
// write byte_buffer to GPU
assert_eq!(offsets, [0, 64, 192]);
```
[host-shareable types]: https://gpuweb.github.io/gpuweb/wgsl/#host-shareable-types
[features]: https://docs.rs/crate/encase/latest/features
[`ShaderType`]: https://docs.rs/encase/latest/encase/trait.ShaderType.html
[`WriteInto`]: https://docs.rs/encase/latest/encase/internal/trait.WriteInto.html
[`ReadFrom`]: https://docs.rs/encase/latest/encase/internal/trait.ReadFrom.html
[`CreateFrom`]: https://docs.rs/encase/latest/encase/internal/trait.CreateFrom.html
[`impl_vector!`]: https://docs.rs/encase/latest/encase/macro.impl_vector.html
[`impl_matrix!`]: https://docs.rs/encase/latest/encase/macro.impl_matrix.html
[`impl_rts_array!`]: https://docs.rs/encase/latest/encase/macro.impl_rts_array.html
[`impl_wrapper!`]: https://docs.rs/encase/latest/encase/macro.impl_wrapper.html
[derive@ShaderType]: https://docs.rs/encase/latest/encase/derive.ShaderType.html
[`UniformBuffer`]: https://docs.rs/encase/latest/encase/struct.UniformBuffer.html
[`StorageBuffer`]: https://docs.rs/encase/latest/encase/struct.StorageBuffer.html
[`DynamicUniformBuffer`]: https://docs.rs/encase/latest/encase/struct.DynamicUniformBuffer.html
[`DynamicStorageBuffer`]: https://docs.rs/encase/latest/encase/struct.DynamicStorageBuffer.html
[`BufferRef`]: https://docs.rs/encase/latest/encase/internal/trait.BufferRef.html
[`BufferMut`]: https://docs.rs/encase/latest/encase/internal/trait.BufferMut.html

202
vendor/encase/benches/throughput.rs vendored Normal file
View File

@@ -0,0 +1,202 @@
use criterion::{criterion_group, criterion_main, Criterion, Throughput};
use encase::{ShaderType, StorageBuffer};
use pprof::criterion::{Output, PProfProfiler};
// Use mimalloc as the global allocator so the throughput numbers reflect a
// fast allocator rather than whatever the platform default happens to be.
#[global_allocator]
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;
// Draws 4 random bytes from `$rng` and reinterprets them as `$ty` in native
// endianness. Only valid for 4-byte types (f32/u32/i32 as used below).
macro_rules! gen {
    ($rng:ident, $ty:ty) => {{
        let mut buf = [0; 4];
        use rand::RngCore;
        $rng.fill_bytes(&mut buf);
        <$ty>::from_ne_bytes(buf)
    }};
}
// Random array `[$ty; $n]`; `map` re-runs `gen!` per element.
macro_rules! gen_arr {
    ($rng:ident, $ty:ty, $n:literal) => {{
        [(); $n].map(|_| gen!($rng, $ty))
    }};
}
// Random 2D array `[[$ty; $m]; $n]` (outer length `$n`, inner length `$m`).
macro_rules! gen_2d_arr {
    ($rng:ident, $ty:ty, $n:literal, $m:literal) => {{
        [[(); $m]; $n].map(|arr| arr.map(|_| gen!($rng, $ty)))
    }};
}
// `[expr; $n]` where the expression is re-evaluated for every element —
// unlike the `[val; n]` literal, which would copy a single generated value.
macro_rules! gen_inner {
    ($n:literal, $($tail:tt)*) => {{
        [(); $n].map(|_| $($tail)*)
    }};
}
/// Kitchen-sink benchmark struct exercising every category of field the
/// `ShaderType` derive handles: scalars, `NonZero`/`Wrapping` wrappers,
/// mint vectors and matrices, and fixed-size arrays of each.
#[derive(Debug, ShaderType, PartialEq, Clone, Copy)]
struct A {
    f: f32,
    u: u32,
    i: i32,
    nu: Option<core::num::NonZeroU32>,
    ni: Option<core::num::NonZeroI32>,
    wu: core::num::Wrapping<u32>,
    wi: core::num::Wrapping<i32>,
    // Atomics are intentionally left out of the benchmark:
    // au: core::sync::atomic::AtomicU32,
    // ai: core::sync::atomic::AtomicI32,
    v2: mint::Vector2<f32>,
    v3: mint::Vector3<u32>,
    v4: mint::Vector4<i32>,
    mat2: mint::ColumnMatrix2<f32>,
    mat2x3: mint::ColumnMatrix2x3<f32>,
    mat2x4: mint::ColumnMatrix2x4<f32>,
    mat3x2: mint::ColumnMatrix3x2<f32>,
    mat3: mint::ColumnMatrix3<f32>,
    mat3x4: mint::ColumnMatrix3x4<f32>,
    mat4x2: mint::ColumnMatrix4x2<f32>,
    mat4x3: mint::ColumnMatrix4x3<f32>,
    mat4: mint::ColumnMatrix4<f32>,
    arrf: [f32; 32],
    arru: [u32; 32],
    arri: [i32; 32],
    arrvf: [mint::Vector2<f32>; 16],
    arrvu: [mint::Vector3<u32>; 16],
    arrvi: [mint::Vector4<i32>; 16],
    arrm2: [mint::ColumnMatrix2<f32>; 8],
    arrm3: [mint::ColumnMatrix3<f32>; 8],
    arrm4: [mint::ColumnMatrix4<f32>; 8],
    // Pads the shader-side size of the struct; the const assert below pins
    // `A::METADATA.min_size()` to exactly 4096 bytes so the size table in
    // `bench` divides evenly into whole elements.
    #[size(1600)]
    _pad: u32,
}
/// Builds one fully-randomized `A` from the given seeded RNG.
///
/// Struct-literal fields are evaluated in declaration order, so the sequence
/// of RNG draws — and therefore the generated data — is deterministic for a
/// given seed. Do not reorder fields here without expecting different data.
fn gen_a(rng: &mut rand::rngs::StdRng) -> A {
    A {
        f: gen!(rng, f32),
        u: gen!(rng, u32),
        i: gen!(rng, i32),
        // May legitimately be None when the RNG produces 0.
        nu: core::num::NonZeroU32::new(gen!(rng, u32)),
        ni: core::num::NonZeroI32::new(gen!(rng, i32)),
        wu: core::num::Wrapping(gen!(rng, u32)),
        wi: core::num::Wrapping(gen!(rng, i32)),
        v2: mint::Vector2::from(gen_arr!(rng, f32, 2)),
        v3: mint::Vector3::from(gen_arr!(rng, u32, 3)),
        v4: mint::Vector4::from(gen_arr!(rng, i32, 4)),
        mat2: mint::ColumnMatrix2::from(gen_2d_arr!(rng, f32, 2, 2)),
        mat2x3: mint::ColumnMatrix2x3::from(gen_2d_arr!(rng, f32, 3, 2)),
        mat2x4: mint::ColumnMatrix2x4::from(gen_2d_arr!(rng, f32, 4, 2)),
        mat3x2: mint::ColumnMatrix3x2::from(gen_2d_arr!(rng, f32, 2, 3)),
        mat3: mint::ColumnMatrix3::from(gen_2d_arr!(rng, f32, 3, 3)),
        mat3x4: mint::ColumnMatrix3x4::from(gen_2d_arr!(rng, f32, 4, 3)),
        mat4x2: mint::ColumnMatrix4x2::from(gen_2d_arr!(rng, f32, 2, 4)),
        mat4x3: mint::ColumnMatrix4x3::from(gen_2d_arr!(rng, f32, 3, 4)),
        mat4: mint::ColumnMatrix4::from(gen_2d_arr!(rng, f32, 4, 4)),
        arrf: gen_arr!(rng, f32, 32),
        arru: gen_arr!(rng, u32, 32),
        arri: gen_arr!(rng, i32, 32),
        arrvf: gen_inner!(16, mint::Vector2::from(gen_arr!(rng, f32, 2))),
        arrvu: gen_inner!(16, mint::Vector3::from(gen_arr!(rng, u32, 3))),
        arrvi: gen_inner!(16, mint::Vector4::from(gen_arr!(rng, i32, 4))),
        arrm2: gen_inner!(8, mint::ColumnMatrix2::from(gen_2d_arr!(rng, f32, 2, 2))),
        arrm3: gen_inner!(8, mint::ColumnMatrix3::from(gen_2d_arr!(rng, f32, 3, 3))),
        arrm4: gen_inner!(8, mint::ColumnMatrix4::from(gen_2d_arr!(rng, f32, 4, 4))),
        _pad: gen!(rng, u32),
    }
}
// Compile-time check that `A`'s minimum shader size is exactly 4 KiB; the
// benchmark's element-count math (`size / A::min_size()`) relies on this.
// The second argument prints the actual size if the assert fires.
const _: () = const_panic::concat_assert!(
    A::METADATA.min_size().get() == 4096,
    A::METADATA.min_size().get()
);
/// Measures encase write/read/create throughput against two memcpy
/// baselines (manual byte loop and stdlib `copy_from_slice`) across
/// buffer sizes from 16 KiB to 512 MiB.
fn bench(c: &mut Criterion) {
    let mut group = c.benchmark_group("Throughput");
    use rand::SeedableRng;
    // Fixed seed so every run benchmarks identical data.
    let mut rng = rand::rngs::StdRng::seed_from_u64(1234);
    let a = gen_a(&mut rng);
    const KB: usize = 1024;
    const MB: usize = KB * KB;
    // const GB: usize = MB * KB;
    // Smaller sizes are kept around (commented out) for ad-hoc runs.
    let sizes = [
        // ("16B", 16),
        // ("128B", 128),
        // ("1KiB", KB),
        ("16KiB", 16 * KB),
        ("128KiB", 128 * KB),
        ("1MiB", MB),
        ("16MiB", 16 * MB),
        ("512MiB", 512 * MB),
    ];
    for (name, size) in sizes {
        // Report results as bytes/sec rather than raw iteration time.
        group.throughput(Throughput::Bytes(size as u64));
        // iter_batched_ref builds fresh inputs outside the timed section,
        // so setup/allocation cost is excluded from every measurement.
        group.bench_function(format!("{name}_write"), |b| {
            b.iter_batched_ref(
                || create_vecs(a, size),
                |(src, dst)| dst.write(src).unwrap(),
                criterion::BatchSize::LargeInput,
            );
        });
        group.bench_function(format!("{name}_read"), |b| {
            b.iter_batched_ref(
                || create_vecs(a, size),
                |(src, dst)| dst.read(src).unwrap(),
                criterion::BatchSize::LargeInput,
            );
        });
        group.bench_function(format!("{name}_create"), |b| {
            b.iter_batched_ref(
                || create_vecs(a, size),
                |(_src, dst)| dst.create::<Vec<A>>().unwrap(),
                criterion::BatchSize::LargeInput,
            );
        });
        // Baseline: hand-written byte-copy loop.
        group.bench_function(format!("{name}_manual"), |b| {
            b.iter_batched_ref(
                || create_aligned_vecs(size),
                |(dst, src)| manual_memcpy(dst, src),
                criterion::BatchSize::LargeInput,
            );
        });
        // Baseline: stdlib slice copy (compiles to memcpy).
        group.bench_function(format!("{name}_stdlib"), |b| {
            b.iter_batched_ref(
                || create_aligned_vecs(size),
                |(dst, src)| dst.copy_from_slice(src),
                criterion::BatchSize::LargeInput,
            );
        });
    }
    group.finish();
}
/// Copies `src` into `dst` one byte at a time; used as the "manual" baseline
/// against `copy_from_slice` and encase's write path.
///
/// Fix: the original parameter names were inverted — the mutable destination
/// slice was called `src` and the read-only source was called `dst`. Renamed
/// so the names match the data flow; behavior is unchanged (Rust parameters
/// are positional, so callers are unaffected).
///
/// # Panics
///
/// Panics if the two slices differ in length.
fn manual_memcpy(dst: &mut [u8], src: &[u8]) {
    assert_eq!(dst.len(), src.len());
    // The naive loop is the point of this baseline — keep clippy quiet.
    #[allow(clippy::manual_memcpy)]
    for i in 0..dst.len() {
        dst[i] = src[i];
    }
}
/// Allocates a pair of `size`-byte buffers for the memcpy baselines:
/// a source filled with 1s and a zeroed destination. Asserts that both
/// allocations are at least 8-byte aligned.
fn create_aligned_vecs(size: usize) -> (Vec<u8>, Vec<u8>) {
    let ones = vec![1u8; size];
    let zeros = vec![0u8; size];
    for buf in [&ones, &zeros] {
        assert_eq!(buf.as_ptr() as usize % 8, 0);
    }
    (ones, zeros)
}
/// Builds the encase benchmark inputs: a `Vec<A>` holding as many copies of
/// `a` as fit in `size` bytes (per `A::min_size`), plus a zeroed storage
/// buffer of exactly `size` bytes to write into / read from.
fn create_vecs(a: A, size: usize) -> (Vec<A>, StorageBuffer<Vec<u8>>) {
    let count = size / A::min_size().get() as usize;
    let elements = vec![a; count];
    let buffer = StorageBuffer::new(vec![0u8; size]);
    (elements, buffer)
}
// Benchmark harness entry point: default criterion config plus a pprof
// profiler sampling at 100 Hz that emits a flamegraph per benchmark.
criterion_group! {
    name = benches;
    config = Criterion::default()
        .with_profiler(PProfProfiler::new(100, Output::Flamegraph(None)));
    targets = bench
}
criterion_main!(benches);

1
vendor/encase/logo.svg vendored Normal file
View File

@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" xml:space="preserve" fill-rule="evenodd" stroke-linejoin="round" stroke-miterlimit="2" clip-rule="evenodd" viewBox="0 0 320 90"><path fill="none" d="M0 0h320v90H0z"/><path fill="#e96a53" d="M10 10h149.955v70H10z"/><path fill="#6e588b" d="M162.233 10h44.532v70h-44.532z"/><path fill="#e96a53" d="M209.031 10H310v70H209.031z"/><path fill="#fff" fill-rule="nonzero" d="M46.885 68.349c-4.193 0-7.861-.952-11.006-2.855-3.144-1.903-5.585-4.482-7.323-7.737-1.737-3.255-2.606-6.84-2.606-10.757 0-3.972.827-7.571 2.482-10.798 1.655-3.228 4.027-5.793 7.116-7.696 3.09-1.903 6.73-2.855 10.923-2.855 4.248 0 7.902.952 10.964 2.855 3.062 1.903 5.42 4.468 7.075 7.696 1.655 3.227 2.482 6.826 2.482 10.798v2.482H32.321c.331 2.428 1.117 4.648 2.359 6.662 1.241 2.013 2.909 3.613 5.006 4.799 2.096 1.186 4.523 1.779 7.281 1.779 2.924 0 5.379-.648 7.365-1.945 1.986-1.296 3.531-2.965 4.634-5.006h6.785c-1.434 3.696-3.71 6.716-6.827 9.061-3.116 2.344-7.13 3.517-12.039 3.517ZM32.404 43.69h28.134c-.552-3.53-2.041-6.482-4.468-8.854-2.428-2.372-5.627-3.558-9.599-3.558-3.972 0-7.158 1.186-9.557 3.558-2.4 2.372-3.903 5.324-4.51 8.854Zm42.201 23.997V26.313h6.206v6.455c1.489-1.986 3.31-3.669 5.461-5.048 2.152-1.379 4.91-2.069 8.275-2.069 2.869 0 5.558.676 8.068 2.028 2.51 1.351 4.551 3.365 6.123 6.04 1.572 2.676 2.358 5.972 2.358 9.888v24.08h-6.206V43.773c0-3.751-1.075-6.772-3.227-9.061-2.151-2.289-4.937-3.434-8.357-3.434-2.317 0-4.413.524-6.289 1.572-1.876 1.048-3.379 2.524-4.51 4.427-1.131 1.903-1.696 4.096-1.696 6.579v23.831h-6.206Zm64.625.662c-4.137 0-7.723-.952-10.757-2.855s-5.392-4.482-7.075-7.737c-1.682-3.255-2.523-6.84-2.523-10.757 0-3.972.841-7.571 2.523-10.798 1.683-3.228 4.041-5.793 7.075-7.696 3.034-1.903 6.62-2.855 10.757-2.855 4.91 0 9.089 1.269 12.537 3.807 3.447 2.537 5.778 5.819 6.992 9.847h-6.703c-.938-2.373-2.441-4.303-4.509-5.793-2.069-1.489-4.565-2.234-7.489-2.234-3.034 0-5.682.717-7.944 2.151-2.262 1.435-3.999 3.338-5.213 5.71-1.213 2.372-1.82 
4.992-1.82 7.861 0 2.813.607 5.42 1.82 7.82 1.214 2.399 2.951 4.316 5.213 5.751 2.262 1.434 4.91 2.151 7.944 2.151 2.924 0 5.42-.745 7.489-2.234 2.068-1.49 3.571-3.42 4.509-5.793h6.703c-1.214 4.028-3.545 7.31-6.992 9.847-3.448 2.538-7.627 3.807-12.537 3.807Zm44.684 0c-4.027 0-7.544-.952-10.55-2.855-3.007-1.903-5.338-4.482-6.992-7.737-1.655-3.255-2.483-6.84-2.483-10.757 0-3.972.828-7.571 2.483-10.798 1.654-3.228 3.985-5.793 6.992-7.696 3.006-1.903 6.523-2.855 10.55-2.855 3.42 0 6.358.662 8.812 1.986 2.455 1.324 4.455 3.117 6 5.379v-6.703h6.206v41.374h-6.206v-6.62c-1.545 2.207-3.545 3.972-6 5.296-2.454 1.324-5.392 1.986-8.812 1.986Zm.827-5.627c3.09 0 5.696-.717 7.82-2.151 2.124-1.435 3.737-3.352 4.841-5.751 1.103-2.4 1.655-5.007 1.655-7.82 0-2.869-.552-5.489-1.655-7.861-1.104-2.372-2.717-4.275-4.841-5.71-2.124-1.434-4.73-2.151-7.82-2.151-3.034 0-5.654.717-7.861 2.151-2.206 1.435-3.889 3.338-5.047 5.71-1.159 2.372-1.738 4.992-1.738 7.861 0 2.813.579 5.42 1.738 7.82 1.158 2.399 2.841 4.316 5.047 5.751 2.207 1.434 4.827 2.151 7.861 2.151Zm46.008 5.627c-3.807 0-7.02-.635-9.64-1.903-2.621-1.269-4.62-2.938-5.999-5.007-1.38-2.068-2.152-4.316-2.317-6.744h6.454c.165 1.38.648 2.717 1.448 4.014.8 1.296 2.027 2.344 3.682 3.144 1.655.8 3.807 1.2 6.455 1.2.827 0 1.848-.083 3.061-.248 1.214-.166 2.386-.469 3.517-.911 1.131-.441 2.082-1.103 2.855-1.985.772-.883 1.158-2.014 1.158-3.393 0-1.71-.662-3.034-1.986-3.972-1.324-.938-3.034-1.669-5.13-2.193s-4.317-1.034-6.661-1.531c-2.345-.496-4.565-1.144-6.661-1.944-2.097-.8-3.807-1.931-5.131-3.393-1.324-1.462-1.986-3.461-1.986-5.999 0-3.751 1.366-6.661 4.096-8.73 2.731-2.068 6.772-3.103 12.123-3.103 3.641 0 6.606.566 8.895 1.697 2.289 1.13 4.027 2.579 5.213 4.344 1.186 1.765 1.89 3.668 2.11 5.709h-6.289c-.22-1.765-1.089-3.282-2.606-4.551-1.517-1.269-4.013-1.903-7.489-1.903-6.564 0-9.847 1.986-9.847 5.958 0 1.655.662 2.924 1.986 3.806 1.324.883 3.034 1.586 5.131 2.11 2.096.524 4.316 1.021 6.661 1.49 2.344.469 4.565 1.117 6.661 1.944 
2.096.828 3.806 2 5.13 3.517 1.324 1.517 1.986 3.572 1.986 6.165 0 4.027-1.531 7.102-4.592 9.226-3.062 2.124-7.158 3.186-12.288 3.186Zm43.194 0c-4.193 0-7.861-.952-11.006-2.855-3.144-1.903-5.585-4.482-7.323-7.737-1.737-3.255-2.606-6.84-2.606-10.757 0-3.972.827-7.571 2.482-10.798 1.655-3.228 4.027-5.793 7.116-7.696 3.09-1.903 6.73-2.855 10.923-2.855 4.248 0 7.902.952 10.964 2.855 3.062 1.903 5.42 4.468 7.075 7.696 1.655 3.227 2.482 6.826 2.482 10.798v2.482h-34.671c.331 2.428 1.117 4.648 2.359 6.662 1.241 2.013 2.909 3.613 5.006 4.799 2.096 1.186 4.523 1.779 7.281 1.779 2.924 0 5.379-.648 7.365-1.945 1.986-1.296 3.531-2.965 4.634-5.006h6.785c-1.434 3.696-3.71 6.716-6.827 9.061-3.116 2.344-7.13 3.517-12.039 3.517ZM259.462 43.69h28.134c-.552-3.53-2.041-6.482-4.468-8.854-2.428-2.372-5.627-3.558-9.599-3.558-3.972 0-7.158 1.186-9.557 3.558-2.4 2.372-3.903 5.324-4.51 8.854Z"/></svg>

After

Width:  |  Height:  |  Size: 4.8 KiB

3
vendor/encase/release.toml vendored Normal file
View File

@@ -0,0 +1,3 @@
shared-version = true
tag-name = "v{{version}}"
tag-message = "{{date}}"

View File

@@ -0,0 +1,152 @@
use super::SizeValue;
use core::num::NonZeroU64;
/// Helper type for alignment calculations.
///
/// Invariant: the wrapped value is always a non-zero power of two, which is
/// what makes the unchecked constructions below sound.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct AlignmentValue(NonZeroU64);

impl AlignmentValue {
    /// Creates an alignment from `val`.
    ///
    /// # Panics
    ///
    /// Panics if `val` is not a power of two.
    pub const fn new(val: u64) -> Self {
        if val.is_power_of_two() {
            // SAFETY: a power of two is never 0
            Self(unsafe { NonZeroU64::new_unchecked(val) })
        } else {
            panic!("Alignment must be a power of 2!");
        }
    }
    /// Returns an alignment that is the smallest power of two greater than the passed in `size`
    #[inline]
    pub const fn from_next_power_of_two_size(size: SizeValue) -> Self {
        match size.get().checked_next_power_of_two() {
            Some(val) => {
                // SAFETY: `checked_next_power_of_two` never yields 0
                Self(unsafe { NonZeroU64::new_unchecked(val) })
            }
            None => panic!("Overflow occurred while getting the next power of 2!"),
        }
    }
    /// Returns the alignment as a plain `u64`.
    #[inline]
    pub const fn get(&self) -> u64 {
        self.0.get()
    }
    /// Returns the max alignment from an array of alignments
    pub const fn max<const N: usize>(input: [AlignmentValue; N]) -> AlignmentValue {
        // const fn context: no iterators available, scan manually.
        let mut best = input[0];
        let mut idx = 1;
        while idx < N {
            if input[idx].get() > best.get() {
                best = input[idx];
            }
            idx += 1;
        }
        best
    }
    /// Returns true if `n` is a multiple of this alignment
    #[inline]
    pub const fn is_aligned(&self, n: u64) -> bool {
        n % self.get() == 0
    }
    /// Returns the amount of padding needed so that `n + padding` will be a multiple of this alignment
    #[inline]
    pub const fn padding_needed_for(&self, n: u64) -> u64 {
        match n % self.get() {
            0 => 0,
            rem => self.get() - rem,
        }
    }
    /// Will round up the given `n` so that the returned value will be a multiple of this alignment
    #[inline]
    pub const fn round_up(&self, n: u64) -> u64 {
        self.padding_needed_for(n) + n
    }
    /// Will round up the given `n` so that the returned value will be a multiple of this alignment
    #[inline]
    pub const fn round_up_size(&self, n: SizeValue) -> SizeValue {
        SizeValue::new(self.round_up(n.get()))
    }
}
#[cfg(test)]
mod test {
    use super::AlignmentValue;
    #[test]
    fn new() {
        assert_eq!(4, AlignmentValue::new(4).get());
    }
    // 3 is not a power of two, so construction must panic.
    #[test]
    #[should_panic]
    fn new_panic() {
        AlignmentValue::new(3);
    }
    #[test]
    fn from_next_power_of_two_size() {
        assert_eq!(
            AlignmentValue::new(8),
            AlignmentValue::from_next_power_of_two_size(super::SizeValue::new(7))
        );
    }
    // u64::MAX has no next power of two, so the overflow branch must panic.
    #[test]
    #[should_panic]
    fn from_next_power_of_two_size_panic() {
        AlignmentValue::from_next_power_of_two_size(super::SizeValue::new(u64::MAX));
    }
    #[test]
    fn max() {
        assert_eq!(
            AlignmentValue::new(32),
            AlignmentValue::max([
                AlignmentValue::new(2),
                AlignmentValue::new(8),
                AlignmentValue::new(32)
            ])
        );
    }
    #[test]
    fn is_aligned() {
        assert!(AlignmentValue::new(8).is_aligned(32));
        assert!(!AlignmentValue::new(8).is_aligned(9));
    }
    #[test]
    fn padding_needed_for() {
        assert_eq!(1, AlignmentValue::new(8).padding_needed_for(7));
        assert_eq!(16 - 9, AlignmentValue::new(8).padding_needed_for(9));
    }
    #[test]
    fn round_up() {
        assert_eq!(24, AlignmentValue::new(8).round_up(20));
        assert_eq!(
            super::SizeValue::new(16),
            AlignmentValue::new(16).round_up_size(super::SizeValue::new(7))
        );
    }
    // Exercises the derived Clone/PartialEq/Debug impls explicitly.
    #[test]
    fn derived_traits() {
        let alignment = AlignmentValue::new(8);
        #[allow(clippy::clone_on_copy)]
        let alignment_clone = alignment.clone();
        assert!(alignment == alignment_clone);
        assert_eq!(format!("{alignment:?}"), "AlignmentValue(8)");
    }
}

317
vendor/encase/src/core/buffers.rs vendored Normal file
View File

@@ -0,0 +1,317 @@
use super::{
AlignmentValue, BufferMut, BufferRef, CreateFrom, ReadFrom, Reader, Result, ShaderType,
WriteInto, Writer,
};
/// Storage buffer wrapper facilitating RW operations
pub struct StorageBuffer<B> {
    inner: B,
}

impl<B> StorageBuffer<B> {
    /// Wraps the given raw buffer.
    pub const fn new(buffer: B) -> Self {
        Self { inner: buffer }
    }
    /// Consumes the wrapper, returning the underlying raw buffer.
    pub fn into_inner(self) -> B {
        self.inner
    }
}

impl<B> From<B> for StorageBuffer<B> {
    fn from(buffer: B) -> Self {
        Self::new(buffer)
    }
}

impl<B> AsRef<B> for StorageBuffer<B> {
    fn as_ref(&self) -> &B {
        &self.inner
    }
}

impl<B> AsMut<B> for StorageBuffer<B> {
    fn as_mut(&mut self) -> &mut B {
        &mut self.inner
    }
}

impl<B: BufferMut> StorageBuffer<B> {
    /// Lays out `value` into the buffer starting at offset 0.
    pub fn write<T>(&mut self, value: &T) -> Result<()>
    where
        T: ?Sized + ShaderType + WriteInto,
    {
        let mut writer = Writer::new(value, &mut self.inner, 0)?;
        value.write_into(&mut writer);
        Ok(())
    }
}

impl<B: BufferRef> StorageBuffer<B> {
    /// Reads the buffer contents (from offset 0) into `value`.
    pub fn read<T>(&self, value: &mut T) -> Result<()>
    where
        T: ?Sized + ShaderType + ReadFrom,
    {
        // Fix: this local is a `Reader`, not a writer — renamed accordingly.
        let mut reader = Reader::new::<T>(&self.inner, 0)?;
        value.read_from(&mut reader);
        Ok(())
    }
    /// Creates a new `T` from the buffer contents starting at offset 0.
    pub fn create<T>(&self) -> Result<T>
    where
        T: ShaderType + CreateFrom,
    {
        let mut reader = Reader::new::<T>(&self.inner, 0)?;
        Ok(T::create_from(&mut reader))
    }
}
/// Uniform buffer wrapper facilitating RW operations
///
/// Delegates to [`StorageBuffer`], additionally calling
/// `T::assert_uniform_compat()` before every operation so that `T` is
/// checked against the uniform address space layout requirements.
pub struct UniformBuffer<B> {
    inner: StorageBuffer<B>,
}

impl<B> UniformBuffer<B> {
    /// Wraps the given raw buffer.
    pub const fn new(buffer: B) -> Self {
        Self {
            inner: StorageBuffer::new(buffer),
        }
    }
    /// Consumes the wrapper, returning the underlying raw buffer.
    pub fn into_inner(self) -> B {
        self.inner.into_inner()
    }
}

impl<B> From<B> for UniformBuffer<B> {
    fn from(buffer: B) -> Self {
        Self::new(buffer)
    }
}

impl<B> AsRef<B> for UniformBuffer<B> {
    fn as_ref(&self) -> &B {
        self.inner.as_ref()
    }
}

impl<B> AsMut<B> for UniformBuffer<B> {
    fn as_mut(&mut self) -> &mut B {
        self.inner.as_mut()
    }
}

impl<B: BufferMut> UniformBuffer<B> {
    /// Checks uniform layout compatibility of `T`, then writes `value`.
    pub fn write<T: ?Sized + ShaderType + WriteInto>(&mut self, value: &T) -> Result<()> {
        T::assert_uniform_compat();
        self.inner.write(value)
    }
}

impl<B: BufferRef> UniformBuffer<B> {
    /// Checks uniform layout compatibility of `T`, then reads into `value`.
    pub fn read<T: ?Sized + ShaderType + ReadFrom>(&self, value: &mut T) -> Result<()> {
        T::assert_uniform_compat();
        self.inner.read(value)
    }
    /// Checks uniform layout compatibility of `T`, then creates a `T` from the buffer.
    pub fn create<T: ShaderType + CreateFrom>(&self) -> Result<T> {
        T::assert_uniform_compat();
        self.inner.create()
    }
}
/// Dynamic storage buffer wrapper facilitating RW operations
pub struct DynamicStorageBuffer<B> {
    inner: B,
    alignment: AlignmentValue,
    // Byte offset at which the next RW operation starts; advanced after
    // every write/read/create, rounded up to `alignment`.
    offset: usize,
}

impl<B> DynamicStorageBuffer<B> {
    /// Creates a new dynamic storage buffer wrapper with an alignment of 256
    /// (default alignment in the WebGPU spec).
    pub const fn new(buffer: B) -> Self {
        Self::new_with_alignment(buffer, 256)
    }
    /// Creates a new dynamic storage buffer wrapper with a given alignment.
    /// # Panics
    ///
    /// - if `alignment` is not a power of two.
    /// - if `alignment` is less than 32 (min alignment imposed by the WebGPU spec).
    pub const fn new_with_alignment(buffer: B, alignment: u64) -> Self {
        if alignment < 32 {
            panic!("Alignment must be at least 32!");
        }
        Self {
            inner: buffer,
            // `AlignmentValue::new` enforces the power-of-two requirement.
            alignment: AlignmentValue::new(alignment),
            offset: 0,
        }
    }
    /// Sets the offset of the next RW operation.
    ///
    /// # Panics
    ///
    /// Panics if `offset` is not a multiple of this buffer's alignment.
    pub fn set_offset(&mut self, offset: u64) {
        if !self.alignment.is_aligned(offset) {
            panic!(
                "offset of {} bytes is not aligned to alignment of {} bytes",
                offset,
                self.alignment.get()
            );
        }
        self.offset = offset as usize;
    }
    /// Consumes the wrapper, returning the underlying raw buffer.
    pub fn into_inner(self) -> B {
        self.inner
    }
}

impl<B> From<B> for DynamicStorageBuffer<B> {
    fn from(buffer: B) -> Self {
        Self::new(buffer)
    }
}

impl<B> AsRef<B> for DynamicStorageBuffer<B> {
    fn as_ref(&self) -> &B {
        &self.inner
    }
}

impl<B> AsMut<B> for DynamicStorageBuffer<B> {
    fn as_mut(&mut self) -> &mut B {
        &mut self.inner
    }
}

impl<B: BufferMut> DynamicStorageBuffer<B> {
    /// Writes `value` at the current offset, then advances the offset past
    /// the value (rounded up to the alignment). Returns the offset the value
    /// was written at.
    pub fn write<T>(&mut self, value: &T) -> Result<u64>
    where
        T: ?Sized + ShaderType + WriteInto,
    {
        let offset = self.offset;
        let mut writer = Writer::new(value, &mut self.inner, offset)?;
        value.write_into(&mut writer);
        self.offset += self.alignment.round_up(value.size().get()) as usize;
        Ok(offset as u64)
    }
}

impl<B: BufferRef> DynamicStorageBuffer<B> {
    /// Reads into `value` from the current offset, then advances the offset.
    pub fn read<T>(&mut self, value: &mut T) -> Result<()>
    where
        T: ?Sized + ShaderType + ReadFrom,
    {
        // Fix: this local is a `Reader`, not a writer — renamed accordingly.
        let mut reader = Reader::new::<T>(&self.inner, self.offset)?;
        value.read_from(&mut reader);
        self.offset += self.alignment.round_up(value.size().get()) as usize;
        Ok(())
    }
    /// Creates a `T` by reading from the current offset, then advances the
    /// offset past the created value.
    pub fn create<T>(&mut self) -> Result<T>
    where
        T: ShaderType + CreateFrom,
    {
        let mut reader = Reader::new::<T>(&self.inner, self.offset)?;
        let value = T::create_from(&mut reader);
        self.offset += self.alignment.round_up(value.size().get()) as usize;
        Ok(value)
    }
}
/// Dynamic uniform buffer wrapper facilitating RW operations
pub struct DynamicUniformBuffer<B> {
    // Delegates all offset bookkeeping to the storage-buffer wrapper; the
    // uniform variant only adds `assert_uniform_compat` checks.
    inner: DynamicStorageBuffer<B>,
}
impl<B> DynamicUniformBuffer<B> {
    /// Creates a new dynamic uniform buffer wrapper with an alignment of 256
    /// (default alignment in the WebGPU spec).
    pub const fn new(buffer: B) -> Self {
        Self {
            inner: DynamicStorageBuffer::new(buffer),
        }
    }
    /// Creates a new dynamic uniform buffer wrapper with a given alignment.
    /// # Panics
    ///
    /// - if `alignment` is not a power of two.
    /// - if `alignment` is less than 32 (min alignment imposed by the WebGPU spec).
    pub const fn new_with_alignment(buffer: B, alignment: u64) -> Self {
        Self {
            inner: DynamicStorageBuffer::new_with_alignment(buffer, alignment),
        }
    }
    /// Sets the offset subsequent reads/writes start from.
    ///
    /// # Panics
    ///
    /// If `offset` is not a multiple of the configured alignment.
    pub fn set_offset(&mut self, offset: u64) {
        self.inner.set_offset(offset);
    }
    /// Consumes the wrapper, returning the underlying buffer.
    pub fn into_inner(self) -> B {
        self.inner.inner
    }
}
// Wraps a raw buffer using the default 256-byte alignment.
impl<B> From<B> for DynamicUniformBuffer<B> {
    fn from(buffer: B) -> Self {
        Self::new(buffer)
    }
}
impl<B> AsRef<B> for DynamicUniformBuffer<B> {
    // Borrows the underlying buffer.
    fn as_ref(&self) -> &B {
        &self.inner.inner
    }
}
impl<B> AsMut<B> for DynamicUniformBuffer<B> {
    // Mutably borrows the underlying buffer.
    fn as_mut(&mut self) -> &mut B {
        &mut self.inner.inner
    }
}
impl<B: BufferMut> DynamicUniformBuffer<B> {
    /// Writes `value` at the current offset and advances it; returns the
    /// offset the value was written at.
    ///
    /// # Panics
    ///
    /// If `T` fails its uniform address space compatibility assertions.
    pub fn write<T>(&mut self, value: &T) -> Result<u64>
    where
        T: ?Sized + ShaderType + WriteInto,
    {
        T::assert_uniform_compat();
        self.inner.write(value)
    }
}
impl<B: BufferRef> DynamicUniformBuffer<B> {
    /// Reads a value of type `T` at the current offset into `value`, then
    /// advances the offset.
    ///
    /// # Panics
    ///
    /// If `T` fails its uniform address space compatibility assertions.
    pub fn read<T>(&mut self, value: &mut T) -> Result<()>
    where
        T: ?Sized + ShaderType + ReadFrom,
    {
        T::assert_uniform_compat();
        self.inner.read(value)
    }
    /// Creates a value of type `T` by reading at the current offset, then
    /// advances the offset.
    ///
    /// # Panics
    ///
    /// If `T` fails its uniform address space compatibility assertions.
    pub fn create<T>(&mut self) -> Result<T>
    where
        T: ShaderType + CreateFrom,
    {
        T::assert_uniform_compat();
        self.inner.create()
    }
}

11
vendor/encase/src/core/mod.rs vendored Normal file
View File

@@ -0,0 +1,11 @@
mod alignment_value;
mod buffers;
mod rw;
mod size_value;
mod traits;
pub use alignment_value::*;
pub use buffers::*;
pub use rw::*;
pub use size_value::*;
pub use traits::*;

541
vendor/encase/src/core/rw.rs vendored Normal file
View File

@@ -0,0 +1,541 @@
use super::ShaderType;
use core::mem::MaybeUninit;
use thiserror::Error;
/// Errors that can occur while reading from / writing into a buffer.
#[derive(Clone, Copy, Debug, Error)]
pub enum Error {
    #[error("could not read/write {expected} bytes from/into {found} byte sized buffer")]
    BufferTooSmall { expected: u64, found: u64 },
}
/// Convenience alias for buffer read/write results.
pub type Result<T> = core::result::Result<T, Error>;
/// Per-write state shared with `WriteInto` implementations.
pub struct WriteContext {
    /// length of the contained runtime sized array
    ///
    /// used by the derive macro
    pub rts_array_length: Option<u32>,
}
/// Sequential byte writer over a [`BufferMut`].
pub struct Writer<B: BufferMut> {
    pub ctx: WriteContext,
    // Tracks the absolute write position inside the buffer.
    cursor: Cursor<B>,
}
impl<B: BufferMut> Writer<B> {
    /// Creates a writer positioned at `offset`, growing the buffer (when
    /// possible) so it can hold `data`.
    ///
    /// # Errors
    ///
    /// Returns [`Error::BufferTooSmall`] if the buffer cannot be enlarged to
    /// fit `offset + data.size()` bytes.
    #[inline]
    pub fn new<T: ?Sized + ShaderType>(data: &T, buffer: B, offset: usize) -> Result<Self> {
        let mut cursor = Cursor::new(buffer, offset);
        let size = data.size().get();
        // Compute the required end position without truncation (`size as
        // usize` could wrap on 32-bit targets) or overflow; treat either
        // failure exactly like a buffer that cannot be enlarged.
        let enlarged = usize::try_from(size)
            .ok()
            .and_then(|size| offset.checked_add(size))
            .map_or(false, |wanted| cursor.try_enlarge(wanted).is_ok());
        if enlarged {
            Ok(Self {
                ctx: WriteContext {
                    rts_array_length: None,
                },
                cursor,
            })
        } else {
            Err(Error::BufferTooSmall {
                expected: size,
                found: cursor.capacity() as u64,
            })
        }
    }
    /// Moves the write position forward by `amount` bytes without writing.
    #[inline]
    pub fn advance(&mut self, amount: usize) {
        self.cursor.advance(amount);
    }
    /// Writes a fixed-size byte array at the current position and advances.
    #[inline]
    pub fn write<const N: usize>(&mut self, val: &[u8; N]) {
        self.cursor.write(val);
    }
    /// Writes a byte slice at the current position and advances.
    #[inline]
    pub fn write_slice(&mut self, val: &[u8]) {
        self.cursor.write_slice(val)
    }
}
/// Per-read state shared with `ReadFrom`/`CreateFrom` implementations.
pub struct ReadContext {
    /// max elements to read into the contained runtime sized array
    ///
    /// used by the derive macro
    pub rts_array_max_el_to_read: Option<u32>,
}
/// Sequential byte reader over a [`BufferRef`].
pub struct Reader<B: BufferRef> {
    pub ctx: ReadContext,
    // Tracks the absolute read position inside the buffer.
    cursor: Cursor<B>,
}
impl<B: BufferRef> Reader<B> {
    /// Creates a reader positioned at `offset`, verifying that at least
    /// `T::min_size()` bytes remain past it.
    ///
    /// # Errors
    ///
    /// Returns [`Error::BufferTooSmall`] if fewer than `T::min_size()` bytes
    /// remain.
    #[inline]
    pub fn new<T: ?Sized + ShaderType>(buffer: B, offset: usize) -> Result<Self> {
        let cursor = Cursor::new(buffer, offset);
        // Compare in `u64`: casting `min_size` down to `usize` could truncate
        // on 32-bit targets and let an undersized buffer pass the check.
        // Widening `remaining()` to `u64` is always lossless.
        if (cursor.remaining() as u64) < T::min_size().get() {
            Err(Error::BufferTooSmall {
                expected: T::min_size().get(),
                found: cursor.remaining() as u64,
            })
        } else {
            Ok(Self {
                ctx: ReadContext {
                    rts_array_max_el_to_read: None,
                },
                cursor,
            })
        }
    }
    /// Moves the read position forward by `amount` bytes without reading.
    #[inline]
    pub fn advance(&mut self, amount: usize) {
        self.cursor.advance(amount);
    }
    /// Reads `N` bytes at the current position and advances past them.
    #[inline]
    pub fn read<const N: usize>(&mut self) -> &[u8; N] {
        self.cursor.read()
    }
    /// Fills `val` from the current position and advances past it.
    #[inline]
    pub fn read_slice(&mut self, val: &mut [u8]) {
        self.cursor.read_slice(val)
    }
    /// Bytes remaining past the current position.
    #[inline]
    pub fn remaining(&self) -> usize {
        self.cursor.remaining()
    }
}
/// Byte-position tracker over a buffer, shared by `Reader` and `Writer`.
struct Cursor<B> {
    buffer: B,
    // Current absolute position within `buffer`.
    pos: usize,
}
impl<B> Cursor<B> {
    #[inline]
    fn new(buffer: B, offset: usize) -> Self {
        Self {
            buffer,
            pos: offset,
        }
    }
    // Moves the position forward by `amount` bytes.
    #[inline]
    fn advance(&mut self, amount: usize) {
        self.pos += amount;
    }
}
impl<B: BufferRef> Cursor<B> {
    // Bytes left between the current position and the end of the buffer.
    #[inline]
    fn remaining(&self) -> usize {
        self.buffer.len().saturating_sub(self.pos)
    }
    // Reads `N` bytes at the current position and advances past them.
    #[inline]
    fn read<const N: usize>(&mut self) -> &[u8; N] {
        let res = self.buffer.read(self.pos);
        self.pos += N;
        res
    }
    // Fills `val` from the current position and advances past it.
    #[inline]
    fn read_slice(&mut self, val: &mut [u8]) {
        self.buffer.read_slice(self.pos, val);
        self.pos += val.len();
    }
}
impl<B: BufferMut> Cursor<B> {
    // Capacity left between the current position and the buffer's capacity.
    #[inline]
    fn capacity(&self) -> usize {
        self.buffer.capacity().saturating_sub(self.pos)
    }
    // Writes `N` bytes at the current position and advances past them.
    #[inline]
    fn write<const N: usize>(&mut self, val: &[u8; N]) {
        self.buffer.write(self.pos, val);
        self.pos += N;
    }
    // Writes `val` at the current position and advances past it.
    #[inline]
    fn write_slice(&mut self, val: &[u8]) {
        self.buffer.write_slice(self.pos, val);
        self.pos += val.len();
    }
    // Asks the buffer to grow to `wanted` total bytes (absolute, not
    // relative to `pos`).
    #[inline]
    fn try_enlarge(&mut self, wanted: usize) -> core::result::Result<(), EnlargeError> {
        self.buffer.try_enlarge(wanted)
    }
}
/// Error returned when a buffer could not be grown to the required size.
#[derive(Clone, Copy, Debug, Error)]
#[error("could not enlarge buffer")]
pub struct EnlargeError;
// Lets `Vec::try_reserve` failures be converted directly (the allocation
// details are intentionally discarded).
impl From<std::collections::TryReserveError> for EnlargeError {
    fn from(_: std::collections::TryReserveError) -> Self {
        Self
    }
}
/// A byte buffer that supports reads at arbitrary offsets.
#[allow(clippy::len_without_is_empty)]
pub trait BufferRef {
    /// Number of readable bytes.
    fn len(&self) -> usize;
    /// Returns a reference to the `N` bytes starting at `offset`.
    fn read<const N: usize>(&self, offset: usize) -> &[u8; N];
    /// Copies `val.len()` bytes starting at `offset` into `val`.
    fn read_slice(&self, offset: usize, val: &mut [u8]);
}
/// A byte buffer that supports writes at arbitrary offsets.
pub trait BufferMut {
    /// Number of bytes the buffer can hold.
    fn capacity(&self) -> usize;
    /// Writes the `N` bytes of `val` starting at `offset`.
    fn write<const N: usize>(&mut self, offset: usize, val: &[u8; N]);
    /// Writes `val` starting at `offset`.
    fn write_slice(&mut self, offset: usize, val: &[u8]);
    /// Tries to make the buffer at least `wanted` bytes large.
    ///
    /// The default implementation only checks the existing capacity;
    /// growable buffers (e.g. `Vec`) override it to actually allocate.
    #[inline]
    fn try_enlarge(&mut self, wanted: usize) -> core::result::Result<(), EnlargeError> {
        if wanted > self.capacity() {
            Err(EnlargeError)
        } else {
            Ok(())
        }
    }
}
// `BufferRef` for plain byte slices: reads index straight into the slice.
impl BufferRef for [u8] {
    fn len(&self) -> usize {
        // Calls the inherent slice `len`, not a recursive trait call.
        self.len()
    }
    #[inline]
    fn read<const N: usize>(&self, offset: usize) -> &[u8; N] {
        use crate::utils::SliceExt;
        self.array(offset)
    }
    #[inline]
    fn read_slice(&self, offset: usize, val: &mut [u8]) {
        val.copy_from_slice(&self[offset..offset + val.len()])
    }
}
// Fixed-size arrays delegate to the `[u8]` impl above.
impl<const LEN: usize> BufferRef for [u8; LEN] {
    #[inline]
    fn len(&self) -> usize {
        <[u8] as BufferRef>::len(self)
    }
    #[inline]
    fn read<const N: usize>(&self, offset: usize) -> &[u8; N] {
        <[u8] as BufferRef>::read(self, offset)
    }
    #[inline]
    fn read_slice(&self, offset: usize, val: &mut [u8]) {
        <[u8] as BufferRef>::read_slice(self, offset, val)
    }
}
// `Vec<u8>` also delegates to the `[u8]` impl (reads never need to grow).
impl BufferRef for Vec<u8> {
    #[inline]
    fn len(&self) -> usize {
        <[u8] as BufferRef>::len(self)
    }
    #[inline]
    fn read<const N: usize>(&self, offset: usize) -> &[u8; N] {
        <[u8] as BufferRef>::read(self, offset)
    }
    #[inline]
    fn read_slice(&self, offset: usize, val: &mut [u8]) {
        <[u8] as BufferRef>::read_slice(self, offset, val)
    }
}
// `BufferMut` for plain byte slices: capacity is fixed at the slice length.
impl BufferMut for [u8] {
    #[inline]
    fn capacity(&self) -> usize {
        self.len()
    }
    #[inline]
    fn write<const N: usize>(&mut self, offset: usize, val: &[u8; N]) {
        use crate::utils::SliceExt;
        *self.array_mut(offset) = *val;
    }
    #[inline]
    fn write_slice(&mut self, offset: usize, val: &[u8]) {
        self[offset..offset + val.len()].copy_from_slice(val);
    }
}
// Same as the `[u8]` impl, but targets possibly-uninitialized storage; the
// input bytes are always initialized, so only layout equivalence is needed.
impl BufferMut for [MaybeUninit<u8>] {
    #[inline]
    fn capacity(&self) -> usize {
        self.len()
    }
    #[inline]
    fn write<const N: usize>(&mut self, offset: usize, val: &[u8; N]) {
        use crate::utils::SliceExt;
        // SAFETY: &[u8; N] and &[MaybeUninit<u8>; N] have the same layout
        let val: &[MaybeUninit<u8>; N] = unsafe { core::mem::transmute(val) };
        *self.array_mut(offset) = *val;
    }
    #[inline]
    fn write_slice(&mut self, offset: usize, val: &[u8]) {
        // SAFETY: &[u8] and &[MaybeUninit<u8>] have the same layout
        let val: &[MaybeUninit<u8>] = unsafe { core::mem::transmute(val) };
        self[offset..offset + val.len()].copy_from_slice(val);
    }
}
// Fixed-size arrays delegate to the corresponding slice impls.
impl<const LEN: usize> BufferMut for [u8; LEN] {
    #[inline]
    fn capacity(&self) -> usize {
        <[u8] as BufferMut>::capacity(self)
    }
    #[inline]
    fn write<const N: usize>(&mut self, offset: usize, val: &[u8; N]) {
        <[u8] as BufferMut>::write(self, offset, val);
    }
    #[inline]
    fn write_slice(&mut self, offset: usize, val: &[u8]) {
        <[u8] as BufferMut>::write_slice(self, offset, val)
    }
}
impl<const LEN: usize> BufferMut for [MaybeUninit<u8>; LEN] {
    #[inline]
    fn capacity(&self) -> usize {
        <[MaybeUninit<u8>] as BufferMut>::capacity(self)
    }
    #[inline]
    fn write<const N: usize>(&mut self, offset: usize, val: &[u8; N]) {
        <[MaybeUninit<u8>] as BufferMut>::write(self, offset, val)
    }
    #[inline]
    fn write_slice(&mut self, offset: usize, val: &[u8]) {
        <[MaybeUninit<u8>] as BufferMut>::write_slice(self, offset, val)
    }
}
// `Vec` buffers are growable: `try_enlarge` actually reserves/extends.
impl BufferMut for Vec<u8> {
    #[inline]
    fn capacity(&self) -> usize {
        self.capacity()
    }
    #[inline]
    fn write<const N: usize>(&mut self, offset: usize, val: &[u8; N]) {
        <[u8] as BufferMut>::write(self, offset, val);
    }
    #[inline]
    fn write_slice(&mut self, offset: usize, val: &[u8]) {
        <[u8] as BufferMut>::write_slice(self, offset, val)
    }
    #[inline]
    fn try_enlarge(&mut self, wanted: usize) -> core::result::Result<(), EnlargeError> {
        use crate::utils::ByteVecExt;
        self.try_extend(wanted).map_err(EnlargeError::from)
    }
}
impl BufferMut for Vec<MaybeUninit<u8>> {
    #[inline]
    fn capacity(&self) -> usize {
        self.capacity()
    }
    #[inline]
    fn write<const N: usize>(&mut self, offset: usize, val: &[u8; N]) {
        <[MaybeUninit<u8>] as BufferMut>::write(self, offset, val)
    }
    #[inline]
    fn write_slice(&mut self, offset: usize, val: &[u8]) {
        <[MaybeUninit<u8>] as BufferMut>::write_slice(self, offset, val)
    }
    #[inline]
    fn try_enlarge(&mut self, wanted: usize) -> core::result::Result<(), EnlargeError> {
        use crate::utils::ByteVecExt;
        self.try_extend(wanted).map_err(EnlargeError::from)
    }
}
// Forwards `BufferRef` through reference/smart-pointer wrappers so callers
// can pass e.g. `&Vec<u8>` or `Arc<[u8]>` directly.
macro_rules! impl_buffer_ref_for_wrappers {
    ($($type:ty),*) => {$(
        impl<T: ?Sized + BufferRef> BufferRef for $type {
            #[inline]
            fn len(&self) -> usize {
                T::len(self)
            }
            #[inline]
            fn read<const N: usize>(&self, offset: usize) -> &[u8; N] {
                T::read(self, offset)
            }
            #[inline]
            fn read_slice(&self, offset: usize, val: &mut [u8]) {
                T::read_slice(self, offset, val)
            }
        }
    )*};
}
impl_buffer_ref_for_wrappers!(&T, &mut T, Box<T>, std::rc::Rc<T>, std::sync::Arc<T>);
// Forwards `BufferMut` through wrappers that allow mutation (shared-ownership
// pointers like `Rc`/`Arc` are deliberately excluded).
macro_rules! impl_buffer_mut_for_wrappers {
    ($($type:ty),*) => {$(
        impl<T: ?Sized + BufferMut> BufferMut for $type {
            #[inline]
            fn capacity(&self) -> usize {
                T::capacity(self)
            }
            #[inline]
            fn write<const N: usize>(&mut self, offset: usize, val: &[u8; N]) {
                T::write(self, offset, val)
            }
            #[inline]
            fn write_slice(&mut self, offset: usize, val: &[u8]) {
                T::write_slice(self, offset, val)
            }
            #[inline]
            fn try_enlarge(&mut self, wanted: usize) -> core::result::Result<(), EnlargeError> {
                T::try_enlarge(self, wanted)
            }
        }
    )*};
}
impl_buffer_mut_for_wrappers!(&mut T, Box<T>);
// Sanity checks for the array/Vec `BufferRef` impls.
#[cfg(test)]
mod buffer_ref {
    use super::BufferRef;
    #[test]
    fn array() {
        let arr = [0, 1, 2, 3, 4, 5];
        assert_eq!(BufferRef::len(&arr), 6);
        assert_eq!(BufferRef::read(&arr, 3), &[3, 4]);
    }
    #[test]
    fn vec() {
        let vec = Vec::from([0, 1, 2, 3, 4, 5]);
        assert_eq!(BufferRef::len(&vec), 6);
        assert_eq!(BufferRef::read(&vec, 3), &[3, 4]);
    }
}
// Sanity checks for the array/Vec `BufferMut` impls, including the
// fixed-capacity vs growable `try_enlarge` behavior.
#[cfg(test)]
mod buffer_mut {
    use super::BufferMut;
    use crate::core::EnlargeError;
    #[test]
    fn array() {
        let mut arr = [0, 1, 2, 3, 4, 5];
        assert_eq!(BufferMut::capacity(&arr), 6);
        BufferMut::write(&mut arr, 3, &[9, 1]);
        assert_eq!(arr, [0, 1, 2, 9, 1, 5]);
        assert!(matches!(BufferMut::try_enlarge(&mut arr, 6), Ok(())));
        assert!(matches!(
            BufferMut::try_enlarge(&mut arr, 7),
            Err(EnlargeError)
        ));
    }
    #[test]
    fn vec() {
        let mut vec = Vec::from([0, 1, 2, 3, 4, 5]);
        assert_eq!(BufferMut::capacity(&vec), vec.capacity());
        BufferMut::write(&mut vec, 3, &[9, 1]);
        assert_eq!(vec, Vec::from([0, 1, 2, 9, 1, 5]));
        assert!(matches!(BufferMut::try_enlarge(&mut vec, 100), Ok(())));
        assert!(matches!(
            BufferMut::try_enlarge(&mut vec, usize::MAX),
            Err(EnlargeError)
        ));
    }
}
// Verifies the derived/`thiserror`-generated traits on `Error`.
#[cfg(test)]
mod error {
    use super::Error;
    #[test]
    fn derived_traits() {
        let err = Error::BufferTooSmall {
            expected: 4,
            found: 2,
        };
        {
            use std::error::Error;
            assert!(err.source().is_none());
        }
        assert_eq!(
            format!("{}", err.clone()),
            "could not read/write 4 bytes from/into 2 byte sized buffer"
        );
        assert_eq!(
            format!("{:?}", err.clone()),
            "BufferTooSmall { expected: 4, found: 2 }"
        );
    }
}
// Verifies the derived/`thiserror`-generated traits on `EnlargeError`.
#[cfg(test)]
mod enlarge_error {
    use super::EnlargeError;
    #[test]
    fn derived_traits() {
        // can't construct a TryReserveError due to TryReserveErrorKind being unstable
        let try_reserve_error = {
            let mut vec = Vec::<u8>::new();
            vec.try_reserve(usize::MAX).err().unwrap()
        };
        let err = EnlargeError::from(try_reserve_error);
        use std::error::Error;
        assert!(err.source().is_none());
        assert_eq!(format!("{}", err.clone()), "could not enlarge buffer");
        assert_eq!(format!("{:?}", err.clone()), "EnlargeError");
    }
}

77
vendor/encase/src/core/size_value.rs vendored Normal file
View File

@@ -0,0 +1,77 @@
use core::num::NonZeroU64;
/// Helper type for size calculations
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct SizeValue(pub NonZeroU64);
impl SizeValue {
    /// Creates a new size value.
    ///
    /// # Panics
    ///
    /// If `val` is 0.
    #[inline]
    pub const fn new(val: u64) -> Self {
        // `NonZeroU64::new` is a `const fn`, so no `unsafe` is needed.
        match NonZeroU64::new(val) {
            Some(val) => Self(val),
            None => panic!("Size can't be 0!"),
        }
    }
    /// Creates a size value from an already-non-zero integer.
    #[inline]
    pub const fn from(val: NonZeroU64) -> Self {
        Self(val)
    }
    /// Returns the contained value.
    #[inline]
    pub const fn get(&self) -> u64 {
        self.0.get()
    }
    /// Multiplies the size by `rhs`.
    ///
    /// # Panics
    ///
    /// If the multiplication overflows, or if `rhs` is 0 (the product must
    /// stay non-zero).
    #[inline]
    pub const fn mul(self, rhs: u64) -> Self {
        match self.get().checked_mul(rhs) {
            None => panic!("Overflow occurred while multiplying size values!"),
            Some(val) => {
                // Guard against `rhs == 0`: the previous unchecked
                // construction would have been UB for a zero product.
                match NonZeroU64::new(val) {
                    Some(val) => Self(val),
                    None => panic!("Size can't be 0!"),
                }
            }
        }
    }
}
// Unit tests for `SizeValue`: construction, multiplication, and panics on
// zero/overflow, plus the derived trait implementations.
#[cfg(test)]
mod test {
    use super::SizeValue;
    #[test]
    fn new() {
        assert_eq!(4, SizeValue::new(4).get());
    }
    #[test]
    #[should_panic]
    fn new_panic() {
        SizeValue::new(0);
    }
    #[test]
    fn mul() {
        assert_eq!(SizeValue::new(64), SizeValue::new(8).mul(8));
    }
    #[test]
    #[should_panic]
    fn mul_panic() {
        SizeValue::new(8).mul(u64::MAX);
    }
    #[test]
    fn derived_traits() {
        let size = SizeValue::new(8);
        #[allow(clippy::clone_on_copy)]
        let size_clone = size.clone();
        assert!(size == size_clone);
        assert_eq!(format!("{size:?}"), "SizeValue(8)");
    }
}

267
vendor/encase/src/core/traits.rs vendored Normal file
View File

@@ -0,0 +1,267 @@
use std::num::NonZeroU64;
use super::{AlignmentValue, BufferMut, BufferRef, Reader, SizeValue, Writer};
// Minimum alignment applied when a type is subject to the uniform address
// space's alignment rules (see `Metadata::uniform_min_alignment`).
const UNIFORM_MIN_ALIGNMENT: AlignmentValue = AlignmentValue::new(16);
/// Layout metadata describing a [`ShaderType`] implementation.
pub struct Metadata<E> {
    /// Required alignment of the type.
    pub alignment: AlignmentValue,
    /// Whether the uniform min alignment (16) applies to this type.
    pub has_uniform_min_alignment: bool,
    /// Minimum size of the type.
    pub min_size: SizeValue,
    /// Whether the type can be treated as plain-old-data.
    pub is_pod: bool,
    /// Impl-specific extra metadata.
    pub extra: E,
}
impl Metadata<()> {
    /// Builds metadata with the given alignment and size, no uniform min
    /// alignment, no POD flag and no extra metadata.
    pub const fn from_alignment_and_size(alignment: u64, size: u64) -> Self {
        Self {
            alignment: AlignmentValue::new(alignment),
            has_uniform_min_alignment: false,
            min_size: SizeValue::new(size),
            is_pod: false,
            extra: (),
        }
    }
}
// using forget() avoids "destructors cannot be evaluated at compile-time" error
// track #![feature(const_precise_live_drops)] (https://github.com/rust-lang/rust/issues/73255)
impl<E> Metadata<E> {
    /// Consumes the metadata, returning its alignment.
    #[inline]
    pub const fn alignment(self) -> AlignmentValue {
        let value = self.alignment;
        core::mem::forget(self);
        value
    }
    /// Consumes the metadata, returning the uniform min alignment (16) if
    /// the described type is subject to it.
    #[inline]
    pub const fn uniform_min_alignment(self) -> Option<AlignmentValue> {
        let value = self.has_uniform_min_alignment;
        core::mem::forget(self);
        match value {
            true => Some(UNIFORM_MIN_ALIGNMENT),
            false => None,
        }
    }
    /// Consumes the metadata, returning its minimum size.
    #[inline]
    pub const fn min_size(self) -> SizeValue {
        let value = self.min_size;
        core::mem::forget(self);
        value
    }
    /// Consumes the metadata, returning the POD flag.
    #[inline]
    pub const fn is_pod(self) -> bool {
        let value = self.is_pod;
        core::mem::forget(self);
        value
    }
    /// Returns the metadata with the POD flag set.
    #[inline]
    pub const fn pod(mut self) -> Self {
        self.is_pod = true;
        self
    }
    /// Returns the metadata with the POD flag cleared.
    #[inline]
    pub const fn no_pod(mut self) -> Self {
        self.is_pod = false;
        self
    }
}
/// Base trait for all [WGSL host-shareable types](https://gpuweb.github.io/gpuweb/wgsl/#host-shareable-types)
pub trait ShaderType {
    // Impl-specific extra metadata carried inside `METADATA` (e.g. by
    // container impls); `()` when none is needed.
    #[doc(hidden)]
    type ExtraMetadata;
    // Compile-time layout description (alignment, min size, POD flag).
    #[doc(hidden)]
    const METADATA: Metadata<Self::ExtraMetadata>;
    /// Represents the minimum size of `Self` (equivalent to [GPUBufferBindingLayout.minBindingSize](https://gpuweb.github.io/gpuweb/#dom-gpubufferbindinglayout-minbindingsize))
    ///
    /// For [WGSL fixed-footprint types](https://gpuweb.github.io/gpuweb/wgsl/#fixed-footprint-types)
    /// it represents [WGSL Size](https://gpuweb.github.io/gpuweb/wgsl/#alignment-and-size)
    /// (equivalent to [`ShaderSize::SHADER_SIZE`])
    ///
    /// For
    /// [WGSL runtime-sized arrays](https://gpuweb.github.io/gpuweb/wgsl/#runtime-sized) and
    /// [WGSL structs containing runtime-sized arrays](https://gpuweb.github.io/gpuweb/wgsl/#struct-types)
    /// (non fixed-footprint types)
    /// this will be calculated by assuming the array has one element
    #[inline]
    fn min_size() -> NonZeroU64 {
        Self::METADATA.min_size().0
    }
    /// Returns the size of `Self` at runtime
    ///
    /// For [WGSL fixed-footprint types](https://gpuweb.github.io/gpuweb/wgsl/#fixed-footprint-types)
    /// it's equivalent to [`Self::min_size`] and [`ShaderSize::SHADER_SIZE`]
    // Default: the static minimum size; runtime-sized impls override this.
    #[inline]
    fn size(&self) -> NonZeroU64 {
        Self::METADATA.min_size().0
    }
    // Defaults to a no-op; implementations that have uniform layout
    // constraints override this with a panicking check.
    #[doc(hidden)]
    const UNIFORM_COMPAT_ASSERT: fn() = || {};
    /// Asserts that `Self` meets the requirements of the
    /// [uniform address space restrictions on stored values](https://gpuweb.github.io/gpuweb/wgsl/#address-spaces-uniform) and the
    /// [uniform address space layout constraints](https://gpuweb.github.io/gpuweb/wgsl/#address-space-layout-constraints)
    ///
    /// # Examples
    ///
    /// ## Array
    ///
    /// Will panic since runtime-sized arrays are not compatible with the
    /// uniform address space restrictions on stored values
    ///
    /// ```should_panic
    /// # use crate::encase::ShaderType;
    /// <Vec<mint::Vector4<f32>>>::assert_uniform_compat();
    /// ```
    ///
    /// Will panic since the stride is 4 bytes
    ///
    /// ```should_panic
    /// # use crate::encase::ShaderType;
    /// <[f32; 2]>::assert_uniform_compat();
    /// ```
    ///
    /// Will not panic since the stride is 16 bytes
    ///
    /// ```
    /// # use crate::encase::ShaderType;
    /// # use mint;
    /// <[mint::Vector4<f32>; 2]>::assert_uniform_compat();
    /// ```
    ///
    /// ## Struct
    ///
    /// Will panic since runtime-sized arrays are not compatible with the
    /// uniform address space restrictions on stored values
    ///
    /// ```should_panic
    /// # use crate::encase::ShaderType;
    /// # use mint;
    /// #[derive(ShaderType)]
    /// struct Invalid {
    ///     #[size(runtime)]
    ///     vec: Vec<mint::Vector4<f32>>
    /// }
    /// Invalid::assert_uniform_compat();
    /// ```
    ///
    /// Will panic since the inner struct's size must be a multiple of 16
    ///
    /// ```should_panic
    /// # use crate::encase::ShaderType;
    /// #[derive(ShaderType)]
    /// struct S {
    ///     x: f32,
    /// }
    ///
    /// #[derive(ShaderType)]
    /// struct Invalid {
    ///     a: f32,
    ///     b: S, // offset between fields 'a' and 'b' must be at least 16 (currently: 4)
    /// }
    /// Invalid::assert_uniform_compat();
    /// ```
    ///
    /// Will not panic (fixed via align attribute)
    ///
    /// ```
    /// # use crate::encase::ShaderType;
    /// # #[derive(ShaderType)]
    /// # struct S {
    /// #     x: f32,
    /// # }
    /// #[derive(ShaderType)]
    /// struct Valid {
    ///     a: f32,
    ///     #[align(16)]
    ///     b: S,
    /// }
    /// Valid::assert_uniform_compat();
    /// ```
    ///
    /// Will not panic (fixed via size attribute)
    ///
    /// ```
    /// # use crate::encase::ShaderType;
    /// # #[derive(ShaderType)]
    /// # struct S {
    /// #     x: f32,
    /// # }
    /// #[derive(ShaderType)]
    /// struct Valid {
    ///     #[size(16)]
    ///     a: f32,
    ///     b: S,
    /// }
    /// Valid::assert_uniform_compat();
    /// ```
    #[inline]
    fn assert_uniform_compat() {
        Self::UNIFORM_COMPAT_ASSERT();
    }
    // Possible future capability assertions, kept for reference:
    // fn assert_can_write_into()
    // where
    //     Self: WriteInto,
    // {
    // }
    // fn assert_can_read_from()
    // where
    //     Self: ReadFrom,
    // {
    // }
    // fn assert_can_create_from()
    // where
    //     Self: CreateFrom,
    // {
    // }
}
/// Trait implemented for all [WGSL fixed-footprint types](https://gpuweb.github.io/gpuweb/wgsl/#fixed-footprint-types)
pub trait ShaderSize: ShaderType {
    /// Represents [WGSL Size](https://gpuweb.github.io/gpuweb/wgsl/#alignment-and-size) (equivalent to [`ShaderType::min_size`])
    const SHADER_SIZE: NonZeroU64 = Self::METADATA.min_size().0;
}
/// Trait implemented for
/// [WGSL runtime-sized arrays](https://gpuweb.github.io/gpuweb/wgsl/#runtime-sized) and
/// [WGSL structs containing runtime-sized arrays](https://gpuweb.github.io/gpuweb/wgsl/#struct-types)
/// (non fixed-footprint types)
pub trait CalculateSizeFor {
    /// Returns the size of `Self` assuming the (contained) runtime-sized array has `nr_of_el` elements
    fn calculate_size_for(nr_of_el: u64) -> NonZeroU64;
}
/// Containers that can back a WGSL runtime-sized array.
#[allow(clippy::len_without_is_empty)]
pub trait RuntimeSizedArray {
    /// Number of elements in the array.
    fn len(&self) -> usize;
}
/// Types that can serialize themselves into a [`Writer`].
pub trait WriteInto {
    fn write_into<B>(&self, writer: &mut Writer<B>)
    where
        B: BufferMut;
}
/// Types that can deserialize into an existing value from a [`Reader`].
pub trait ReadFrom {
    fn read_from<B>(&mut self, reader: &mut Reader<B>)
    where
        B: BufferRef;
}
/// Types that can be constructed from scratch from a [`Reader`].
pub trait CreateFrom: Sized {
    fn create_from<B>(reader: &mut Reader<B>) -> Self
    where
        B: BufferRef;
}

3
vendor/encase/src/impls/archery.rs vendored Normal file
View File

@@ -0,0 +1,3 @@
use crate::impl_wrapper;
// Wrapper support for archery's `SharedPointer`: shared access (`Ref`) plus
// construction from an inner value via `SharedPointer::new` (`From`).
impl_wrapper!(archery::SharedPointer<T, P>; (T, P: archery::SharedPointerKind); using Ref{} From{ new });

4
vendor/encase/src/impls/arrayvec.rs vendored Normal file
View File

@@ -0,0 +1,4 @@
use crate::rts_array::impl_rts_array;
// hardcap: `ArrayVec` has a fixed inline capacity `N`, hence the `truncate`
// support alongside `len`.
impl_rts_array!(arrayvec::ArrayVec<T, N>; (T, const N: usize); using len truncate);

12
vendor/encase/src/impls/cgmath.rs vendored Executable file
View File

@@ -0,0 +1,12 @@
use crate::{matrix::impl_matrix, vector::impl_vector};
// cgmath vectors/points expose array views via `AsRef`/`AsMut` and convert
// via `From`, so the macros can use those directly.
impl_vector!(2, cgmath::Vector2<T>; using AsRef AsMut From);
impl_vector!(3, cgmath::Vector3<T>; using AsRef AsMut From);
impl_vector!(4, cgmath::Vector4<T>; using AsRef AsMut From);
impl_vector!(2, cgmath::Point2<T>; using AsRef AsMut From);
impl_vector!(3, cgmath::Point3<T>; using AsRef AsMut From);
// Only square matrices are mapped; arguments are (columns, rows).
impl_matrix!(2, 2, cgmath::Matrix2<T>; using AsRef AsMut From);
impl_matrix!(3, 3, cgmath::Matrix3<T>; using AsRef AsMut From);
impl_matrix!(4, 4, cgmath::Matrix4<T>; using AsRef AsMut From);

54
vendor/encase/src/impls/glam.rs vendored Executable file
View File

@@ -0,0 +1,54 @@
use crate::{
    matrix::{impl_matrix, AsMutMatrixParts, AsRefMatrixParts, FromMatrixParts, MatrixScalar},
    vector::impl_vector,
};
impl_vector!(2, glam::Vec2, f32; using AsRef AsMut From);
impl_vector!(2, glam::UVec2, u32; using AsRef AsMut From);
impl_vector!(2, glam::IVec2, i32; using AsRef AsMut From);
impl_vector!(3, glam::Vec3, f32; using AsRef AsMut From);
impl_vector!(3, glam::UVec3, u32; using AsRef AsMut From);
impl_vector!(3, glam::IVec3, i32; using AsRef AsMut From);
impl_vector!(4, glam::Vec4, f32; using AsRef AsMut From);
impl_vector!(4, glam::UVec4, u32; using AsRef AsMut From);
impl_vector!(4, glam::IVec4, i32; using AsRef AsMut From);
impl_matrix!(2, 2, glam::Mat2, f32);
impl_matrix!(3, 3, glam::Mat3, f32);
impl_matrix!(4, 4, glam::Mat4, f32);
// glam matrices expose a flat `[f32; R * C]` via `AsRef`/`AsMut`; this macro
// adapts that to the 2D `[[f32; R]; C]` parts layout the matrix traits use,
// and constructs matrices via `from_cols_array_2d`.
macro_rules! impl_matrix_traits {
    ($c:literal, $r:literal, $type:ty, $el_ty:ty) => {
        impl AsRefMatrixParts<$el_ty, $c, $r> for $type
        where
            Self: AsRef<[$el_ty; $r * $c]>,
            $el_ty: MatrixScalar,
        {
            fn as_ref_parts(&self) -> &[[$el_ty; $r]; $c] {
                array_ref_to_2d_array_ref!(self.as_ref(), $el_ty, $c, $r)
            }
        }
        impl AsMutMatrixParts<$el_ty, $c, $r> for $type
        where
            Self: AsMut<[$el_ty; $r * $c]>,
            $el_ty: MatrixScalar,
        {
            fn as_mut_parts(&mut self) -> &mut [[$el_ty; $r]; $c] {
                array_mut_to_2d_array_mut!(self.as_mut(), $el_ty, $c, $r)
            }
        }
        impl FromMatrixParts<$el_ty, $c, $r> for $type {
            fn from_parts(parts: [[$el_ty; $r]; $c]) -> Self {
                Self::from_cols_array_2d(&parts)
            }
        }
    };
}
impl_matrix_traits!(2, 2, glam::Mat2, f32);
impl_matrix_traits!(3, 3, glam::Mat3, f32);
impl_matrix_traits!(4, 4, glam::Mat4, f32);

3
vendor/encase/src/impls/im.rs vendored Normal file
View File

@@ -0,0 +1,3 @@
use crate::rts_array::impl_rts_array;
// Runtime-sized array support for `im`'s persistent vector (length via `len`).
impl_rts_array!(im::Vector<T>; (T: Clone); using len);

3
vendor/encase/src/impls/im_rc.rs vendored Normal file
View File

@@ -0,0 +1,3 @@
use crate::rts_array::impl_rts_array;
// Runtime-sized array support for `im-rc`'s persistent vector (length via `len`).
impl_rts_array!(im_rc::Vector<T>; (T: Clone); using len);

3
vendor/encase/src/impls/imbl.rs vendored Normal file
View File

@@ -0,0 +1,3 @@
use crate::rts_array::impl_rts_array;
// Runtime-sized array support for `imbl`'s persistent vector (length via `len`).
impl_rts_array!(imbl::Vector<T>; (T: Clone); using len);

22
vendor/encase/src/impls/mint.rs vendored Executable file
View File

@@ -0,0 +1,22 @@
use crate::{matrix::impl_matrix, vector::impl_vector};
// mint vectors/points expose array views and conversions directly.
impl_vector!(2, mint::Vector2<T>; using AsRef AsMut From);
impl_vector!(3, mint::Vector3<T>; using AsRef AsMut From);
impl_vector!(4, mint::Vector4<T>; using AsRef AsMut From);
impl_vector!(2, mint::Point2<T>; using AsRef AsMut From);
impl_vector!(3, mint::Point3<T>; using AsRef AsMut From);
// mint covers all non-square column matrices too; arguments are
// (columns, rows).
impl_matrix!(2, 2, mint::ColumnMatrix2<T>; using AsRef AsMut From);
impl_matrix!(3, 2, mint::ColumnMatrix2x3<T>; using AsRef AsMut From);
impl_matrix!(4, 2, mint::ColumnMatrix2x4<T>; using AsRef AsMut From);
impl_matrix!(2, 3, mint::ColumnMatrix3x2<T>; using AsRef AsMut From);
impl_matrix!(3, 3, mint::ColumnMatrix3<T>; using AsRef AsMut From);
impl_matrix!(4, 3, mint::ColumnMatrix3x4<T>; using AsRef AsMut From);
impl_matrix!(2, 4, mint::ColumnMatrix4x2<T>; using AsRef AsMut From);
impl_matrix!(3, 4, mint::ColumnMatrix4x3<T>; using AsRef AsMut From);
impl_matrix!(4, 4, mint::ColumnMatrix4<T>; using AsRef AsMut From);

35
vendor/encase/src/impls/mod.rs vendored Normal file
View File

@@ -0,0 +1,35 @@
#[cfg(feature = "archery")]
mod archery;
#[cfg(feature = "static-rc")]
mod static_rc;
#[cfg(feature = "cgmath")]
mod cgmath;
#[cfg(feature = "glam")]
mod glam;
#[cfg(feature = "mint")]
mod mint;
#[cfg(feature = "nalgebra")]
mod nalgebra;
#[cfg(feature = "ultraviolet")]
mod ultraviolet;
#[cfg(feature = "vek")]
mod vek;
#[cfg(feature = "arrayvec")]
mod arrayvec;
#[cfg(feature = "ndarray")]
mod ndarray;
#[cfg(feature = "smallvec")]
mod smallvec;
#[cfg(feature = "tinyvec")]
mod tinyvec;
#[cfg(feature = "im")]
mod im;
#[cfg(feature = "im-rc")]
mod im_rc;
#[cfg(feature = "imbl")]
mod imbl;
#[cfg(all(feature = "rpds", feature = "archery"))]
mod rpds;

102
vendor/encase/src/impls/nalgebra.rs vendored Normal file
View File

@@ -0,0 +1,102 @@
use crate::{
    matrix::{impl_matrix, AsMutMatrixParts, AsRefMatrixParts, FromMatrixParts, MatrixScalar},
    vector::{impl_vector, AsMutVectorParts, AsRefVectorParts, FromVectorParts, VectorScalar},
};
// Owned vectors plus immutable/mutable views are all supported; the parts
// traits are implemented generically further below.
impl_vector!(2, nalgebra::VectorView2<'_, T>);
impl_vector!(2, nalgebra::VectorViewMut2<'_, T>);
impl_vector!(2, nalgebra::Vector2<T>);
impl_vector!(3, nalgebra::VectorView3<'_, T>);
impl_vector!(3, nalgebra::VectorViewMut3<'_, T>);
impl_vector!(3, nalgebra::Vector3<T>);
impl_vector!(4, nalgebra::VectorView4<'_, T>);
impl_vector!(4, nalgebra::VectorViewMut4<'_, T>);
impl_vector!(4, nalgebra::Vector4<T>);
// Matrix macro arguments are (columns, rows), while nalgebra names types as
// `MatrixRxC` — e.g. `Matrix2x3` (2 rows, 3 cols) maps to `impl_matrix!(3, 2, …)`.
impl_matrix!(2, 2, nalgebra::MatrixView2<'_, T>);
impl_matrix!(2, 2, nalgebra::MatrixViewMut2<'_, T>);
impl_matrix!(2, 2, nalgebra::Matrix2<T>);
impl_matrix!(3, 2, nalgebra::MatrixView2x3<'_, T>);
impl_matrix!(4, 2, nalgebra::MatrixView2x4<'_, T>);
impl_matrix!(2, 3, nalgebra::MatrixView3x2<'_, T>);
impl_matrix!(3, 2, nalgebra::MatrixViewMut2x3<'_, T>);
impl_matrix!(4, 2, nalgebra::MatrixViewMut2x4<'_, T>);
impl_matrix!(2, 3, nalgebra::MatrixViewMut3x2<'_, T>);
impl_matrix!(3, 2, nalgebra::Matrix2x3<T>);
impl_matrix!(4, 2, nalgebra::Matrix2x4<T>);
impl_matrix!(2, 3, nalgebra::Matrix3x2<T>);
impl_matrix!(3, 3, nalgebra::MatrixView3<'_, T>);
impl_matrix!(3, 3, nalgebra::MatrixViewMut3<'_, T>);
impl_matrix!(3, 3, nalgebra::Matrix3<T>);
impl_matrix!(4, 3, nalgebra::MatrixView3x4<'_, T>);
impl_matrix!(2, 4, nalgebra::MatrixView4x2<'_, T>);
impl_matrix!(3, 4, nalgebra::MatrixView4x3<'_, T>);
impl_matrix!(4, 3, nalgebra::MatrixViewMut3x4<'_, T>);
impl_matrix!(2, 4, nalgebra::MatrixViewMut4x2<'_, T>);
impl_matrix!(3, 4, nalgebra::MatrixViewMut4x3<'_, T>);
impl_matrix!(4, 3, nalgebra::Matrix3x4<T>);
impl_matrix!(2, 4, nalgebra::Matrix4x2<T>);
impl_matrix!(3, 4, nalgebra::Matrix4x3<T>);
impl_matrix!(4, 4, nalgebra::MatrixView4<'_, T>);
impl_matrix!(4, 4, nalgebra::MatrixViewMut4<'_, T>);
impl_matrix!(4, 4, nalgebra::Matrix4<T>);
// Blanket parts impls for any column vector (`Const<N> x Const<1>`) whose
// storage supports array views.
impl<T: VectorScalar, S, const N: usize> AsRefVectorParts<T, N>
    for nalgebra::Matrix<T, nalgebra::Const<N>, nalgebra::Const<1>, S>
where
    Self: AsRef<[T; N]>,
{
    fn as_ref_parts(&self) -> &[T; N] {
        self.as_ref()
    }
}
impl<T: VectorScalar, S, const N: usize> AsMutVectorParts<T, N>
    for nalgebra::Matrix<T, nalgebra::Const<N>, nalgebra::Const<1>, S>
where
    Self: AsMut<[T; N]>,
{
    fn as_mut_parts(&mut self) -> &mut [T; N] {
        self.as_mut()
    }
}
// Construction is only possible for owned (statically-sized) vectors.
impl<T: VectorScalar, const N: usize> FromVectorParts<T, N> for nalgebra::SMatrix<T, N, 1> {
    fn from_parts(parts: [T; N]) -> Self {
        Self::from_array_storage(nalgebra::ArrayStorage([parts]))
    }
}
// Blanket parts impls for matrices: nalgebra's `[[T; R]; C]` view matches the
// column-major parts layout directly.
impl<T: MatrixScalar, S, const C: usize, const R: usize> AsRefMatrixParts<T, C, R>
    for nalgebra::Matrix<T, nalgebra::Const<R>, nalgebra::Const<C>, S>
where
    Self: AsRef<[[T; R]; C]>,
{
    fn as_ref_parts(&self) -> &[[T; R]; C] {
        self.as_ref()
    }
}
impl<T: MatrixScalar, S, const C: usize, const R: usize> AsMutMatrixParts<T, C, R>
    for nalgebra::Matrix<T, nalgebra::Const<R>, nalgebra::Const<C>, S>
where
    Self: AsMut<[[T; R]; C]>,
{
    fn as_mut_parts(&mut self) -> &mut [[T; R]; C] {
        self.as_mut()
    }
}
impl<T: MatrixScalar, const C: usize, const R: usize> FromMatrixParts<T, C, R>
    for nalgebra::SMatrix<T, R, C>
{
    fn from_parts(parts: [[T; R]; C]) -> Self {
        Self::from_array_storage(nalgebra::ArrayStorage(parts))
    }
}

3
vendor/encase/src/impls/ndarray.rs vendored Normal file
View File

@@ -0,0 +1,3 @@
use crate::rts_array::impl_rts_array;
// Runtime-sized array support for any ndarray array base (length via `len`).
impl_rts_array!(ndarray::ArrayBase<S, D>; (T, S: ndarray::RawData<Elem = T>, D: ndarray::Dimension); using len);

12
vendor/encase/src/impls/rpds.rs vendored Normal file
View File

@@ -0,0 +1,12 @@
use crate::rts_array::{impl_rts_array, Length};
impl_rts_array!(rpds::List<T, P>; (T, P: archery::SharedPointerKind); using len);
impl_rts_array!(rpds::Vector<T, P>; (T, P: archery::SharedPointerKind); using len);
impl_rts_array!(rpds::Stack<T, P>; (T, P: archery::SharedPointerKind));
impl_rts_array!(rpds::Queue<T, P>; (T, P: archery::SharedPointerKind); using len);
// `Stack` exposes `size()` instead of `len()`, so `Length` is implemented by
// hand rather than through the macro's `using len` shorthand.
impl<T, P: archery::SharedPointerKind> Length for rpds::Stack<T, P> {
    fn length(&self) -> usize {
        self.size()
    }
}

4
vendor/encase/src/impls/smallvec.rs vendored Normal file
View File

@@ -0,0 +1,4 @@
use crate::rts_array::impl_rts_array;
// softcap: `SmallVec` can spill to the heap beyond its inline capacity, but
// still supports `truncate` alongside `len`.
impl_rts_array!(smallvec::SmallVec<A>; (T, A: smallvec::Array<Item = T>); using len truncate);

4
vendor/encase/src/impls/static_rc.rs vendored Normal file
View File

@@ -0,0 +1,4 @@
use crate::impl_wrapper;
// Any fraction NUM/DEN of a `StaticRc` grants shared (read) access…
impl_wrapper!(static_rc::StaticRc<T, NUM, DEN>; (T: ?Sized, const NUM: usize, const DEN: usize); using Ref{});
// …but only a full ratio N/N grants mutable access and construction via `new`.
impl_wrapper!(static_rc::StaticRc<T, N, N>; (T: ?Sized, const N: usize); using Mut{} From{ new });

7
vendor/encase/src/impls/tinyvec.rs vendored Normal file
View File

@@ -0,0 +1,7 @@
use crate::rts_array::impl_rts_array;
// hardcap: `ArrayVec` is limited to its inline capacity.
impl_rts_array!(tinyvec::ArrayVec<A>; (T, A: tinyvec::Array<Item = T>); using len truncate);
// softcap: `TinyVec` can spill to the heap.
impl_rts_array!(tinyvec::TinyVec<A>; (T, A: tinyvec::Array<Item = T>); using len truncate);

67
vendor/encase/src/impls/ultraviolet.rs vendored Executable file
View File

@@ -0,0 +1,67 @@
use crate::{
    matrix::{impl_matrix, AsMutMatrixParts, AsRefMatrixParts},
    vector::{impl_vector, AsMutVectorParts, AsRefVectorParts},
};
impl_vector!(2, ultraviolet::Vec2, f32; using From);
impl_vector!(2, ultraviolet::UVec2, u32; using From);
impl_vector!(2, ultraviolet::IVec2, i32; using From);
impl_vector!(3, ultraviolet::Vec3, f32; using From);
impl_vector!(3, ultraviolet::UVec3, u32; using From);
impl_vector!(3, ultraviolet::IVec3, i32; using From);
impl_vector!(4, ultraviolet::Vec4, f32; using From);
impl_vector!(4, ultraviolet::UVec4, u32; using From);
impl_vector!(4, ultraviolet::IVec4, i32; using From);
impl_matrix!(2, 2, ultraviolet::Mat2, f32; using From);
impl_matrix!(3, 3, ultraviolet::Mat3, f32; using From);
impl_matrix!(4, 4, ultraviolet::Mat4, f32; using From);
// ultraviolet vectors only expose slices (no `AsRef<[T; N]>`), so the parts
// traits are implemented manually via fallible slice-to-array conversion;
// the `unwrap`s cannot fail because the slice length is fixed by the type.
macro_rules! impl_vector_traits {
    ($n:literal, $type:ty, $el_ty:ty) => {
        impl AsRefVectorParts<$el_ty, $n> for $type {
            fn as_ref_parts(&self) -> &[$el_ty; $n] {
                self.as_slice().try_into().unwrap()
            }
        }
        impl AsMutVectorParts<$el_ty, $n> for $type {
            fn as_mut_parts(&mut self) -> &mut [$el_ty; $n] {
                self.as_mut_slice().try_into().unwrap()
            }
        }
    };
}
impl_vector_traits!(2, ultraviolet::Vec2, f32);
impl_vector_traits!(2, ultraviolet::UVec2, u32);
impl_vector_traits!(2, ultraviolet::IVec2, i32);
impl_vector_traits!(3, ultraviolet::Vec3, f32);
impl_vector_traits!(3, ultraviolet::UVec3, u32);
impl_vector_traits!(3, ultraviolet::IVec3, i32);
impl_vector_traits!(4, ultraviolet::Vec4, f32);
impl_vector_traits!(4, ultraviolet::UVec4, u32);
impl_vector_traits!(4, ultraviolet::IVec4, i32);
// Matrices similarly go through flat array/slice views reinterpreted as the
// 2D column-major parts layout.
macro_rules! impl_matrix_traits {
    ($c:literal, $r:literal, $type:ty, $el_ty:ty) => {
        impl AsRefMatrixParts<$el_ty, $c, $r> for $type {
            fn as_ref_parts(&self) -> &[[$el_ty; $r]; $c] {
                array_ref_to_2d_array_ref!(self.as_array(), $el_ty, $c, $r)
            }
        }
        impl AsMutMatrixParts<$el_ty, $c, $r> for $type {
            fn as_mut_parts(&mut self) -> &mut [[$el_ty; $r]; $c] {
                let array = self.as_mut_slice().try_into().unwrap();
                array_mut_to_2d_array_mut!(array, $el_ty, $c, $r)
            }
        }
    };
}
impl_matrix_traits!(2, 2, ultraviolet::Mat2, f32);
impl_matrix_traits!(3, 3, ultraviolet::Mat3, f32);
impl_matrix_traits!(4, 4, ultraviolet::Mat4, f32);

57
vendor/encase/src/impls/vek.rs vendored Executable file
View File

@@ -0,0 +1,57 @@
use crate::{
    matrix::{impl_matrix, AsMutMatrixParts, AsRefMatrixParts, FromMatrixParts, MatrixScalar},
    vector::{impl_vector, AsMutVectorParts, AsRefVectorParts, VectorScalar},
};
// Vectors derive `FromVectorParts` via `From`; the As{Ref,Mut} sides are
// implemented manually below. Matrices get all three parts traits manually
// (no `using` list on `impl_matrix!`).
impl_vector!(2, vek::Vec2<T>; using From);
impl_vector!(3, vek::Vec3<T>; using From);
impl_vector!(4, vek::Vec4<T>; using From);
impl_matrix!(2, 2, vek::Mat2<T>);
impl_matrix!(3, 3, vek::Mat3<T>);
impl_matrix!(4, 4, vek::Mat4<T>);
// Slice view -> fixed-size array reference; the conversion is infallible at
// runtime (slice length always equals `$n`), hence the `unwrap`.
macro_rules! impl_vector_traits {
    ($n:literal, $type:ty) => {
        impl<T: VectorScalar> AsRefVectorParts<T, $n> for $type {
            fn as_ref_parts(&self) -> &[T; $n] {
                self.as_slice().try_into().unwrap()
            }
        }
        impl<T: VectorScalar> AsMutVectorParts<T, $n> for $type {
            fn as_mut_parts(&mut self) -> &mut [T; $n] {
                self.as_mut_slice().try_into().unwrap()
            }
        }
    };
}
impl_vector_traits!(2, vek::Vec2<T>);
impl_vector_traits!(3, vek::Vec3<T>);
impl_vector_traits!(4, vek::Vec4<T>);
// Matrices are viewed column-major (`as_col_slice`/`from_col_arrays`), matching
// the `[[T; $r]; $c]` column-array layout encase uses for matrix parts.
macro_rules! impl_matrix_traits {
    ($c:literal, $r:literal, $type:ty) => {
        impl<T: MatrixScalar> AsRefMatrixParts<T, $c, $r> for $type {
            fn as_ref_parts(&self) -> &[[T; $r]; $c] {
                let array = self.as_col_slice().try_into().unwrap();
                array_ref_to_2d_array_ref!(array, T, $c, $r)
            }
        }
        impl<T: MatrixScalar> AsMutMatrixParts<T, $c, $r> for $type {
            fn as_mut_parts(&mut self) -> &mut [[T; $r]; $c] {
                let array = self.as_mut_col_slice().try_into().unwrap();
                array_mut_to_2d_array_mut!(array, T, $c, $r)
            }
        }
        impl<T: MatrixScalar> FromMatrixParts<T, $c, $r> for $type {
            fn from_parts(parts: [[T; $r]; $c]) -> Self {
                Self::from_col_arrays(parts)
            }
        }
    };
}
impl_matrix_traits!(2, 2, vek::Mat2<T>);
impl_matrix_traits!(3, 3, vek::Mat3<T>);
impl_matrix_traits!(4, 4, vek::Mat4<T>);

167
vendor/encase/src/lib.rs vendored Normal file
View File

@@ -0,0 +1,167 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![deny(rustdoc::broken_intra_doc_links)]
#![warn(
future_incompatible,
nonstandard_style,
rust_2018_idioms,
rust_2021_compatibility,
unused,
// missing_docs,
single_use_lifetimes,
trivial_casts,
trivial_numeric_casts,
// unreachable_pub,
unused_qualifications,
variant_size_differences
)]
#![doc = include_str!("../README.md")]
#![doc(
html_logo_url = "https://raw.githubusercontent.com/teoxoy/encase/3d6d2e4d7670863e97463a15ceeafac6d13ee73e/logo.svg"
)]
/// Used to implement `ShaderType` for structs
///
/// # Attributes
///
/// Field attributes
///
/// - `#[align(X)]` where `X` is a power of 2 [`u32`] literal (equivalent to [WGSL align attribute](https://gpuweb.github.io/gpuweb/wgsl/#attribute-align))
///
/// Used to increase the alignment of the field
///
/// - `#[size(X)]` where `X` is a [`u32`] literal (equivalent to [WGSL size attribute](https://gpuweb.github.io/gpuweb/wgsl/#attribute-size))
///
/// Used to increase the size of the field
///
/// - `#[size(runtime)]` can only be attached to the last field of the struct
///
/// Used to denote the fact that the field it is attached to is a runtime-sized array
///
/// # Note about generics
///
/// While structs using generic type parameters are supported by this derive macro
///
/// - the `#[align(X)]` and `#[size(X)]` attributes will only work
/// if they are attached to fields whose type contains no generic type parameters
///
/// # Examples
///
/// Simple
///
/// ```
/// # use mint;
/// # use crate::encase::ShaderType;
/// #[derive(ShaderType)]
/// struct AffineTransform2D {
/// matrix: mint::ColumnMatrix2<f32>,
/// translate: mint::Vector2<f32>
/// }
/// ```
///
/// Contains a runtime-sized array
///
/// _The [`ArrayLength`] type can be used to explicitly write or read the length of the contained runtime-sized array_
///
/// ```
/// # use mint;
/// # use crate::encase::ShaderType;
/// # use crate::encase::ArrayLength;
/// #[derive(ShaderType)]
/// struct Positions {
/// length: ArrayLength,
/// #[size(runtime)]
/// positions: Vec<mint::Point2<f32>>
/// }
/// ```
///
/// Complex
///
/// ```
/// # use crate::encase::{ShaderType, ShaderSize};
/// #[derive(ShaderType)]
/// struct Complex<
/// 'a,
/// 'b: 'a,
/// E: 'a + ShaderType + ShaderSize,
/// T: 'b + ShaderType + ShaderSize,
/// const N: usize,
/// > {
/// array: [&'a mut E; N],
/// #[size(runtime)]
/// rts_array: &'a mut Vec<&'b T>,
/// }
/// ```
///
pub use encase_derive::ShaderType;
// `utils` first and `#[macro_use]` so its helper macros are visible to the
// sibling modules declared below.
#[macro_use]
mod utils;
mod core;
mod types;
mod impls;
// Primary public API: the core traits and buffer wrappers.
pub use crate::core::{
    CalculateSizeFor, DynamicStorageBuffer, DynamicUniformBuffer, ShaderSize, ShaderType,
    StorageBuffer, UniformBuffer,
};
pub use types::runtime_sized_array::ArrayLength;
// Lower-level items needed by downstream crates that implement the buffer
// abstractions themselves (not part of the main convenience API).
pub mod internal {
    pub use super::core::{
        AlignmentValue, BufferMut, BufferRef, CreateFrom, EnlargeError, Error, ReadContext,
        ReadFrom, Reader, Result, SizeValue, WriteContext, WriteInto, Writer,
    };
}
/// Module containing items necessary to implement `ShaderType` for runtime-sized arrays
pub mod rts_array {
    #[doc(inline)]
    pub use super::impl_rts_array;
    pub use super::types::runtime_sized_array::{Length, Truncate};
}
/// Module containing items necessary to implement `ShaderType` for vectors
pub mod vector {
    #[doc(inline)]
    pub use super::impl_vector;
    pub use super::types::vector::{
        AsMutVectorParts, AsRefVectorParts, FromVectorParts, VectorScalar,
    };
}
/// Module containing items necessary to implement `ShaderType` for matrices
pub mod matrix {
    #[doc(inline)]
    pub use super::impl_matrix;
    pub use super::types::matrix::{
        AsMutMatrixParts, AsRefMatrixParts, FromMatrixParts, MatrixScalar,
    };
}
/// Private module used by macros
// Everything the `$crate::private::...` paths in the exported macros (and the
// derive macro's generated code) resolve against. Hidden from docs but must
// stay `pub` so macro expansions in downstream crates can name these items.
#[doc(hidden)]
pub mod private {
    pub use super::build_struct;
    pub use super::core::AlignmentValue;
    pub use super::core::BufferMut;
    pub use super::core::BufferRef;
    pub use super::core::CreateFrom;
    pub use super::core::Metadata;
    pub use super::core::ReadFrom;
    pub use super::core::Reader;
    pub use super::core::RuntimeSizedArray;
    pub use super::core::SizeValue;
    pub use super::core::WriteInto;
    pub use super::core::Writer;
    pub use super::types::array::ArrayMetadata;
    pub use super::types::matrix::*;
    pub use super::types::r#struct::StructMetadata;
    pub use super::types::runtime_sized_array::{ArrayLength, Length, Truncate};
    pub use super::types::vector::*;
    pub use super::utils::consume_zsts;
    pub use super::CalculateSizeFor;
    pub use super::ShaderSize;
    pub use super::ShaderType;
    pub use const_panic::concat_assert;
}

127
vendor/encase/src/types/array.rs vendored Normal file
View File

@@ -0,0 +1,127 @@
use crate::core::{
BufferMut, BufferRef, CreateFrom, Metadata, ReadFrom, Reader, ShaderSize, ShaderType,
SizeValue, WriteInto, Writer,
};
use core::mem::{size_of, MaybeUninit};
/// Extra layout metadata carried by array `ShaderType` impls.
pub struct ArrayMetadata {
    // Distance in bytes between the starts of consecutive elements
    // (element size rounded up to the element alignment).
    pub stride: SizeValue,
    // Bytes of padding after each element; equals stride minus element size.
    pub el_padding: u64,
}
impl Metadata<ArrayMetadata> {
    pub const fn stride(self) -> SizeValue {
        self.extra.stride
    }
    pub const fn el_padding(self) -> u64 {
        self.extra.el_padding
    }
}
impl<T: ShaderType + ShaderSize, const N: usize> ShaderType for [T; N] {
    type ExtraMetadata = ArrayMetadata;
    const METADATA: Metadata<Self::ExtraMetadata> = {
        let alignment = T::METADATA.alignment();
        let el_size = SizeValue::from(T::SHADER_SIZE);
        // Each element starts on a multiple of its alignment, so the stride is
        // the element size rounded up to that alignment.
        let stride = alignment.round_up_size(el_size);
        let el_padding = alignment.padding_needed_for(el_size.get());
        let size = match N {
            // Compile-time (const-eval) error for zero-length arrays.
            0 => panic!("0 sized arrays are not supported!"),
            val => stride.mul(val as u64),
        };
        Metadata {
            alignment,
            has_uniform_min_alignment: true,
            min_size: size,
            // Byte-copyable only if the elements are POD and packed back to
            // back (no inter-element padding).
            is_pod: T::METADATA.is_pod() && el_padding == 0,
            extra: ArrayMetadata { stride, el_padding },
        }
    };
    const UNIFORM_COMPAT_ASSERT: fn() = || {
        crate::utils::consume_zsts([
            // First check the element type's own uniform-buffer constraints.
            <T as ShaderType>::UNIFORM_COMPAT_ASSERT(),
            // Then enforce the uniform address-space stride rule, if any.
            if let Some(min_alignment) = Self::METADATA.uniform_min_alignment() {
                const_panic::concat_assert!(
                    min_alignment.is_aligned(Self::METADATA.stride().get()),
                    "array stride must be a multiple of ",
                    min_alignment.get(),
                    " (current stride: ",
                    Self::METADATA.stride().get(),
                    ")"
                );
            },
        ]);
    };
}
impl<T: ShaderSize, const N: usize> ShaderSize for [T; N] {}
impl<T: WriteInto, const N: usize> WriteInto for [T; N]
where
    Self: ShaderType<ExtraMetadata = ArrayMetadata>,
{
    #[inline]
    fn write_into<B: BufferMut>(&self, writer: &mut Writer<B>) {
        // Fast path (POD elements, little-endian target, per the gating
        // macro): copy the whole array as raw bytes in one call.
        if_pod_and_little_endian!(if pod_and_little_endian {
            let ptr = self.as_ptr() as *const u8;
            // SAFETY: `self` is a live, fully initialized array, so its
            // storage is valid for `size_of::<Self>()` bytes; this branch is
            // only taken for POD layouts, so every byte is initialized.
            let byte_slice: &[u8] = unsafe { core::slice::from_raw_parts(ptr, size_of::<Self>()) };
            writer.write_slice(byte_slice);
        } else {
            // Slow path: write element by element, inserting the
            // inter-element padding required by the shader layout.
            for elem in self {
                WriteInto::write_into(elem, writer);
                writer.advance(Self::METADATA.el_padding() as usize);
            }
        });
    }
}
impl<T: ReadFrom, const N: usize> ReadFrom for [T; N]
where
    Self: ShaderType<ExtraMetadata = ArrayMetadata>,
{
    #[inline]
    fn read_from<B: BufferRef>(&mut self, reader: &mut Reader<B>) {
        if_pod_and_little_endian!(if pod_and_little_endian {
            let ptr = self.as_mut_ptr() as *mut u8;
            // SAFETY: exclusive borrow of a live array; valid for
            // `size_of::<Self>()` bytes, and POD means any byte pattern
            // written by `read_slice` is a valid value.
            let byte_slice: &mut [u8] =
                unsafe { core::slice::from_raw_parts_mut(ptr, size_of::<Self>()) };
            reader.read_slice(byte_slice);
        } else {
            for elem in self {
                ReadFrom::read_from(elem, reader);
                reader.advance(Self::METADATA.el_padding() as usize);
            }
        });
    }
}
impl<T: CreateFrom, const N: usize> CreateFrom for [T; N]
where
    Self: ShaderType<ExtraMetadata = ArrayMetadata>,
{
    #[inline]
    fn create_from<B: BufferRef>(reader: &mut Reader<B>) -> Self {
        if_pod_and_little_endian!(if pod_and_little_endian {
            // `zeroed` (not `uninit`) keeps the bytes defined before
            // `read_slice` overwrites them.
            let mut me = MaybeUninit::zeroed();
            let ptr: *mut MaybeUninit<Self> = &mut me;
            let ptr = ptr.cast::<u8>();
            // SAFETY: `me` is a live local valid for `size_of::<Self>()` bytes.
            let byte_slice: &mut [u8] =
                unsafe { core::slice::from_raw_parts_mut(ptr, size_of::<Self>()) };
            reader.read_slice(byte_slice);
            // SAFETY: All values were properly initialized by reading the bytes.
            unsafe { me.assume_init() }
        } else {
            // Construct each element in order, skipping the padding between
            // consecutive elements.
            core::array::from_fn(|_| {
                let res = CreateFrom::create_from(reader);
                reader.advance(Self::METADATA.el_padding() as usize);
                res
            })
        })
    }
}

218
vendor/encase/src/types/matrix.rs vendored Normal file
View File

@@ -0,0 +1,218 @@
use crate::core::Metadata;
/// Marker for scalar types allowed as matrix elements (only `f32` here).
pub trait MatrixScalar: crate::ShaderSize {}
impl_marker_trait_for_f32!(MatrixScalar);
/// Extra layout metadata carried by matrix `ShaderType` impls.
pub struct MatrixMetadata {
    // Bytes of padding after each column (column stride minus column size).
    pub col_padding: u64,
}
impl Metadata<MatrixMetadata> {
    #[inline]
    pub const fn col_padding(self) -> u64 {
        self.extra.col_padding
    }
}
/// Enables reading from the matrix (via `&[[T; R]; C]`)
pub trait AsRefMatrixParts<T: MatrixScalar, const C: usize, const R: usize> {
    fn as_ref_parts(&self) -> &[[T; R]; C];
}
/// Enables writing to the matrix (via `&mut [[T; R]; C]`)
pub trait AsMutMatrixParts<T: MatrixScalar, const C: usize, const R: usize> {
    fn as_mut_parts(&mut self) -> &mut [[T; R]; C];
}
/// Enables the creation of a matrix (via `[[T; R]; C]`)
pub trait FromMatrixParts<T: MatrixScalar, const C: usize, const R: usize> {
    fn from_parts(parts: [[T; R]; C]) -> Self;
}
/// Used to implement `ShaderType` for the given matrix type
///
/// The given matrix type should implement any combination of
/// [`AsRefMatrixParts`], [`AsMutMatrixParts`], [`FromMatrixParts`]
/// depending on needed capability (they can also be derived via `$using`)
///
/// # Args
///
/// - `$c` nr of columns the given matrix contains
///
/// - `$r` nr of rows the given matrix contains
///
/// - `$type` the type (representing a matrix) for which `ShaderType` will be implemented for
///
/// - `$generics` \[optional\] generics that will be passed into the `impl< >`
///
/// - `$el_type` \[optional\] inner element type of the matrix (should implement [`MatrixScalar`])
///
/// - `$using` \[optional\] can be any combination of `AsRef AsMut From`
#[macro_export]
macro_rules! impl_matrix {
    // Generic over element type `T` with `T` as the sole generic parameter.
    ($c:literal, $r:literal, $type:ty $( ; using $($using:tt)* )?) => {
        $crate::impl_matrix_inner!(__inner, ($c, $r, $type, T, (T)); $( $($using)* )?);
    };
    // Generic with caller-supplied generics list.
    ($c:literal, $r:literal, $type:ty; ($($generics:tt)*) $( ; using $($using:tt)* )?) => {
        $crate::impl_matrix_inner!(__inner, ($c, $r, $type, T, ($($generics)*)); $( $($using)* )?);
    };
    // Concrete element type, no generics.
    ($c:literal, $r:literal, $type:ty, $el_ty:ty $( ; using $($using:tt)* )?) => {
        $crate::impl_matrix_inner!(__inner, ($c, $r, $type, $el_ty, ()); $( $($using)* )?);
    };
}
#[doc(hidden)]
#[macro_export]
// Token-munching helper behind `impl_matrix!`: the `__inner` arms consume the
// `using` list one keyword at a time, emitting the matching derived impl, and
// the terminal arm emits the core `ShaderType`/buffer impls (`__main`).
macro_rules! impl_matrix_inner {
    (__inner, ($($other:tt)*); AsRef $($using:tt)*) => {
        $crate::impl_matrix_inner!(__ref, $($other)*);
        $crate::impl_matrix_inner!(__inner, ($($other)*); $($using)*);
    };
    (__inner, ($($other:tt)*); AsMut $($using:tt)*) => {
        $crate::impl_matrix_inner!(__mut, $($other)*);
        $crate::impl_matrix_inner!(__inner, ($($other)*); $($using)*);
    };
    (__inner, ($($other:tt)*); From $($using:tt)*) => {
        $crate::impl_matrix_inner!(__from, $($other)*);
        $crate::impl_matrix_inner!(__inner, ($($other)*); $($using)*);
    };
    // `using` list exhausted: emit the main impls.
    (__inner, ($c:literal, $r:literal, $type:ty, $el_ty:ty, ($($generics:tt)*)); ) => {
        $crate::impl_matrix_inner!(__main, $c, $r, $type, $el_ty, ($($generics)*));
    };
    // Derive `AsRefMatrixParts` from the type's own `AsRef<[[_; R]; C]>`.
    (__ref, $c:literal, $r:literal, $type:ty, $el_ty:ty, ($($generics:tt)*)) => {
        impl<$($generics)*> $crate::private::AsRefMatrixParts<$el_ty, $c, $r> for $type
        where
            Self: ::core::convert::AsRef<[[$el_ty; $r]; $c]>,
            $el_ty: $crate::private::MatrixScalar,
        {
            #[inline]
            fn as_ref_parts(&self) -> &[[$el_ty; $r]; $c] {
                ::core::convert::AsRef::as_ref(self)
            }
        }
    };
    // Derive `AsMutMatrixParts` from `AsMut<[[_; R]; C]>`.
    (__mut, $c:literal, $r:literal, $type:ty, $el_ty:ty, ($($generics:tt)*)) => {
        impl<$($generics)*> $crate::private::AsMutMatrixParts<$el_ty, $c, $r> for $type
        where
            Self: ::core::convert::AsMut<[[$el_ty; $r]; $c]>,
            $el_ty: $crate::private::MatrixScalar,
        {
            #[inline]
            fn as_mut_parts(&mut self) -> &mut [[$el_ty; $r]; $c] {
                ::core::convert::AsMut::as_mut(self)
            }
        }
    };
    // Derive `FromMatrixParts` from `From<[[_; R]; C]>`.
    (__from, $c:literal, $r:literal, $type:ty, $el_ty:ty, ($($generics:tt)*)) => {
        impl<$($generics)*> $crate::private::FromMatrixParts<$el_ty, $c, $r> for $type
        where
            Self: ::core::convert::From<[[$el_ty; $r]; $c]>,
            $el_ty: $crate::private::MatrixScalar,
        {
            #[inline]
            fn from_parts(parts: [[$el_ty; $r]; $c]) -> Self {
                ::core::convert::From::from(parts)
            }
        }
    };
    (__main, $c:literal, $r:literal, $type:ty, $el_ty:ty, ($($generics:tt)*)) => {
        // Compile-time guard: only 2x2 up to 4x4 matrices are representable.
        const _: () = assert!(
            2 <= $c && $c <= 4,
            "Matrix should have at least 2 columns and at most 4!",
        );
        const _: () = assert!(
            2 <= $r && $r <= 4,
            "Matrix should have at least 2 rows and at most 4!",
        );
        impl<$($generics)*> $crate::private::ShaderType for $type
        where
            $el_ty: $crate::private::ShaderSize,
        {
            type ExtraMetadata = $crate::private::MatrixMetadata;
            const METADATA: $crate::private::Metadata<Self::ExtraMetadata> = {
                // A matrix is laid out as `$c` columns of `$r` elements each;
                // each column is aligned (and hence possibly padded) up to the
                // next power of two of its size.
                let col_size = $crate::private::SizeValue::from(<$el_ty as $crate::private::ShaderSize>::SHADER_SIZE).mul($r);
                let alignment = $crate::private::AlignmentValue::from_next_power_of_two_size(col_size);
                let size = alignment.round_up_size(col_size).mul($c);
                let col_padding = alignment.padding_needed_for(col_size.get());
                $crate::private::Metadata {
                    alignment,
                    has_uniform_min_alignment: false,
                    min_size: size,
                    // Byte-copyable only if a column is POD and columns are
                    // packed with no padding between them.
                    is_pod: <[$el_ty; $r] as $crate::private::ShaderType>::METADATA.is_pod() && col_padding == 0,
                    extra: $crate::private::MatrixMetadata {
                        col_padding,
                    },
                }
            };
        }
        impl<$($generics)*> $crate::private::ShaderSize for $type
        where
            $el_ty: $crate::private::ShaderSize
        {}
        impl<$($generics)*> $crate::private::WriteInto for $type
        where
            Self: $crate::private::AsRefMatrixParts<$el_ty, $c, $r> + $crate::private::ShaderType<ExtraMetadata = $crate::private::MatrixMetadata>,
            $el_ty: $crate::private::MatrixScalar + $crate::private::WriteInto,
        {
            #[inline]
            fn write_into<B: $crate::private::BufferMut>(&self, writer: &mut $crate::private::Writer<B>) {
                let columns = $crate::private::AsRefMatrixParts::<$el_ty, $c, $r>::as_ref_parts(self);
                $crate::if_pod_and_little_endian!(if pod_and_little_endian {
                    // POD fast path: delegate to the 2D array's byte copy.
                    $crate::private::WriteInto::write_into(columns, writer);
                } else {
                    // Column by column, inserting inter-column padding.
                    for col in columns {
                        $crate::private::WriteInto::write_into(col, writer);
                        writer.advance(<Self as $crate::private::ShaderType>::METADATA.col_padding() as ::core::primitive::usize);
                    }
                });
            }
        }
        impl<$($generics)*> $crate::private::ReadFrom for $type
        where
            Self: $crate::private::AsMutMatrixParts<$el_ty, $c, $r> + $crate::private::ShaderType<ExtraMetadata = $crate::private::MatrixMetadata>,
            $el_ty: $crate::private::MatrixScalar + $crate::private::ReadFrom,
        {
            #[inline]
            fn read_from<B: $crate::private::BufferRef>(&mut self, reader: &mut $crate::private::Reader<B>) {
                let columns = $crate::private::AsMutMatrixParts::<$el_ty, $c, $r>::as_mut_parts(self);
                $crate::if_pod_and_little_endian!(if pod_and_little_endian {
                    $crate::private::ReadFrom::read_from(columns, reader);
                } else {
                    for col in columns {
                        $crate::private::ReadFrom::read_from(col, reader);
                        reader.advance(<Self as $crate::private::ShaderType>::METADATA.col_padding() as ::core::primitive::usize);
                    }
                });
            }
        }
        impl<$($generics)*> $crate::private::CreateFrom for $type
        where
            Self: $crate::private::FromMatrixParts<$el_ty, $c, $r> + $crate::private::ShaderType<ExtraMetadata = $crate::private::MatrixMetadata>,
            $el_ty: $crate::private::MatrixScalar + $crate::private::CreateFrom,
        {
            #[inline]
            fn create_from<B: $crate::private::BufferRef>(reader: &mut $crate::private::Reader<B>) -> Self {
                let columns = $crate::if_pod_and_little_endian!(if pod_and_little_endian {
                    $crate::private::CreateFrom::create_from(reader)
                } else {
                    ::core::array::from_fn(|_| {
                        let col = $crate::private::CreateFrom::create_from(reader);
                        reader.advance(<Self as $crate::private::ShaderType>::METADATA.col_padding() as ::core::primitive::usize);
                        col
                    })
                });
                $crate::private::FromMatrixParts::<$el_ty, $c, $r>::from_parts(columns)
            }
        }
    };
}

14
vendor/encase/src/types/mod.rs vendored Executable file
View File

@@ -0,0 +1,14 @@
// `scalar` is declared first and `#[macro_use]` so its marker-trait helper
// macros are visible to the sibling modules below.
#[macro_use]
pub mod scalar;
pub mod vector;
pub mod matrix;
pub mod array;
pub mod r#struct;
pub mod runtime_sized_array;
mod wrapper;

vendor/encase/src/types/runtime_sized_array.rs vendored Normal file
View File

@@ -0,0 +1,272 @@
use std::collections::{LinkedList, VecDeque};
use crate::core::{
BufferMut, BufferRef, CreateFrom, Metadata, ReadFrom, Reader, RuntimeSizedArray, ShaderSize,
WriteInto, Writer,
};
use crate::ShaderType;
/// Helper type meant to be used together with the [`derive@ShaderType`] derive macro
///
/// This type should be interpreted as an [`u32`] in the shader
///
/// # Problem
///
/// There are cases where the use of the WGSL function [`arrayLength()`](https://gpuweb.github.io/gpuweb/wgsl/#array-builtin-functions)
/// might be inadequate because of its return value
///
/// - being a minimum of 1 due to how [`minBindingSize` is calculated](https://gpuweb.github.io/gpuweb/#ref-for-dom-gpubufferbindinglayout-minbindingsize%E2%91%A7)
///
/// - possibly being higher than expected due to padding at the end of a struct or buffer being interpreted as array elements
///
/// - representing the capacity of the array for use cases that require oversized buffers
///
/// # Solution
///
/// Using this type on a field of a struct with the [`derive@ShaderType`] derive macro will automatically:
///
/// - on write, write the length of the contained runtime-sized array as an [`u32`] to the buffer
///
/// - on read, read the value as an [`u32`] from the buffer (rep as `LEN`) and when reading the elements of the contained runtime-sized array a max of `LEN` elements will be read
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
pub struct ArrayLength;
impl ShaderType for ArrayLength {
    type ExtraMetadata = ();
    // Occupies 4 bytes, aligned to 4 — the layout of a `u32` in the shader.
    const METADATA: Metadata<Self::ExtraMetadata> = Metadata::from_alignment_and_size(4, 4);
}
impl ShaderSize for ArrayLength {}
impl WriteInto for ArrayLength {
    fn write_into<B: BufferMut>(&self, writer: &mut Writer<B>) {
        // Emits the runtime-sized array length recorded in the writer context.
        // Panics if no runtime-sized array length was recorded — presumably
        // the derive macro guarantees one exists; NOTE(review): confirm.
        let length = writer.ctx.rts_array_length.unwrap();
        WriteInto::write_into(&length, writer);
    }
}
impl ReadFrom for ArrayLength {
    fn read_from<B: BufferRef>(&mut self, reader: &mut Reader<B>) {
        // Consume the u32 and stash it so a later runtime-sized array read
        // caps the number of elements it deserializes.
        let length = CreateFrom::create_from(reader);
        reader.ctx.rts_array_max_el_to_read = Some(length);
    }
}
impl CreateFrom for ArrayLength {
    fn create_from<B: BufferRef>(reader: &mut Reader<B>) -> Self {
        // Same as `read_from`, but for by-value construction; the value itself
        // carries no state (unit struct).
        let length = CreateFrom::create_from(reader);
        reader.ctx.rts_array_max_el_to_read = Some(length);
        ArrayLength
    }
}
/// Number of elements in a runtime-sized array container.
pub trait Length {
    fn length(&self) -> usize;
}
/// Shortens a runtime-sized array container to at most `_len` elements.
pub trait Truncate {
    fn truncate(&mut self, _len: usize);
}
/// Used to implement `ShaderType` for the given runtime-sized array type
///
/// The given runtime-sized array type should implement [`Length`] and optionally [`Truncate`]
/// depending on needed capability (they can also be derived via `$using`)
///
/// # Args
///
/// - `$type` the type (representing a runtime-sized array) for which `ShaderType` will be implemented for
///
/// - `$generics` \[optional\] generics that will be passed into the `impl< >`
///
/// - `$using` \[optional\] can be any combination of `len truncate`
#[macro_export]
macro_rules! impl_rts_array {
    // Default generics: a single element-type parameter `T`.
    ($type:ty $( ; using $($using:tt)* )?) => {
        $crate::impl_rts_array_inner!(__inner, ($type, T); $( $($using)* )?);
    };
    // Caller-supplied generics list.
    ($type:ty; ($($generics:tt)*) $( ; using $($using:tt)* )?) => {
        $crate::impl_rts_array_inner!(__inner, ($type, $($generics)*); $( $($using)* )?);
    };
}
#[doc(hidden)]
#[macro_export]
// Token-munching helper behind `impl_rts_array!`: the `__inner` arms consume
// the `using` list (`len`, `truncate`) one keyword at a time, deriving the
// matching trait; the terminal arm emits the core impls (`__main`).
macro_rules! impl_rts_array_inner {
    (__inner, ($($other:tt)*); len $($using:tt)*) => {
        $crate::impl_rts_array_inner!(__len, $($other)*);
        $crate::impl_rts_array_inner!(__inner, ($($other)*); $($using)*);
    };
    (__inner, ($($other:tt)*); truncate $($using:tt)*) => {
        $crate::impl_rts_array_inner!(__truncate, $($other)*);
        $crate::impl_rts_array_inner!(__inner, ($($other)*); $($using)*);
    };
    // `using` list exhausted: emit the main impls.
    (__inner, ($type:ty, $($generics:tt)*); ) => {
        $crate::impl_rts_array_inner!(__main, $type, $($generics)*);
    };
    // Derive `Length` from the type's inherent `len` method.
    (__len, $type:ty, $($generics:tt)*) => {
        impl<$($generics)*> $crate::private::Length for $type {
            fn length(&self) -> ::core::primitive::usize {
                self.len()
            }
        }
    };
    // Derive `Truncate` from the type's inherent `truncate` method.
    (__truncate, $type:ty, $($generics:tt)*) => {
        impl<$($generics)*> $crate::private::Truncate for $type {
            fn truncate(&mut self, len: ::core::primitive::usize) {
                self.truncate(len)
            }
        }
    };
    (__main, $type:ty, $($generics:tt)*) => {
        impl<$($generics)*> $crate::private::ShaderType for $type
        where
            T: $crate::private::ShaderType + $crate::private::ShaderSize,
            Self: $crate::private::Length,
        {
            type ExtraMetadata = $crate::private::ArrayMetadata;
            const METADATA: $crate::private::Metadata<Self::ExtraMetadata> = {
                let alignment = T::METADATA.alignment();
                let el_size = $crate::private::SizeValue::from(T::SHADER_SIZE);
                // Stride = element size rounded up to the element alignment.
                let stride = alignment.round_up_size(el_size);
                let el_padding = alignment.padding_needed_for(el_size.get());
                $crate::private::Metadata {
                    alignment,
                    has_uniform_min_alignment: true,
                    // Minimum size is a single element; the actual size is
                    // computed at runtime by `size()` below.
                    min_size: el_size,
                    // Never byte-copyable as a whole: the length is dynamic.
                    is_pod: false,
                    extra: $crate::private::ArrayMetadata { stride, el_padding },
                }
            };
            const UNIFORM_COMPAT_ASSERT: fn() = ||
                ::core::panic!("runtime-sized array can't be used in uniform buffers");
            fn size(&self) -> ::core::num::NonZeroU64 {
                use ::core::cmp::Ord;
                // `max(1)` keeps the result non-zero (NonZeroU64) for empty
                // arrays.
                Self::METADATA.stride()
                    .mul($crate::private::Length::length(self).max(1) as ::core::primitive::u64)
                    .0
            }
        }
        impl<$($generics)*> $crate::private::RuntimeSizedArray for $type
        where
            Self: $crate::private::Length,
        {
            fn len(&self) -> ::core::primitive::usize {
                $crate::private::Length::length(self)
            }
        }
        impl<$($generics)*> $crate::private::CalculateSizeFor for $type
        where
            Self: $crate::private::ShaderType<ExtraMetadata = $crate::private::ArrayMetadata>,
        {
            fn calculate_size_for(nr_of_el: ::core::primitive::u64) -> ::core::num::NonZeroU64 {
                use ::core::cmp::Ord;
                <Self as $crate::private::ShaderType>::METADATA.stride().mul(nr_of_el.max(1)).0
            }
        }
        impl<$($generics)*> $crate::private::WriteInto for $type
        where
            T: $crate::private::WriteInto,
            Self: $crate::private::ShaderType<ExtraMetadata = $crate::private::ArrayMetadata>,
            for<'a> &'a Self: ::core::iter::IntoIterator<Item = &'a T>,
        {
            fn write_into<B: $crate::private::BufferMut>(&self, writer: &mut $crate::private::Writer<B>) {
                use ::core::iter::IntoIterator;
                // Element by element, inserting inter-element padding.
                for item in self.into_iter() {
                    $crate::private::WriteInto::write_into(item, writer);
                    writer.advance(<Self as $crate::private::ShaderType>::METADATA.el_padding() as ::core::primitive::usize);
                }
            }
        }
        impl<$($generics)*> $crate::private::ReadFrom for $type
        where
            T: $crate::private::ReadFrom + $crate::private::CreateFrom,
            Self: $crate::private::Truncate + $crate::private::Length + ::core::iter::Extend<T> + $crate::private::ShaderType<ExtraMetadata = $crate::private::ArrayMetadata>,
            for<'a> &'a mut Self: ::core::iter::IntoIterator<Item = &'a mut T>,
        {
            fn read_from<B: $crate::private::BufferRef>(&mut self, reader: &mut $crate::private::Reader<B>) {
                use ::core::cmp::Ord;
                use ::core::iter::{IntoIterator, Extend, Iterator};
                // Number of elements to read: the cap recorded by a preceding
                // `ArrayLength` read (if any), limited by what the buffer holds.
                let max = reader.ctx.rts_array_max_el_to_read.unwrap_or(::core::primitive::u32::MAX) as ::core::primitive::usize;
                let count = max.min(reader.remaining() / <Self as $crate::private::ShaderType>::METADATA.stride().get() as ::core::primitive::usize);
                // Drop surplus elements, overwrite the survivors in place, then
                // create-and-append the remainder.
                $crate::private::Truncate::truncate(self, count);
                for item in self.into_iter() {
                    $crate::private::ReadFrom::read_from(item, reader);
                    reader.advance(<Self as $crate::private::ShaderType>::METADATA.el_padding() as ::core::primitive::usize);
                }
                let remaining = count - $crate::private::Length::length(self);
                self.extend(
                    ::core::iter::repeat_with(|| {
                        let el = $crate::private::CreateFrom::create_from(reader);
                        reader.advance(<Self as $crate::private::ShaderType>::METADATA.el_padding() as ::core::primitive::usize);
                        el
                    })
                    .take(remaining),
                );
            }
        }
        impl<$($generics)*> $crate::private::CreateFrom for $type
        where
            T: $crate::private::CreateFrom,
            Self: ::core::iter::FromIterator<T> + $crate::private::ShaderType<ExtraMetadata = $crate::private::ArrayMetadata>,
        {
            fn create_from<B: $crate::private::BufferRef>(reader: &mut $crate::private::Reader<B>) -> Self {
                use ::core::cmp::Ord;
                use ::core::iter::Iterator;
                // Same element-count computation as `read_from` above.
                let max = reader.ctx.rts_array_max_el_to_read.unwrap_or(::core::primitive::u32::MAX) as ::core::primitive::usize;
                let count = max.min(reader.remaining() / <Self as $crate::private::ShaderType>::METADATA.stride().get() as ::core::primitive::usize);
                ::core::iter::FromIterator::from_iter(
                    ::core::iter::repeat_with(|| {
                        let el = $crate::private::CreateFrom::create_from(reader);
                        reader.advance(<Self as $crate::private::ShaderType>::METADATA.el_padding() as ::core::primitive::usize);
                        el
                    })
                    .take(count),
                )
            }
        }
    };
}
// Slices can only be read/written in place (no `truncate`, no construction).
impl_rts_array!([T]; using len);
impl_rts_array!(Vec<T>; using len truncate);
impl_rts_array!(VecDeque<T>; using len truncate);
// `LinkedList` has no inherent `truncate`; a manual `Truncate` impl follows.
impl_rts_array!(LinkedList<T>; using len);
impl<T> Truncate for LinkedList<T> {
    /// Drops every element past `len`, leaving the list untouched when it is
    /// already short enough. `LinkedList` has no inherent `truncate`, so the
    /// tail is detached with `split_off` and dropped.
    fn truncate(&mut self, len: usize) {
        if self.len() > len {
            drop(self.split_off(len));
        }
    }
}
#[cfg(test)]
mod array_length {
    use super::ArrayLength;
    // Exercises the derived `Clone`, `PartialEq` and `Debug` impls; the
    // `.clone()` on a `Copy` type is deliberate (coverage of derived `Clone`).
    #[test]
    fn derived_traits() {
        assert_eq!(ArrayLength, ArrayLength.clone());
        assert_eq!(format!("{ArrayLength:?}"), "ArrayLength");
    }
}

170
vendor/encase/src/types/scalar.rs vendored Normal file
View File

@@ -0,0 +1,170 @@
use crate::core::{
BufferMut, BufferRef, CreateFrom, Metadata, ReadFrom, Reader, ShaderSize, ShaderType,
WriteInto, Writer,
};
use core::num::{NonZeroI32, NonZeroU32, Wrapping};
use core::sync::atomic::{AtomicI32, AtomicU32};
// Emits `ShaderType` + `ShaderSize` for a 4-byte scalar; the optional `is_pod`
// marker additionally tags the metadata as plain-old-data via `.pod()`.
macro_rules! impl_basic_traits {
    ($type:ty) => {
        impl_basic_traits!(__main, $type, );
    };
    ($type:ty, is_pod) => {
        impl_basic_traits!(__main, $type, .pod());
    };
    (__main, $type:ty, $($tail:tt)*) => {
        impl ShaderType for $type {
            type ExtraMetadata = ();
            // Every scalar here is 4 bytes, aligned to 4.
            const METADATA: Metadata<Self::ExtraMetadata> = Metadata::from_alignment_and_size(4, 4) $($tail)*;
        }
        impl ShaderSize for $type {}
    };
}
// Full buffer impls for primitive scalars: serialized as their little-endian
// byte representation.
macro_rules! impl_traits_for_pod {
    ($type:ty) => {
        impl_basic_traits!($type, is_pod);
        impl WriteInto for $type {
            #[inline]
            fn write_into<B: BufferMut>(&self, writer: &mut Writer<B>) {
                writer.write(&<$type>::to_le_bytes(*self));
            }
        }
        impl ReadFrom for $type {
            #[inline]
            fn read_from<B: BufferRef>(&mut self, reader: &mut Reader<B>) {
                *self = <$type>::from_le_bytes(*reader.read());
            }
        }
        impl CreateFrom for $type {
            #[inline]
            fn create_from<B: BufferRef>(reader: &mut Reader<B>) -> Self {
                <$type>::from_le_bytes(*reader.read())
            }
        }
    };
}
impl_traits_for_pod!(f32);
impl_traits_for_pod!(u32);
impl_traits_for_pod!(i32);
// `Option<NonZero*>` round-trips through the raw integer: `None` maps to 0 on
// write, and a 0 read back yields `None` (niche optimization keeps it 4 bytes,
// so it is NOT tagged `is_pod` — the value needs the zero check on read).
macro_rules! impl_traits_for_non_zero_option {
    ($type:ty) => {
        impl_basic_traits!(Option<$type>);
        impl WriteInto for Option<$type> {
            #[inline]
            fn write_into<B: BufferMut>(&self, writer: &mut Writer<B>) {
                let value = self.map(|num| num.get()).unwrap_or(0);
                WriteInto::write_into(&value, writer);
            }
        }
        impl ReadFrom for Option<$type> {
            #[inline]
            fn read_from<B: BufferRef>(&mut self, reader: &mut Reader<B>) {
                *self = <$type>::new(CreateFrom::create_from(reader));
            }
        }
        impl CreateFrom for Option<$type> {
            #[inline]
            fn create_from<B: BufferRef>(reader: &mut Reader<B>) -> Self {
                <$type>::new(CreateFrom::create_from(reader))
            }
        }
    };
}
impl_traits_for_non_zero_option!(NonZeroU32);
impl_traits_for_non_zero_option!(NonZeroI32);
// `Wrapping<T>` simply delegates to the inner integer's impls.
macro_rules! impl_traits_for_wrapping {
    ($type:ty) => {
        impl_basic_traits!($type);
        impl WriteInto for $type {
            #[inline]
            fn write_into<B: BufferMut>(&self, writer: &mut Writer<B>) {
                WriteInto::write_into(&self.0, writer);
            }
        }
        impl ReadFrom for $type {
            #[inline]
            fn read_from<B: BufferRef>(&mut self, reader: &mut Reader<B>) {
                ReadFrom::read_from(&mut self.0, reader);
            }
        }
        impl CreateFrom for $type {
            #[inline]
            fn create_from<B: BufferRef>(reader: &mut Reader<B>) -> Self {
                Wrapping(CreateFrom::create_from(reader))
            }
        }
    };
}
impl_traits_for_wrapping!(Wrapping<u32>);
impl_traits_for_wrapping!(Wrapping<i32>);
// Atomics are serialized by value. `Relaxed` is used on the load — only the
// value itself is transferred, no cross-thread ordering is established;
// NOTE(review): callers must not rely on this write for synchronization.
macro_rules! impl_traits_for_atomic {
    ($type:ty) => {
        impl_basic_traits!($type);
        impl WriteInto for $type {
            #[inline]
            fn write_into<B: BufferMut>(&self, writer: &mut Writer<B>) {
                let value = self.load(std::sync::atomic::Ordering::Relaxed);
                WriteInto::write_into(&value, writer);
            }
        }
        impl ReadFrom for $type {
            #[inline]
            fn read_from<B: BufferRef>(&mut self, reader: &mut Reader<B>) {
                // `get_mut` needs `&mut self`, so no atomic ordering is involved.
                ReadFrom::read_from(self.get_mut(), reader);
            }
        }
        impl CreateFrom for $type {
            #[inline]
            fn create_from<B: BufferRef>(reader: &mut Reader<B>) -> Self {
                <$type>::new(CreateFrom::create_from(reader))
            }
        }
    };
}
impl_traits_for_atomic!(AtomicU32);
impl_traits_for_atomic!(AtomicI32);
// The three macros below stamp a marker trait (e.g. `VectorScalar`,
// `MatrixScalar`) onto every scalar representation of the given shader type.
macro_rules! impl_marker_trait_for_f32 {
    ($trait:path) => {
        impl $trait for ::core::primitive::f32 {}
    };
}
macro_rules! impl_marker_trait_for_u32 {
    ($trait:path) => {
        impl $trait for ::core::primitive::u32 {}
        impl $trait for ::core::option::Option<::core::num::NonZeroU32> {}
        impl $trait for ::core::num::Wrapping<::core::primitive::u32> {}
        impl $trait for ::core::sync::atomic::AtomicU32 {}
    };
}
macro_rules! impl_marker_trait_for_i32 {
    ($trait:path) => {
        impl $trait for ::core::primitive::i32 {}
        impl $trait for ::core::option::Option<::core::num::NonZeroI32> {}
        impl $trait for ::core::num::Wrapping<::core::primitive::i32> {}
        impl $trait for ::core::sync::atomic::AtomicI32 {}
    };
}

20
vendor/encase/src/types/struct.rs vendored Executable file
View File

@@ -0,0 +1,20 @@
use crate::core::Metadata;
/// Per-field layout data for a struct with `N` fields (filled in by the
/// `ShaderType` derive macro's generated code).
pub struct StructMetadata<const N: usize> {
    // Byte offset of each field from the start of the struct.
    pub offsets: [u64; N],
    // Bytes of padding following each field.
    pub paddings: [u64; N],
}
impl<const N: usize> Metadata<StructMetadata<N>> {
    pub const fn offset(self, i: usize) -> u64 {
        self.extra.offsets[i]
    }
    // Offset of the final field; const-eval error when N == 0.
    pub const fn last_offset(self) -> u64 {
        self.extra.offsets[N - 1]
    }
    pub const fn padding(self, i: usize) -> u64 {
        self.extra.paddings[i]
    }
}

173
vendor/encase/src/types/vector.rs vendored Normal file
View File

@@ -0,0 +1,173 @@
/// Marker trait for scalar types allowed as vector elements.
pub trait VectorScalar: crate::ShaderSize {}
impl_marker_trait_for_f32!(VectorScalar);
impl_marker_trait_for_u32!(VectorScalar);
impl_marker_trait_for_i32!(VectorScalar);
/// Enables reading from the vector (via `&[T; N]`)
pub trait AsRefVectorParts<T: VectorScalar, const N: usize> {
    fn as_ref_parts(&self) -> &[T; N];
}
/// Enables writing to the vector (via `&mut [T; N]`)
pub trait AsMutVectorParts<T: VectorScalar, const N: usize> {
    fn as_mut_parts(&mut self) -> &mut [T; N];
}
/// Enables the creation of a vector (via `[T; N]`)
pub trait FromVectorParts<T: VectorScalar, const N: usize> {
    fn from_parts(parts: [T; N]) -> Self;
}
/// Used to implement `ShaderType` for the given vector type
///
/// The given vector type should implement any combination of
/// [`AsRefVectorParts`], [`AsMutVectorParts`], [`FromVectorParts`]
/// depending on needed capability (they can also be derived via `$using`)
///
/// # Args
///
/// - `$n` nr of elements the given vector contains
///
/// - `$type` the type (representing a vector) for which `ShaderType` will be implemented for
///
/// - `$generics` \[optional\] generics that will be passed into the `impl< >`
///
/// - `$el_type` \[optional\] inner element type of the vector (should implement [`VectorScalar`])
///
/// - `$using` \[optional\] can be any combination of `AsRef AsMut From`
#[macro_export]
macro_rules! impl_vector {
    // Element type omitted: default to a single generic parameter `T`.
    ($n:literal, $type:ty $( ; using $($using:tt)* )?) => {
        $crate::impl_vector_inner!(__inner, ($n, $type, T, (T)); $( $($using)* )?);
    };
    // Caller-supplied generics, forwarded verbatim into the `impl< >`.
    ($n:literal, $type:ty; ($($generics:tt)*) $( ; using $($using:tt)* )?) => {
        $crate::impl_vector_inner!(__inner, ($n, $type, T, ($($generics)*)); $( $($using)* )?);
    };
    // Concrete element type, no generics.
    ($n:literal, $type:ty, $el_ty:ty $( ; using $($using:tt)* )?) => {
        $crate::impl_vector_inner!(__inner, ($n, $type, $el_ty, ()); $( $($using)* )?);
    };
}
#[doc(hidden)]
#[macro_export]
macro_rules! impl_vector_inner {
    // Front-end arms: peel one capability token (`AsRef`/`AsMut`/`From`) at a
    // time, emit the matching parts-trait impl, then recurse on the rest.
    (__inner, ($($other:tt)*); AsRef $($using:tt)*) => {
        $crate::impl_vector_inner!(__ref, $($other)*);
        $crate::impl_vector_inner!(__inner, ($($other)*); $($using)*);
    };
    (__inner, ($($other:tt)*); AsMut $($using:tt)*) => {
        $crate::impl_vector_inner!(__mut, $($other)*);
        $crate::impl_vector_inner!(__inner, ($($other)*); $($using)*);
    };
    (__inner, ($($other:tt)*); From $($using:tt)*) => {
        $crate::impl_vector_inner!(__from, $($other)*);
        $crate::impl_vector_inner!(__inner, ($($other)*); $($using)*);
    };
    // All capability tokens consumed: emit the core `ShaderType` impls.
    (__inner, ($n:literal, $type:ty, $el_ty:ty, ($($generics:tt)*)); ) => {
        $crate::impl_vector_inner!(__main, $n, $type, $el_ty, ($($generics)*));
    };
    // Derive `AsRefVectorParts` from the type's own `AsRef<[T; N]>` impl.
    (__ref, $n:literal, $type:ty, $el_ty:ty, ($($generics:tt)*)) => {
        impl<$($generics)*> $crate::private::AsRefVectorParts<$el_ty, $n> for $type
        where
            Self: ::core::convert::AsRef<[$el_ty; $n]>,
            $el_ty: $crate::private::VectorScalar,
        {
            #[inline]
            fn as_ref_parts(&self) -> &[$el_ty; $n] {
                ::core::convert::AsRef::as_ref(self)
            }
        }
    };
    // Derive `AsMutVectorParts` from the type's own `AsMut<[T; N]>` impl.
    (__mut, $n:literal, $type:ty, $el_ty:ty, ($($generics:tt)*)) => {
        impl<$($generics)*> $crate::private::AsMutVectorParts<$el_ty, $n> for $type
        where
            Self: ::core::convert::AsMut<[$el_ty; $n]>,
            $el_ty: $crate::private::VectorScalar,
        {
            #[inline]
            fn as_mut_parts(&mut self) -> &mut [$el_ty; $n] {
                ::core::convert::AsMut::as_mut(self)
            }
        }
    };
    // Derive `FromVectorParts` from the type's own `From<[T; N]>` impl.
    (__from, $n:literal, $type:ty, $el_ty:ty, ($($generics:tt)*)) => {
        impl<$($generics)*> $crate::private::FromVectorParts<$el_ty, $n> for $type
        where
            Self: ::core::convert::From<[$el_ty; $n]>,
            $el_ty: $crate::private::VectorScalar,
        {
            #[inline]
            fn from_parts(parts: [$el_ty; $n]) -> Self {
                ::core::convert::From::from(parts)
            }
        }
    };
    (__main, $n:literal, $type:ty, $el_ty:ty, ($($generics:tt)*)) => {
        // Only 2-, 3- and 4-element vectors are representable.
        const _: () = assert!(
            2 <= $n && $n <= 4,
            "Vector should have at least 2 elements and at most 4!",
        );
        impl<$($generics)*> $crate::private::ShaderType for $type
        where
            $el_ty: $crate::private::ShaderSize,
        {
            type ExtraMetadata = ();
            const METADATA: $crate::private::Metadata<Self::ExtraMetadata> = {
                let size = $crate::private::SizeValue::from(<$el_ty as $crate::private::ShaderSize>::SHADER_SIZE).mul($n);
                let alignment = $crate::private::AlignmentValue::from_next_power_of_two_size(size);
                $crate::private::Metadata {
                    alignment,
                    has_uniform_min_alignment: false,
                    min_size: size,
                    // POD-ness is inherited from the equivalent `[T; N]` array.
                    is_pod: <[$el_ty; $n] as $crate::private::ShaderType>::METADATA.is_pod(),
                    extra: ()
                }
            };
        }
        impl<$($generics)*> $crate::private::ShaderSize for $type
        where
            $el_ty: $crate::private::ShaderSize
        {}
        // Buffer I/O delegates to the `[T; N]` impls through the parts traits.
        impl<$($generics)*> $crate::private::WriteInto for $type
        where
            Self: $crate::private::AsRefVectorParts<$el_ty, $n>,
            $el_ty: $crate::private::VectorScalar + $crate::private::WriteInto,
        {
            #[inline]
            fn write_into<B: $crate::private::BufferMut>(&self, writer: &mut $crate::private::Writer<B>) {
                let elements = $crate::private::AsRefVectorParts::<$el_ty, $n>::as_ref_parts(self);
                $crate::private::WriteInto::write_into(elements, writer);
            }
        }
        impl<$($generics)*> $crate::private::ReadFrom for $type
        where
            Self: $crate::private::AsMutVectorParts<$el_ty, $n>,
            $el_ty: $crate::private::VectorScalar + $crate::private::ReadFrom,
        {
            #[inline]
            fn read_from<B: $crate::private::BufferRef>(&mut self, reader: &mut $crate::private::Reader<B>) {
                let elements = $crate::private::AsMutVectorParts::<$el_ty, $n>::as_mut_parts(self);
                $crate::private::ReadFrom::read_from(elements, reader);
            }
        }
        impl<$($generics)*> $crate::private::CreateFrom for $type
        where
            Self: $crate::private::FromVectorParts<$el_ty, $n>,
            $el_ty: $crate::private::VectorScalar + $crate::private::CreateFrom,
        {
            #[inline]
            fn create_from<B: $crate::private::BufferRef>(reader: &mut $crate::private::Reader<B>) -> Self {
                let elements = $crate::private::CreateFrom::create_from(reader);
                $crate::private::FromVectorParts::<$el_ty, $n>::from_parts(elements)
            }
        }
    };
}

120
vendor/encase/src/types/wrapper.rs vendored Normal file
View File

@@ -0,0 +1,120 @@
/// Used to implement `ShaderType` for the given wrapper type
///
/// # Args
///
/// - `$type` the type (representing a wrapper) for which `ShaderType` will be implemented for
///
/// - `$generics` \[optional\] generics that will be passed into the `impl< >`
///
/// - `$using` \[optional\] can be any combination of `Ref{ X } Mut{ X } From{ X }`
/// (where `X` denotes a possible function call)
#[macro_export]
macro_rules! impl_wrapper {
    // No generics given: default to a single `T: ?Sized` parameter.
    ($type:ty; using $($using:tt)*) => {
        $crate::impl_wrapper_inner!(__inner, ($type, T: ?Sized); $($using)*);
    };
    // Caller-supplied generics are forwarded verbatim into the `impl< >`.
    ($type:ty; ($($generics:tt)*); using $($using:tt)*) => {
        $crate::impl_wrapper_inner!(__inner, ($type, $($generics)*); $($using)*);
    };
}
#[doc(hidden)]
#[macro_export]
macro_rules! impl_wrapper_inner {
    // Front-end arms: peel one capability (`Ref{..}`/`Mut{..}`/`From{..}`)
    // at a time. The braces may carry an accessor call (e.g. `.get()`) or a
    // constructor name (e.g. `new`) used by the emitted impls.
    (__inner, ($($other:tt)*); Ref{ $($get_ref:tt)* } $($using:tt)*) => {
        $crate::impl_wrapper_inner!(__ref, ($($other)*); { $($get_ref)* });
        $crate::impl_wrapper_inner!(__inner, ($($other)*); $($using)*);
    };
    (__inner, ($($other:tt)*); Mut{ $($get_mut:tt)* } $($using:tt)*) => {
        $crate::impl_wrapper_inner!(__mut, ($($other)*); { $($get_mut)* });
        $crate::impl_wrapper_inner!(__inner, ($($other)*); $($using)*);
    };
    (__inner, ($($other:tt)*); From{ $($from:tt)* } $($using:tt)*) => {
        $crate::impl_wrapper_inner!(__from, ($($other)*); { $($from)* });
        $crate::impl_wrapper_inner!(__inner, ($($other)*); $($using)*);
    };
    (__inner, ($type:ty, $($generics:tt)*); ) => {};
    // Read-side impls: metadata, sizing and writing all delegate to `T`.
    (__ref, ($type:ty, $($generics:tt)*); { $($get_ref:tt)* }) => {
        impl<$($generics)*> $crate::private::ShaderType for $type
        where
            T: $crate::private::ShaderType
        {
            type ExtraMetadata = T::ExtraMetadata;
            // `no_pod()` opts the wrapped type out of the POD byte-copy path.
            const METADATA: $crate::private::Metadata<Self::ExtraMetadata> = T::METADATA.no_pod();
            const UNIFORM_COMPAT_ASSERT: fn() = T::UNIFORM_COMPAT_ASSERT;
            #[inline]
            fn size(&self) -> ::core::num::NonZeroU64 {
                <T as $crate::private::ShaderType>::size(&self$($get_ref)*)
            }
        }
        impl<$($generics)*> $crate::private::ShaderSize for $type
        where
            T: $crate::private::ShaderSize
        {
            const SHADER_SIZE: ::core::num::NonZeroU64 = T::SHADER_SIZE;
        }
        impl<$($generics)*> $crate::private::RuntimeSizedArray for $type
        where
            T: $crate::private::RuntimeSizedArray
        {
            #[inline]
            fn len(&self) -> usize {
                <T as $crate::private::RuntimeSizedArray>::len(&self$($get_ref)*)
            }
        }
        impl<$($generics)*> $crate::private::CalculateSizeFor for $type
        where
            T: $crate::private::CalculateSizeFor
        {
            #[inline]
            fn calculate_size_for(nr_of_el: u64) -> ::core::num::NonZeroU64 {
                <T as $crate::private::CalculateSizeFor>::calculate_size_for(nr_of_el)
            }
        }
        impl<$($generics)*> $crate::private::WriteInto for $type
        where
            T: $crate::private::WriteInto
        {
            #[inline]
            fn write_into<B: $crate::private::BufferMut>(&self, writer: &mut $crate::private::Writer<B>) {
                <T as $crate::private::WriteInto>::write_into(&self$($get_ref)*, writer)
            }
        }
    };
    // Write-side impl: reading into the wrapper needs mutable access to `T`.
    (__mut, ($type:ty, $($generics:tt)*); { $($get_mut:tt)* }) => {
        impl<$($generics)*> $crate::private::ReadFrom for $type
        where
            T: $crate::private::ReadFrom
        {
            #[inline]
            fn read_from<B: $crate::private::BufferRef>(&mut self, reader: &mut $crate::private::Reader<B>) {
                <T as $crate::private::ReadFrom>::read_from(self$($get_mut)*, reader)
            }
        }
    };
    // Construction impl: build the wrapper from a freshly created `T` via the
    // given constructor (e.g. `Box::new`, `Cow::Owned`).
    (__from, ($type:ty, $($generics:tt)*); { $($from:tt)* }) => {
        impl<$($generics)*> $crate::private::CreateFrom for $type
        where
            T: $crate::private::CreateFrom
        {
            #[inline]
            fn create_from<B: $crate::private::BufferRef>(reader: &mut $crate::private::Reader<B>) -> Self {
                <$type>::$($from)*(<T as $crate::private::CreateFrom>::create_from(reader))
            }
        }
    };
}
// Wrapper impls: references and smart pointers forward the buffer traits to
// the wrapped `T`. `Cow` is only creatable via its `Owned` variant, and
// `Cell` requires `T: Copy` so the value can be read out with `.get()`.
impl_wrapper!(&T; using Ref{});
impl_wrapper!(&mut T; using Ref{} Mut{});
impl_wrapper!(Box<T>; using Ref{} Mut{} From{ new });
impl_wrapper!(std::borrow::Cow<'_, T>; (T: ?Sized + ToOwned<Owned = T>); using Ref{} From{ Owned });
impl_wrapper!(std::rc::Rc<T>; using Ref{} From{ new });
impl_wrapper!(std::sync::Arc<T>; using Ref{} From{ new });
impl_wrapper!(core::cell::Cell<T>; (T: Copy); using Ref{ .get() } Mut{ .get_mut() } From{ new });

226
vendor/encase/src/utils.rs vendored Normal file
View File

@@ -0,0 +1,226 @@
use core::mem::MaybeUninit;
/// Const no-op that consumes `N` unit values; `#[track_caller]` keeps any
/// panic raised while evaluating the argument at the caller's location.
/// NOTE(review): presumably used to force const evaluation of per-field
/// assertions — confirm against call sites.
#[track_caller]
pub const fn consume_zsts<const N: usize>(_: [(); N]) {}
// Builds `$type` field-by-field in place through `MaybeUninit`, avoiding a
// move of the (potentially large) struct. Every field must be listed, or
// `assume_init` would run on partially-initialized memory (UB).
#[doc(hidden)]
#[macro_export]
macro_rules! build_struct {
    ($type:ty, $( $field_idents:ident ),*) => {{
        let mut uninit_struct = ::core::mem::MaybeUninit::<$type>::uninit();
        let ptr = ::core::mem::MaybeUninit::as_mut_ptr(&mut uninit_struct);
        $( $crate::build_struct!(__write_to_field; ptr, $field_idents, $field_idents); )*
        // SAFETY: Everything has been initialized
        unsafe { ::core::mem::MaybeUninit::assume_init(uninit_struct) }
    }};
    (__write_to_field; $ptr:ident, $field_name:ident, $data:expr) => {
        // SAFETY: the pointer `ptr` returned by `as_mut_ptr` is a valid pointer,
        // so it's safe to get a pointer to a field through `addr_of_mut!`
        let field_ptr = unsafe { ::core::ptr::addr_of_mut!((*$ptr).$field_name) };
        // SAFETY: writing to `field_ptr` is safe because it's a pointer
        // to one of the struct's fields (therefore valid and aligned)
        unsafe { field_ptr.write($data) };
    };
}
// Selects between a POD fast path and a generic fallback. The fast path is
// gated to little-endian targets; on big-endian the `$false` branch is
// always taken.
#[doc(hidden)]
#[macro_export]
macro_rules! if_pod_and_little_endian {
    (if pod_and_little_endian $true:block else $false:block) => {{
        #[cfg(target_endian = "little")]
        // Const branch, should be eliminated at compile time.
        if <Self as $crate::private::ShaderType>::METADATA.is_pod() {
            $true
        } else {
            $false
        }
        #[cfg(not(target_endian = "little"))]
        {
            $false
        }
    }};
}
// Reinterprets `&[T; R * C]` as `&[[T; R]; C]` (a flat array viewed as `C`
// chunks of `R` elements) without copying; see the SAFETY notes below.
#[cfg(any(feature = "glam", feature = "ultraviolet", feature = "vek"))]
macro_rules! array_ref_to_2d_array_ref {
    ($array:expr, $ty:ty, $c:literal, $r:literal) => {
        // SAFETY:
        // transmuting from &[T; R * C] to &[[T; R]; C] is sound since:
        // the references have the same size
        // size_of::<&[T; R * C]>() = size_of::<usize>()
        // size_of::<&[[T; R]; C]>() = size_of::<usize>()
        // the values behind the references have the same size and alignment
        // size_of::<[T; R * C]>() = size_of::<T>() * R * C
        // size_of::<[[T; R]; C]>() = size_of::<[T; R]>() * C = size_of::<T>() * R * C
        // align_of::<[T; R * C]>() = align_of::<T>()
        // align_of::<[[T; R]; C]>() = align_of::<[T; R]>() = align_of::<T>()
        // ref: https://doc.rust-lang.org/reference/type-layout.html
        unsafe { ::core::mem::transmute::<&[$ty; $r * $c], &[[$ty; $r]; $c]>($array) }
    };
}
// Mutable counterpart of `array_ref_to_2d_array_ref`.
#[cfg(any(feature = "glam", feature = "ultraviolet", feature = "vek"))]
macro_rules! array_mut_to_2d_array_mut {
    ($array:expr, $ty:ty, $c:literal, $r:literal) => {
        // SAFETY:
        // transmuting from &mut [T; R * C] to &mut [[T; R]; C] is sound since:
        // the references have the same size
        // size_of::<&mut [T; R * C]>() = size_of::<usize>()
        // size_of::<&mut [[T; R]; C]>() = size_of::<usize>()
        // the values behind the references have the same size and alignment
        // size_of::<[T; R * C]>() = size_of::<T>() * R * C
        // size_of::<[[T; R]; C]>() = size_of::<[T; R]>() * C = size_of::<T>() * R * C
        // align_of::<[T; R * C]>() = align_of::<T>()
        // align_of::<[[T; R]; C]>() = align_of::<[T; R]>() = align_of::<T>()
        // ref: https://doc.rust-lang.org/reference/type-layout.html
        unsafe { ::core::mem::transmute::<&mut [$ty; $r * $c], &mut [[$ty; $r]; $c]>($array) }
    };
}
pub(crate) trait ByteVecExt {
    /// Tries to extend `self` with `0`s up to `new_len`, using memset.
    ///
    /// A `new_len` at or below the current length is a no-op; the only
    /// failure mode is allocation failure (`TryReserveError`).
    fn try_extend(&mut self, new_len: usize) -> Result<(), std::collections::TryReserveError>;
}
impl ByteVecExt for Vec<u8> {
    #[inline]
    fn try_extend(&mut self, new_len: usize) -> Result<(), std::collections::TryReserveError> {
        // `saturating_sub` makes shrink requests a no-op instead of underflowing.
        let additional = new_len.saturating_sub(self.len());
        if additional > 0 {
            self.try_reserve(additional)?;
            let end = unsafe { self.as_mut_ptr().add(self.len()) };
            // SAFETY
            // 1. dst ptr is valid for writes of `additional` bytes since the call to Vec::try_reserve() succeeded
            // 2. dst ptr is properly aligned since it was derived from Vec::as_mut_ptr() and offset within the allocation (u8 has alignment 1 anyway)
            unsafe { end.write_bytes(u8::MIN, additional) }
            // SAFETY
            // 1. new_len is less than or equal to Vec::capacity() since we reserved at least `additional` elements
            // 2. The elements at old_len..new_len are initialized since we wrote `additional` bytes
            unsafe { self.set_len(new_len) }
        }
        Ok(())
    }
}
impl<T> ByteVecExt for Vec<MaybeUninit<T>> {
    #[inline]
    fn try_extend(&mut self, new_len: usize) -> Result<(), std::collections::TryReserveError> {
        let additional = new_len.saturating_sub(self.len());
        if additional > 0 {
            self.try_reserve(additional)?;
            // It's OK to not initialize the extended elements as MaybeUninit allows
            // uninitialized memory.
            // SAFETY
            // 1. new_len is less than or equal to Vec::capacity() since we reserved at least `additional` elements
            // 2. `MaybeUninit<T>` places no validity requirement on its contents,
            //    so the uninitialized elements at old_len..new_len are valid values
            unsafe { self.set_len(new_len) }
        }
        Ok(())
    }
}
pub(crate) trait SliceExt<T> {
    /// Returns a "window" (shared reference to an array of length `N`) into this slice.
    ///
    /// # Panics
    ///
    /// Panics if the range `offset..offset + N` is out of bounds.
    fn array<const N: usize>(&self, offset: usize) -> &[T; N];
    /// Returns a "window" (mutable reference to an array of length `N`) into this slice.
    ///
    /// # Panics
    ///
    /// Panics if the range `offset..offset + N` is out of bounds.
    fn array_mut<const N: usize>(&mut self, offset: usize) -> &mut [T; N];
}
impl<T> SliceExt<T> for [T] {
    #[inline]
    fn array<const N: usize>(&self, offset: usize) -> &[T; N] {
        // The stable `TryFrom<&[T]> for &[T; N]` impl performs the same
        // reference conversion as a raw-pointer cast, but safely; the
        // conversion cannot fail because the subslice has length exactly N
        // (indexing panics first if the range is out of bounds).
        let src = &self[offset..offset + N];
        <&[T; N] as core::convert::TryFrom<&[T]>>::try_from(src)
            .expect("subslice has length N by construction")
    }
    #[inline]
    fn array_mut<const N: usize>(&mut self, offset: usize) -> &mut [T; N] {
        // Mutable counterpart of `array`; same reasoning.
        let src = &mut self[offset..offset + N];
        <&mut [T; N] as core::convert::TryFrom<&mut [T]>>::try_from(src)
            .expect("subslice has length N by construction")
    }
}
#[cfg(test)]
mod byte_vec_ext {
    use crate::utils::ByteVecExt;
    // Extending fills the new tail with zeroes.
    #[test]
    fn try_extend() {
        let mut vec: Vec<u8> = Vec::new();
        vec.try_extend(10).unwrap();
        assert_eq!(vec.len(), 10);
        assert!(vec.iter().all(|val| *val == 0));
    }
    // A `new_len` smaller than the current length leaves the vec untouched.
    #[test]
    fn try_extend_noop() {
        let mut vec = vec![0; 12];
        vec.try_extend(10).unwrap();
        assert_eq!(vec.len(), 12);
    }
    // An impossible reservation surfaces as a `TryReserveError`.
    #[test]
    fn try_extend_err() {
        let mut vec = vec![0; 12];
        assert!(vec.try_extend(usize::MAX).is_err());
    }
}
#[cfg(test)]
mod slice_ext {
    use crate::utils::SliceExt;
    // A shared array window at offset 3 of width 2.
    #[test]
    fn array() {
        let arr = [1, 3, 7, 6, 9, 7];
        let slice = arr.as_ref();
        let sub_arr: &[i32; 2] = slice.array(3);
        assert_eq!(sub_arr, &[6, 9]);
    }
    // Same window, mutable.
    #[test]
    fn array_mut() {
        let mut arr = [1, 3, 7, 6, 9, 7];
        let slice = arr.as_mut();
        let sub_arr: &mut [i32; 2] = slice.array_mut(3);
        assert_eq!(sub_arr, &mut [6, 9]);
    }
}

View File

@@ -0,0 +1,71 @@
use encase::ShaderType;
#[derive(ShaderType)]
struct S {
    x: f32,
}
#[derive(ShaderType)]
struct WrappedF32 {
    #[size(16)]
    elem: f32,
}
// Each layout below intentionally violates the uniform address-space rules;
// every test asserts that `assert_uniform_compat` panics.
#[test]
#[should_panic]
fn test_struct() {
    #[derive(ShaderType)]
    struct TestStruct {
        a: u32,
        b: S,
    }
    TestStruct::assert_uniform_compat();
}
#[test]
#[should_panic]
fn test_array() {
    #[derive(ShaderType)]
    struct TestArray {
        a: u32,
        b: [WrappedF32; 1],
    }
    TestArray::assert_uniform_compat();
}
#[test]
#[should_panic]
fn test_struct_first() {
    #[derive(ShaderType)]
    struct TestStructFirst {
        a: S,
        b: f32,
    }
    TestStructFirst::assert_uniform_compat();
}
#[test]
#[should_panic]
fn test_array_stride() {
    #[derive(ShaderType)]
    struct TestArrayStride {
        a: [u32; 8],
    }
    TestArrayStride::assert_uniform_compat();
}
#[test]
#[should_panic]
fn test_rts_array() {
    // Runtime-sized arrays are rejected for uniform usage.
    #[derive(ShaderType)]
    struct TestRTSArray {
        #[size(runtime)]
        a: Vec<f32>,
    }
    TestRTSArray::assert_uniform_compat();
}

View File

@@ -0,0 +1,40 @@
use encase::ShaderType;
#[derive(ShaderType)]
struct S {
    x: f32,
}
#[derive(ShaderType)]
struct WrappedF32 {
    #[size(16)]
    elem: f32,
}
// Same shapes as the failing uniform-compat fixtures, but with explicit
// `#[align(16)]` added on the offending fields — these must pass.
#[derive(ShaderType)]
struct TestStruct {
    a: u32,
    #[align(16)]
    b: S,
}
#[derive(ShaderType)]
struct TestArray {
    a: u32,
    #[align(16)]
    b: [WrappedF32; 1],
}
#[derive(ShaderType)]
struct TestStructFirst {
    a: S,
    #[align(16)]
    b: f32,
}
#[test]
fn assert_uniform_compat_success() {
    TestStruct::assert_uniform_compat();
    TestArray::assert_uniform_compat();
    TestStructFirst::assert_uniform_compat();
}

View File

@@ -0,0 +1,9 @@
use encase::{ArrayLength, ShaderType};
fn main() {}
#[derive(ShaderType)]
struct Test {
a: ArrayLength,
b: ArrayLength,
}

View File

@@ -0,0 +1,11 @@
error: `ArrayLength` type can only be used within a struct containing a runtime-sized array marked as `#[size(runtime)]`!
--> tests/compile_fail/array_length_err.rs:7:8
|
7 | a: ArrayLength,
| ^^^^^^^^^^^
error: only one field can use the `ArrayLength` type!
--> tests/compile_fail/array_length_err.rs:8:8
|
8 | b: ArrayLength,
| ^^^^^^^^^^^

View File

@@ -0,0 +1,6 @@
use encase::ShaderType;
fn main() {}
#[derive(ShaderType)]
struct Test;

View File

@@ -0,0 +1,7 @@
error: Only non empty structs with named fields are supported!
--> tests/compile_fail/general_struct_err.rs:5:10
|
5 | #[derive(ShaderType)]
| ^^^^^^^^^^
|
= note: this error originates in the derive macro `ShaderType` (in Nightly builds, run with -Z macro-backtrace for more info)

View File

@@ -0,0 +1,15 @@
use encase::ShaderType;
fn main() {}
#[derive(ShaderType)]
struct Test {
#[align]
a: u32,
#[align()]
b: u32,
#[align(invalid)]
c: u32,
#[align(3)]
d: u32,
}

View File

@@ -0,0 +1,23 @@
error: expected attribute arguments in parentheses: `align(...)`
--> tests/compile_fail/invalid_align_attr.rs:7:7
|
7 | #[align]
| ^^^^^
error: expected a power of 2 u32 literal
--> tests/compile_fail/invalid_align_attr.rs:9:13
|
9 | #[align()]
| ^
error: expected a power of 2 u32 literal
--> tests/compile_fail/invalid_align_attr.rs:11:13
|
11 | #[align(invalid)]
| ^^^^^^^
error: expected a power of 2 u32 literal
--> tests/compile_fail/invalid_align_attr.rs:13:14
|
13 | #[align(3)]
| ^

View File

@@ -0,0 +1,15 @@
use encase::ShaderType;
fn main() {}
#[derive(ShaderType)]
struct Test {
#[size]
a: u32,
#[size()]
b: u32,
#[size(invalid)]
c: u32,
#[size(-1)]
d: u32,
}

View File

@@ -0,0 +1,23 @@
error: expected attribute arguments in parentheses: `size(...)`
--> tests/compile_fail/invalid_size_attr.rs:7:7
|
7 | #[size]
| ^^^^
error: expected u32 literal
--> tests/compile_fail/invalid_size_attr.rs:9:12
|
9 | #[size()]
| ^
error: expected u32 literal
--> tests/compile_fail/invalid_size_attr.rs:11:12
|
11 | #[size(invalid)]
| ^^^^^^^
error: expected u32 literal or `runtime` identifier
--> tests/compile_fail/invalid_size_attr.rs:13:14
|
13 | #[size(-1)]
| ^

36
vendor/encase/tests/errors.rs vendored Normal file
View File

@@ -0,0 +1,36 @@
use encase::{internal::Error, ShaderType, StorageBuffer};
// A 1-byte backing buffer is too small for `Test` (one `u32`, 4 bytes), so
// write, read and create must all fail with `BufferTooSmall`.
#[test]
fn buffer_too_small() {
    #[derive(ShaderType)]
    struct Test {
        a: u32,
    }
    let mut v = Test { a: 4 };
    let mut buffer = StorageBuffer::new([0u8]);
    assert!(matches!(
        buffer.write(&v),
        Err(Error::BufferTooSmall {
            expected: 4,
            found: 1
        })
    ));
    assert!(matches!(
        buffer.read(&mut v),
        Err(Error::BufferTooSmall {
            expected: 4,
            found: 1
        })
    ));
    assert!(matches!(
        buffer.create::<Test>(),
        Err(Error::BufferTooSmall {
            expected: 4,
            found: 1
        })
    ));
}

160
vendor/encase/tests/general.rs vendored Normal file
View File

@@ -0,0 +1,160 @@
use encase::{ArrayLength, CalculateSizeFor, ShaderType, StorageBuffer};
// Generates one random value of `$ty` from 4 RNG bytes.
macro_rules! gen {
    ($rng:ident, $ty:ty) => {{
        let mut buf = [0; 4];
        use rand::RngCore;
        $rng.fill_bytes(&mut buf);
        <$ty>::from_ne_bytes(buf)
    }};
}
// Generates a `[$ty; $n]` of random values.
macro_rules! gen_arr {
    ($rng:ident, $ty:ty, $n:literal) => {{
        [(); $n].map(|_| gen!($rng, $ty))
    }};
}
// Generates a `[[$ty; $m]; $n]` of random values.
macro_rules! gen_2d_arr {
    ($rng:ident, $ty:ty, $n:literal, $m:literal) => {{
        [[(); $m]; $n].map(|arr| arr.map(|_| gen!($rng, $ty)))
    }};
}
// Evaluates the trailing expression `$n` times into an array.
macro_rules! gen_inner {
    ($n:literal, $($tail:tt)*) => {{
        [(); $n].map(|_| $($tail)*)
    }};
}
// Kitchen-sink struct exercising every supported scalar, wrapper, mint
// vector/point/matrix and array type, plus a runtime-sized tail array with
// its `ArrayLength` companion field.
#[derive(ShaderType)]
struct A {
    f: f32,
    u: u32,
    i: i32,
    nu: Option<core::num::NonZeroU32>,
    ni: Option<core::num::NonZeroI32>,
    wu: core::num::Wrapping<u32>,
    wi: core::num::Wrapping<i32>,
    au: core::sync::atomic::AtomicU32,
    ai: core::sync::atomic::AtomicI32,
    v2: mint::Vector2<f32>,
    v3: mint::Vector3<u32>,
    v4: mint::Vector4<i32>,
    p2: mint::Point2<f32>,
    p3: mint::Point3<f32>,
    mat2: mint::ColumnMatrix2<f32>,
    mat2x3: mint::ColumnMatrix2x3<f32>,
    mat2x4: mint::ColumnMatrix2x4<f32>,
    mat3x2: mint::ColumnMatrix3x2<f32>,
    mat3: mint::ColumnMatrix3<f32>,
    mat3x4: mint::ColumnMatrix3x4<f32>,
    mat4x2: mint::ColumnMatrix4x2<f32>,
    mat4x3: mint::ColumnMatrix4x3<f32>,
    mat4: mint::ColumnMatrix4<f32>,
    arrf: [f32; 32],
    arru: [u32; 32],
    arri: [i32; 32],
    arrvf: [mint::Vector2<f32>; 16],
    arrvu: [mint::Vector3<u32>; 16],
    arrvi: [mint::Vector4<i32>; 16],
    arrm2: [mint::ColumnMatrix2<f32>; 8],
    arrm3: [mint::ColumnMatrix3<f32>; 8],
    arrm4: [mint::ColumnMatrix4<f32>; 8],
    rt_arr_len: ArrayLength,
    #[size(runtime)]
    rt_arr: Vec<mint::ColumnMatrix2x3<f32>>,
}
// Fills every field of `A` with RNG-derived values; deterministic for a
// seeded `StdRng`, which lets the tests below use fixed snapshots.
fn gen_a(rng: &mut rand::rngs::StdRng) -> A {
    A {
        f: gen!(rng, f32),
        u: gen!(rng, u32),
        i: gen!(rng, i32),
        nu: core::num::NonZeroU32::new(gen!(rng, u32)),
        ni: core::num::NonZeroI32::new(gen!(rng, i32)),
        wu: core::num::Wrapping(gen!(rng, u32)),
        wi: core::num::Wrapping(gen!(rng, i32)),
        au: core::sync::atomic::AtomicU32::new(gen!(rng, u32)),
        ai: core::sync::atomic::AtomicI32::new(gen!(rng, i32)),
        v2: mint::Vector2::from(gen_arr!(rng, f32, 2)),
        v3: mint::Vector3::from(gen_arr!(rng, u32, 3)),
        v4: mint::Vector4::from(gen_arr!(rng, i32, 4)),
        p2: mint::Point2::from(gen_arr!(rng, f32, 2)),
        p3: mint::Point3::from(gen_arr!(rng, f32, 3)),
        mat2: mint::ColumnMatrix2::from(gen_2d_arr!(rng, f32, 2, 2)),
        mat2x3: mint::ColumnMatrix2x3::from(gen_2d_arr!(rng, f32, 3, 2)),
        mat2x4: mint::ColumnMatrix2x4::from(gen_2d_arr!(rng, f32, 4, 2)),
        mat3x2: mint::ColumnMatrix3x2::from(gen_2d_arr!(rng, f32, 2, 3)),
        mat3: mint::ColumnMatrix3::from(gen_2d_arr!(rng, f32, 3, 3)),
        mat3x4: mint::ColumnMatrix3x4::from(gen_2d_arr!(rng, f32, 4, 3)),
        mat4x2: mint::ColumnMatrix4x2::from(gen_2d_arr!(rng, f32, 2, 4)),
        mat4x3: mint::ColumnMatrix4x3::from(gen_2d_arr!(rng, f32, 3, 4)),
        mat4: mint::ColumnMatrix4::from(gen_2d_arr!(rng, f32, 4, 4)),
        arrf: gen_arr!(rng, f32, 32),
        arru: gen_arr!(rng, u32, 32),
        arri: gen_arr!(rng, i32, 32),
        arrvf: gen_inner!(16, mint::Vector2::from(gen_arr!(rng, f32, 2))),
        arrvu: gen_inner!(16, mint::Vector3::from(gen_arr!(rng, u32, 3))),
        arrvi: gen_inner!(16, mint::Vector4::from(gen_arr!(rng, i32, 4))),
        arrm2: gen_inner!(8, mint::ColumnMatrix2::from(gen_2d_arr!(rng, f32, 2, 2))),
        arrm3: gen_inner!(8, mint::ColumnMatrix3::from(gen_2d_arr!(rng, f32, 3, 3))),
        arrm4: gen_inner!(8, mint::ColumnMatrix4::from(gen_2d_arr!(rng, f32, 4, 4))),
        rt_arr_len: ArrayLength,
        rt_arr: vec![mint::ColumnMatrix2x3::from(gen_2d_arr!(rng, f32, 3, 2)); 64],
    }
}
// Size snapshot for a fixed seed — guards against layout regressions.
#[test]
fn size() {
    use rand::SeedableRng;
    let mut rng = rand::rngs::StdRng::seed_from_u64(1234);
    let a = gen_a(&mut rng);
    assert_eq!(a.size().get(), 4080);
}
// Size of `A` with a 12-element runtime array, via the `&A` wrapper impl.
#[test]
fn calculate_size_for() {
    assert_eq!(<&A>::calculate_size_for(12).get(), 2832);
}
#[test]
fn all_types() {
    use rand::SeedableRng;
    let mut rng = rand::rngs::StdRng::seed_from_u64(1234);
    let a = gen_a(&mut rng);
    let mut raw_buffer = Vec::new();
    let mut buffer = StorageBuffer::new(&mut raw_buffer);
    buffer.write(&a).unwrap();
    // write -> create -> write again must reproduce the exact same bytes.
    let mut a_clone: A = buffer.create().unwrap();
    let mut raw_buffer_2 = Vec::new();
    let mut buffer_2 = StorageBuffer::new(&mut raw_buffer_2);
    buffer_2.write(&a_clone).unwrap();
    assert_eq!(buffer.as_ref(), buffer_2.as_ref());
    // Truncate the runtime array, then `read` must restore it from the buffer.
    a_clone.rt_arr.truncate(10);
    // a_clone.rt_arr.reserve_exact(0);
    buffer_2.read(&mut a_clone).unwrap();
    buffer_2.write(&a_clone).unwrap();
    assert_eq!(raw_buffer, raw_buffer_2);
}
// Writing `[&u32; 2]` must serialize identically to `[u32; 2]`.
#[test]
fn test_opt_writing() {
    let one = 1_u32;
    let two = 2_u32;
    let data = [&one, &two];
    let data2 = [one, two];
    let mut in_byte_buffer: Vec<u8> = Vec::new();
    let mut in_byte_buffer2: Vec<u8> = Vec::new();
    let mut in_buffer = StorageBuffer::new(&mut in_byte_buffer);
    let mut in_buffer2 = StorageBuffer::new(&mut in_byte_buffer2);
    in_buffer.write(&data).unwrap();
    in_buffer2.write(&data2).unwrap();
    assert_eq!(in_byte_buffer, in_byte_buffer2);
}

115
vendor/encase/tests/hygiene.rs vendored Normal file
View File

@@ -0,0 +1,115 @@
#![no_implicit_prelude]
#![allow(non_camel_case_types)]
// `#![no_implicit_prelude]` (at the top of this file) plus these shadowing
// trait declarations verify that the exported macros are hygienic: every
// path they expand to must be absolute (`::core::...` / `$crate::...`).
macro_rules! decl_primitives_as_traits {
    ($($primitive:ident),*) => {$(#[allow(dead_code)] trait $primitive {})*};
}
// from core::primitive
decl_primitives_as_traits!(
    bool, char, f32, f64, i128, i16, i32, i64, i8, isize, str, u128, u16, u32, u64, u8, usize
);
mod impl_vector {
    use ::core::{
        convert::{AsMut, AsRef, From},
        marker::PhantomData,
        unimplemented,
    };
    // Stub vector type: the conversions only need to exist, not to run.
    pub struct Test<'a, T> {
        data: PhantomData<&'a T>,
    }
    impl<'a, T, const N: usize> AsRef<[T; N]> for Test<'a, T> {
        fn as_ref(&self) -> &[T; N] {
            unimplemented!()
        }
    }
    impl<'a, T, const N: usize> AsMut<[T; N]> for Test<'a, T> {
        fn as_mut(&mut self) -> &mut [T; N] {
            unimplemented!()
        }
    }
    impl<'a, T, const N: usize> From<[T; N]> for Test<'a, T> {
        fn from(_: [T; N]) -> Self {
            unimplemented!()
        }
    }
}
::encase::impl_vector!(2, impl_vector::Test<'a, T>; ('a, T: 'a); using AsRef AsMut From);
mod impl_matrix {
    use ::core::{
        convert::{AsMut, AsRef, From},
        marker::PhantomData,
        unimplemented,
    };
    // Stub matrix type mirroring the vector stub above.
    pub struct Test<'a, T> {
        data: PhantomData<&'a T>,
    }
    impl<'a, T, const N: usize, const M: usize> AsRef<[[T; M]; N]> for Test<'a, T> {
        fn as_ref(&self) -> &[[T; M]; N] {
            unimplemented!()
        }
    }
    impl<'a, T, const N: usize, const M: usize> AsMut<[[T; M]; N]> for Test<'a, T> {
        fn as_mut(&mut self) -> &mut [[T; M]; N] {
            unimplemented!()
        }
    }
    impl<'a, T, const N: usize, const M: usize> From<[[T; M]; N]> for Test<'a, T> {
        fn from(_: [[T; M]; N]) -> Self {
            unimplemented!()
        }
    }
}
::encase::impl_matrix!(2, 2, impl_matrix::Test<'a, T>; ('a, T: 'a); using AsRef AsMut From);
mod impl_rts_array {
    use ::core::{marker::PhantomData, unimplemented};
    pub trait Array {
        type Item;
    }
    // Stub runtime-sized-array type exposing only `len`/`truncate`.
    pub struct Test<A: Array> {
        data: PhantomData<A>,
    }
    impl<A: Array> Test<A> {
        pub fn len(&self) -> usize {
            unimplemented!()
        }
        pub fn truncate(&mut self, _len: usize) {
            unimplemented!()
        }
    }
}
::encase::impl_rts_array!(impl_rts_array::Test<A>; (T, A: impl_rts_array::Array<Item = T>); using len truncate);
// The derive must expand hygienically too, including attribute handling.
#[derive(::encase::ShaderType)]
struct Test {
    a: [::mint::Vector3<::core::primitive::f32>; 2],
    b: ::core::primitive::u32,
}
#[derive(::encase::ShaderType)]
struct TestGeneric<
    'a,
    T: 'a + ::encase::ShaderType + ::encase::ShaderSize,
    const N: ::core::primitive::usize,
> {
    #[size(90)]
    a: &'a mut Test,
    b: &'a mut [T; N],
    #[align(16)]
    #[size(runtime)]
    c: &'a mut ::std::vec::Vec<[::mint::Vector3<::core::primitive::f32>; 2]>,
}

12
vendor/encase/tests/metadata.rs vendored Normal file
View File

@@ -0,0 +1,12 @@
use encase::ShaderType;
#[derive(ShaderType)]
struct WrappedF32 {
    #[size(16)]
    value: f32,
}
#[test]
fn field_padding() {
    // `f32` is 4 bytes; `#[size(16)]` stretches the field to 16 bytes,
    // leaving 12 bytes of trailing padding.
    assert_eq!(WrappedF32::METADATA.padding(0), 12);
}

26
vendor/encase/tests/pass/attributes.rs vendored Normal file
View File

@@ -0,0 +1,26 @@
use encase::ShaderType;
fn main() {}
// Valid uses of `#[align]`/`#[size]` must compile (run via trybuild `pass`).
#[derive(ShaderType)]
struct TestAttributes {
    #[align(16)]
    a: u32,
    #[size(8)]
    b: u32,
}
#[derive(ShaderType)]
struct TestRtArray {
    #[size(8)]
    a: u32,
    #[align(16)]
    #[size(runtime)]
    b: Vec<u32>,
}
// Doc comments are attributes too — the derive must tolerate them.
#[derive(ShaderType)]
struct TestDocComment {
    /// This is an unsigned integer
    a: u32,
}

16
vendor/encase/tests/pass/wrappers.rs vendored Normal file
View File

@@ -0,0 +1,16 @@
use core::cell::Cell;
use encase::ShaderType;
use std::{borrow::Cow, rc::Rc, sync::Arc};
fn main() {}
// Every supported wrapper type must be usable as a `ShaderType` field.
#[derive(ShaderType)]
struct Test<'a> {
    a: &'a u32,
    b: &'a mut u32,
    c: Box<u32>,
    d: Cow<'a, u32>,
    e: Rc<u32>,
    f: Arc<u32>,
    g: Cell<u32>,
}

View File

@@ -0,0 +1,23 @@
// Mirrors the Rust-side `A` in tests/wgpu.rs::array_length: copies the input
// storage buffer to the output and additionally stores the value returned by
// `arrayLength(&in.arr)` so the host can compare it with the `ArrayLength`
// field encase wrote.
struct A {
    array_length: u32,
    array_length_call_ret_val: u32,
    a: vec3<u32>,
    @align(16)
    arr: array<u32>,
}
@group(0) @binding(0)
var<storage> in: A;
@group(0) @binding(1)
var<storage, read_write> out: A;
@compute @workgroup_size(1, 1, 1)
fn main() {
    out.array_length = in.array_length;
    out.a = in.a;
    for (var i = 0u; i < arrayLength(&in.arr); i = i + 1u) {
        out.arr[i] = in.arr[i];
    }
    out.array_length_call_ret_val = arrayLength(&in.arr);
}

View File

@@ -0,0 +1,45 @@
// Copy shader used by tests/wgpu.rs::test_wgpu: copies every field of `B`
// (including the runtime-sized tail `i`) from `in` to `out`. The @size/@align
// decorations must match the `#[size]`/`#[align]` attributes on the Rust side.
struct A {
    u: u32,
    v: u32,
    w: vec2<u32>,
    @size(16) @align(8)
    x: u32,
    xx: u32,
}
struct B {
    a: vec2<u32>,
    b: vec3<u32>,
    c: u32,
    d: u32,
    @align(16)
    e: A,
    f: vec3<u32>,
    g: array<A, 3>,
    h: i32,
    @align(32)
    i: array<A>,
}
@group(0) @binding(0)
var<storage> in: B;
@group(0) @binding(1)
var<storage, read_write> out: B;
@compute @workgroup_size(1, 1, 1)
fn main() {
    out.a = in.a;
    out.b = in.b;
    out.c = in.c;
    out.d = in.d;
    out.e = in.e;
    out.f = in.f;
    out.g[0] = in.g[0];
    out.g[1] = in.g[1];
    out.g[2] = in.g[2];
    out.h = in.h;
    for (var i = 0u; i < arrayLength(&in.i); i = i + 1u) {
        out.i[i] = in.i[i];
    }
}

8
vendor/encase/tests/trybuild.rs vendored Normal file
View File

@@ -0,0 +1,8 @@
#![cfg(not(miri))] // Can't run (and no point running) trybuild through miri
// Compile-time test harness: fixtures under tests/pass must compile;
// fixtures under tests/compile_fail must fail with exactly the committed
// `.stderr` output (so those fixtures are line-number sensitive).
#[test]
fn trybuild() {
    let t = trybuild::TestCases::new();
    t.pass("tests/pass/*.rs");
    t.compile_fail("tests/compile_fail/*.rs");
}

18
vendor/encase/tests/uniform.rs vendored Normal file
View File

@@ -0,0 +1,18 @@
use encase::{ShaderType, UniformBuffer};
#[derive(Debug, ShaderType, PartialEq, Eq)]
struct TestUniform {
    a: u32,
    b: u32,
}
// Round-trips a value through a uniform buffer: write it, read it back in
// place, then `create` a fresh value — all must agree.
#[test]
fn uniform() {
    let mut val = TestUniform { a: 4, b: 23 };
    let mut buffer = UniformBuffer::new(Vec::new());
    buffer.write(&val).unwrap();
    buffer.read(&mut val).unwrap();
    assert_eq!(val, buffer.create().unwrap());
}

279
vendor/encase/tests/wgpu.rs vendored Normal file
View File

@@ -0,0 +1,279 @@
#![cfg(not(miri))] // Can't run wgpu through miri
use encase::{ArrayLength, ShaderType, StorageBuffer};
use futures::executor::block_on;
use mint::{Vector2, Vector3};
use wgpu::{include_wgsl, util::DeviceExt};
// Host-side mirror of `struct A` in tests/shaders/general.wgsl; the
// `#[size]`/`#[align]` attributes must match the shader's decorations.
#[derive(Debug, ShaderType, PartialEq)]
struct A {
    u: u32,
    v: u32,
    w: Vector2<u32>,
    #[size(16)]
    #[align(8)]
    x: u32,
    xx: u32,
}
// Host-side mirror of `struct B` in tests/shaders/general.wgsl, including a
// runtime-sized tail array.
#[derive(Debug, ShaderType, PartialEq)]
struct B {
    a: Vector2<u32>,
    b: Vector3<u32>,
    c: u32,
    d: u32,
    #[align(16)]
    e: A,
    f: Vector3<u32>,
    g: [A; 3],
    h: i32,
    #[align(32)]
    #[size(runtime)]
    i: Vec<A>,
}
// End-to-end round trip: serialize `B`, run it through the copy shader in
// tests/shaders/general.wgsl on a real device, and check that both the raw
// bytes and the deserialized value come back unchanged.
#[test]
fn test_wgpu() {
    let b = B {
        a: Vector2 { x: 45, y: 564 },
        b: Vector3 {
            x: 465,
            y: 56664,
            z: 5646,
        },
        c: 4,
        d: 3,
        e: A {
            u: 5,
            v: 566,
            w: Vector2 { x: 4345, y: 43564 },
            x: 5444,
            xx: 305444,
        },
        f: Vector3 {
            x: 455465,
            y: 55665464,
            z: 5564546,
        },
        g: [
            A {
                u: 105,
                v: 10566,
                w: Vector2 {
                    x: 14345,
                    y: 143564,
                },
                x: 105444,
                xx: 305444,
            },
            A {
                u: 205,
                v: 20566,
                w: Vector2 {
                    x: 24345,
                    y: 243564,
                },
                x: 205444,
                xx: 305444,
            },
            A {
                u: 305,
                v: 30566,
                w: Vector2 {
                    x: 34345,
                    y: 343564,
                },
                x: 305444,
                xx: 305444,
            },
        ],
        h: 5,
        i: Vec::from([A {
            u: 205,
            v: 20566,
            w: Vector2 {
                x: 24345,
                y: 243564,
            },
            x: 205444,
            xx: 305444,
        }]),
    };
    let mut in_byte_buffer = Vec::new();
    let mut in_buffer = StorageBuffer::new(&mut in_byte_buffer);
    in_buffer.write(&b).unwrap();
    // The serialized length must match the value's self-reported size.
    assert_eq!(in_byte_buffer.len(), b.size().get() as _);
    let shader = include_wgsl!("./shaders/general.wgsl");
    let out_byte_buffer = in_out::<B, B>(shader, &in_byte_buffer, false);
    assert_eq!(in_byte_buffer, out_byte_buffer);
    let out_buffer = StorageBuffer::new(out_byte_buffer);
    let out_val: B = out_buffer.create().unwrap();
    assert_eq!(b, out_val);
}
/// Verifies the `ArrayLength` marker: the shader writes the result of
/// calling `arrayLength(&in.arr)` into `array_length_call_ret_val`, and
/// the fixture sets that field to the expected length (4 is deliberate —
/// see the shader) so the byte-for-byte comparison also checks the value.
/// NOTE(review): the expected value 4 presumably matches what
/// array_length.wgsl computes — confirm against that shader if it changes.
#[test]
fn array_length() {
    #[derive(Debug, ShaderType, PartialEq)]
    struct A {
        array_length: ArrayLength,
        array_length_call_ret_val: u32,
        a: Vector3<u32>,
        #[align(16)]
        #[size(runtime)]
        arr: Vec<u32>,
    }
    let in_value = A {
        array_length: ArrayLength,
        array_length_call_ret_val: 4,
        a: Vector3 { x: 5, y: 4, z: 6 },
        arr: vec![45],
    };
    let mut in_byte_buffer = Vec::new();
    let mut in_buffer = StorageBuffer::new(&mut in_byte_buffer);
    in_buffer.write(&in_value).unwrap();
    // Encoder must produce exactly the advertised size.
    assert_eq!(in_byte_buffer.len(), in_value.size().get() as _);
    let shader = include_wgsl!("./shaders/array_length.wgsl");
    let out_byte_buffer = in_out::<A, A>(shader, &in_byte_buffer, false);
    assert_eq!(in_byte_buffer, out_byte_buffer);
    let out_buffer = StorageBuffer::new(out_byte_buffer);
    let out_val: A = out_buffer.create().unwrap();
    assert_eq!(in_value, out_val);
}
/// Uploads `data` to the GPU, runs `shader`'s `main` compute entry point
/// exactly once (workgroups 1x1x1), and returns the bytes of the output
/// buffer.
///
/// Binding layout (group 0): binding 0 = input (uniform when
/// `is_uniform`, otherwise read-only storage), binding 1 = read-write
/// storage output. `IN`/`OUT` only supply `min_size()` for the bind
/// group layout; the raw `data` bytes are passed through untouched.
///
/// Panics if no adapter/device is available or any wgpu call fails —
/// acceptable for a test helper.
fn in_out<IN: encase::ShaderType, OUT: encase::ShaderType>(
    shader: wgpu::ShaderModuleDescriptor,
    data: &[u8],
    is_uniform: bool,
) -> Vec<u8> {
    let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
        backends: wgpu::Backends::PRIMARY,
        dx12_shader_compiler: wgpu::Dx12Compiler::Fxc,
        ..Default::default()
    });
    let adapter = block_on(instance.request_adapter(&wgpu::RequestAdapterOptions {
        power_preference: wgpu::PowerPreference::default(),
        ..Default::default()
    }))
    .unwrap();
    // Printed so CI logs show which backend/GPU the test actually ran on.
    println!("Adapter info: {:#?}", adapter.get_info());
    let (device, queue) =
        block_on(adapter.request_device(&wgpu::DeviceDescriptor::default(), None)).unwrap();
    // Input is created pre-filled with `data`; usage covers both binding
    // types so the same buffer works for the uniform and storage cases.
    let input_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
        label: Some("Input Buffer"),
        contents: data,
        usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::UNIFORM,
    });
    // GPU-side output the shader writes into; COPY_SRC so it can be
    // copied to the mappable readback buffer below.
    let output_gpu_buffer = device.create_buffer(&wgpu::BufferDescriptor {
        label: Some("Output Buffer"),
        size: data.len() as _,
        usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_SRC,
        mapped_at_creation: false,
    });
    // Separate MAP_READ buffer: storage buffers can't be mapped directly.
    let mapping_buffer = device.create_buffer(&wgpu::BufferDescriptor {
        label: Some("Mapping Buffer"),
        size: data.len() as _,
        usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
        mapped_at_creation: false,
    });
    let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
        label: None,
        entries: &[
            wgpu::BindGroupLayoutEntry {
                binding: 0,
                visibility: wgpu::ShaderStages::COMPUTE,
                ty: wgpu::BindingType::Buffer {
                    ty: if is_uniform {
                        wgpu::BufferBindingType::Uniform
                    } else {
                        wgpu::BufferBindingType::Storage { read_only: true }
                    },
                    has_dynamic_offset: false,
                    // min_size from the encase type lets wgpu validate the
                    // buffer is large enough for the declared shader type.
                    min_binding_size: Some(IN::min_size()),
                },
                count: None,
            },
            wgpu::BindGroupLayoutEntry {
                binding: 1,
                visibility: wgpu::ShaderStages::COMPUTE,
                ty: wgpu::BindingType::Buffer {
                    ty: wgpu::BufferBindingType::Storage { read_only: false },
                    has_dynamic_offset: false,
                    min_binding_size: Some(OUT::min_size()),
                },
                count: None,
            },
        ],
    });
    let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
        label: None,
        bind_group_layouts: &[&bind_group_layout],
        push_constant_ranges: &[],
    });
    let shader = device.create_shader_module(shader);
    let compute_pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
        label: None,
        layout: Some(&pipeline_layout),
        module: &shader,
        entry_point: "main",
        compilation_options: wgpu::PipelineCompilationOptions::default(),
        cache: None,
    });
    let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
        label: None,
        layout: &bind_group_layout,
        entries: &[
            wgpu::BindGroupEntry {
                binding: 0,
                resource: input_buffer.as_entire_binding(),
            },
            wgpu::BindGroupEntry {
                binding: 1,
                resource: output_gpu_buffer.as_entire_binding(),
            },
        ],
    });
    let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor::default());
    {
        // Scoped so the compute pass ends before the copy is recorded.
        let mut cpass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor::default());
        cpass.set_pipeline(&compute_pipeline);
        cpass.set_bind_group(0, &bind_group, &[]);
        cpass.dispatch_workgroups(1, 1, 1);
    }
    encoder.copy_buffer_to_buffer(&output_gpu_buffer, 0, &mapping_buffer, 0, data.len() as _);
    queue.submit(core::iter::once(encoder.finish()));
    let output_slice = mapping_buffer.slice(..);
    // Empty callback: completion is awaited via the blocking poll below.
    output_slice.map_async(wgpu::MapMode::Read, |_| {});
    device.poll(wgpu::Maintain::Wait);
    let output = output_slice.get_mapped_range().to_vec();
    mapping_buffer.unmap();
    output
}