Vendor dependencies for 0.3.0 release

2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

vendor/gpu-descriptor/.cargo-checksum.json vendored Normal file

@@ -0,0 +1 @@
{"files":{"Cargo.lock":"478f69148ba2891e55d764645d9a6a0359f983a77ff40cc6b02d5ec62f50105d","Cargo.toml":"8c72608cd345f059df314860960da46978779233cedc3553db34c98a4e9fac88","README.md":"23283d5bdd4ef362cc5f848b03f58e0ad2354d4fbcfc477032c5db2cb0b6d9db","src/allocator.rs":"7b7ad145112074cf066742e20bd2656aa642fed64f69e5290515ba1ece1bb905","src/lib.rs":"86d91995ea56d2ccf12974cb28f02028d096e00acbe417b1acdaa0d7133ae9f2"},"package":"b89c83349105e3732062a895becfc71a8f921bb71ecbbdd8ff99263e3b53a0ca"}

vendor/gpu-descriptor/Cargo.lock generated vendored Normal file

@@ -0,0 +1,121 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 4
[[package]]
name = "bitflags"
version = "2.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de"
[[package]]
name = "foldhash"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f81ec6369c545a7d40e4589b5597581fa1c441fe1cce96dd1de43159910a36a2"
[[package]]
name = "gpu-descriptor"
version = "0.3.2"
dependencies = [
"bitflags",
"gpu-descriptor-types",
"hashbrown",
"serde",
"tracing",
]
[[package]]
name = "gpu-descriptor-types"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fdf242682df893b86f33a73828fb09ca4b2d3bb6cc95249707fc684d27484b91"
dependencies = [
"bitflags",
]
[[package]]
name = "hashbrown"
version = "0.15.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289"
dependencies = [
"foldhash",
]
[[package]]
name = "pin-project-lite"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff"
[[package]]
name = "proc-macro2"
version = "1.0.92"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.37"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af"
dependencies = [
"proc-macro2",
]
[[package]]
name = "serde"
version = "1.0.216"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b9781016e935a97e8beecf0c933758c97a5520d32930e460142b4cd80c6338e"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.216"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "46f859dbbf73865c6627ed570e78961cd3ac92407a2d117204c49232485da55e"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "syn"
version = "2.0.90"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "tracing"
version = "0.1.41"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0"
dependencies = [
"pin-project-lite",
"tracing-core",
]
[[package]]
name = "tracing-core"
version = "0.1.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c"
[[package]]
name = "unicode-ident"
version = "1.0.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83"

vendor/gpu-descriptor/Cargo.toml vendored Normal file

@@ -0,0 +1,64 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2018"
name = "gpu-descriptor"
version = "0.3.2"
authors = ["Zakarum <zakarumych@ya.ru>"]
build = false
autolib = false
autobins = false
autoexamples = false
autotests = false
autobenches = false
description = "Implementation agnostic descriptor allocator for Vulkan like APIs"
homepage = "https://github.com/zakarumych/gpu-descriptor"
documentation = "https://docs.rs/gpu-descriptor"
readme = "README.md"
keywords = [
"gpu",
"vulkan",
"no-std",
]
license = "MIT OR Apache-2.0"
repository = "https://github.com/zakarumych/gpu-descriptor"
[features]
default = ["std"]
std = []
[lib]
name = "gpu_descriptor"
path = "src/lib.rs"
[dependencies.bitflags]
version = "2.6"
default-features = false
[dependencies.gpu-descriptor-types]
version = "0.2"
[dependencies.hashbrown]
version = "0.15"
features = ["default-hasher"]
default-features = false
[dependencies.serde]
version = "1.0"
features = ["derive"]
optional = true
default-features = false
[dependencies.tracing]
version = "0.1"
optional = true
default-features = false
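
# The optional dependencies above create implicit `serde` and `tracing`
# features. A consumer's manifest might enable them like this (a sketch of a
# downstream manifest, not part of this package):
#
#   [dependencies.gpu-descriptor]
#   version = "0.3"
#   features = ["serde", "tracing"]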

vendor/gpu-descriptor/README.md vendored Normal file

@@ -0,0 +1,44 @@
# gpu-descriptor
[![crates](https://img.shields.io/crates/v/gpu-descriptor.svg?style=for-the-badge&label=gpu-descriptor)](https://crates.io/crates/gpu-descriptor)
[![docs](https://img.shields.io/badge/docs.rs-gpu--descriptor-66c2a5?style=for-the-badge&labelColor=555555&logoColor=white)](https://docs.rs/gpu-descriptor)
[![actions](https://img.shields.io/github/workflow/status/zakarumych/gpu-descriptor/badge/master?style=for-the-badge)](https://github.com/zakarumych/gpu-descriptor/actions?query=workflow%3ARust)
[![MIT/Apache](https://img.shields.io/badge/license-MIT%2FApache-blue.svg?style=for-the-badge)](COPYING)
![loc](https://img.shields.io/tokei/lines/github/zakarumych/gpu-descriptor?style=for-the-badge)
Library for Vulkan-like APIs to allocate descriptor sets
from descriptor pools quickly, with minimal overhead and zero fragmentation.
Straightforward usage:
```rust
use gpu_descriptor::DescriptorAllocator;
let mut allocator = DescriptorAllocator::new(max_update_after_bind_descriptors_in_all_pools); // Limit as dictated by the API for the selected hardware

let result = allocator.allocate(
    device, // Implementation of `gpu_descriptor::DescriptorDevice`, provided by the backend.
    layout, // Descriptor set layout recognized by the device's type.
    flags, // Flags specified when the layout was created.
    layout_descriptor_count, // Number of descriptors in the layout.
    count, // Number of sets to allocate.
);
```
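
Freeing mirrors allocation: once none of the sets are referenced by pending
command buffers, return them with `free` and destroy emptied pools with
`cleanup`. A minimal sketch, assuming `sets` is the `Vec` returned by
`allocate` above:

```rust
unsafe {
    allocator.free(device, sets); // Batch-frees sets back to their pools.
    allocator.cleanup(device);    // Destroys pools that became empty.
}
```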
## License
Licensed under either of
* Apache License, Version 2.0, ([license/APACHE](license/APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([license/MIT](license/MIT) or http://opensource.org/licenses/MIT)
at your option.
## Contributions
Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions.
## Support me on Patreon
[![Support me on Patreon](https://img.shields.io/endpoint.svg?url=https%3A%2F%2Fshieldsio-patreon.vercel.app%2Fapi%3Fusername%3Dzakarum%26type%3Dpatrons&style=for-the-badge)](https://patreon.com/zakarum)

vendor/gpu-descriptor/src/allocator.rs vendored Normal file

@@ -0,0 +1,634 @@
use {
alloc::{collections::VecDeque, vec::Vec},
core::{
convert::TryFrom as _,
fmt::{self, Debug, Display},
},
gpu_descriptor_types::{
CreatePoolError, DescriptorDevice, DescriptorPoolCreateFlags, DescriptorTotalCount,
DeviceAllocationError,
},
hashbrown::HashMap,
};
bitflags::bitflags! {
/// Flags to augment descriptor set allocation.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)]
pub struct DescriptorSetLayoutCreateFlags: u32 {
/// Specifies that the descriptor set must be allocated from a\
/// pool with `DescriptorPoolCreateFlags::UPDATE_AFTER_BIND`.
///
/// This flag must be specified if and only if the layout was created with the matching
/// backend-specific flag that allows the layout to have update-after-bind bindings.
const UPDATE_AFTER_BIND = 0x2;
}
}
/// Descriptor set from allocator.
#[derive(Debug)]
pub struct DescriptorSet<S> {
raw: S,
pool_id: u64,
size: DescriptorTotalCount,
update_after_bind: bool,
}
impl<S> DescriptorSet<S> {
/// Returns reference to raw descriptor set.
pub fn raw(&self) -> &S {
&self.raw
}
/// Returns mutable reference to raw descriptor set.
///
/// # Safety
///
/// Object must not be replaced.
pub unsafe fn raw_mut(&mut self) -> &mut S {
&mut self.raw
}
}
/// Error that may occur during descriptor set allocation.
#[derive(Debug)]
pub enum AllocationError {
/// Backend reported that device memory has been exhausted.\
/// Deallocating device memory or other resources may increase the chance
/// that another allocation will succeed.
OutOfDeviceMemory,
/// Backend reported that host memory has been exhausted.\
/// Deallocating host memory may increase the chance that another allocation will succeed.
OutOfHostMemory,
/// The total number of descriptors across all pools created\
/// with the `CREATE_UPDATE_AFTER_BIND_BIT` flag set exceeds `max_update_after_bind_descriptors_in_all_pools`,
/// or fragmentation of the underlying hardware resources occurs.
Fragmentation,
}
impl Display for AllocationError {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
AllocationError::OutOfDeviceMemory => fmt.write_str("Device memory exhausted"),
AllocationError::OutOfHostMemory => fmt.write_str("Host memory exhausted"),
AllocationError::Fragmentation => fmt.write_str("Fragmentation"),
}
}
}
#[cfg(feature = "std")]
impl std::error::Error for AllocationError {}
impl From<CreatePoolError> for AllocationError {
fn from(err: CreatePoolError) -> Self {
match err {
CreatePoolError::OutOfDeviceMemory => AllocationError::OutOfDeviceMemory,
CreatePoolError::OutOfHostMemory => AllocationError::OutOfHostMemory,
CreatePoolError::Fragmentation => AllocationError::Fragmentation,
}
}
}
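// Each new pool holds at least MIN_SETS sets; pool size grows with the
// bucket's allocation history up to MAX_SETS, but is never smaller than the
// pending request (see `DescriptorBucket::new_pool_size`).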
const MIN_SETS: u32 = 64;
const MAX_SETS: u32 = 512;
#[derive(Debug)]
struct DescriptorPool<P> {
raw: P,
/// Number of sets allocated from pool.
allocated: u32,
/// Expected number of sets available.
available: u32,
}
#[derive(Debug)]
struct DescriptorBucket<P> {
offset: u64,
pools: VecDeque<DescriptorPool<P>>,
total: u32,
update_after_bind: bool,
size: DescriptorTotalCount,
}
impl<P> Drop for DescriptorBucket<P> {
#[cfg(feature = "tracing")]
fn drop(&mut self) {
#[cfg(feature = "std")]
{
if std::thread::panicking() {
return;
}
}
if self.total > 0 {
tracing::error!("Descriptor sets were not deallocated");
}
}
#[cfg(all(not(feature = "tracing"), feature = "std"))]
fn drop(&mut self) {
if std::thread::panicking() {
return;
}
if self.total > 0 {
eprintln!("Descriptor sets were not deallocated")
}
}
#[cfg(all(not(feature = "tracing"), not(feature = "std")))]
fn drop(&mut self) {
if self.total > 0 {
panic!("Descriptor sets were not deallocated")
}
}
}
impl<P> DescriptorBucket<P> {
fn new(update_after_bind: bool, size: DescriptorTotalCount) -> Self {
DescriptorBucket {
offset: 0,
pools: VecDeque::new(),
total: 0,
update_after_bind,
size,
}
}
fn new_pool_size(&self, minimal_set_count: u32) -> (DescriptorTotalCount, u32) {
let mut max_sets = MIN_SETS // at least MIN_SETS
    .max(minimal_set_count) // at least enough for this allocation
    .max(self.total.min(MAX_SETS)) // at least as many as allocated so far, capped at MAX_SETS
    .checked_next_power_of_two() // rounded up to the nearest 2^N
    .unwrap_or(i32::MAX as u32);
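// Clamp `max_sets` so that `per-set descriptor count * max_sets` cannot
// overflow `u32` for any descriptor type below.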
max_sets = (u32::MAX / self.size.sampler.max(1)).min(max_sets);
max_sets = (u32::MAX / self.size.combined_image_sampler.max(1)).min(max_sets);
max_sets = (u32::MAX / self.size.sampled_image.max(1)).min(max_sets);
max_sets = (u32::MAX / self.size.storage_image.max(1)).min(max_sets);
max_sets = (u32::MAX / self.size.uniform_texel_buffer.max(1)).min(max_sets);
max_sets = (u32::MAX / self.size.storage_texel_buffer.max(1)).min(max_sets);
max_sets = (u32::MAX / self.size.uniform_buffer.max(1)).min(max_sets);
max_sets = (u32::MAX / self.size.storage_buffer.max(1)).min(max_sets);
max_sets = (u32::MAX / self.size.uniform_buffer_dynamic.max(1)).min(max_sets);
max_sets = (u32::MAX / self.size.storage_buffer_dynamic.max(1)).min(max_sets);
max_sets = (u32::MAX / self.size.input_attachment.max(1)).min(max_sets);
max_sets = (u32::MAX / self.size.acceleration_structure.max(1)).min(max_sets);
max_sets = (u32::MAX / self.size.inline_uniform_block_bytes.max(1)).min(max_sets);
max_sets = (u32::MAX / self.size.inline_uniform_block_bindings.max(1)).min(max_sets);
let mut pool_size = DescriptorTotalCount {
sampler: self.size.sampler * max_sets,
combined_image_sampler: self.size.combined_image_sampler * max_sets,
sampled_image: self.size.sampled_image * max_sets,
storage_image: self.size.storage_image * max_sets,
uniform_texel_buffer: self.size.uniform_texel_buffer * max_sets,
storage_texel_buffer: self.size.storage_texel_buffer * max_sets,
uniform_buffer: self.size.uniform_buffer * max_sets,
storage_buffer: self.size.storage_buffer * max_sets,
uniform_buffer_dynamic: self.size.uniform_buffer_dynamic * max_sets,
storage_buffer_dynamic: self.size.storage_buffer_dynamic * max_sets,
input_attachment: self.size.input_attachment * max_sets,
acceleration_structure: self.size.acceleration_structure * max_sets,
inline_uniform_block_bytes: self.size.inline_uniform_block_bytes * max_sets,
inline_uniform_block_bindings: self.size.inline_uniform_block_bindings * max_sets,
};
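// The backend may reject a pool that holds no descriptors at all;
// fall back to a single sampler for layouts with zero descriptors.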
if pool_size == Default::default() {
pool_size.sampler = 1;
}
(pool_size, max_sets)
}
unsafe fn allocate<L, S>(
&mut self,
device: &impl DescriptorDevice<L, P, S>,
layout: &L,
mut count: u32,
allocated_sets: &mut Vec<DescriptorSet<S>>,
) -> Result<(), AllocationError> {
debug_assert!(usize::try_from(count).is_ok(), "Must be ensured by caller");
if count == 0 {
return Ok(());
}
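// Try pools from newest to oldest: newer pools are larger and more
// likely to have free sets.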
for (index, pool) in self.pools.iter_mut().enumerate().rev() {
if pool.available == 0 {
continue;
}
let allocate = pool.available.min(count);
#[cfg(feature = "tracing")]
tracing::trace!("Allocate `{}` sets from exising pool", allocate);
let result = device.alloc_descriptor_sets(
&mut pool.raw,
(0..allocate).map(|_| layout),
&mut Allocation {
size: self.size,
update_after_bind: self.update_after_bind,
pool_id: index as u64 + self.offset,
sets: allocated_sets,
},
);
match result {
Ok(()) => {}
Err(DeviceAllocationError::OutOfDeviceMemory) => {
return Err(AllocationError::OutOfDeviceMemory)
}
Err(DeviceAllocationError::OutOfHostMemory) => {
return Err(AllocationError::OutOfHostMemory)
}
Err(DeviceAllocationError::FragmentedPool) => {
// Should not happen, but better this than panicking.
#[cfg(feature = "tracing")]
tracing::error!("Unexpectedly failed to allocate descriptor sets due to pool fragmentation");
pool.available = 0;
continue;
}
Err(DeviceAllocationError::OutOfPoolMemory) => {
pool.available = 0;
continue;
}
}
count -= allocate;
pool.available -= allocate;
pool.allocated += allocate;
self.total += allocate;
if count == 0 {
return Ok(());
}
}
while count > 0 {
let (pool_size, max_sets) = self.new_pool_size(count);
#[cfg(feature = "tracing")]
tracing::trace!(
"Create new pool with {} sets and {:?} descriptors",
max_sets,
pool_size,
);
let mut raw = device.create_descriptor_pool(
&pool_size,
max_sets,
if self.update_after_bind {
DescriptorPoolCreateFlags::FREE_DESCRIPTOR_SET
| DescriptorPoolCreateFlags::UPDATE_AFTER_BIND
} else {
DescriptorPoolCreateFlags::FREE_DESCRIPTOR_SET
},
)?;
let pool_id = self.pools.len() as u64 + self.offset;
let allocate = max_sets.min(count);
let result = device.alloc_descriptor_sets(
&mut raw,
(0..allocate).map(|_| layout),
&mut Allocation {
pool_id,
size: self.size,
update_after_bind: self.update_after_bind,
sets: allocated_sets,
},
);
match result {
Ok(()) => {}
Err(err) => {
device.destroy_descriptor_pool(raw);
match err {
DeviceAllocationError::OutOfDeviceMemory => {
return Err(AllocationError::OutOfDeviceMemory)
}
DeviceAllocationError::OutOfHostMemory => {
return Err(AllocationError::OutOfHostMemory)
}
DeviceAllocationError::FragmentedPool => {
// Should not happen, but better this than panicking.
#[cfg(feature = "tracing")]
tracing::error!("Unexpectedly failed to allocate descriptor sets due to pool fragmentation");
}
DeviceAllocationError::OutOfPoolMemory => {}
}
panic!("Failed to allocate descriptor sets from fresh pool");
}
}
count -= allocate;
self.pools.push_back(DescriptorPool {
raw,
allocated: allocate,
available: max_sets - allocate,
});
self.total += allocate;
}
Ok(())
}
unsafe fn free<L, S>(
&mut self,
device: &impl DescriptorDevice<L, P, S>,
raw_sets: impl IntoIterator<Item = S>,
pool_id: u64,
) {
let pool = usize::try_from(pool_id - self.offset)
.ok()
.and_then(|index| self.pools.get_mut(index))
.expect("Invalid pool id");
let mut raw_sets = raw_sets.into_iter();
let mut count = 0;
device.dealloc_descriptor_sets(&mut pool.raw, raw_sets.by_ref().inspect(|_| count += 1));
debug_assert!(
raw_sets.next().is_none(),
"Device must deallocated all sets from iterator"
);
pool.available += count;
pool.allocated -= count;
self.total -= count;
#[cfg(feature = "tracing")]
tracing::trace!("Freed {} from descriptor bucket", count);
while let Some(pool) = self.pools.pop_front() {
if self.pools.is_empty() || pool.allocated != 0 {
self.pools.push_front(pool);
break;
}
#[cfg(feature = "tracing")]
tracing::trace!("Destroying old descriptor pool");
device.destroy_descriptor_pool(pool.raw);
self.offset += 1;
}
}
unsafe fn cleanup<L, S>(&mut self, device: &impl DescriptorDevice<L, P, S>) {
while let Some(pool) = self.pools.pop_front() {
if pool.allocated != 0 {
self.pools.push_front(pool);
break;
}
#[cfg(feature = "tracing")]
tracing::trace!("Destroying old descriptor pool");
device.destroy_descriptor_pool(pool.raw);
self.offset += 1;
}
}
}
/// Descriptor allocator.
/// Can be used to allocate descriptor sets for any layout.
#[derive(Debug)]
pub struct DescriptorAllocator<P, S> {
buckets: HashMap<(DescriptorTotalCount, bool), DescriptorBucket<P>>,
sets_cache: Vec<DescriptorSet<S>>,
raw_sets_cache: Vec<S>,
max_update_after_bind_descriptors_in_all_pools: u32,
current_update_after_bind_descriptors_in_all_pools: u32,
total: u32,
}
impl<P, S> Drop for DescriptorAllocator<P, S> {
fn drop(&mut self) {
if self.buckets.drain().any(|(_, bucket)| bucket.total != 0) {
#[cfg(feature = "tracing")]
tracing::error!(
"`DescriptorAllocator` is dropped while some descriptor sets were not deallocated"
);
}
}
}
impl<P, S> DescriptorAllocator<P, S> {
/// Create new allocator instance.
pub fn new(max_update_after_bind_descriptors_in_all_pools: u32) -> Self {
DescriptorAllocator {
buckets: HashMap::default(),
total: 0,
sets_cache: Vec::new(),
raw_sets_cache: Vec::new(),
max_update_after_bind_descriptors_in_all_pools,
current_update_after_bind_descriptors_in_all_pools: 0,
}
}
/// Allocate descriptor sets with the specified layout.
///
/// # Safety
///
/// * The same `device` instance must be passed to all method calls of
/// one `DescriptorAllocator` instance.
/// * `flags` must match flags that were used to create the layout.
/// * `layout_descriptor_count` must match descriptor numbers in the layout.
pub unsafe fn allocate<L, D>(
&mut self,
device: &D,
layout: &L,
flags: DescriptorSetLayoutCreateFlags,
layout_descriptor_count: &DescriptorTotalCount,
count: u32,
) -> Result<Vec<DescriptorSet<S>>, AllocationError>
where
S: Debug,
L: Debug,
D: DescriptorDevice<L, P, S>,
{
if count == 0 {
return Ok(Vec::new());
}
let descriptor_count = count * layout_descriptor_count.total();
let update_after_bind = flags.contains(DescriptorSetLayoutCreateFlags::UPDATE_AFTER_BIND);
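// Fail fast if this allocation would exceed the device-wide budget of
// update-after-bind descriptors; this case is reported as fragmentation.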
if update_after_bind
&& self.max_update_after_bind_descriptors_in_all_pools
- self.current_update_after_bind_descriptors_in_all_pools
< descriptor_count
{
return Err(AllocationError::Fragmentation);
}
#[cfg(feature = "tracing")]
tracing::trace!(
"Allocating {} sets with layout {:?} @ {:?}",
count,
layout,
layout_descriptor_count
);
let bucket = self
.buckets
.entry((*layout_descriptor_count, update_after_bind))
.or_insert_with(|| DescriptorBucket::new(update_after_bind, *layout_descriptor_count));
match bucket.allocate(device, layout, count, &mut self.sets_cache) {
Ok(()) => {
self.total += descriptor_count;
if update_after_bind {
self.current_update_after_bind_descriptors_in_all_pools += descriptor_count;
}
Ok(core::mem::take(&mut self.sets_cache))
}
Err(err) => {
debug_assert!(self.raw_sets_cache.is_empty());
// Free sets allocated so far.
let mut last = None;
for set in self.sets_cache.drain(..) {
if Some(set.pool_id) != last {
if let Some(last_id) = last {
// Free contiguous range of sets from one pool in one go.
bucket.free(device, self.raw_sets_cache.drain(..), last_id);
}
}
last = Some(set.pool_id);
self.raw_sets_cache.push(set.raw);
}
if let Some(last_id) = last {
bucket.free(device, self.raw_sets_cache.drain(..), last_id);
}
Err(err)
}
}
}
/// Free descriptor sets.
///
/// # Safety
///
/// * The same `device` instance must be passed to all method calls of
/// one `DescriptorAllocator` instance.
/// * None of the descriptor sets may be referenced by any pending command buffers.
/// * All command buffers in which at least one of the descriptor sets is referenced
///   move to the invalid state.
pub unsafe fn free<L, D, I>(&mut self, device: &D, sets: I)
where
D: DescriptorDevice<L, P, S>,
I: IntoIterator<Item = DescriptorSet<S>>,
{
debug_assert!(self.raw_sets_cache.is_empty());
let mut last_key = (EMPTY_COUNT, false);
let mut last_pool_id = None;
let mut descriptor_count = 0;
// Batch freeing of adjacent descriptor sets that belong to the same bucket and pool.
for set in sets {
descriptor_count += set.size.total();
if last_key != (set.size, set.update_after_bind) || last_pool_id != Some(set.pool_id) {
if let Some(pool_id) = last_pool_id {
self.free_raw_sets_cache(device, &last_key, pool_id, descriptor_count);
descriptor_count = 0;
}
last_key = (set.size, set.update_after_bind);
last_pool_id = Some(set.pool_id);
}
self.raw_sets_cache.push(set.raw);
}
if let Some(pool_id) = last_pool_id {
self.free_raw_sets_cache(device, &last_key, pool_id, descriptor_count);
}
}
/// Frees the cached descriptor sets, which must all have been allocated from the same bucket and pool.
unsafe fn free_raw_sets_cache<L, D>(
&mut self,
device: &D,
bucket_key: &(DescriptorTotalCount, bool),
pool_id: u64,
descriptor_count: u32,
) where
D: DescriptorDevice<L, P, S>,
{
let bucket = self
.buckets
.get_mut(bucket_key)
.expect("Set must be allocated from this allocator");
debug_assert!(u32::try_from(self.raw_sets_cache.len())
.ok()
.is_some_and(|count| count <= bucket.total));
bucket.free(device, self.raw_sets_cache.drain(..), pool_id);
self.total -= descriptor_count;
if bucket.update_after_bind {
self.current_update_after_bind_descriptors_in_all_pools -= descriptor_count;
}
}
/// Perform cleanup to allow resources reuse.
///
/// # Safety
///
/// * The same `device` instance must be passed to all method calls of
/// one `DescriptorAllocator` instance.
pub unsafe fn cleanup<L>(&mut self, device: &impl DescriptorDevice<L, P, S>) {
for bucket in self.buckets.values_mut() {
bucket.cleanup(device)
}
self.buckets.retain(|_, bucket| !bucket.pools.is_empty());
}
}
/// Empty descriptor count (zero descriptors of every type).
const EMPTY_COUNT: DescriptorTotalCount = DescriptorTotalCount {
sampler: 0,
combined_image_sampler: 0,
sampled_image: 0,
storage_image: 0,
uniform_texel_buffer: 0,
storage_texel_buffer: 0,
uniform_buffer: 0,
storage_buffer: 0,
uniform_buffer_dynamic: 0,
storage_buffer_dynamic: 0,
input_attachment: 0,
acceleration_structure: 0,
inline_uniform_block_bytes: 0,
inline_uniform_block_bindings: 0,
};
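// Sink passed to `DescriptorDevice::alloc_descriptor_sets`: wraps each raw
// set produced by the device into a `DescriptorSet` tagged with its pool.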
struct Allocation<'a, S> {
update_after_bind: bool,
size: DescriptorTotalCount,
pool_id: u64,
sets: &'a mut Vec<DescriptorSet<S>>,
}
impl<S> Extend<S> for Allocation<'_, S> {
fn extend<T: IntoIterator<Item = S>>(&mut self, iter: T) {
let update_after_bind = self.update_after_bind;
let size = self.size;
let pool_id = self.pool_id;
self.sets.extend(iter.into_iter().map(|raw| DescriptorSet {
raw,
pool_id,
update_after_bind,
size,
}))
}
}

vendor/gpu-descriptor/src/lib.rs vendored Normal file

@@ -0,0 +1,35 @@
//!
//! Library for Vulkan-like APIs to allocate descriptor sets
//! from descriptor pools quickly, with minimal overhead and zero fragmentation.
//!
//! Straightforward usage:
//! ```ignore
//! use gpu_descriptor::DescriptorAllocator;
//!
//! let mut allocator = DescriptorAllocator::new(max_update_after_bind_descriptors_in_all_pools); // Limit as dictated by the API for the selected hardware
//!
//! let result = allocator.allocate(
//!     device, // Implementation of `gpu_descriptor::DescriptorDevice`, provided by the backend.
//!     layout, // Descriptor set layout recognized by the device's type.
//!     flags, // Flags specified when the layout was created.
//!     layout_descriptor_count, // Number of descriptors in the layout.
//!     count, // Number of sets to allocate.
//! );
//! ```
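//!
//! Once none of the sets are referenced by pending command buffers, they can
//! be returned with `free`, and emptied pools destroyed with `cleanup`
//! (a sketch, continuing the snippet above):
//!
//! ```ignore
//! unsafe {
//!     allocator.free(device, sets); // Batch-frees sets back to their pools.
//!     allocator.cleanup(device);    // Destroys pools that became empty.
//! }
//! ```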
//!
#![cfg_attr(not(feature = "std"), no_std)]
#![warn(
missing_docs,
trivial_casts,
trivial_numeric_casts,
unused_extern_crates,
unused_import_braces,
unused_qualifications
)]
extern crate alloc;
mod allocator;
pub use {crate::allocator::*, gpu_descriptor_types::*};