Vendor dependencies for 0.3.0 release

2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions


@@ -0,0 +1,133 @@
#![deny(unsafe_code, clippy::unwrap_used)]
#[cfg(feature = "visualizer")]
pub(crate) mod visualizer;
use std::{backtrace::Backtrace, sync::Arc};
use log::{log, Level};
use super::{AllocationReport, AllocationType, SubAllocator, SubAllocatorBase};
use crate::{AllocationError, Result};
#[derive(Debug)]
pub(crate) struct DedicatedBlockAllocator {
size: u64,
allocated: u64,
/// Only used if [`crate::AllocatorDebugSettings::store_stack_traces`] is [`true`]
name: Option<String>,
backtrace: Arc<Backtrace>,
}
impl DedicatedBlockAllocator {
pub(crate) fn new(size: u64) -> Self {
Self {
size,
allocated: 0,
name: None,
backtrace: Arc::new(Backtrace::disabled()),
}
}
}
impl SubAllocatorBase for DedicatedBlockAllocator {}
impl SubAllocator for DedicatedBlockAllocator {
fn allocate(
&mut self,
size: u64,
_alignment: u64,
_allocation_type: AllocationType,
_granularity: u64,
name: &str,
backtrace: Arc<Backtrace>,
) -> Result<(u64, std::num::NonZeroU64)> {
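// A dedicated block backs exactly one allocation, so a second allocation or a size mismatch is an error.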
if self.allocated != 0 {
return Err(AllocationError::OutOfMemory);
}
if self.size != size {
return Err(AllocationError::Internal(
"DedicatedBlockAllocator size must match allocation size.".into(),
));
}
self.allocated = size;
self.name = Some(name.to_string());
self.backtrace = backtrace;
#[allow(clippy::unwrap_used)]
let dummy_id = std::num::NonZeroU64::new(1).unwrap();
Ok((0, dummy_id))
}
fn free(&mut self, chunk_id: Option<std::num::NonZeroU64>) -> Result<()> {
if chunk_id != std::num::NonZeroU64::new(1) {
Err(AllocationError::Internal("Chunk ID must be 1.".into()))
} else {
self.allocated = 0;
Ok(())
}
}
fn rename_allocation(
&mut self,
chunk_id: Option<std::num::NonZeroU64>,
name: &str,
) -> Result<()> {
if chunk_id != std::num::NonZeroU64::new(1) {
Err(AllocationError::Internal("Chunk ID must be 1.".into()))
} else {
self.name = Some(name.into());
Ok(())
}
}
fn report_memory_leaks(
&self,
log_level: Level,
memory_type_index: usize,
memory_block_index: usize,
) {
let empty = "".to_string();
let name = self.name.as_ref().unwrap_or(&empty);
log!(
log_level,
r#"leak detected: {{
memory type: {}
memory block: {}
dedicated allocation: {{
size: 0x{:x},
name: {},
backtrace: {}
}}
}}"#,
memory_type_index,
memory_block_index,
self.size,
name,
self.backtrace
)
}
fn report_allocations(&self) -> Vec<AllocationReport> {
vec![AllocationReport {
name: self
.name
.clone()
.unwrap_or_else(|| "<Unnamed Dedicated allocation>".to_owned()),
offset: 0,
size: self.size,
#[cfg(feature = "visualizer")]
backtrace: self.backtrace.clone(),
}]
}
fn allocated(&self) -> u64 {
self.allocated
}
fn supports_general_allocations(&self) -> bool {
false
}
}


@@ -0,0 +1,8 @@
use super::DedicatedBlockAllocator;
use crate::visualizer::SubAllocatorVisualizer;
impl SubAllocatorVisualizer for DedicatedBlockAllocator {
fn draw_base_info(&self, ui: &mut egui::Ui) {
ui.label("Dedicated Block");
}
}


@@ -0,0 +1,416 @@
#![deny(unsafe_code, clippy::unwrap_used)]
#[cfg(feature = "visualizer")]
pub(crate) mod visualizer;
use std::{
backtrace::Backtrace,
collections::{HashMap, HashSet},
sync::Arc,
};
use log::{log, Level};
use super::{AllocationReport, AllocationType, SubAllocator, SubAllocatorBase};
use crate::{AllocationError, Result};
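// When `USE_BEST_FIT` is true, `allocate` scans all free chunks and picks the smallest one that
// fits (best-fit); when false, it takes the first free chunk that fits (first-fit).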
const USE_BEST_FIT: bool = true;
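/// Rounds `val` down to the nearest multiple of `alignment` (which must be a power of two).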
fn align_down(val: u64, alignment: u64) -> u64 {
val & !(alignment - 1u64)
}
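/// Rounds `val` up to the nearest multiple of `alignment` (which must be a power of two).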
fn align_up(val: u64, alignment: u64) -> u64 {
align_down(val + alignment - 1u64, alignment)
}
#[derive(Debug)]
pub(crate) struct MemoryChunk {
pub(crate) chunk_id: std::num::NonZeroU64,
pub(crate) size: u64,
pub(crate) offset: u64,
pub(crate) allocation_type: AllocationType,
pub(crate) name: Option<String>,
/// Only used if [`crate::AllocatorDebugSettings::store_stack_traces`] is [`true`]
pub(crate) backtrace: Arc<Backtrace>,
next: Option<std::num::NonZeroU64>,
prev: Option<std::num::NonZeroU64>,
}
#[derive(Debug)]
pub(crate) struct FreeListAllocator {
size: u64,
allocated: u64,
pub(crate) chunk_id_counter: u64,
pub(crate) chunks: HashMap<std::num::NonZeroU64, MemoryChunk>,
free_chunks: HashSet<std::num::NonZeroU64>,
}
/// Test if two suballocations will overlap the same page.
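/// `page_size` is the allocation granularity (e.g. Vulkan's `bufferImageGranularity`): the two
/// suballocations share a page when the page containing the end of A equals the page containing
/// the start of B.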
fn is_on_same_page(offset_a: u64, size_a: u64, offset_b: u64, page_size: u64) -> bool {
let end_a = offset_a + size_a - 1;
let end_page_a = align_down(end_a, page_size);
let start_b = offset_b;
let start_page_b = align_down(start_b, page_size);
end_page_a == start_page_b
}
/// Test whether two allocation types conflict with each other.
fn has_granularity_conflict(type0: AllocationType, type1: AllocationType) -> bool {
if type0 == AllocationType::Free || type1 == AllocationType::Free {
return false;
}
type0 != type1
}
impl FreeListAllocator {
pub(crate) fn new(size: u64) -> Self {
#[allow(clippy::unwrap_used)]
let initial_chunk_id = std::num::NonZeroU64::new(1).unwrap();
let mut chunks = HashMap::default();
chunks.insert(
initial_chunk_id,
MemoryChunk {
chunk_id: initial_chunk_id,
size,
offset: 0,
allocation_type: AllocationType::Free,
name: None,
backtrace: Arc::new(Backtrace::disabled()),
prev: None,
next: None,
},
);
let mut free_chunks = HashSet::default();
free_chunks.insert(initial_chunk_id);
Self {
size,
allocated: 0,
// 0 is not allowed as a chunk ID; 1 is used by the initial chunk, so the next chunk will be 2.
// The allocator uses the current counter value as the ID and then increments the counter.
chunk_id_counter: 2,
chunks,
free_chunks,
}
}
/// Generates a new unique chunk ID
fn get_new_chunk_id(&mut self) -> Result<std::num::NonZeroU64> {
if self.chunk_id_counter == u64::MAX {
// End of chunk id counter reached, no more allocations are possible.
return Err(AllocationError::OutOfMemory);
}
let id = self.chunk_id_counter;
self.chunk_id_counter += 1;
std::num::NonZeroU64::new(id).ok_or_else(|| {
AllocationError::Internal("New chunk id was 0, which is not allowed.".into())
})
}
/// Finds the specified `chunk_id` in the list of free chunks and removes it from the list
fn remove_id_from_free_list(&mut self, chunk_id: std::num::NonZeroU64) {
self.free_chunks.remove(&chunk_id);
}
/// Merges two adjacent chunks. The right chunk is merged into the left chunk
fn merge_free_chunks(
&mut self,
chunk_left: std::num::NonZeroU64,
chunk_right: std::num::NonZeroU64,
) -> Result<()> {
// Gather data from right chunk and remove it
let (right_size, right_next) = {
let chunk = self.chunks.remove(&chunk_right).ok_or_else(|| {
AllocationError::Internal("Chunk ID not present in chunk list.".into())
})?;
self.remove_id_from_free_list(chunk.chunk_id);
(chunk.size, chunk.next)
};
// Merge into left chunk
{
let chunk = self.chunks.get_mut(&chunk_left).ok_or_else(|| {
AllocationError::Internal("Chunk ID not present in chunk list.".into())
})?;
chunk.next = right_next;
chunk.size += right_size;
}
// Patch pointers
if let Some(right_next) = right_next {
let chunk = self.chunks.get_mut(&right_next).ok_or_else(|| {
AllocationError::Internal("Chunk ID not present in chunk list.".into())
})?;
chunk.prev = Some(chunk_left);
}
Ok(())
}
}
impl SubAllocatorBase for FreeListAllocator {}
impl SubAllocator for FreeListAllocator {
fn allocate(
&mut self,
size: u64,
alignment: u64,
allocation_type: AllocationType,
granularity: u64,
name: &str,
backtrace: Arc<Backtrace>,
) -> Result<(u64, std::num::NonZeroU64)> {
let free_size = self.size - self.allocated;
if size > free_size {
return Err(AllocationError::OutOfMemory);
}
let mut best_fit_id: Option<std::num::NonZeroU64> = None;
let mut best_offset = 0u64;
let mut best_aligned_size = 0u64;
let mut best_chunk_size = 0u64;
for current_chunk_id in self.free_chunks.iter() {
let current_chunk = self.chunks.get(current_chunk_id).ok_or_else(|| {
AllocationError::Internal(
"Chunk ID in free list is not present in chunk list.".into(),
)
})?;
if current_chunk.size < size {
continue;
}
let mut offset = align_up(current_chunk.offset, alignment);
if let Some(prev_idx) = current_chunk.prev {
let previous = self.chunks.get(&prev_idx).ok_or_else(|| {
AllocationError::Internal("Invalid previous chunk reference.".into())
})?;
if is_on_same_page(previous.offset, previous.size, offset, granularity)
&& has_granularity_conflict(previous.allocation_type, allocation_type)
{
offset = align_up(offset, granularity);
}
}
let padding = offset - current_chunk.offset;
let aligned_size = padding + size;
if aligned_size > current_chunk.size {
continue;
}
if let Some(next_idx) = current_chunk.next {
let next = self.chunks.get(&next_idx).ok_or_else(|| {
AllocationError::Internal("Invalid next chunk reference.".into())
})?;
if is_on_same_page(offset, size, next.offset, granularity)
&& has_granularity_conflict(allocation_type, next.allocation_type)
{
continue;
}
}
if USE_BEST_FIT {
if best_fit_id.is_none() || current_chunk.size < best_chunk_size {
best_fit_id = Some(*current_chunk_id);
best_aligned_size = aligned_size;
best_offset = offset;
best_chunk_size = current_chunk.size;
};
} else {
best_fit_id = Some(*current_chunk_id);
best_aligned_size = aligned_size;
best_offset = offset;
best_chunk_size = current_chunk.size;
break;
}
}
let first_fit_id = best_fit_id.ok_or(AllocationError::OutOfMemory)?;
let chunk_id = if best_chunk_size > best_aligned_size {
let new_chunk_id = self.get_new_chunk_id()?;
let new_chunk = {
let free_chunk = self.chunks.get_mut(&first_fit_id).ok_or_else(|| {
AllocationError::Internal("Chunk ID must be in chunk list.".into())
})?;
let new_chunk = MemoryChunk {
chunk_id: new_chunk_id,
size: best_aligned_size,
offset: free_chunk.offset,
allocation_type,
name: Some(name.to_string()),
backtrace,
prev: free_chunk.prev,
next: Some(first_fit_id),
};
free_chunk.prev = Some(new_chunk.chunk_id);
free_chunk.offset += best_aligned_size;
free_chunk.size -= best_aligned_size;
new_chunk
};
if let Some(prev_id) = new_chunk.prev {
let prev_chunk = self.chunks.get_mut(&prev_id).ok_or_else(|| {
AllocationError::Internal("Invalid previous chunk reference.".into())
})?;
prev_chunk.next = Some(new_chunk.chunk_id);
}
self.chunks.insert(new_chunk_id, new_chunk);
new_chunk_id
} else {
let chunk = self
.chunks
.get_mut(&first_fit_id)
.ok_or_else(|| AllocationError::Internal("Invalid chunk reference.".into()))?;
chunk.allocation_type = allocation_type;
chunk.name = Some(name.to_string());
chunk.backtrace = backtrace;
self.remove_id_from_free_list(first_fit_id);
first_fit_id
};
self.allocated += best_aligned_size;
Ok((best_offset, chunk_id))
}
fn free(&mut self, chunk_id: Option<std::num::NonZeroU64>) -> Result<()> {
let chunk_id = chunk_id
.ok_or_else(|| AllocationError::Internal("Chunk ID must be a valid value.".into()))?;
let (next_id, prev_id) = {
let chunk = self.chunks.get_mut(&chunk_id).ok_or_else(|| {
AllocationError::Internal(
"Attempting to free chunk that is not in chunk list.".into(),
)
})?;
chunk.allocation_type = AllocationType::Free;
chunk.name = None;
chunk.backtrace = Arc::new(Backtrace::disabled());
self.allocated -= chunk.size;
self.free_chunks.insert(chunk.chunk_id);
(chunk.next, chunk.prev)
};
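// Merge with the neighboring chunks when they are free, to keep fragmentation down.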
if let Some(next_id) = next_id {
if self.chunks[&next_id].allocation_type == AllocationType::Free {
self.merge_free_chunks(chunk_id, next_id)?;
}
}
if let Some(prev_id) = prev_id {
if self.chunks[&prev_id].allocation_type == AllocationType::Free {
self.merge_free_chunks(prev_id, chunk_id)?;
}
}
Ok(())
}
fn rename_allocation(
&mut self,
chunk_id: Option<std::num::NonZeroU64>,
name: &str,
) -> Result<()> {
let chunk_id = chunk_id
.ok_or_else(|| AllocationError::Internal("Chunk ID must be a valid value.".into()))?;
let chunk = self.chunks.get_mut(&chunk_id).ok_or_else(|| {
AllocationError::Internal(
"Attempting to rename chunk that is not in chunk list.".into(),
)
})?;
if chunk.allocation_type == AllocationType::Free {
return Err(AllocationError::Internal(
"Attempting to rename a freed allocation.".into(),
));
}
chunk.name = Some(name.into());
Ok(())
}
fn report_memory_leaks(
&self,
log_level: Level,
memory_type_index: usize,
memory_block_index: usize,
) {
for (chunk_id, chunk) in self.chunks.iter() {
if chunk.allocation_type == AllocationType::Free {
continue;
}
let empty = "".to_string();
let name = chunk.name.as_ref().unwrap_or(&empty);
log!(
log_level,
r#"leak detected: {{
memory type: {}
memory block: {}
chunk: {{
chunk_id: {},
size: 0x{:x},
offset: 0x{:x},
allocation_type: {:?},
name: {},
backtrace: {}
}}
}}"#,
memory_type_index,
memory_block_index,
chunk_id,
chunk.size,
chunk.offset,
chunk.allocation_type,
name,
chunk.backtrace
);
}
}
fn report_allocations(&self) -> Vec<AllocationReport> {
self.chunks
.iter()
.filter(|(_key, chunk)| chunk.allocation_type != AllocationType::Free)
.map(|(_key, chunk)| AllocationReport {
name: chunk
.name
.clone()
.unwrap_or_else(|| "<Unnamed FreeList allocation>".to_owned()),
offset: chunk.offset,
size: chunk.size,
#[cfg(feature = "visualizer")]
backtrace: chunk.backtrace.clone(),
})
.collect::<Vec<_>>()
}
fn allocated(&self) -> u64 {
self.allocated
}
fn supports_general_allocations(&self) -> bool {
true
}
}


@@ -0,0 +1,25 @@
use super::FreeListAllocator;
use crate::visualizer::{
render_memory_chunks_ui, ColorScheme, MemoryChunksVisualizationSettings, SubAllocatorVisualizer,
};
impl SubAllocatorVisualizer for FreeListAllocator {
fn supports_visualization(&self) -> bool {
true
}
fn draw_base_info(&self, ui: &mut egui::Ui) {
ui.label("free list sub-allocator");
ui.label(format!("chunk count: {}", self.chunks.len()));
ui.label(format!("chunk id counter: {}", self.chunk_id_counter));
}
fn draw_visualization(
&self,
color_scheme: &ColorScheme,
ui: &mut egui::Ui,
settings: &MemoryChunksVisualizationSettings,
) {
render_memory_chunks_ui(ui, color_scheme, settings, self.size, self.chunks.values());
}
}


@@ -0,0 +1,162 @@
use std::{backtrace::Backtrace, fmt, ops::Range, sync::Arc};
use log::*;
use crate::result::*;
pub(crate) mod dedicated_block_allocator;
pub(crate) use dedicated_block_allocator::DedicatedBlockAllocator;
pub(crate) mod free_list_allocator;
pub(crate) use free_list_allocator::FreeListAllocator;
#[derive(PartialEq, Copy, Clone, Debug)]
#[repr(u8)]
pub(crate) enum AllocationType {
Free,
Linear,
NonLinear,
}
impl AllocationType {
#[cfg(feature = "visualizer")]
pub fn as_str(self) -> &'static str {
match self {
Self::Free => "Free",
Self::Linear => "Linear",
Self::NonLinear => "Non-Linear",
}
}
}
/// Describes an allocation in the [`AllocatorReport`].
#[derive(Clone)]
pub struct AllocationReport {
/// The name provided to the `allocate()` function.
pub name: String,
/// The offset in bytes of the allocation in its memory block.
pub offset: u64,
/// The size in bytes of the allocation.
pub size: u64,
#[cfg(feature = "visualizer")]
pub(crate) backtrace: Arc<Backtrace>,
}
/// Describes a memory block in the [`AllocatorReport`].
#[derive(Clone)]
pub struct MemoryBlockReport {
/// The size in bytes of this memory block.
pub size: u64,
/// The range of allocations in [`AllocatorReport::allocations`] that are associated
/// to this memory block.
pub allocations: Range<usize>,
}
/// A report that can be generated for informational purposes using `Allocator::generate_report()`.
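///
/// A minimal sketch (assuming an already-created `allocator`) of generating and printing a report:
///
/// ```ignore
/// let report = allocator.generate_report();
/// // `{:.10?}` limits the output to the 10 largest allocations (see the `Debug` impl below).
/// println!("allocator state: {report:.10?}");
/// ```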
#[derive(Clone)]
pub struct AllocatorReport {
/// All live allocations, sub-allocated from memory blocks.
pub allocations: Vec<AllocationReport>,
/// All memory blocks.
pub blocks: Vec<MemoryBlockReport>,
/// Sum of the memory used by all allocations, in bytes.
pub total_allocated_bytes: u64,
/// Sum of the memory reserved by all memory blocks including unallocated regions, in bytes.
pub total_reserved_bytes: u64,
}
impl fmt::Debug for AllocationReport {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let name = if !self.name.is_empty() {
self.name.as_str()
} else {
"--"
};
write!(f, "{name:?}: {}", fmt_bytes(self.size))
}
}
impl fmt::Debug for AllocatorReport {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut allocations = self.allocations.clone();
allocations.sort_by_key(|alloc| std::cmp::Reverse(alloc.size));
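// The formatter's precision (e.g. `{:.5?}`) limits how many of the largest allocations are listed.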
let max_num_allocations_to_print = f.precision().unwrap_or(usize::MAX);
allocations.truncate(max_num_allocations_to_print);
f.debug_struct("AllocatorReport")
.field(
"summary",
&std::format_args!(
"{} / {}",
fmt_bytes(self.total_allocated_bytes),
fmt_bytes(self.total_reserved_bytes)
),
)
.field("blocks", &self.blocks.len())
.field("allocations", &self.allocations.len())
.field("largest", &allocations.as_slice())
.finish()
}
}
#[cfg(feature = "visualizer")]
pub(crate) trait SubAllocatorBase: crate::visualizer::SubAllocatorVisualizer {}
#[cfg(not(feature = "visualizer"))]
pub(crate) trait SubAllocatorBase {}
pub(crate) trait SubAllocator: SubAllocatorBase + fmt::Debug + Sync + Send {
fn allocate(
&mut self,
size: u64,
alignment: u64,
allocation_type: AllocationType,
granularity: u64,
name: &str,
backtrace: Arc<Backtrace>,
) -> Result<(u64, std::num::NonZeroU64)>;
fn free(&mut self, chunk_id: Option<std::num::NonZeroU64>) -> Result<()>;
fn rename_allocation(
&mut self,
chunk_id: Option<std::num::NonZeroU64>,
name: &str,
) -> Result<()>;
fn report_memory_leaks(
&self,
log_level: Level,
memory_type_index: usize,
memory_block_index: usize,
);
fn report_allocations(&self) -> Vec<AllocationReport>;
#[must_use]
fn supports_general_allocations(&self) -> bool;
#[must_use]
fn allocated(&self) -> u64;
/// Helper function: reports whether the suballocator is empty (i.e. it contains no allocations).
#[must_use]
fn is_empty(&self) -> bool {
self.allocated() == 0
}
}
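/// Formats a byte count using binary (1024-based) units, e.g. `fmt_bytes(1536)` returns "1.50 KB".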
pub(crate) fn fmt_bytes(mut amount: u64) -> String {
const SUFFIX: [&str; 5] = ["B", "KB", "MB", "GB", "TB"];
let mut idx = 0;
let mut print_amount = amount as f64;
loop {
if amount < 1024 {
return format!("{:.2} {}", print_amount, SUFFIX[idx]);
}
print_amount = amount as f64 / 1024.0;
amount /= 1024;
idx += 1;
}
}

vendor/gpu-allocator/src/d3d12/mod.rs (vendored, 1151 additions)

File diff suppressed because it is too large.


@@ -0,0 +1,249 @@
#![allow(clippy::new_without_default)]
use windows::Win32::Graphics::Direct3D12::*;
use super::Allocator;
use crate::visualizer::{
render_allocation_reports_ui, AllocationReportVisualizeSettings, ColorScheme,
MemoryChunksVisualizationSettings,
};
struct AllocatorVisualizerBlockWindow {
memory_type_index: usize,
block_index: usize,
settings: MemoryChunksVisualizationSettings,
}
impl AllocatorVisualizerBlockWindow {
fn new(memory_type_index: usize, block_index: usize) -> Self {
Self {
memory_type_index,
block_index,
settings: Default::default(),
}
}
}
pub struct AllocatorVisualizer {
selected_blocks: Vec<AllocatorVisualizerBlockWindow>,
color_scheme: ColorScheme,
breakdown_settings: AllocationReportVisualizeSettings,
}
fn format_heap_type(heap_type: D3D12_HEAP_TYPE) -> &'static str {
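// D3D12_HEAP_TYPE values start at 1, so index 0 is only a placeholder for an invalid value.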
let names = [
"D3D12_HEAP_TYPE_DEFAULT_INVALID",
"D3D12_HEAP_TYPE_DEFAULT",
"D3D12_HEAP_TYPE_UPLOAD",
"D3D12_HEAP_TYPE_READBACK",
"D3D12_HEAP_TYPE_CUSTOM",
];
names[heap_type.0 as usize]
}
fn format_cpu_page_property(prop: D3D12_CPU_PAGE_PROPERTY) -> &'static str {
let names = [
"D3D12_CPU_PAGE_PROPERTY_UNKNOWN",
"D3D12_CPU_PAGE_PROPERTY_NOT_AVAILABLE",
"D3D12_CPU_PAGE_PROPERTY_WRITE_COMBINE",
"D3D12_CPU_PAGE_PROPERTY_WRITE_BACK",
];
names[prop.0 as usize]
}
fn format_memory_pool(pool: D3D12_MEMORY_POOL) -> &'static str {
let names = [
"D3D12_MEMORY_POOL_UNKNOWN",
"D3D12_MEMORY_POOL_L0",
"D3D12_MEMORY_POOL_L1",
];
names[pool.0 as usize]
}
impl AllocatorVisualizer {
pub fn new() -> Self {
Self {
selected_blocks: Vec::default(),
color_scheme: ColorScheme::default(),
breakdown_settings: Default::default(),
}
}
pub fn set_color_scheme(&mut self, color_scheme: ColorScheme) {
self.color_scheme = color_scheme;
}
pub fn render_memory_block_ui(&mut self, ui: &mut egui::Ui, alloc: &Allocator) {
ui.collapsing(
format!("Memory Types: ({} types)", alloc.memory_types.len()),
|ui| {
for (mem_type_idx, mem_type) in alloc.memory_types.iter().enumerate() {
ui.collapsing(
format!(
"Type: {} ({} blocks)",
mem_type_idx,
mem_type.memory_blocks.len()
),
|ui| {
let mut total_block_size = 0;
let mut total_allocated = 0;
for block in mem_type.memory_blocks.iter().flatten() {
total_block_size += block.size;
total_allocated += block.sub_allocator.allocated();
}
let active_block_count = mem_type
.memory_blocks
.iter()
.filter(|block| block.is_some())
.count();
ui.label(format!("heap category: {:?}", mem_type.heap_category));
ui.label(format!(
"Heap Type: {} ({})",
format_heap_type(mem_type.heap_properties.Type),
mem_type.heap_properties.Type.0
));
ui.label(format!(
"CpuPageProperty: {} ({})",
format_cpu_page_property(mem_type.heap_properties.CPUPageProperty),
mem_type.heap_properties.CPUPageProperty.0
));
ui.label(format!(
"MemoryPoolPreference: {} ({})",
format_memory_pool(mem_type.heap_properties.MemoryPoolPreference),
mem_type.heap_properties.MemoryPoolPreference.0
));
ui.label(format!("total block size: {} KiB", total_block_size / 1024));
ui.label(format!("total allocated: {} KiB", total_allocated / 1024));
ui.label(format!(
"committed resource allocations: {}",
mem_type.committed_allocations.num_allocations
));
ui.label(format!(
"total committed resource allocations: {} KiB",
mem_type.committed_allocations.total_size
));
ui.label(format!("block count: {}", active_block_count));
for (block_idx, block) in mem_type.memory_blocks.iter().enumerate() {
let Some(block) = block else { continue };
ui.collapsing(format!("Block: {}", block_idx), |ui| {
ui.label(format!("size: {} KiB", block.size / 1024));
ui.label(format!(
"allocated: {} KiB",
block.sub_allocator.allocated() / 1024
));
ui.label(format!("D3D12 heap: {:?}", block.heap));
block.sub_allocator.draw_base_info(ui);
if block.sub_allocator.supports_visualization()
&& ui.button("visualize").clicked()
&& !self.selected_blocks.iter().any(|x| {
x.memory_type_index == mem_type_idx
&& x.block_index == block_idx
})
{
self.selected_blocks.push(
AllocatorVisualizerBlockWindow::new(
mem_type_idx,
block_idx,
),
);
}
});
}
},
);
}
},
);
}
pub fn render_memory_block_window(
&mut self,
ctx: &egui::Context,
allocator: &Allocator,
open: &mut bool,
) {
egui::Window::new("Allocator Memory Blocks")
.open(open)
.show(ctx, |ui| self.render_memory_block_ui(ui, allocator));
}
pub fn render_memory_block_visualization_windows(
&mut self,
ctx: &egui::Context,
allocator: &Allocator,
) {
// Draw each window.
let color_scheme = &self.color_scheme;
self.selected_blocks.retain_mut(|window| {
let mut open = true;
egui::Window::new(format!(
"Block Visualizer {}:{}",
window.memory_type_index, window.block_index
))
.default_size([1920.0 * 0.5, 1080.0 * 0.5])
.open(&mut open)
.show(ctx, |ui| {
let memblock = &allocator.memory_types[window.memory_type_index].memory_blocks
[window.block_index]
.as_ref();
if let Some(memblock) = memblock {
ui.label(format!(
"Memory type {}, Memory block {}, Block size: {} KiB",
window.memory_type_index,
window.block_index,
memblock.size / 1024
));
window
.settings
.ui(ui, allocator.debug_settings.store_stack_traces);
ui.separator();
memblock
.sub_allocator
.draw_visualization(color_scheme, ui, &window.settings);
} else {
ui.label("Deallocated memory block");
}
});
open
});
}
pub fn render_breakdown_ui(&mut self, ui: &mut egui::Ui, allocator: &Allocator) {
render_allocation_reports_ui(
ui,
&mut self.breakdown_settings,
allocator
.memory_types
.iter()
.flat_map(|memory_type| memory_type.memory_blocks.iter())
.flatten()
.flat_map(|memory_block| memory_block.sub_allocator.report_allocations()),
);
}
pub fn render_breakdown_window(
&mut self,
ctx: &egui::Context,
allocator: &Allocator,
open: &mut bool,
) {
egui::Window::new("Allocator Breakdown")
.open(open)
.show(ctx, |ui| self.render_breakdown_ui(ui, allocator));
}
}

vendor/gpu-allocator/src/lib.rs (vendored, 333 additions)

@@ -0,0 +1,333 @@
//! This crate provides a memory allocator for Vulkan, DirectX 12 and Metal, written fully in Rust.
//!
//! # [Windows-rs] and [winapi]
//!
//! `gpu-allocator` recently migrated from [winapi] to [windows-rs] but still provides convenient helpers to convert to and from [winapi] types, enabled when compiling with the `public-winapi` crate feature.
//!
//! [Windows-rs]: https://github.com/microsoft/windows-rs
//! [winapi]: https://github.com/retep998/winapi-rs
//!
//! # Setting up the Vulkan memory allocator
//!
//! ```no_run
//! # #[cfg(feature = "vulkan")]
//! # fn main() {
//! use gpu_allocator::vulkan::*;
//! # use ash::vk;
//! # let device = todo!();
//! # let instance = todo!();
//! # let physical_device = todo!();
//!
//! let mut allocator = Allocator::new(&AllocatorCreateDesc {
//! instance,
//! device,
//! physical_device,
//! debug_settings: Default::default(),
//! buffer_device_address: true, // Ideally, check the BufferDeviceAddressFeatures struct.
//! allocation_sizes: Default::default(),
//! });
//! # }
//! # #[cfg(not(feature = "vulkan"))]
//! # fn main() {}
//! ```
//!
//! # Simple Vulkan allocation example
//!
//! ```no_run
//! # #[cfg(feature = "vulkan")]
//! # fn main() {
//! use gpu_allocator::vulkan::*;
//! use gpu_allocator::MemoryLocation;
//! # use ash::vk;
//! # let device = todo!();
//! # let instance = todo!();
//! # let physical_device = todo!();
//! # let mut allocator = Allocator::new(&AllocatorCreateDesc {
//! # instance,
//! # device,
//! # physical_device,
//! # debug_settings: Default::default(),
//! # buffer_device_address: true, // Ideally, check the BufferDeviceAddressFeatures struct.
//! # allocation_sizes: Default::default(),
//! # }).unwrap();
//!
//! // Setup vulkan info
//! let vk_info = vk::BufferCreateInfo::default()
//! .size(512)
//! .usage(vk::BufferUsageFlags::STORAGE_BUFFER);
//!
//! let buffer = unsafe { device.create_buffer(&vk_info, None) }.unwrap();
//! let requirements = unsafe { device.get_buffer_memory_requirements(buffer) };
//!
//! let allocation = allocator
//! .allocate(&AllocationCreateDesc {
//! name: "Example allocation",
//! requirements,
//! location: MemoryLocation::CpuToGpu,
//! linear: true, // Buffers are always linear
//! allocation_scheme: AllocationScheme::GpuAllocatorManaged,
//! }).unwrap();
//!
//! // Bind memory to the buffer
//! unsafe { device.bind_buffer_memory(buffer, allocation.memory(), allocation.offset()).unwrap() };
//!
//! // Cleanup
//! allocator.free(allocation).unwrap();
//! unsafe { device.destroy_buffer(buffer, None) };
//! # }
//! # #[cfg(not(feature = "vulkan"))]
//! # fn main() {}
//! ```
//!
//! # Setting up the D3D12 memory allocator
//!
//! ```no_run
//! # #[cfg(feature = "d3d12")]
//! # fn main() {
//! use gpu_allocator::d3d12::*;
//! # let device = todo!();
//!
//! let mut allocator = Allocator::new(&AllocatorCreateDesc {
//! device: ID3D12DeviceVersion::Device(device),
//! debug_settings: Default::default(),
//! allocation_sizes: Default::default(),
//! });
//! # }
//! # #[cfg(not(feature = "d3d12"))]
//! # fn main() {}
//! ```
//!
//! # Simple d3d12 allocation example
//!
//! ```no_run
//! # #[cfg(feature = "d3d12")]
//! # fn main() -> windows::core::Result<()> {
//! use gpu_allocator::d3d12::*;
//! use gpu_allocator::MemoryLocation;
//! # use windows::Win32::Graphics::{Dxgi, Direct3D12};
//! # let device = todo!();
//!
//! # let mut allocator = Allocator::new(&AllocatorCreateDesc {
//! # device: ID3D12DeviceVersion::Device(device),
//! # debug_settings: Default::default(),
//! # allocation_sizes: Default::default(),
//! # }).unwrap();
//!
//! let buffer_desc = Direct3D12::D3D12_RESOURCE_DESC {
//! Dimension: Direct3D12::D3D12_RESOURCE_DIMENSION_BUFFER,
//! Alignment: 0,
//! Width: 512,
//! Height: 1,
//! DepthOrArraySize: 1,
//! MipLevels: 1,
//! Format: Dxgi::Common::DXGI_FORMAT_UNKNOWN,
//! SampleDesc: Dxgi::Common::DXGI_SAMPLE_DESC {
//! Count: 1,
//! Quality: 0,
//! },
//! Layout: Direct3D12::D3D12_TEXTURE_LAYOUT_ROW_MAJOR,
//! Flags: Direct3D12::D3D12_RESOURCE_FLAG_NONE,
//! };
//! let allocation_desc = AllocationCreateDesc::from_d3d12_resource_desc(
//! &allocator.device(),
//! &buffer_desc,
//! "Example allocation",
//! MemoryLocation::GpuOnly,
//! );
//! let allocation = allocator.allocate(&allocation_desc).unwrap();
//! let mut resource: Option<Direct3D12::ID3D12Resource> = None;
//! let hr = unsafe {
//! device.CreatePlacedResource(
//! allocation.heap(),
//! allocation.offset(),
//! &buffer_desc,
//! Direct3D12::D3D12_RESOURCE_STATE_COMMON,
//! None,
//! &mut resource,
//! )
//! }?;
//!
//! // Cleanup
//! drop(resource);
//! allocator.free(allocation).unwrap();
//! # Ok(())
//! # }
//! # #[cfg(not(feature = "d3d12"))]
//! # fn main() {}
//! ```
//!
//! # Setting up the Metal memory allocator
//!
//! ```no_run
//! # #[cfg(feature = "metal")]
//! # fn main() {
//! # use std::sync::Arc;
//! use gpu_allocator::metal::*;
//!
//! # let device = Arc::new(metal::Device::system_default().unwrap());
//! let mut allocator = Allocator::new(&AllocatorCreateDesc {
//! device: device.clone(),
//! debug_settings: Default::default(),
//! allocation_sizes: Default::default(),
//! });
//! # }
//! # #[cfg(not(feature = "metal"))]
//! # fn main() {}
//! ```
//!
//! # Simple Metal allocation example
//! ```no_run
//! # #[cfg(feature = "metal")]
//! # fn main() {
//! # use std::sync::Arc;
//! use gpu_allocator::metal::*;
//! use gpu_allocator::MemoryLocation;
//! # let device = Arc::new(metal::Device::system_default().unwrap());
//! # let mut allocator = Allocator::new(&AllocatorCreateDesc {
//! # device: device.clone(),
//! # debug_settings: Default::default(),
//! # allocation_sizes: Default::default(),
//! # })
//! # .unwrap();
//!
//! let allocation_desc = AllocationCreateDesc::buffer(
//! &device,
//! "Example allocation",
//! 512, // size in bytes
//! gpu_allocator::MemoryLocation::GpuOnly,
//! );
//! let allocation = allocator.allocate(&allocation_desc).unwrap();
//! let resource = allocation.make_buffer().unwrap();
//!
//! // Cleanup
//! drop(resource);
//! allocator.free(&allocation).unwrap();
//! # }
//! # #[cfg(not(feature = "metal"))]
//! # fn main() {}
//! ```
mod result;
pub use result::*;
pub(crate) mod allocator;
pub use allocator::{AllocationReport, AllocatorReport, MemoryBlockReport};
#[cfg(feature = "visualizer")]
pub mod visualizer;
#[cfg(feature = "vulkan")]
pub mod vulkan;
#[cfg(all(windows, feature = "d3d12"))]
pub mod d3d12;
#[cfg(all(any(target_os = "macos", target_os = "ios"), feature = "metal"))]
pub mod metal;
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum MemoryLocation {
/// The allocated resource is stored at an unknown memory location; let the driver decide the best location
Unknown,
/// Store the allocation in GPU-only accessible memory; typically this is the fastest memory for the GPU
/// to access, and most allocations should live here.
GpuOnly,
/// Memory useful for uploading data to the GPU and potentially for constant buffers
CpuToGpu,
/// Memory useful for CPU readback of data
GpuToCpu,
}
#[derive(Copy, Clone, Debug)]
pub struct AllocatorDebugSettings {
/// Logs out debugging information about the various heaps of the current device on startup
pub log_memory_information: bool,
/// Logs out all memory leaks on shutdown with log level Warn
pub log_leaks_on_shutdown: bool,
/// Stores a copy of the full backtrace for every allocation made. This makes it easier to debug leaks
/// and other memory issues, but storing stack traces has a RAM overhead, so it should be disabled
/// in shipping applications.
pub store_stack_traces: bool,
/// Log out every allocation as it's being made, with log level Debug. Rather spammy, so off by default
pub log_allocations: bool,
/// Log out every free as it's being called, with log level Debug. Rather spammy, so off by default
pub log_frees: bool,
/// Log out stack traces when either `log_allocations` or `log_frees` is enabled.
pub log_stack_traces: bool,
}
impl Default for AllocatorDebugSettings {
fn default() -> Self {
Self {
log_memory_information: false,
log_leaks_on_shutdown: true,
store_stack_traces: false,
log_allocations: false,
log_frees: false,
log_stack_traces: false,
}
}
}
/// The sizes of the memory blocks that the allocator will create.
///
/// Useful for tuning the allocator to your application's needs. For example, most games will be fine with the default
/// values, but an app might want to use smaller block sizes to reduce the amount of memory used.
///
/// Block sizes are clamped between 4MB and 256MB, and rounded up to the nearest multiple of 4MB for alignment reasons.
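///
/// For example, `AllocationSizes::new(3 * 1024 * 1024, 130 * 1024 * 1024)` yields a 4MB device
/// block size (clamped up to the minimum) and a 132MB host block size (rounded up to the next
/// multiple of 4MB).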
#[derive(Clone, Copy, Debug)]
pub struct AllocationSizes {
/// The size of the memory blocks that will be created for the GPU only memory type.
///
/// Defaults to 256MB.
device_memblock_size: u64,
/// The size of the memory blocks that will be created for the CPU visible memory types.
///
/// Defaults to 64MB.
host_memblock_size: u64,
}
impl AllocationSizes {
pub fn new(device_memblock_size: u64, host_memblock_size: u64) -> Self {
const FOUR_MB: u64 = 4 * 1024 * 1024;
const TWO_HUNDRED_AND_FIFTY_SIX_MB: u64 = 256 * 1024 * 1024;
let mut device_memblock_size =
device_memblock_size.clamp(FOUR_MB, TWO_HUNDRED_AND_FIFTY_SIX_MB);
let mut host_memblock_size =
host_memblock_size.clamp(FOUR_MB, TWO_HUNDRED_AND_FIFTY_SIX_MB);
if device_memblock_size % FOUR_MB != 0 {
let val = device_memblock_size / FOUR_MB + 1;
device_memblock_size = val * FOUR_MB;
log::warn!(
"Device memory block size must be a multiple of 4MB, clamping to {}MB",
device_memblock_size / 1024 / 1024
)
}
if host_memblock_size % FOUR_MB != 0 {
let val = host_memblock_size / FOUR_MB + 1;
host_memblock_size = val * FOUR_MB;
log::warn!(
"Host memory block size must be a multiple of 4MB, clamping to {}MB",
host_memblock_size / 1024 / 1024
)
}
Self {
device_memblock_size,
host_memblock_size,
}
}
}
impl Default for AllocationSizes {
fn default() -> Self {
Self {
device_memblock_size: 256 * 1024 * 1024,
host_memblock_size: 64 * 1024 * 1024,
}
}
}

vendor/gpu-allocator/src/metal/mod.rs (vendored, 522 additions)

@@ -0,0 +1,522 @@
#![deny(clippy::unimplemented, clippy::unwrap_used, clippy::ok_expect)]
use std::{backtrace::Backtrace, sync::Arc};
use log::debug;
use crate::{
allocator::{self, AllocatorReport, MemoryBlockReport},
AllocationError, AllocationSizes, AllocatorDebugSettings, MemoryLocation, Result,
};
fn memory_location_to_metal(location: MemoryLocation) -> metal::MTLResourceOptions {
match location {
MemoryLocation::GpuOnly => metal::MTLResourceOptions::StorageModePrivate,
MemoryLocation::CpuToGpu | MemoryLocation::GpuToCpu | MemoryLocation::Unknown => {
metal::MTLResourceOptions::StorageModeShared
}
}
}
#[derive(Debug)]
pub struct Allocation {
chunk_id: Option<std::num::NonZeroU64>,
offset: u64,
size: u64,
memory_block_index: usize,
memory_type_index: usize,
heap: Arc<metal::Heap>,
name: Option<Box<str>>,
}
impl Allocation {
pub fn heap(&self) -> &metal::Heap {
self.heap.as_ref()
}
pub fn make_buffer(&self) -> Option<metal::Buffer> {
let resource =
self.heap
.new_buffer_with_offset(self.size, self.heap.resource_options(), self.offset);
if let Some(resource) = &resource {
if let Some(name) = &self.name {
resource.set_label(name);
}
}
resource
}
pub fn make_texture(&self, desc: &metal::TextureDescriptor) -> Option<metal::Texture> {
let resource = self.heap.new_texture_with_offset(desc, self.offset);
if let Some(resource) = &resource {
if let Some(name) = &self.name {
resource.set_label(name);
}
}
resource
}
pub fn make_acceleration_structure(&self) -> Option<metal::AccelerationStructure> {
let resource = self
.heap
.new_acceleration_structure_with_size_offset(self.size, self.offset);
if let Some(resource) = &resource {
if let Some(name) = &self.name {
resource.set_label(name);
}
}
resource
}
fn is_null(&self) -> bool {
self.chunk_id.is_none()
}
}
#[derive(Clone, Debug)]
pub struct AllocationCreateDesc<'a> {
/// Name of the allocation, for tracking and debugging purposes
pub name: &'a str,
/// Location where the memory allocation should be stored
pub location: MemoryLocation,
pub size: u64,
pub alignment: u64,
}
impl<'a> AllocationCreateDesc<'a> {
pub fn buffer(
device: &metal::Device,
name: &'a str,
length: u64,
location: MemoryLocation,
) -> Self {
let size_and_align =
device.heap_buffer_size_and_align(length, memory_location_to_metal(location));
Self {
name,
location,
size: size_and_align.size,
alignment: size_and_align.align,
}
}
pub fn texture(device: &metal::Device, name: &'a str, desc: &metal::TextureDescriptor) -> Self {
let size_and_align = device.heap_texture_size_and_align(desc);
Self {
name,
location: match desc.storage_mode() {
metal::MTLStorageMode::Shared
| metal::MTLStorageMode::Managed
| metal::MTLStorageMode::Memoryless => MemoryLocation::Unknown,
metal::MTLStorageMode::Private => MemoryLocation::GpuOnly,
},
size: size_and_align.size,
alignment: size_and_align.align,
}
}
pub fn acceleration_structure_with_size(
device: &metal::Device,
name: &'a str,
size: u64,
location: MemoryLocation,
) -> Self {
let size_and_align = device.heap_acceleration_structure_size_and_align_with_size(size);
Self {
name,
location,
size: size_and_align.size,
alignment: size_and_align.align,
}
}
}
pub struct Allocator {
device: Arc<metal::Device>,
debug_settings: AllocatorDebugSettings,
memory_types: Vec<MemoryType>,
allocation_sizes: AllocationSizes,
}
#[derive(Debug)]
pub struct AllocatorCreateDesc {
pub device: Arc<metal::Device>,
pub debug_settings: AllocatorDebugSettings,
pub allocation_sizes: AllocationSizes,
}
#[derive(Debug)]
pub struct CommittedAllocationStatistics {
pub num_allocations: usize,
pub total_size: u64,
}
#[derive(Debug)]
struct MemoryBlock {
heap: Arc<metal::Heap>,
size: u64,
sub_allocator: Box<dyn allocator::SubAllocator>,
}
impl MemoryBlock {
fn new(
device: &Arc<metal::Device>,
size: u64,
heap_descriptor: &metal::HeapDescriptor,
dedicated: bool,
memory_location: MemoryLocation,
) -> Result<Self> {
heap_descriptor.set_size(size);
let heap = Arc::new(device.new_heap(heap_descriptor));
heap.set_label(&format!("MemoryBlock {memory_location:?}"));
let sub_allocator: Box<dyn allocator::SubAllocator> = if dedicated {
Box::new(allocator::DedicatedBlockAllocator::new(size))
} else {
Box::new(allocator::FreeListAllocator::new(size))
};
Ok(Self {
heap,
size,
sub_allocator,
})
}
}
#[derive(Debug)]
struct MemoryType {
memory_blocks: Vec<Option<MemoryBlock>>,
_committed_allocations: CommittedAllocationStatistics,
memory_location: MemoryLocation,
heap_properties: metal::HeapDescriptor,
memory_type_index: usize,
active_general_blocks: usize,
}
impl MemoryType {
fn allocate(
&mut self,
device: &Arc<metal::Device>,
desc: &AllocationCreateDesc<'_>,
backtrace: Arc<Backtrace>,
allocation_sizes: &AllocationSizes,
) -> Result<Allocation> {
let allocation_type = allocator::AllocationType::Linear;
let memblock_size = if self.heap_properties.storage_mode() == metal::MTLStorageMode::Private
{
allocation_sizes.device_memblock_size
} else {
allocation_sizes.host_memblock_size
};
let size = desc.size;
let alignment = desc.alignment;
// Create a dedicated block for large memory allocations
if size > memblock_size {
let mem_block = MemoryBlock::new(
device,
size,
&self.heap_properties,
true,
self.memory_location,
)?;
let block_index = self.memory_blocks.iter().position(|block| block.is_none());
let block_index = match block_index {
Some(i) => {
self.memory_blocks[i].replace(mem_block);
i
}
None => {
self.memory_blocks.push(Some(mem_block));
self.memory_blocks.len() - 1
}
};
let mem_block = self.memory_blocks[block_index]
.as_mut()
.ok_or_else(|| AllocationError::Internal("Memory block must be Some".into()))?;
let (offset, chunk_id) = mem_block.sub_allocator.allocate(
size,
alignment,
allocation_type,
1,
desc.name,
backtrace,
)?;
return Ok(Allocation {
chunk_id: Some(chunk_id),
size,
offset,
memory_block_index: block_index,
memory_type_index: self.memory_type_index,
heap: mem_block.heap.clone(),
name: Some(desc.name.into()),
});
}
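// Otherwise try to sub-allocate from an existing block, scanning from the highest index down and
// remembering an empty slot so a newly created block can reuse it.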
let mut empty_block_index = None;
for (mem_block_i, mem_block) in self.memory_blocks.iter_mut().enumerate().rev() {
if let Some(mem_block) = mem_block {
let allocation = mem_block.sub_allocator.allocate(
size,
alignment,
allocation_type,
1,
desc.name,
backtrace.clone(),
);
match allocation {
Ok((offset, chunk_id)) => {
return Ok(Allocation {
chunk_id: Some(chunk_id),
offset,
size,
memory_block_index: mem_block_i,
memory_type_index: self.memory_type_index,
heap: mem_block.heap.clone(),
name: Some(desc.name.into()),
});
}
Err(AllocationError::OutOfMemory) => {} // Block is full, continue search.
Err(err) => return Err(err), // Unhandled error, return.
}
} else if empty_block_index.is_none() {
empty_block_index = Some(mem_block_i);
}
}
let new_memory_block = MemoryBlock::new(
device,
memblock_size,
&self.heap_properties,
false,
self.memory_location,
)?;
let new_block_index = if let Some(block_index) = empty_block_index {
self.memory_blocks[block_index] = Some(new_memory_block);
block_index
} else {
self.memory_blocks.push(Some(new_memory_block));
self.memory_blocks.len() - 1
};
self.active_general_blocks += 1;
let mem_block = self.memory_blocks[new_block_index]
.as_mut()
.ok_or_else(|| AllocationError::Internal("Memory block must be Some".into()))?;
let allocation = mem_block.sub_allocator.allocate(
size,
alignment,
allocation_type,
1,
desc.name,
backtrace,
);
let (offset, chunk_id) = match allocation {
Err(AllocationError::OutOfMemory) => Err(AllocationError::Internal(
"Allocation that must succeed failed. This is a bug in the allocator.".into(),
)),
a => a,
}?;
Ok(Allocation {
chunk_id: Some(chunk_id),
offset,
size,
memory_block_index: new_block_index,
memory_type_index: self.memory_type_index,
heap: mem_block.heap.clone(),
name: Some(desc.name.into()),
})
}
fn free(&mut self, allocation: &Allocation) -> Result<()> {
let block_idx = allocation.memory_block_index;
let mem_block = self.memory_blocks[block_idx]
.as_mut()
.ok_or_else(|| AllocationError::Internal("Memory block must be Some.".into()))?;
mem_block.sub_allocator.free(allocation.chunk_id)?;
if mem_block.sub_allocator.is_empty() {
if mem_block.sub_allocator.supports_general_allocations() {
if self.active_general_blocks > 1 {
let block = self.memory_blocks[block_idx].take();
if block.is_none() {
return Err(AllocationError::Internal(
"Memory block must be Some.".into(),
));
}
// Note that `block` will be destroyed on `drop` here
self.active_general_blocks -= 1;
}
} else {
let block = self.memory_blocks[block_idx].take();
if block.is_none() {
return Err(AllocationError::Internal(
"Memory block must be Some.".into(),
));
}
// Note that `block` will be destroyed on `drop` here
}
}
Ok(())
}
}
impl Allocator {
pub fn new(desc: &AllocatorCreateDesc) -> Result<Self> {
let heap_types = [
(MemoryLocation::GpuOnly, {
let heap_desc = metal::HeapDescriptor::new();
heap_desc.set_cpu_cache_mode(metal::MTLCPUCacheMode::DefaultCache);
heap_desc.set_storage_mode(metal::MTLStorageMode::Private);
heap_desc.set_heap_type(metal::MTLHeapType::Placement);
heap_desc
}),
(MemoryLocation::CpuToGpu, {
let heap_desc = metal::HeapDescriptor::new();
heap_desc.set_cpu_cache_mode(metal::MTLCPUCacheMode::WriteCombined);
heap_desc.set_storage_mode(metal::MTLStorageMode::Shared);
heap_desc.set_heap_type(metal::MTLHeapType::Placement);
heap_desc
}),
(MemoryLocation::GpuToCpu, {
let heap_desc = metal::HeapDescriptor::new();
heap_desc.set_cpu_cache_mode(metal::MTLCPUCacheMode::DefaultCache);
heap_desc.set_storage_mode(metal::MTLStorageMode::Shared);
heap_desc.set_heap_type(metal::MTLHeapType::Placement);
heap_desc
}),
];
let memory_types = heap_types
.into_iter()
.enumerate()
.map(|(i, (memory_location, heap_descriptor))| MemoryType {
memory_blocks: vec![],
_committed_allocations: CommittedAllocationStatistics {
num_allocations: 0,
total_size: 0,
},
memory_location,
heap_properties: heap_descriptor,
memory_type_index: i,
active_general_blocks: 0,
})
.collect();
Ok(Self {
device: desc.device.clone(),
debug_settings: desc.debug_settings,
memory_types,
allocation_sizes: desc.allocation_sizes,
})
}
pub fn allocate(&mut self, desc: &AllocationCreateDesc<'_>) -> Result<Allocation> {
let size = desc.size;
let alignment = desc.alignment;
let backtrace = Arc::new(if self.debug_settings.store_stack_traces {
Backtrace::force_capture()
} else {
Backtrace::disabled()
});
if self.debug_settings.log_allocations {
debug!(
"Allocating `{}` of {} bytes with an alignment of {}.",
&desc.name, size, alignment
);
if self.debug_settings.log_stack_traces {
let backtrace = Backtrace::force_capture();
debug!("Allocation stack trace: {}", backtrace);
}
}
if size == 0 || !alignment.is_power_of_two() {
return Err(AllocationError::InvalidAllocationCreateDesc);
}
// Find memory type
let memory_type = self
.memory_types
.iter_mut()
.find(|memory_type| {
// Is location compatible
desc.location == MemoryLocation::Unknown
|| desc.location == memory_type.memory_location
})
.ok_or(AllocationError::NoCompatibleMemoryTypeFound)?;
memory_type.allocate(&self.device, desc, backtrace, &self.allocation_sizes)
}
pub fn free(&mut self, allocation: &Allocation) -> Result<()> {
if self.debug_settings.log_frees {
let name = allocation.name.as_deref().unwrap_or("<null>");
debug!("Freeing `{}`.", name);
if self.debug_settings.log_stack_traces {
let backtrace = Backtrace::force_capture();
debug!("Free stack trace: {}", backtrace);
}
}
if allocation.is_null() {
return Ok(());
}
self.memory_types[allocation.memory_type_index].free(allocation)?;
Ok(())
}
pub fn get_heaps(&self) -> Vec<&metal::HeapRef> {
// Get all memory blocks
let mut heaps: Vec<&metal::HeapRef> = Vec::new();
for memory_type in &self.memory_types {
for block in memory_type.memory_blocks.iter().flatten() {
heaps.push(block.heap.as_ref());
}
}
heaps
}
pub fn generate_report(&self) -> AllocatorReport {
let mut allocations = vec![];
let mut blocks = vec![];
let mut total_reserved_bytes = 0;
for memory_type in &self.memory_types {
for block in memory_type.memory_blocks.iter().flatten() {
total_reserved_bytes += block.size;
let first_allocation = allocations.len();
allocations.extend(block.sub_allocator.report_allocations());
blocks.push(MemoryBlockReport {
size: block.size,
allocations: first_allocation..allocations.len(),
});
}
}
let total_allocated_bytes = allocations.iter().map(|report| report.size).sum();
AllocatorReport {
allocations,
blocks,
total_allocated_bytes,
total_reserved_bytes,
}
}
}

vendor/gpu-allocator/src/result.rs (vendored, 25 additions)

@@ -0,0 +1,25 @@
use thiserror::Error;
#[derive(Error, Debug)]
pub enum AllocationError {
#[error("Out of memory")]
OutOfMemory,
#[error("Failed to map memory: {0}")]
FailedToMap(String),
#[error("No compatible memory type available")]
NoCompatibleMemoryTypeFound,
#[error("Invalid AllocationCreateDesc")]
InvalidAllocationCreateDesc,
#[error("Invalid AllocatorCreateDesc {0}")]
InvalidAllocatorCreateDesc(String),
#[error("Internal error: {0}")]
Internal(String),
#[error("Initial `BARRIER_LAYOUT` needs at least `Device10`")]
BarrierLayoutNeedsDevice10,
#[error("Castable formats require enhanced barriers")]
CastableFormatsRequiresEnhancedBarriers,
#[error("Castable formats require at least `Device12`")]
CastableFormatsRequiresAtLeastDevice12,
}
pub type Result<V, E = AllocationError> = ::std::result::Result<V, E>;


@@ -0,0 +1,141 @@
use std::backtrace::BacktraceStatus;
use egui::{Label, Response, Sense, Ui, WidgetText};
use egui_extras::{Column, TableBuilder};
use crate::allocator::{fmt_bytes, AllocationReport};
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
pub(crate) enum AllocationReportVisualizeSorting {
#[default]
None,
Idx,
Name,
Size,
}
#[derive(Debug, Default)]
pub(crate) struct AllocationReportVisualizeSettings {
pub filter: String,
pub sorting: AllocationReportVisualizeSorting,
pub ascending: bool,
}
pub(crate) fn render_allocation_reports_ui(
ui: &mut Ui,
settings: &mut AllocationReportVisualizeSettings,
allocations: impl IntoIterator<Item = AllocationReport>,
) {
ui.horizontal(|ui| {
ui.label("Filter");
ui.text_edit_singleline(&mut settings.filter);
});
let breakdown_filter = settings.filter.to_lowercase();
let mut allocations = allocations
.into_iter()
.enumerate()
.filter(|(_, report)| report.name.to_lowercase().contains(&breakdown_filter))
.collect::<Vec<_>>();
let total_size_under_filter: u64 = allocations.iter().map(|a| a.1.size).sum();
ui.label(format!("Total: {}", fmt_bytes(total_size_under_filter)));
let row_height = ui.text_style_height(&egui::TextStyle::Body);
let table = TableBuilder::new(ui)
.striped(true)
.resizable(true)
.column(Column::exact(30.0))
.column(Column::initial(300.0).at_least(200.0).clip(true))
.column(Column::exact(70.0));
fn header_button(ui: &mut Ui, label: &str) -> Response {
let label = WidgetText::from(label).strong();
let label = Label::new(label).sense(Sense::click());
ui.add(label)
}
let table = table.header(row_height, |mut row| {
row.col(|ui| {
if header_button(ui, "Idx").clicked() {
if settings.sorting == AllocationReportVisualizeSorting::Idx {
settings.ascending = !settings.ascending;
} else {
settings.sorting = AllocationReportVisualizeSorting::Idx;
settings.ascending = false;
}
}
});
row.col(|ui| {
if header_button(ui, "Name").clicked() {
if settings.sorting == AllocationReportVisualizeSorting::Name {
settings.ascending = !settings.ascending;
} else {
settings.sorting = AllocationReportVisualizeSorting::Name;
settings.ascending = false;
}
}
});
row.col(|ui| {
if header_button(ui, "Size").clicked() {
if settings.sorting == AllocationReportVisualizeSorting::Size {
settings.ascending = !settings.ascending;
} else {
settings.sorting = AllocationReportVisualizeSorting::Size;
settings.ascending = false;
}
}
});
});
match (settings.sorting, settings.ascending) {
(AllocationReportVisualizeSorting::None, _) => {}
(AllocationReportVisualizeSorting::Idx, true) => allocations.sort_by_key(|(idx, _)| *idx),
(AllocationReportVisualizeSorting::Idx, false) => {
allocations.sort_by_key(|(idx, _)| std::cmp::Reverse(*idx))
}
(AllocationReportVisualizeSorting::Name, true) => {
allocations.sort_by(|(_, alloc1), (_, alloc2)| alloc1.name.cmp(&alloc2.name))
}
(AllocationReportVisualizeSorting::Name, false) => {
allocations.sort_by(|(_, alloc1), (_, alloc2)| alloc1.name.cmp(&alloc2.name).reverse())
}
(AllocationReportVisualizeSorting::Size, true) => {
allocations.sort_by_key(|(_, alloc)| alloc.size)
}
(AllocationReportVisualizeSorting::Size, false) => {
allocations.sort_by_key(|(_, alloc)| std::cmp::Reverse(alloc.size))
}
}
table.body(|mut body| {
for (idx, alloc) in allocations {
body.row(row_height, |mut row| {
let AllocationReport {
name,
size,
backtrace,
..
} = alloc;
row.col(|ui| {
ui.label(idx.to_string());
});
let resp = row.col(|ui| {
ui.label(name);
});
if backtrace.status() == BacktraceStatus::Captured {
resp.1.on_hover_ui(|ui| {
ui.label(backtrace.to_string());
});
}
row.col(|ui| {
ui.label(fmt_bytes(size));
});
});
}
});
}


@@ -0,0 +1,133 @@
use std::backtrace::BacktraceStatus;
use egui::{Color32, DragValue, Rect, ScrollArea, Sense, Ui, Vec2};
use super::ColorScheme;
use crate::allocator::free_list_allocator::MemoryChunk;
pub(crate) struct MemoryChunksVisualizationSettings {
pub width_in_bytes: u64,
pub show_backtraces: bool,
}
impl Default for MemoryChunksVisualizationSettings {
fn default() -> Self {
Self {
width_in_bytes: 1024,
show_backtraces: false,
}
}
}
impl MemoryChunksVisualizationSettings {
pub fn ui(&mut self, ui: &mut Ui, store_stack_traces: bool) {
if store_stack_traces {
ui.checkbox(&mut self.show_backtraces, "Show backtraces");
}
// Slider for changing the 'zoom' level of the visualizer.
const BYTES_PER_UNIT_MIN: i32 = 1;
const BYTES_PER_UNIT_MAX: i32 = 1024 * 1024;
ui.horizontal(|ui| {
ui.add(
DragValue::new(&mut self.width_in_bytes)
.clamp_range(BYTES_PER_UNIT_MIN..=BYTES_PER_UNIT_MAX)
.speed(10.0),
);
ui.label("Bytes per line");
});
}
}
pub(crate) fn render_memory_chunks_ui<'a>(
ui: &mut Ui,
color_scheme: &ColorScheme,
settings: &MemoryChunksVisualizationSettings,
total_size_in_bytes: u64,
data: impl IntoIterator<Item = &'a MemoryChunk>,
) {
let line_height = ui.text_style_height(&egui::TextStyle::Body);
let number_of_rows =
(total_size_in_bytes as f32 / settings.width_in_bytes as f32).ceil() as usize;
ScrollArea::new([false, true]).show_rows(ui, line_height, number_of_rows, |ui, range| {
// Convert the visible row range into a byte range
let start_in_bytes = range.start as u64 * settings.width_in_bytes;
let end_in_bytes = range.end as u64 * settings.width_in_bytes;
let mut data = data
.into_iter()
.filter(|chunk| {
(chunk.offset + chunk.size) > start_in_bytes && chunk.offset < end_in_bytes
})
.collect::<Vec<_>>();
data.sort_by_key(|chunk| chunk.offset);
let screen_width = ui.available_width();
let mut cursor_idx = 0;
let mut bytes_required = data[cursor_idx].offset + data[cursor_idx].size - start_in_bytes;
for _ in range {
ui.horizontal(|ui| {
let mut bytes_left = settings.width_in_bytes;
let mut cursor = ui.cursor().min;
while cursor_idx < data.len() && bytes_left > 0 {
// Block is depleted, so reset for more chunks
while bytes_required == 0 {
cursor_idx += 1;
if cursor_idx < data.len() {
bytes_required = data[cursor_idx].size;
}
continue;
}
let bytes_used = bytes_required.min(bytes_left);
let width_used =
bytes_used as f32 * screen_width / settings.width_in_bytes as f32;
// Draw the rectangle
let resp = ui.allocate_rect(
Rect::from_min_size(cursor, Vec2::new(width_used, line_height)),
Sense::click(),
);
if ui.is_rect_visible(resp.rect) {
ui.painter().rect(
resp.rect,
egui::Rounding::ZERO,
color_scheme
.get_allocation_type_color(data[cursor_idx].allocation_type),
egui::Stroke::new(1.0, Color32::BLACK),
);
resp.on_hover_ui_at_pointer(|ui| {
let chunk = &data[cursor_idx];
ui.label(format!("id: {}", chunk.chunk_id));
ui.label(format!("offset: 0x{:x}", chunk.offset));
ui.label(format!("size: 0x{:x}", chunk.size));
ui.label(format!(
"allocation_type: {}",
chunk.allocation_type.as_str()
));
if let Some(name) = &chunk.name {
ui.label(format!("name: {}", name));
}
if settings.show_backtraces
&& chunk.backtrace.status() == BacktraceStatus::Captured
{
ui.label(chunk.backtrace.to_string());
}
});
}
// Update our cursors
cursor.x += width_used;
bytes_left -= bytes_used;
bytes_required -= bytes_used;
}
});
}
});
}


@@ -0,0 +1,56 @@
use egui::{Color32, Ui};
mod allocation_reports;
mod memory_chunks;
pub(crate) use allocation_reports::*;
pub(crate) use memory_chunks::*;
use crate::allocator::AllocationType;
pub const DEFAULT_COLOR_ALLOCATION_TYPE_FREE: Color32 = Color32::from_rgb(159, 159, 159); // gray
pub const DEFAULT_COLOR_ALLOCATION_TYPE_LINEAR: Color32 = Color32::from_rgb(91, 206, 250); // blue
pub const DEFAULT_COLOR_ALLOCATION_TYPE_NON_LINEAR: Color32 = Color32::from_rgb(250, 169, 184); // pink
#[derive(Clone)]
pub struct ColorScheme {
pub free_color: Color32,
pub linear_color: Color32,
pub non_linear_color: Color32,
}
impl Default for ColorScheme {
fn default() -> Self {
Self {
free_color: DEFAULT_COLOR_ALLOCATION_TYPE_FREE,
linear_color: DEFAULT_COLOR_ALLOCATION_TYPE_LINEAR,
non_linear_color: DEFAULT_COLOR_ALLOCATION_TYPE_NON_LINEAR,
}
}
}
impl ColorScheme {
pub(crate) fn get_allocation_type_color(&self, allocation_type: AllocationType) -> Color32 {
match allocation_type {
AllocationType::Free => self.free_color,
AllocationType::Linear => self.linear_color,
AllocationType::NonLinear => self.non_linear_color,
}
}
}
pub(crate) trait SubAllocatorVisualizer {
fn supports_visualization(&self) -> bool {
false
}
fn draw_base_info(&self, ui: &mut Ui) {
ui.label("No sub allocator information available");
}
fn draw_visualization(
&self,
_color_scheme: &ColorScheme,
_ui: &mut Ui,
_settings: &MemoryChunksVisualizationSettings,
) {
}
}

vendor/gpu-allocator/src/vulkan/mod.rs (vendored, 979 additions)

@@ -0,0 +1,979 @@
#![deny(clippy::unimplemented, clippy::unwrap_used, clippy::ok_expect)]
#[cfg(feature = "visualizer")]
mod visualizer;
use std::{backtrace::Backtrace, fmt, marker::PhantomData, sync::Arc};
use ash::vk;
use log::{debug, Level};
#[cfg(feature = "visualizer")]
pub use visualizer::AllocatorVisualizer;
use super::allocator;
use crate::{
allocator::{AllocatorReport, MemoryBlockReport},
AllocationError, AllocationSizes, AllocatorDebugSettings, MemoryLocation, Result,
};
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum AllocationScheme {
/// Perform a dedicated, driver-managed allocation for the given buffer, allowing
/// the driver to perform optimizations on this type of allocation.
DedicatedBuffer(vk::Buffer),
/// Perform a dedicated, driver-managed allocation for the given image, allowing
/// the driver to perform optimizations on this type of allocation.
DedicatedImage(vk::Image),
/// The memory for this resource will be allocated and managed by gpu-allocator.
GpuAllocatorManaged,
}
#[derive(Clone, Debug)]
pub struct AllocationCreateDesc<'a> {
/// Name of the allocation, for tracking and debugging purposes
pub name: &'a str,
/// Vulkan memory requirements for an allocation
pub requirements: vk::MemoryRequirements,
/// Location where the memory allocation should be stored
pub location: MemoryLocation,
/// Whether the resource is linear (buffer / linear texture) or a regular (tiled) texture.
pub linear: bool,
/// Determines how this allocation should be managed.
pub allocation_scheme: AllocationScheme,
}
/// Wrapper type to only mark a raw pointer [`Send`] + [`Sync`] without having to
/// mark the entire [`Allocation`] as such, instead relying on the compiler to
/// auto-implement this or fail if fields are added that violate this constraint.
#[derive(Clone, Copy, Debug)]
pub(crate) struct SendSyncPtr(std::ptr::NonNull<std::ffi::c_void>);
// Sending is fine because mapped_ptr does not change based on the thread we are in
unsafe impl Send for SendSyncPtr {}
// Sync is also okay because Sending &Allocation is safe: a mutable reference
// to the data in mapped_ptr is never exposed while `self` is immutably borrowed.
// In order to break safety guarantees, the user needs to `unsafe`ly dereference
// `mapped_ptr` themselves.
unsafe impl Sync for SendSyncPtr {}
pub struct AllocatorCreateDesc {
pub instance: ash::Instance,
pub device: ash::Device,
pub physical_device: vk::PhysicalDevice,
pub debug_settings: AllocatorDebugSettings,
pub buffer_device_address: bool,
pub allocation_sizes: AllocationSizes,
}
/// A piece of allocated memory.
///
/// Could be contained in its own individual underlying memory object or as a sub-region
/// of a larger allocation.
///
/// # Copying data into a CPU-mapped [`Allocation`]
///
/// You'll very likely want to copy data into CPU-mapped [`Allocation`]s in order to send that data to the GPU.
/// Doing this data transfer correctly without invoking undefined behavior can be quite fraught and non-obvious<sup>[\[1\]]</sup>.
///
/// To help you do this correctly, [`Allocation`] implements [`presser::Slab`], which means you can directly
/// pass it in to many of `presser`'s [helper functions] (for example, [`copy_from_slice_to_offset`]).
///
/// In most cases, this will work perfectly. However, note that if you try to use an [`Allocation`] as a
/// [`Slab`] and it is not valid to do so (if it is not CPU-mapped or if its `size > isize::MAX`),
/// you will cause a panic. If you aren't sure about these conditions, you may use [`Allocation::try_as_mapped_slab`].
///
/// ## Example
///
/// Say we've created an [`Allocation`] called `my_allocation`, which is CPU-mapped.
/// ```ignore
/// let mut my_allocation: Allocation = my_allocator.allocate(...)?;
/// ```
///
/// And we want to fill it with some data in the form of a `my_gpu_data: Vec<MyGpuVector>`, defined as such:
///
/// ```ignore
/// // note that this is size(12) but align(16), thus we have 4 padding bytes.
/// // this would mean a `&[MyGpuVector]` is invalid to cast as a `&[u8]`, but
/// // we can still use `presser` to copy it directly in a valid manner.
/// #[repr(C, align(16))]
/// #[derive(Clone, Copy)]
/// struct MyGpuVector {
/// x: f32,
/// y: f32,
/// z: f32,
/// }
///
/// let my_gpu_data: Vec<MyGpuVector> = make_vertex_data();
/// ```
///
/// Depending on how the data we're copying will be used, the Vulkan device may have a minimum
/// alignment requirement for that data:
///
/// ```ignore
/// let min_gpu_align = my_vulkan_device_specifications.min_alignment_thing;
/// ```
///
/// Finally, we can use [`presser::copy_from_slice_to_offset_with_align`] to perform the copy,
/// simply passing `&mut my_allocation` since [`Allocation`] implements [`Slab`].
///
/// ```ignore
/// let copy_record = presser::copy_from_slice_to_offset_with_align(
/// &my_gpu_data[..], // a slice containing all elements of my_gpu_data
/// &mut my_allocation, // our Allocation
/// 0, // start as close to the beginning of the allocation as possible
/// min_gpu_align, // the minimum alignment we queried previously
/// )?;
/// ```
///
/// It's important to note that the data may not have actually been copied starting at the requested
/// `start_offset` (0 in the example above) depending on the alignment of the underlying allocation
/// as well as the alignment requirements of `MyGpuVector` and the `min_gpu_align` we passed in. Thus,
/// we can query the `copy_record` for the actual starting offset:
///
/// ```ignore
/// let actual_data_start_offset = copy_record.copy_start_offset;
/// ```
///
/// ## Safety
///
/// It is technically not fully safe to use an [`Allocation`] as a [`presser::Slab`] because we can't validate that the
/// GPU is not using the data in the buffer while `self` is borrowed. However, trying
/// to validate this statically is really hard and the community has basically decided that
/// requiring `unsafe` for functions like this creates too much "unsafe-noise", ultimately making it
/// harder to debug more insidious unsafety that is unrelated to GPU-CPU sync issues.
///
/// So, as would always be the case, you must ensure the GPU
/// is not using the data in `self` for the duration that you hold the returned [`MappedAllocationSlab`].
///
/// [`Slab`]: presser::Slab
/// [`copy_from_slice_to_offset`]: presser::copy_from_slice_to_offset
/// [helper functions]: presser#functions
/// [\[1\]]: presser#motivation
#[derive(Debug)]
pub struct Allocation {
chunk_id: Option<std::num::NonZeroU64>,
offset: u64,
size: u64,
memory_block_index: usize,
memory_type_index: usize,
device_memory: vk::DeviceMemory,
mapped_ptr: Option<SendSyncPtr>,
dedicated_allocation: bool,
memory_properties: vk::MemoryPropertyFlags,
name: Option<Box<str>>,
}
impl Allocation {
/// Tries to borrow the CPU-mapped memory that backs this allocation as a [`presser::Slab`], which you can then
/// use to safely copy data into the raw, potentially-uninitialized buffer.
/// See [the documentation of Allocation][Allocation#example] for an example of this.
///
/// Returns [`None`] if `self.mapped_ptr()` is `None`, or if `self.size()` is greater than `isize::MAX` because
/// this could lead to undefined behavior.
///
/// Note that [`Allocation`] implements [`Slab`] natively, so you can actually pass this allocation as a [`Slab`]
/// directly. However, if `self` is not actually a valid [`Slab`] (this function would return `None` as described above),
/// then trying to use it as a [`Slab`] will panic.
///
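/// A hedged sketch (the `bytes: &[u8]` slice is assumed to be data the caller wants to upload):
///
/// ```ignore
/// if let Some(mut slab) = my_allocation.try_as_mapped_slab() {
///     let record = presser::copy_from_slice_to_offset(bytes, &mut slab, 0)?;
///     let actual_start = record.copy_start_offset;
/// }
/// ```
///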
/// # Safety
///
/// See the note about safety in [the documentation of Allocation][Allocation#safety]
///
/// [`Slab`]: presser::Slab
// best to be explicit where the lifetime is coming from since we're doing unsafe things
// and relying on an inferred lifetime type in the PhantomData below
#[allow(clippy::needless_lifetimes)]
pub fn try_as_mapped_slab<'a>(&'a mut self) -> Option<MappedAllocationSlab<'a>> {
let mapped_ptr = self.mapped_ptr()?.cast().as_ptr();
if self.size > isize::MAX as _ {
return None;
}
// this will always succeed since size is <= isize::MAX which is < usize::MAX
let size = self.size as usize;
Some(MappedAllocationSlab {
_borrowed_alloc: PhantomData,
mapped_ptr,
size,
})
}
pub fn chunk_id(&self) -> Option<std::num::NonZeroU64> {
self.chunk_id
}
/// Returns the [`vk::MemoryPropertyFlags`] of this allocation.
pub fn memory_properties(&self) -> vk::MemoryPropertyFlags {
self.memory_properties
}
/// Returns the [`vk::DeviceMemory`] object that is backing this allocation.
/// This memory object can be shared with multiple other allocations and shouldn't be freed (or allocated from)
/// outside of this library, because that will lead to undefined behavior.
///
/// # Safety
/// The result of this function can safely be used to pass into [`ash::Device::bind_buffer_memory()`],
/// [`ash::Device::bind_image_memory()`] etc. It is exposed for this reason. Keep in mind to also
/// pass [`Self::offset()`] along to those.
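///
/// A hedged binding sketch (the `device` and `buffer` handles are assumed to be the caller's
/// own `ash` objects):
///
/// ```ignore
/// unsafe {
///     device.bind_buffer_memory(buffer, allocation.memory(), allocation.offset())?;
/// }
/// ```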
pub unsafe fn memory(&self) -> vk::DeviceMemory {
self.device_memory
}
/// Returns [`true`] if this allocation is using a dedicated underlying allocation.
pub fn is_dedicated(&self) -> bool {
self.dedicated_allocation
}
/// Returns the offset of the allocation on the [`vk::DeviceMemory`].
/// When binding the memory to a buffer or image, this offset needs to be supplied as well.
pub fn offset(&self) -> u64 {
self.offset
}
/// Returns the size of the allocation
pub fn size(&self) -> u64 {
self.size
}
/// Returns a valid mapped pointer if the memory is host visible, otherwise it will return None.
/// The pointer already points to the exact memory region of the suballocation, so no offset needs to be applied.
pub fn mapped_ptr(&self) -> Option<std::ptr::NonNull<std::ffi::c_void>> {
self.mapped_ptr.map(|SendSyncPtr(p)| p)
}
/// Returns a valid mapped slice if the memory is host visible, otherwise it will return None.
/// The slice already references the exact memory region of the allocation, so no offset needs to be applied.
pub fn mapped_slice(&self) -> Option<&[u8]> {
self.mapped_ptr().map(|ptr| unsafe {
std::slice::from_raw_parts(ptr.cast().as_ptr(), self.size as usize)
})
}
/// Returns a valid mapped mutable slice if the memory is host visible, otherwise it will return None.
/// The slice already references the exact memory region of the allocation, so no offset needs to be applied.
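///
/// A hedged upload sketch (assumes a host-visible, host-coherent allocation and a `bytes: &[u8]`
/// that fits inside it; non-coherent memory would additionally require a flush):
///
/// ```ignore
/// if let Some(slice) = allocation.mapped_slice_mut() {
///     slice[..bytes.len()].copy_from_slice(bytes);
/// }
/// ```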
pub fn mapped_slice_mut(&mut self) -> Option<&mut [u8]> {
self.mapped_ptr().map(|ptr| unsafe {
std::slice::from_raw_parts_mut(ptr.cast().as_ptr(), self.size as usize)
})
}
pub fn is_null(&self) -> bool {
self.chunk_id.is_none()
}
}
impl Default for Allocation {
fn default() -> Self {
Self {
chunk_id: None,
offset: 0,
size: 0,
memory_block_index: !0,
memory_type_index: !0,
device_memory: vk::DeviceMemory::null(),
mapped_ptr: None,
memory_properties: vk::MemoryPropertyFlags::empty(),
name: None,
dedicated_allocation: false,
}
}
}
/// A wrapper struct over a borrowed [`Allocation`] that infallibly implements [`presser::Slab`].
///
/// This type should be acquired by calling [`Allocation::try_as_mapped_slab`].
pub struct MappedAllocationSlab<'a> {
_borrowed_alloc: PhantomData<&'a mut Allocation>,
mapped_ptr: *mut u8,
size: usize,
}
// SAFETY: See the safety comment of Allocation::try_as_mapped_slab above.
unsafe impl<'a> presser::Slab for MappedAllocationSlab<'a> {
fn base_ptr(&self) -> *const u8 {
self.mapped_ptr
}
fn base_ptr_mut(&mut self) -> *mut u8 {
self.mapped_ptr
}
fn size(&self) -> usize {
self.size
}
}
// SAFETY: See the safety comment of Allocation::try_as_mapped_slab above.
unsafe impl presser::Slab for Allocation {
fn base_ptr(&self) -> *const u8 {
self.mapped_ptr
.expect("tried to use a non-mapped Allocation as a Slab")
.0
.as_ptr()
.cast()
}
fn base_ptr_mut(&mut self) -> *mut u8 {
self.mapped_ptr
.expect("tried to use a non-mapped Allocation as a Slab")
.0
.as_ptr()
.cast()
}
fn size(&self) -> usize {
if self.size > isize::MAX as _ {
panic!("tried to use an Allocation with size > isize::MAX as a Slab")
}
// this will always work if the above passed
self.size as usize
}
}
#[derive(Debug)]
pub(crate) struct MemoryBlock {
pub(crate) device_memory: vk::DeviceMemory,
pub(crate) size: u64,
pub(crate) mapped_ptr: Option<SendSyncPtr>,
pub(crate) sub_allocator: Box<dyn allocator::SubAllocator>,
#[cfg(feature = "visualizer")]
pub(crate) dedicated_allocation: bool,
}
impl MemoryBlock {
fn new(
device: &ash::Device,
size: u64,
mem_type_index: usize,
mapped: bool,
buffer_device_address: bool,
allocation_scheme: AllocationScheme,
requires_personal_block: bool,
) -> Result<Self> {
let device_memory = {
let alloc_info = vk::MemoryAllocateInfo::default()
.allocation_size(size)
.memory_type_index(mem_type_index as u32);
let allocation_flags = vk::MemoryAllocateFlags::DEVICE_ADDRESS;
let mut flags_info = vk::MemoryAllocateFlagsInfo::default().flags(allocation_flags);
// TODO(manon): Test this based on if the device has this feature enabled or not
let alloc_info = if buffer_device_address {
alloc_info.push_next(&mut flags_info)
} else {
alloc_info
};
// Flag the memory as dedicated if required.
let mut dedicated_memory_info = vk::MemoryDedicatedAllocateInfo::default();
let alloc_info = match allocation_scheme {
AllocationScheme::DedicatedBuffer(buffer) => {
dedicated_memory_info = dedicated_memory_info.buffer(buffer);
alloc_info.push_next(&mut dedicated_memory_info)
}
AllocationScheme::DedicatedImage(image) => {
dedicated_memory_info = dedicated_memory_info.image(image);
alloc_info.push_next(&mut dedicated_memory_info)
}
AllocationScheme::GpuAllocatorManaged => alloc_info,
};
unsafe { device.allocate_memory(&alloc_info, None) }.map_err(|e| match e {
vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => AllocationError::OutOfMemory,
e => AllocationError::Internal(format!(
"Unexpected error in vkAllocateMemory: {:?}",
e
)),
})?
};
let mapped_ptr = mapped
.then(|| {
unsafe {
device.map_memory(
device_memory,
0,
vk::WHOLE_SIZE,
vk::MemoryMapFlags::empty(),
)
}
.map_err(|e| {
unsafe { device.free_memory(device_memory, None) };
AllocationError::FailedToMap(e.to_string())
})
.and_then(|p| {
std::ptr::NonNull::new(p).map(SendSyncPtr).ok_or_else(|| {
AllocationError::FailedToMap("Returned mapped pointer is null".to_owned())
})
})
})
.transpose()?;
let sub_allocator: Box<dyn allocator::SubAllocator> = if allocation_scheme
!= AllocationScheme::GpuAllocatorManaged
|| requires_personal_block
{
Box::new(allocator::DedicatedBlockAllocator::new(size))
} else {
Box::new(allocator::FreeListAllocator::new(size))
};
Ok(Self {
device_memory,
size,
mapped_ptr,
sub_allocator,
#[cfg(feature = "visualizer")]
dedicated_allocation: allocation_scheme != AllocationScheme::GpuAllocatorManaged,
})
}
fn destroy(self, device: &ash::Device) {
if self.mapped_ptr.is_some() {
unsafe { device.unmap_memory(self.device_memory) };
}
unsafe { device.free_memory(self.device_memory, None) };
}
}
#[derive(Debug)]
pub(crate) struct MemoryType {
pub(crate) memory_blocks: Vec<Option<MemoryBlock>>,
pub(crate) memory_properties: vk::MemoryPropertyFlags,
pub(crate) memory_type_index: usize,
pub(crate) heap_index: usize,
pub(crate) mappable: bool,
pub(crate) active_general_blocks: usize,
pub(crate) buffer_device_address: bool,
}
impl MemoryType {
fn allocate(
&mut self,
device: &ash::Device,
desc: &AllocationCreateDesc<'_>,
granularity: u64,
backtrace: Arc<Backtrace>,
allocation_sizes: &AllocationSizes,
) -> Result<Allocation> {
let allocation_type = if desc.linear {
allocator::AllocationType::Linear
} else {
allocator::AllocationType::NonLinear
};
let memblock_size = if self
.memory_properties
.contains(vk::MemoryPropertyFlags::HOST_VISIBLE)
{
allocation_sizes.host_memblock_size
} else {
allocation_sizes.device_memblock_size
};
let size = desc.requirements.size;
let alignment = desc.requirements.alignment;
let dedicated_allocation = desc.allocation_scheme != AllocationScheme::GpuAllocatorManaged;
let requires_personal_block = size > memblock_size;
// Create a dedicated block for large allocations, or for allocations that require their own dedicated device memory.
if dedicated_allocation || requires_personal_block {
let mem_block = MemoryBlock::new(
device,
size,
self.memory_type_index,
self.mappable,
self.buffer_device_address,
desc.allocation_scheme,
requires_personal_block,
)?;
let mut block_index = None;
for (i, block) in self.memory_blocks.iter().enumerate() {
if block.is_none() {
block_index = Some(i);
break;
}
}
let block_index = match block_index {
Some(i) => {
self.memory_blocks[i].replace(mem_block);
i
}
None => {
self.memory_blocks.push(Some(mem_block));
self.memory_blocks.len() - 1
}
};
let mem_block = self.memory_blocks[block_index]
.as_mut()
.ok_or_else(|| AllocationError::Internal("Memory block must be Some".into()))?;
let (offset, chunk_id) = mem_block.sub_allocator.allocate(
size,
alignment,
allocation_type,
granularity,
desc.name,
backtrace,
)?;
return Ok(Allocation {
chunk_id: Some(chunk_id),
offset,
size,
memory_block_index: block_index,
memory_type_index: self.memory_type_index,
device_memory: mem_block.device_memory,
mapped_ptr: mem_block.mapped_ptr,
memory_properties: self.memory_properties,
name: Some(desc.name.into()),
dedicated_allocation,
});
}
let mut empty_block_index = None;
for (mem_block_i, mem_block) in self.memory_blocks.iter_mut().enumerate().rev() {
if let Some(mem_block) = mem_block {
let allocation = mem_block.sub_allocator.allocate(
size,
alignment,
allocation_type,
granularity,
desc.name,
backtrace.clone(),
);
match allocation {
Ok((offset, chunk_id)) => {
let mapped_ptr = if let Some(SendSyncPtr(mapped_ptr)) = mem_block.mapped_ptr
{
let offset_ptr = unsafe { mapped_ptr.as_ptr().add(offset as usize) };
std::ptr::NonNull::new(offset_ptr).map(SendSyncPtr)
} else {
None
};
return Ok(Allocation {
chunk_id: Some(chunk_id),
offset,
size,
memory_block_index: mem_block_i,
memory_type_index: self.memory_type_index,
device_memory: mem_block.device_memory,
memory_properties: self.memory_properties,
mapped_ptr,
dedicated_allocation: false,
name: Some(desc.name.into()),
});
}
Err(err) => match err {
AllocationError::OutOfMemory => {} // Block is full, continue search.
_ => return Err(err), // Unhandled error, return.
},
}
} else if empty_block_index.is_none() {
empty_block_index = Some(mem_block_i);
}
}
let new_memory_block = MemoryBlock::new(
device,
memblock_size,
self.memory_type_index,
self.mappable,
self.buffer_device_address,
desc.allocation_scheme,
false,
)?;
let new_block_index = if let Some(block_index) = empty_block_index {
self.memory_blocks[block_index] = Some(new_memory_block);
block_index
} else {
self.memory_blocks.push(Some(new_memory_block));
self.memory_blocks.len() - 1
};
self.active_general_blocks += 1;
let mem_block = self.memory_blocks[new_block_index]
.as_mut()
.ok_or_else(|| AllocationError::Internal("Memory block must be Some".into()))?;
let allocation = mem_block.sub_allocator.allocate(
size,
alignment,
allocation_type,
granularity,
desc.name,
backtrace,
);
let (offset, chunk_id) = match allocation {
Ok(value) => value,
Err(err) => match err {
AllocationError::OutOfMemory => {
return Err(AllocationError::Internal(
"Allocation that must succeed failed. This is a bug in the allocator."
.into(),
))
}
_ => return Err(err),
},
};
let mapped_ptr = if let Some(SendSyncPtr(mapped_ptr)) = mem_block.mapped_ptr {
let offset_ptr = unsafe { mapped_ptr.as_ptr().add(offset as usize) };
std::ptr::NonNull::new(offset_ptr).map(SendSyncPtr)
} else {
None
};
Ok(Allocation {
chunk_id: Some(chunk_id),
offset,
size,
memory_block_index: new_block_index,
memory_type_index: self.memory_type_index,
device_memory: mem_block.device_memory,
mapped_ptr,
memory_properties: self.memory_properties,
name: Some(desc.name.into()),
dedicated_allocation: false,
})
}
#[allow(clippy::needless_pass_by_value)]
fn free(&mut self, allocation: Allocation, device: &ash::Device) -> Result<()> {
let block_idx = allocation.memory_block_index;
let mem_block = self.memory_blocks[block_idx]
.as_mut()
.ok_or_else(|| AllocationError::Internal("Memory block must be Some.".into()))?;
mem_block.sub_allocator.free(allocation.chunk_id)?;
if mem_block.sub_allocator.is_empty() {
if mem_block.sub_allocator.supports_general_allocations() {
if self.active_general_blocks > 1 {
let block = self.memory_blocks[block_idx].take();
let block = block.ok_or_else(|| {
AllocationError::Internal("Memory block must be Some.".into())
})?;
block.destroy(device);
self.active_general_blocks -= 1;
}
} else {
let block = self.memory_blocks[block_idx].take();
let block = block.ok_or_else(|| {
AllocationError::Internal("Memory block must be Some.".into())
})?;
block.destroy(device);
}
}
Ok(())
}
}
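/// A Vulkan device memory allocator.
///
/// A hedged end-to-end sketch (the `instance`, `device`, `physical_device`, and `buffer`
/// handles are assumed to come from the caller's own `ash` setup):
///
/// ```ignore
/// let mut allocator = Allocator::new(&AllocatorCreateDesc {
///     instance: instance.clone(),
///     device: device.clone(),
///     physical_device,
///     debug_settings: Default::default(),
///     buffer_device_address: false,
///     allocation_sizes: AllocationSizes::default(),
/// })?;
///
/// let requirements = unsafe { device.get_buffer_memory_requirements(buffer) };
/// let allocation = allocator.allocate(&AllocationCreateDesc {
///     name: "example buffer",
///     requirements,
///     location: MemoryLocation::CpuToGpu,
///     linear: true,
///     allocation_scheme: AllocationScheme::GpuAllocatorManaged,
/// })?;
///
/// unsafe { device.bind_buffer_memory(buffer, allocation.memory(), allocation.offset())? };
/// // ... fill allocation.mapped_slice_mut(), record command buffers, etc. ...
/// allocator.free(allocation)?;
/// ```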
pub struct Allocator {
pub(crate) memory_types: Vec<MemoryType>,
pub(crate) memory_heaps: Vec<vk::MemoryHeap>,
device: ash::Device,
pub(crate) buffer_image_granularity: u64,
pub(crate) debug_settings: AllocatorDebugSettings,
allocation_sizes: AllocationSizes,
}
impl fmt::Debug for Allocator {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.generate_report().fmt(f)
}
}
impl Allocator {
pub fn new(desc: &AllocatorCreateDesc) -> Result<Self> {
if desc.physical_device == vk::PhysicalDevice::null() {
return Err(AllocationError::InvalidAllocatorCreateDesc(
"AllocatorCreateDesc field `physical_device` is null.".into(),
));
}
let mem_props = unsafe {
desc.instance
.get_physical_device_memory_properties(desc.physical_device)
};
let memory_types = &mem_props.memory_types_as_slice();
let memory_heaps = mem_props.memory_heaps_as_slice().to_vec();
if desc.debug_settings.log_memory_information {
debug!("memory type count: {}", mem_props.memory_type_count);
debug!("memory heap count: {}", mem_props.memory_heap_count);
for (i, mem_type) in memory_types.iter().enumerate() {
let flags = mem_type.property_flags;
debug!(
"memory type[{}]: prop flags: 0x{:x}, heap[{}]",
i,
flags.as_raw(),
mem_type.heap_index,
);
}
for (i, heap) in memory_heaps.iter().enumerate() {
debug!(
"heap[{}] flags: 0x{:x}, size: {} MiB",
i,
heap.flags.as_raw(),
heap.size / (1024 * 1024)
);
}
}
let memory_types = memory_types
.iter()
.enumerate()
.map(|(i, mem_type)| MemoryType {
memory_blocks: Vec::default(),
memory_properties: mem_type.property_flags,
memory_type_index: i,
heap_index: mem_type.heap_index as usize,
mappable: mem_type
.property_flags
.contains(vk::MemoryPropertyFlags::HOST_VISIBLE),
active_general_blocks: 0,
buffer_device_address: desc.buffer_device_address,
})
.collect::<Vec<_>>();
let physical_device_properties = unsafe {
desc.instance
.get_physical_device_properties(desc.physical_device)
};
let granularity = physical_device_properties.limits.buffer_image_granularity;
Ok(Self {
memory_types,
memory_heaps,
device: desc.device.clone(),
buffer_image_granularity: granularity,
debug_settings: desc.debug_settings,
allocation_sizes: AllocationSizes::default(),
})
}
pub fn allocate(&mut self, desc: &AllocationCreateDesc<'_>) -> Result<Allocation> {
let size = desc.requirements.size;
let alignment = desc.requirements.alignment;
let backtrace = Arc::new(if self.debug_settings.store_stack_traces {
Backtrace::force_capture()
} else {
Backtrace::disabled()
});
if self.debug_settings.log_allocations {
debug!(
"Allocating `{}` of {} bytes with an alignment of {}.",
&desc.name, size, alignment
);
if self.debug_settings.log_stack_traces {
let backtrace = Backtrace::force_capture();
debug!("Allocation stack trace: {}", backtrace);
}
}
if size == 0 || !alignment.is_power_of_two() {
return Err(AllocationError::InvalidAllocationCreateDesc);
}
let mem_loc_preferred_bits = match desc.location {
MemoryLocation::GpuOnly => vk::MemoryPropertyFlags::DEVICE_LOCAL,
MemoryLocation::CpuToGpu => {
vk::MemoryPropertyFlags::HOST_VISIBLE
| vk::MemoryPropertyFlags::HOST_COHERENT
| vk::MemoryPropertyFlags::DEVICE_LOCAL
}
MemoryLocation::GpuToCpu => {
vk::MemoryPropertyFlags::HOST_VISIBLE
| vk::MemoryPropertyFlags::HOST_COHERENT
| vk::MemoryPropertyFlags::HOST_CACHED
}
MemoryLocation::Unknown => vk::MemoryPropertyFlags::empty(),
};
let mut memory_type_index_opt =
self.find_memorytype_index(&desc.requirements, mem_loc_preferred_bits);
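// If no memory type satisfies the full set of preferred flags, fall back to the smaller
// set of required flags below (e.g. dropping DEVICE_LOCAL for CpuToGpu allocations).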
if memory_type_index_opt.is_none() {
let mem_loc_required_bits = match desc.location {
MemoryLocation::GpuOnly => vk::MemoryPropertyFlags::DEVICE_LOCAL,
MemoryLocation::CpuToGpu | MemoryLocation::GpuToCpu => {
vk::MemoryPropertyFlags::HOST_VISIBLE | vk::MemoryPropertyFlags::HOST_COHERENT
}
MemoryLocation::Unknown => vk::MemoryPropertyFlags::empty(),
};
memory_type_index_opt =
self.find_memorytype_index(&desc.requirements, mem_loc_required_bits);
}
let memory_type_index = match memory_type_index_opt {
Some(x) => x as usize,
None => return Err(AllocationError::NoCompatibleMemoryTypeFound),
};
// Do not try to create a block if the heap is smaller than the required size (avoids validation warnings).
let memory_type = &mut self.memory_types[memory_type_index];
let allocation = if size > self.memory_heaps[memory_type.heap_index].size {
Err(AllocationError::OutOfMemory)
} else {
memory_type.allocate(
&self.device,
desc,
self.buffer_image_granularity,
backtrace.clone(),
&self.allocation_sizes,
)
};
if desc.location == MemoryLocation::CpuToGpu {
if allocation.is_err() {
let mem_loc_preferred_bits =
vk::MemoryPropertyFlags::HOST_VISIBLE | vk::MemoryPropertyFlags::HOST_COHERENT;
let memory_type_index_opt =
self.find_memorytype_index(&desc.requirements, mem_loc_preferred_bits);
let memory_type_index = match memory_type_index_opt {
Some(x) => x as usize,
None => return Err(AllocationError::NoCompatibleMemoryTypeFound),
};
self.memory_types[memory_type_index].allocate(
&self.device,
desc,
self.buffer_image_granularity,
backtrace,
&self.allocation_sizes,
)
} else {
allocation
}
} else {
allocation
}
}
pub fn free(&mut self, allocation: Allocation) -> Result<()> {
if self.debug_settings.log_frees {
let name = allocation.name.as_deref().unwrap_or("<null>");
debug!("Freeing `{}`.", name);
if self.debug_settings.log_stack_traces {
let backtrace = Backtrace::force_capture();
debug!("Free stack trace: {}", backtrace);
}
}
if allocation.is_null() {
return Ok(());
}
self.memory_types[allocation.memory_type_index].free(allocation, &self.device)?;
Ok(())
}
pub fn rename_allocation(&mut self, allocation: &mut Allocation, name: &str) -> Result<()> {
allocation.name = Some(name.into());
if allocation.is_null() {
return Ok(());
}
let mem_type = &mut self.memory_types[allocation.memory_type_index];
let mem_block = mem_type.memory_blocks[allocation.memory_block_index]
.as_mut()
.ok_or_else(|| AllocationError::Internal("Memory block must be Some.".into()))?;
mem_block
.sub_allocator
.rename_allocation(allocation.chunk_id, name)?;
Ok(())
}
pub fn report_memory_leaks(&self, log_level: Level) {
for (mem_type_i, mem_type) in self.memory_types.iter().enumerate() {
for (block_i, mem_block) in mem_type.memory_blocks.iter().enumerate() {
if let Some(mem_block) = mem_block {
mem_block
.sub_allocator
.report_memory_leaks(log_level, mem_type_i, block_i);
}
}
}
}
fn find_memorytype_index(
&self,
memory_req: &vk::MemoryRequirements,
flags: vk::MemoryPropertyFlags,
) -> Option<u32> {
self.memory_types
.iter()
.find(|memory_type| {
(1 << memory_type.memory_type_index) & memory_req.memory_type_bits != 0
&& memory_type.memory_properties.contains(flags)
})
.map(|memory_type| memory_type.memory_type_index as _)
}
pub fn generate_report(&self) -> AllocatorReport {
let mut allocations = vec![];
let mut blocks = vec![];
let mut total_reserved_bytes = 0;
for memory_type in &self.memory_types {
for block in memory_type.memory_blocks.iter().flatten() {
total_reserved_bytes += block.size;
let first_allocation = allocations.len();
allocations.extend(block.sub_allocator.report_allocations());
blocks.push(MemoryBlockReport {
size: block.size,
allocations: first_allocation..allocations.len(),
});
}
}
let total_allocated_bytes = allocations.iter().map(|report| report.size).sum();
AllocatorReport {
allocations,
blocks,
total_allocated_bytes,
total_reserved_bytes,
}
}
}
impl Drop for Allocator {
fn drop(&mut self) {
if self.debug_settings.log_leaks_on_shutdown {
self.report_memory_leaks(Level::Warn);
}
// Free all remaining memory blocks
for mem_type in self.memory_types.iter_mut() {
for mem_block in mem_type.memory_blocks.iter_mut() {
let block = mem_block.take();
if let Some(block) = block {
block.destroy(&self.device);
}
}
}
}
}

View File

@@ -0,0 +1,226 @@
#![allow(clippy::new_without_default)]
use super::Allocator;
use crate::visualizer::{
render_allocation_reports_ui, AllocationReportVisualizeSettings, ColorScheme,
MemoryChunksVisualizationSettings,
};
struct AllocatorVisualizerBlockWindow {
memory_type_index: usize,
block_index: usize,
settings: MemoryChunksVisualizationSettings,
}
impl AllocatorVisualizerBlockWindow {
fn new(memory_type_index: usize, block_index: usize) -> Self {
Self {
memory_type_index,
block_index,
settings: Default::default(),
}
}
}
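/// egui-based visualizer for an [`Allocator`], showing its memory blocks and an allocation breakdown.
///
/// A minimal sketch of driving it from per-frame UI code (the `ctx: &egui::Context`,
/// `allocator: &Allocator`, and the two `bool` window flags are assumed to live in the
/// caller's application state):
///
/// ```ignore
/// let mut visualizer = AllocatorVisualizer::new();
/// // Each frame:
/// visualizer.render_breakdown_window(&ctx, &allocator, &mut breakdown_open);
/// visualizer.render_memory_block_window(&ctx, &allocator, &mut blocks_open);
/// visualizer.render_memory_block_visualization_windows(&ctx, &allocator);
/// ```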
pub struct AllocatorVisualizer {
selected_blocks: Vec<AllocatorVisualizerBlockWindow>,
color_scheme: ColorScheme,
breakdown_settings: AllocationReportVisualizeSettings,
}
impl AllocatorVisualizer {
pub fn new() -> Self {
Self {
selected_blocks: Vec::default(),
color_scheme: ColorScheme::default(),
breakdown_settings: Default::default(),
}
}
pub fn set_color_scheme(&mut self, color_scheme: ColorScheme) {
self.color_scheme = color_scheme;
}
pub fn render_memory_block_ui(&mut self, ui: &mut egui::Ui, alloc: &Allocator) {
ui.label(format!(
"buffer image granularity: {:?}",
alloc.buffer_image_granularity
));
ui.collapsing(
format!("Memory Heaps ({} heaps)", alloc.memory_heaps.len()),
|ui| {
for (i, heap) in alloc.memory_heaps.iter().enumerate() {
ui.collapsing(format!("Heap: {}", i), |ui| {
ui.label(format!("flags: {:?}", heap.flags));
ui.label(format!(
"size: {} MiB",
heap.size as f64 / (1024 * 1024) as f64
));
});
}
},
);
ui.collapsing(
format!("Memory Types: ({} types)", alloc.memory_types.len()),
|ui| {
for (mem_type_idx, mem_type) in alloc.memory_types.iter().enumerate() {
ui.collapsing(
format!(
"Type: {} ({} blocks)",
mem_type_idx,
mem_type.memory_blocks.len(),
),
|ui| {
let mut total_block_size = 0;
let mut total_allocated = 0;
for block in mem_type.memory_blocks.iter().flatten() {
total_block_size += block.size;
total_allocated += block.sub_allocator.allocated();
}
let active_block_count = mem_type
.memory_blocks
.iter()
.filter(|block| block.is_some())
.count();
ui.label(format!("properties: {:?}", mem_type.memory_properties));
ui.label(format!("heap index: {}", mem_type.heap_index));
ui.label(format!("total block size: {} KiB", total_block_size / 1024));
ui.label(format!("total allocated: {} KiB", total_allocated / 1024));
ui.label(format!("block count: {}", active_block_count));
for (block_idx, block) in mem_type.memory_blocks.iter().enumerate() {
let Some(block) = block else { continue };
ui.collapsing(format!("Block: {}", block_idx), |ui| {
use ash::vk::Handle;
ui.label(format!("size: {} KiB", block.size / 1024));
ui.label(format!(
"allocated: {} KiB",
block.sub_allocator.allocated() / 1024
));
ui.label(format!(
"vk device memory: 0x{:x}",
block.device_memory.as_raw()
));
if let Some(mapped_ptr) = block.mapped_ptr {
ui.label(format!(
"mapped pointer: {:#p}",
mapped_ptr.0.as_ptr()
));
}
if block.dedicated_allocation {
ui.label("Dedicated Allocation");
}
block.sub_allocator.draw_base_info(ui);
if block.sub_allocator.supports_visualization()
&& ui.button("visualize").clicked()
&& !self.selected_blocks.iter().any(|x| {
x.memory_type_index == mem_type_idx
&& x.block_index == block_idx
})
{
self.selected_blocks.push(
AllocatorVisualizerBlockWindow::new(
mem_type_idx,
block_idx,
),
);
}
});
}
},
);
}
},
);
}
pub fn render_memory_block_window(
&mut self,
ctx: &egui::Context,
allocator: &Allocator,
open: &mut bool,
) {
egui::Window::new("Allocator Memory Blocks")
.open(open)
.show(ctx, |ui| self.render_memory_block_ui(ui, allocator));
}
pub fn render_memory_block_visualization_windows(
&mut self,
ctx: &egui::Context,
allocator: &Allocator,
) {
// Draw each window.
let color_scheme = &self.color_scheme;
self.selected_blocks.retain_mut(|window| {
let mut open = true;
egui::Window::new(format!(
"Block Visualizer {}:{}",
window.memory_type_index, window.block_index
))
.default_size([1920.0 * 0.5, 1080.0 * 0.5])
.open(&mut open)
.show(ctx, |ui| {
let memblock = &allocator.memory_types[window.memory_type_index].memory_blocks
[window.block_index]
.as_ref();
if let Some(memblock) = memblock {
ui.label(format!(
"Memory type {}, Memory block {}, Block size: {} KiB",
window.memory_type_index,
window.block_index,
memblock.size / 1024
));
window
.settings
.ui(ui, allocator.debug_settings.store_stack_traces);
ui.separator();
memblock
.sub_allocator
.draw_visualization(color_scheme, ui, &window.settings);
} else {
ui.label("Deallocated memory block");
}
});
open
});
}
pub fn render_breakdown_ui(&mut self, ui: &mut egui::Ui, allocator: &Allocator) {
render_allocation_reports_ui(
ui,
&mut self.breakdown_settings,
allocator
.memory_types
.iter()
.flat_map(|memory_type| memory_type.memory_blocks.iter())
.flatten()
.flat_map(|memory_block| memory_block.sub_allocator.report_allocations()),
);
}
pub fn render_breakdown_window(
&mut self,
ctx: &egui::Context,
allocator: &Allocator,
open: &mut bool,
) {
egui::Window::new("Allocator Breakdown")
.open(open)
.show(ctx, |ui| self.render_breakdown_ui(ui, allocator));
}
}