Vendor dependencies for 0.3.0 release

2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions
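A vendor tree like this is most likely produced with `cargo vendor` (an assumption; the commit itself does not say how it was generated). That command copies every dependency's source into `vendor/` and prints a source-replacement snippet to merge into `.cargo/config.toml`, roughly:

```toml
# Sketch of the configuration `cargo vendor` prints; it redirects all
# crates.io lookups to the checked-in vendor/ directory so builds use it.
[source.crates-io]
replace-with = "vendored-sources"

[source.vendored-sources]
directory = "vendor"
```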

1
vendor/gpu-allocator/.cargo-checksum.json vendored Normal file

@@ -0,0 +1 @@
{"files":{"Cargo.lock":"3ca79349a9d9a4636c763d35a0caad53c655a13ff0612020ae8ec8fe3b42b4ff","Cargo.toml":"4d6d3bb1f8fe80fe67f31bf41d3469cc311e14daa295f42d0e7944b287b9f8d3","LICENSE-APACHE":"0178e21322b0e88aa3aeb3146f6a9611bc1f8df6d98bdfb34be28b9dd56a8107","LICENSE-MIT":"ad41be6cc6538b29b9346648f41432b5e460bad6be073b5eeaa41320ea2921dc","README.md":"d51d23364f3944098ca4b6118d46b5b353c2bfcd0cce0b3ade317537bdf69143","examples/d3d12-buffer-winrs.rs":"e7bb5565a26c1608ed57bcf5895e7e45bed5af85040f5a572049e6b74c99631c","examples/d3d12-buffer.rs":"c84cfdeae3a347fe561529b60963daaccc10880a9aeb64ae992689c30e16ea11","examples/metal-buffer.rs":"0fbd65a5a8381013199ff98f76c3830b96eb5e46591d4043f54614d582f26523","examples/vulkan-buffer.rs":"49f57f1f4542126047e217c81083b08ed798637fd90d6b7560bc9fab21732953","src/allocator/dedicated_block_allocator/mod.rs":"ec52728fb0c9d40173472640f8005ee7eca450170b7c3113adfd2e887e387f29","src/allocator/dedicated_block_allocator/visualizer.rs":"5b9019dd73ebe7bb9e9d103c48368014b73cdd4ae7f36a706ae047919f56fac6","src/allocator/free_list_allocator/mod.rs":"afe3417f40cdf71ec99c5885066d715e56d8d6c19662b17846cc8ce54222b1e2","src/allocator/free_list_allocator/visualizer.rs":"46214d07285d72a0a29c8d7e76322243853eba7d25d87ebfbb17c75e7815d07f","src/allocator/mod.rs":"86a95a58a30ec59aa7a9bc5822deca4c2a82c737e5633008400c2b291683a024","src/d3d12/mod.rs":"3cb8289c2effe1430c70cbb87b705fcaca7915d36b441f7d8a9a591bca18be25","src/d3d12/visualizer.rs":"41d1b5b897ff7b0a3fda359cee3f6b5c921617a4e1fefe7ddd2341bb1da87691","src/lib.rs":"36c8bf74f77da2113651e4e13d2bd9dbb7f6c854c089573461aaea8378c4d02c","src/metal/mod.rs":"f55592a96135da25785df21d8057fe5c1843b38f1a99d817cadba17d9d036ab7","src/result.rs":"6c7d85ee13afbd0b17c1b81ed0b6d7094247dd693444b62c28daf4d9f2248846","src/visualizer/allocation_reports.rs":"441a85fd68a8903fd9e1413756730e3e5cf9aa61803983e61f7cbca27ee39071","src/visualizer/memory_chunks.rs":"f521a4ce056d610d095c7fd65b110b8c046e84850746ec38b4d66f27b0ec70ae","src/visualizer/mod.rs":"7d56c956abba968400aa6794e399db4b7ec10135a948beef21ea13ba3bd1fd9e","src/vulkan/mod.rs":"e6dd3e67aea9f321e7c53bf87448a645bc83e3bbc92e6aef76223b18dd610fe5","src/vulkan/visualizer.rs":"6357703e89e3f5e9b78649eb16af907c107d0d121c23a4094dc0794a38fd4929"},"package":"c151a2a5ef800297b4e79efa4f4bec035c5f51d5ae587287c9b952bdf734cacd"}

754
vendor/gpu-allocator/Cargo.lock generated vendored Normal file

@@ -0,0 +1,754 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "ab_glyph"
version = "0.2.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "79faae4620f45232f599d9bc7b290f88247a0834162c4495ab2f02d60004adfb"
dependencies = [
"ab_glyph_rasterizer",
"owned_ttf_parser",
]
[[package]]
name = "ab_glyph_rasterizer"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c71b1793ee61086797f5c80b6efa2b8ffa6d5dd703f118545808a7f2e27f7046"
[[package]]
name = "accesskit"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "74a4b14f3d99c1255dcba8f45621ab1a2e7540a0009652d33989005a4d0bfc6b"
dependencies = [
"enumn",
"serde",
]
[[package]]
name = "ahash"
version = "0.8.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011"
dependencies = [
"cfg-if",
"once_cell",
"serde",
"version_check",
"zerocopy",
]
[[package]]
name = "aho-corasick"
version = "1.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
dependencies = [
"memchr",
]
[[package]]
name = "ash"
version = "0.38.0+1.3.281"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0bb44936d800fea8f016d7f2311c6a4f97aebd5dc86f09906139ec848cf3a46f"
dependencies = [
"libloading",
]
[[package]]
name = "autocfg"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"
[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "bitflags"
version = "2.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de"
[[package]]
name = "block"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0d8c1fef690941d3e7788d328517591fecc684c084084702d6ff1641e993699a"
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "core-foundation"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f"
dependencies = [
"core-foundation-sys",
"libc",
]
[[package]]
name = "core-foundation-sys"
version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f"
[[package]]
name = "core-graphics-types"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "45390e6114f68f718cc7a830514a96f903cccd70d02a8f6d9f643ac4ba45afaf"
dependencies = [
"bitflags 1.3.2",
"core-foundation",
"libc",
]
[[package]]
name = "dispatch"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bd0c93bb4b0c6d9b77f4435b0ae98c24d17f1c45b2ff844c6151a07256ca923b"
[[package]]
name = "ecolor"
version = "0.27.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "20930a432bbd57a6d55e07976089708d4893f3d556cf42a0d79e9e321fa73b10"
dependencies = [
"serde",
]
[[package]]
name = "egui"
version = "0.27.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "584c5d1bf9a67b25778a3323af222dbe1a1feb532190e103901187f92c7fe29a"
dependencies = [
"accesskit",
"ahash",
"epaint",
"nohash-hasher",
"serde",
]
[[package]]
name = "egui_extras"
version = "0.27.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b78779f35ded1a853786c9ce0b43fe1053e10a21ea3b23ebea411805ce41593"
dependencies = [
"egui",
"enum-map",
"log",
"serde",
]
[[package]]
name = "emath"
version = "0.27.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e4c3a552cfca14630702449d35f41c84a0d15963273771c6059175a803620f3f"
dependencies = [
"serde",
]
[[package]]
name = "enum-map"
version = "2.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6866f3bfdf8207509a033af1a75a7b08abda06bbaaeae6669323fd5a097df2e9"
dependencies = [
"enum-map-derive",
"serde",
]
[[package]]
name = "enum-map-derive"
version = "0.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f282cfdfe92516eb26c2af8589c274c7c17681f5ecc03c18255fe741c6aa64eb"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "enumn"
version = "0.1.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6fd000fd6988e73bbe993ea3db9b1aa64906ab88766d654973924340c8cddb42"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "env_logger"
version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4cd405aab171cb85d6735e5c8d9db038c17d3ca007a4d2c25f337935c3d90580"
dependencies = [
"humantime",
"is-terminal",
"log",
"regex",
"termcolor",
]
[[package]]
name = "epaint"
version = "0.27.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b381f8b149657a4acf837095351839f32cd5c4aec1817fc4df84e18d76334176"
dependencies = [
"ab_glyph",
"ahash",
"ecolor",
"emath",
"nohash-hasher",
"parking_lot",
"serde",
]
[[package]]
name = "foreign-types"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d737d9aa519fb7b749cbc3b962edcf310a8dd1f4b67c91c4f83975dbdd17d965"
dependencies = [
"foreign-types-macros",
"foreign-types-shared",
]
[[package]]
name = "foreign-types-macros"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a5c6c585bc94aaf2c7b51dd4c2ba22680844aba4c687be581871a6f518c5742"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "foreign-types-shared"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aa9a19cbb55df58761df49b23516a86d432839add4af60fc256da840f66ed35b"
[[package]]
name = "gpu-allocator"
version = "0.27.0"
dependencies = [
"ash",
"egui",
"egui_extras",
"env_logger",
"log",
"metal",
"presser",
"thiserror",
"winapi",
"windows",
]
[[package]]
name = "hermit-abi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024"
[[package]]
name = "humantime"
version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
[[package]]
name = "is-terminal"
version = "0.4.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b"
dependencies = [
"hermit-abi",
"libc",
"windows-sys",
]
[[package]]
name = "libc"
version = "0.2.155"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c"
[[package]]
name = "libloading"
version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e310b3a6b5907f99202fcdb4960ff45b93735d7c7d96b760fcff8db2dc0e103d"
dependencies = [
"cfg-if",
"windows-targets",
]
[[package]]
name = "lock_api"
version = "0.4.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17"
dependencies = [
"autocfg",
"scopeguard",
]
[[package]]
name = "log"
version = "0.4.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
[[package]]
name = "malloc_buf"
version = "0.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62bb907fe88d54d8d9ce32a3cceab4218ed2f6b7d35617cafe9adf84e43919cb"
dependencies = [
"libc",
]
[[package]]
name = "memchr"
version = "2.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
[[package]]
name = "metal"
version = "0.29.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ecfd3296f8c56b7c1f6fbac3c71cefa9d78ce009850c45000015f206dc7fa21"
dependencies = [
"bitflags 2.6.0",
"block",
"core-graphics-types",
"dispatch",
"foreign-types",
"log",
"objc",
"paste",
]
[[package]]
name = "nohash-hasher"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451"
[[package]]
name = "objc"
version = "0.2.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "915b1b472bc21c53464d6c8461c9d3af805ba1ef837e1cac254428f4a77177b1"
dependencies = [
"malloc_buf",
]
[[package]]
name = "once_cell"
version = "1.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"
[[package]]
name = "owned_ttf_parser"
version = "0.24.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "490d3a563d3122bf7c911a59b0add9389e5ec0f5f0c3ac6b91ff235a0e6a7f90"
dependencies = [
"ttf-parser",
]
[[package]]
name = "parking_lot"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27"
dependencies = [
"lock_api",
"parking_lot_core",
]
[[package]]
name = "parking_lot_core"
version = "0.9.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8"
dependencies = [
"cfg-if",
"libc",
"redox_syscall",
"smallvec",
"windows-targets",
]
[[package]]
name = "paste"
version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a"
[[package]]
name = "presser"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8cf8e6a8aa66ce33f63993ffc4ea4271eb5b0530a9002db8455ea6050c77bfa"
[[package]]
name = "proc-macro2"
version = "1.0.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7"
dependencies = [
"proc-macro2",
]
[[package]]
name = "redox_syscall"
version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4"
dependencies = [
"bitflags 2.6.0",
]
[[package]]
name = "regex"
version = "1.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f"
dependencies = [
"aho-corasick",
"memchr",
"regex-automata",
"regex-syntax",
]
[[package]]
name = "regex-automata"
version = "0.4.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df"
dependencies = [
"aho-corasick",
"memchr",
"regex-syntax",
]
[[package]]
name = "regex-syntax"
version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b"
[[package]]
name = "scopeguard"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]]
name = "serde"
version = "1.0.204"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.204"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "smallvec"
version = "1.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67"
[[package]]
name = "syn"
version = "2.0.71"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b146dcf730474b4bcd16c311627b31ede9ab149045db4d6088b3becaea046462"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "termcolor"
version = "1.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755"
dependencies = [
"winapi-util",
]
[[package]]
name = "thiserror"
version = "1.0.62"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f2675633b1499176c2dff06b0856a27976a8f9d436737b4cf4f312d4d91d8bbb"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.62"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d20468752b09f49e909e55a5d338caa8bedf615594e9d80bc4c565d30faf798c"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "ttf-parser"
version = "0.24.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8686b91785aff82828ed725225925b33b4fde44c4bb15876e5f7c832724c420a"
[[package]]
name = "unicode-ident"
version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
[[package]]
name = "version_check"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
"winapi-i686-pc-windows-gnu",
"winapi-x86_64-pc-windows-gnu",
]
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-util"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b"
dependencies = [
"windows-sys",
]
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
[[package]]
name = "windows"
version = "0.58.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd04d41d93c4992d421894c18c8b43496aa748dd4c081bac0dc93eb0489272b6"
dependencies = [
"windows-core",
"windows-targets",
]
[[package]]
name = "windows-core"
version = "0.58.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99"
dependencies = [
"windows-implement",
"windows-interface",
"windows-result",
"windows-strings",
"windows-targets",
]
[[package]]
name = "windows-implement"
version = "0.58.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "windows-interface"
version = "0.58.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "windows-result"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e"
dependencies = [
"windows-targets",
]
[[package]]
name = "windows-strings"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10"
dependencies = [
"windows-result",
"windows-targets",
]
[[package]]
name = "windows-sys"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
dependencies = [
"windows-targets",
]
[[package]]
name = "windows-targets"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
dependencies = [
"windows_aarch64_gnullvm",
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_gnullvm",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_gnullvm",
"windows_x86_64_msvc",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
[[package]]
name = "windows_aarch64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
[[package]]
name = "windows_i686_gnu"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
[[package]]
name = "windows_i686_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
[[package]]
name = "windows_i686_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
[[package]]
name = "windows_x86_64_gnu"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
[[package]]
name = "windows_x86_64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
[[package]]
name = "zerocopy"
version = "0.7.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0"
dependencies = [
"zerocopy-derive",
]
[[package]]
name = "zerocopy-derive"
version = "0.7.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e"
dependencies = [
"proc-macro2",
"quote",
"syn",
]

162
vendor/gpu-allocator/Cargo.toml vendored Normal file

@@ -0,0 +1,162 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2021"
rust-version = "1.70"
name = "gpu-allocator"
version = "0.27.0"
authors = ["Traverse Research <opensource@traverseresearch.nl>"]
include = [
"/README.md",
"/LICENSE-*",
"/src",
"/examples",
]
description = "Memory allocator for GPU memory in Vulkan and DirectX 12"
homepage = "https://github.com/Traverse-Research/gpu-allocator"
documentation = "https://docs.rs/gpu-allocator/"
readme = "README.md"
keywords = [
"vulkan",
"memory",
"allocator",
]
categories = [
"rendering",
"rendering::graphics-api",
]
license = "MIT OR Apache-2.0"
repository = "https://github.com/Traverse-Research/gpu-allocator"
[package.metadata.docs.rs]
all-features = true
[[example]]
name = "vulkan-buffer"
required-features = [
"vulkan",
"ash/loaded",
]
[[example]]
name = "d3d12-buffer"
required-features = [
"d3d12",
"public-winapi",
]
[[example]]
name = "d3d12-buffer-winrs"
required-features = ["d3d12"]
[[example]]
name = "metal-buffer"
required-features = ["metal"]
[dependencies.ash]
version = "0.38"
features = ["debug"]
optional = true
default-features = false
[dependencies.egui]
version = ">=0.24, <=0.27"
optional = true
default-features = false
[dependencies.egui_extras]
version = ">=0.24, <=0.27"
optional = true
default-features = false
[dependencies.log]
version = "0.4"
[dependencies.presser]
version = "0.3"
[dependencies.thiserror]
version = "1.0"
[dev-dependencies.ash]
version = "0.38"
features = [
"debug",
"loaded",
]
default-features = false
[dev-dependencies.env_logger]
version = "0.10"
[features]
d3d12 = ["dep:windows"]
default = [
"d3d12",
"vulkan",
]
metal = ["dep:metal"]
public-winapi = ["dep:winapi"]
visualizer = [
"dep:egui",
"dep:egui_extras",
]
vulkan = ["dep:ash"]
[target."cfg(any(target_os = \"macos\", target_os = \"ios\"))".dependencies.metal]
version = "0.29.0"
features = [
"link",
"dispatch",
]
optional = true
default-features = false
[target."cfg(windows)".dependencies.winapi]
version = "0.3.9"
features = [
"d3d12",
"winerror",
"impl-default",
"impl-debug",
]
optional = true
[target."cfg(windows)".dependencies.windows]
version = ">=0.53,<=0.58"
features = [
"Win32_Graphics_Direct3D12",
"Win32_Graphics_Dxgi_Common",
]
optional = true
[target."cfg(windows)".dev-dependencies.winapi]
version = "0.3.9"
features = [
"d3d12",
"d3d12sdklayers",
"dxgi1_6",
"winerror",
"impl-default",
"impl-debug",
"winuser",
"windowsx",
"libloaderapi",
]
[target."cfg(windows)".dev-dependencies.windows]
version = "0.58"
features = [
"Win32_Graphics_Direct3D",
"Win32_Graphics_Direct3D12",
"Win32_Graphics_Dxgi_Common",
]

201
vendor/gpu-allocator/LICENSE-APACHE vendored Normal file

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2021 Traverse Research B.V.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

7
vendor/gpu-allocator/LICENSE-MIT vendored Normal file

@@ -0,0 +1,7 @@
Copyright (c) 2021 Traverse Research B.V.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

186
vendor/gpu-allocator/README.md vendored Normal file

@@ -0,0 +1,186 @@
# 📒 gpu-allocator
[![Actions Status](https://img.shields.io/github/actions/workflow/status/Traverse-Research/gpu-allocator/ci.yml?branch=main&logo=github)](https://github.com/Traverse-Research/gpu-allocator/actions)
[![Latest version](https://img.shields.io/crates/v/gpu-allocator.svg?logo=rust)](https://crates.io/crates/gpu-allocator)
[![Docs](https://img.shields.io/docsrs/gpu-allocator?logo=docs.rs)](https://docs.rs/gpu-allocator/)
[![LICENSE](https://img.shields.io/badge/license-MIT-blue.svg)](LICENSE-MIT)
[![LICENSE](https://img.shields.io/badge/license-apache-blue.svg?logo=apache)](LICENSE-APACHE)
[![Contributor Covenant](https://img.shields.io/badge/contributor%20covenant-v1.4%20adopted-ff69b4.svg)](../main/CODE_OF_CONDUCT.md)
[![MSRV](https://img.shields.io/badge/rustc-1.70.0+-ab6000.svg)](https://blog.rust-lang.org/2023/06/01/Rust-1.70.0.html)
[![Banner](banner.png)](https://traverseresearch.nl)
```toml
[dependencies]
gpu-allocator = "0.27.0"
```
![Visualizer](visualizer.png)
This crate provides a memory allocator for Vulkan, DirectX 12, and Metal, written fully in Rust.
## [Windows-rs] and [winapi]
`gpu-allocator` recently migrated from [winapi] to [windows-rs] but still provides convenient helpers to convert to and from [winapi] types, enabled when compiling with the `public-winapi` crate feature.
[Windows-rs]: https://github.com/microsoft/windows-rs
[winapi]: https://github.com/retep998/winapi-rs
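As a sketch of how a downstream crate would opt in (a hypothetical consumer `Cargo.toml` entry; the `public-winapi` feature itself is declared in this crate's vendored `Cargo.toml`):
```toml
[dependencies]
gpu-allocator = { version = "0.27.0", features = ["public-winapi"] }
```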
## Setting up the Vulkan memory allocator
```rust
use gpu_allocator::vulkan::*;
let mut allocator = Allocator::new(&AllocatorCreateDesc {
instance,
device,
physical_device,
debug_settings: Default::default(),
buffer_device_address: true, // Ideally, check the BufferDeviceAddressFeatures struct.
allocation_sizes: Default::default(),
});
```
## Simple Vulkan allocation example
```rust
use gpu_allocator::vulkan::*;
use gpu_allocator::MemoryLocation;
// Setup vulkan info
let vk_info = vk::BufferCreateInfo::default()
.size(512)
.usage(vk::BufferUsageFlags::STORAGE_BUFFER);
let buffer = unsafe { device.create_buffer(&vk_info, None) }.unwrap();
let requirements = unsafe { device.get_buffer_memory_requirements(buffer) };
let allocation = allocator
.allocate(&AllocationCreateDesc {
name: "Example allocation",
requirements,
location: MemoryLocation::CpuToGpu,
linear: true, // Buffers are always linear
allocation_scheme: AllocationScheme::GpuAllocatorManaged,
}).unwrap();
// Bind memory to the buffer
unsafe { device.bind_buffer_memory(buffer, allocation.memory(), allocation.offset()).unwrap() };
// Cleanup
allocator.free(allocation).unwrap();
unsafe { device.destroy_buffer(buffer, None) };
```
## Setting up the D3D12 memory allocator
```rust
use gpu_allocator::d3d12::*;
let mut allocator = Allocator::new(&AllocatorCreateDesc {
device: ID3D12DeviceVersion::Device(device),
debug_settings: Default::default(),
allocation_sizes: Default::default(),
});
```
## Simple d3d12 allocation example
```rust
use gpu_allocator::d3d12::*;
use gpu_allocator::MemoryLocation;
let buffer_desc = Direct3D12::D3D12_RESOURCE_DESC {
Dimension: Direct3D12::D3D12_RESOURCE_DIMENSION_BUFFER,
Alignment: 0,
Width: 512,
Height: 1,
DepthOrArraySize: 1,
MipLevels: 1,
Format: Dxgi::Common::DXGI_FORMAT_UNKNOWN,
SampleDesc: Dxgi::Common::DXGI_SAMPLE_DESC {
Count: 1,
Quality: 0,
},
Layout: Direct3D12::D3D12_TEXTURE_LAYOUT_ROW_MAJOR,
Flags: Direct3D12::D3D12_RESOURCE_FLAG_NONE,
};
let allocation_desc = AllocationCreateDesc::from_d3d12_resource_desc(
&allocator.device(),
&buffer_desc,
"Example allocation",
MemoryLocation::GpuOnly,
);
let allocation = allocator.allocate(&allocation_desc).unwrap();
let mut resource: Option<Direct3D12::ID3D12Resource> = None;
let hr = unsafe {
device.CreatePlacedResource(
allocation.heap(),
allocation.offset(),
&buffer_desc,
Direct3D12::D3D12_RESOURCE_STATE_COMMON,
None,
&mut resource,
)
}?;
// Cleanup
drop(resource);
allocator.free(allocation).unwrap();
```
## Setting up the Metal memory allocator
```rust
use gpu_allocator::metal::*;
let mut allocator = Allocator::new(&AllocatorCreateDesc {
device: device.clone(),
debug_settings: Default::default(),
allocation_sizes: Default::default(),
});
```
## Simple Metal allocation example
```rust
use gpu_allocator::metal::*;
use gpu_allocator::MemoryLocation;
let allocation_desc = AllocationCreateDesc::buffer(
&device,
"Example allocation",
512, // size in bytes
gpu_allocator::MemoryLocation::GpuOnly,
);
let allocation = allocator.allocate(&allocation_desc).unwrap();
let resource = allocation.make_buffer().unwrap();
// Cleanup
drop(resource);
allocator.free(&allocation).unwrap();
```
## Minimum Supported Rust Version
The MSRV for this crate and the `vulkan`, `d3d12` and `metal` features is Rust 1.70. Other features, such as `visualizer` (with all of its `egui` dependencies), may have a higher requirement and are not tested in our CI.
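For example, a consumer that wants the memory visualizer could enable it like this (hypothetical downstream manifest entry; per the vendored `Cargo.toml`, the `visualizer` feature pulls in `egui` and `egui_extras`):
```toml
[dependencies]
gpu-allocator = { version = "0.27.0", features = ["visualizer"] }
```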
## License
Licensed under either of
- Apache License, Version 2.0, ([LICENSE-APACHE](../master/LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
- MIT license ([LICENSE-MIT](../master/LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
## Alternative libraries
- [vk-mem-rs](https://github.com/gwihlidal/vk-mem-rs)
## Contribution
Unless you explicitly state otherwise, any contribution intentionally
submitted for inclusion in the work by you, as defined in the Apache-2.0
license, shall be dual licensed as above, without any additional terms or
conditions.

243
vendor/gpu-allocator/examples/d3d12-buffer-winrs.rs vendored Normal file

@@ -0,0 +1,243 @@
//! Example showcasing [`gpu-allocator`] with types and functions from the [`windows`] crate.
use gpu_allocator::{
d3d12::{
AllocationCreateDesc, Allocator, AllocatorCreateDesc, ID3D12DeviceVersion, ResourceCategory,
},
MemoryLocation,
};
use log::*;
use windows::{
core::{Interface, Result},
Win32::{
Foundation::E_NOINTERFACE,
Graphics::{
Direct3D::{D3D_FEATURE_LEVEL_11_0, D3D_FEATURE_LEVEL_11_1, D3D_FEATURE_LEVEL_12_0},
Direct3D12::{
D3D12CreateDevice, ID3D12Device, ID3D12Resource,
D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT, D3D12_RESOURCE_DESC,
D3D12_RESOURCE_DIMENSION_BUFFER, D3D12_RESOURCE_FLAG_NONE,
D3D12_RESOURCE_STATE_COMMON, D3D12_TEXTURE_LAYOUT_ROW_MAJOR,
},
Dxgi::{
Common::{DXGI_FORMAT_UNKNOWN, DXGI_SAMPLE_DESC},
CreateDXGIFactory2, IDXGIAdapter4, IDXGIFactory6, DXGI_ADAPTER_FLAG3_SOFTWARE,
DXGI_ERROR_NOT_FOUND,
},
},
},
};
fn create_d3d12_device(dxgi_factory: &IDXGIFactory6) -> Option<ID3D12Device> {
for idx in 0.. {
// TODO: Might as well return Result<> from this function
let adapter1 = match unsafe { dxgi_factory.EnumAdapters1(idx) } {
Ok(a) => a,
Err(e) if e.code() == DXGI_ERROR_NOT_FOUND => break,
Err(e) => panic!("{:?}", e),
};
let adapter4: IDXGIAdapter4 = adapter1.cast().unwrap();
let desc = unsafe { adapter4.GetDesc3() }.unwrap();
// Skip software adapters
// Vote for https://github.com/microsoft/windows-rs/issues/793!
if (desc.Flags & DXGI_ADAPTER_FLAG3_SOFTWARE) == DXGI_ADAPTER_FLAG3_SOFTWARE {
continue;
}
let feature_levels = [
(D3D_FEATURE_LEVEL_11_0, "D3D_FEATURE_LEVEL_11_0"),
(D3D_FEATURE_LEVEL_11_1, "D3D_FEATURE_LEVEL_11_1"),
(D3D_FEATURE_LEVEL_12_0, "D3D_FEATURE_LEVEL_12_0"),
];
let device =
feature_levels
.iter()
.rev()
.find_map(|&(feature_level, feature_level_name)| {
let mut device = None;
match unsafe { D3D12CreateDevice(&adapter4, feature_level, &mut device) } {
Ok(()) => {
info!("Using D3D12 feature level: {}", feature_level_name);
Some(device.unwrap())
}
Err(e) if e.code() == E_NOINTERFACE => {
error!("ID3D12Device interface not supported");
None
}
Err(e) => {
info!(
"D3D12 feature level {} not supported: {}",
feature_level_name, e
);
None
}
}
});
if device.is_some() {
return device;
}
}
None
}
fn main() -> Result<()> {
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("trace")).init();
let dxgi_factory = unsafe {
CreateDXGIFactory2(windows::Win32::Graphics::Dxgi::DXGI_CREATE_FACTORY_FLAGS::default())
}?;
let device = create_d3d12_device(&dxgi_factory).expect("Failed to create D3D12 device.");
// Setting up the allocator
let mut allocator = Allocator::new(&AllocatorCreateDesc {
device: ID3D12DeviceVersion::Device(device.clone()),
debug_settings: Default::default(),
allocation_sizes: Default::default(),
})
.unwrap();
// Test allocating Gpu Only memory
{
let test_buffer_desc = D3D12_RESOURCE_DESC {
Dimension: D3D12_RESOURCE_DIMENSION_BUFFER,
Alignment: D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT as u64,
Width: 512,
Height: 1,
DepthOrArraySize: 1,
MipLevels: 1,
Format: DXGI_FORMAT_UNKNOWN,
SampleDesc: DXGI_SAMPLE_DESC {
Count: 1,
Quality: 0,
},
Layout: D3D12_TEXTURE_LAYOUT_ROW_MAJOR,
Flags: D3D12_RESOURCE_FLAG_NONE,
};
let allocation_desc = AllocationCreateDesc::from_d3d12_resource_desc(
allocator.device(),
&test_buffer_desc,
"Test allocation (Gpu only)",
MemoryLocation::GpuOnly,
);
let allocation = allocator.allocate(&allocation_desc).unwrap();
let mut resource: Option<ID3D12Resource> = None;
unsafe {
device.CreatePlacedResource(
allocation.heap(),
allocation.offset(),
&test_buffer_desc,
D3D12_RESOURCE_STATE_COMMON,
None,
&mut resource,
)
}?;
drop(resource);
allocator.free(allocation).unwrap();
info!("Allocation and deallocation of GpuOnly memory was successful.");
}
// Test allocating Cpu to Gpu memory
{
let test_buffer_desc = D3D12_RESOURCE_DESC {
Dimension: D3D12_RESOURCE_DIMENSION_BUFFER,
Alignment: D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT as u64,
Width: 512,
Height: 1,
DepthOrArraySize: 1,
MipLevels: 1,
Format: DXGI_FORMAT_UNKNOWN,
SampleDesc: DXGI_SAMPLE_DESC {
Count: 1,
Quality: 0,
},
Layout: D3D12_TEXTURE_LAYOUT_ROW_MAJOR,
Flags: D3D12_RESOURCE_FLAG_NONE,
};
let alloc_info = unsafe { device.GetResourceAllocationInfo(0, &[test_buffer_desc]) };
let allocation = allocator
.allocate(&AllocationCreateDesc {
name: "Test allocation (Cpu To Gpu)",
location: MemoryLocation::CpuToGpu,
size: alloc_info.SizeInBytes,
alignment: alloc_info.Alignment,
resource_category: ResourceCategory::Buffer,
})
.unwrap();
let mut resource: Option<ID3D12Resource> = None;
unsafe {
device.CreatePlacedResource(
allocation.heap(),
allocation.offset(),
&test_buffer_desc,
D3D12_RESOURCE_STATE_COMMON,
None,
&mut resource,
)
}?;
drop(resource);
allocator.free(allocation).unwrap();
info!("Allocation and deallocation of CpuToGpu memory was successful.");
}
// Test allocating Gpu to Cpu memory
{
let test_buffer_desc = D3D12_RESOURCE_DESC {
Dimension: D3D12_RESOURCE_DIMENSION_BUFFER,
Alignment: D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT as u64,
Width: 512,
Height: 1,
DepthOrArraySize: 1,
MipLevels: 1,
Format: DXGI_FORMAT_UNKNOWN,
SampleDesc: DXGI_SAMPLE_DESC {
Count: 1,
Quality: 0,
},
Layout: D3D12_TEXTURE_LAYOUT_ROW_MAJOR,
Flags: D3D12_RESOURCE_FLAG_NONE,
};
let alloc_info = unsafe { device.GetResourceAllocationInfo(0, &[test_buffer_desc]) };
let allocation = allocator
.allocate(&AllocationCreateDesc {
name: "Test allocation (Gpu to Cpu)",
location: MemoryLocation::GpuToCpu,
size: alloc_info.SizeInBytes,
alignment: alloc_info.Alignment,
resource_category: ResourceCategory::Buffer,
})
.unwrap();
let mut resource: Option<ID3D12Resource> = None;
unsafe {
device.CreatePlacedResource(
allocation.heap(),
allocation.offset(),
&test_buffer_desc,
D3D12_RESOURCE_STATE_COMMON,
None,
&mut resource,
)
}?;
drop(resource);
allocator.free(allocation).unwrap();
info!("Allocation and deallocation of CpuToGpu memory was successful.");
}
Ok(())
}

283
vendor/gpu-allocator/examples/d3d12-buffer.rs vendored Normal file

@@ -0,0 +1,283 @@
//! Example showcasing [`winapi`] interop with [`gpu-allocator`] which is driven by the [`windows`] crate.
use winapi::{
shared::{dxgiformat, winerror},
um::{d3d12, d3dcommon},
Interface,
};
mod all_dxgi {
pub use winapi::shared::{dxgi1_3::*, dxgi1_6::*, dxgitype::*};
}
use gpu_allocator::{
d3d12::{
AllocationCreateDesc, Allocator, AllocatorCreateDesc, ID3D12DeviceVersion,
ResourceCategory, ToWinapi, ToWindows,
},
MemoryLocation,
};
use log::*;
fn create_d3d12_device(
dxgi_factory: *mut all_dxgi::IDXGIFactory6,
) -> Option<*mut d3d12::ID3D12Device> {
for idx in 0.. {
let mut adapter4: *mut all_dxgi::IDXGIAdapter4 = std::ptr::null_mut();
let hr = unsafe {
dxgi_factory.as_ref().unwrap().EnumAdapters1(
idx,
<*mut *mut all_dxgi::IDXGIAdapter4>::cast(&mut adapter4),
)
};
if hr == winerror::DXGI_ERROR_NOT_FOUND {
break;
}
assert_eq!(hr, winerror::S_OK);
let mut desc = all_dxgi::DXGI_ADAPTER_DESC3::default();
let hr = unsafe { adapter4.as_ref().unwrap().GetDesc3(&mut desc) };
if hr != winerror::S_OK {
error!("Failed to get adapter description for adapter");
continue;
}
// Skip software adapters
if (desc.Flags & all_dxgi::DXGI_ADAPTER_FLAG3_SOFTWARE)
== all_dxgi::DXGI_ADAPTER_FLAG3_SOFTWARE
{
continue;
}
let feature_levels = [
(d3dcommon::D3D_FEATURE_LEVEL_11_0, "D3D_FEATURE_LEVEL_11_0"),
(d3dcommon::D3D_FEATURE_LEVEL_11_1, "D3D_FEATURE_LEVEL_11_1"),
(d3dcommon::D3D_FEATURE_LEVEL_12_0, "D3D_FEATURE_LEVEL_12_0"),
];
let device =
feature_levels
.iter()
.rev()
.find_map(|&(feature_level, feature_level_name)| {
let mut device: *mut d3d12::ID3D12Device = std::ptr::null_mut();
let hr = unsafe {
d3d12::D3D12CreateDevice(
adapter4.cast(),
feature_level,
&d3d12::ID3D12Device::uuidof(),
<*mut *mut d3d12::ID3D12Device>::cast(&mut device),
)
};
match hr {
winerror::S_OK => {
info!("Using D3D12 feature level: {}.", feature_level_name);
Some(device)
}
winerror::E_NOINTERFACE => {
error!("ID3D12Device interface not supported.");
None
}
_ => {
info!(
"D3D12 feature level: {} not supported: {:x}",
feature_level_name, hr
);
None
}
}
});
if device.is_some() {
return device;
}
}
None
}
fn main() {
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("trace")).init();
let dxgi_factory = {
let mut dxgi_factory: *mut all_dxgi::IDXGIFactory6 = std::ptr::null_mut();
let hr = unsafe {
all_dxgi::CreateDXGIFactory2(
0,
&all_dxgi::IID_IDXGIFactory6,
<*mut *mut all_dxgi::IDXGIFactory6>::cast(&mut dxgi_factory),
)
};
assert_eq!(hr, winerror::S_OK, "Failed to create DXGI factory");
dxgi_factory
};
let device = create_d3d12_device(dxgi_factory).expect("Failed to create D3D12 device.");
// Setting up the allocator
let mut allocator = Allocator::new(&AllocatorCreateDesc {
device: ID3D12DeviceVersion::Device(device.as_windows().clone()),
debug_settings: Default::default(),
allocation_sizes: Default::default(),
})
.unwrap();
let device = unsafe { device.as_ref() }.unwrap();
// Test allocating Gpu Only memory
{
let test_buffer_desc = d3d12::D3D12_RESOURCE_DESC {
Dimension: d3d12::D3D12_RESOURCE_DIMENSION_BUFFER,
Alignment: d3d12::D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT as u64,
Width: 512,
Height: 1,
DepthOrArraySize: 1,
MipLevels: 1,
Format: dxgiformat::DXGI_FORMAT_UNKNOWN,
SampleDesc: all_dxgi::DXGI_SAMPLE_DESC {
Count: 1,
Quality: 0,
},
Layout: d3d12::D3D12_TEXTURE_LAYOUT_ROW_MAJOR,
Flags: d3d12::D3D12_RESOURCE_FLAG_NONE,
};
let allocation_desc = AllocationCreateDesc::from_winapi_d3d12_resource_desc(
device,
&test_buffer_desc,
"Test allocation (Gpu Only)",
MemoryLocation::GpuOnly,
);
let allocation = allocator.allocate(&allocation_desc).unwrap();
let mut resource: *mut d3d12::ID3D12Resource = std::ptr::null_mut();
let hr = unsafe {
device.CreatePlacedResource(
allocation.heap().as_winapi() as *mut _,
allocation.offset(),
&test_buffer_desc,
d3d12::D3D12_RESOURCE_STATE_COMMON,
std::ptr::null(),
&d3d12::IID_ID3D12Resource,
<*mut *mut d3d12::ID3D12Resource>::cast(&mut resource),
)
};
if hr != winerror::S_OK {
panic!("Failed to create placed resource.");
}
unsafe { resource.as_ref().unwrap().Release() };
allocator.free(allocation).unwrap();
info!("Allocation and deallocation of GpuOnly memory was successful.");
}
// Test allocating Cpu to Gpu memory
{
let test_buffer_desc = d3d12::D3D12_RESOURCE_DESC {
Dimension: d3d12::D3D12_RESOURCE_DIMENSION_BUFFER,
Alignment: d3d12::D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT as u64,
Width: 512,
Height: 1,
DepthOrArraySize: 1,
MipLevels: 1,
Format: dxgiformat::DXGI_FORMAT_UNKNOWN,
SampleDesc: all_dxgi::DXGI_SAMPLE_DESC {
Count: 1,
Quality: 0,
},
Layout: d3d12::D3D12_TEXTURE_LAYOUT_ROW_MAJOR,
Flags: d3d12::D3D12_RESOURCE_FLAG_NONE,
};
let alloc_info = unsafe { device.GetResourceAllocationInfo(0, 1, &test_buffer_desc) };
let allocation = allocator
.allocate(&AllocationCreateDesc {
name: "Test allocation (Cpu to Gpu)",
location: MemoryLocation::CpuToGpu,
size: alloc_info.SizeInBytes,
alignment: alloc_info.Alignment,
resource_category: ResourceCategory::Buffer,
})
.unwrap();
let mut resource: *mut d3d12::ID3D12Resource = std::ptr::null_mut();
let hr = unsafe {
device.CreatePlacedResource(
allocation.heap().as_winapi() as *mut _,
allocation.offset(),
&test_buffer_desc,
d3d12::D3D12_RESOURCE_STATE_COMMON,
std::ptr::null(),
&d3d12::IID_ID3D12Resource,
<*mut *mut d3d12::ID3D12Resource>::cast(&mut resource),
)
};
if hr != winerror::S_OK {
panic!("Failed to create placed resource.");
}
unsafe { resource.as_ref().unwrap().Release() };
allocator.free(allocation).unwrap();
info!("Allocation and deallocation of CpuToGpu memory was successful.");
}
// Test allocating Gpu to Cpu memory
{
let test_buffer_desc = d3d12::D3D12_RESOURCE_DESC {
Dimension: d3d12::D3D12_RESOURCE_DIMENSION_BUFFER,
Alignment: d3d12::D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT as u64,
Width: 512,
Height: 1,
DepthOrArraySize: 1,
MipLevels: 1,
Format: dxgiformat::DXGI_FORMAT_UNKNOWN,
SampleDesc: all_dxgi::DXGI_SAMPLE_DESC {
Count: 1,
Quality: 0,
},
Layout: d3d12::D3D12_TEXTURE_LAYOUT_ROW_MAJOR,
Flags: d3d12::D3D12_RESOURCE_FLAG_NONE,
};
let alloc_info = unsafe { device.GetResourceAllocationInfo(0, 1, &test_buffer_desc) };
let allocation = allocator
.allocate(&AllocationCreateDesc {
name: "Test allocation (Gpu to Cpu)",
location: MemoryLocation::GpuToCpu,
size: alloc_info.SizeInBytes,
alignment: alloc_info.Alignment,
resource_category: ResourceCategory::Buffer,
})
.unwrap();
let mut resource: *mut d3d12::ID3D12Resource = std::ptr::null_mut();
let hr = unsafe {
device.CreatePlacedResource(
allocation.heap().as_winapi() as *mut _,
allocation.offset(),
&test_buffer_desc,
d3d12::D3D12_RESOURCE_STATE_COMMON,
std::ptr::null(),
&d3d12::IID_ID3D12Resource,
<*mut *mut d3d12::ID3D12Resource>::cast(&mut resource),
)
};
if hr != winerror::S_OK {
panic!("Failed to create placed resource.");
}
unsafe { resource.as_ref().unwrap().Release() };
allocator.free(allocation).unwrap();
info!("Allocation and deallocation of CpuToGpu memory was successful.");
}
drop(allocator); // Explicitly drop before destruction of device.
unsafe { device.Release() };
unsafe { dxgi_factory.as_ref().unwrap().Release() };
}

93
vendor/gpu-allocator/examples/metal-buffer.rs vendored Normal file

@@ -0,0 +1,93 @@
use std::sync::Arc;
use gpu_allocator::metal::{AllocationCreateDesc, Allocator, AllocatorCreateDesc};
use log::info;
fn main() {
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("trace")).init();
let device = Arc::new(metal::Device::system_default().unwrap());
// Setting up the allocator
let mut allocator = Allocator::new(&AllocatorCreateDesc {
device: device.clone(),
debug_settings: Default::default(),
allocation_sizes: Default::default(),
})
.unwrap();
// Test allocating Gpu Only memory
{
let allocation_desc = AllocationCreateDesc::buffer(
&device,
"Test allocation (Gpu Only)",
512,
gpu_allocator::MemoryLocation::GpuOnly,
);
let allocation = allocator.allocate(&allocation_desc).unwrap();
let _buffer = allocation.make_buffer().unwrap();
allocator.free(&allocation).unwrap();
info!("Allocation and deallocation of GpuOnly memory was successful.");
}
// Test allocating Cpu to Gpu memory
{
let allocation_desc = AllocationCreateDesc::buffer(
&device,
"Test allocation (Cpu to Gpu)",
512,
gpu_allocator::MemoryLocation::CpuToGpu,
);
let allocation = allocator.allocate(&allocation_desc).unwrap();
let _buffer = allocation.make_buffer().unwrap();
allocator.free(&allocation).unwrap();
info!("Allocation and deallocation of CpuToGpu memory was successful.");
}
// Test allocating Gpu to Cpu memory
{
let allocation_desc = AllocationCreateDesc::buffer(
&device,
"Test allocation (Gpu to Cpu)",
512,
gpu_allocator::MemoryLocation::GpuToCpu,
);
let allocation = allocator.allocate(&allocation_desc).unwrap();
let _buffer = allocation.make_buffer().unwrap();
allocator.free(&allocation).unwrap();
info!("Allocation and deallocation of GpuToCpu memory was successful.");
}
// Test allocating texture
{
let texture_desc = metal::TextureDescriptor::new();
texture_desc.set_pixel_format(metal::MTLPixelFormat::RGBA8Unorm);
texture_desc.set_width(64);
texture_desc.set_height(64);
texture_desc.set_storage_mode(metal::MTLStorageMode::Private);
let allocation_desc =
AllocationCreateDesc::texture(&device, "Test allocation (Texture)", &texture_desc);
let allocation = allocator.allocate(&allocation_desc).unwrap();
let _texture = allocation.make_texture(&texture_desc).unwrap();
allocator.free(&allocation).unwrap();
info!("Allocation and deallocation of Texture was successful.");
}
// Test allocating acceleration structure
{
let empty_array = metal::Array::from_slice(&[]);
let acc_desc = metal::PrimitiveAccelerationStructureDescriptor::descriptor();
acc_desc.set_geometry_descriptors(empty_array);
let sizes = device.acceleration_structure_sizes_with_descriptor(&acc_desc);
let allocation_desc = AllocationCreateDesc::acceleration_structure_with_size(
&device,
"Test allocation (Acceleration structure)",
sizes.acceleration_structure_size,
gpu_allocator::MemoryLocation::GpuOnly,
);
let allocation = allocator.allocate(&allocation_desc).unwrap();
let _acc_structure = allocation.make_acceleration_structure();
allocator.free(&allocation).unwrap();
info!("Allocation and deallocation of Acceleration structure was successful.");
}
}


@@ -0,0 +1,198 @@
use std::default::Default;
use ash::vk;
use gpu_allocator::{
vulkan::{AllocationCreateDesc, AllocationScheme, Allocator, AllocatorCreateDesc},
MemoryLocation,
};
use log::info;
fn main() {
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("trace")).init();
let entry = unsafe { ash::Entry::load() }.unwrap();
// Create Vulkan instance
let instance = {
let app_name = c"Vulkan gpu-allocator test";
let appinfo = vk::ApplicationInfo::default()
.application_name(app_name)
.application_version(0)
.engine_name(app_name)
.engine_version(0)
.api_version(vk::make_api_version(0, 1, 0, 0));
let layer_names_raw = [c"VK_LAYER_KHRONOS_validation".as_ptr()];
let create_info = vk::InstanceCreateInfo::default()
.application_info(&appinfo)
.enabled_layer_names(&layer_names_raw);
unsafe {
entry
.create_instance(&create_info, None)
.expect("Instance creation error")
}
};
// Look for vulkan physical device
let (pdevice, queue_family_index) = {
let pdevices = unsafe {
instance
.enumerate_physical_devices()
.expect("Physical device error")
};
pdevices
.iter()
.find_map(|pdevice| {
unsafe { instance.get_physical_device_queue_family_properties(*pdevice) }
.iter()
.enumerate()
.find_map(|(index, &info)| {
let supports_graphics = info.queue_flags.contains(vk::QueueFlags::GRAPHICS);
if supports_graphics {
Some((*pdevice, index))
} else {
None
}
})
})
.expect("Couldn't find suitable device.")
};
// Create vulkan device
let device = {
let device_extension_names_raw = vec![];
let features = vk::PhysicalDeviceFeatures {
shader_clip_distance: 1,
..Default::default()
};
let priorities = [1.0];
let queue_info = vk::DeviceQueueCreateInfo::default()
.queue_family_index(queue_family_index as u32)
.queue_priorities(&priorities);
let create_info = vk::DeviceCreateInfo::default()
.queue_create_infos(std::slice::from_ref(&queue_info))
.enabled_extension_names(&device_extension_names_raw)
.enabled_features(&features);
unsafe { instance.create_device(pdevice, &create_info, None).unwrap() }
};
// Setting up the allocator
let mut allocator = Allocator::new(&AllocatorCreateDesc {
instance: instance.clone(),
device: device.clone(),
physical_device: pdevice,
debug_settings: Default::default(),
buffer_device_address: false,
allocation_sizes: Default::default(),
})
.unwrap();
// Test allocating Gpu Only memory
{
let test_buffer_info = vk::BufferCreateInfo::default()
.size(512)
.usage(vk::BufferUsageFlags::STORAGE_BUFFER)
.sharing_mode(vk::SharingMode::EXCLUSIVE);
let test_buffer = unsafe { device.create_buffer(&test_buffer_info, None) }.unwrap();
let requirements = unsafe { device.get_buffer_memory_requirements(test_buffer) };
let location = MemoryLocation::GpuOnly;
let allocation = allocator
.allocate(&AllocationCreateDesc {
requirements,
location,
linear: true,
allocation_scheme: AllocationScheme::GpuAllocatorManaged,
name: "Test allocation (Gpu Only)",
})
.unwrap();
unsafe {
device
.bind_buffer_memory(test_buffer, allocation.memory(), allocation.offset())
.unwrap()
};
allocator.free(allocation).unwrap();
unsafe { device.destroy_buffer(test_buffer, None) };
info!("Allocation and deallocation of GpuOnly memory was successful.");
}
// Test allocating Cpu to Gpu memory
{
let test_buffer_info = vk::BufferCreateInfo::default()
.size(512)
.usage(vk::BufferUsageFlags::STORAGE_BUFFER)
.sharing_mode(vk::SharingMode::EXCLUSIVE);
let test_buffer = unsafe { device.create_buffer(&test_buffer_info, None) }.unwrap();
let requirements = unsafe { device.get_buffer_memory_requirements(test_buffer) };
let location = MemoryLocation::CpuToGpu;
let allocation = allocator
.allocate(&AllocationCreateDesc {
requirements,
location,
linear: true,
allocation_scheme: AllocationScheme::GpuAllocatorManaged,
name: "Test allocation (Cpu to Gpu)",
})
.unwrap();
unsafe {
device
.bind_buffer_memory(test_buffer, allocation.memory(), allocation.offset())
.unwrap()
};
allocator.free(allocation).unwrap();
unsafe { device.destroy_buffer(test_buffer, None) };
info!("Allocation and deallocation of CpuToGpu memory was successful.");
}
// Test allocating Gpu to Cpu memory
{
let test_buffer_info = vk::BufferCreateInfo::default()
.size(512)
.usage(vk::BufferUsageFlags::STORAGE_BUFFER)
.sharing_mode(vk::SharingMode::EXCLUSIVE);
let test_buffer = unsafe { device.create_buffer(&test_buffer_info, None) }.unwrap();
let requirements = unsafe { device.get_buffer_memory_requirements(test_buffer) };
let location = MemoryLocation::GpuToCpu;
let allocation = allocator
.allocate(&AllocationCreateDesc {
requirements,
location,
linear: true,
allocation_scheme: AllocationScheme::GpuAllocatorManaged,
name: "Test allocation (Gpu to Cpu)",
})
.unwrap();
unsafe {
device
.bind_buffer_memory(test_buffer, allocation.memory(), allocation.offset())
.unwrap()
};
allocator.free(allocation).unwrap();
unsafe { device.destroy_buffer(test_buffer, None) };
info!("Allocation and deallocation of GpuToCpu memory was successful.");
}
drop(allocator); // Explicitly drop before destruction of device and instance.
unsafe { device.destroy_device(None) };
unsafe { instance.destroy_instance(None) };
}


@@ -0,0 +1,133 @@
#![deny(unsafe_code, clippy::unwrap_used)]
#[cfg(feature = "visualizer")]
pub(crate) mod visualizer;
use std::{backtrace::Backtrace, sync::Arc};
use log::{log, Level};
use super::{AllocationReport, AllocationType, SubAllocator, SubAllocatorBase};
use crate::{AllocationError, Result};
#[derive(Debug)]
pub(crate) struct DedicatedBlockAllocator {
size: u64,
allocated: u64,
/// Only used if [`crate::AllocatorDebugSettings::store_stack_traces`] is [`true`]
name: Option<String>,
backtrace: Arc<Backtrace>,
}
impl DedicatedBlockAllocator {
pub(crate) fn new(size: u64) -> Self {
Self {
size,
allocated: 0,
name: None,
backtrace: Arc::new(Backtrace::disabled()),
}
}
}
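// In practice this sub-allocator backs a single dedicated allocation: `allocate`
// only succeeds while the block is empty and the requested size equals the block
// size, it always hands out chunk id 1, and `free` with that id resets the block.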
impl SubAllocatorBase for DedicatedBlockAllocator {}
impl SubAllocator for DedicatedBlockAllocator {
fn allocate(
&mut self,
size: u64,
_alignment: u64,
_allocation_type: AllocationType,
_granularity: u64,
name: &str,
backtrace: Arc<Backtrace>,
) -> Result<(u64, std::num::NonZeroU64)> {
if self.allocated != 0 {
return Err(AllocationError::OutOfMemory);
}
if self.size != size {
return Err(AllocationError::Internal(
"DedicatedBlockAllocator size must match allocation size.".into(),
));
}
self.allocated = size;
self.name = Some(name.to_string());
self.backtrace = backtrace;
#[allow(clippy::unwrap_used)]
let dummy_id = std::num::NonZeroU64::new(1).unwrap();
Ok((0, dummy_id))
}
fn free(&mut self, chunk_id: Option<std::num::NonZeroU64>) -> Result<()> {
if chunk_id != std::num::NonZeroU64::new(1) {
Err(AllocationError::Internal("Chunk ID must be 1.".into()))
} else {
self.allocated = 0;
Ok(())
}
}
fn rename_allocation(
&mut self,
chunk_id: Option<std::num::NonZeroU64>,
name: &str,
) -> Result<()> {
if chunk_id != std::num::NonZeroU64::new(1) {
Err(AllocationError::Internal("Chunk ID must be 1.".into()))
} else {
self.name = Some(name.into());
Ok(())
}
}
fn report_memory_leaks(
&self,
log_level: Level,
memory_type_index: usize,
memory_block_index: usize,
) {
let empty = "".to_string();
let name = self.name.as_ref().unwrap_or(&empty);
log!(
log_level,
r#"leak detected: {{
memory type: {}
memory block: {}
dedicated allocation: {{
size: 0x{:x},
name: {},
backtrace: {}
}}
}}"#,
memory_type_index,
memory_block_index,
self.size,
name,
self.backtrace
)
}
fn report_allocations(&self) -> Vec<AllocationReport> {
vec![AllocationReport {
name: self
.name
.clone()
.unwrap_or_else(|| "<Unnamed Dedicated allocation>".to_owned()),
offset: 0,
size: self.size,
#[cfg(feature = "visualizer")]
backtrace: self.backtrace.clone(),
}]
}
fn allocated(&self) -> u64 {
self.allocated
}
fn supports_general_allocations(&self) -> bool {
false
}
}


@@ -0,0 +1,8 @@
use super::DedicatedBlockAllocator;
use crate::visualizer::SubAllocatorVisualizer;
impl SubAllocatorVisualizer for DedicatedBlockAllocator {
fn draw_base_info(&self, ui: &mut egui::Ui) {
ui.label("Dedicated Block");
}
}


@@ -0,0 +1,416 @@
#![deny(unsafe_code, clippy::unwrap_used)]
#[cfg(feature = "visualizer")]
pub(crate) mod visualizer;
use std::{
backtrace::Backtrace,
collections::{HashMap, HashSet},
sync::Arc,
};
use log::{log, Level};
use super::{AllocationReport, AllocationType, SubAllocator, SubAllocatorBase};
use crate::{AllocationError, Result};
const USE_BEST_FIT: bool = true;
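// When `USE_BEST_FIT` is true, `allocate` scans every free chunk and picks the
// smallest one that can hold the request; when false it takes the first chunk
// that fits and stops searching.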
fn align_down(val: u64, alignment: u64) -> u64 {
val & !(alignment - 1u64)
}
fn align_up(val: u64, alignment: u64) -> u64 {
align_down(val + alignment - 1u64, alignment)
}
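// For example, with an 8-byte alignment: align_down(13, 8) == 8 and
// align_up(13, 8) == 16, while already-aligned values pass through unchanged.
// Both helpers assume `alignment` is a power of two.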
#[derive(Debug)]
pub(crate) struct MemoryChunk {
pub(crate) chunk_id: std::num::NonZeroU64,
pub(crate) size: u64,
pub(crate) offset: u64,
pub(crate) allocation_type: AllocationType,
pub(crate) name: Option<String>,
/// Only used if [`crate::AllocatorDebugSettings::store_stack_traces`] is [`true`]
pub(crate) backtrace: Arc<Backtrace>,
next: Option<std::num::NonZeroU64>,
prev: Option<std::num::NonZeroU64>,
}
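// Chunks form an intrusive doubly linked list ordered by offset: `prev` and `next`
// hold the ids of the neighbouring chunks, which lets `free` merge a freed chunk
// with adjacent free neighbours without scanning the whole chunk map.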
#[derive(Debug)]
pub(crate) struct FreeListAllocator {
size: u64,
allocated: u64,
pub(crate) chunk_id_counter: u64,
pub(crate) chunks: HashMap<std::num::NonZeroU64, MemoryChunk>,
free_chunks: HashSet<std::num::NonZeroU64>,
}
/// Test if two suballocations will overlap the same page.
fn is_on_same_page(offset_a: u64, size_a: u64, offset_b: u64, page_size: u64) -> bool {
let end_a = offset_a + size_a - 1;
let end_page_a = align_down(end_a, page_size);
let start_b = offset_b;
let start_page_b = align_down(start_b, page_size);
end_page_a == start_page_b
}
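// For example, with a 4096-byte page: a chunk covering bytes 0..4096 and a chunk
// starting at offset 4096 land on different pages, while a chunk ending at byte
// 4097 shares a page with a chunk starting at offset 5000.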
/// Test if two allocation types will be conflicting or not.
fn has_granularity_conflict(type0: AllocationType, type1: AllocationType) -> bool {
if type0 == AllocationType::Free || type1 == AllocationType::Free {
return false;
}
type0 != type1
}
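// For example, a Linear (buffer) chunk next to a NonLinear (image) chunk
// conflicts, while two chunks of the same type or any neighbour marked Free
// never do.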
impl FreeListAllocator {
pub(crate) fn new(size: u64) -> Self {
#[allow(clippy::unwrap_used)]
let initial_chunk_id = std::num::NonZeroU64::new(1).unwrap();
let mut chunks = HashMap::default();
chunks.insert(
initial_chunk_id,
MemoryChunk {
chunk_id: initial_chunk_id,
size,
offset: 0,
allocation_type: AllocationType::Free,
name: None,
backtrace: Arc::new(Backtrace::disabled()),
prev: None,
next: None,
},
);
let mut free_chunks = HashSet::default();
free_chunks.insert(initial_chunk_id);
Self {
size,
allocated: 0,
// 0 is not allowed as a chunk ID, 1 is used by the initial chunk, next chunk is going to be 2.
            // The allocator will use the current counter value as the ID and then increment the counter.
chunk_id_counter: 2,
chunks,
free_chunks,
}
}
/// Generates a new unique chunk ID
fn get_new_chunk_id(&mut self) -> Result<std::num::NonZeroU64> {
if self.chunk_id_counter == u64::MAX {
// End of chunk id counter reached, no more allocations are possible.
return Err(AllocationError::OutOfMemory);
}
let id = self.chunk_id_counter;
self.chunk_id_counter += 1;
std::num::NonZeroU64::new(id).ok_or_else(|| {
AllocationError::Internal("New chunk id was 0, which is not allowed.".into())
})
}
    /// Finds the specified `chunk_id` in the list of free chunks and removes it from the list
fn remove_id_from_free_list(&mut self, chunk_id: std::num::NonZeroU64) {
self.free_chunks.remove(&chunk_id);
}
/// Merges two adjacent chunks. Right chunk will be merged into the left chunk
fn merge_free_chunks(
&mut self,
chunk_left: std::num::NonZeroU64,
chunk_right: std::num::NonZeroU64,
) -> Result<()> {
// Gather data from right chunk and remove it
let (right_size, right_next) = {
let chunk = self.chunks.remove(&chunk_right).ok_or_else(|| {
AllocationError::Internal("Chunk ID not present in chunk list.".into())
})?;
self.remove_id_from_free_list(chunk.chunk_id);
(chunk.size, chunk.next)
};
// Merge into left chunk
{
let chunk = self.chunks.get_mut(&chunk_left).ok_or_else(|| {
AllocationError::Internal("Chunk ID not present in chunk list.".into())
})?;
chunk.next = right_next;
chunk.size += right_size;
}
// Patch pointers
if let Some(right_next) = right_next {
let chunk = self.chunks.get_mut(&right_next).ok_or_else(|| {
AllocationError::Internal("Chunk ID not present in chunk list.".into())
})?;
chunk.prev = Some(chunk_left);
}
Ok(())
}
}
impl SubAllocatorBase for FreeListAllocator {}
impl SubAllocator for FreeListAllocator {
fn allocate(
&mut self,
size: u64,
alignment: u64,
allocation_type: AllocationType,
granularity: u64,
name: &str,
backtrace: Arc<Backtrace>,
) -> Result<(u64, std::num::NonZeroU64)> {
let free_size = self.size - self.allocated;
if size > free_size {
return Err(AllocationError::OutOfMemory);
}
let mut best_fit_id: Option<std::num::NonZeroU64> = None;
let mut best_offset = 0u64;
let mut best_aligned_size = 0u64;
let mut best_chunk_size = 0u64;
for current_chunk_id in self.free_chunks.iter() {
let current_chunk = self.chunks.get(current_chunk_id).ok_or_else(|| {
AllocationError::Internal(
"Chunk ID in free list is not present in chunk list.".into(),
)
})?;
if current_chunk.size < size {
continue;
}
let mut offset = align_up(current_chunk.offset, alignment);
if let Some(prev_idx) = current_chunk.prev {
let previous = self.chunks.get(&prev_idx).ok_or_else(|| {
AllocationError::Internal("Invalid previous chunk reference.".into())
})?;
if is_on_same_page(previous.offset, previous.size, offset, granularity)
&& has_granularity_conflict(previous.allocation_type, allocation_type)
{
offset = align_up(offset, granularity);
}
}
let padding = offset - current_chunk.offset;
let aligned_size = padding + size;
if aligned_size > current_chunk.size {
continue;
}
if let Some(next_idx) = current_chunk.next {
let next = self.chunks.get(&next_idx).ok_or_else(|| {
AllocationError::Internal("Invalid next chunk reference.".into())
})?;
if is_on_same_page(offset, size, next.offset, granularity)
&& has_granularity_conflict(allocation_type, next.allocation_type)
{
continue;
}
}
if USE_BEST_FIT {
if best_fit_id.is_none() || current_chunk.size < best_chunk_size {
best_fit_id = Some(*current_chunk_id);
best_aligned_size = aligned_size;
best_offset = offset;
best_chunk_size = current_chunk.size;
};
} else {
best_fit_id = Some(*current_chunk_id);
best_aligned_size = aligned_size;
best_offset = offset;
best_chunk_size = current_chunk.size;
break;
}
}
let first_fit_id = best_fit_id.ok_or(AllocationError::OutOfMemory)?;
let chunk_id = if best_chunk_size > best_aligned_size {
let new_chunk_id = self.get_new_chunk_id()?;
let new_chunk = {
let free_chunk = self.chunks.get_mut(&first_fit_id).ok_or_else(|| {
AllocationError::Internal("Chunk ID must be in chunk list.".into())
})?;
let new_chunk = MemoryChunk {
chunk_id: new_chunk_id,
size: best_aligned_size,
offset: free_chunk.offset,
allocation_type,
name: Some(name.to_string()),
backtrace,
prev: free_chunk.prev,
next: Some(first_fit_id),
};
free_chunk.prev = Some(new_chunk.chunk_id);
free_chunk.offset += best_aligned_size;
free_chunk.size -= best_aligned_size;
new_chunk
};
if let Some(prev_id) = new_chunk.prev {
let prev_chunk = self.chunks.get_mut(&prev_id).ok_or_else(|| {
AllocationError::Internal("Invalid previous chunk reference.".into())
})?;
prev_chunk.next = Some(new_chunk.chunk_id);
}
self.chunks.insert(new_chunk_id, new_chunk);
new_chunk_id
} else {
let chunk = self
.chunks
.get_mut(&first_fit_id)
.ok_or_else(|| AllocationError::Internal("Invalid chunk reference.".into()))?;
chunk.allocation_type = allocation_type;
chunk.name = Some(name.to_string());
chunk.backtrace = backtrace;
self.remove_id_from_free_list(first_fit_id);
first_fit_id
};
self.allocated += best_aligned_size;
Ok((best_offset, chunk_id))
}
fn free(&mut self, chunk_id: Option<std::num::NonZeroU64>) -> Result<()> {
let chunk_id = chunk_id
.ok_or_else(|| AllocationError::Internal("Chunk ID must be a valid value.".into()))?;
let (next_id, prev_id) = {
let chunk = self.chunks.get_mut(&chunk_id).ok_or_else(|| {
AllocationError::Internal(
"Attempting to free chunk that is not in chunk list.".into(),
)
})?;
chunk.allocation_type = AllocationType::Free;
chunk.name = None;
chunk.backtrace = Arc::new(Backtrace::disabled());
self.allocated -= chunk.size;
self.free_chunks.insert(chunk.chunk_id);
(chunk.next, chunk.prev)
};
if let Some(next_id) = next_id {
if self.chunks[&next_id].allocation_type == AllocationType::Free {
self.merge_free_chunks(chunk_id, next_id)?;
}
}
if let Some(prev_id) = prev_id {
if self.chunks[&prev_id].allocation_type == AllocationType::Free {
self.merge_free_chunks(prev_id, chunk_id)?;
}
}
Ok(())
}
fn rename_allocation(
&mut self,
chunk_id: Option<std::num::NonZeroU64>,
name: &str,
) -> Result<()> {
let chunk_id = chunk_id
.ok_or_else(|| AllocationError::Internal("Chunk ID must be a valid value.".into()))?;
let chunk = self.chunks.get_mut(&chunk_id).ok_or_else(|| {
AllocationError::Internal(
"Attempting to rename chunk that is not in chunk list.".into(),
)
})?;
if chunk.allocation_type == AllocationType::Free {
return Err(AllocationError::Internal(
"Attempting to rename a freed allocation.".into(),
));
}
chunk.name = Some(name.into());
Ok(())
}
fn report_memory_leaks(
&self,
log_level: Level,
memory_type_index: usize,
memory_block_index: usize,
) {
for (chunk_id, chunk) in self.chunks.iter() {
if chunk.allocation_type == AllocationType::Free {
continue;
}
let empty = "".to_string();
let name = chunk.name.as_ref().unwrap_or(&empty);
log!(
log_level,
r#"leak detected: {{
memory type: {}
memory block: {}
chunk: {{
chunk_id: {},
size: 0x{:x},
offset: 0x{:x},
allocation_type: {:?},
name: {},
backtrace: {}
}}
}}"#,
memory_type_index,
memory_block_index,
chunk_id,
chunk.size,
chunk.offset,
chunk.allocation_type,
name,
chunk.backtrace
);
}
}
fn report_allocations(&self) -> Vec<AllocationReport> {
self.chunks
.iter()
.filter(|(_key, chunk)| chunk.allocation_type != AllocationType::Free)
.map(|(_key, chunk)| AllocationReport {
name: chunk
.name
.clone()
.unwrap_or_else(|| "<Unnamed FreeList allocation>".to_owned()),
offset: chunk.offset,
size: chunk.size,
#[cfg(feature = "visualizer")]
backtrace: chunk.backtrace.clone(),
})
.collect::<Vec<_>>()
}
fn allocated(&self) -> u64 {
self.allocated
}
fn supports_general_allocations(&self) -> bool {
true
}
}


@@ -0,0 +1,25 @@
use super::FreeListAllocator;
use crate::visualizer::{
render_memory_chunks_ui, ColorScheme, MemoryChunksVisualizationSettings, SubAllocatorVisualizer,
};
impl SubAllocatorVisualizer for FreeListAllocator {
fn supports_visualization(&self) -> bool {
true
}
fn draw_base_info(&self, ui: &mut egui::Ui) {
ui.label("free list sub-allocator");
ui.label(format!("chunk count: {}", self.chunks.len()));
ui.label(format!("chunk id counter: {}", self.chunk_id_counter));
}
fn draw_visualization(
&self,
color_scheme: &ColorScheme,
ui: &mut egui::Ui,
settings: &MemoryChunksVisualizationSettings,
) {
render_memory_chunks_ui(ui, color_scheme, settings, self.size, self.chunks.values());
}
}


@@ -0,0 +1,162 @@
use std::{backtrace::Backtrace, fmt, ops::Range, sync::Arc};
use log::*;
use crate::result::*;
pub(crate) mod dedicated_block_allocator;
pub(crate) use dedicated_block_allocator::DedicatedBlockAllocator;
pub(crate) mod free_list_allocator;
pub(crate) use free_list_allocator::FreeListAllocator;
#[derive(PartialEq, Copy, Clone, Debug)]
#[repr(u8)]
pub(crate) enum AllocationType {
Free,
Linear,
NonLinear,
}
impl AllocationType {
#[cfg(feature = "visualizer")]
pub fn as_str(self) -> &'static str {
match self {
Self::Free => "Free",
Self::Linear => "Linear",
Self::NonLinear => "Non-Linear",
}
}
}
/// Describes an allocation in the [`AllocatorReport`].
#[derive(Clone)]
pub struct AllocationReport {
/// The name provided to the `allocate()` function.
pub name: String,
/// The offset in bytes of the allocation in its memory block.
pub offset: u64,
/// The size in bytes of the allocation.
pub size: u64,
#[cfg(feature = "visualizer")]
pub(crate) backtrace: Arc<Backtrace>,
}
/// Describes a memory block in the [`AllocatorReport`].
#[derive(Clone)]
pub struct MemoryBlockReport {
/// The size in bytes of this memory block.
pub size: u64,
/// The range of allocations in [`AllocatorReport::allocations`] that are associated
/// to this memory block.
pub allocations: Range<usize>,
}
/// A report that can be generated for informational purposes using `Allocator::generate_report()`.
#[derive(Clone)]
pub struct AllocatorReport {
/// All live allocations, sub-allocated from memory blocks.
pub allocations: Vec<AllocationReport>,
/// All memory blocks.
pub blocks: Vec<MemoryBlockReport>,
/// Sum of the memory used by all allocations, in bytes.
pub total_allocated_bytes: u64,
/// Sum of the memory reserved by all memory blocks including unallocated regions, in bytes.
pub total_reserved_bytes: u64,
}
impl fmt::Debug for AllocationReport {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let name = if !self.name.is_empty() {
self.name.as_str()
} else {
"--"
};
write!(f, "{name:?}: {}", fmt_bytes(self.size))
}
}
impl fmt::Debug for AllocatorReport {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut allocations = self.allocations.clone();
allocations.sort_by_key(|alloc| std::cmp::Reverse(alloc.size));
let max_num_allocations_to_print = f.precision().unwrap_or(usize::MAX);
allocations.truncate(max_num_allocations_to_print);
f.debug_struct("AllocatorReport")
.field(
"summary",
&std::format_args!(
"{} / {}",
fmt_bytes(self.total_allocated_bytes),
fmt_bytes(self.total_reserved_bytes)
),
)
.field("blocks", &self.blocks.len())
.field("allocations", &self.allocations.len())
.field("largest", &allocations.as_slice())
.finish()
}
}
#[cfg(feature = "visualizer")]
pub(crate) trait SubAllocatorBase: crate::visualizer::SubAllocatorVisualizer {}
#[cfg(not(feature = "visualizer"))]
pub(crate) trait SubAllocatorBase {}
pub(crate) trait SubAllocator: SubAllocatorBase + fmt::Debug + Sync + Send {
fn allocate(
&mut self,
size: u64,
alignment: u64,
allocation_type: AllocationType,
granularity: u64,
name: &str,
backtrace: Arc<Backtrace>,
) -> Result<(u64, std::num::NonZeroU64)>;
fn free(&mut self, chunk_id: Option<std::num::NonZeroU64>) -> Result<()>;
fn rename_allocation(
&mut self,
chunk_id: Option<std::num::NonZeroU64>,
name: &str,
) -> Result<()>;
fn report_memory_leaks(
&self,
log_level: Level,
memory_type_index: usize,
memory_block_index: usize,
);
fn report_allocations(&self) -> Vec<AllocationReport>;
#[must_use]
fn supports_general_allocations(&self) -> bool;
#[must_use]
fn allocated(&self) -> u64;
    /// Helper function: reports whether the suballocator is empty (i.e. it holds no allocations).
#[must_use]
fn is_empty(&self) -> bool {
self.allocated() == 0
}
}
pub(crate) fn fmt_bytes(mut amount: u64) -> String {
const SUFFIX: [&str; 5] = ["B", "KB", "MB", "GB", "TB"];
let mut idx = 0;
let mut print_amount = amount as f64;
loop {
if amount < 1024 {
return format!("{:.2} {}", print_amount, SUFFIX[idx]);
}
print_amount = amount as f64 / 1024.0;
amount /= 1024;
idx += 1;
}
}
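// For example: fmt_bytes(512) == "512.00 B", fmt_bytes(1536) == "1.50 KB" and
// fmt_bytes(3 * 1024 * 1024) == "3.00 MB".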

vendor/gpu-allocator/src/d3d12/mod.rs vendored Normal file

File diff suppressed because it is too large.

@@ -0,0 +1,249 @@
#![allow(clippy::new_without_default)]
use windows::Win32::Graphics::Direct3D12::*;
use super::Allocator;
use crate::visualizer::{
render_allocation_reports_ui, AllocationReportVisualizeSettings, ColorScheme,
MemoryChunksVisualizationSettings,
};
struct AllocatorVisualizerBlockWindow {
memory_type_index: usize,
block_index: usize,
settings: MemoryChunksVisualizationSettings,
}
impl AllocatorVisualizerBlockWindow {
fn new(memory_type_index: usize, block_index: usize) -> Self {
Self {
memory_type_index,
block_index,
settings: Default::default(),
}
}
}
pub struct AllocatorVisualizer {
selected_blocks: Vec<AllocatorVisualizerBlockWindow>,
color_scheme: ColorScheme,
breakdown_settings: AllocationReportVisualizeSettings,
}
fn format_heap_type(heap_type: D3D12_HEAP_TYPE) -> &'static str {
let names = [
"D3D12_HEAP_TYPE_DEFAULT_INVALID",
"D3D12_HEAP_TYPE_DEFAULT",
"D3D12_HEAP_TYPE_UPLOAD",
"D3D12_HEAP_TYPE_READBACK",
"D3D12_HEAP_TYPE_CUSTOM",
];
names[heap_type.0 as usize]
}
fn format_cpu_page_property(prop: D3D12_CPU_PAGE_PROPERTY) -> &'static str {
let names = [
"D3D12_CPU_PAGE_PROPERTY_UNKNOWN",
"D3D12_CPU_PAGE_PROPERTY_NOT_AVAILABLE",
"D3D12_CPU_PAGE_PROPERTY_WRITE_COMBINE",
"D3D12_CPU_PAGE_PROPERTY_WRITE_BACK",
];
names[prop.0 as usize]
}
fn format_memory_pool(pool: D3D12_MEMORY_POOL) -> &'static str {
let names = [
"D3D12_MEMORY_POOL_UNKNOWN",
"D3D12_MEMORY_POOL_L0",
"D3D12_MEMORY_POOL_L1",
];
names[pool.0 as usize]
}
impl AllocatorVisualizer {
pub fn new() -> Self {
Self {
selected_blocks: Vec::default(),
color_scheme: ColorScheme::default(),
breakdown_settings: Default::default(),
}
}
pub fn set_color_scheme(&mut self, color_scheme: ColorScheme) {
self.color_scheme = color_scheme;
}
pub fn render_memory_block_ui(&mut self, ui: &mut egui::Ui, alloc: &Allocator) {
ui.collapsing(
format!("Memory Types: ({} types)", alloc.memory_types.len()),
|ui| {
for (mem_type_idx, mem_type) in alloc.memory_types.iter().enumerate() {
ui.collapsing(
format!(
"Type: {} ({} blocks)",
mem_type_idx,
mem_type.memory_blocks.len()
),
|ui| {
let mut total_block_size = 0;
let mut total_allocated = 0;
for block in mem_type.memory_blocks.iter().flatten() {
total_block_size += block.size;
total_allocated += block.sub_allocator.allocated();
}
let active_block_count = mem_type
.memory_blocks
.iter()
.filter(|block| block.is_some())
.count();
ui.label(format!("heap category: {:?}", mem_type.heap_category));
ui.label(format!(
"Heap Type: {} ({})",
format_heap_type(mem_type.heap_properties.Type),
mem_type.heap_properties.Type.0
));
ui.label(format!(
"CpuPageProperty: {} ({})",
format_cpu_page_property(mem_type.heap_properties.CPUPageProperty),
mem_type.heap_properties.CPUPageProperty.0
));
ui.label(format!(
"MemoryPoolPreference: {} ({})",
format_memory_pool(mem_type.heap_properties.MemoryPoolPreference),
mem_type.heap_properties.MemoryPoolPreference.0
));
ui.label(format!("total block size: {} KiB", total_block_size / 1024));
ui.label(format!("total allocated: {} KiB", total_allocated / 1024));
ui.label(format!(
"committed resource allocations: {}",
mem_type.committed_allocations.num_allocations
));
ui.label(format!(
"total committed resource allocations: {} KiB",
mem_type.committed_allocations.total_size
));
ui.label(format!("block count: {}", active_block_count));
for (block_idx, block) in mem_type.memory_blocks.iter().enumerate() {
let Some(block) = block else { continue };
ui.collapsing(format!("Block: {}", block_idx), |ui| {
ui.label(format!("size: {} KiB", block.size / 1024));
ui.label(format!(
"allocated: {} KiB",
block.sub_allocator.allocated() / 1024
));
ui.label(format!("D3D12 heap: {:?}", block.heap));
block.sub_allocator.draw_base_info(ui);
if block.sub_allocator.supports_visualization()
&& ui.button("visualize").clicked()
&& !self.selected_blocks.iter().any(|x| {
x.memory_type_index == mem_type_idx
&& x.block_index == block_idx
})
{
self.selected_blocks.push(
AllocatorVisualizerBlockWindow::new(
mem_type_idx,
block_idx,
),
);
}
});
}
},
);
}
},
);
}
pub fn render_memory_block_window(
&mut self,
ctx: &egui::Context,
allocator: &Allocator,
open: &mut bool,
) {
egui::Window::new("Allocator Memory Blocks")
.open(open)
            .show(ctx, |ui| self.render_memory_block_ui(ui, allocator));
}
pub fn render_memory_block_visualization_windows(
&mut self,
ctx: &egui::Context,
allocator: &Allocator,
) {
// Draw each window.
let color_scheme = &self.color_scheme;
self.selected_blocks.retain_mut(|window| {
let mut open = true;
egui::Window::new(format!(
"Block Visualizer {}:{}",
window.memory_type_index, window.block_index
))
.default_size([1920.0 * 0.5, 1080.0 * 0.5])
.open(&mut open)
.show(ctx, |ui| {
let memblock = &allocator.memory_types[window.memory_type_index].memory_blocks
[window.block_index]
.as_ref();
if let Some(memblock) = memblock {
ui.label(format!(
"Memory type {}, Memory block {}, Block size: {} KiB",
window.memory_type_index,
window.block_index,
memblock.size / 1024
));
window
.settings
.ui(ui, allocator.debug_settings.store_stack_traces);
ui.separator();
memblock
.sub_allocator
.draw_visualization(color_scheme, ui, &window.settings);
} else {
ui.label("Deallocated memory block");
}
});
open
});
}
pub fn render_breakdown_ui(&mut self, ui: &mut egui::Ui, allocator: &Allocator) {
render_allocation_reports_ui(
ui,
&mut self.breakdown_settings,
allocator
.memory_types
.iter()
.flat_map(|memory_type| memory_type.memory_blocks.iter())
.flatten()
.flat_map(|memory_block| memory_block.sub_allocator.report_allocations()),
);
}
pub fn render_breakdown_window(
&mut self,
ctx: &egui::Context,
allocator: &Allocator,
open: &mut bool,
) {
egui::Window::new("Allocator Breakdown")
.open(open)
.show(ctx, |ui| self.render_breakdown_ui(ui, allocator));
}
}

vendor/gpu-allocator/src/lib.rs vendored Normal file

@@ -0,0 +1,333 @@
//! This crate provides a memory allocator for Vulkan, DirectX 12 and Metal, written fully in Rust.
//!
//! # [Windows-rs] and [winapi]
//!
//! `gpu-allocator` recently migrated from [winapi] to [windows-rs] but still provides convenient helpers to convert to and from [winapi] types, enabled when compiling with the `public-winapi` crate feature.
//!
//! [Windows-rs]: https://github.com/microsoft/windows-rs
//! [winapi]: https://github.com/retep998/winapi-rs
//!
//! # Setting up the Vulkan memory allocator
//!
//! ```no_run
//! # #[cfg(feature = "vulkan")]
//! # fn main() {
//! use gpu_allocator::vulkan::*;
//! # use ash::vk;
//! # let device = todo!();
//! # let instance = todo!();
//! # let physical_device = todo!();
//!
//! let mut allocator = Allocator::new(&AllocatorCreateDesc {
//! instance,
//! device,
//! physical_device,
//! debug_settings: Default::default(),
//! buffer_device_address: true, // Ideally, check the BufferDeviceAddressFeatures struct.
//! allocation_sizes: Default::default(),
//! });
//! # }
//! # #[cfg(not(feature = "vulkan"))]
//! # fn main() {}
//! ```
//!
//! # Simple Vulkan allocation example
//!
//! ```no_run
//! # #[cfg(feature = "vulkan")]
//! # fn main() {
//! use gpu_allocator::vulkan::*;
//! use gpu_allocator::MemoryLocation;
//! # use ash::vk;
//! # let device = todo!();
//! # let instance = todo!();
//! # let physical_device = todo!();
//! # let mut allocator = Allocator::new(&AllocatorCreateDesc {
//! # instance,
//! # device,
//! # physical_device,
//! # debug_settings: Default::default(),
//! # buffer_device_address: true, // Ideally, check the BufferDeviceAddressFeatures struct.
//! # allocation_sizes: Default::default(),
//! # }).unwrap();
//!
//! // Setup vulkan info
//! let vk_info = vk::BufferCreateInfo::default()
//! .size(512)
//! .usage(vk::BufferUsageFlags::STORAGE_BUFFER);
//!
//! let buffer = unsafe { device.create_buffer(&vk_info, None) }.unwrap();
//! let requirements = unsafe { device.get_buffer_memory_requirements(buffer) };
//!
//! let allocation = allocator
//! .allocate(&AllocationCreateDesc {
//! name: "Example allocation",
//! requirements,
//! location: MemoryLocation::CpuToGpu,
//! linear: true, // Buffers are always linear
//! allocation_scheme: AllocationScheme::GpuAllocatorManaged,
//! }).unwrap();
//!
//! // Bind memory to the buffer
//! unsafe { device.bind_buffer_memory(buffer, allocation.memory(), allocation.offset()).unwrap() };
//!
//! // Cleanup
//! allocator.free(allocation).unwrap();
//! unsafe { device.destroy_buffer(buffer, None) };
//! # }
//! # #[cfg(not(feature = "vulkan"))]
//! # fn main() {}
//! ```
//!
//! # Setting up the D3D12 memory allocator
//!
//! ```no_run
//! # #[cfg(feature = "d3d12")]
//! # fn main() {
//! use gpu_allocator::d3d12::*;
//! # let device = todo!();
//!
//! let mut allocator = Allocator::new(&AllocatorCreateDesc {
//! device: ID3D12DeviceVersion::Device(device),
//! debug_settings: Default::default(),
//! allocation_sizes: Default::default(),
//! });
//! # }
//! # #[cfg(not(feature = "d3d12"))]
//! # fn main() {}
//! ```
//!
//! # Simple d3d12 allocation example
//!
//! ```no_run
//! # #[cfg(feature = "d3d12")]
//! # fn main() -> windows::core::Result<()> {
//! use gpu_allocator::d3d12::*;
//! use gpu_allocator::MemoryLocation;
//! # use windows::Win32::Graphics::{Dxgi, Direct3D12};
//! # let device = todo!();
//!
//! # let mut allocator = Allocator::new(&AllocatorCreateDesc {
//! # device: ID3D12DeviceVersion::Device(device),
//! # debug_settings: Default::default(),
//! # allocation_sizes: Default::default(),
//! # }).unwrap();
//!
//! let buffer_desc = Direct3D12::D3D12_RESOURCE_DESC {
//! Dimension: Direct3D12::D3D12_RESOURCE_DIMENSION_BUFFER,
//! Alignment: 0,
//! Width: 512,
//! Height: 1,
//! DepthOrArraySize: 1,
//! MipLevels: 1,
//! Format: Dxgi::Common::DXGI_FORMAT_UNKNOWN,
//! SampleDesc: Dxgi::Common::DXGI_SAMPLE_DESC {
//! Count: 1,
//! Quality: 0,
//! },
//! Layout: Direct3D12::D3D12_TEXTURE_LAYOUT_ROW_MAJOR,
//! Flags: Direct3D12::D3D12_RESOURCE_FLAG_NONE,
//! };
//! let allocation_desc = AllocationCreateDesc::from_d3d12_resource_desc(
//! &allocator.device(),
//! &buffer_desc,
//! "Example allocation",
//! MemoryLocation::GpuOnly,
//! );
//! let allocation = allocator.allocate(&allocation_desc).unwrap();
//! let mut resource: Option<Direct3D12::ID3D12Resource> = None;
//! let hr = unsafe {
//! device.CreatePlacedResource(
//! allocation.heap(),
//! allocation.offset(),
//! &buffer_desc,
//! Direct3D12::D3D12_RESOURCE_STATE_COMMON,
//! None,
//! &mut resource,
//! )
//! }?;
//!
//! // Cleanup
//! drop(resource);
//! allocator.free(allocation).unwrap();
//! # Ok(())
//! # }
//! # #[cfg(not(feature = "d3d12"))]
//! # fn main() {}
//! ```
//!
//! # Setting up the Metal memory allocator
//!
//! ```no_run
//! # #[cfg(feature = "metal")]
//! # fn main() {
//! # use std::sync::Arc;
//! use gpu_allocator::metal::*;
//!
//! # let device = Arc::new(metal::Device::system_default().unwrap());
//! let mut allocator = Allocator::new(&AllocatorCreateDesc {
//! device: device.clone(),
//! debug_settings: Default::default(),
//! allocation_sizes: Default::default(),
//! });
//! # }
//! # #[cfg(not(feature = "metal"))]
//! # fn main() {}
//! ```
//!
//! # Simple Metal allocation example
//! ```no_run
//! # #[cfg(feature = "metal")]
//! # fn main() {
//! # use std::sync::Arc;
//! use gpu_allocator::metal::*;
//! use gpu_allocator::MemoryLocation;
//! # let device = Arc::new(metal::Device::system_default().unwrap());
//! # let mut allocator = Allocator::new(&AllocatorCreateDesc {
//! # device: device.clone(),
//! # debug_settings: Default::default(),
//! # allocation_sizes: Default::default(),
//! # })
//! # .unwrap();
//!
//! let allocation_desc = AllocationCreateDesc::buffer(
//! &device,
//! "Example allocation",
//! 512, // size in bytes
//! gpu_allocator::MemoryLocation::GpuOnly,
//! );
//! let allocation = allocator.allocate(&allocation_desc).unwrap();
//! let resource = allocation.make_buffer().unwrap();
//!
//! // Cleanup
//! drop(resource);
//! allocator.free(&allocation).unwrap();
//! # }
//! # #[cfg(not(feature = "metal"))]
//! # fn main() {}
//! ```
mod result;
pub use result::*;
pub(crate) mod allocator;
pub use allocator::{AllocationReport, AllocatorReport, MemoryBlockReport};
#[cfg(feature = "visualizer")]
pub mod visualizer;
#[cfg(feature = "vulkan")]
pub mod vulkan;
#[cfg(all(windows, feature = "d3d12"))]
pub mod d3d12;
#[cfg(all(any(target_os = "macos", target_os = "ios"), feature = "metal"))]
pub mod metal;
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum MemoryLocation {
/// The allocated resource is stored at an unknown memory location; let the driver decide what's the best location
Unknown,
/// Store the allocation in GPU only accessible memory - typically this is the faster GPU resource and this should be
/// where most of the allocations live.
GpuOnly,
/// Memory useful for uploading data to the GPU and potentially for constant buffers
CpuToGpu,
/// Memory useful for CPU readback of data
GpuToCpu,
}
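// As a rough guide: GpuOnly suits resources that are written once (often via a
// staging copy) and then only read by the GPU, CpuToGpu suits per-frame upload
// and staging buffers, and GpuToCpu suits readback of results to the CPU.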
#[derive(Copy, Clone, Debug)]
pub struct AllocatorDebugSettings {
/// Logs out debugging information about the various heaps the current device has on startup
pub log_memory_information: bool,
/// Logs out all memory leaks on shutdown with log level Warn
pub log_leaks_on_shutdown: bool,
    /// Stores a copy of the full backtrace for every allocation made; this makes it easier to debug
    /// memory leaks and other allocation issues, but storing stack traces has a RAM overhead and
    /// should be disabled in shipping applications.
pub store_stack_traces: bool,
    /// Log out every allocation as it's being made with log level Debug; rather spammy, so off by default
    pub log_allocations: bool,
    /// Log out every free as it's being called with log level Debug; rather spammy, so off by default
    pub log_frees: bool,
/// Log out stack traces when either `log_allocations` or `log_frees` is enabled.
pub log_stack_traces: bool,
}
impl Default for AllocatorDebugSettings {
fn default() -> Self {
Self {
log_memory_information: false,
log_leaks_on_shutdown: true,
store_stack_traces: false,
log_allocations: false,
log_frees: false,
log_stack_traces: false,
}
}
}
/// The sizes of the memory blocks that the allocator will create.
///
/// Useful for tuning the allocator to your application's needs. For example, most games will be fine with
/// the default values, but an app might want to use smaller block sizes to reduce the amount of memory used.
///
/// Sizes are clamped between 4MB and 256MB and rounded up to the nearest multiple of 4MB for alignment reasons.
#[derive(Clone, Copy, Debug)]
pub struct AllocationSizes {
/// The size of the memory blocks that will be created for the GPU only memory type.
///
/// Defaults to 256MB.
device_memblock_size: u64,
/// The size of the memory blocks that will be created for the CPU visible memory types.
///
/// Defaults to 64MB.
host_memblock_size: u64,
}
impl AllocationSizes {
pub fn new(device_memblock_size: u64, host_memblock_size: u64) -> Self {
const FOUR_MB: u64 = 4 * 1024 * 1024;
const TWO_HUNDRED_AND_FIFTY_SIX_MB: u64 = 256 * 1024 * 1024;
let mut device_memblock_size =
device_memblock_size.clamp(FOUR_MB, TWO_HUNDRED_AND_FIFTY_SIX_MB);
let mut host_memblock_size =
host_memblock_size.clamp(FOUR_MB, TWO_HUNDRED_AND_FIFTY_SIX_MB);
if device_memblock_size % FOUR_MB != 0 {
let val = device_memblock_size / FOUR_MB + 1;
device_memblock_size = val * FOUR_MB;
log::warn!(
"Device memory block size must be a multiple of 4MB, clamping to {}MB",
device_memblock_size / 1024 / 1024
)
}
if host_memblock_size % FOUR_MB != 0 {
let val = host_memblock_size / FOUR_MB + 1;
host_memblock_size = val * FOUR_MB;
log::warn!(
"Host memory block size must be a multiple of 4MB, clamping to {}MB",
host_memblock_size / 1024 / 1024
)
}
Self {
device_memblock_size,
host_memblock_size,
}
}
}
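// For example, `AllocationSizes::new(72 * 1024 * 1024, 5 * 1024 * 1024)` keeps the
// 72 MB device block size (already a multiple of 4 MB) but rounds the host block
// size up to 8 MB, logging a warning about the adjustment.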
impl Default for AllocationSizes {
fn default() -> Self {
Self {
device_memblock_size: 256 * 1024 * 1024,
host_memblock_size: 64 * 1024 * 1024,
}
}
}

vendor/gpu-allocator/src/metal/mod.rs vendored Normal file

@@ -0,0 +1,522 @@
#![deny(clippy::unimplemented, clippy::unwrap_used, clippy::ok_expect)]
use std::{backtrace::Backtrace, sync::Arc};
use log::debug;
use crate::{
allocator::{self, AllocatorReport, MemoryBlockReport},
AllocationError, AllocationSizes, AllocatorDebugSettings, MemoryLocation, Result,
};
fn memory_location_to_metal(location: MemoryLocation) -> metal::MTLResourceOptions {
match location {
MemoryLocation::GpuOnly => metal::MTLResourceOptions::StorageModePrivate,
MemoryLocation::CpuToGpu | MemoryLocation::GpuToCpu | MemoryLocation::Unknown => {
metal::MTLResourceOptions::StorageModeShared
}
}
}
#[derive(Debug)]
pub struct Allocation {
chunk_id: Option<std::num::NonZeroU64>,
offset: u64,
size: u64,
memory_block_index: usize,
memory_type_index: usize,
heap: Arc<metal::Heap>,
name: Option<Box<str>>,
}
impl Allocation {
pub fn heap(&self) -> &metal::Heap {
self.heap.as_ref()
}
pub fn make_buffer(&self) -> Option<metal::Buffer> {
let resource =
self.heap
.new_buffer_with_offset(self.size, self.heap.resource_options(), self.offset);
if let Some(resource) = &resource {
if let Some(name) = &self.name {
resource.set_label(name);
}
}
resource
}
pub fn make_texture(&self, desc: &metal::TextureDescriptor) -> Option<metal::Texture> {
let resource = self.heap.new_texture_with_offset(desc, self.offset);
if let Some(resource) = &resource {
if let Some(name) = &self.name {
resource.set_label(name);
}
}
resource
}
pub fn make_acceleration_structure(&self) -> Option<metal::AccelerationStructure> {
let resource = self
.heap
.new_acceleration_structure_with_size_offset(self.size, self.offset);
if let Some(resource) = &resource {
if let Some(name) = &self.name {
resource.set_label(name);
}
}
resource
}
fn is_null(&self) -> bool {
self.chunk_id.is_none()
}
}
#[derive(Clone, Debug)]
pub struct AllocationCreateDesc<'a> {
/// Name of the allocation, for tracking and debugging purposes
pub name: &'a str,
/// Location where the memory allocation should be stored
pub location: MemoryLocation,
pub size: u64,
pub alignment: u64,
}
impl<'a> AllocationCreateDesc<'a> {
pub fn buffer(
device: &metal::Device,
name: &'a str,
length: u64,
location: MemoryLocation,
) -> Self {
let size_and_align =
device.heap_buffer_size_and_align(length, memory_location_to_metal(location));
Self {
name,
location,
size: size_and_align.size,
alignment: size_and_align.align,
}
}
pub fn texture(device: &metal::Device, name: &'a str, desc: &metal::TextureDescriptor) -> Self {
let size_and_align = device.heap_texture_size_and_align(desc);
Self {
name,
location: match desc.storage_mode() {
metal::MTLStorageMode::Shared
| metal::MTLStorageMode::Managed
| metal::MTLStorageMode::Memoryless => MemoryLocation::Unknown,
metal::MTLStorageMode::Private => MemoryLocation::GpuOnly,
},
size: size_and_align.size,
alignment: size_and_align.align,
}
}
pub fn acceleration_structure_with_size(
device: &metal::Device,
name: &'a str,
size: u64,
location: MemoryLocation,
) -> Self {
let size_and_align = device.heap_acceleration_structure_size_and_align_with_size(size);
Self {
name,
location,
size: size_and_align.size,
alignment: size_and_align.align,
}
}
}
pub struct Allocator {
device: Arc<metal::Device>,
debug_settings: AllocatorDebugSettings,
memory_types: Vec<MemoryType>,
allocation_sizes: AllocationSizes,
}
#[derive(Debug)]
pub struct AllocatorCreateDesc {
pub device: Arc<metal::Device>,
pub debug_settings: AllocatorDebugSettings,
pub allocation_sizes: AllocationSizes,
}
#[derive(Debug)]
pub struct CommittedAllocationStatistics {
pub num_allocations: usize,
pub total_size: u64,
}
#[derive(Debug)]
struct MemoryBlock {
heap: Arc<metal::Heap>,
size: u64,
sub_allocator: Box<dyn allocator::SubAllocator>,
}
impl MemoryBlock {
fn new(
device: &Arc<metal::Device>,
size: u64,
heap_descriptor: &metal::HeapDescriptor,
dedicated: bool,
memory_location: MemoryLocation,
) -> Result<Self> {
heap_descriptor.set_size(size);
let heap = Arc::new(device.new_heap(heap_descriptor));
heap.set_label(&format!("MemoryBlock {memory_location:?}"));
let sub_allocator: Box<dyn allocator::SubAllocator> = if dedicated {
Box::new(allocator::DedicatedBlockAllocator::new(size))
} else {
Box::new(allocator::FreeListAllocator::new(size))
};
Ok(Self {
heap,
size,
sub_allocator,
})
}
}
#[derive(Debug)]
struct MemoryType {
memory_blocks: Vec<Option<MemoryBlock>>,
_committed_allocations: CommittedAllocationStatistics,
memory_location: MemoryLocation,
heap_properties: metal::HeapDescriptor,
memory_type_index: usize,
active_general_blocks: usize,
}
impl MemoryType {
fn allocate(
&mut self,
device: &Arc<metal::Device>,
desc: &AllocationCreateDesc<'_>,
backtrace: Arc<Backtrace>,
allocation_sizes: &AllocationSizes,
) -> Result<Allocation> {
let allocation_type = allocator::AllocationType::Linear;
let memblock_size = if self.heap_properties.storage_mode() == metal::MTLStorageMode::Private
{
allocation_sizes.device_memblock_size
} else {
allocation_sizes.host_memblock_size
};
let size = desc.size;
let alignment = desc.alignment;
// Create a dedicated block for large memory allocations
if size > memblock_size {
let mem_block = MemoryBlock::new(
device,
size,
&self.heap_properties,
true,
self.memory_location,
)?;
let block_index = self.memory_blocks.iter().position(|block| block.is_none());
let block_index = match block_index {
Some(i) => {
self.memory_blocks[i].replace(mem_block);
i
}
None => {
self.memory_blocks.push(Some(mem_block));
self.memory_blocks.len() - 1
}
};
let mem_block = self.memory_blocks[block_index]
.as_mut()
.ok_or_else(|| AllocationError::Internal("Memory block must be Some".into()))?;
let (offset, chunk_id) = mem_block.sub_allocator.allocate(
size,
alignment,
allocation_type,
1,
desc.name,
backtrace,
)?;
return Ok(Allocation {
chunk_id: Some(chunk_id),
size,
offset,
memory_block_index: block_index,
memory_type_index: self.memory_type_index,
heap: mem_block.heap.clone(),
name: Some(desc.name.into()),
});
}
let mut empty_block_index = None;
for (mem_block_i, mem_block) in self.memory_blocks.iter_mut().enumerate().rev() {
if let Some(mem_block) = mem_block {
let allocation = mem_block.sub_allocator.allocate(
size,
alignment,
allocation_type,
1,
desc.name,
backtrace.clone(),
);
match allocation {
Ok((offset, chunk_id)) => {
return Ok(Allocation {
chunk_id: Some(chunk_id),
offset,
size,
memory_block_index: mem_block_i,
memory_type_index: self.memory_type_index,
heap: mem_block.heap.clone(),
name: Some(desc.name.into()),
});
}
Err(AllocationError::OutOfMemory) => {} // Block is full, continue search.
Err(err) => return Err(err), // Unhandled error, return.
}
} else if empty_block_index.is_none() {
empty_block_index = Some(mem_block_i);
}
}
let new_memory_block = MemoryBlock::new(
device,
memblock_size,
&self.heap_properties,
false,
self.memory_location,
)?;
let new_block_index = if let Some(block_index) = empty_block_index {
self.memory_blocks[block_index] = Some(new_memory_block);
block_index
} else {
self.memory_blocks.push(Some(new_memory_block));
self.memory_blocks.len() - 1
};
self.active_general_blocks += 1;
let mem_block = self.memory_blocks[new_block_index]
.as_mut()
.ok_or_else(|| AllocationError::Internal("Memory block must be Some".into()))?;
let allocation = mem_block.sub_allocator.allocate(
size,
alignment,
allocation_type,
1,
desc.name,
backtrace,
);
let (offset, chunk_id) = match allocation {
Err(AllocationError::OutOfMemory) => Err(AllocationError::Internal(
"Allocation that must succeed failed. This is a bug in the allocator.".into(),
)),
a => a,
}?;
Ok(Allocation {
chunk_id: Some(chunk_id),
offset,
size,
memory_block_index: new_block_index,
memory_type_index: self.memory_type_index,
heap: mem_block.heap.clone(),
name: Some(desc.name.into()),
})
}
fn free(&mut self, allocation: &Allocation) -> Result<()> {
let block_idx = allocation.memory_block_index;
let mem_block = self.memory_blocks[block_idx]
.as_mut()
.ok_or_else(|| AllocationError::Internal("Memory block must be Some.".into()))?;
mem_block.sub_allocator.free(allocation.chunk_id)?;
if mem_block.sub_allocator.is_empty() {
if mem_block.sub_allocator.supports_general_allocations() {
if self.active_general_blocks > 1 {
let block = self.memory_blocks[block_idx].take();
if block.is_none() {
return Err(AllocationError::Internal(
"Memory block must be Some.".into(),
));
}
// Note that `block` will be destroyed on `drop` here
self.active_general_blocks -= 1;
}
} else {
let block = self.memory_blocks[block_idx].take();
if block.is_none() {
return Err(AllocationError::Internal(
"Memory block must be Some.".into(),
));
}
// Note that `block` will be destroyed on `drop` here
}
}
Ok(())
}
}
impl Allocator {
pub fn new(desc: &AllocatorCreateDesc) -> Result<Self> {
let heap_types = [
(MemoryLocation::GpuOnly, {
let heap_desc = metal::HeapDescriptor::new();
heap_desc.set_cpu_cache_mode(metal::MTLCPUCacheMode::DefaultCache);
heap_desc.set_storage_mode(metal::MTLStorageMode::Private);
heap_desc.set_heap_type(metal::MTLHeapType::Placement);
heap_desc
}),
(MemoryLocation::CpuToGpu, {
let heap_desc = metal::HeapDescriptor::new();
heap_desc.set_cpu_cache_mode(metal::MTLCPUCacheMode::WriteCombined);
heap_desc.set_storage_mode(metal::MTLStorageMode::Shared);
heap_desc.set_heap_type(metal::MTLHeapType::Placement);
heap_desc
}),
(MemoryLocation::GpuToCpu, {
let heap_desc = metal::HeapDescriptor::new();
heap_desc.set_cpu_cache_mode(metal::MTLCPUCacheMode::DefaultCache);
heap_desc.set_storage_mode(metal::MTLStorageMode::Shared);
heap_desc.set_heap_type(metal::MTLHeapType::Placement);
heap_desc
}),
];
let memory_types = heap_types
.into_iter()
.enumerate()
.map(|(i, (memory_location, heap_descriptor))| MemoryType {
memory_blocks: vec![],
_committed_allocations: CommittedAllocationStatistics {
num_allocations: 0,
total_size: 0,
},
memory_location,
heap_properties: heap_descriptor,
memory_type_index: i,
active_general_blocks: 0,
})
.collect();
Ok(Self {
device: desc.device.clone(),
debug_settings: desc.debug_settings,
memory_types,
allocation_sizes: desc.allocation_sizes,
})
}
pub fn allocate(&mut self, desc: &AllocationCreateDesc<'_>) -> Result<Allocation> {
let size = desc.size;
let alignment = desc.alignment;
let backtrace = Arc::new(if self.debug_settings.store_stack_traces {
Backtrace::force_capture()
} else {
Backtrace::disabled()
});
if self.debug_settings.log_allocations {
debug!(
"Allocating `{}` of {} bytes with an alignment of {}.",
&desc.name, size, alignment
);
if self.debug_settings.log_stack_traces {
let backtrace = Backtrace::force_capture();
debug!("Allocation stack trace: {}", backtrace);
}
}
if size == 0 || !alignment.is_power_of_two() {
return Err(AllocationError::InvalidAllocationCreateDesc);
}
// Find memory type
let memory_type = self
.memory_types
.iter_mut()
.find(|memory_type| {
// Is location compatible
desc.location == MemoryLocation::Unknown
|| desc.location == memory_type.memory_location
})
.ok_or(AllocationError::NoCompatibleMemoryTypeFound)?;
memory_type.allocate(&self.device, desc, backtrace, &self.allocation_sizes)
}
pub fn free(&mut self, allocation: &Allocation) -> Result<()> {
if self.debug_settings.log_frees {
let name = allocation.name.as_deref().unwrap_or("<null>");
debug!("Freeing `{}`.", name);
if self.debug_settings.log_stack_traces {
let backtrace = Backtrace::force_capture();
debug!("Free stack trace: {}", backtrace);
}
}
if allocation.is_null() {
return Ok(());
}
self.memory_types[allocation.memory_type_index].free(allocation)?;
Ok(())
}
pub fn get_heaps(&self) -> Vec<&metal::HeapRef> {
// Get all memory blocks
let mut heaps: Vec<&metal::HeapRef> = Vec::new();
for memory_type in &self.memory_types {
for block in memory_type.memory_blocks.iter().flatten() {
heaps.push(block.heap.as_ref());
}
}
heaps
}
pub fn generate_report(&self) -> AllocatorReport {
let mut allocations = vec![];
let mut blocks = vec![];
let mut total_reserved_bytes = 0;
for memory_type in &self.memory_types {
for block in memory_type.memory_blocks.iter().flatten() {
total_reserved_bytes += block.size;
let first_allocation = allocations.len();
allocations.extend(block.sub_allocator.report_allocations());
blocks.push(MemoryBlockReport {
size: block.size,
allocations: first_allocation..allocations.len(),
});
}
}
let total_allocated_bytes = allocations.iter().map(|report| report.size).sum();
AllocatorReport {
allocations,
blocks,
total_allocated_bytes,
total_reserved_bytes,
}
}
}

vendor/gpu-allocator/src/result.rs vendored Normal file

@@ -0,0 +1,25 @@
use thiserror::Error;
#[derive(Error, Debug)]
pub enum AllocationError {
#[error("Out of memory")]
OutOfMemory,
#[error("Failed to map memory: {0}")]
FailedToMap(String),
#[error("No compatible memory type available")]
NoCompatibleMemoryTypeFound,
#[error("Invalid AllocationCreateDesc")]
InvalidAllocationCreateDesc,
#[error("Invalid AllocatorCreateDesc {0}")]
InvalidAllocatorCreateDesc(String),
#[error("Internal error: {0}")]
Internal(String),
#[error("Initial `BARRIER_LAYOUT` needs at least `Device10`")]
BarrierLayoutNeedsDevice10,
#[error("Castable formats require enhanced barriers")]
CastableFormatsRequiresEnhancedBarriers,
#[error("Castable formats require at least `Device12`")]
CastableFormatsRequiresAtLeastDevice12,
}
pub type Result<V, E = AllocationError> = ::std::result::Result<V, E>;
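// Illustrative sketch of how a caller might distinguish a recoverable out-of-memory
// condition from other allocator errors (`allocator` and `desc` are assumed to come
// from the caller's own setup):
//
//     match allocator.allocate(&desc) {
//         Ok(allocation) => { /* bind and use the allocation */ }
//         Err(AllocationError::OutOfMemory) => { /* release something and retry */ }
//         Err(err) => panic!("allocator error: {err}"),
//     }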


@@ -0,0 +1,141 @@
use std::backtrace::BacktraceStatus;
use egui::{Label, Response, Sense, Ui, WidgetText};
use egui_extras::{Column, TableBuilder};
use crate::allocator::{fmt_bytes, AllocationReport};
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
pub(crate) enum AllocationReportVisualizeSorting {
#[default]
None,
Idx,
Name,
Size,
}
#[derive(Debug, Default)]
pub(crate) struct AllocationReportVisualizeSettings {
pub filter: String,
pub sorting: AllocationReportVisualizeSorting,
pub ascending: bool,
}
pub(crate) fn render_allocation_reports_ui(
ui: &mut Ui,
settings: &mut AllocationReportVisualizeSettings,
allocations: impl IntoIterator<Item = AllocationReport>,
) {
ui.horizontal(|ui| {
ui.label("Filter");
ui.text_edit_singleline(&mut settings.filter);
});
let breakdown_filter = settings.filter.to_lowercase();
let mut allocations = allocations
.into_iter()
.enumerate()
.filter(|(_, report)| report.name.to_lowercase().contains(&breakdown_filter))
.collect::<Vec<_>>();
let total_size_under_filter: u64 = allocations.iter().map(|a| a.1.size).sum();
ui.label(format!("Total: {}", fmt_bytes(total_size_under_filter)));
let row_height = ui.text_style_height(&egui::TextStyle::Body);
let table = TableBuilder::new(ui)
.striped(true)
.resizable(true)
.column(Column::exact(30.0))
.column(Column::initial(300.0).at_least(200.0).clip(true))
.column(Column::exact(70.0));
fn header_button(ui: &mut Ui, label: &str) -> Response {
let label = WidgetText::from(label).strong();
let label = Label::new(label).sense(Sense::click());
ui.add(label)
}
let table = table.header(row_height, |mut row| {
row.col(|ui| {
if header_button(ui, "Idx").clicked() {
if settings.sorting == AllocationReportVisualizeSorting::Idx {
settings.ascending = !settings.ascending;
} else {
settings.sorting = AllocationReportVisualizeSorting::Idx;
settings.ascending = false;
}
}
});
row.col(|ui| {
if header_button(ui, "Name").clicked() {
if settings.sorting == AllocationReportVisualizeSorting::Name {
settings.ascending = !settings.ascending;
} else {
settings.sorting = AllocationReportVisualizeSorting::Name;
settings.ascending = false;
}
}
});
row.col(|ui| {
if header_button(ui, "Size").clicked() {
if settings.sorting == AllocationReportVisualizeSorting::Size {
settings.ascending = !settings.ascending;
} else {
settings.sorting = AllocationReportVisualizeSorting::Size;
settings.ascending = false;
}
}
});
});
match (settings.sorting, settings.ascending) {
(AllocationReportVisualizeSorting::None, _) => {}
(AllocationReportVisualizeSorting::Idx, true) => allocations.sort_by_key(|(idx, _)| *idx),
(AllocationReportVisualizeSorting::Idx, false) => {
allocations.sort_by_key(|(idx, _)| std::cmp::Reverse(*idx))
}
(AllocationReportVisualizeSorting::Name, true) => {
allocations.sort_by(|(_, alloc1), (_, alloc2)| alloc1.name.cmp(&alloc2.name))
}
(AllocationReportVisualizeSorting::Name, false) => {
allocations.sort_by(|(_, alloc1), (_, alloc2)| alloc1.name.cmp(&alloc2.name).reverse())
}
(AllocationReportVisualizeSorting::Size, true) => {
allocations.sort_by_key(|(_, alloc)| alloc.size)
}
(AllocationReportVisualizeSorting::Size, false) => {
allocations.sort_by_key(|(_, alloc)| std::cmp::Reverse(alloc.size))
}
}
table.body(|mut body| {
for (idx, alloc) in allocations {
body.row(row_height, |mut row| {
let AllocationReport {
name,
size,
backtrace,
..
} = alloc;
row.col(|ui| {
ui.label(idx.to_string());
});
let resp = row.col(|ui| {
ui.label(name);
});
if backtrace.status() == BacktraceStatus::Captured {
resp.1.on_hover_ui(|ui| {
ui.label(backtrace.to_string());
});
}
row.col(|ui| {
ui.label(fmt_bytes(size));
});
});
}
});
}
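// Illustrative usage sketch (hypothetical helper, not exported by the crate): the
// breakdown table is immediate-mode, so the caller keeps one
// `AllocationReportVisualizeSettings` alive across frames (to preserve the filter
// text and sort order) and feeds the function a fresh list of `AllocationReport`s
// each frame, e.g. the `allocations` field of an `AllocatorReport`.
#[allow(dead_code)]
pub(crate) fn example_breakdown_panel(
    ui: &mut Ui,
    settings: &mut AllocationReportVisualizeSettings,
    reports: Vec<AllocationReport>,
) {
    // `render_allocation_reports_ui` accepts any `IntoIterator` of owned reports;
    // a `Vec` regenerated per frame is the simplest choice here.
    render_allocation_reports_ui(ui, settings, reports);
}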

133
vendor/gpu-allocator/src/visualizer/memory_chunks.rs vendored Normal file
View File

@@ -0,0 +1,133 @@
use std::backtrace::BacktraceStatus;
use egui::{Color32, DragValue, Rect, ScrollArea, Sense, Ui, Vec2};
use super::ColorScheme;
use crate::allocator::free_list_allocator::MemoryChunk;
pub(crate) struct MemoryChunksVisualizationSettings {
pub width_in_bytes: u64,
pub show_backtraces: bool,
}
impl Default for MemoryChunksVisualizationSettings {
fn default() -> Self {
Self {
width_in_bytes: 1024,
show_backtraces: false,
}
}
}
impl MemoryChunksVisualizationSettings {
pub fn ui(&mut self, ui: &mut Ui, store_stack_traces: bool) {
if store_stack_traces {
ui.checkbox(&mut self.show_backtraces, "Show backtraces");
}
// Slider for changing the 'zoom' level of the visualizer.
const BYTES_PER_UNIT_MIN: i32 = 1;
const BYTES_PER_UNIT_MAX: i32 = 1024 * 1024;
ui.horizontal(|ui| {
ui.add(
DragValue::new(&mut self.width_in_bytes)
.clamp_range(BYTES_PER_UNIT_MIN..=BYTES_PER_UNIT_MAX)
.speed(10.0),
);
ui.label("Bytes per line");
});
}
}
pub(crate) fn render_memory_chunks_ui<'a>(
ui: &mut Ui,
color_scheme: &ColorScheme,
settings: &MemoryChunksVisualizationSettings,
total_size_in_bytes: u64,
data: impl IntoIterator<Item = &'a MemoryChunk>,
) {
let line_height = ui.text_style_height(&egui::TextStyle::Body);
let number_of_rows =
(total_size_in_bytes as f32 / settings.width_in_bytes as f32).ceil() as usize;
ScrollArea::new([false, true]).show_rows(ui, line_height, number_of_rows, |ui, range| {
// Let range be in bytes
let start_in_bytes = range.start as u64 * settings.width_in_bytes;
let end_in_bytes = range.end as u64 * settings.width_in_bytes;
let mut data = data
.into_iter()
.filter(|chunk| {
(chunk.offset + chunk.size) > start_in_bytes && chunk.offset < end_in_bytes
})
.collect::<Vec<_>>();
data.sort_by_key(|chunk| chunk.offset);
let screen_width = ui.available_width();
let mut cursor_idx = 0;
let mut bytes_required = data[cursor_idx].offset + data[cursor_idx].size - start_in_bytes;
for _ in range {
ui.horizontal(|ui| {
let mut bytes_left = settings.width_in_bytes;
let mut cursor = ui.cursor().min;
while cursor_idx < data.len() && bytes_left > 0 {
// Block is depleted, so reset for more chunks
while bytes_required == 0 {
cursor_idx += 1;
if cursor_idx < data.len() {
bytes_required = data[cursor_idx].size;
}
continue;
}
let bytes_used = bytes_required.min(bytes_left);
let width_used =
bytes_used as f32 * screen_width / settings.width_in_bytes as f32;
// Draw the rectangle
let resp = ui.allocate_rect(
Rect::from_min_size(cursor, Vec2::new(width_used, line_height)),
Sense::click(),
);
if ui.is_rect_visible(resp.rect) {
ui.painter().rect(
resp.rect,
egui::Rounding::ZERO,
color_scheme
.get_allocation_type_color(data[cursor_idx].allocation_type),
egui::Stroke::new(1.0, Color32::BLACK),
);
resp.on_hover_ui_at_pointer(|ui| {
let chunk = &data[cursor_idx];
ui.label(format!("id: {}", chunk.chunk_id));
ui.label(format!("offset: 0x{:x}", chunk.offset));
ui.label(format!("size: 0x{:x}", chunk.size));
ui.label(format!(
"allocation_type: {}",
chunk.allocation_type.as_str()
));
if let Some(name) = &chunk.name {
ui.label(format!("name: {}", name));
}
if settings.show_backtraces
&& chunk.backtrace.status() == BacktraceStatus::Captured
{
ui.label(chunk.backtrace.to_string());
}
});
}
// Update our cursors
cursor.x += width_used;
bytes_left -= bytes_used;
bytes_required -= bytes_used;
}
});
}
});
}
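// Illustrative sketch (hypothetical helper, not part of the crate API): pre-seeding
// the visualization settings, e.g. zooming out to 64 KiB per drawn line and enabling
// backtrace tooltips. The concrete values are arbitrary examples within the drag
// range used above.
#[allow(dead_code)]
pub(crate) fn example_zoomed_out_settings() -> MemoryChunksVisualizationSettings {
    MemoryChunksVisualizationSettings {
        width_in_bytes: 64 * 1024, // one drawn line covers 64 KiB
        show_backtraces: true,
    }
}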

56
vendor/gpu-allocator/src/visualizer/mod.rs vendored Normal file
View File

@@ -0,0 +1,56 @@
use egui::{Color32, Ui};
mod allocation_reports;
mod memory_chunks;
pub(crate) use allocation_reports::*;
pub(crate) use memory_chunks::*;
use crate::allocator::AllocationType;
pub const DEFAULT_COLOR_ALLOCATION_TYPE_FREE: Color32 = Color32::from_rgb(159, 159, 159); // gray
pub const DEFAULT_COLOR_ALLOCATION_TYPE_LINEAR: Color32 = Color32::from_rgb(91, 206, 250); // blue
pub const DEFAULT_COLOR_ALLOCATION_TYPE_NON_LINEAR: Color32 = Color32::from_rgb(250, 169, 184); // pink
#[derive(Clone)]
pub struct ColorScheme {
pub free_color: Color32,
pub linear_color: Color32,
pub non_linear_color: Color32,
}
impl Default for ColorScheme {
fn default() -> Self {
Self {
free_color: DEFAULT_COLOR_ALLOCATION_TYPE_FREE,
linear_color: DEFAULT_COLOR_ALLOCATION_TYPE_LINEAR,
non_linear_color: DEFAULT_COLOR_ALLOCATION_TYPE_NON_LINEAR,
}
}
}
impl ColorScheme {
pub(crate) fn get_allocation_type_color(&self, allocation_type: AllocationType) -> Color32 {
match allocation_type {
AllocationType::Free => self.free_color,
AllocationType::Linear => self.linear_color,
AllocationType::NonLinear => self.non_linear_color,
}
}
}
pub(crate) trait SubAllocatorVisualizer {
fn supports_visualization(&self) -> bool {
false
}
fn draw_base_info(&self, ui: &mut Ui) {
ui.label("No sub allocator information available");
}
fn draw_visualization(
&self,
_color_scheme: &ColorScheme,
_ui: &mut Ui,
_settings: &MemoryChunksVisualizationSettings,
) {
}
}
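// Illustrative sketch (hypothetical helper, not part of the crate API): building a
// custom `ColorScheme` on top of the defaults. Only the linear (buffer) color is
// overridden here; the concrete color value is an arbitrary example.
#[allow(dead_code)]
pub(crate) fn example_high_contrast_scheme() -> ColorScheme {
    ColorScheme {
        linear_color: Color32::from_rgb(0, 200, 120),
        ..ColorScheme::default()
    }
}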

979
vendor/gpu-allocator/src/vulkan/mod.rs vendored Normal file
View File

@@ -0,0 +1,979 @@
#![deny(clippy::unimplemented, clippy::unwrap_used, clippy::ok_expect)]
#[cfg(feature = "visualizer")]
mod visualizer;
use std::{backtrace::Backtrace, fmt, marker::PhantomData, sync::Arc};
use ash::vk;
use log::{debug, Level};
#[cfg(feature = "visualizer")]
pub use visualizer::AllocatorVisualizer;
use super::allocator;
use crate::{
allocator::{AllocatorReport, MemoryBlockReport},
AllocationError, AllocationSizes, AllocatorDebugSettings, MemoryLocation, Result,
};
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum AllocationScheme {
/// Perform a dedicated, driver-managed allocation for the given buffer, allowing
/// it to perform optimizations on this type of allocation.
DedicatedBuffer(vk::Buffer),
/// Perform a dedicated, driver-managed allocation for the given image, allowing
/// it to perform optimizations on this type of allocation.
DedicatedImage(vk::Image),
/// The memory for this resource will be allocated and managed by gpu-allocator.
GpuAllocatorManaged,
}
#[derive(Clone, Debug)]
pub struct AllocationCreateDesc<'a> {
/// Name of the allocation, for tracking and debugging purposes
pub name: &'a str,
/// Vulkan memory requirements for an allocation
pub requirements: vk::MemoryRequirements,
/// Location where the memory allocation should be stored
pub location: MemoryLocation,
/// If the resource is linear (buffer / linear texture) or a regular (tiled) texture.
pub linear: bool,
/// Determines how this allocation should be managed.
pub allocation_scheme: AllocationScheme,
}
/// Wrapper type to only mark a raw pointer [`Send`] + [`Sync`] without having to
/// mark the entire [`Allocation`] as such, instead relying on the compiler to
/// auto-implement this or fail if fields are added that violate this constraint
#[derive(Clone, Copy, Debug)]
pub(crate) struct SendSyncPtr(std::ptr::NonNull<std::ffi::c_void>);
// Sending is fine because mapped_ptr does not change based on the thread we are in
unsafe impl Send for SendSyncPtr {}
// Sync is also okay because Sending &Allocation is safe: a mutable reference
// to the data in mapped_ptr is never exposed while `self` is immutably borrowed.
// In order to break safety guarantees, the user needs to `unsafe`ly dereference
// `mapped_ptr` themselves.
unsafe impl Sync for SendSyncPtr {}
pub struct AllocatorCreateDesc {
pub instance: ash::Instance,
pub device: ash::Device,
pub physical_device: vk::PhysicalDevice,
pub debug_settings: AllocatorDebugSettings,
pub buffer_device_address: bool,
pub allocation_sizes: AllocationSizes,
}
/// A piece of allocated memory.
///
/// Could be contained in its own individual underlying memory object or as a sub-region
/// of a larger allocation.
///
/// # Copying data into a CPU-mapped [`Allocation`]
///
/// You'll very likely want to copy data into CPU-mapped [`Allocation`]s in order to send that data to the GPU.
/// Doing this data transfer correctly without invoking undefined behavior can be quite fraught and non-obvious<sup>[\[1\]]</sup>.
///
/// To help you do this correctly, [`Allocation`] implements [`presser::Slab`], which means you can directly
/// pass it in to many of `presser`'s [helper functions] (for example, [`copy_from_slice_to_offset`]).
///
/// In most cases, this will work perfectly. However, note that if you try to use an [`Allocation`] as a
/// [`Slab`] and it is not valid to do so (if it is not CPU-mapped or if its `size > isize::MAX`),
/// you will cause a panic. If you aren't sure about these conditions, you may use [`Allocation::try_as_mapped_slab`].
///
/// ## Example
///
/// Say we've created an [`Allocation`] called `my_allocation`, which is CPU-mapped.
/// ```ignore
/// let mut my_allocation: Allocation = my_allocator.allocate(...)?;
/// ```
///
/// And we want to fill it with some data in the form of a `my_gpu_data: Vec<MyGpuVector>`, defined as such:
///
/// ```ignore
/// // note that this is size(12) but align(16), thus we have 4 padding bytes.
/// // this would mean a `&[MyGpuVector]` is invalid to cast as a `&[u8]`, but
/// // we can still use `presser` to copy it directly in a valid manner.
/// #[repr(C, align(16))]
/// #[derive(Clone, Copy)]
/// struct MyGpuVector {
/// x: f32,
/// y: f32,
/// z: f32,
/// }
///
/// let my_gpu_data: Vec<MyGpuVector> = make_vertex_data();
/// ```
///
/// Depending on how the data we're copying will be used, the Vulkan device may have a minimum
/// alignment requirement for that data:
///
/// ```ignore
/// let min_gpu_align = my_vulkan_device_specifications.min_alignment_thing;
/// ```
///
/// Finally, we can use [`presser::copy_from_slice_to_offset_with_align`] to perform the copy,
/// simply passing `&mut my_allocation` since [`Allocation`] implements [`Slab`].
///
/// ```ignore
/// let copy_record = presser::copy_from_slice_to_offset_with_align(
/// &my_gpu_data[..], // a slice containing all elements of my_gpu_data
/// &mut my_allocation, // our Allocation
/// 0, // start as close to the beginning of the allocation as possible
/// min_gpu_align, // the minimum alignment we queried previously
/// )?;
/// ```
///
/// It's important to note that the data may not have actually been copied starting at the requested
/// `start_offset` (0 in the example above) depending on the alignment of the underlying allocation
/// as well as the alignment requirements of `MyGpuVector` and the `min_gpu_align` we passed in. Thus,
/// we can query the `copy_record` for the actual starting offset:
///
/// ```ignore
/// let actual_data_start_offset = copy_record.copy_start_offset;
/// ```
///
/// ## Safety
///
/// It is technically not fully safe to use an [`Allocation`] as a [`presser::Slab`] because we can't validate that the
/// GPU is not using the data in the buffer while `self` is borrowed. However, trying
/// to validate this statically is really hard and the community has basically decided that
/// requiring `unsafe` for functions like this creates too much "unsafe-noise", ultimately making it
/// harder to debug more insidious unsafety that is unrelated to GPU-CPU sync issues.
///
/// So, as would always be the case, you must ensure the GPU
/// is not using the data in `self` for the duration that you hold the returned [`MappedAllocationSlab`].
///
/// [`Slab`]: presser::Slab
/// [`copy_from_slice_to_offset`]: presser::copy_from_slice_to_offset
/// [helper functions]: presser#functions
/// [\[1\]]: presser#motivation
#[derive(Debug)]
pub struct Allocation {
chunk_id: Option<std::num::NonZeroU64>,
offset: u64,
size: u64,
memory_block_index: usize,
memory_type_index: usize,
device_memory: vk::DeviceMemory,
mapped_ptr: Option<SendSyncPtr>,
dedicated_allocation: bool,
memory_properties: vk::MemoryPropertyFlags,
name: Option<Box<str>>,
}
impl Allocation {
/// Tries to borrow the CPU-mapped memory that backs this allocation as a [`presser::Slab`], which you can then
/// use to safely copy data into the raw, potentially-uninitialized buffer.
/// See [the documentation of Allocation][Allocation#example] for an example of this.
///
/// Returns [`None`] if `self.mapped_ptr()` is `None`, or if `self.size()` is greater than `isize::MAX` because
/// this could lead to undefined behavior.
///
/// Note that [`Allocation`] implements [`Slab`] natively, so you can actually pass this allocation as a [`Slab`]
/// directly. However, if `self` is not actually a valid [`Slab`] (this function would return `None` as described above),
/// then trying to use it as a [`Slab`] will panic.
///
/// # Safety
///
/// See the note about safety in [the documentation of Allocation][Allocation#safety]
///
/// [`Slab`]: presser::Slab
// best to be explicit where the lifetime is coming from since we're doing unsafe things
// and relying on an inferred lifetime type in the PhantomData below
#[allow(clippy::needless_lifetimes)]
pub fn try_as_mapped_slab<'a>(&'a mut self) -> Option<MappedAllocationSlab<'a>> {
let mapped_ptr = self.mapped_ptr()?.cast().as_ptr();
if self.size > isize::MAX as _ {
return None;
}
// this will always succeed since size is <= isize::MAX which is < usize::MAX
let size = self.size as usize;
Some(MappedAllocationSlab {
_borrowed_alloc: PhantomData,
mapped_ptr,
size,
})
}
pub fn chunk_id(&self) -> Option<std::num::NonZeroU64> {
self.chunk_id
}
/// Returns the [`vk::MemoryPropertyFlags`] of this allocation.
pub fn memory_properties(&self) -> vk::MemoryPropertyFlags {
self.memory_properties
}
/// Returns the [`vk::DeviceMemory`] object that is backing this allocation.
/// This memory object can be shared with multiple other allocations and shouldn't be freed (or allocated from)
/// without this library, because that will lead to undefined behavior.
///
/// # Safety
/// The result of this function can safely be used to pass into [`ash::Device::bind_buffer_memory()`],
/// [`ash::Device::bind_image_memory()`] etc. It is exposed for this reason. Keep in mind to also
/// pass [`Self::offset()`] along to those.
pub unsafe fn memory(&self) -> vk::DeviceMemory {
self.device_memory
}
/// Returns [`true`] if this allocation is using a dedicated underlying allocation.
pub fn is_dedicated(&self) -> bool {
self.dedicated_allocation
}
/// Returns the offset of the allocation on the [`vk::DeviceMemory`].
/// When binding the memory to a buffer or image, this offset needs to be supplied as well.
pub fn offset(&self) -> u64 {
self.offset
}
/// Returns the size of the allocation
pub fn size(&self) -> u64 {
self.size
}
/// Returns a valid mapped pointer if the memory is host visible, otherwise it will return None.
/// The pointer already points to the exact memory region of the suballocation, so no offset needs to be applied.
pub fn mapped_ptr(&self) -> Option<std::ptr::NonNull<std::ffi::c_void>> {
self.mapped_ptr.map(|SendSyncPtr(p)| p)
}
/// Returns a valid mapped slice if the memory is host visible, otherwise it will return None.
/// The slice already references the exact memory region of the allocation, so no offset needs to be applied.
pub fn mapped_slice(&self) -> Option<&[u8]> {
self.mapped_ptr().map(|ptr| unsafe {
std::slice::from_raw_parts(ptr.cast().as_ptr(), self.size as usize)
})
}
/// Returns a valid mapped mutable slice if the memory is host visible, otherwise it will return None.
/// The slice already references the exact memory region of the allocation, so no offset needs to be applied.
pub fn mapped_slice_mut(&mut self) -> Option<&mut [u8]> {
self.mapped_ptr().map(|ptr| unsafe {
std::slice::from_raw_parts_mut(ptr.cast().as_ptr(), self.size as usize)
})
}
pub fn is_null(&self) -> bool {
self.chunk_id.is_none()
}
}
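// Illustrative sketch (hypothetical helper, not part of the crate API): copying raw
// bytes into a host-visible allocation through `mapped_slice_mut`. For types with
// non-trivial alignment, prefer the `presser`-based flow documented on `Allocation`.
#[allow(dead_code)]
fn example_upload_bytes(allocation: &mut Allocation, bytes: &[u8]) -> Result<()> {
    let slice = allocation
        .mapped_slice_mut()
        .ok_or_else(|| AllocationError::Internal("allocation is not host visible".into()))?;
    if bytes.len() > slice.len() {
        return Err(AllocationError::Internal(
            "data does not fit in the allocation".into(),
        ));
    }
    slice[..bytes.len()].copy_from_slice(bytes);
    Ok(())
}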
impl Default for Allocation {
fn default() -> Self {
Self {
chunk_id: None,
offset: 0,
size: 0,
memory_block_index: !0,
memory_type_index: !0,
device_memory: vk::DeviceMemory::null(),
mapped_ptr: None,
memory_properties: vk::MemoryPropertyFlags::empty(),
name: None,
dedicated_allocation: false,
}
}
}
/// A wrapper struct over a borrowed [`Allocation`] that infallibly implements [`presser::Slab`].
///
/// This type should be acquired by calling [`Allocation::try_as_mapped_slab`].
pub struct MappedAllocationSlab<'a> {
_borrowed_alloc: PhantomData<&'a mut Allocation>,
mapped_ptr: *mut u8,
size: usize,
}
// SAFETY: See the safety comment of Allocation::try_as_mapped_slab above.
unsafe impl<'a> presser::Slab for MappedAllocationSlab<'a> {
fn base_ptr(&self) -> *const u8 {
self.mapped_ptr
}
fn base_ptr_mut(&mut self) -> *mut u8 {
self.mapped_ptr
}
fn size(&self) -> usize {
self.size
}
}
// SAFETY: See the safety comment of Allocation::try_as_mapped_slab above.
unsafe impl presser::Slab for Allocation {
fn base_ptr(&self) -> *const u8 {
self.mapped_ptr
.expect("tried to use a non-mapped Allocation as a Slab")
.0
.as_ptr()
.cast()
}
fn base_ptr_mut(&mut self) -> *mut u8 {
self.mapped_ptr
.expect("tried to use a non-mapped Allocation as a Slab")
.0
.as_ptr()
.cast()
}
fn size(&self) -> usize {
if self.size > isize::MAX as _ {
panic!("tried to use an Allocation with size > isize::MAX as a Slab")
}
// this will always work if the above passed
self.size as usize
}
}
#[derive(Debug)]
pub(crate) struct MemoryBlock {
pub(crate) device_memory: vk::DeviceMemory,
pub(crate) size: u64,
pub(crate) mapped_ptr: Option<SendSyncPtr>,
pub(crate) sub_allocator: Box<dyn allocator::SubAllocator>,
#[cfg(feature = "visualizer")]
pub(crate) dedicated_allocation: bool,
}
impl MemoryBlock {
fn new(
device: &ash::Device,
size: u64,
mem_type_index: usize,
mapped: bool,
buffer_device_address: bool,
allocation_scheme: AllocationScheme,
requires_personal_block: bool,
) -> Result<Self> {
let device_memory = {
let alloc_info = vk::MemoryAllocateInfo::default()
.allocation_size(size)
.memory_type_index(mem_type_index as u32);
let allocation_flags = vk::MemoryAllocateFlags::DEVICE_ADDRESS;
let mut flags_info = vk::MemoryAllocateFlagsInfo::default().flags(allocation_flags);
// TODO(manon): Test this based on if the device has this feature enabled or not
let alloc_info = if buffer_device_address {
alloc_info.push_next(&mut flags_info)
} else {
alloc_info
};
// Flag the memory as dedicated if required.
let mut dedicated_memory_info = vk::MemoryDedicatedAllocateInfo::default();
let alloc_info = match allocation_scheme {
AllocationScheme::DedicatedBuffer(buffer) => {
dedicated_memory_info = dedicated_memory_info.buffer(buffer);
alloc_info.push_next(&mut dedicated_memory_info)
}
AllocationScheme::DedicatedImage(image) => {
dedicated_memory_info = dedicated_memory_info.image(image);
alloc_info.push_next(&mut dedicated_memory_info)
}
AllocationScheme::GpuAllocatorManaged => alloc_info,
};
unsafe { device.allocate_memory(&alloc_info, None) }.map_err(|e| match e {
vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => AllocationError::OutOfMemory,
e => AllocationError::Internal(format!(
"Unexpected error in vkAllocateMemory: {:?}",
e
)),
})?
};
let mapped_ptr = mapped
.then(|| {
unsafe {
device.map_memory(
device_memory,
0,
vk::WHOLE_SIZE,
vk::MemoryMapFlags::empty(),
)
}
.map_err(|e| {
unsafe { device.free_memory(device_memory, None) };
AllocationError::FailedToMap(e.to_string())
})
.and_then(|p| {
std::ptr::NonNull::new(p).map(SendSyncPtr).ok_or_else(|| {
AllocationError::FailedToMap("Returned mapped pointer is null".to_owned())
})
})
})
.transpose()?;
let sub_allocator: Box<dyn allocator::SubAllocator> = if allocation_scheme
!= AllocationScheme::GpuAllocatorManaged
|| requires_personal_block
{
Box::new(allocator::DedicatedBlockAllocator::new(size))
} else {
Box::new(allocator::FreeListAllocator::new(size))
};
Ok(Self {
device_memory,
size,
mapped_ptr,
sub_allocator,
#[cfg(feature = "visualizer")]
dedicated_allocation: allocation_scheme != AllocationScheme::GpuAllocatorManaged,
})
}
fn destroy(self, device: &ash::Device) {
if self.mapped_ptr.is_some() {
unsafe { device.unmap_memory(self.device_memory) };
}
unsafe { device.free_memory(self.device_memory, None) };
}
}
#[derive(Debug)]
pub(crate) struct MemoryType {
pub(crate) memory_blocks: Vec<Option<MemoryBlock>>,
pub(crate) memory_properties: vk::MemoryPropertyFlags,
pub(crate) memory_type_index: usize,
pub(crate) heap_index: usize,
pub(crate) mappable: bool,
pub(crate) active_general_blocks: usize,
pub(crate) buffer_device_address: bool,
}
impl MemoryType {
fn allocate(
&mut self,
device: &ash::Device,
desc: &AllocationCreateDesc<'_>,
granularity: u64,
backtrace: Arc<Backtrace>,
allocation_sizes: &AllocationSizes,
) -> Result<Allocation> {
let allocation_type = if desc.linear {
allocator::AllocationType::Linear
} else {
allocator::AllocationType::NonLinear
};
let memblock_size = if self
.memory_properties
.contains(vk::MemoryPropertyFlags::HOST_VISIBLE)
{
allocation_sizes.host_memblock_size
} else {
allocation_sizes.device_memblock_size
};
let size = desc.requirements.size;
let alignment = desc.requirements.alignment;
let dedicated_allocation = desc.allocation_scheme != AllocationScheme::GpuAllocatorManaged;
let requires_personal_block = size > memblock_size;
// Create a dedicated block for large memory allocations or allocations that require dedicated memory allocations.
if dedicated_allocation || requires_personal_block {
let mem_block = MemoryBlock::new(
device,
size,
self.memory_type_index,
self.mappable,
self.buffer_device_address,
desc.allocation_scheme,
requires_personal_block,
)?;
let mut block_index = None;
for (i, block) in self.memory_blocks.iter().enumerate() {
if block.is_none() {
block_index = Some(i);
break;
}
}
let block_index = match block_index {
Some(i) => {
self.memory_blocks[i].replace(mem_block);
i
}
None => {
self.memory_blocks.push(Some(mem_block));
self.memory_blocks.len() - 1
}
};
let mem_block = self.memory_blocks[block_index]
.as_mut()
.ok_or_else(|| AllocationError::Internal("Memory block must be Some".into()))?;
let (offset, chunk_id) = mem_block.sub_allocator.allocate(
size,
alignment,
allocation_type,
granularity,
desc.name,
backtrace,
)?;
return Ok(Allocation {
chunk_id: Some(chunk_id),
offset,
size,
memory_block_index: block_index,
memory_type_index: self.memory_type_index,
device_memory: mem_block.device_memory,
mapped_ptr: mem_block.mapped_ptr,
memory_properties: self.memory_properties,
name: Some(desc.name.into()),
dedicated_allocation,
});
}
let mut empty_block_index = None;
for (mem_block_i, mem_block) in self.memory_blocks.iter_mut().enumerate().rev() {
if let Some(mem_block) = mem_block {
let allocation = mem_block.sub_allocator.allocate(
size,
alignment,
allocation_type,
granularity,
desc.name,
backtrace.clone(),
);
match allocation {
Ok((offset, chunk_id)) => {
let mapped_ptr = if let Some(SendSyncPtr(mapped_ptr)) = mem_block.mapped_ptr
{
let offset_ptr = unsafe { mapped_ptr.as_ptr().add(offset as usize) };
std::ptr::NonNull::new(offset_ptr).map(SendSyncPtr)
} else {
None
};
return Ok(Allocation {
chunk_id: Some(chunk_id),
offset,
size,
memory_block_index: mem_block_i,
memory_type_index: self.memory_type_index,
device_memory: mem_block.device_memory,
memory_properties: self.memory_properties,
mapped_ptr,
dedicated_allocation: false,
name: Some(desc.name.into()),
});
}
Err(err) => match err {
AllocationError::OutOfMemory => {} // Block is full, continue search.
_ => return Err(err), // Unhandled error, return.
},
}
} else if empty_block_index.is_none() {
empty_block_index = Some(mem_block_i);
}
}
let new_memory_block = MemoryBlock::new(
device,
memblock_size,
self.memory_type_index,
self.mappable,
self.buffer_device_address,
desc.allocation_scheme,
false,
)?;
let new_block_index = if let Some(block_index) = empty_block_index {
self.memory_blocks[block_index] = Some(new_memory_block);
block_index
} else {
self.memory_blocks.push(Some(new_memory_block));
self.memory_blocks.len() - 1
};
self.active_general_blocks += 1;
let mem_block = self.memory_blocks[new_block_index]
.as_mut()
.ok_or_else(|| AllocationError::Internal("Memory block must be Some".into()))?;
let allocation = mem_block.sub_allocator.allocate(
size,
alignment,
allocation_type,
granularity,
desc.name,
backtrace,
);
let (offset, chunk_id) = match allocation {
Ok(value) => value,
Err(err) => match err {
AllocationError::OutOfMemory => {
return Err(AllocationError::Internal(
"Allocation that must succeed failed. This is a bug in the allocator."
.into(),
))
}
_ => return Err(err),
},
};
let mapped_ptr = if let Some(SendSyncPtr(mapped_ptr)) = mem_block.mapped_ptr {
let offset_ptr = unsafe { mapped_ptr.as_ptr().add(offset as usize) };
std::ptr::NonNull::new(offset_ptr).map(SendSyncPtr)
} else {
None
};
Ok(Allocation {
chunk_id: Some(chunk_id),
offset,
size,
memory_block_index: new_block_index,
memory_type_index: self.memory_type_index,
device_memory: mem_block.device_memory,
mapped_ptr,
memory_properties: self.memory_properties,
name: Some(desc.name.into()),
dedicated_allocation: false,
})
}
#[allow(clippy::needless_pass_by_value)]
fn free(&mut self, allocation: Allocation, device: &ash::Device) -> Result<()> {
let block_idx = allocation.memory_block_index;
let mem_block = self.memory_blocks[block_idx]
.as_mut()
.ok_or_else(|| AllocationError::Internal("Memory block must be Some.".into()))?;
mem_block.sub_allocator.free(allocation.chunk_id)?;
if mem_block.sub_allocator.is_empty() {
if mem_block.sub_allocator.supports_general_allocations() {
if self.active_general_blocks > 1 {
let block = self.memory_blocks[block_idx].take();
let block = block.ok_or_else(|| {
AllocationError::Internal("Memory block must be Some.".into())
})?;
block.destroy(device);
self.active_general_blocks -= 1;
}
} else {
let block = self.memory_blocks[block_idx].take();
let block = block.ok_or_else(|| {
AllocationError::Internal("Memory block must be Some.".into())
})?;
block.destroy(device);
}
}
Ok(())
}
}
pub struct Allocator {
pub(crate) memory_types: Vec<MemoryType>,
pub(crate) memory_heaps: Vec<vk::MemoryHeap>,
device: ash::Device,
pub(crate) buffer_image_granularity: u64,
pub(crate) debug_settings: AllocatorDebugSettings,
allocation_sizes: AllocationSizes,
}
impl fmt::Debug for Allocator {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.generate_report().fmt(f)
}
}
impl Allocator {
pub fn new(desc: &AllocatorCreateDesc) -> Result<Self> {
if desc.physical_device == vk::PhysicalDevice::null() {
return Err(AllocationError::InvalidAllocatorCreateDesc(
"AllocatorCreateDesc field `physical_device` is null.".into(),
));
}
let mem_props = unsafe {
desc.instance
.get_physical_device_memory_properties(desc.physical_device)
};
let memory_types = &mem_props.memory_types_as_slice();
let memory_heaps = mem_props.memory_heaps_as_slice().to_vec();
if desc.debug_settings.log_memory_information {
debug!("memory type count: {}", mem_props.memory_type_count);
debug!("memory heap count: {}", mem_props.memory_heap_count);
for (i, mem_type) in memory_types.iter().enumerate() {
let flags = mem_type.property_flags;
debug!(
"memory type[{}]: prop flags: 0x{:x}, heap[{}]",
i,
flags.as_raw(),
mem_type.heap_index,
);
}
for (i, heap) in memory_heaps.iter().enumerate() {
debug!(
"heap[{}] flags: 0x{:x}, size: {} MiB",
i,
heap.flags.as_raw(),
heap.size / (1024 * 1024)
);
}
}
let memory_types = memory_types
.iter()
.enumerate()
.map(|(i, mem_type)| MemoryType {
memory_blocks: Vec::default(),
memory_properties: mem_type.property_flags,
memory_type_index: i,
heap_index: mem_type.heap_index as usize,
mappable: mem_type
.property_flags
.contains(vk::MemoryPropertyFlags::HOST_VISIBLE),
active_general_blocks: 0,
buffer_device_address: desc.buffer_device_address,
})
.collect::<Vec<_>>();
let physical_device_properties = unsafe {
desc.instance
.get_physical_device_properties(desc.physical_device)
};
let granularity = physical_device_properties.limits.buffer_image_granularity;
Ok(Self {
memory_types,
memory_heaps,
device: desc.device.clone(),
buffer_image_granularity: granularity,
debug_settings: desc.debug_settings,
allocation_sizes: AllocationSizes::default(),
})
}
pub fn allocate(&mut self, desc: &AllocationCreateDesc<'_>) -> Result<Allocation> {
let size = desc.requirements.size;
let alignment = desc.requirements.alignment;
let backtrace = Arc::new(if self.debug_settings.store_stack_traces {
Backtrace::force_capture()
} else {
Backtrace::disabled()
});
if self.debug_settings.log_allocations {
debug!(
"Allocating `{}` of {} bytes with an alignment of {}.",
&desc.name, size, alignment
);
if self.debug_settings.log_stack_traces {
let backtrace = Backtrace::force_capture();
debug!("Allocation stack trace: {}", backtrace);
}
}
if size == 0 || !alignment.is_power_of_two() {
return Err(AllocationError::InvalidAllocationCreateDesc);
}
let mem_loc_preferred_bits = match desc.location {
MemoryLocation::GpuOnly => vk::MemoryPropertyFlags::DEVICE_LOCAL,
MemoryLocation::CpuToGpu => {
vk::MemoryPropertyFlags::HOST_VISIBLE
| vk::MemoryPropertyFlags::HOST_COHERENT
| vk::MemoryPropertyFlags::DEVICE_LOCAL
}
MemoryLocation::GpuToCpu => {
vk::MemoryPropertyFlags::HOST_VISIBLE
| vk::MemoryPropertyFlags::HOST_COHERENT
| vk::MemoryPropertyFlags::HOST_CACHED
}
MemoryLocation::Unknown => vk::MemoryPropertyFlags::empty(),
};
let mut memory_type_index_opt =
self.find_memorytype_index(&desc.requirements, mem_loc_preferred_bits);
if memory_type_index_opt.is_none() {
let mem_loc_required_bits = match desc.location {
MemoryLocation::GpuOnly => vk::MemoryPropertyFlags::DEVICE_LOCAL,
MemoryLocation::CpuToGpu | MemoryLocation::GpuToCpu => {
vk::MemoryPropertyFlags::HOST_VISIBLE | vk::MemoryPropertyFlags::HOST_COHERENT
}
MemoryLocation::Unknown => vk::MemoryPropertyFlags::empty(),
};
memory_type_index_opt =
self.find_memorytype_index(&desc.requirements, mem_loc_required_bits);
}
let memory_type_index = match memory_type_index_opt {
Some(x) => x as usize,
None => return Err(AllocationError::NoCompatibleMemoryTypeFound),
};
// Do not try to create a block if the heap is smaller than the required size (avoids validation warnings).
let memory_type = &mut self.memory_types[memory_type_index];
let allocation = if size > self.memory_heaps[memory_type.heap_index].size {
Err(AllocationError::OutOfMemory)
} else {
memory_type.allocate(
&self.device,
desc,
self.buffer_image_granularity,
backtrace.clone(),
&self.allocation_sizes,
)
};
if desc.location == MemoryLocation::CpuToGpu {
if allocation.is_err() {
let mem_loc_preferred_bits =
vk::MemoryPropertyFlags::HOST_VISIBLE | vk::MemoryPropertyFlags::HOST_COHERENT;
let memory_type_index_opt =
self.find_memorytype_index(&desc.requirements, mem_loc_preferred_bits);
let memory_type_index = match memory_type_index_opt {
Some(x) => x as usize,
None => return Err(AllocationError::NoCompatibleMemoryTypeFound),
};
self.memory_types[memory_type_index].allocate(
&self.device,
desc,
self.buffer_image_granularity,
backtrace,
&self.allocation_sizes,
)
} else {
allocation
}
} else {
allocation
}
}
pub fn free(&mut self, allocation: Allocation) -> Result<()> {
if self.debug_settings.log_frees {
let name = allocation.name.as_deref().unwrap_or("<null>");
debug!("Freeing `{}`.", name);
if self.debug_settings.log_stack_traces {
let backtrace = Backtrace::force_capture();
debug!("Free stack trace: {}", backtrace);
}
}
if allocation.is_null() {
return Ok(());
}
self.memory_types[allocation.memory_type_index].free(allocation, &self.device)?;
Ok(())
}
pub fn rename_allocation(&mut self, allocation: &mut Allocation, name: &str) -> Result<()> {
allocation.name = Some(name.into());
if allocation.is_null() {
return Ok(());
}
let mem_type = &mut self.memory_types[allocation.memory_type_index];
let mem_block = mem_type.memory_blocks[allocation.memory_block_index]
.as_mut()
.ok_or_else(|| AllocationError::Internal("Memory block must be Some.".into()))?;
mem_block
.sub_allocator
.rename_allocation(allocation.chunk_id, name)?;
Ok(())
}
pub fn report_memory_leaks(&self, log_level: Level) {
for (mem_type_i, mem_type) in self.memory_types.iter().enumerate() {
for (block_i, mem_block) in mem_type.memory_blocks.iter().enumerate() {
if let Some(mem_block) = mem_block {
mem_block
.sub_allocator
.report_memory_leaks(log_level, mem_type_i, block_i);
}
}
}
}
fn find_memorytype_index(
&self,
memory_req: &vk::MemoryRequirements,
flags: vk::MemoryPropertyFlags,
) -> Option<u32> {
self.memory_types
.iter()
.find(|memory_type| {
(1 << memory_type.memory_type_index) & memory_req.memory_type_bits != 0
&& memory_type.memory_properties.contains(flags)
})
.map(|memory_type| memory_type.memory_type_index as _)
}
pub fn generate_report(&self) -> AllocatorReport {
let mut allocations = vec![];
let mut blocks = vec![];
let mut total_reserved_bytes = 0;
for memory_type in &self.memory_types {
for block in memory_type.memory_blocks.iter().flatten() {
total_reserved_bytes += block.size;
let first_allocation = allocations.len();
allocations.extend(block.sub_allocator.report_allocations());
blocks.push(MemoryBlockReport {
size: block.size,
allocations: first_allocation..allocations.len(),
});
}
}
let total_allocated_bytes = allocations.iter().map(|report| report.size).sum();
AllocatorReport {
allocations,
blocks,
total_allocated_bytes,
total_reserved_bytes,
}
}
}
impl Drop for Allocator {
fn drop(&mut self) {
if self.debug_settings.log_leaks_on_shutdown {
self.report_memory_leaks(Level::Warn);
}
// Free all remaining memory blocks
for mem_type in self.memory_types.iter_mut() {
for mem_block in mem_type.memory_blocks.iter_mut() {
let block = mem_block.take();
if let Some(block) = block {
block.destroy(&self.device);
}
}
}
}
}
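// Illustrative usage sketch (hypothetical helper, not part of the crate API): the
// typical create / allocate / bind / free flow for this Vulkan allocator. The
// `instance`, `device`, `physical_device` and `buffer` values are assumed to have
// been created by the caller through `ash` beforehand, and buffer device addresses
// are assumed to be disabled here.
#[allow(dead_code)]
fn example_buffer_allocation(
    instance: ash::Instance,
    device: ash::Device,
    physical_device: vk::PhysicalDevice,
    buffer: vk::Buffer,
) -> Result<()> {
    let mut allocator = Allocator::new(&AllocatorCreateDesc {
        instance,
        device: device.clone(),
        physical_device,
        debug_settings: AllocatorDebugSettings::default(),
        buffer_device_address: false,
        allocation_sizes: AllocationSizes::default(),
    })?;

    // Ask the driver what this buffer needs and request CPU-to-GPU visible memory.
    let requirements = unsafe { device.get_buffer_memory_requirements(buffer) };
    let allocation = allocator.allocate(&AllocationCreateDesc {
        name: "example buffer",
        requirements,
        location: MemoryLocation::CpuToGpu,
        linear: true, // buffers are always linear
        allocation_scheme: AllocationScheme::GpuAllocatorManaged,
    })?;

    // Bind the sub-allocated region to the buffer; both the underlying memory
    // object and the offset into it must be passed along.
    unsafe { device.bind_buffer_memory(buffer, allocation.memory(), allocation.offset()) }
        .map_err(|e| AllocationError::Internal(format!("vkBindBufferMemory failed: {:?}", e)))?;

    // Hand the allocation back once the buffer is no longer in use.
    allocator.free(allocation)?;
    Ok(())
}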

226
vendor/gpu-allocator/src/vulkan/visualizer.rs vendored Normal file
View File

@@ -0,0 +1,226 @@
#![allow(clippy::new_without_default)]
use super::Allocator;
use crate::visualizer::{
render_allocation_reports_ui, AllocationReportVisualizeSettings, ColorScheme,
MemoryChunksVisualizationSettings,
};
struct AllocatorVisualizerBlockWindow {
memory_type_index: usize,
block_index: usize,
settings: MemoryChunksVisualizationSettings,
}
impl AllocatorVisualizerBlockWindow {
fn new(memory_type_index: usize, block_index: usize) -> Self {
Self {
memory_type_index,
block_index,
settings: Default::default(),
}
}
}
pub struct AllocatorVisualizer {
selected_blocks: Vec<AllocatorVisualizerBlockWindow>,
color_scheme: ColorScheme,
breakdown_settings: AllocationReportVisualizeSettings,
}
impl AllocatorVisualizer {
pub fn new() -> Self {
Self {
selected_blocks: Vec::default(),
color_scheme: ColorScheme::default(),
breakdown_settings: Default::default(),
}
}
pub fn set_color_scheme(&mut self, color_scheme: ColorScheme) {
self.color_scheme = color_scheme;
}
pub fn render_memory_block_ui(&mut self, ui: &mut egui::Ui, alloc: &Allocator) {
ui.label(format!(
"buffer image granularity: {:?}",
alloc.buffer_image_granularity
));
ui.collapsing(
format!("Memory Heaps ({} heaps)", alloc.memory_heaps.len()),
|ui| {
for (i, heap) in alloc.memory_heaps.iter().enumerate() {
ui.collapsing(format!("Heap: {}", i), |ui| {
ui.label(format!("flags: {:?}", heap.flags));
ui.label(format!(
"size: {} MiB",
heap.size as f64 / (1024 * 1024) as f64
));
});
}
},
);
ui.collapsing(
format!("Memory Types: ({} types)", alloc.memory_types.len()),
|ui| {
for (mem_type_idx, mem_type) in alloc.memory_types.iter().enumerate() {
ui.collapsing(
format!(
"Type: {} ({} blocks)",
mem_type_idx,
mem_type.memory_blocks.len(),
),
|ui| {
let mut total_block_size = 0;
let mut total_allocated = 0;
for block in mem_type.memory_blocks.iter().flatten() {
total_block_size += block.size;
total_allocated += block.sub_allocator.allocated();
}
let active_block_count = mem_type
.memory_blocks
.iter()
.filter(|block| block.is_some())
.count();
ui.label(format!("properties: {:?}", mem_type.memory_properties));
ui.label(format!("heap index: {}", mem_type.heap_index));
ui.label(format!("total block size: {} KiB", total_block_size / 1024));
ui.label(format!("total allocated: {} KiB", total_allocated / 1024));
ui.label(format!("block count: {}", active_block_count));
for (block_idx, block) in mem_type.memory_blocks.iter().enumerate() {
let Some(block) = block else { continue };
ui.collapsing(format!("Block: {}", block_idx), |ui| {
use ash::vk::Handle;
ui.label(format!("size: {} KiB", block.size / 1024));
ui.label(format!(
"allocated: {} KiB",
block.sub_allocator.allocated() / 1024
));
ui.label(format!(
"vk device memory: 0x{:x}",
block.device_memory.as_raw()
));
if let Some(mapped_ptr) = block.mapped_ptr {
ui.label(format!(
"mapped pointer: {:#p}",
mapped_ptr.0.as_ptr()
));
}
if block.dedicated_allocation {
ui.label("Dedicated Allocation");
}
block.sub_allocator.draw_base_info(ui);
if block.sub_allocator.supports_visualization()
&& ui.button("visualize").clicked()
&& !self.selected_blocks.iter().any(|x| {
x.memory_type_index == mem_type_idx
&& x.block_index == block_idx
})
{
self.selected_blocks.push(
AllocatorVisualizerBlockWindow::new(
mem_type_idx,
block_idx,
),
);
}
});
}
},
);
}
},
);
}
pub fn render_memory_block_window(
&mut self,
ctx: &egui::Context,
allocator: &Allocator,
open: &mut bool,
) {
egui::Window::new("Allocator Memory Blocks")
.open(open)
.show(ctx, |ui| self.render_memory_block_ui(ui, allocator));
}
pub fn render_memory_block_visualization_windows(
&mut self,
ctx: &egui::Context,
allocator: &Allocator,
) {
// Draw each window.
let color_scheme = &self.color_scheme;
self.selected_blocks.retain_mut(|window| {
let mut open = true;
egui::Window::new(format!(
"Block Visualizer {}:{}",
window.memory_type_index, window.block_index
))
.default_size([1920.0 * 0.5, 1080.0 * 0.5])
.open(&mut open)
.show(ctx, |ui| {
let memblock = &allocator.memory_types[window.memory_type_index].memory_blocks
[window.block_index]
.as_ref();
if let Some(memblock) = memblock {
ui.label(format!(
"Memory type {}, Memory block {}, Block size: {} KiB",
window.memory_type_index,
window.block_index,
memblock.size / 1024
));
window
.settings
.ui(ui, allocator.debug_settings.store_stack_traces);
ui.separator();
memblock
.sub_allocator
.draw_visualization(color_scheme, ui, &window.settings);
} else {
ui.label("Deallocated memory block");
}
});
open
});
}
pub fn render_breakdown_ui(&mut self, ui: &mut egui::Ui, allocator: &Allocator) {
render_allocation_reports_ui(
ui,
&mut self.breakdown_settings,
allocator
.memory_types
.iter()
.flat_map(|memory_type| memory_type.memory_blocks.iter())
.flatten()
.flat_map(|memory_block| memory_block.sub_allocator.report_allocations()),
);
}
pub fn render_breakdown_window(
&mut self,
ctx: &egui::Context,
allocator: &Allocator,
open: &mut bool,
) {
egui::Window::new("Allocator Breakdown")
.open(open)
.show(ctx, |ui| self.render_breakdown_ui(ui, allocator));
}
}
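// Illustrative sketch (hypothetical helper, not part of the crate API): driving the
// visualizer from an egui frame callback. The caller is assumed to keep the
// `AllocatorVisualizer` and the two window-open flags alive across frames.
#[allow(dead_code)]
fn example_draw_visualizer(
    ctx: &egui::Context,
    allocator: &Allocator,
    visualizer: &mut AllocatorVisualizer,
    breakdown_open: &mut bool,
    blocks_open: &mut bool,
) {
    // Top-level windows; each is only drawn while its `open` flag is true.
    visualizer.render_breakdown_window(ctx, allocator, breakdown_open);
    visualizer.render_memory_block_window(ctx, allocator, blocks_open);
    // Per-block visualization windows opened by the "visualize" buttons above.
    visualizer.render_memory_block_visualization_windows(ctx, allocator);
}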