Vendor dependencies for 0.3.0 release

This commit is contained in:
2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

1
vendor/naga_oil/.cargo-checksum.json vendored Normal file

File diff suppressed because one or more lines are too long

1454
vendor/naga_oil/Cargo.lock generated vendored Normal file

File diff suppressed because it is too large Load Diff

101
vendor/naga_oil/Cargo.toml vendored Normal file
View File

@@ -0,0 +1,101 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2021"
name = "naga_oil"
version = "0.17.1"
build = false
autolib = false
autobins = false
autoexamples = false
autotests = false
autobenches = false
description = "a crate for combining and manipulating shaders using naga IR"
readme = "README.md"
license = "MIT OR Apache-2.0"
repository = "https://github.com/bevyengine/naga_oil/"
[features]
allow_deprecated = []
default = [
"test_shader",
"glsl",
]
glsl = [
"naga/glsl-in",
"naga/glsl-out",
]
override_any = []
prune = []
test_shader = []
[lib]
name = "naga_oil"
path = "src/lib.rs"
[[example]]
name = "pbr_compose_test"
path = "examples/pbr_compose_test.rs"
[dependencies.bit-set]
version = "0.5"
[dependencies.codespan-reporting]
version = "0.11"
[dependencies.data-encoding]
version = "2.3.2"
[dependencies.indexmap]
version = "2"
[dependencies.naga]
version = "24"
features = [
"wgsl-in",
"wgsl-out",
]
[dependencies.once_cell]
version = "1.17.0"
[dependencies.regex]
version = "1.8"
[dependencies.regex-syntax]
version = "0.8"
[dependencies.rustc-hash]
version = "1.1.0"
[dependencies.thiserror]
version = "1.0"
[dependencies.tracing]
version = "0.1"
[dependencies.unicode-ident]
version = "1"
[dev-dependencies.futures-lite]
version = "1"
[dev-dependencies.tracing-subscriber]
version = "0.3"
features = [
"std",
"fmt",
]
[dev-dependencies.wgpu]
version = "24"
features = ["naga-ir"]

176
vendor/naga_oil/LICENSE-APACHE vendored Normal file
View File

@@ -0,0 +1,176 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS

19
vendor/naga_oil/LICENSE-MIT vendored Normal file
View File

@@ -0,0 +1,19 @@
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

202
vendor/naga_oil/README.md vendored Normal file
View File

@@ -0,0 +1,202 @@
Naga Organised Integration Library (`naga-oil`) is a crate for combining and manipulating shaders.
- `compose` presents a modular shader composition framework
- `prune` strips shaders down to required parts
and probably less useful externally:
- `derive` allows importing of items from multiple shaders into a single shader
- `redirect` modifies a shader by substituting function calls and modifying bindings
# Compose
the compose module allows construction of shaders from modules (which are themselves shaders).
it does this by treating shaders as modules, and
- building each module independently to naga IR
- creating "header" files for each supported language, which are used to build dependent modules/shaders
- making final shaders by combining the shader IR with the IR for imported modules
for multiple small shaders with large common imports, this can be faster than parsing the full source for each shader, and it allows for constructing shaders in a cleaner modular manner with better scope control.
## imports
shaders can be added to the composer as modules. this makes their types, constants, variables and functions available to modules/shaders that import them. note that importing a module will affect the final shader's global state if the module defines global variables with bindings.
modules may include a `#define_import_path` directive that names the module:
```wgsl
#define_import_path my_module
fn my_func() -> f32 {
return 1.0;
}
```
alternatively the module name can be specified as an argument to `Composer::add_composable_module`.
shaders can then import the module with an `#import` directive (with an optional `as` name) :
```wgsl
#import my_module;
#import my_other_module as mod2;
fn main() -> f32 {
let x = my_module::my_func();
let y = mod2::my_other_func();
return x*y;
}
```
or import a comma-separated list of individual items :
```wgsl
#import my_module::{my_func, my_const}
fn main() -> f32 {
return my_func(my_const);
}
```
Some rust-style import syntax is supported, and items can be directly imported using the fully qualified item name :
```wgsl
#import my_package::{
first_module::{item_one as item, item_two},
second_module::submodule,
}
fn main() -> f32 {
return item + item_two + submodule::subitem + my_package::third_module::item;
}
```
`module::self` and `module::*` are not currently supported.
imports can be nested - modules may import other modules, but not recursively. when a new module is added, all its `#import`s must already have been added.
the same module can be imported multiple times by different modules in the import tree.
there is no overlap of namespaces, so the same function names (or type, constant, or variable names) may be used in different modules.
note: the final shader will include the required dependencies (bindings, globals, consts, other functions) of any imported items that are used, but will not include the rest of the imported module.
## overriding functions
virtual functions can be declared with the `virtual` keyword:
```glsl
virtual fn point_light(world_position: vec3<f32>) -> vec3<f32> { ... }
```
virtual functions defined in imported modules can then be overridden using the `override` keyword:
```wgsl
#import bevy_pbr::lighting as Lighting
override fn Lighting::point_light (world_position: vec3<f32>) -> vec3<f32> {
let original = Lighting::point_light(world_position);
let quantized = vec3<u32>(original * 3.0);
return vec3<f32>(quantized) / 3.0;
}
```
overrides must either be declared in the top-level shader, or the module containing the override must be imported as an `additional_import` in a `Composer::add_composable_module` or `Composer::make_naga_module` call. using `#import` to import a module with overrides will not work due to tree-shaking.
override function definitions cause *all* calls to the original function in the entire shader scope to be replaced by calls to the new function, with the exception of calls within the override function itself.
the function signature of the override must match the base function.
overrides can be specified at any point in the final shader's import tree.
multiple overrides can be applied to the same function. for example, given :
- a module `a` containing a function `f`,
- a module `b` that imports `a`, and containing an `override a::f` function,
- a module `c` that imports `a` and `b`, and containing an `override a::f` function,
then `b` and `c` both specify an override for `a::f`.
the `override fn a::f` declared in module `b` may call to `a::f` within its body.
the `override fn a::f` declared in module `c` may call to `a::f` within its body, but the call will be redirected to `b::f`.
any other calls to `a::f` (within modules `a` or `b`, or anywhere else) will end up redirected to `c::f`.
in this way a chain or stack of overrides can be applied.
different overrides of the same function can be specified in different import branches. the final stack will be ordered based on the first occurrence of the override in the import tree (using a depth first search).
note that imports into a module/shader are processed in order, but are processed before the body of the current shader/module regardless of where they occur in that module, so there is no way to import a module containing an override and inject a call into the override stack prior to that imported override. you can instead create two modules each containing an override and import them into a parent module/shader to order them as required.
override functions can currently only be defined in wgsl.
if the `override_any` crate feature is enabled, then the `virtual` keyword is not required for the function being overridden.
## languages
modules can be written in GLSL or WGSL. shaders with entry points can be imported as modules (provided they have a `#define_import_path` directive). entry points are available to call from imported modules either via their name (for WGSL) or via `module::main` (for GLSL).
final shaders can also be written in GLSL or WGSL. for GLSL users must specify whether the shader is a vertex shader or fragment shader via the ShaderType argument (GLSL compute shaders are not supported).
## preprocessing
when generating a final shader or adding a composable module, a set of `shader_def` string/value pairs must be provided. The value can be a bool (`ShaderDefValue::Bool`), an i32 (`ShaderDefValue::Int`) or a u32 (`ShaderDefValue::UInt`).
these allow conditional compilation of parts of modules and the final shader. conditional compilation is performed with `#if` / `#ifdef` / `#ifndef`, `#else` and `#endif` preprocessor directives:
```wgsl
fn get_number() -> f32 {
#ifdef BIG_NUMBER
return 999.0;
#else
return 0.999;
#endif
}
```
the `#ifdef` directive matches when the def name exists in the input binding set (regardless of value). the `#ifndef` directive is the reverse.
the `#if` directive requires a def name, an operator, and a value for comparison:
- the def name must be a provided `shader_def` name.
- the operator must be one of `==`, `!=`, `>=`, `>`, `<`, `<=`
- the value must be an integer literal if comparing to a `ShaderDefValue::Int` or `ShaderDefValue::UInt`, or `true` or `false` if comparing to a `ShaderDefValue::Bool`.
shader defs can also be used in the shader source with `#SHADER_DEF` or `#{SHADER_DEF}`, and will be substituted for their value.
the preprocessor branching directives (`ifdef`, `ifndef` and `if`) can be prefixed with `#else` to create more complex control flows:
```wgsl
fn get_number() -> f32 {
#ifdef BIG_NUMBER
return 999.0;
#else if USER_NUMBER > 1
return f32(#USER_NUMBER)
#else
return 0.999;
#endif
}
```
shader defs can be created or overridden at the start of the top-level shader with the `#define` directive:
```wgsl
#define USER_NUMBER 42
```
the created value will default to `true` if not specified.
## error reporting
codespan reporting for errors is available via the error's `emit_to_string` method. this requires validation to be enabled, which is true by default. `Composer::non_validating()` produces a non-validating composer that is not able to give accurate error reporting.
# prune
- strips dead code and bindings from shaders based on specified required output. intended to be used for building reduced depth and/or normal shaders from arbitrary vertex/fragment shaders.
proper docs tbd
# redirect
- redirects function calls
- wip: rebinds global bindings
- todo one day: translate between uniform, texture and buffer accesses so shaders written for direct passes can be used in indirect
proper docs tbd
# derive
- builds a single self-contained naga module out of parts of one or more existing modules
proper docs tbd

2
vendor/naga_oil/clippy.toml vendored Normal file
View File

@@ -0,0 +1,2 @@
large-error-threshold = 256
enum-variant-size-threshold = 256

View File

@@ -0,0 +1,103 @@
#define_import_path bevy_pbr::clustered_forward
#import bevy_pbr::mesh_view_bindings as Bindings
// NOTE: Keep in sync with bevy_pbr/src/light.rs
// Map a view-space depth to a cluster z-slice index.
// Uses cluster_factors from the Lights uniform: the mapping is linear in
// view_z for orthographic projections and logarithmic in -view_z otherwise.
fn view_z_to_z_slice(view_z: f32, is_orthographic: bool) -> u32 {
var z_slice: u32 = 0u;
if (is_orthographic) {
// NOTE: view_z is correct in the orthographic case
z_slice = u32(floor((view_z - Bindings::lights.cluster_factors.z) * Bindings::lights.cluster_factors.w));
} else {
// NOTE: had to use -view_z to make it positive else log(negative) is nan
z_slice = u32(log(-view_z) * Bindings::lights.cluster_factors.z - Bindings::lights.cluster_factors.w + 1.0);
}
// NOTE: We use min as we may limit the far z plane used for clustering to be closer than
// the furthest thing being drawn. This means that we need to limit to the maximum cluster.
return min(z_slice, Bindings::lights.cluster_dimensions.z - 1u);
}
// Compute the flattened cluster index for a fragment from its framebuffer
// xy coordinate and its view-space depth (via view_z_to_z_slice).
fn fragment_cluster_index(frag_coord: vec2<f32>, view_z: f32, is_orthographic: bool) -> u32 {
let xy = vec2<u32>(floor(frag_coord * Bindings::lights.cluster_factors.xy));
let z_slice = view_z_to_z_slice(view_z, is_orthographic);
// NOTE: Restricting cluster index to avoid undefined behavior when accessing uniform buffer
// arrays based on the cluster index.
return min(
(xy.y * Bindings::lights.cluster_dimensions.x + xy.x) * Bindings::lights.cluster_dimensions.z + z_slice,
Bindings::lights.cluster_dimensions.w - 1u
);
}
// this must match CLUSTER_COUNT_SIZE in light.rs
const CLUSTER_COUNT_SIZE = 9u;
// Decode (offset, point light count, spot light count) for a cluster.
// With storage buffers the three values are read directly; without them all
// three are bit-packed into one u32 (layout documented inline below).
fn unpack_offset_and_counts(cluster_index: u32) -> vec3<u32> {
#ifdef NO_STORAGE_BUFFERS_SUPPORT
// Four packed u32s per vec4: select the vec4, then the component within it.
let offset_and_counts = Bindings::cluster_offsets_and_counts.data[cluster_index >> 2u][cluster_index & ((1u << 2u) - 1u)];
//  [ 31     ..     18 | 17      ..      9 | 8       ..     0 ]
//  [      offset      | point light count | spot light count ]
return vec3<u32>(
(offset_and_counts >> (CLUSTER_COUNT_SIZE * 2u)) & ((1u << (32u - (CLUSTER_COUNT_SIZE * 2u))) - 1u),
(offset_and_counts >> CLUSTER_COUNT_SIZE) & ((1u << CLUSTER_COUNT_SIZE) - 1u),
offset_and_counts & ((1u << CLUSTER_COUNT_SIZE) - 1u),
);
#else
return Bindings::cluster_offsets_and_counts.data[cluster_index].xyz;
#endif
}
// Look up a global light id from the cluster light index list.
// Without storage buffers the ids are packed four-u8s-per-u32 inside vec4s,
// so the index is decomposed into vec4 element / u32 component / byte lane.
fn get_light_id(index: u32) -> u32 {
#ifdef NO_STORAGE_BUFFERS_SUPPORT
// The index is correct but in cluster_light_index_lists we pack 4 u8s into a u32
// This means the index into cluster_light_index_lists is index / 4
let indices = Bindings::cluster_light_index_lists.data[index >> 4u][(index >> 2u) & ((1u << 2u) - 1u)];
// And index % 4 gives the sub-index of the u8 within the u32 so we shift by 8 * sub-index
return (indices >> (8u * (index & ((1u << 2u) - 1u)))) & ((1u << 8u) - 1u);
#else
return Bindings::cluster_light_index_lists.data[index];
#endif
}
// Optionally overlay a cluster debug visualization onto output_color; which
// visualization (z-slices, light-count complexity, or cluster coherency) is
// compiled in is selected by the CLUSTERED_FORWARD_DEBUG_* shader defs.
// With none of those defs set this is a pass-through.
// NOTE(review): the debug branches assign to the parameter output_color;
// WGSL function parameters are immutable, so this likely only validates when
// the debug defs are disabled — confirm against the composer's test usage.
fn cluster_debug_visualization(
output_color: vec4<f32>,
view_z: f32,
is_orthographic: bool,
offset_and_counts: vec3<u32>,
cluster_index: u32,
) -> vec4<f32> {
// Cluster allocation debug (using 'over' alpha blending)
#ifdef CLUSTERED_FORWARD_DEBUG_Z_SLICES
// NOTE: This debug mode visualises the z-slices
let cluster_overlay_alpha = 0.1;
var z_slice: u32 = view_z_to_z_slice(view_z, is_orthographic);
// A hack to make the colors alternate a bit more
if ((z_slice & 1u) == 1u) {
z_slice = z_slice + Bindings::lights.cluster_dimensions.z / 2u;
}
let slice_color = hsv2rgb(f32(z_slice) / f32(Bindings::lights.cluster_dimensions.z + 1u), 1.0, 0.5);
output_color = vec4<f32>(
(1.0 - cluster_overlay_alpha) * output_color.rgb + cluster_overlay_alpha * slice_color,
output_color.a
);
#endif // CLUSTERED_FORWARD_DEBUG_Z_SLICES
#ifdef CLUSTERED_FORWARD_DEBUG_CLUSTER_LIGHT_COMPLEXITY
// NOTE: This debug mode visualises the number of lights within the cluster that contains
// the fragment. It shows a sort of lighting complexity measure.
let cluster_overlay_alpha = 0.1;
let max_light_complexity_per_cluster = 64.0;
output_color.r = (1.0 - cluster_overlay_alpha) * output_color.r
+ cluster_overlay_alpha * smoothStep(0.0, max_light_complexity_per_cluster, f32(offset_and_counts[1] + offset_and_counts[2]));
output_color.g = (1.0 - cluster_overlay_alpha) * output_color.g
+ cluster_overlay_alpha * (1.0 - smoothStep(0.0, max_light_complexity_per_cluster, f32(offset_and_counts[1] + offset_and_counts[2])));
#endif // CLUSTERED_FORWARD_DEBUG_CLUSTER_LIGHT_COMPLEXITY
#ifdef CLUSTERED_FORWARD_DEBUG_CLUSTER_COHERENCY
// NOTE: Visualizes the cluster to which the fragment belongs
let cluster_overlay_alpha = 0.1;
let cluster_color = hsv2rgb(random1D(f32(cluster_index)), 1.0, 0.5);
output_color = vec4<f32>(
(1.0 - cluster_overlay_alpha) * output_color.rgb + cluster_overlay_alpha * cluster_color,
output_color.a
);
#endif // CLUSTERED_FORWARD_DEBUG_CLUSTER_COHERENCY
return output_color;
}

View File

@@ -0,0 +1,42 @@
#import bevy_pbr::mesh_view_types
#import bevy_pbr::mesh_types
@group(0) @binding(0)
var<uniform> view: View;
@group(1) @binding(0)
var<uniform> mesh: Mesh;
#ifdef SKINNED
@group(1) @binding(1)
var<uniform> joint_matrices: SkinnedMesh;
#import bevy_pbr::skinning
#endif
// NOTE: Bindings must come before functions that use them!
#import bevy_pbr::mesh_functions
// Vertex input layout; the skinning attributes are only present when the
// SKINNED shader def is set.
struct Vertex {
@location(0) position: vec3<f32>,
#ifdef SKINNED
@location(4) joint_indices: vec4<u32>,
@location(5) joint_weights: vec4<f32>,
#endif
};
// Depth-only output: just the clip-space position.
struct VertexOutput {
@builtin(position) clip_position: vec4<f32>,
};
// Vertex entry point: transforms the vertex position to clip space, using the
// skinned model matrix when SKINNED is defined, otherwise the mesh uniform's.
// NOTE(review): skin_model and mesh_position_local_to_clip are provided by the
// bevy_pbr modules imported above.
@vertex
fn vertex(vertex: Vertex) -> VertexOutput {
#ifdef SKINNED
let model = skin_model(vertex.joint_indices, vertex.joint_weights);
#else
let model = mesh.model;
#endif
var out: VertexOutput;
out.clip_position = mesh_position_local_to_clip(model, vec4<f32>(vertex.position, 1.0));
return out;
}

View File

@@ -0,0 +1,67 @@
#import bevy_pbr::mesh_view_bindings
#import bevy_pbr::mesh_bindings
// NOTE: Bindings must come before functions that use them!
#import bevy_pbr::mesh_functions
// Vertex input layout; optional attributes are gated by shader defs.
struct Vertex {
@location(0) position: vec3<f32>,
@location(1) normal: vec3<f32>,
#ifdef VERTEX_UVS
@location(2) uv: vec2<f32>,
#endif
#ifdef VERTEX_TANGENTS
@location(3) tangent: vec4<f32>,
#endif
#ifdef VERTEX_COLORS
@location(4) color: vec4<f32>,
#endif
#ifdef SKINNED
@location(5) joint_indices: vec4<u32>,
@location(6) joint_weights: vec4<f32>,
#endif
};
// The interpolated fields (world_position, world_normal, uv, ...) are spliced
// into this struct by the preprocessor #import below.
struct VertexOutput {
@builtin(position) clip_position: vec4<f32>,
#import bevy_pbr::mesh_vertex_output
};
// Vertex entry point: computes world-space position and normal (skinned when
// SKINNED is defined), forwards optional attributes, and projects to clip space.
@vertex
fn vertex(vertex: Vertex) -> VertexOutput {
var out: VertexOutput;
#ifdef SKINNED
var model = skin_model(vertex.joint_indices, vertex.joint_weights);
out.world_normal = skin_normals(model, vertex.normal);
#else
var model = mesh.model;
out.world_normal = mesh_normal_local_to_world(vertex.normal);
#endif
out.world_position = mesh_position_local_to_world(model, vec4<f32>(vertex.position, 1.0));
#ifdef VERTEX_UVS
out.uv = vertex.uv;
#endif
#ifdef VERTEX_TANGENTS
out.world_tangent = mesh_tangent_local_to_world(model, vertex.tangent);
#endif
#ifdef VERTEX_COLORS
out.color = vertex.color;
#endif
out.clip_position = mesh_position_world_to_clip(out.world_position);
return out;
}
// Fragment input mirrors VertexOutput (via the same #import) plus the
// front-facing builtin.
struct FragmentInput {
@builtin(front_facing) is_front: bool,
#import bevy_pbr::mesh_vertex_output
};
// Fragment entry point: returns the interpolated vertex color when
// VERTEX_COLORS is defined, otherwise a constant magenta.
@fragment
fn fragment(in: FragmentInput) -> @location(0) vec4<f32> {
#ifdef VERTEX_COLORS
return in.color;
#else
return vec4<f32>(1.0, 0.0, 1.0, 1.0);
#endif
}

View File

@@ -0,0 +1,6 @@
#define_import_path bevy_pbr::mesh_bindings
#import bevy_pbr::mesh_types as Types
// Per-mesh uniform buffer (bind group 2), typed by the imported mesh_types.
@group(2) @binding(0)
var<uniform> mesh: Types::Mesh;

View File

@@ -0,0 +1,36 @@
#define_import_path bevy_pbr::mesh_functions
// Transform a local-space position to world space with the model matrix.
fn mesh_position_local_to_world(model: mat4x4<f32>, vertex_position: vec4<f32>) -> vec4<f32> {
return model * vertex_position;
}
// Transform a world-space position to clip space using the view-projection
// matrix from the `view` uniform (bound by an importing module).
fn mesh_position_world_to_clip(world_position: vec4<f32>) -> vec4<f32> {
return view.view_proj * world_position;
}
// NOTE: The intermediate world_position assignment is important
// for precision purposes when using the 'equals' depth comparison
// function.
fn mesh_position_local_to_clip(model: mat4x4<f32>, vertex_position: vec4<f32>) -> vec4<f32> {
let world_position = mesh_position_local_to_world(model, vertex_position);
return mesh_position_world_to_clip(world_position);
}
// Transform a local-space normal to world space using the upper-left 3x3 of
// the mesh's inverse-transpose model matrix.
fn mesh_normal_local_to_world(vertex_normal: vec3<f32>) -> vec3<f32> {
    let itm = mesh.inverse_transpose_model;
    let normal_matrix = mat3x3<f32>(itm[0].xyz, itm[1].xyz, itm[2].xyz);
    return normal_matrix * vertex_normal;
}
// Transform a local-space tangent to world space: rotate xyz by the model
// matrix's upper-left 3x3 and carry the w component through unchanged.
fn mesh_tangent_local_to_world(model: mat4x4<f32>, vertex_tangent: vec4<f32>) -> vec4<f32> {
    let rotation = mat3x3<f32>(model[0].xyz, model[1].xyz, model[2].xyz);
    let world_tangent = rotation * vertex_tangent.xyz;
    return vec4<f32>(world_tangent, vertex_tangent.w);
}

View File

@@ -0,0 +1,16 @@
#define_import_path bevy_pbr::mesh_types
// Per-mesh uniform data: model matrix, its inverse-transpose (for normals),
// and an option bit field.
struct Mesh {
model: mat4x4<f32>,
inverse_transpose_model: mat4x4<f32>,
// 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options.
flags: u32,
};
#ifdef SKINNED
// Joint matrices for skinning; only compiled in when SKINNED is defined.
struct SkinnedMesh {
data: array<mat4x4<f32>, 256u>,
};
#endif
// Bit set in Mesh::flags when the mesh receives shadows.
const MESH_FLAGS_SHADOW_RECEIVER_BIT: u32 = 1u;

View File

@@ -0,0 +1,15 @@
#define_import_path bevy_pbr::mesh_vertex_output
// Interpolated vertex outputs shared by mesh shaders; optional fields are
// gated by the same shader defs as the corresponding vertex attributes.
struct MeshVertexOutput {
@location(0) world_position: vec4<f32>,
@location(1) world_normal: vec3<f32>,
#ifdef VERTEX_UVS
@location(2) uv: vec2<f32>,
#endif
#ifdef VERTEX_TANGENTS
@location(3) world_tangent: vec4<f32>,
#endif
#ifdef VERTEX_COLORS
@location(4) color: vec4<f32>,
#endif
}

View File

@@ -0,0 +1,42 @@
#define_import_path bevy_pbr::mesh_view_bindings
#import bevy_pbr::mesh_view_types as Types
// View and lights uniforms for the mesh view bind group (group 0).
@group(0) @binding(0)
var<uniform> view: Types::View;
@group(0) @binding(1)
var<uniform> lights: Types::Lights;
// Shadow maps: array textures normally; single (non-array) depth textures when
// the platform lacks array texture support.
#ifdef NO_ARRAY_TEXTURES_SUPPORT
@group(0) @binding(2)
var point_shadow_textures: texture_depth_cube;
#else
@group(0) @binding(2)
var point_shadow_textures: texture_depth_cube_array;
#endif
@group(0) @binding(3)
var point_shadow_textures_sampler: sampler_comparison;
#ifdef NO_ARRAY_TEXTURES_SUPPORT
@group(0) @binding(4)
var directional_shadow_textures: texture_depth_2d;
#else
@group(0) @binding(4)
var directional_shadow_textures: texture_depth_2d_array;
#endif
@group(0) @binding(5)
var directional_shadow_textures_sampler: sampler_comparison;
// Clustered-forward light data: uniform buffers when storage buffers are
// unsupported, storage buffers otherwise.
#ifdef NO_STORAGE_BUFFERS_SUPPORT
@group(0) @binding(6)
var<uniform> point_lights: Types::PointLights;
@group(0) @binding(7)
var<uniform> cluster_light_index_lists: Types::ClusterLightIndexLists;
@group(0) @binding(8)
var<uniform> cluster_offsets_and_counts: Types::ClusterOffsetsAndCounts;
#else
@group(0) @binding(6)
var<storage> point_lights: Types::PointLights;
@group(0) @binding(7)
var<storage> cluster_light_index_lists: Types::ClusterLightIndexLists;
@group(0) @binding(8)
var<storage> cluster_offsets_and_counts: Types::ClusterOffsetsAndCounts;
#endif

View File

@@ -0,0 +1,87 @@
#define_import_path bevy_pbr::mesh_view_types
// Camera / view matrices and viewport size for the current view.
struct View {
view_proj: mat4x4<f32>,
inverse_view_proj: mat4x4<f32>,
view: mat4x4<f32>,
inverse_view: mat4x4<f32>,
projection: mat4x4<f32>,
inverse_projection: mat4x4<f32>,
world_position: vec3<f32>,
width: f32,
height: f32,
};
// Packed GPU representation shared by point lights and spot lights; spot
// lights reuse the point-light layout with repurposed fields (see comments).
struct PointLight {
// For point lights: the lower-right 2x2 values of the projection matrix [2][2] [2][3] [3][2] [3][3]
// For spot lights: the direction (x,z), spot_scale and spot_offset
light_custom_data: vec4<f32>,
color_inverse_square_range: vec4<f32>,
position_radius: vec4<f32>,
// 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options.
flags: u32,
shadow_depth_bias: f32,
shadow_normal_bias: f32,
spot_light_tan_angle: f32,
};
const POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT: u32 = 1u;
const POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE: u32 = 2u;
struct DirectionalLight {
view_projection: mat4x4<f32>,
color: vec4<f32>,
direction_to_light: vec3<f32>,
// 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options.
flags: u32,
shadow_depth_bias: f32,
shadow_normal_bias: f32,
};
const DIRECTIONAL_LIGHT_FLAGS_SHADOWS_ENABLED_BIT: u32 = 1u;
// Per-view light data, including cluster dimensions/factors used by
// clustered forward shading to map a fragment to its light cluster.
struct Lights {
// NOTE: this array size must be kept in sync with the constants defined bevy_pbr2/src/render/light.rs
directional_lights: array<DirectionalLight, 1u>,
ambient_color: vec4<f32>,
// x/y/z dimensions and n_clusters in w
cluster_dimensions: vec4<u32>,
// xy are vec2<f32>(cluster_dimensions.xy) / vec2<f32>(view.width, view.height)
//
// For perspective projections:
// z is cluster_dimensions.z / log(far / near)
// w is cluster_dimensions.z * log(near) / log(far / near)
//
// For orthographic projections:
// NOTE: near and far are +ve but -z is infront of the camera
// z is -near
// w is cluster_dimensions.z / (-far - -near)
cluster_factors: vec4<f32>,
n_directional_lights: u32,
spot_light_shadowmap_offset: i32,
};
// Uniform-buffer fallback layouts: fixed-size arrays, since uniform buffers
// cannot hold runtime-sized arrays.
#ifdef NO_STORAGE_BUFFERS_SUPPORT
struct PointLights {
data: array<PointLight, 256u>,
};
struct ClusterLightIndexLists {
// each u32 contains 4 u8 indices into the PointLights array
data: array<vec4<u32>, 1024u>,
};
struct ClusterOffsetsAndCounts {
// each u32 contains a 24-bit index into ClusterLightIndexLists in the high 24 bits
// and an 8-bit count of the number of lights in the low 8 bits
data: array<vec4<u32>, 1024u>,
};
#else
// Storage-buffer layouts: runtime-sized arrays, one index per u32.
struct PointLights {
data: array<PointLight>,
};
struct ClusterLightIndexLists {
data: array<u32>,
};
struct ClusterOffsetsAndCounts {
data: array<vec4<u32>>,
};
#endif

View File

@@ -0,0 +1,761 @@
// NOTE(review): everything from here down is machine-generated WGSL produced
// by naga_oil's composer (a naga IR round-trip of the modules above).
// Identifiers are prefixed with their source module (`module__name`) and the
// `_eNN` locals seen later are naga's flattened expression temporaries.
// Do not hand-edit the logic; regenerate instead.
struct mesh_vertex_output__MeshVertexOutput {
@location(0) world_position: vec4<f32>,
@location(1) world_normal: vec3<f32>,
@location(2) uv: vec2<f32>,
}
// Flattened StandardMaterial uniform (texture presence is signalled via `flags`).
struct pbr_types__StandardMaterial {
base_color: vec4<f32>,
emissive: vec4<f32>,
perceptual_roughness: f32,
metallic: f32,
reflectance: f32,
flags: u32,
alpha_cutoff: f32,
}
struct mesh_types__Mesh {
model: mat4x4<f32>,
inverse_transpose_model: mat4x4<f32>,
flags: u32,
}
struct mesh_view_types__View {
view_proj: mat4x4<f32>,
inverse_view_proj: mat4x4<f32>,
view: mat4x4<f32>,
inverse_view: mat4x4<f32>,
projection: mat4x4<f32>,
inverse_projection: mat4x4<f32>,
world_position: vec3<f32>,
width: f32,
height: f32,
}
// Shared point/spot light layout; spot lights repurpose light_custom_data
// (see lighting__spot_light and shadows__fetch_spot_shadow below).
struct mesh_view_types__PointLight {
light_custom_data: vec4<f32>,
color_inverse_square_range: vec4<f32>,
position_radius: vec4<f32>,
flags: u32,
shadow_depth_bias: f32,
shadow_normal_bias: f32,
spot_light_tan_angle: f32,
}
struct mesh_view_types__DirectionalLight {
view_projection: mat4x4<f32>,
color: vec4<f32>,
direction_to_light: vec3<f32>,
flags: u32,
shadow_depth_bias: f32,
shadow_normal_bias: f32,
}
struct mesh_view_types__Lights {
directional_lights: array<mesh_view_types__DirectionalLight,1u>,
ambient_color: vec4<f32>,
cluster_dimensions: vec4<u32>,
cluster_factors: vec4<f32>,
n_directional_lights: u32,
spot_light_shadowmap_offset: i32,
}
struct mesh_view_types__PointLights {
data: array<mesh_view_types__PointLight>,
}
struct mesh_view_types__ClusterLightIndexLists {
data: array<u32>,
}
struct mesh_view_types__ClusterOffsetsAndCounts {
data: array<vec4<u32>>,
}
// Bundle of per-fragment inputs consumed by pbr_functions__pbr.
struct pbr_functions__PbrInput {
material: pbr_types__StandardMaterial,
occlusion: f32,
frag_coord: vec4<f32>,
world_position: vec4<f32>,
world_normal: vec3<f32>,
N: vec3<f32>,
V: vec3<f32>,
is_orthographic: bool,
}
// StandardMaterial flag bits (each a distinct power of two).
const pbr_types__STANDARD_MATERIAL_FLAGS_EMISSIVE_TEXTURE_BIT: u32 = 2u;
const pbr_types__STANDARD_MATERIAL_FLAGS_UNLIT_BIT: u32 = 32u;
const pbr_types__STANDARD_MATERIAL_FLAGS_ALPHA_MODE_BLEND: u32 = 256u;
const pbr_types__STANDARD_MATERIAL_FLAGS_TWO_COMPONENT_NORMAL_MAP: u32 = 512u;
const pbr_types__STANDARD_MATERIAL_FLAGS_ALPHA_MODE_MASK: u32 = 128u;
const pbr_types__STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT: u32 = 16u;
const pbr_types__STANDARD_MATERIAL_FLAGS_BASE_COLOR_TEXTURE_BIT: u32 = 1u;
const pbr_types__STANDARD_MATERIAL_FLAGS_METALLIC_ROUGHNESS_TEXTURE_BIT: u32 = 4u;
const pbr_types__STANDARD_MATERIAL_FLAGS_OCCLUSION_TEXTURE_BIT: u32 = 8u;
const pbr_types__STANDARD_MATERIAL_FLAGS_ALPHA_MODE_OPAQUE: u32 = 64u;
const pbr_types__STANDARD_MATERIAL_FLAGS_FLIP_NORMAL_MAP_Y: u32 = 1024u;
const mesh_types__MESH_FLAGS_SHADOW_RECEIVER_BIT: u32 = 1u;
const mesh_view_types__POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE: u32 = 2u;
const mesh_view_types__DIRECTIONAL_LIGHT_FLAGS_SHADOWS_ENABLED_BIT: u32 = 1u;
const mesh_view_types__POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT: u32 = 1u;
// Pi as an f32 literal (note: generated with full f32 round-trip precision).
const utils__PI: f32 = 3.1415927410125732;
const clustered_forward__CLUSTER_COUNT_SIZE: u32 = 9u;
// Resource bindings. Generated order does not follow binding index order:
// group(0) = per-view data, group(1) = material, group(2) = per-mesh data.
@group(2) @binding(0)
var<uniform> mesh_bindings__mesh: mesh_types__Mesh;
@group(0) @binding(0)
var<uniform> mesh_view_bindings__view: mesh_view_types__View;
@group(0) @binding(2)
var mesh_view_bindings__point_shadow_textures: texture_depth_cube_array;
@group(0) @binding(5)
var mesh_view_bindings__directional_shadow_textures_sampler: sampler_comparison;
@group(0) @binding(6)
var<storage> mesh_view_bindings__point_lights: mesh_view_types__PointLights;
@group(0) @binding(1)
var<uniform> mesh_view_bindings__lights: mesh_view_types__Lights;
@group(0) @binding(3)
var mesh_view_bindings__point_shadow_textures_sampler: sampler_comparison;
@group(0) @binding(4)
var mesh_view_bindings__directional_shadow_textures: texture_depth_2d_array;
@group(0) @binding(7)
var<storage> mesh_view_bindings__cluster_light_index_lists: mesh_view_types__ClusterLightIndexLists;
@group(0) @binding(8)
var<storage> mesh_view_bindings__cluster_offsets_and_counts: mesh_view_types__ClusterOffsetsAndCounts;
// Material textures/samplers come in (texture, sampler) pairs per map.
@group(1) @binding(8)
var pbr_bindings__occlusion_sampler: sampler;
@group(1) @binding(0)
var<uniform> pbr_bindings__material: pbr_types__StandardMaterial;
@group(1) @binding(3)
var pbr_bindings__emissive_texture: texture_2d<f32>;
@group(1) @binding(1)
var pbr_bindings__base_color_texture: texture_2d<f32>;
@group(1) @binding(5)
var pbr_bindings__metallic_roughness_texture: texture_2d<f32>;
@group(1) @binding(4)
var pbr_bindings__emissive_sampler: sampler;
@group(1) @binding(6)
var pbr_bindings__metallic_roughness_sampler: sampler;
@group(1) @binding(2)
var pbr_bindings__base_color_sampler: sampler;
@group(1) @binding(10)
var pbr_bindings__normal_map_sampler: sampler;
@group(1) @binding(9)
var pbr_bindings__normal_map_texture: texture_2d<f32>;
@group(1) @binding(7)
var pbr_bindings__occlusion_texture: texture_2d<f32>;
// Returns a StandardMaterial populated with default values (white base
// color, opaque alpha mode, no emissive).
fn pbr_types__standard_material_new() -> pbr_types__StandardMaterial {
var material: pbr_types__StandardMaterial;
material.base_color = vec4<f32>(1.0, 1.0, 1.0, 1.0);
material.emissive = vec4<f32>(0.0, 0.0, 0.0, 1.0);
material.perceptual_roughness = 0.08900000154972076;
material.metallic = 0.009999999776482582;
material.reflectance = 0.5;
material.flags = pbr_types__STANDARD_MATERIAL_FLAGS_ALPHA_MODE_OPAQUE;
material.alpha_cutoff = 0.5;
let _e33: pbr_types__StandardMaterial = material;
return _e33;
}
// Clamps a value to [0, 1].
fn utils__saturate(value: f32) -> f32 {
return clamp(value, 0.0, 1.0);
}
// Converts HSV (hue in [0,1], saturation, value) to an RGB color.
fn utils__hsv2rgb(hue: f32, saturation: f32, value_1: f32) -> vec3<f32> {
let rgb: vec3<f32> = clamp((abs((((vec3<f32>((hue * 6.0)) + vec3<f32>(0.0, 4.0, 2.0)) % vec3<f32>(6.0)) - vec3<f32>(3.0))) - vec3<f32>(1.0)), vec3<f32>(0.0), vec3<f32>(1.0));
return (value_1 * mix(vec3<f32>(1.0), rgb, vec3<f32>(saturation)));
}
// Cheap 1D hash: pseudo-random value in [0,1) from a scalar seed.
fn utils__random1D(s: f32) -> f32 {
return fract((sin((s * 12.989800453186035)) * 43758.546875));
}
// Smooth distance attenuation: inverse-square falloff windowed to reach
// zero at the light's range, with a small epsilon to avoid division by zero.
fn lighting__getDistanceAttenuation(distanceSquare: f32, inverseRangeSquared: f32) -> f32 {
let factor: f32 = (distanceSquare * inverseRangeSquared);
let _e10: f32 = utils__saturate((1.0 - (factor * factor)));
let attenuation: f32 = (_e10 * _e10);
return ((attenuation * 1.0) / max(distanceSquare, 9.999999747378752e-5));
}
// GGX (Trowbridge-Reitz) normal distribution term D.
fn lighting__D_GGX(roughness: f32, NoH: f32, h: vec3<f32>) -> f32 {
let oneMinusNoHSquared: f32 = (1.0 - (NoH * NoH));
let a: f32 = (NoH * roughness);
let k: f32 = (roughness / (oneMinusNoHSquared + (a * a)));
let d: f32 = ((k * k) * (1.0 / utils__PI));
return d;
}
// Height-correlated Smith visibility term V for GGX.
fn lighting__V_SmithGGXCorrelated(roughness_1: f32, NoV: f32, NoL: f32) -> f32 {
let a2_: f32 = (roughness_1 * roughness_1);
let lambdaV: f32 = (NoL * sqrt((((NoV - (a2_ * NoV)) * NoV) + a2_)));
let lambdaL: f32 = (NoV * sqrt((((NoL - (a2_ * NoL)) * NoL) + a2_)));
let v_1: f32 = (0.5 / (lambdaV + lambdaL));
return v_1;
}
// Schlick Fresnel approximation, vector (per-channel f0) form.
fn lighting__F_Schlick_vec(f0_: vec3<f32>, f90_: f32, VoH: f32) -> vec3<f32> {
return (f0_ + ((vec3<f32>(f90_) - f0_) * pow((1.0 - VoH), 5.0)));
}
// Schlick Fresnel approximation, scalar form.
fn lighting__F_Schlick(f0_1: f32, f90_1: f32, VoH_1: f32) -> f32 {
return (f0_1 + ((f90_1 - f0_1) * pow((1.0 - VoH_1), 5.0)));
}
// Fresnel with f90 estimated from f0 (the 50*0.33 heuristic clamps f90
// toward 1 for realistic dielectrics).
fn lighting__fresnel(f0_2: vec3<f32>, LoH: f32) -> vec3<f32> {
let _e11: f32 = utils__saturate(dot(f0_2, vec3<f32>((50.0 * 0.33000001311302185))));
let _e12: vec3<f32> = lighting__F_Schlick_vec(f0_2, _e11, LoH);
return _e12;
}
// Cook-Torrance style specular: intensity * D * V * F.
fn lighting__specular(f0_3: vec3<f32>, roughness_2: f32, h_1: vec3<f32>, NoV_1: f32, NoL_1: f32, NoH_1: f32, LoH_1: f32, specularIntensity: f32) -> vec3<f32> {
let _e12: f32 = lighting__D_GGX(roughness_2, NoH_1, h_1);
let _e13: f32 = lighting__V_SmithGGXCorrelated(roughness_2, NoV_1, NoL_1);
let _e14: vec3<f32> = lighting__fresnel(f0_3, LoH_1);
return (((specularIntensity * _e12) * _e13) * _e14);
}
// Burley (Disney) diffuse term, normalized by 1/PI.
fn lighting__Fd_Burley(roughness_3: f32, NoV_2: f32, NoL_2: f32, LoH_2: f32) -> f32 {
let f90_2: f32 = (0.5 + (((2.0 * roughness_3) * LoH_2) * LoH_2));
let _e15: f32 = lighting__F_Schlick(1.0, f90_2, NoL_2);
let _e17: f32 = lighting__F_Schlick(1.0, f90_2, NoV_2);
return ((_e15 * _e17) * (1.0 / utils__PI));
}
// Karis' analytic approximation of the environment BRDF (split-sum scale/bias).
fn lighting__EnvBRDFApprox(f0_4: vec3<f32>, perceptual_roughness_1: f32, NoV_3: f32) -> vec3<f32> {
let c0_: vec4<f32> = vec4<f32>(-1.0, -0.027499999850988388, -0.5720000267028809, 0.02199999988079071);
let c1_: vec4<f32> = vec4<f32>(1.0, 0.042500000447034836, 1.0399999618530273, -0.03999999910593033);
let r: vec4<f32> = ((perceptual_roughness_1 * c0_) + c1_);
let a004_: f32 = ((min((r.x * r.x), exp2((-9.279999732971191 * NoV_3))) * r.x) + r.y);
let AB: vec2<f32> = ((vec2<f32>(-1.0399999618530273, 1.0399999618530273) * a004_) + r.zw);
return ((f0_4 * AB.x) + vec3<f32>(AB.y));
}
// Maps artist-facing perceptual roughness to the squared roughness used by
// the BRDF, clamped to a minimum to avoid specular aliasing at 0.
fn lighting__perceptualRoughnessToRoughness(perceptualRoughness: f32) -> f32 {
let clampedPerceptualRoughness: f32 = clamp(perceptualRoughness, 0.08900000154972076, 1.0);
return (clampedPerceptualRoughness * clampedPerceptualRoughness);
}
// Per-channel Reinhard tone mapping: c / (1 + c).
fn lighting__reinhard(color: vec3<f32>) -> vec3<f32> {
return (color / (vec3<f32>(1.0) + color));
}
// Extended Reinhard with a configurable white point.
fn lighting__reinhard_extended(color_1: vec3<f32>, max_white: f32) -> vec3<f32> {
let numerator: vec3<f32> = (color_1 * (vec3<f32>(1.0) + (color_1 / vec3<f32>((max_white * max_white)))));
return (numerator / (vec3<f32>(1.0) + color_1));
}
// Relative luminance (Rec. 709-style weights).
fn lighting__luminance(v: vec3<f32>) -> f32 {
return dot(v, vec3<f32>(0.2125999927520752, 0.7152000069618225, 0.0722000002861023));
}
// Rescales a color so its luminance equals l_out, preserving chroma.
fn lighting__change_luminance(c_in: vec3<f32>, l_out: f32) -> vec3<f32> {
let _e6: f32 = lighting__luminance(c_in);
return (c_in * (l_out / _e6));
}
// Reinhard tone mapping applied to luminance only (preserves hue better
// than the per-channel variant).
fn lighting__reinhard_luminance(color_2: vec3<f32>) -> vec3<f32> {
let _e5: f32 = lighting__luminance(color_2);
let l_new: f32 = (_e5 / (1.0 + _e5));
let _e9: vec3<f32> = lighting__change_luminance(color_2, l_new);
return _e9;
}
// Extended (white-point) Reinhard applied to luminance only.
fn lighting__reinhard_extended_luminance(color_3: vec3<f32>, max_white_l: f32) -> vec3<f32> {
let _e6: f32 = lighting__luminance(color_3);
let numerator_1: f32 = (_e6 * (1.0 + (_e6 / (max_white_l * max_white_l))));
let l_new_1: f32 = (numerator_1 / (1.0 + _e6));
let _e15: vec3<f32> = lighting__change_luminance(color_3, l_new_1);
return _e15;
}
// Shades one point light: specular uses a representative-point (sphere
// area light) approximation toward the reflection ray R; diffuse uses the
// plain light direction. Returns the light's radiance contribution.
fn lighting__point_light(world_position: vec3<f32>, light: mesh_view_types__PointLight, roughness_4: f32, NdotV: f32, N: vec3<f32>, V: vec3<f32>, R: vec3<f32>, F0_: vec3<f32>, diffuseColor: vec3<f32>) -> vec3<f32> {
var L: vec3<f32>;
var H: vec3<f32>;
var NoL_3: f32;
var NoH_2: f32;
var LoH_3: f32;
let light_to_frag: vec3<f32> = (light.position_radius.xyz - world_position.xyz);
let distance_square: f32 = dot(light_to_frag, light_to_frag);
let _e20: f32 = lighting__getDistanceAttenuation(distance_square, light.color_inverse_square_range.w);
// Closest point on the light sphere to the reflection ray (representative point).
let centerToRay: vec3<f32> = ((dot(light_to_frag, R) * R) - light_to_frag);
let _e29: f32 = utils__saturate((light.position_radius.w * inverseSqrt(dot(centerToRay, centerToRay))));
let closestPoint: vec3<f32> = (light_to_frag + (centerToRay * _e29));
let LspecLengthInverse: f32 = inverseSqrt(dot(closestPoint, closestPoint));
// Energy normalization compensating for the widened specular lobe.
let _e40: f32 = utils__saturate((roughness_4 + ((light.position_radius.w * 0.5) * LspecLengthInverse)));
let normalizationFactor: f32 = (roughness_4 / _e40);
let specularIntensity_1: f32 = (normalizationFactor * normalizationFactor);
// Specular pass: L/H/dot products from the representative point.
L = (closestPoint * LspecLengthInverse);
let _e45: vec3<f32> = L;
H = normalize((_e45 + V));
let _e49: vec3<f32> = L;
let _e51: f32 = utils__saturate(dot(N, _e49));
NoL_3 = _e51;
let _e53: vec3<f32> = H;
let _e55: f32 = utils__saturate(dot(N, _e53));
NoH_2 = _e55;
let _e57: vec3<f32> = L;
let _e58: vec3<f32> = H;
let _e60: f32 = utils__saturate(dot(_e57, _e58));
LoH_3 = _e60;
let _e62: vec3<f32> = H;
let _e63: f32 = NoL_3;
let _e64: f32 = NoH_2;
let _e65: f32 = LoH_3;
let _e66: vec3<f32> = lighting__specular(F0_, roughness_4, _e62, NdotV, _e63, _e64, _e65, specularIntensity_1);
// Diffuse pass: recompute L/H/dot products from the actual light direction.
L = normalize(light_to_frag);
let _e68: vec3<f32> = L;
H = normalize((_e68 + V));
let _e71: vec3<f32> = L;
let _e73: f32 = utils__saturate(dot(N, _e71));
NoL_3 = _e73;
let _e74: vec3<f32> = H;
let _e76: f32 = utils__saturate(dot(N, _e74));
NoH_2 = _e76;
let _e77: vec3<f32> = L;
let _e78: vec3<f32> = H;
let _e80: f32 = utils__saturate(dot(_e77, _e78));
LoH_3 = _e80;
let _e81: f32 = NoL_3;
let _e82: f32 = LoH_3;
let _e83: f32 = lighting__Fd_Burley(roughness_4, NdotV, _e81, _e82);
let diffuse: vec3<f32> = (diffuseColor * _e83);
let _e89: f32 = NoL_3;
return (((diffuse + _e66) * light.color_inverse_square_range.xyz) * (_e20 * _e89));
}
// Shades one spot light: reuses point_light and multiplies by a cone
// attenuation. The spot direction is reconstructed from light_custom_data.xy
// (x,z components), with y recovered by normalization and its sign taken
// from the Y_NEGATIVE flag.
fn lighting__spot_light(world_position_1: vec3<f32>, light_1: mesh_view_types__PointLight, roughness_5: f32, NdotV_1: f32, N_1: vec3<f32>, V_1: vec3<f32>, R_1: vec3<f32>, F0_1: vec3<f32>, diffuseColor_1: vec3<f32>) -> vec3<f32> {
var spot_dir: vec3<f32>;
let _e13: vec3<f32> = lighting__point_light(world_position_1, light_1, roughness_5, NdotV_1, N_1, V_1, R_1, F0_1, diffuseColor_1);
spot_dir = vec3<f32>(light_1.light_custom_data.x, 0.0, light_1.light_custom_data.y);
let _e24: f32 = spot_dir.x;
let _e26: f32 = spot_dir.x;
let _e30: f32 = spot_dir.z;
let _e32: f32 = spot_dir.z;
// y = sqrt(1 - x^2 - z^2): spot_dir is stored as a unit vector.
spot_dir.y = sqrt(((1.0 - (_e24 * _e26)) - (_e30 * _e32)));
if ((light_1.flags & mesh_view_types__POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE) != 0u) {
let _e42: f32 = spot_dir.y;
spot_dir.y = -(_e42);
}
let light_to_frag_1: vec3<f32> = (light_1.position_radius.xyz - world_position_1.xyz);
let _e48: vec3<f32> = spot_dir;
let cd: f32 = dot(-(_e48), normalize(light_to_frag_1));
// Smooth cone falloff: light_custom_data.z/.w are spot_scale and spot_offset.
let _e58: f32 = utils__saturate(((cd * light_1.light_custom_data.z) + light_1.light_custom_data.w));
let spot_attenuation: f32 = (_e58 * _e58);
return (_e13 * spot_attenuation);
}
// Shades one directional light (no distance attenuation): Burley diffuse
// plus GGX specular, scaled by the light color and NoL.
fn lighting__directional_light(light_2: mesh_view_types__DirectionalLight, roughness_6: f32, NdotV_2: f32, normal: vec3<f32>, view: vec3<f32>, R_2: vec3<f32>, F0_2: vec3<f32>, diffuseColor_2: vec3<f32>) -> vec3<f32> {
let incident_light: vec3<f32> = light_2.direction_to_light.xyz;
let half_vector: vec3<f32> = normalize((incident_light + view));
let _e17: f32 = utils__saturate(dot(normal, incident_light));
let _e19: f32 = utils__saturate(dot(normal, half_vector));
let _e21: f32 = utils__saturate(dot(incident_light, half_vector));
let _e22: f32 = lighting__Fd_Burley(roughness_6, NdotV_2, _e17, _e21);
let diffuse_1: vec3<f32> = (diffuseColor_2 * _e22);
let _e25: vec3<f32> = lighting__specular(F0_2, roughness_6, half_vector, NdotV_2, _e17, _e19, _e21, 1.0);
return (((_e25 + diffuse_1) * light_2.color.xyz) * _e17);
}
// Maps a view-space depth to a cluster Z slice. Orthographic views slice
// linearly; perspective views slice logarithmically (cluster_factors.z/.w
// encode the scale/bias — see the Lights struct comments in mesh_view_types).
fn clustered_forward__view_z_to_z_slice(view_z: f32, is_orthographic: bool) -> u32 {
var z_slice: u32 = 0u;
if is_orthographic {
let _e18: f32 = mesh_view_bindings__lights.cluster_factors.z;
let _e22: f32 = mesh_view_bindings__lights.cluster_factors.w;
z_slice = u32(floor(((view_z - _e18) * _e22)));
} else {
let _e30: f32 = mesh_view_bindings__lights.cluster_factors.z;
let _e34: f32 = mesh_view_bindings__lights.cluster_factors.w;
z_slice = u32((((log(-(view_z)) * _e30) - _e34) + 1.0));
}
let _e39: u32 = z_slice;
let _e42: u32 = mesh_view_bindings__lights.cluster_dimensions.z;
// Clamp to the last slice so out-of-range depths stay in bounds.
return min(_e39, (_e42 - 1u));
}
// Flattens (x, y, z-slice) to a linear cluster index, clamped to n_clusters-1.
fn clustered_forward__fragment_cluster_index(frag_coord_1: vec2<f32>, view_z_1: f32, is_orthographic_1: bool) -> u32 {
let _e16: vec4<f32> = mesh_view_bindings__lights.cluster_factors;
let xy: vec2<u32> = vec2<u32>(floor((frag_coord_1 * _e16.xy)));
let _e21: u32 = clustered_forward__view_z_to_z_slice(view_z_1, is_orthographic_1);
let _e25: u32 = mesh_view_bindings__lights.cluster_dimensions.x;
let _e31: u32 = mesh_view_bindings__lights.cluster_dimensions.z;
let _e36: u32 = mesh_view_bindings__lights.cluster_dimensions.w;
return min(((((xy.y * _e25) + xy.x) * _e31) + _e21), (_e36 - 1u));
}
// Returns (offset, point light count, spot light count) for a cluster.
// This is the storage-buffer path where the data is stored unpacked.
fn clustered_forward__unpack_offset_and_counts(cluster_index: u32) -> vec3<u32> {
let _e16: vec4<u32> = mesh_view_bindings__cluster_offsets_and_counts.data[cluster_index];
return _e16.xyz;
}
// Looks up a global light id from the cluster light index list.
fn clustered_forward__get_light_id(index: u32) -> u32 {
let _e16: u32 = mesh_view_bindings__cluster_light_index_lists.data[index];
return _e16;
}
// Debug visualization hook; compiled here as a no-op pass-through.
fn clustered_forward__cluster_debug_visualization(output_color_1: vec4<f32>, view_z_2: f32, is_orthographic_2: bool, offset_and_counts: vec3<u32>, cluster_index_1: u32) -> vec4<f32> {
return output_color_1;
}
// Samples the cube shadow map for a point light. Applies normal/depth bias
// in world space, then reconstructs the NDC depth of the major axis using
// the projection terms packed in light_custom_data (see PointLight docs).
// Returns shadow visibility in [0, 1].
fn shadows__fetch_point_shadow(light_id: u32, frag_position: vec4<f32>, surface_normal: vec3<f32>) -> f32 {
let light_3: mesh_view_types__PointLight = mesh_view_bindings__point_lights.data[light_id];
let surface_to_light: vec3<f32> = (light_3.position_radius.xyz - frag_position.xyz);
let surface_to_light_abs: vec3<f32> = abs(surface_to_light);
let distance_to_light: f32 = max(surface_to_light_abs.x, max(surface_to_light_abs.y, surface_to_light_abs.z));
// Normal bias scales with distance; depth bias pushes along the light ray.
let normal_offset: vec3<f32> = ((light_3.shadow_normal_bias * distance_to_light) * surface_normal.xyz);
let depth_offset: vec3<f32> = (light_3.shadow_depth_bias * normalize(surface_to_light.xyz));
let offset_position: vec3<f32> = ((frag_position.xyz + normal_offset) + depth_offset);
let frag_ls: vec3<f32> = (light_3.position_radius.xyz - offset_position.xyz);
let abs_position_ls: vec3<f32> = abs(frag_ls);
let major_axis_magnitude: f32 = max(abs_position_ls.x, max(abs_position_ls.y, abs_position_ls.z));
// zw = projection of the major-axis distance: depth = z/w after divide.
let zw: vec2<f32> = ((-(major_axis_magnitude) * light_3.light_custom_data.xy) + light_3.light_custom_data.zw);
let depth: f32 = (zw.x / zw.y);
let _e60: f32 = textureSampleCompareLevel(mesh_view_bindings__point_shadow_textures, mesh_view_bindings__point_shadow_textures_sampler, frag_ls, i32(light_id), depth);
return _e60;
}
// Samples the 2D-array shadow map for a spot light. Rebuilds the spot
// direction like lighting__spot_light, constructs an orthonormal light
// basis branchlessly (the `sign` trick avoids a degenerate division when
// fwd.z is near -1), projects into the spot frustum, and compares depth.
fn shadows__fetch_spot_shadow(light_id_1: u32, frag_position_1: vec4<f32>, surface_normal_1: vec3<f32>) -> f32 {
var spot_dir_1: vec3<f32>;
var sign: f32 = -1.0;
let light_4: mesh_view_types__PointLight = mesh_view_bindings__point_lights.data[light_id_1];
let surface_to_light_1: vec3<f32> = (light_4.position_radius.xyz - frag_position_1.xyz);
spot_dir_1 = vec3<f32>(light_4.light_custom_data.x, 0.0, light_4.light_custom_data.y);
let _e32: f32 = spot_dir_1.x;
let _e34: f32 = spot_dir_1.x;
let _e38: f32 = spot_dir_1.z;
let _e40: f32 = spot_dir_1.z;
spot_dir_1.y = sqrt(((1.0 - (_e32 * _e34)) - (_e38 * _e40)));
if ((light_4.flags & mesh_view_types__POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE) != 0u) {
let _e50: f32 = spot_dir_1.y;
spot_dir_1.y = -(_e50);
}
let _e52: vec3<f32> = spot_dir_1;
let fwd: vec3<f32> = -(_e52);
let distance_to_light_1: f32 = dot(fwd, surface_to_light_1);
let offset_position_1: vec3<f32> = ((-(surface_to_light_1) + (light_4.shadow_depth_bias * normalize(surface_to_light_1))) + ((surface_normal_1.xyz * light_4.shadow_normal_bias) * distance_to_light_1));
if (fwd.z >= 0.0) {
sign = 1.0;
}
let _e73: f32 = sign;
let a_1: f32 = (-1.0 / (fwd.z + _e73));
let b: f32 = ((fwd.x * fwd.y) * a_1);
let _e81: f32 = sign;
let _e88: f32 = sign;
let _e90: f32 = sign;
let up_dir: vec3<f32> = vec3<f32>((1.0 + (((_e81 * fwd.x) * fwd.x) * a_1)), (_e88 * b), (-(_e90) * fwd.x));
let _e96: f32 = sign;
let right_dir: vec3<f32> = vec3<f32>(-(b), (-(_e96) - ((fwd.y * fwd.y) * a_1)), fwd.y);
let light_inv_rot: mat3x3<f32> = mat3x3<f32>(right_dir, up_dir, fwd);
let projected_position: vec3<f32> = (offset_position_1 * light_inv_rot);
let f_div_minus_z: f32 = (1.0 / (light_4.spot_light_tan_angle * -(projected_position.z)));
let shadow_xy_ndc: vec2<f32> = (projected_position.xy * f_div_minus_z);
// NDC -> UV with Y flipped for texture space.
let shadow_uv: vec2<f32> = ((shadow_xy_ndc * vec2<f32>(0.5, -0.5)) + vec2<f32>(0.5, 0.5));
let depth_1: f32 = (0.10000000149011612 / -(projected_position.z));
// Spot shadow maps live after the directional cascades in the same array.
let _e129: i32 = mesh_view_bindings__lights.spot_light_shadowmap_offset;
let _e131: f32 = textureSampleCompareLevel(mesh_view_bindings__directional_shadow_textures, mesh_view_bindings__directional_shadow_textures_sampler, shadow_uv, (i32(light_id_1) + _e129), depth_1);
return _e131;
}
// Samples the 2D-array shadow map for a directional light. Positions
// outside the shadow frustum are treated as fully lit (returns 1.0).
fn shadows__fetch_directional_shadow(light_id_2: u32, frag_position_2: vec4<f32>, surface_normal_2: vec3<f32>) -> f32 {
let light_5: mesh_view_types__DirectionalLight = mesh_view_bindings__lights.directional_lights[light_id_2];
let normal_offset_1: vec3<f32> = (light_5.shadow_normal_bias * surface_normal_2.xyz);
let depth_offset_1: vec3<f32> = (light_5.shadow_depth_bias * light_5.direction_to_light.xyz);
let offset_position_2: vec4<f32> = vec4<f32>(((frag_position_2.xyz + normal_offset_1) + depth_offset_1), frag_position_2.w);
let offset_position_clip: vec4<f32> = (light_5.view_projection * offset_position_2);
if (offset_position_clip.w <= 0.0) {
return 1.0;
}
let offset_position_ndc: vec3<f32> = (offset_position_clip.xyz / vec3<f32>(offset_position_clip.w));
if ((any((offset_position_ndc.xy < vec2<f32>(-1.0))) || (offset_position_ndc.z < 0.0)) || any((offset_position_ndc > vec3<f32>(1.0)))) {
return 1.0;
}
let flip_correction: vec2<f32> = vec2<f32>(0.5, -0.5);
let light_local: vec2<f32> = ((offset_position_ndc.xy * flip_correction) + vec2<f32>(0.5, 0.5));
let depth_2: f32 = offset_position_ndc.z;
let _e66: f32 = textureSampleCompareLevel(mesh_view_bindings__directional_shadow_textures, mesh_view_bindings__directional_shadow_textures_sampler, light_local, i32(light_id_2), depth_2);
return _e66;
}
// Normalizes the interpolated world normal, flipping it on back faces of
// double-sided materials. (Normal mapping was compiled out of this variant:
// the uv parameter is unused here.)
fn pbr_functions__prepare_normal(standard_material_flags: u32, world_normal: vec3<f32>, uv: vec2<f32>, is_front_1: bool) -> vec3<f32> {
var N_2: vec3<f32>;
N_2 = normalize(world_normal);
if ((standard_material_flags & pbr_types__STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT) != 0u) {
if !(is_front_1) {
let _e37: vec3<f32> = N_2;
N_2 = -(_e37);
}
}
let _e39: vec3<f32> = N_2;
return _e39;
}
// Computes the unit view vector V: for orthographic projections every
// fragment shares the camera forward axis (taken from view_proj's third
// column); otherwise V points from the fragment to the camera position.
fn pbr_functions__calculate_view(world_position_2: vec4<f32>, is_orthographic_3: bool) -> vec3<f32> {
var V_2: vec3<f32>;
if is_orthographic_3 {
let _e34: f32 = mesh_view_bindings__view.view_proj[0][2];
let _e39: f32 = mesh_view_bindings__view.view_proj[1][2];
let _e44: f32 = mesh_view_bindings__view.view_proj[2][2];
V_2 = normalize(vec3<f32>(_e34, _e39, _e44));
} else {
let _e48: vec3<f32> = mesh_view_bindings__view.world_position;
V_2 = normalize((_e48.xyz - world_position_2.xyz));
}
let _e53: vec3<f32> = V_2;
return _e53;
}
// Constructs a PbrInput populated with neutral defaults.
fn pbr_functions__pbr_input_new() -> pbr_functions__PbrInput {
var pbr_input_1: pbr_functions__PbrInput;
let _e29: pbr_types__StandardMaterial = pbr_types__standard_material_new();
pbr_input_1.material = _e29;
pbr_input_1.occlusion = 1.0;
pbr_input_1.frag_coord = vec4<f32>(0.0, 0.0, 0.0, 1.0);
pbr_input_1.world_position = vec4<f32>(0.0, 0.0, 0.0, 1.0);
pbr_input_1.world_normal = vec3<f32>(0.0, 0.0, 1.0);
pbr_input_1.is_orthographic = false;
pbr_input_1.N = vec3<f32>(0.0, 0.0, 1.0);
pbr_input_1.V = vec3<f32>(1.0, 0.0, 0.0);
let _e61: pbr_functions__PbrInput = pbr_input_1;
return _e61;
}
// Main PBR shading routine. Resolves alpha mode, derives F0 and diffuse
// color from metallic/reflectance, then accumulates lighting from the
// fragment's cluster: point lights, spot lights (stored after the point
// lights in the same cluster list), and all directional lights. Ambient
// and emissive are added last. Returns the shaded (pre-tonemap) color.
fn pbr_functions__pbr(in: pbr_functions__PbrInput) -> vec4<f32> {
var output_color_2: vec4<f32>;
var light_accum: vec3<f32>;
var i: u32;
var shadow: f32;
var i_1: u32;
var shadow_1: f32;
var i_2: u32 = 0u;
var shadow_2: f32;
output_color_2 = in.material.base_color;
let emissive_1: vec4<f32> = in.material.emissive;
let metallic_1: f32 = in.material.metallic;
let perceptual_roughness_2: f32 = in.material.perceptual_roughness;
let _e37: f32 = lighting__perceptualRoughnessToRoughness(perceptual_roughness_2);
let occlusion_1: f32 = in.occlusion;
// Alpha mode: opaque forces alpha to 1; mask keeps or discards on cutoff.
if ((in.material.flags & pbr_types__STANDARD_MATERIAL_FLAGS_ALPHA_MODE_OPAQUE) != 0u) {
output_color_2.w = 1.0;
} else {
if ((in.material.flags & pbr_types__STANDARD_MATERIAL_FLAGS_ALPHA_MODE_MASK) != 0u) {
let _e52: f32 = output_color_2.w;
if (_e52 >= in.material.alpha_cutoff) {
output_color_2.w = 1.0;
} else {
discard;
}
}
}
let NdotV_3: f32 = max(dot(in.N, in.V), 9.999999747378752e-5);
let reflectance: f32 = in.material.reflectance;
// F0 blends dielectric reflectance (0.16 * reflectance^2) with base color
// by metallic; diffuse color is the non-metallic remainder.
let _e71: vec4<f32> = output_color_2;
let F0_3: vec3<f32> = (vec3<f32>((((0.1599999964237213 * reflectance) * reflectance) * (1.0 - metallic_1))) + (_e71.xyz * metallic_1));
let _e76: vec4<f32> = output_color_2;
let diffuse_color: vec3<f32> = (_e76.xyz * (1.0 - metallic_1));
let R_3: vec3<f32> = reflect(-(in.V), in.N);
light_accum = vec3<f32>(0.0);
// View-space Z from the third row of inverse_view, used for cluster lookup.
let _e92: f32 = mesh_view_bindings__view.inverse_view[0][2];
let _e97: f32 = mesh_view_bindings__view.inverse_view[1][2];
let _e102: f32 = mesh_view_bindings__view.inverse_view[2][2];
let _e107: f32 = mesh_view_bindings__view.inverse_view[3][2];
let view_z_3: f32 = dot(vec4<f32>(_e92, _e97, _e102, _e107), in.world_position);
let _e114: u32 = clustered_forward__fragment_cluster_index(in.frag_coord.xy, view_z_3, in.is_orthographic);
// _e115 = (offset, point light count, spot light count) for this cluster.
let _e115: vec3<u32> = clustered_forward__unpack_offset_and_counts(_e114);
// Point lights: indices [offset, offset + point_count).
i = _e115.x;
loop {
let _e119: u32 = i;
if (_e119 < (_e115.x + _e115.y)) {
} else {
break;
}
let _e129: u32 = i;
let _e130: u32 = clustered_forward__get_light_id(_e129);
let light_6: mesh_view_types__PointLight = mesh_view_bindings__point_lights.data[_e130];
shadow = 1.0;
let _e137: u32 = mesh_bindings__mesh.flags;
if (((_e137 & mesh_types__MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u) && ((light_6.flags & mesh_view_types__POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u)) {
let _e148: f32 = shadows__fetch_point_shadow(_e130, in.world_position, in.world_normal);
shadow = _e148;
}
let _e153: vec3<f32> = lighting__point_light(in.world_position.xyz, light_6, _e37, NdotV_3, in.N, in.V, R_3, F0_3, diffuse_color);
let _e154: vec3<f32> = light_accum;
let _e155: f32 = shadow;
light_accum = (_e154 + (_e153 * _e155));
continuing {
let _e126: u32 = i;
i = (_e126 + 1u);
}
}
// Spot lights: indices [offset + point_count, offset + point_count + spot_count).
i_1 = (_e115.x + _e115.y);
loop {
let _e164: u32 = i_1;
if (_e164 < ((_e115.x + _e115.y) + _e115.z)) {
} else {
break;
}
let _e177: u32 = i_1;
let _e178: u32 = clustered_forward__get_light_id(_e177);
let light_7: mesh_view_types__PointLight = mesh_view_bindings__point_lights.data[_e178];
shadow_1 = 1.0;
let _e185: u32 = mesh_bindings__mesh.flags;
if (((_e185 & mesh_types__MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u) && ((light_7.flags & mesh_view_types__POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u)) {
let _e196: f32 = shadows__fetch_spot_shadow(_e178, in.world_position, in.world_normal);
shadow_1 = _e196;
}
let _e201: vec3<f32> = lighting__spot_light(in.world_position.xyz, light_7, _e37, NdotV_3, in.N, in.V, R_3, F0_3, diffuse_color);
let _e202: vec3<f32> = light_accum;
let _e203: f32 = shadow_1;
light_accum = (_e202 + (_e201 * _e203));
continuing {
let _e174: u32 = i_1;
i_1 = (_e174 + 1u);
}
}
// Directional lights: not clustered, iterate all of them.
let n_directional_lights: u32 = mesh_view_bindings__lights.n_directional_lights;
loop {
let _e210: u32 = i_2;
if (_e210 < n_directional_lights) {
} else {
break;
}
let _e216: u32 = i_2;
let light_8: mesh_view_types__DirectionalLight = mesh_view_bindings__lights.directional_lights[_e216];
shadow_2 = 1.0;
let _e222: u32 = mesh_bindings__mesh.flags;
if (((_e222 & mesh_types__MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u) && ((light_8.flags & mesh_view_types__DIRECTIONAL_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u)) {
let _e231: u32 = i_2;
let _e234: f32 = shadows__fetch_directional_shadow(_e231, in.world_position, in.world_normal);
shadow_2 = _e234;
}
let _e237: vec3<f32> = lighting__directional_light(light_8, _e37, NdotV_3, in.N, in.V, R_3, F0_3, diffuse_color);
let _e238: vec3<f32> = light_accum;
let _e239: f32 = shadow_2;
light_accum = (_e238 + (_e237 * _e239));
continuing {
let _e212: u32 = i_2;
i_2 = (_e212 + 1u);
}
}
// Ambient: environment BRDF approximation for diffuse and specular,
// scaled by ambient color and occlusion, then emissive scaled by alpha.
let _e243: vec3<f32> = lighting__EnvBRDFApprox(diffuse_color, 1.0, NdotV_3);
let _e244: vec3<f32> = lighting__EnvBRDFApprox(F0_3, perceptual_roughness_2, NdotV_3);
let _e245: vec3<f32> = light_accum;
let _e248: vec4<f32> = mesh_view_bindings__lights.ambient_color;
let _e255: f32 = output_color_2.w;
let _e259: f32 = output_color_2.w;
output_color_2 = vec4<f32>(((_e245 + (((_e243 + _e244) * _e248.xyz) * occlusion_1)) + (emissive_1.xyz * _e255)), _e259);
let _e261: vec4<f32> = output_color_2;
let _e263: vec4<f32> = clustered_forward__cluster_debug_visualization(_e261, view_z_3, in.is_orthographic, _e115, _e114);
output_color_2 = _e263;
let _e264: vec4<f32> = output_color_2;
return _e264;
}
// Tone maps the RGB channels with luminance-based Reinhard; alpha is untouched.
fn pbr_functions__tone_mapping(in_1: vec4<f32>) -> vec4<f32> {
let _e29: vec3<f32> = lighting__reinhard_luminance(in_1.xyz);
return vec4<f32>(_e29, in_1.w);
}
// Fragment entry point: samples the material textures gated by the material
// flag bits, fills a PbrInput, and runs pbr + tone mapping. Unlit materials
// (UNLIT_BIT set) skip shading and output the (possibly textured) base color.
@fragment
fn fragment(mesh: mesh_vertex_output__MeshVertexOutput, @builtin(front_facing) is_front: bool, @builtin(position) frag_coord: vec4<f32>) -> @location(0) vec4<f32> {
var output_color: vec4<f32>;
var pbr_input: pbr_functions__PbrInput;
var emissive: vec4<f32>;
var metallic: f32;
var perceptual_roughness: f32;
var occlusion: f32;
let _e42: vec4<f32> = pbr_bindings__material.base_color;
output_color = _e42;
let _e45: u32 = pbr_bindings__material.flags;
if ((_e45 & pbr_types__STANDARD_MATERIAL_FLAGS_BASE_COLOR_TEXTURE_BIT) != 0u) {
let _e49: vec4<f32> = output_color;
let _e51: vec4<f32> = textureSample(pbr_bindings__base_color_texture, pbr_bindings__base_color_sampler, mesh.uv);
output_color = (_e49 * _e51);
}
let _e54: u32 = pbr_bindings__material.flags;
if ((_e54 & pbr_types__STANDARD_MATERIAL_FLAGS_UNLIT_BIT) == 0u) {
// Lit path: copy material parameters into pbr_input, sampling each
// optional texture only when its flag bit is set.
let _e61: vec4<f32> = output_color;
pbr_input.material.base_color = _e61;
let _e65: f32 = pbr_bindings__material.reflectance;
pbr_input.material.reflectance = _e65;
let _e69: u32 = pbr_bindings__material.flags;
pbr_input.material.flags = _e69;
let _e73: f32 = pbr_bindings__material.alpha_cutoff;
pbr_input.material.alpha_cutoff = _e73;
let _e75: vec4<f32> = pbr_bindings__material.emissive;
emissive = _e75;
let _e78: u32 = pbr_bindings__material.flags;
if ((_e78 & pbr_types__STANDARD_MATERIAL_FLAGS_EMISSIVE_TEXTURE_BIT) != 0u) {
let _e82: vec4<f32> = emissive;
let _e85: vec4<f32> = textureSample(pbr_bindings__emissive_texture, pbr_bindings__emissive_sampler, mesh.uv);
emissive = vec4<f32>((_e82.xyz * _e85.xyz), 1.0);
}
let _e92: vec4<f32> = emissive;
pbr_input.material.emissive = _e92;
let _e94: f32 = pbr_bindings__material.metallic;
metallic = _e94;
let _e97: f32 = pbr_bindings__material.perceptual_roughness;
perceptual_roughness = _e97;
let _e100: u32 = pbr_bindings__material.flags;
if ((_e100 & pbr_types__STANDARD_MATERIAL_FLAGS_METALLIC_ROUGHNESS_TEXTURE_BIT) != 0u) {
// glTF convention: metallic in the blue channel, roughness in green.
let metallic_roughness: vec4<f32> = textureSample(pbr_bindings__metallic_roughness_texture, pbr_bindings__metallic_roughness_sampler, mesh.uv);
let _e106: f32 = metallic;
metallic = (_e106 * metallic_roughness.z);
let _e109: f32 = perceptual_roughness;
perceptual_roughness = (_e109 * metallic_roughness.y);
}
let _e114: f32 = metallic;
pbr_input.material.metallic = _e114;
let _e117: f32 = perceptual_roughness;
pbr_input.material.perceptual_roughness = _e117;
occlusion = 1.0;
let _e121: u32 = pbr_bindings__material.flags;
if ((_e121 & pbr_types__STANDARD_MATERIAL_FLAGS_OCCLUSION_TEXTURE_BIT) != 0u) {
let _e126: vec4<f32> = textureSample(pbr_bindings__occlusion_texture, pbr_bindings__occlusion_sampler, mesh.uv);
occlusion = _e126.x;
}
let _e129: f32 = occlusion;
pbr_input.occlusion = _e129;
pbr_input.frag_coord = frag_coord;
pbr_input.world_position = mesh.world_position;
pbr_input.world_normal = mesh.world_normal;
// projection[3][3] == 1 identifies an orthographic projection matrix.
let _e140: f32 = mesh_view_bindings__view.projection[3][3];
pbr_input.is_orthographic = (_e140 == 1.0);
let _e145: u32 = pbr_bindings__material.flags;
let _e148: vec3<f32> = pbr_functions__prepare_normal(_e145, mesh.world_normal, mesh.uv, is_front);
pbr_input.N = _e148;
let _e152: bool = pbr_input.is_orthographic;
let _e153: vec3<f32> = pbr_functions__calculate_view(mesh.world_position, _e152);
pbr_input.V = _e153;
let _e154: pbr_functions__PbrInput = pbr_input;
let _e155: vec4<f32> = pbr_functions__pbr(_e154);
let _e156: vec4<f32> = pbr_functions__tone_mapping(_e155);
output_color = _e156;
}
let _e157: vec4<f32> = output_color;
return _e157;
}

View File

@@ -0,0 +1,90 @@
#import bevy_pbr::mesh_vertex_output as OutputTypes
#import bevy_pbr::pbr_functions as PbrCore
#import bevy_pbr::pbr_bindings as MaterialBindings
#import bevy_pbr::pbr_types as PbrTypes
#import bevy_pbr::mesh_view_bindings as ViewBindings
@fragment
// Pre-composition PBR fragment shader (naga_oil source form, with #ifdef
// preprocessor branches on mesh vertex attributes). Mirrors the mangled
// output above: sample material textures, fill PbrInput, light + tone-map
// unless the material is unlit.
fn fragment(
mesh: OutputTypes::MeshVertexOutput,
@builtin(front_facing) is_front: bool,
@builtin(position) frag_coord: vec4<f32>,
) -> @location(0) vec4<f32> {
var output_color: vec4<f32> = MaterialBindings::material.base_color;
#ifdef VERTEX_COLORS
output_color = output_color * mesh.color;
#endif
#ifdef VERTEX_UVS
if ((MaterialBindings::material.flags & PbrTypes::STANDARD_MATERIAL_FLAGS_BASE_COLOR_TEXTURE_BIT) != 0u) {
output_color = output_color * textureSample(MaterialBindings::base_color_texture, MaterialBindings::base_color_sampler, mesh.uv);
}
#endif
// NOTE: Unlit bit not set means == 0 is true, so the true case is if lit
if ((MaterialBindings::material.flags & PbrTypes::STANDARD_MATERIAL_FLAGS_UNLIT_BIT) == 0u) {
// Prepare a 'processed' StandardMaterial by sampling all textures to resolve
// the material members
var pbr_input: PbrCore::PbrInput;
pbr_input.material.base_color = output_color;
pbr_input.material.reflectance = MaterialBindings::material.reflectance;
pbr_input.material.flags = MaterialBindings::material.flags;
pbr_input.material.alpha_cutoff = MaterialBindings::material.alpha_cutoff;
// TODO use .a for exposure compensation in HDR
var emissive: vec4<f32> = MaterialBindings::material.emissive;
#ifdef VERTEX_UVS
if ((MaterialBindings::material.flags & PbrTypes::STANDARD_MATERIAL_FLAGS_EMISSIVE_TEXTURE_BIT) != 0u) {
emissive = vec4<f32>(emissive.rgb * textureSample(MaterialBindings::emissive_texture, MaterialBindings::emissive_sampler, mesh.uv).rgb, 1.0);
}
#endif
pbr_input.material.emissive = emissive;
var metallic: f32 = MaterialBindings::material.metallic;
var perceptual_roughness: f32 = MaterialBindings::material.perceptual_roughness;
#ifdef VERTEX_UVS
if ((MaterialBindings::material.flags & PbrTypes::STANDARD_MATERIAL_FLAGS_METALLIC_ROUGHNESS_TEXTURE_BIT) != 0u) {
let metallic_roughness = textureSample(MaterialBindings::metallic_roughness_texture, MaterialBindings::metallic_roughness_sampler, mesh.uv);
// Sampling from GLTF standard channels for now
metallic = metallic * metallic_roughness.b;
perceptual_roughness = perceptual_roughness * metallic_roughness.g;
}
#endif
pbr_input.material.metallic = metallic;
pbr_input.material.perceptual_roughness = perceptual_roughness;
var occlusion: f32 = 1.0;
#ifdef VERTEX_UVS
if ((MaterialBindings::material.flags & PbrTypes::STANDARD_MATERIAL_FLAGS_OCCLUSION_TEXTURE_BIT) != 0u) {
occlusion = textureSample(MaterialBindings::occlusion_texture, MaterialBindings::occlusion_sampler, mesh.uv).r;
}
#endif
pbr_input.occlusion = occlusion;
pbr_input.frag_coord = frag_coord;
pbr_input.world_position = mesh.world_position;
pbr_input.world_normal = mesh.world_normal;
// projection[3].w == 1.0 identifies an orthographic projection matrix.
pbr_input.is_orthographic = ViewBindings::view.projection[3].w == 1.0;
// NOTE: the argument list below is shaped by the same #ifdefs as the
// prepare_normal signature; do not reorder arguments across the guards.
pbr_input.N = PbrCore::prepare_normal(
MaterialBindings::material.flags,
mesh.world_normal,
#ifdef VERTEX_TANGENTS
#ifdef STANDARDMATERIAL_NORMAL_MAP
mesh.world_tangent,
#endif
#endif
#ifdef VERTEX_UVS
mesh.uv,
#endif
is_front,
);
pbr_input.V = PbrCore::calculate_view(mesh.world_position, pbr_input.is_orthographic);
output_color = PbrCore::tone_mapping(PbrCore::pbr(pbr_input));
}
return output_color;
}

View File

@@ -0,0 +1,26 @@
#define_import_path bevy_pbr::pbr_bindings
#import bevy_pbr::pbr_types as Types
// Material bind group (group 1): the StandardMaterial uniform followed by its
// optional texture/sampler pairs at consecutive bindings. Which textures are
// actually used is selected at runtime via the material's flags bitfield.
@group(1) @binding(0)
var<uniform> material: Types::StandardMaterial;
@group(1) @binding(1)
var base_color_texture: texture_2d<f32>;
@group(1) @binding(2)
var base_color_sampler: sampler;
@group(1) @binding(3)
var emissive_texture: texture_2d<f32>;
@group(1) @binding(4)
var emissive_sampler: sampler;
@group(1) @binding(5)
var metallic_roughness_texture: texture_2d<f32>;
@group(1) @binding(6)
var metallic_roughness_sampler: sampler;
@group(1) @binding(7)
var occlusion_texture: texture_2d<f32>;
@group(1) @binding(8)
var occlusion_sampler: sampler;
@group(1) @binding(9)
var normal_map_texture: texture_2d<f32>;
@group(1) @binding(10)
var normal_map_sampler: sampler;

View File

@@ -0,0 +1,252 @@
#define_import_path bevy_pbr::pbr_functions
#import bevy_pbr::pbr_types as PbrTypes
#import bevy_pbr::mesh_types as MeshTypes
#import bevy_pbr::mesh_bindings as MeshBindings
#import bevy_pbr::mesh_view_types as ViewTypes
#import bevy_pbr::mesh_view_bindings as ViewBindings
#import bevy_pbr::lighting as Lighting
#import bevy_pbr::clustered_forward as Clustering
#import bevy_pbr::shadows as Shadows
// NOTE: This ensures that the world_normal is normalized and if
// vertex tangents and normal maps then normal mapping may be applied.
// Returns the final shading normal N; flips it (and the tangent basis) for
// back faces of double-sided materials.
fn prepare_normal(
standard_material_flags: u32,
world_normal: vec3<f32>,
#ifdef VERTEX_TANGENTS
#ifdef STANDARDMATERIAL_NORMAL_MAP
world_tangent: vec4<f32>,
#endif
#endif
#ifdef VERTEX_UVS
uv: vec2<f32>,
#endif
is_front: bool,
) -> vec3<f32> {
var N: vec3<f32> = normalize(world_normal);
#ifdef VERTEX_TANGENTS
#ifdef STANDARDMATERIAL_NORMAL_MAP
// NOTE: The mikktspace method of normal mapping explicitly requires that these NOT be
// normalized nor any Gram-Schmidt applied to ensure the vertex normal is orthogonal to the
// vertex tangent! Do not change this code unless you really know what you are doing.
// http://www.mikktspace.com/
var T: vec3<f32> = world_tangent.xyz;
var B: vec3<f32> = world_tangent.w * cross(N, T);
#endif
#endif
if ((standard_material_flags & PbrTypes::STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT) != 0u) {
if (!is_front) {
N = -N;
#ifdef VERTEX_TANGENTS
#ifdef STANDARDMATERIAL_NORMAL_MAP
T = -T;
B = -B;
#endif
#endif
}
}
#ifdef VERTEX_TANGENTS
#ifdef VERTEX_UVS
#ifdef STANDARDMATERIAL_NORMAL_MAP
// Nt is the tangent-space normal.
// NOTE(review): normal_map_texture/normal_map_sampler are referenced here
// without a module qualifier, and this module does not import pbr_bindings —
// presumably only compiled when those names resolve; confirm against the
// composer's expected defines for this fixture.
var Nt = textureSample(normal_map_texture, normal_map_sampler, uv).rgb;
if ((standard_material_flags & PbrTypes::STANDARD_MATERIAL_FLAGS_TWO_COMPONENT_NORMAL_MAP) != 0u) {
// Only use the xy components and derive z for 2-component normal maps.
Nt = vec3<f32>(Nt.rg * 2.0 - 1.0, 0.0);
Nt.z = sqrt(1.0 - Nt.x * Nt.x - Nt.y * Nt.y);
} else {
Nt = Nt * 2.0 - 1.0;
}
// Normal maps authored for DirectX require flipping the y component
if ((standard_material_flags & PbrTypes::STANDARD_MATERIAL_FLAGS_FLIP_NORMAL_MAP_Y) != 0u) {
Nt.y = -Nt.y;
}
// NOTE: The mikktspace method of normal mapping applies maps the tangent-space normal from
// the normal map texture in this way to be an EXACT inverse of how the normal map baker
// calculates the normal maps so there is no error introduced. Do not change this code
// unless you really know what you are doing.
// http://www.mikktspace.com/
N = normalize(Nt.x * T + Nt.y * B + Nt.z * N);
#endif
#endif
#endif
return N;
}
// NOTE: Correctly calculates the view vector depending on whether
// the projection is orthographic or perspective.
fn calculate_view(
world_position: vec4<f32>,
is_orthographic: bool,
) -> vec3<f32> {
var V: vec3<f32>;
if (is_orthographic) {
// Orthographic view vector: constant view direction taken from the
// third column of view_proj.
V = normalize(vec3<f32>(ViewBindings::view.view_proj[0].z, ViewBindings::view.view_proj[1].z, ViewBindings::view.view_proj[2].z));
} else {
// Only valid for a perspective projection: direction from the fragment
// toward the camera position.
V = normalize(ViewBindings::view.world_position.xyz - world_position.xyz);
}
return V;
}
// All per-fragment inputs the pbr() function needs: the resolved material
// plus geometric quantities computed by the fragment entry point.
struct PbrInput {
material: PbrTypes::StandardMaterial,
// Ambient-occlusion factor in [0, 1]; 1.0 means unoccluded.
occlusion: f32,
frag_coord: vec4<f32>,
world_position: vec4<f32>,
// Normalized world normal used for shadow mapping as normal-mapping is not used for shadow
// mapping
world_normal: vec3<f32>,
// Normalized normal-mapped world normal used for lighting
N: vec3<f32>,
// Normalized view vector in world space, pointing from the fragment world position toward the
// view world position
V: vec3<f32>,
is_orthographic: bool,
};
// Creates a PbrInput with default values: default material, no occlusion,
// origin position, +Z normal, +X view vector, perspective projection.
fn pbr_input_new() -> PbrInput {
var pbr_input: PbrInput;
pbr_input.material = PbrTypes::standard_material_new();
pbr_input.occlusion = 1.0;
pbr_input.frag_coord = vec4<f32>(0.0, 0.0, 0.0, 1.0);
pbr_input.world_position = vec4<f32>(0.0, 0.0, 0.0, 1.0);
pbr_input.world_normal = vec3<f32>(0.0, 0.0, 1.0);
pbr_input.is_orthographic = false;
pbr_input.N = vec3<f32>(0.0, 0.0, 1.0);
pbr_input.V = vec3<f32>(1.0, 0.0, 0.0);
return pbr_input;
}
// Core PBR evaluation: applies alpha mode, then accumulates clustered point,
// spot and directional lights (with shadows) plus an approximate ambient/
// emissive term, and returns the lit color (before tone mapping).
fn pbr(
in: PbrInput,
) -> vec4<f32> {
var output_color: vec4<f32> = in.material.base_color;
// TODO use .a for exposure compensation in HDR
let emissive = in.material.emissive;
// calculate non-linear roughness from linear perceptualRoughness
let metallic = in.material.metallic;
let perceptual_roughness = in.material.perceptual_roughness;
let roughness = Lighting::perceptualRoughnessToRoughness(perceptual_roughness);
let occlusion = in.occlusion;
if ((in.material.flags & PbrTypes::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_OPAQUE) != 0u) {
// NOTE: If rendering as opaque, alpha should be ignored so set to 1.0
output_color.a = 1.0;
} else if ((in.material.flags & PbrTypes::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_MASK) != 0u) {
if (output_color.a >= in.material.alpha_cutoff) {
// NOTE: If rendering as masked alpha and >= the cutoff, render as fully opaque
output_color.a = 1.0;
} else {
// NOTE: output_color.a < in.material.alpha_cutoff means the fragment is not rendered
// NOTE: This and any other discards mean that early-z testing cannot be done!
discard;
}
}
// Neubelt and Pettineo 2013, "Crafting a Next-gen Material Pipeline for The Order: 1886"
let NdotV = max(dot(in.N, in.V), 0.0001);
// Remapping [0,1] reflectance to F0
// See https://google.github.io/filament/Filament.html#materialsystem/parameterization/remapping
let reflectance = in.material.reflectance;
let F0 = 0.16 * reflectance * reflectance * (1.0 - metallic) + output_color.rgb * metallic;
// Diffuse strength inversely related to metallicity
let diffuse_color = output_color.rgb * (1.0 - metallic);
let R = reflect(-in.V, in.N);
// accumulate color
var light_accum: vec3<f32> = vec3<f32>(0.0);
// View-space depth of the fragment, used to select the light cluster.
let view_z = dot(vec4<f32>(
ViewBindings::view.inverse_view[0].z,
ViewBindings::view.inverse_view[1].z,
ViewBindings::view.inverse_view[2].z,
ViewBindings::view.inverse_view[3].z
), in.world_position);
let cluster_index = Clustering::fragment_cluster_index(in.frag_coord.xy, view_z, in.is_orthographic);
// offset_and_counts = [offset, point light count, spot light count].
let offset_and_counts = Clustering::unpack_offset_and_counts(cluster_index);
// point lights
for (var i: u32 = offset_and_counts[0]; i < offset_and_counts[0] + offset_and_counts[1]; i = i + 1u) {
let light_id = Clustering::get_light_id(i);
let light = ViewBindings::point_lights.data[light_id];
var shadow: f32 = 1.0;
if ((MeshBindings::mesh.flags & MeshTypes::MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u
&& (light.flags & ViewTypes::POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) {
shadow = Shadows::fetch_point_shadow(light_id, in.world_position, in.world_normal);
}
let light_contrib = Lighting::point_light(in.world_position.xyz, light, roughness, NdotV, in.N, in.V, R, F0, diffuse_color);
light_accum = light_accum + light_contrib * shadow;
}
// spot lights (stored after the point lights in the same cluster list)
for (var i: u32 = offset_and_counts[0] + offset_and_counts[1]; i < offset_and_counts[0] + offset_and_counts[1] + offset_and_counts[2]; i = i + 1u) {
let light_id = Clustering::get_light_id(i);
let light = ViewBindings::point_lights.data[light_id];
var shadow: f32 = 1.0;
if ((MeshBindings::mesh.flags & MeshTypes::MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u
&& (light.flags & ViewTypes::POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) {
shadow = Shadows::fetch_spot_shadow(light_id, in.world_position, in.world_normal);
}
let light_contrib = Lighting::spot_light(in.world_position.xyz, light, roughness, NdotV, in.N, in.V, R, F0, diffuse_color);
light_accum = light_accum + light_contrib * shadow;
}
let n_directional_lights = ViewBindings::lights.n_directional_lights;
for (var i: u32 = 0u; i < n_directional_lights; i = i + 1u) {
let light = ViewBindings::lights.directional_lights[i];
var shadow: f32 = 1.0;
if ((MeshBindings::mesh.flags & MeshTypes::MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u
&& (light.flags & ViewTypes::DIRECTIONAL_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) {
shadow = Shadows::fetch_directional_shadow(i, in.world_position, in.world_normal);
}
let light_contrib = Lighting::directional_light(light, roughness, NdotV, in.N, in.V, R, F0, diffuse_color);
light_accum = light_accum + light_contrib * shadow;
}
// Approximate ambient (environment) diffuse + specular contributions.
let diffuse_ambient = Lighting::EnvBRDFApprox(diffuse_color, 1.0, NdotV);
let specular_ambient = Lighting::EnvBRDFApprox(F0, perceptual_roughness, NdotV);
output_color = vec4<f32>(
light_accum +
(diffuse_ambient + specular_ambient) * ViewBindings::lights.ambient_color.rgb * occlusion +
emissive.rgb * output_color.a,
output_color.a);
output_color = Clustering::cluster_debug_visualization(
output_color,
view_z,
in.is_orthographic,
offset_and_counts,
cluster_index,
);
return output_color;
}
// Reinhard-luminance tone mapping on RGB; alpha is passed through unchanged.
fn tone_mapping(in: vec4<f32>) -> vec4<f32> {
// tone_mapping
return vec4<f32>(Lighting::reinhard_luminance(in.rgb), in.a);
// Gamma correction.
// Not needed with sRGB buffer
// output_color.rgb = pow(output_color.rgb, vec3(1.0 / 2.2));
}

View File

@@ -0,0 +1,283 @@
#define_import_path bevy_pbr::lighting
#import bevy_pbr::utils as Utils
#import bevy_pbr::mesh_view_types as ViewTypes
// From the Filament design doc
// https://google.github.io/filament/Filament.html#table_symbols
// Symbol Definition
// v View unit vector
// l Incident light unit vector
// n Surface normal unit vector
// h Half unit vector between l and v
// f BRDF
// f_d Diffuse component of a BRDF
// f_r Specular component of a BRDF
// α Roughness, remapped from using input perceptualRoughness
// σ Diffuse reflectance
// Ω Spherical domain
// f0 Reflectance at normal incidence
// f90 Reflectance at grazing angle
// χ+(a) Heaviside function (1 if a>0 and 0 otherwise)
// nior Index of refraction (IOR) of an interface
// ⟨n⋅l⟩ Dot product clamped to [0..1]
// ⟨a⟩ Saturated value (clamped to [0..1])
// The Bidirectional Reflectance Distribution Function (BRDF) describes the surface response of a standard material
// and consists of two components, the diffuse component (f_d) and the specular component (f_r):
// f(v,l) = f_d(v,l) + f_r(v,l)
//
// The form of the microfacet model is the same for diffuse and specular
// f_r(v,l) = f_d(v,l) = 1 / { |n⋅v||n⋅l| } ∫_Ω D(m,α) G(v,l,m) f_m(v,l,m) (v⋅m) (l⋅m) dm
//
// In which:
// D, also called the Normal Distribution Function (NDF) models the distribution of the microfacets
// G models the visibility (or occlusion or shadow-masking) of the microfacets
// f_m is the microfacet BRDF and differs between specular and diffuse components
//
// The above integration needs to be approximated.
// distanceAttenuation is simply the square falloff of light intensity
// combined with a smooth attenuation at the edge of the light radius
//
// light radius is a non-physical construct for efficiency purposes,
// because otherwise every light affects every fragment in the scene
fn getDistanceAttenuation(distanceSquare: f32, inverseRangeSquared: f32) -> f32 {
let factor = distanceSquare * inverseRangeSquared;
// Smooth window that reaches 0 at the light's maximum range.
let smoothFactor = Utils::saturate(1.0 - factor * factor);
let attenuation = smoothFactor * smoothFactor;
// Inverse-square falloff, clamped to avoid division by ~0 at the light.
return attenuation * 1.0 / max(distanceSquare, 0.0001);
}
// Normal distribution function (specular D)
// Based on https://google.github.io/filament/Filament.html#citation-walter07
// D_GGX(h,α) = α^2 / { π ((n⋅h)^2 (α^2 − 1) + 1)^2 }
// Simple implementation, has precision problems when using fp16 instead of fp32
// see https://google.github.io/filament/Filament.html#listing_speculardfp16
fn D_GGX(roughness: f32, NoH: f32, h: vec3<f32>) -> f32 {
let oneMinusNoHSquared = 1.0 - NoH * NoH;
let a = NoH * roughness;
let k = roughness / (oneMinusNoHSquared + a * a);
let d = k * k * (1.0 / Utils::PI);
return d;
}
// Visibility function (Specular G)
// V(v,l,a) = G(v,l,α) / { 4 (n⋅v) (n⋅l) }
// such that f_r becomes
// f_r(v,l) = D(h,α) V(v,l,α) F(v,h,f0)
// where
// V(v,l,α) = 0.5 / { n⋅l sqrt((n⋅v)^2 (1 − α^2) + α^2) + n⋅v sqrt((n⋅l)^2 (1 − α^2) + α^2) }
// Note the two sqrt's, that may be slow on mobile, see https://google.github.io/filament/Filament.html#listing_approximatedspecularv
fn V_SmithGGXCorrelated(roughness: f32, NoV: f32, NoL: f32) -> f32 {
let a2 = roughness * roughness;
let lambdaV = NoL * sqrt((NoV - a2 * NoV) * NoV + a2);
let lambdaL = NoV * sqrt((NoL - a2 * NoL) * NoL + a2);
let v = 0.5 / (lambdaV + lambdaL);
return v;
}
// Fresnel function
// see https://google.github.io/filament/Filament.html#citation-schlick94
// F_Schlick(v,h,f_0,f_90) = f_0 + (f_90 − f_0) (1 − v⋅h)^5
fn F_Schlick_vec(f0: vec3<f32>, f90: f32, VoH: f32) -> vec3<f32> {
// not using mix to keep the vec3 and float versions identical
return f0 + (f90 - f0) * pow(1.0 - VoH, 5.0);
}
// Scalar variant of the Schlick Fresnel approximation above.
fn F_Schlick(f0: f32, f90: f32, VoH: f32) -> f32 {
// not using mix to keep the vec3 and float versions identical
return f0 + (f90 - f0) * pow(1.0 - VoH, 5.0);
}
// Fresnel with an f90 derived from f0, suitable for ambient occlusion.
fn fresnel(f0: vec3<f32>, LoH: f32) -> vec3<f32> {
// f_90 suitable for ambient occlusion
// see https://google.github.io/filament/Filament.html#lighting/occlusion
let f90 = Utils::saturate(dot(f0, vec3<f32>(50.0 * 0.33)));
return F_Schlick_vec(f0, f90, LoH);
}
// Specular BRDF
// https://google.github.io/filament/Filament.html#materialsystem/specularbrdf
// Cook-Torrance approximation of the microfacet model integration using Fresnel law F to model f_m
// f_r(v,l) = { D(h,α) G(v,l,α) F(v,h,f0) } / { 4 (n⋅v) (n⋅l) }
fn specular(f0: vec3<f32>, roughness: f32, h: vec3<f32>, NoV: f32, NoL: f32,
NoH: f32, LoH: f32, specularIntensity: f32) -> vec3<f32> {
let D = D_GGX(roughness, NoH, h);
let V = V_SmithGGXCorrelated(roughness, NoV, NoL);
let F = fresnel(f0, LoH);
return (specularIntensity * D * V) * F;
}
// Diffuse BRDF
// https://google.github.io/filament/Filament.html#materialsystem/diffusebrdf
// fd(v,l) = σ/π * 1 / { |n⋅v||n⋅l| } ∫Ω D(m,α) G(v,l,m) (v⋅m) (l⋅m) dm
//
// simplest approximation
// float Fd_Lambert() {
// return 1.0 / PI;
// }
//
// vec3 Fd = diffuseColor * Fd_Lambert();
//
// Disney approximation
// See https://google.github.io/filament/Filament.html#citation-burley12
// minimal quality difference
fn Fd_Burley(roughness: f32, NoV: f32, NoL: f32, LoH: f32) -> f32 {
let f90 = 0.5 + 2.0 * roughness * LoH * LoH;
let lightScatter = F_Schlick(1.0, f90, NoL);
let viewScatter = F_Schlick(1.0, f90, NoV);
return lightScatter * viewScatter * (1.0 / Utils::PI);
}
// From https://www.unrealengine.com/en-US/blog/physically-based-shading-on-mobile
// Analytic approximation of the environment BRDF lookup table.
fn EnvBRDFApprox(f0: vec3<f32>, perceptual_roughness: f32, NoV: f32) -> vec3<f32> {
let c0 = vec4<f32>(-1.0, -0.0275, -0.572, 0.022);
let c1 = vec4<f32>(1.0, 0.0425, 1.04, -0.04);
let r = perceptual_roughness * c0 + c1;
let a004 = min(r.x * r.x, exp2(-9.28 * NoV)) * r.x + r.y;
let AB = vec2<f32>(-1.04, 1.04) * a004 + r.zw;
return f0 * AB.x + AB.y;
}
// Convert artist-facing perceptual roughness to the squared roughness used
// by the BRDF terms.
fn perceptualRoughnessToRoughness(perceptualRoughness: f32) -> f32 {
// clamp perceptual roughness to prevent precision problems
// According to Filament design 0.089 is recommended for mobile
// Filament uses 0.045 for non-mobile
let clampedPerceptualRoughness = clamp(perceptualRoughness, 0.089, 1.0);
return clampedPerceptualRoughness * clampedPerceptualRoughness;
}
// from https://64.github.io/tonemapping/
// reinhard on RGB oversaturates colors
fn reinhard(color: vec3<f32>) -> vec3<f32> {
return color / (1.0 + color);
}
// Extended Reinhard on RGB: maps `max_white` to 1.0 instead of saturating at infinity.
fn reinhard_extended(color: vec3<f32>, max_white: f32) -> vec3<f32> {
let numerator = color * (1.0 + (color / vec3<f32>(max_white * max_white)));
return numerator / (1.0 + color);
}
// luminance coefficients from Rec. 709.
// https://en.wikipedia.org/wiki/Rec._709
fn luminance(v: vec3<f32>) -> f32 {
return dot(v, vec3<f32>(0.2126, 0.7152, 0.0722));
}
// Rescale a color so its luminance becomes l_out while preserving hue/chroma.
fn change_luminance(c_in: vec3<f32>, l_out: f32) -> vec3<f32> {
let l_in = luminance(c_in);
return c_in * (l_out / l_in);
}
// Reinhard applied to luminance only, which avoids the color oversaturation
// of per-channel Reinhard.
fn reinhard_luminance(color: vec3<f32>) -> vec3<f32> {
let l_old = luminance(color);
let l_new = l_old / (1.0 + l_old);
return change_luminance(color, l_new);
}
// Extended Reinhard on luminance: `max_white_l` is the luminance mapped to 1.0.
fn reinhard_extended_luminance(color: vec3<f32>, max_white_l: f32) -> vec3<f32> {
let l_old = luminance(color);
let numerator = l_old * (1.0 + (l_old / (max_white_l * max_white_l)));
let l_new = numerator / (1.0 + l_old);
return change_luminance(color, l_new);
}
// Contribution of one clustered point light: representative-point area-light
// specular plus Burley diffuse, scaled by range attenuation and N⋅L.
fn point_light(
world_position: vec3<f32>, light: ViewTypes::PointLight, roughness: f32, NdotV: f32, N: vec3<f32>, V: vec3<f32>,
R: vec3<f32>, F0: vec3<f32>, diffuseColor: vec3<f32>
) -> vec3<f32> {
let light_to_frag = light.position_radius.xyz - world_position.xyz;
let distance_square = dot(light_to_frag, light_to_frag);
let rangeAttenuation =
getDistanceAttenuation(distance_square, light.color_inverse_square_range.w);
// Specular.
// Representative Point Area Lights.
// see http://blog.selfshadow.com/publications/s2013-shading-course/karis/s2013_pbs_epic_notes_v2.pdf p14-16
let a = roughness;
let centerToRay = dot(light_to_frag, R) * R - light_to_frag;
let closestPoint = light_to_frag + centerToRay * Utils::saturate(light.position_radius.w * inverseSqrt(dot(centerToRay, centerToRay)));
let LspecLengthInverse = inverseSqrt(dot(closestPoint, closestPoint));
let normalizationFactor = a / Utils::saturate(a + (light.position_radius.w * 0.5 * LspecLengthInverse));
let specularIntensity = normalizationFactor * normalizationFactor;
var L: vec3<f32> = closestPoint * LspecLengthInverse; // normalize() equivalent?
var H: vec3<f32> = normalize(L + V);
var NoL: f32 = Utils::saturate(dot(N, L));
var NoH: f32 = Utils::saturate(dot(N, H));
var LoH: f32 = Utils::saturate(dot(L, H));
let specular_light = specular(F0, roughness, H, NdotV, NoL, NoH, LoH, specularIntensity);
// Diffuse.
// Comes after specular since its NoL is used in the lighting equation.
L = normalize(light_to_frag);
H = normalize(L + V);
NoL = Utils::saturate(dot(N, L));
NoH = Utils::saturate(dot(N, H));
LoH = Utils::saturate(dot(L, H));
let diffuse = diffuseColor * Fd_Burley(roughness, NdotV, NoL, LoH);
// See https://google.github.io/filament/Filament.html#mjx-eqn-pointLightLuminanceEquation
// Lout = f(v,l) Φ / { 4 π d^2 }⟨n⋅l⟩
// where
// f(v,l) = (f_d(v,l) + f_r(v,l)) * light_color
// Φ is luminous power in lumens
// our rangeAttenuation = 1 / d^2 multiplied with an attenuation factor for smoothing at the edge of the non-physical maximum light radius
// For a point light, luminous intensity, I, in lumens per steradian is given by:
// I = Φ / 4 π
// The derivation of this can be seen here: https://google.github.io/filament/Filament.html#mjx-eqn-pointLightLuminousPower
// NOTE: light.color.rgb is premultiplied with light.intensity / 4 π (which would be the luminous intensity) on the CPU
// TODO compensate for energy loss https://google.github.io/filament/Filament.html#materialsystem/improvingthebrdfs/energylossinspecularreflectance
return ((diffuse + specular_light) * light.color_inverse_square_range.rgb) * (rangeAttenuation * NoL);
}
// Contribution of one spot light: the point-light result scaled by a squared
// angular attenuation factor derived from precomputed spot scale/offset.
fn spot_light(
world_position: vec3<f32>, light: ViewTypes::PointLight, roughness: f32, NdotV: f32, N: vec3<f32>, V: vec3<f32>,
R: vec3<f32>, F0: vec3<f32>, diffuseColor: vec3<f32>
) -> vec3<f32> {
// reuse the point light calculations
let point_light = point_light(world_position, light, roughness, NdotV, N, V, R, F0, diffuseColor);
// reconstruct spot dir from x/z and y-direction flag
var spot_dir = vec3<f32>(light.light_custom_data.x, 0.0, light.light_custom_data.y);
spot_dir.y = sqrt(1.0 - spot_dir.x * spot_dir.x - spot_dir.z * spot_dir.z);
if ((light.flags & ViewTypes::POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE) != 0u) {
spot_dir.y = -spot_dir.y;
}
let light_to_frag = light.position_radius.xyz - world_position.xyz;
// calculate attenuation based on filament formula https://google.github.io/filament/Filament.html#listing_glslpunctuallight
// spot_scale and spot_offset have been precomputed
// note we normalize here to get "l" from the filament listing. spot_dir is already normalized
let cd = dot(-spot_dir, normalize(light_to_frag));
let attenuation = Utils::saturate(cd * light.light_custom_data.z + light.light_custom_data.w);
let spot_attenuation = attenuation * attenuation;
return point_light * spot_attenuation;
}
// Contribution of one directional light: Burley diffuse + Cook-Torrance
// specular with unit specular intensity, scaled by N⋅L (no distance falloff).
fn directional_light(light: ViewTypes::DirectionalLight, roughness: f32, NdotV: f32, normal: vec3<f32>, view: vec3<f32>, R: vec3<f32>, F0: vec3<f32>, diffuseColor: vec3<f32>) -> vec3<f32> {
let incident_light = light.direction_to_light.xyz;
let half_vector = normalize(incident_light + view);
let NoL = Utils::saturate(dot(normal, incident_light));
let NoH = Utils::saturate(dot(normal, half_vector));
let LoH = Utils::saturate(dot(incident_light, half_vector));
let diffuse = diffuseColor * Fd_Burley(roughness, NdotV, NoL, LoH);
let specularIntensity = 1.0;
let specular_light = specular(F0, roughness, half_vector, NdotV, NoL, NoH, LoH, specularIntensity);
return (specular_light + diffuse) * light.color.rgb * NoL;
}

View File

@@ -0,0 +1,40 @@
#define_import_path bevy_pbr::pbr_types
// GPU-side mirror of bevy_pbr's StandardMaterial uniform. Field meanings are
// resolved by the fragment shader; optional textures modulate these values.
struct StandardMaterial {
base_color: vec4<f32>,
emissive: vec4<f32>,
perceptual_roughness: f32,
metallic: f32,
reflectance: f32,
// 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options.
flags: u32,
alpha_cutoff: f32,
};
// Bit values for StandardMaterial.flags. Texture bits indicate which optional
// textures are bound; the three ALPHA_MODE bits are mutually exclusive.
const STANDARD_MATERIAL_FLAGS_BASE_COLOR_TEXTURE_BIT: u32 = 1u;
const STANDARD_MATERIAL_FLAGS_EMISSIVE_TEXTURE_BIT: u32 = 2u;
const STANDARD_MATERIAL_FLAGS_METALLIC_ROUGHNESS_TEXTURE_BIT: u32 = 4u;
const STANDARD_MATERIAL_FLAGS_OCCLUSION_TEXTURE_BIT: u32 = 8u;
const STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT: u32 = 16u;
const STANDARD_MATERIAL_FLAGS_UNLIT_BIT: u32 = 32u;
const STANDARD_MATERIAL_FLAGS_ALPHA_MODE_OPAQUE: u32 = 64u;
const STANDARD_MATERIAL_FLAGS_ALPHA_MODE_MASK: u32 = 128u;
const STANDARD_MATERIAL_FLAGS_ALPHA_MODE_BLEND: u32 = 256u;
const STANDARD_MATERIAL_FLAGS_TWO_COMPONENT_NORMAL_MAP: u32 = 512u;
const STANDARD_MATERIAL_FLAGS_FLIP_NORMAL_MAP_Y: u32 = 1024u;
// Creates a StandardMaterial with default values
fn standard_material_new() -> StandardMaterial {
var material: StandardMaterial;
// NOTE: Keep in-sync with src/pbr_material.rs!
material.base_color = vec4<f32>(1.0, 1.0, 1.0, 1.0);
material.emissive = vec4<f32>(0.0, 0.0, 0.0, 1.0);
material.perceptual_roughness = 0.089;
material.metallic = 0.01;
material.reflectance = 0.5;
material.flags = STANDARD_MATERIAL_FLAGS_ALPHA_MODE_OPAQUE;
material.alpha_cutoff = 0.5;
return material;
}

View File

@@ -0,0 +1,137 @@
#define_import_path bevy_pbr::shadows
#import bevy_pbr::mesh_view_types as Types
#import bevy_pbr::mesh_view_bindings as Bindings
// Sample the cube shadow map of a point light, with normal/depth bias applied
// in world space. Returns the PCF comparison result (1.0 = fully lit).
fn fetch_point_shadow(light_id: u32, frag_position: vec4<f32>, surface_normal: vec3<f32>) -> f32 {
let light = Bindings::point_lights.data[light_id];
// because the shadow maps align with the axes and the frustum planes are at 45 degrees
// we can get the worldspace depth by taking the largest absolute axis
let surface_to_light = light.position_radius.xyz - frag_position.xyz;
let surface_to_light_abs = abs(surface_to_light);
let distance_to_light = max(surface_to_light_abs.x, max(surface_to_light_abs.y, surface_to_light_abs.z));
// The normal bias here is already scaled by the texel size at 1 world unit from the light.
// The texel size increases proportionally with distance from the light so multiplying by
// distance to light scales the normal bias to the texel size at the fragment distance.
let normal_offset = light.shadow_normal_bias * distance_to_light * surface_normal.xyz;
let depth_offset = light.shadow_depth_bias * normalize(surface_to_light.xyz);
let offset_position = frag_position.xyz + normal_offset + depth_offset;
// similar largest-absolute-axis trick as above, but now with the offset fragment position
let frag_ls = light.position_radius.xyz - offset_position.xyz;
let abs_position_ls = abs(frag_ls);
let major_axis_magnitude = max(abs_position_ls.x, max(abs_position_ls.y, abs_position_ls.z));
// NOTE: These simplifications come from multiplying:
// projection * vec4(0, 0, -major_axis_magnitude, 1.0)
// and keeping only the terms that have any impact on the depth.
// Projection-agnostic approach:
let zw = -major_axis_magnitude * light.light_custom_data.xy + light.light_custom_data.zw;
let depth = zw.x / zw.y;
// do the lookup, using HW PCF and comparison
// NOTE: Due to the non-uniform control flow above, we must use the Level variant of
// textureSampleCompare to avoid undefined behaviour due to some of the fragments in
// a quad (2x2 fragments) being processed not being sampled, and this messing with
// mip-mapping functionality. The shadow maps have no mipmaps so Level just samples
// from LOD 0.
#ifdef NO_ARRAY_TEXTURES_SUPPORT
return textureSampleCompare(Bindings::point_shadow_textures, Bindings::point_shadow_textures_sampler, frag_ls, depth);
#else
return textureSampleCompareLevel(Bindings::point_shadow_textures, Bindings::point_shadow_textures_sampler, frag_ls, i32(light_id), depth);
#endif
}
// Sample the shadow map of a spot light. Reconstructs the light's view basis
// on the GPU (mirroring render/light.rs), projects the biased fragment
// position into the light's clip space, and does a PCF comparison lookup.
fn fetch_spot_shadow(light_id: u32, frag_position: vec4<f32>, surface_normal: vec3<f32>) -> f32 {
let light = Bindings::point_lights.data[light_id];
let surface_to_light = light.position_radius.xyz - frag_position.xyz;
// construct the light view matrix
var spot_dir = vec3<f32>(light.light_custom_data.x, 0.0, light.light_custom_data.y);
// reconstruct spot dir from x/z and y-direction flag
spot_dir.y = sqrt(1.0 - spot_dir.x * spot_dir.x - spot_dir.z * spot_dir.z);
if ((light.flags & Types::POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE) != 0u) {
spot_dir.y = -spot_dir.y;
}
// view matrix z_axis is the reverse of transform.forward()
let fwd = -spot_dir;
let distance_to_light = dot(fwd, surface_to_light);
// Apply depth and (distance-scaled) normal bias before projecting.
let offset_position =
-surface_to_light
+ (light.shadow_depth_bias * normalize(surface_to_light))
+ (surface_normal.xyz * light.shadow_normal_bias) * distance_to_light;
// the construction of the up and right vectors needs to precisely mirror the code
// in render/light.rs:spot_light_view_matrix
var sign = -1.0;
if (fwd.z >= 0.0) {
sign = 1.0;
}
let a = -1.0 / (fwd.z + sign);
let b = fwd.x * fwd.y * a;
let up_dir = vec3<f32>(1.0 + sign * fwd.x * fwd.x * a, sign * b, -sign * fwd.x);
let right_dir = vec3<f32>(-b, -sign - fwd.y * fwd.y * a, fwd.y);
let light_inv_rot = mat3x3<f32>(right_dir, up_dir, fwd);
// because the matrix is a pure rotation matrix, the inverse is just the transpose, and to calculate
// the product of the transpose with a vector we can just post-multiply instead of pre-multiplying.
// this allows us to keep the matrix construction code identical between CPU and GPU.
let projected_position = offset_position * light_inv_rot;
// divide xy by perspective matrix "f" and by -projected.z (projected.z is -projection matrix's w)
// to get ndc coordinates
let f_div_minus_z = 1.0 / (light.spot_light_tan_angle * -projected_position.z);
let shadow_xy_ndc = projected_position.xy * f_div_minus_z;
// convert to uv coordinates
let shadow_uv = shadow_xy_ndc * vec2<f32>(0.5, -0.5) + vec2<f32>(0.5, 0.5);
// 0.1 must match POINT_LIGHT_NEAR_Z
let depth = 0.1 / -projected_position.z;
// Spot shadows share the directional shadow array, offset past the
// directional cascades.
#ifdef NO_ARRAY_TEXTURES_SUPPORT
return textureSampleCompare(Bindings::directional_shadow_textures, Bindings::directional_shadow_textures_sampler,
shadow_uv, depth);
#else
return textureSampleCompareLevel(Bindings::directional_shadow_textures, Bindings::directional_shadow_textures_sampler,
shadow_uv, i32(light_id) + Bindings::lights.spot_light_shadowmap_offset, depth);
#endif
}
// Returns the shadow factor for a directional light at the given fragment:
// 0.0 = fully shadowed, 1.0 = fully lit. Fragments outside the light's
// orthographic shadow volume are treated as unshadowed.
fn fetch_directional_shadow(light_id: u32, frag_position: vec4<f32>, surface_normal: vec3<f32>) -> f32 {
    let light = Bindings::lights.directional_lights[light_id];
    // The normal bias is scaled to the texel size.
    let normal_offset = light.shadow_normal_bias * surface_normal.xyz;
    let depth_offset = light.shadow_depth_bias * light.direction_to_light.xyz;
    let offset_position = vec4<f32>(frag_position.xyz + normal_offset + depth_offset, frag_position.w);
    let offset_position_clip = light.view_projection * offset_position;
    // behind the light's projection plane: treat as unshadowed
    if (offset_position_clip.w <= 0.0) {
        return 1.0;
    }
    let offset_position_ndc = offset_position_clip.xyz / offset_position_clip.w;
    // No shadow outside the orthographic projection volume
    if (any(offset_position_ndc.xy < vec2<f32>(-1.0)) || offset_position_ndc.z < 0.0
            || any(offset_position_ndc > vec3<f32>(1.0))) {
        return 1.0;
    }
    // compute texture coordinates for shadow lookup, compensating for the Y-flip difference
    // between the NDC and texture coordinates
    let flip_correction = vec2<f32>(0.5, -0.5);
    let light_local = offset_position_ndc.xy * flip_correction + vec2<f32>(0.5, 0.5);
    let depth = offset_position_ndc.z;
    // do the lookup, using HW PCF and comparison
    // NOTE: Due to non-uniform control flow above, we must use the level variant of the texture
    // sampler to avoid use of implicit derivatives causing possible undefined behavior.
#ifdef NO_ARRAY_TEXTURES_SUPPORT
    return textureSampleCompareLevel(Bindings::directional_shadow_textures, Bindings::directional_shadow_textures_sampler, light_local, depth);
#else
    return textureSampleCompareLevel(Bindings::directional_shadow_textures, Bindings::directional_shadow_textures_sampler, light_local, i32(light_id), depth);
#endif
}

View File

@@ -0,0 +1,41 @@
#define_import_path bevy_pbr::skinning
#ifdef SKINNED
@group(2) @binding(1)
var<uniform> joint_matrices: SkinnedMesh;
// Blends the four joint matrices selected by `indexes`, weighted by
// the vertex `weights`, producing the skinned model matrix.
fn skin_model(
    indexes: vec4<u32>,
    weights: vec4<f32>,
) -> mat4x4<f32> {
    var blended = weights.x * joint_matrices.data[indexes.x];
    blended = blended + weights.y * joint_matrices.data[indexes.y];
    blended = blended + weights.z * joint_matrices.data[indexes.z];
    blended = blended + weights.w * joint_matrices.data[indexes.w];
    return blended;
}
// Inverse-transpose of a 3x3 matrix via the adjugate: the output columns are
// cross products of the input columns divided by the determinant
// (det = dot(in[2], cross(in[0], in[1]))). Used to transform normals so that
// non-uniform scaling is handled correctly.
fn inverse_transpose_3x3(in: mat3x3<f32>) -> mat3x3<f32> {
    let x = cross(in[1], in[2]);
    let y = cross(in[2], in[0]);
    let z = cross(in[0], in[1]);
    let det = dot(in[2], z);
    return mat3x3<f32>(
        x / det,
        y / det,
        z / det
    );
}
// Transforms a vertex normal by the inverse-transpose of the upper-left 3x3
// of the skinned model matrix, keeping normals correct under non-uniform scale.
// NOTE(review): the result is not renormalized here — presumably the caller
// normalizes; confirm against call sites.
fn skin_normals(
    model: mat4x4<f32>,
    normal: vec3<f32>,
) -> vec3<f32> {
    return inverse_transpose_3x3(mat3x3<f32>(
        model[0].xyz,
        model[1].xyz,
        model[2].xyz
    )) * normal;
}
#endif

View File

@@ -0,0 +1,23 @@
#define_import_path bevy_pbr::utils
const PI: f32 = 3.141592653589793;
// Clamp a scalar to [0.0, 1.0]; identical to clamp(value, 0.0, 1.0).
fn saturate(value: f32) -> f32 {
    return min(max(value, 0.0), 1.0);
}
// Converts an HSV colour (hue in [0, 1], saturation, value) to RGB.
fn hsv2rgb(hue: f32, saturation: f32, value: f32) -> vec3<f32> {
    // piecewise-linear hue ramp: per-channel phase offsets folded into [0, 1]
    let rgb = clamp(
        abs(
            ((hue * 6.0 + vec3<f32>(0.0, 4.0, 2.0)) % 6.0) - 3.0
        ) - 1.0,
        vec3<f32>(0.0),
        vec3<f32>(1.0)
    );
    // saturation blends between white and the pure hue; value scales brightness
    return value * mix( vec3<f32>(1.0), rgb, vec3<f32>(saturation));
}
// Cheap deterministic pseudo-random value in [0, 1) from a single seed.
// Classic sin-based GPU hash with conventional magic constants; not suitable
// where statistical quality matters.
fn random1D(s: f32) -> f32 {
    return fract(sin(s * 12.9898) * 43758.5453123);
}

View File

@@ -0,0 +1,44 @@
#import bevy_pbr::mesh_types
#import bevy_pbr::mesh_view_bindings
@group(1) @binding(0)
var<uniform> mesh: Mesh;
#ifdef SKINNED
@group(1) @binding(1)
var<uniform> joint_matrices: SkinnedMesh;
#import bevy_pbr::skinning
#endif
// NOTE: Bindings must come before functions that use them!
#import bevy_pbr::mesh_functions
// Vertex input for this shader. Joint attributes are only present when the
// mesh is skinned.
struct Vertex {
    @location(0) position: vec3<f32>,
#ifdef SKINNED
    @location(4) joint_indexes: vec4<u32>,
    @location(5) joint_weights: vec4<f32>,
#endif
};
// Only the clip-space position is interpolated; the fragment stage needs
// nothing else.
struct VertexOutput {
    @builtin(position) clip_position: vec4<f32>,
};
// Vertex entry point: transforms the local-space position to clip space,
// using the blended joint matrix when the mesh is skinned, otherwise the
// mesh's model matrix.
@vertex
fn vertex(vertex: Vertex) -> VertexOutput {
#ifdef SKINNED
    let model = skin_model(vertex.joint_indexes, vertex.joint_weights);
#else
    let model = mesh.model;
#endif
    var out: VertexOutput;
    out.clip_position = mesh_position_local_to_clip(model, vec4<f32>(vertex.position, 1.0));
    return out;
}
// Fragment entry point: emits solid opaque white for every fragment.
@fragment
fn fragment() -> @location(0) vec4<f32> {
    return vec4<f32>(1.0);
}

View File

@@ -0,0 +1,225 @@
use std::borrow::Cow;
use naga_oil::compose::{
ComposableModuleDescriptor, Composer, ComposerError, NagaModuleDescriptor,
};
#[allow(unused_variables, dead_code)]
/// Builds a `Composer` preloaded with every bevy_pbr composable module.
///
/// The modules are registered in dependency order: types/bindings first, then
/// the modules that import them. Registration errors are printed and skipped
/// (matching the original best-effort behaviour) rather than aborting.
fn init_composer() -> Composer {
    let mut composer = Composer::default();
    // (source, file_path) for each composable module, in dependency order.
    let modules: [(&str, &str); 13] = [
        (
            include_str!("bevy_pbr_wgsl/utils.wgsl"),
            "examples/bevy_pbr_wgsl/utils.wgsl",
        ),
        (
            include_str!("bevy_pbr_wgsl/mesh_view_types.wgsl"),
            "examples/bevy_pbr_wgsl/mesh_view_types.wgsl",
        ),
        (
            include_str!("bevy_pbr_wgsl/mesh_view_bindings.wgsl"),
            "examples/bevy_pbr_wgsl/mesh_view_bindings.wgsl",
        ),
        (
            include_str!("bevy_pbr_wgsl/pbr_types.wgsl"),
            "examples/bevy_pbr_wgsl/pbr_types.wgsl",
        ),
        (
            include_str!("bevy_pbr_wgsl/pbr_bindings.wgsl"),
            "examples/bevy_pbr_wgsl/pbr_bindings.wgsl",
        ),
        (
            include_str!("bevy_pbr_wgsl/skinning.wgsl"),
            "examples/bevy_pbr_wgsl/skinning.wgsl",
        ),
        (
            include_str!("bevy_pbr_wgsl/mesh_types.wgsl"),
            "examples/bevy_pbr_wgsl/mesh_types.wgsl",
        ),
        (
            include_str!("bevy_pbr_wgsl/mesh_bindings.wgsl"),
            "examples/bevy_pbr_wgsl/mesh_bindings.wgsl",
        ),
        (
            include_str!("bevy_pbr_wgsl/mesh_vertex_output.wgsl"),
            "examples/bevy_pbr_wgsl/mesh_vertex_output.wgsl",
        ),
        (
            include_str!("bevy_pbr_wgsl/clustered_forward.wgsl"),
            "examples/bevy_pbr_wgsl/clustered_forward.wgsl",
        ),
        (
            include_str!("bevy_pbr_wgsl/pbr_lighting.wgsl"),
            "examples/bevy_pbr_wgsl/pbr_lighting.wgsl",
        ),
        (
            include_str!("bevy_pbr_wgsl/shadows.wgsl"),
            "examples/bevy_pbr_wgsl/shadows.wgsl",
        ),
        (
            include_str!("bevy_pbr_wgsl/pbr_functions.wgsl"),
            "examples/bevy_pbr_wgsl/pbr_functions.wgsl",
        ),
    ];
    for (source, file_path) in modules {
        match composer.add_composable_module(ComposableModuleDescriptor {
            source,
            file_path,
            ..Default::default()
        }) {
            Ok(_module) => {
                // println!("{} -> {:#?}", module.name, module)
            }
            Err(e) => {
                println!("? -> {e:#?}")
            }
        }
    }
    composer
}
// rebuild composer every time
fn test_compose_full() -> Result<naga::Module, ComposerError> {
let mut composer = init_composer();
match composer.make_naga_module(NagaModuleDescriptor {
source: include_str!("bevy_pbr_wgsl/pbr.wgsl"),
file_path: "examples/bevy_pbr_wgsl/pbr.wgsl",
shader_defs: [("VERTEX_UVS".to_owned(), Default::default())].into(),
..Default::default()
}) {
Ok(module) => {
// println!("shader: {:#?}", module);
// let info = composer.create_validator().validate(&module).unwrap();
// let _wgsl = naga::back::wgsl::write_string(&module, &info, naga::back::wgsl::WriterFlags::EXPLICIT_TYPES).unwrap();
// println!("wgsl: \n\n{}", wgsl);
Ok(module)
}
Err(e) => {
println!("{}", e.emit_to_string(&composer));
Err(e)
}
}
}
// make naga module from initialized composer
fn test_compose_final_module(n: usize, composer: &mut Composer) {
let mut shader;
for _ in 0..n {
shader = match composer.make_naga_module(NagaModuleDescriptor {
source: include_str!("bevy_pbr_wgsl/pbr.wgsl"),
file_path: "examples/bevy_pbr_wgsl/pbr.wgsl",
shader_defs: [("VERTEX_UVS".to_owned(), Default::default())].into(),
..Default::default()
}) {
Ok(module) => {
// println!("shader: {:#?}", module);
// let info = composer.create_validator().validate(&module).unwrap();
// let _wgsl = naga::back::wgsl::write_string(&module, &info, naga::back::wgsl::WriterFlags::EXPLICIT_TYPES).unwrap();
// println!("wgsl: \n\n{}", wgsl);
Ok(module)
}
Err(e) => {
println!("error: {e:#?}");
Err(e)
}
};
if shader.as_ref().unwrap().types.iter().next().is_none() {
println!("ouch");
}
}
}
// make shader module from string
fn test_wgsl_string_compile(n: usize) {
let instance = wgpu::Instance::new(&wgpu::InstanceDescriptor::default());
let adapter = instance
.enumerate_adapters(wgpu::Backends::all())
.into_iter()
.next()
.unwrap();
let device = futures_lite::future::block_on(
adapter.request_device(&wgpu::DeviceDescriptor::default(), None),
)
.unwrap()
.0;
for _ in 0..n {
let _desc = device.create_shader_module(wgpu::ShaderModuleDescriptor {
source: wgpu::ShaderSource::Wgsl(
include_str!("bevy_pbr_wgsl/output_VERTEX_UVS.wgsl").into(),
),
label: None,
});
}
}
// make shader module from composed naga
fn test_composer_compile(n: usize, composer: &mut Composer) {
let instance = wgpu::Instance::new(&wgpu::InstanceDescriptor::default());
let adapter = instance
.enumerate_adapters(wgpu::Backends::all())
.into_iter()
.next()
.unwrap();
let device = futures_lite::future::block_on(
adapter.request_device(&wgpu::DeviceDescriptor::default(), None),
)
.unwrap()
.0;
for _ in 0..n {
let module = composer
.make_naga_module(NagaModuleDescriptor {
source: include_str!("bevy_pbr_wgsl/pbr.wgsl"),
file_path: "examples/bevy_pbr_wgsl/pbr.wgsl",
shader_defs: [("VERTEX_UVS".to_owned(), Default::default())].into(),
..Default::default()
})
.unwrap();
let _desc = device.create_shader_module(wgpu::ShaderModuleDescriptor {
source: wgpu::ShaderSource::Naga(Cow::Owned(module)),
label: None,
});
}
}
/// Runs the four compose/compile benchmarks in sequence and prints wall-clock
/// timings for each. Uses `Instant::elapsed` rather than a second
/// `Instant::now()` + subtraction.
fn main() {
    // 1: full composer construction + module build, no caching
    println!("running 1000 full composer builds (no caching)");
    let start = std::time::Instant::now();
    for _ in 0..1000 {
        let pbr = test_compose_full().unwrap();
        // cheap sanity check that composition produced a non-empty module
        if pbr.types.iter().next().is_none() {
            println!("ouch");
        }
    }
    println!("1000 full builds: {:?}", start.elapsed());
    let mut composer = init_composer();
    // 2: final module builds against a cached composer
    println!("running 10000 composer final builds");
    let start = std::time::Instant::now();
    test_compose_final_module(10000, &mut composer);
    println!("10000 final builds: {:?}", start.elapsed());
    // 3: baseline — wgpu compiling a pre-composed WGSL string
    println!("running 10000 wgpu string compiles");
    let start = std::time::Instant::now();
    test_wgsl_string_compile(10000);
    println!("10000 string compiles: {:?}", start.elapsed());
    // 4: composer build + wgpu module creation from naga IR
    println!("running 10000 composer builds + wgpu module compiles");
    let start = std::time::Instant::now();
    test_composer_compile(10000, &mut composer);
    println!("10000 module compiles: {:?}", start.elapsed());
}

View File

@@ -0,0 +1,262 @@
use std::{borrow::Cow, str::Lines};
use regex::Regex;
// outside of blocks and quotes, change state on //, /* or "
// State-transition regexes for the comment stripper; the matched token decides
// which CommentState we move to next.
// outside of blocks and quotes, change state on //, /* or "
static RE_NONE: once_cell::sync::Lazy<Regex> =
    once_cell::sync::Lazy::new(|| Regex::new(r#"(//|/\*|\")"#).unwrap());
// in blocks, change on /* and */ (block comments nest, so both matter)
static RE_BLOCK: once_cell::sync::Lazy<Regex> =
    once_cell::sync::Lazy::new(|| Regex::new(r"(/\*|\*/)").unwrap());
// in quotes, change only on "
static RE_QUOTE: once_cell::sync::Lazy<Regex> =
    once_cell::sync::Lazy::new(|| Regex::new(r#"\""#).unwrap());
/// Lexical state carried across lines by the comment-replacing iterator.
#[derive(PartialEq, Eq)]
enum CommentState {
    /// Normal code: not inside a comment or a quoted string.
    None,
    /// Inside a block comment; the payload is the nesting depth.
    Block(usize),
    /// Inside a double-quoted string (e.g. a quoted import path).
    Quote,
}
/// Line iterator that yields each input line with comments replaced by an
/// equal number of spaces, preserving line lengths so byte offsets stay valid.
pub struct CommentReplaceIter<'a> {
    lines: &'a mut Lines<'a>,
    // comment/quote state carried over from the previous line
    state: CommentState,
}
impl<'a> Iterator for CommentReplaceIter<'a> {
    type Item = Cow<'a, str>;
    /// Yields the next line with comment bytes blanked to spaces.
    /// Runs a small state machine over the line: scan to the next significant
    /// marker for the current state, copy (or blank) the span before it, then
    /// transition state on the marker itself.
    fn next(&mut self) -> Option<Self::Item> {
        let line_in = self.lines.next()?;
        // fast path: no comment/quote markers and no carried-over state
        if self.state == CommentState::None && !RE_NONE.is_match(line_in) {
            return Some(Cow::Borrowed(line_in));
        }
        let mut output = String::new();
        let mut section_start = 0;
        loop {
            // pick the marker regex appropriate for the current state
            let marker = match self.state {
                CommentState::None => &RE_NONE,
                CommentState::Block(_) => &RE_BLOCK,
                CommentState::Quote => &RE_QUOTE,
            }
            .find(&line_in[section_start..]);
            let section_end = marker
                .map(|m| section_start + m.start())
                .unwrap_or(line_in.len());
            // inside a block comment the span is blanked; otherwise copied verbatim
            if let CommentState::Block(_) = self.state {
                output.extend(std::iter::repeat_n(' ', section_end - section_start));
            } else {
                output.push_str(&line_in[section_start..section_end]);
            }
            match marker {
                // no more markers: line is complete
                None => return Some(Cow::Owned(output)),
                Some(marker) => {
                    match marker.as_str() {
                        // only possible in None state
                        "//" => {
                            // blank the rest of the line (marker.start() is
                            // relative to the slice starting at section_start)
                            output.extend(std::iter::repeat_n(
                                ' ',
                                line_in.len() - marker.start() - section_start,
                            ));
                            return Some(Cow::Owned(output));
                        }
                        // only possible in None or Block state
                        "/*" => {
                            self.state = match self.state {
                                CommentState::None => CommentState::Block(1),
                                CommentState::Block(n) => CommentState::Block(n + 1),
                                _ => unreachable!(),
                            };
                            // the two marker bytes also become spaces
                            output.push_str("  ");
                        }
                        // only possible in Block state
                        "*/" => {
                            self.state = match self.state {
                                CommentState::Block(1) => CommentState::None,
                                CommentState::Block(n) => CommentState::Block(n - 1),
                                _ => unreachable!(),
                            };
                            output.push_str("  ");
                        }
                        // only possible in None or Quote state
                        "\"" => {
                            self.state = match self.state {
                                CommentState::None => CommentState::Quote,
                                CommentState::Quote => CommentState::None,
                                _ => unreachable!(),
                            };
                            output.push('"');
                        }
                        _ => unreachable!(),
                    }
                    section_start += marker.end();
                }
            }
        }
    }
}
pub trait CommentReplaceExt<'a> {
    /// replace WGSL and GLSL comments with whitespace characters,
    /// keeping each line the same length so spans remain valid
    fn replace_comments(&'a mut self) -> CommentReplaceIter<'a>;
}
impl<'a> CommentReplaceExt<'a> for Lines<'a> {
    /// Wraps this line iterator, starting in the "not in a comment" state.
    fn replace_comments(&'a mut self) -> CommentReplaceIter<'a> {
        CommentReplaceIter {
            lines: self,
            state: CommentState::None,
        }
    }
}
#[test]
fn comment_test() {
    // NOTE(review): the expected strings below are length-preserving (comments
    // become runs of spaces); trailing whitespace in these literals is
    // significant — verify it survives any reformatting.
    const INPUT: &str = r"
not commented
// line commented
not commented
/* block commented on a line */
not commented
// line comment with a /* block comment unterminated
not commented
/* block comment
spanning lines */
not commented
/* block comment
spanning lines and with // line comments
even with a // line commented terminator */
not commented
";
    // every output line must be either untouched code or all-spaces, and must
    // keep the original line length
    assert_eq!(
        INPUT
            .lines()
            .replace_comments()
            .zip(INPUT.lines())
            .find(|(line, original)| {
                (line != "not commented" && !line.chars().all(|c| c == ' '))
                    || line.len() != original.len()
            }),
        None
    );
    // single-line edge cases: partial tokens adjacent to comment markers,
    // quoted paths containing comment-like sequences, nested interactions
    const PARTIAL_TESTS: [(&str, &str); 11] = [
        (
            "1.0 /* block comment with a partial line comment on the end *// 2.0",
            "1.0                                                          / 2.0",
        ),
        (
            "1.0 /* block comment with a partial block comment on the end */* 2.0",
            "1.0                                                            * 2.0",
        ),
        (
            "1.0 /* block comment 1 *//* block comment 2 */ * 2.0",
            "1.0                                            * 2.0",
        ),
        (
            "1.0 /* block comment with real line comment after */// line comment",
            "1.0                                                                ",
        ),
        ("*/", "*/"),
        (
            r#"#import "embedded://file.wgsl""#,
            r#"#import "embedded://file.wgsl""#,
        ),
        (
            r#"// #import "embedded://file.wgsl""#,
            r#"                                 "#,
        ),
        (
            r#"/* #import "embedded://file.wgsl" */"#,
            r#"                                    "#,
        ),
        (
            r#"/* #import "embedded:*/file.wgsl" */"#,
            r#"                       file.wgsl" */"#,
        ),
        (
            r#"#import "embedded://file.wgsl" // comment"#,
            r#"#import "embedded://file.wgsl"           "#,
        ),
        (
            r#"#import "embedded:/* */ /* /**/* / / /// * / //*/*/ / */*file.wgsl""#,
            r#"#import "embedded:/* */ /* /**/* / / /// * / //*/*/ / */*file.wgsl""#,
        ),
    ];
    for &(input, expected) in PARTIAL_TESTS.iter() {
        let mut nasty_processed = input.lines();
        let nasty_processed = nasty_processed.replace_comments().next().unwrap();
        assert_eq!(&nasty_processed, expected);
    }
}
#[test]
fn multiline_comment_test() {
    // NOTE(review): expected outputs are length-preserving (comments become
    // spaces); whitespace inside these raw strings is significant.
    let test_cases = [
        (
            // Basic test
            r"/*
hoho
*/",
            r"
    
  ",
        ),
        (
            // Testing the commenting-out of multiline comments
            r"///*
hehe
//*/",
            r"
hehe
    ",
        ),
        (
            // Testing the commenting-out of single-line comments
            r"/* // */ code goes here /*
Still a comment // */
/* dummy */",
            r"         code goes here
                     
           ",
        ),
        (
            // A comment with a nested multiline comment
            // Notice how the "//" inside the multiline comment doesn't take effect
            r"/*
//*
*/commented
*/not commented",
            r"
   
           
  not commented",
        ),
    ];
    for &(input, expected) in test_cases.iter() {
        // compare line by line so a failure pinpoints the offending line
        for (output_line, expected_line) in input.lines().replace_comments().zip(expected.lines()) {
            assert_eq!(output_line.as_ref(), expected_line);
        }
    }
}
#[test]
fn test_comment_becomes_spaces() {
    // A block comment embedded in an expression must be replaced by spaces,
    // preserving token separation and byte offsets.
    let cases = [("let a/**/b =3u;", "let a    b =3u;")];
    for &(input, expected) in &cases {
        let processed = input.lines().replace_comments();
        for (got, want) in processed.zip(expected.lines()) {
            assert_eq!(got.as_ref(), want);
        }
    }
}

307
vendor/naga_oil/src/compose/error.rs vendored Normal file
View File

@@ -0,0 +1,307 @@
use std::{borrow::Cow, collections::HashMap, ops::Range};
use codespan_reporting::{
diagnostic::{Diagnostic, Label},
files::SimpleFile,
term,
term::termcolor::WriteColor,
};
use thiserror::Error;
use tracing::trace;
use super::{preprocess::PreprocessOutput, Composer, ShaderDefValue};
use crate::{compose::SPAN_SHIFT, redirect::RedirectError};
/// Where an error's source text comes from.
#[derive(Debug)]
pub enum ErrSource {
    /// A registered composable module, looked up by name; its source is
    /// re-preprocessed with `defs` when rendering the error.
    Module {
        name: String,
        offset: usize,
        defs: HashMap<String, ShaderDefValue>,
    },
    /// A top-level shader currently being constructed; the source text is
    /// carried inline.
    Constructing {
        path: String,
        source: String,
        offset: usize,
    },
}
impl ErrSource {
    /// The file path associated with this error's source, for diagnostics.
    pub fn path<'a>(&'a self, composer: &'a Composer) -> &'a String {
        match self {
            ErrSource::Module { name, .. } => &composer.module_sets.get(name).unwrap().file_path,
            ErrSource::Constructing { path, .. } => path,
        }
    }
    /// The source text the error's spans refer to. For modules the stored
    /// sanitized source is re-preprocessed with the same defs; if that
    /// preprocessing fails, an empty string is returned.
    pub fn source<'a>(&'a self, composer: &'a Composer) -> Cow<'a, String> {
        match self {
            ErrSource::Module { name, defs, .. } => {
                let raw_source = &composer.module_sets.get(name).unwrap().sanitized_source;
                let Ok(PreprocessOutput {
                    preprocessed_source: source,
                    ..
                }) = composer.preprocessor.preprocess(raw_source, defs)
                else {
                    return Default::default();
                };
                Cow::Owned(source)
            }
            ErrSource::Constructing { source, .. } => Cow::Borrowed(source),
        }
    }
    /// Byte offset subtracted from spans when mapping them back into the
    /// displayed source.
    pub fn offset(&self) -> usize {
        match self {
            ErrSource::Module { offset, .. } | ErrSource::Constructing { offset, .. } => *offset,
        }
    }
}
/// An error produced during composition, together with the source context
/// needed to render a span-annotated diagnostic.
#[derive(Debug, Error)]
#[error("Composer error: {inner}")]
pub struct ComposerError {
    #[source]
    pub inner: ComposerErrorInner,
    pub source: ErrSource,
}
/// The specific failure behind a [`ComposerError`]. `usize` payloads are byte
/// offsets into the (preprocessed) source, used to place diagnostic labels.
#[derive(Debug, Error)]
pub enum ComposerErrorInner {
    #[error("{0}")]
    ImportParseError(String, usize),
    #[error("required import '{0}' not found")]
    ImportNotFound(String, usize),
    #[error("{0}")]
    WgslParseError(naga::front::wgsl::ParseError),
    #[cfg(feature = "glsl")]
    #[error("{0:?}")]
    GlslParseError(naga::front::glsl::ParseErrors),
    // back-end errors indicate internal bugs: composed IR failed to round-trip
    #[error("naga_oil bug, please file a report: failed to convert imported module IR back into WGSL for use with WGSL shaders: {0}")]
    WgslBackError(naga::back::wgsl::Error),
    #[cfg(feature = "glsl")]
    #[error("naga_oil bug, please file a report: failed to convert imported module IR back into GLSL for use with GLSL shaders: {0}")]
    GlslBackError(naga::back::glsl::Error),
    #[error("naga_oil bug, please file a report: composer failed to build a valid header: {0}")]
    HeaderValidationError(naga::WithSpan<naga::valid::ValidationError>),
    #[error("failed to build a valid final module: {0}")]
    ShaderValidationError(naga::WithSpan<naga::valid::ValidationError>),
    // preprocessor conditional-compilation errors
    #[error(
        "Not enough '# endif' lines. Each if statement should be followed by an endif statement."
    )]
    NotEnoughEndIfs(usize),
    #[error("Too many '# endif' lines. Each endif should be preceded by an if statement.")]
    TooManyEndIfs(usize),
    #[error("'#else' without preceding condition.")]
    ElseWithoutCondition(usize),
    #[error("Unknown shader def operator: '{operator}'")]
    UnknownShaderDefOperator { pos: usize, operator: String },
    #[error("Unknown shader def: '{shader_def_name}'")]
    UnknownShaderDef { pos: usize, shader_def_name: String },
    #[error(
        "Invalid shader def comparison for '{shader_def_name}': expected {expected}, got {value}"
    )]
    InvalidShaderDefComparisonValue {
        pos: usize,
        shader_def_name: String,
        expected: String,
        value: String,
    },
    #[error("multiple inconsistent shader def values: '{def}'")]
    InconsistentShaderDefValue { def: String },
    #[error("Attempted to add a module with no #define_import_path")]
    NoModuleName,
    #[error("source contains internal decoration string, results probably won't be what you expect. if you have a legitimate reason to do this please file a report")]
    DecorationInSource(Range<usize>),
    #[error("naga oil only supports glsl 440 and 450")]
    GlslInvalidVersion(usize),
    #[error("invalid override :{0}")]
    RedirectError(#[from] RedirectError),
    #[error(
        "override is invalid as `{name}` is not virtual (this error can be disabled with feature 'override_any')"
    )]
    OverrideNotVirtual { name: String, pos: usize },
    #[error(
        "Composable module identifiers must not require substitution according to naga writeback rules: `{original}`"
    )]
    InvalidIdentifier { original: String, at: naga::Span },
    #[error("Invalid value for `#define`d shader def {name}: {value}")]
    InvalidShaderDefDefinitionValue {
        name: String,
        value: String,
        pos: usize,
    },
    #[error("#define statements are only allowed at the start of the top-level shaders")]
    DefineInModule(usize),
}
/// Iterator over an error's `source()` chain, outermost cause first.
struct ErrorSources<'a> {
    current: Option<&'a (dyn std::error::Error + 'static)>,
}
impl<'a> ErrorSources<'a> {
    /// Starts the chain at `error`'s immediate source (not `error` itself).
    fn of(error: &'a dyn std::error::Error) -> Self {
        Self {
            current: error.source(),
        }
    }
}
impl<'a> Iterator for ErrorSources<'a> {
    type Item = &'a (dyn std::error::Error + 'static);
    /// Walks the `source()` chain, yielding each underlying error in turn.
    fn next(&mut self) -> Option<Self::Item> {
        let yielded = self.current.take();
        self.current = yielded.and_then(std::error::Error::source);
        yielded
    }
}
// impl<'a> FusedIterator for ErrorSources<'a> {}
impl ComposerError {
    /// format a Composer error, stripping naga_oil's internal name decorations
    /// from the rendered output
    pub fn emit_to_string(&self, composer: &Composer) -> String {
        composer.undecorate(&self.emit_to_string_internal(composer))
    }
    /// Renders the error as a codespan diagnostic against the (re-)preprocessed
    /// source, with one label per span and ANSI colour when supported.
    fn emit_to_string_internal(&self, composer: &Composer) -> String {
        let path = self.source.path(composer);
        let source = self.source.source(composer);
        let source_offset = self.source.offset();
        trace!("source:\n~{}~", source);
        trace!("source offset: {}", source_offset);
        // spans carry a module id in their high bits (see SPAN_SHIFT); mask it
        // off and shift by the source offset to get file-relative ranges
        let map_span = |rng: Range<usize>| -> Range<usize> {
            ((rng.start & ((1 << SPAN_SHIFT) - 1)).saturating_sub(source_offset))
                ..((rng.end & ((1 << SPAN_SHIFT) - 1)).saturating_sub(source_offset))
        };
        let files = SimpleFile::new(path, source.as_str());
        let config = term::Config::default();
        // build (labels, notes) per error kind; some kinds return a plain
        // string early because they have no meaningful span
        let (labels, notes) = match &self.inner {
            ComposerErrorInner::DecorationInSource(range) => {
                (vec![Label::primary((), range.clone())], vec![])
            }
            ComposerErrorInner::HeaderValidationError(v)
            | ComposerErrorInner::ShaderValidationError(v) => (
                v.spans()
                    .map(|(span, desc)| {
                        trace!(
                            "mapping span {:?} -> {:?}",
                            span.to_range().unwrap_or(0..0),
                            map_span(span.to_range().unwrap_or(0..0))
                        );
                        Label::primary((), map_span(span.to_range().unwrap_or(0..0)))
                            .with_message(desc.to_owned())
                    })
                    .collect(),
                // the full cause chain becomes the diagnostic's notes
                ErrorSources::of(&v)
                    .map(|source| source.to_string())
                    .collect(),
            ),
            ComposerErrorInner::ImportNotFound(msg, pos) => (
                vec![Label::primary((), *pos..*pos)],
                vec![format!("missing import '{msg}'")],
            ),
            ComposerErrorInner::ImportParseError(msg, pos) => (
                vec![Label::primary((), *pos..*pos)],
                vec![format!("invalid import spec: '{msg}'")],
            ),
            ComposerErrorInner::WgslParseError(e) => (
                e.labels()
                    .map(|(range, msg)| {
                        Label::primary((), map_span(range.to_range().unwrap_or(0..0)))
                            .with_message(msg)
                    })
                    .collect(),
                vec![e.message().to_owned()],
            ),
            #[cfg(feature = "glsl")]
            ComposerErrorInner::GlslParseError(e) => (
                e.errors
                    .iter()
                    .map(|naga::front::glsl::Error { kind, meta }| {
                        Label::primary((), map_span(meta.to_range().unwrap_or(0..0)))
                            .with_message(kind.to_string())
                    })
                    .collect(),
                vec![],
            ),
            // preprocessor errors: a single zero-width label at the offset
            ComposerErrorInner::NotEnoughEndIfs(pos)
            | ComposerErrorInner::TooManyEndIfs(pos)
            | ComposerErrorInner::ElseWithoutCondition(pos)
            | ComposerErrorInner::UnknownShaderDef { pos, .. }
            | ComposerErrorInner::UnknownShaderDefOperator { pos, .. }
            | ComposerErrorInner::InvalidShaderDefComparisonValue { pos, .. }
            | ComposerErrorInner::OverrideNotVirtual { pos, .. }
            | ComposerErrorInner::GlslInvalidVersion(pos)
            | ComposerErrorInner::DefineInModule(pos)
            | ComposerErrorInner::InvalidShaderDefDefinitionValue { pos, .. } => {
                (vec![Label::primary((), *pos..*pos)], vec![])
            }
            // span-less kinds: return a plain one-line message
            ComposerErrorInner::WgslBackError(e) => {
                return format!("{path}: wgsl back error: {e}");
            }
            #[cfg(feature = "glsl")]
            ComposerErrorInner::GlslBackError(e) => {
                return format!("{path}: glsl back error: {e}");
            }
            ComposerErrorInner::InconsistentShaderDefValue { def } => {
                return format!("{path}: multiple inconsistent shader def values: '{def}'");
            }
            ComposerErrorInner::RedirectError(..) => (
                vec![Label::primary((), 0..0)],
                vec![format!("override error")],
            ),
            ComposerErrorInner::NoModuleName => {
                return format!(
                    "{path}: no #define_import_path declaration found in composable module"
                );
            }
            ComposerErrorInner::InvalidIdentifier { at, .. } => (
                vec![Label::primary((), map_span(at.to_range().unwrap_or(0..0)))
                    .with_message(self.inner.to_string())],
                vec![],
            ),
        };
        let diagnostic = Diagnostic::error()
            .with_message(self.inner.to_string())
            .with_labels(labels)
            .with_notes(notes);
        let mut msg = Vec::with_capacity(256);
        // pick a colour-capable or plain writer over the same byte buffer
        let mut color_writer;
        let mut no_color_writer;
        let writer: &mut dyn WriteColor = if supports_color() {
            color_writer = term::termcolor::Ansi::new(&mut msg);
            &mut color_writer
        } else {
            no_color_writer = term::termcolor::NoColor::new(&mut msg);
            &mut no_color_writer
        };
        term::emit(writer, &config, &files, &diagnostic).expect("cannot write error");
        String::from_utf8_lossy(&msg).into_owned()
    }
}
// Colour output is disabled in tests (stable assertion text) and on wasm
// (no terminal).
#[cfg(any(test, target_arch = "wasm32"))]
fn supports_color() -> bool {
    false
}
// termcolor doesn't expose this logic when using custom buffers
/// Heuristic terminal-colour detection: honour `NO_COLOR` unconditionally,
/// then follow TERM conventions (missing TERM on unix, or TERM=dumb, means
/// no colour support).
#[cfg(not(any(test, target_arch = "wasm32")))]
fn supports_color() -> bool {
    if std::env::var_os("NO_COLOR").is_some() {
        return false;
    }
    match std::env::var_os("TERM") {
        None => !cfg!(unix),
        Some(term) => term != "dumb",
    }
}

1940
vendor/naga_oil/src/compose/mod.rs vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,370 @@
use indexmap::IndexMap;
use super::{
tokenizer::{Token, Tokenizer},
Composer, ImportDefWithOffset, ImportDefinition,
};
/// Parses one `#import` statement, recording every imported item into
/// `declared_imports` as `visible_name -> [fully::qualified::path, ...]`.
/// Supports rust-style nested braces (`a::{b, c::{d as e}}`), `as` renames,
/// and the deprecated space-separated `#import module item1 item2` form.
/// On failure, returns an error message plus the byte offset of the offending
/// token.
pub fn parse_imports<'a>(
    input: &'a str,
    declared_imports: &mut IndexMap<String, Vec<String>>,
) -> Result<(), (&'a str, usize)> {
    let mut tokens = Tokenizer::new(input, false).peekable();
    // the statement must begin with the two tokens `#` `import`
    match tokens.next() {
        Some(Token::Other('#', _)) => (),
        Some(other) => return Err(("expected `#import`", other.pos())),
        None => return Err(("expected #import", input.len())),
    };
    match tokens.next() {
        Some(Token::Identifier("import", _)) => (),
        Some(other) => return Err(("expected `#import`", other.pos())),
        None => return Err(("expected `#import`", input.len())),
    };
    // stack: path prefixes opened by `{` (or by the deprecated item-list form);
    // current: the path segment(s) being accumulated
    let mut stack = Vec::default();
    let mut current = String::default();
    let mut as_name = None;
    let mut is_deprecated_itemlist = false;
    loop {
        match tokens.peek() {
            Some(Token::Identifier(ident, _)) => {
                current.push_str(ident);
                tokens.next();
                // optional `as <name>` rename following the identifier
                if tokens.peek().and_then(Token::identifier) == Some("as") {
                    let pos = tokens.next().unwrap().pos();
                    let Some(Token::Identifier(name, _)) = tokens.next() else {
                        return Err(("expected identifier after `as`", pos));
                    };
                    as_name = Some(name);
                }
                // support deprecated #import mod item
                if let Some(Token::Identifier(..)) = tokens.peek() {
                    #[cfg(not(feature = "allow_deprecated"))]
                    tracing::warn!("item list imports are deprecated, please use `rust::style::item_imports` (or use feature `allow_deprecated`)`\n| {}", input);
                    is_deprecated_itemlist = true;
                    stack.push(format!("{}::", current));
                    current = String::default();
                    as_name = None;
                }
                continue;
            }
            Some(Token::Other('{', pos)) => {
                // `{` opens a group of items sharing the prefix in `current`
                if !current.ends_with("::") {
                    return Err(("open brace must follow `::`", *pos));
                }
                stack.push(current);
                current = String::default();
                as_name = None;
            }
            Some(Token::Other(',', _))
            | Some(Token::Other('}', _))
            | Some(Token::Other('\n', _))
            | None => {
                // item terminator: record the accumulated path (if any) under
                // its rename or its last path segment
                if !current.is_empty() {
                    let used_name = as_name.map(ToString::to_string).unwrap_or_else(|| {
                        current
                            .rsplit_once("::")
                            .map(|(_, name)| name.to_owned())
                            .unwrap_or(current.clone())
                    });
                    declared_imports.entry(used_name).or_default().push(format!(
                        "{}{}",
                        stack.join(""),
                        current
                    ));
                    current = String::default();
                    as_name = None;
                }
                if let Some(Token::Other('}', pos)) = tokens.peek() {
                    if stack.pop().is_none() {
                        return Err(("close brace without open", *pos));
                    }
                }
                if tokens.peek().is_none() {
                    break;
                }
            }
            Some(Token::Other(';', _)) => {
                // optional trailing semicolon must end the statement
                tokens.next();
                if let Some(token) = tokens.peek() {
                    return Err(("unexpected token after ';'", token.pos()));
                }
            }
            Some(Token::Other(_, pos)) => return Err(("unexpected token", *pos)),
            // the tokenizer is constructed with emit_whitespace = false
            Some(Token::Whitespace(..)) => unreachable!(),
        }
        tokens.next();
    }
    // all braces must be closed (the deprecated form leaves one synthetic
    // prefix on the stack, which is fine)
    if !(stack.is_empty() || is_deprecated_itemlist && stack.len() == 1) {
        return Err(("missing close brace", input.len()));
    }
    Ok(())
}
/// Rewrites identifiers in `input`, replacing names declared via imports with
/// their decorated module-qualified forms, and records every module actually
/// used into `used_imports`. `offset` is added to token positions for error
/// reporting. If `allow_ambiguous` is false, an identifier matching more than
/// one declared import is an error (returns its absolute position).
pub fn substitute_identifiers(
    input: &str,
    offset: usize,
    declared_imports: &IndexMap<String, Vec<String>>,
    used_imports: &mut IndexMap<String, ImportDefWithOffset>,
    allow_ambiguous: bool,
) -> Result<String, usize> {
    let tokens = Tokenizer::new(input, true);
    let mut output = String::with_capacity(input.len());
    // false right after `.` or `@`, where an identifier is a member/attribute
    // name and must not be substituted
    let mut in_substitution_position = true;
    for token in tokens {
        match token {
            Token::Identifier(ident, token_pos) => {
                if in_substitution_position {
                    // only the first path segment is looked up; the rest is
                    // appended to whatever it resolves to
                    let (first, residual) = ident.split_once("::").unwrap_or((ident, ""));
                    let full_paths = declared_imports
                        .get(first)
                        .cloned()
                        .unwrap_or(vec![first.to_owned()]);
                    if !allow_ambiguous && full_paths.len() > 1 {
                        return Err(offset + token_pos);
                    }
                    for mut full_path in full_paths {
                        if !residual.is_empty() {
                            full_path.push_str("::");
                            full_path.push_str(residual);
                        }
                        if let Some((module, item)) = full_path.rsplit_once("::") {
                            // record the module usage, then emit the item name
                            // followed by the module's decoration suffix
                            used_imports
                                .entry(module.to_owned())
                                .or_insert_with(|| ImportDefWithOffset {
                                    definition: ImportDefinition {
                                        import: module.to_owned(),
                                        ..Default::default()
                                    },
                                    offset: offset + token_pos,
                                })
                                .definition
                                .items
                                .push(item.to_owned());
                            output.push_str(item);
                            output.push_str(&Composer::decorate(module));
                        } else if full_path.find('"').is_some() {
                            // we don't want to replace local variables that shadow quoted module imports with the
                            // quoted name as that won't compile.
                            // since quoted items always refer to modules, we can just emit the original ident
                            // in this case
                            output.push_str(ident);
                        } else {
                            // if there are no quotes we do the replacement. this means that individually imported
                            // items can be used, and any shadowing local variables get harmlessly renamed.
                            // TODO: it can lead to weird errors, but such is life
                            output.push_str(&full_path);
                        }
                    }
                } else {
                    output.push_str(ident);
                }
            }
            Token::Other(other, _) => {
                output.push(other);
                // after `.` or `@` the next identifier is a member/attribute
                if other == '.' || other == '@' {
                    in_substitution_position = false;
                    continue;
                }
            }
            Token::Whitespace(ws, _) => output.push_str(ws),
        }
        in_substitution_position = true;
    }
    Ok(output)
}
/// Test helper: parses `input` and returns the declared-imports map on success.
#[cfg(test)]
fn test_parse(input: &str) -> Result<IndexMap<String, Vec<String>>, (&str, usize)> {
    let mut imports = IndexMap::default();
    parse_imports(input, &mut imports).map(|()| imports)
}
#[test]
fn import_tokens() {
    // simple path import: the visible name is the last path segment
    let input = r"
#import a::b
";
    assert_eq!(
        test_parse(input),
        Ok(IndexMap::from_iter([(
            "b".to_owned(),
            vec!("a::b".to_owned())
        )]))
    );
    // braced item list
    let input = r"
#import a::{b, c}
";
    assert_eq!(
        test_parse(input),
        Ok(IndexMap::from_iter([
            ("b".to_owned(), vec!("a::b".to_owned())),
            ("c".to_owned(), vec!("a::c".to_owned())),
        ]))
    );
    // `as` rename inside a braced list
    let input = r"
#import a::{b as d, c}
";
    assert_eq!(
        test_parse(input),
        Ok(IndexMap::from_iter([
            ("d".to_owned(), vec!("a::b".to_owned())),
            ("c".to_owned(), vec!("a::c".to_owned())),
        ]))
    );
    // nested braces
    let input = r"
#import a::{b::{c, d}, e}
";
    assert_eq!(
        test_parse(input),
        Ok(IndexMap::from_iter([
            ("c".to_owned(), vec!("a::b::c".to_owned())),
            ("d".to_owned(), vec!("a::b::d".to_owned())),
            ("e".to_owned(), vec!("a::e".to_owned())),
        ]))
    );
    // comma after a closed brace starts an unrelated import
    let input = r"
#import a::b::{c, d}, e
";
    assert_eq!(
        test_parse(input),
        Ok(IndexMap::from_iter([
            ("c".to_owned(), vec!("a::b::c".to_owned())),
            ("d".to_owned(), vec!("a::b::d".to_owned())),
            ("e".to_owned(), vec!("e".to_owned())),
        ]))
    );
    // bare module imports
    let input = r"
#import a, b
";
    assert_eq!(
        test_parse(input),
        Ok(IndexMap::from_iter([
            ("a".to_owned(), vec!("a".to_owned())),
            ("b".to_owned(), vec!("b".to_owned())),
        ]))
    );
    // deprecated space-separated item list form
    let input = r"
#import a::b c, d
";
    assert_eq!(
        test_parse(input),
        Ok(IndexMap::from_iter([
            ("c".to_owned(), vec!("a::b::c".to_owned())),
            ("d".to_owned(), vec!("a::b::d".to_owned())),
        ]))
    );
    let input = r"
#import a::b c
";
    assert_eq!(
        test_parse(input),
        Ok(IndexMap::from_iter([(
            "c".to_owned(),
            vec!("a::b::c".to_owned())
        ),]))
    );
    // deeply nested mix of braces and renames
    let input = r"
#import a::b::{c::{d, e}, f, g::{h as i, j}}
";
    assert_eq!(
        test_parse(input),
        Ok(IndexMap::from_iter([
            ("d".to_owned(), vec!("a::b::c::d".to_owned())),
            ("e".to_owned(), vec!("a::b::c::e".to_owned())),
            ("f".to_owned(), vec!("a::b::f".to_owned())),
            ("i".to_owned(), vec!("a::b::g::h".to_owned())),
            ("j".to_owned(), vec!("a::b::g::j".to_owned())),
        ]))
    );
    // multi-line import with trailing comma
    let input = r"
#import a::b::{
c::{d, e},
f,
g::{
h as i,
j::k::l as m,
}
}
";
    assert_eq!(
        test_parse(input),
        Ok(IndexMap::from_iter([
            ("d".to_owned(), vec!("a::b::c::d".to_owned())),
            ("e".to_owned(), vec!("a::b::c::e".to_owned())),
            ("f".to_owned(), vec!("a::b::f".to_owned())),
            ("i".to_owned(), vec!("a::b::g::h".to_owned())),
            ("m".to_owned(), vec!("a::b::g::j::k::l".to_owned())),
        ]))
    );
    // quoted module paths may contain arbitrary characters
    let input = r#"
#import "path//with\ all sorts of .stuff"::{a, b}
"#;
    assert_eq!(
        test_parse(input),
        Ok(IndexMap::from_iter([
            (
                "a".to_owned(),
                vec!(r#""path//with\ all sorts of .stuff"::a"#.to_owned())
            ),
            (
                "b".to_owned(),
                vec!(r#""path//with\ all sorts of .stuff"::b"#.to_owned())
            ),
        ]))
    );
    // malformed inputs must be rejected
    let input = r"
#import a::b::{
";
    assert!(test_parse(input).is_err());
    let input = r"
#import a::b::{{c}
";
    assert!(test_parse(input).is_err());
    let input = r"
#import a::b::{c}}
";
    assert!(test_parse(input).is_err());
    let input = r"
#import a::b{{c,d}}
";
    assert!(test_parse(input).is_err());
    let input = r"
#import a:b
";
    assert!(test_parse(input).is_err());
}

1524
vendor/naga_oil/src/compose/preprocess.rs vendored Normal file

File diff suppressed because it is too large Load Diff

1554
vendor/naga_oil/src/compose/test.rs vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,5 @@
#define_import_path overridable
virtual fn func() -> f32 {
return 1.0;
}

View File

@@ -0,0 +1,5 @@
#import overridable
override fn overridable::func() -> f32 {
return overridable::func() + 1.0;
}

View File

@@ -0,0 +1,5 @@
#import overridable
fn entry_point() -> f32 {
return overridable::func();
}

View File

@@ -0,0 +1,19 @@
#define_import_path test_module
var<workgroup> atom: atomic<u32>;
fn entry_point() -> f32 {
atomicStore(&atom, 1u); // atom = 1
var y = atomicLoad(&atom); // y = 1, atom = 1
y += atomicAdd(&atom, 2u); // y = 2, atom = 3
y += atomicSub(&atom, 1u); // y = 5, atom = 2
y += atomicMax(&atom, 5u); // y = 7, atom = 5
y += atomicMin(&atom, 4u); // y = 12, atom = 4
y += atomicExchange(&atom, y); // y = 16, atom = 12
let exchange = atomicCompareExchangeWeak(&atom, 12u, 0u);
if exchange.exchanged {
y += exchange.old_value; // y = 28, atom = 0
}
return f32(y); // 28.0
}

View File

@@ -0,0 +1,5 @@
#import test_module
fn main() -> f32 {
return test_module::entry_point();
}

View File

@@ -0,0 +1,31 @@
#import "shaders/skills/shared.wgsl" Vertex, VertexOutput
#if EFFECT_ID == 0
#import "shaders/skills/sound.wgsl" frag, vert
#else if EFFECT_ID == 1
#import "shaders/skills/orb.wgsl" frag, vert
#else if EFFECT_ID == 2
#import "shaders/skills/slash.wgsl" frag, vert
#else if EFFECT_ID == 3
#import "shaders/skills/railgun_trail.wgsl" frag, vert
#else if EFFECT_ID == 4
#import "shaders/skills/magic_arrow.wgsl" frag, vert
#else if EFFECT_ID == 5
#import "shaders/skills/hit.wgsl" frag, vert
#else if EFFECT_ID == 6
#import "shaders/skills/lightning_ring.wgsl" frag, vert
#else if EFFECT_ID == 7
#import "shaders/skills/lightning.wgsl" frag, vert
#endif
#import something_unused
@fragment
fn fragment(in: VertexOutput) -> @location(0) vec4<f32> {
return frag(in);
}
@vertex
fn vertex(vertex: Vertex) -> VertexOutput {
return vert(vertex);
}

View File

@@ -0,0 +1,144 @@
#define_import_path mod
fn f() -> f32 {
var x = 0.0;
#ifdef a1
#ifdef a2
#ifdef a3
#ifdef a4
#ifdef a5
#ifdef a6
#ifdef a7
#ifdef a8
#ifdef a9
#ifdef a10
#ifdef a11
#ifdef a12
#ifdef a13
#ifdef a14
#ifdef a15
#ifdef a16
#ifdef a17
#ifdef a18
#ifdef a19
#ifdef a20
#ifdef a21
#ifdef a22
#ifdef a23
#ifdef a24
#ifdef a25
#ifdef a26
#ifdef a27
#ifdef a28
#ifdef a29
#ifdef a30
#ifdef a31
#ifdef a32
#ifdef a33
#ifdef a34
#ifdef a35
#ifdef a36
#ifdef a37
#ifdef a38
#ifdef a39
#ifdef a40
#ifdef a41
#ifdef a42
#ifdef a43
#ifdef a44
#ifdef a45
#ifdef a46
#ifdef a47
#ifdef a48
#ifdef a49
#ifdef a50
#ifdef a51
#ifdef a52
#ifdef a53
#ifdef a54
#ifdef a55
#ifdef a56
#ifdef a57
#ifdef a58
#ifdef a59
#ifdef a60
#ifdef a61
#ifdef a62
#ifdef a63
#ifdef a64
#ifdef a65
#ifdef a66
#ifdef a66
#ifdef a67
x = 1.0;
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
return x;
}

View File

@@ -0,0 +1,5 @@
#import mod
fn main() -> f32 {
return mod::f();
}

View File

@@ -0,0 +1,12 @@
#define_import_path include
fn non_ep(f: f32) -> f32 {
return f * 2.0;
}
@fragment
fn fragment(
@builtin(position) frag_coord: vec4<f32>,
) -> @location(0) vec4<f32> {
return vec4<f32>(1.5 * frag_coord);
}

View File

@@ -0,0 +1,8 @@
#import include as Inc
@fragment
fn fragment(
@builtin(position) frag_coord: vec4<f32>,
) -> @location(0) vec4<f32> {
return Inc::fragment(frag_coord);
}

View File

@@ -0,0 +1,10 @@
#import test_module
@group(0) @binding(0)
var<storage, read_write> buffer: f32;
@compute @workgroup_size(1, 1, 1)
fn run_test() {
let res = test_module::entry_point();
buffer = res;
}

View File

@@ -0,0 +1,3 @@
#define_import_path a
const C: u32 = 1u;

View File

@@ -0,0 +1,3 @@
#define_import_path b
const C: u32 = 2u;

View File

@@ -0,0 +1,9 @@
#ifdef USE_A
#import a C
#else
#import b C
#endif
fn main() -> u32 {
return C;
}

View File

@@ -0,0 +1,9 @@
#define_import_path middle
#ifdef USE_A
#import a::b
#endif
fn mid_fn() -> u32 {
return b::C;
}

View File

@@ -0,0 +1,3 @@
#define_import_path a::b
const C: u32 = 1u;

View File

@@ -0,0 +1,7 @@
#ifdef USE_A
#import a::b
#endif
fn main() -> u32 {
return b::C;
}

View File

@@ -0,0 +1,5 @@
#import middle
fn main() -> u32 {
return middle::mid_fn();
}

View File

@@ -0,0 +1,7 @@
#define_import_path bind
#import consts
const y: u32 = 2u;
var<private> arr: array<u32, consts::X>;

View File

@@ -0,0 +1,3 @@
#define_import_path consts
const X: u32 = 1u;

View File

@@ -0,0 +1,6 @@
#import consts
#import bind
fn main() -> f32 {
return f32(bind::arr[0]);
}

View File

@@ -0,0 +1,12 @@
#define_import_path filters
diagnostic(warning, derivative_uniformity);
fn diagnostic_test(s : sampler, tex : texture_2d<f32>, ro_buffer : array<f32, 4>) -> vec4f {
if ro_buffer[0] == 0 {
// Emits a derivative uniformity error during validation.
return textureSample(tex, s, vec2(0.,0.));
}
return vec4f(0.);
}

View File

@@ -0,0 +1,10 @@
#import filters
@group(0) @binding(0) var s : sampler;
@group(0) @binding(2) var tex : texture_2d<f32>;
@group(1) @binding(0) var<storage, read> ro_buffer : array<f32, 4>;
@fragment
fn main(@builtin(position) p : vec4f) -> @location(0) vec4f {
return filters::diagnostic_test();
}

View File

@@ -0,0 +1,7 @@
#define_import_path a
#import consts
fn f() -> f32 {
return consts::PI * 1.0;
}

View File

@@ -0,0 +1,20 @@
const PI: f32 = 3.1;
fn b__f() -> f32 {
return PI * 2.0;
}
fn b__g() -> f32 {
return PI * 2.0;
}
fn a__f() -> f32 {
return PI * 1.0;
}
fn main() -> f32 {
let x = a__f();
let y = b__f();
return x*y;
}

View File

@@ -0,0 +1,11 @@
#define_import_path b
#import consts
fn f() -> f32 {
return consts::PI * 2.0;
}
fn g() -> f32 {
return consts::PI * 2.0;
}

View File

@@ -0,0 +1,3 @@
#define_import_path consts
const PI: f32 = 3.1;

View File

@@ -0,0 +1,9 @@
#import a
#import b
fn main() -> f32 {
let x = a::f();
let y = b::f();
return x*y;
}

View File

@@ -0,0 +1,8 @@
#define_import_path a
#import struct
fn a() -> struct::MyStruct {
var s_a: struct::MyStruct;
s_a.value = 1.0;
return s_a;
}

View File

@@ -0,0 +1,8 @@
#define_import_path b
#import struct
fn b() -> struct::MyStruct {
var s_b: struct::MyStruct;
s_b.value = 2.0;
return s_b;
}

View File

@@ -0,0 +1,5 @@
#define_import_path struct
struct MyStruct {
value: f32,
}

View File

@@ -0,0 +1,8 @@
#import a
#import b
fn main() -> f32 {
let a = a::a();
let b = b::b();
return a.value / b.value;
}

View File

@@ -0,0 +1,19 @@
#define_import_path mod
#ifdef DEF_ONE
const a: u32 = 1u;
#else
const a: u32 = 0u;
#endif
#ifndef DEF_TWO
const b: u32 = 0u;
#else
const b: u32 = 2u;
#endif
#if DEF_THREE == true
const c: u32 = 4u;
#else
const c: u32 = 0u;
#endif

View File

@@ -0,0 +1,6 @@
#define_import_path test_module
#import mod a, b, c
fn entry_point() -> f32 {
return f32(a + b + c);
}

View File

@@ -0,0 +1,12 @@
#define_import_path include
doesn't matter what goes here for this test
or here, just moving lines around a bit
#import missing
fn sub() {
// have to use something for it to be declared missing
let x = missing::y();
}

View File

@@ -0,0 +1,17 @@
#define_import_path wgsl_parse_err
const VAL: u32 = 1u;
fn all_ok() -> f32 {
let x = 1.0;
var y = sqrt(x);
y += 1.0;
return y;
}
fn woops() -> f32 {
let x = 1.0;
var y = sqrt(x);
y += 1.0;
return zdd;
}

View File

@@ -0,0 +1,3 @@
fn ok() {
wgsl_parse_err::woops();
}

View File

@@ -0,0 +1,17 @@
#define_import_path valid_inc
fn ok() -> f32 {
return 1.0;
}
fn func() -> f32 {
return 1u;
}
fn still_ok() -> f32 {
return 1.0;
}
fn main() {
let x: f32 = func();
}

View File

@@ -0,0 +1,3 @@
fn whatever() {
valid_inc::main();
}

View File

@@ -0,0 +1,14 @@
fn funcX_naga_oil_mod_XN53GK4TSNFSGCYTMMUX() -> f32 {
return 1f;
}
fn funcX_naga_oil_vrt_XN53GK4TSNFSGCYTMMUXX_naga_oil_mod_XOBWHKZ3JNYX() -> f32 {
let _e0: f32 = funcX_naga_oil_mod_XN53GK4TSNFSGCYTMMUX();
return (_e0 + 1f);
}
fn entry_point() -> f32 {
let _e0: f32 = funcX_naga_oil_vrt_XN53GK4TSNFSGCYTMMUXX_naga_oil_mod_XOBWHKZ3JNYX();
return _e0;
}

View File

@@ -0,0 +1,38 @@
var<workgroup> atomX_naga_oil_mod_XORSXG5C7NVXWI5LMMUX: atomic<u32>;
fn entry_pointX_naga_oil_mod_XORSXG5C7NVXWI5LMMUX() -> f32 {
var y: u32;
atomicStore((&atomX_naga_oil_mod_XORSXG5C7NVXWI5LMMUX), 1u);
let _e3: u32 = atomicLoad((&atomX_naga_oil_mod_XORSXG5C7NVXWI5LMMUX));
y = _e3;
let _e7: u32 = atomicAdd((&atomX_naga_oil_mod_XORSXG5C7NVXWI5LMMUX), 2u);
let _e8: u32 = y;
y = (_e8 + _e7);
let _e12: u32 = atomicSub((&atomX_naga_oil_mod_XORSXG5C7NVXWI5LMMUX), 1u);
let _e13: u32 = y;
y = (_e13 + _e12);
let _e17: u32 = atomicMax((&atomX_naga_oil_mod_XORSXG5C7NVXWI5LMMUX), 5u);
let _e18: u32 = y;
y = (_e18 + _e17);
let _e22: u32 = atomicMin((&atomX_naga_oil_mod_XORSXG5C7NVXWI5LMMUX), 4u);
let _e23: u32 = y;
y = (_e23 + _e22);
let _e25: u32 = y;
let _e27: u32 = atomicExchange((&atomX_naga_oil_mod_XORSXG5C7NVXWI5LMMUX), _e25);
let _e28: u32 = y;
y = (_e28 + _e27);
let _e33: _atomic_compare_exchange_resultUint4_ = atomicCompareExchangeWeak((&atomX_naga_oil_mod_XORSXG5C7NVXWI5LMMUX), 12u, 0u);
if _e33.exchanged {
let _e36: u32 = y;
y = (_e36 + _e33.old_value);
}
let _e38: u32 = y;
return f32(_e38);
}
fn main() -> f32 {
let _e0: f32 = entry_pointX_naga_oil_mod_XORSXG5C7NVXWI5LMMUX();
return _e0;
}

View File

@@ -0,0 +1,39 @@
struct IsFineX_naga_oil_mod_XON2HE5LDORZQX {
fine: f32,
}
struct Isbad_X_naga_oil_mod_XON2HE5LDORZQX {
fine_member: f32,
}
const fineX_naga_oil_mod_XMNXW443UOMX: f32 = 1f;
const bad_X_naga_oil_mod_XMNXW443UOMX: f32 = 1f;
var<private> fineX_naga_oil_mod_XM5WG6YTBNRZQX: f32 = 1f;
var<private> bad_X_naga_oil_mod_XM5WG6YTBNRZQX: f32 = 1f;
fn fineX_naga_oil_mod_XMZXHGX(in: f32) -> f32 {
return in;
}
fn bad_X_naga_oil_mod_XMZXHGX(in_1: f32) -> f32 {
return in_1;
}
fn main() -> f32 {
var d: IsFineX_naga_oil_mod_XON2HE5LDORZQX;
var e: Isbad_X_naga_oil_mod_XON2HE5LDORZQX;
let _e1: f32 = fineX_naga_oil_mod_XMZXHGX(1f);
let _e3: f32 = bad_X_naga_oil_mod_XMZXHGX(2f);
let b: f32 = (_e1 + _e3);
let _e6: f32 = fineX_naga_oil_mod_XM5WG6YTBNRZQX;
let _e8: f32 = bad_X_naga_oil_mod_XM5WG6YTBNRZQX;
let c: f32 = (_e6 + _e8);
d.fine = 3f;
e.fine_member = 4f;
let _e20: f32 = d.fine;
let _e23: f32 = e.fine_member;
return ((((2f + b) + c) + _e20) + _e23);
}

View File

@@ -0,0 +1,13 @@
fn fX_naga_oil_mod_XNVXWIX() -> f32 {
var x: f32 = 0f;
x = 1f;
let _e3: f32 = x;
return _e3;
}
fn main() -> f32 {
let _e0: f32 = fX_naga_oil_mod_XNVXWIX();
return _e0;
}

View File

@@ -0,0 +1,6 @@
const CX_naga_oil_mod_XMEX: u32 = 1u;
fn main() -> u32 {
return CX_naga_oil_mod_XMEX;
}

View File

@@ -0,0 +1,6 @@
const CX_naga_oil_mod_XMIX: u32 = 2u;
fn main() -> u32 {
return CX_naga_oil_mod_XMIX;
}

View File

@@ -0,0 +1,8 @@
error: required import 'b' not found
┌─ tests/conditional_import_fail/top.wgsl:6:12
6 │ return b::C;
│ ^
= missing import 'b'

View File

@@ -0,0 +1,8 @@
error: required import 'b' not found
┌─ tests/conditional_import_fail/top_nested.wgsl:1:1
1 │ #import middle
│ ^
= missing import 'b'

View File

@@ -0,0 +1,20 @@
@group(0) @binding(0) var s : sampler;
@group(0) @binding(2) var tex : texture_2d<f32>;
@group(1) @binding(0) var<storage, read> ro_buffer : array<f32, 4>;
@fragment
fn main(@builtin(position) p : vec4f) -> @location(0) vec4f {
return filters::diagnostic_test();
}
diagnostic(warning, derivative_uniformity);
fn diagnostic_test() -> vec4f {
diagnostic(off, derivative_uniformity);
if ro_buffer[0] == 0 {
// Emits a derivative uniformity error during validation.
return textureSample(tex, s, vec2(0.,0.));
}
return vec4f(0.);
}

View File

@@ -0,0 +1,16 @@
const PIX_naga_oil_mod_XMNXW443UOMX: f32 = 3.1f;
fn fX_naga_oil_mod_XMEX() -> f32 {
return 3.1f;
}
fn fX_naga_oil_mod_XMIX() -> f32 {
return 6.2f;
}
fn main() -> f32 {
let _e0: f32 = fX_naga_oil_mod_XMEX();
let _e1: f32 = fX_naga_oil_mod_XMIX();
return (_e0 * _e1);
}

View File

@@ -0,0 +1,26 @@
struct MyStructX_naga_oil_mod_XON2HE5LDOQX {
value: f32,
}
fn aX_naga_oil_mod_XMEX() -> MyStructX_naga_oil_mod_XON2HE5LDOQX {
var s_a: MyStructX_naga_oil_mod_XON2HE5LDOQX;
s_a.value = 1f;
let _e3: MyStructX_naga_oil_mod_XON2HE5LDOQX = s_a;
return _e3;
}
fn bX_naga_oil_mod_XMIX() -> MyStructX_naga_oil_mod_XON2HE5LDOQX {
var s_b: MyStructX_naga_oil_mod_XON2HE5LDOQX;
s_b.value = 2f;
let _e3: MyStructX_naga_oil_mod_XON2HE5LDOQX = s_b;
return _e3;
}
fn main() -> f32 {
let _e0: MyStructX_naga_oil_mod_XON2HE5LDOQX = aX_naga_oil_mod_XMEX();
let _e1: MyStructX_naga_oil_mod_XON2HE5LDOQX = bX_naga_oil_mod_XMIX();
return (_e0.value / _e1.value);
}

View File

@@ -0,0 +1,8 @@
error: no definition in scope for identifier: `zdd`
┌─ tests/error_test/wgsl_parse_err.wgsl:16:12
16 │ return zdd;
│ ^^^ unknown identifier
= no definition in scope for identifier: `zdd`

View File

@@ -0,0 +1,10 @@
error: failed to build a valid final module: Function [1] 'func' is invalid
┌─ tests/error_test/wgsl_valid_err.wgsl:7:1
7 │ ╭ fn func() -> f32 {
8 │ │ return 1u;
│ │ ^^ naga::Expression [0]
│ ╰──────────────^ naga::Function [1]
= The `return` value Some([0]) does not match the function return value

View File

@@ -0,0 +1,10 @@
error: failed to build a valid final module: Function [0] 'valid_inc::func' is invalid
┌─ tests/error_test/wgsl_valid_err.wgsl:7:1
7 │ ╭ fn func() -> f32 {
8 │ │ return 1u;
│ │ ^^ naga::Expression [0]
│ ╰──────────────^ naga::Function [0]
= The `return` value Some([0]) does not match the function return value

View File

@@ -0,0 +1,22 @@
struct VertexOutput {
@builtin(position) gl_Position: vec4<f32>,
}
var<private> gl_Position: vec4<f32>;
fn wgsl_funcX_naga_oil_mod_XO5TXG3C7NVXWI5LMMUX() -> f32 {
return 53f;
}
fn main_1() {
let _e0: f32 = wgsl_funcX_naga_oil_mod_XO5TXG3C7NVXWI5LMMUX();
gl_Position = vec4(_e0);
return;
}
@vertex
fn main() -> VertexOutput {
main_1();
let _e1: vec4<f32> = gl_Position;
return VertexOutput(_e1);
}

View File

@@ -0,0 +1,19 @@
struct FragmentOutput {
@location(0) out_color: vec4<f32>,
}
const my_constantX_naga_oil_mod_XMNXW23LPNYX: f32 = 0.5f;
var<private> out_color: vec4<f32>;
fn main_1() {
out_color = vec4<f32>(1f, 0.5f, 0f, 1f);
return;
}
@fragment
fn main() -> FragmentOutput {
main_1();
let _e1: vec4<f32> = out_color;
return FragmentOutput(_e1);
}

View File

@@ -0,0 +1,6 @@
const my_constantX_naga_oil_mod_XMNXW23LPNYX: f32 = 0.5f;
fn main() -> vec4<f32> {
return vec4<f32>(1f, 0.5f, 0f, 1f);
}

View File

@@ -0,0 +1,9 @@
const XX_naga_oil_mod_XMNXW443UOMX: u32 = 1u;
var<private> arrX_naga_oil_mod_XMJUW4ZAX: array<u32, 1>;
fn main() -> f32 {
let _e2: u32 = arrX_naga_oil_mod_XMJUW4ZAX[0];
return f32(_e2);
}

View File

@@ -0,0 +1,6 @@
error: override is invalid as `outer` is not virtual (this error can be disabled with feature 'override_any')
┌─ tests/overrides/top_invalid.wgsl:3:13
3 │ override fn mod::outer() -> f32 {
│ ^

View File

@@ -0,0 +1,17 @@
const XX_naga_oil_mod_XMNXW443UOMX: u32 = 1u;
const YX_naga_oil_mod_XMNXW443UOMX: u32 = 2u;
fn doubleX_naga_oil_mod_XMNXW443UOMX(in: u32) -> u32 {
return (in * 2u);
}
fn main() -> u32 {
let _e1: u32 = doubleX_naga_oil_mod_XMNXW443UOMX(XX_naga_oil_mod_XMNXW443UOMX);
return _e1;
}
fn other() -> u32 {
let _e1: u32 = doubleX_naga_oil_mod_XMNXW443UOMX(YX_naga_oil_mod_XMNXW443UOMX);
return _e1;
}

View File

@@ -0,0 +1,17 @@
struct FragX_naga_oil_mod_XNVXWIX {
fragment: f32,
}
fn fragmentX_naga_oil_mod_XNVXWIX(f_1: FragX_naga_oil_mod_XNVXWIX) -> f32 {
return (f_1.fragment * 2f);
}
@fragment
fn main() -> @location(0) f32 {
var f: FragX_naga_oil_mod_XNVXWIX;
f.fragment = 3f;
let _e3: FragX_naga_oil_mod_XNVXWIX = f;
let _e4: f32 = fragmentX_naga_oil_mod_XNVXWIX(_e3);
return _e4;
}

View File

@@ -0,0 +1,8 @@
error: required import 'missing' not found
┌─ tests/error_test/include.wgsl:11:13
11 │ let x = missing::y();
│ ^
= missing import 'missing'

View File

@@ -0,0 +1,9 @@
fn helloX_naga_oil_mod_XNFXGGX() -> f32 {
return 1f;
}
fn main() -> f32 {
let _e0: f32 = helloX_naga_oil_mod_XNFXGGX();
return _e0;
}

View File

@@ -0,0 +1,14 @@
fn fooX_naga_oil_mod_XEJYXK33UMVSF63LPMR2WYZJCX() -> f32 {
return 3f;
}
fn myfunc(foo: u32) -> f32 {
return (f32(foo) * 2f);
}
fn main() -> f32 {
let _e1: f32 = myfunc(1u);
let _e2: f32 = fooX_naga_oil_mod_XEJYXK33UMVSF63LPMR2WYZJCX();
return (_e1 + _e2);
}

View File

@@ -0,0 +1,15 @@
var<private> aX_naga_oil_mod_XNVXWIX: f32 = 0f;
fn add() {
let _e2: f32 = aX_naga_oil_mod_XNVXWIX;
aX_naga_oil_mod_XNVXWIX = (_e2 + 1f);
return;
}
fn main() -> f32 {
add();
add();
let _e1: f32 = aX_naga_oil_mod_XNVXWIX;
return _e1;
}

View File

@@ -0,0 +1,9 @@
fn fragmentX_naga_oil_mod_XNFXGG3DVMRSQX(frag_coord_1: vec4<f32>) -> vec4<f32> {
return vec4<f32>((1.5f * frag_coord_1));
}
@fragment
fn fragment(@builtin(position) frag_coord: vec4<f32>) -> @location(0) vec4<f32> {
let _e1: vec4<f32> = fragmentX_naga_oil_mod_XNFXGG3DVMRSQX(frag_coord);
return _e1;
}

View File

@@ -0,0 +1,16 @@
struct CustomMaterialX_naga_oil_mod_XM5WHG3C7NVXWI5LMMUX {
Color: vec4<f32>,
}
@group(1) @binding(0)
var<uniform> global: CustomMaterialX_naga_oil_mod_XM5WHG3C7NVXWI5LMMUX;
fn glsl_funcX_naga_oil_mod_XM5WHG3C7NVXWI5LMMUX() -> f32 {
return 3f;
}
fn fraggo() -> f32 {
let _e0: f32 = glsl_funcX_naga_oil_mod_XM5WHG3C7NVXWI5LMMUX();
return _e0;
}

View File

@@ -0,0 +1,19 @@
struct FragmentOutput {
@location(0) out_color: vec4<f32>,
}
const my_constantX_naga_oil_mod_XMNXW23LPNYX: f32 = 0.5f;
var<private> out_color: vec4<f32>;
fn main_1() {
out_color = vec4<f32>(1f, 0.5f, 0f, 1f);
return;
}
@fragment
fn main() -> FragmentOutput {
main_1();
let _e1: vec4<f32> = out_color;
return FragmentOutput(_e1);
}

Some files were not shown because too many files have changed in this diff Show More