Vendor dependencies for 0.3.0 release

2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

View File

@@ -0,0 +1,103 @@
#define_import_path bevy_pbr::clustered_forward
#import bevy_pbr::mesh_view_bindings as Bindings
// NOTE: Keep in sync with bevy_pbr/src/light.rs
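// A sketch of the slice mapping, assuming the cluster_factors conventions documented on
// bevy_pbr::mesh_view_types::Lights (D = cluster_dimensions.z):
//   perspective:  cluster_factors.z = D / log(far / near)
//                 cluster_factors.w = D * log(near) / log(far / near)
//     so log(-view_z) * z - w = D * log(-view_z / near) / log(far / near), i.e. slices are
//     spaced logarithmically between near and far, and the '+ 1.0' below appears to reserve
//     slice 0 for fragments in front of the near plane.
//   orthographic: cluster_factors.z = -near, cluster_factors.w = D / (near - far)
//     so (view_z - z) * w walks linearly from 0 at view_z = -near to D at view_z = -far.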
fn view_z_to_z_slice(view_z: f32, is_orthographic: bool) -> u32 {
var z_slice: u32 = 0u;
if (is_orthographic) {
// NOTE: view_z is correct in the orthographic case
z_slice = u32(floor((view_z - Bindings::lights.cluster_factors.z) * Bindings::lights.cluster_factors.w));
} else {
// NOTE: had to use -view_z to make it positive else log(negative) is nan
z_slice = u32(log(-view_z) * Bindings::lights.cluster_factors.z - Bindings::lights.cluster_factors.w + 1.0);
}
// NOTE: We use min as we may limit the far z plane used for clustering to be closer than
// the furthest thing being drawn. This means that we need to limit to the maximum cluster.
return min(z_slice, Bindings::lights.cluster_dimensions.z - 1u);
}
fn fragment_cluster_index(frag_coord: vec2<f32>, view_z: f32, is_orthographic: bool) -> u32 {
let xy = vec2<u32>(floor(frag_coord * Bindings::lights.cluster_factors.xy));
let z_slice = view_z_to_z_slice(view_z, is_orthographic);
// NOTE: Restricting cluster index to avoid undefined behavior when accessing uniform buffer
// arrays based on the cluster index.
return min(
(xy.y * Bindings::lights.cluster_dimensions.x + xy.x) * Bindings::lights.cluster_dimensions.z + z_slice,
Bindings::lights.cluster_dimensions.w - 1u
);
}
// this must match CLUSTER_COUNT_SIZE in light.rs
const CLUSTER_COUNT_SIZE = 9u;
fn unpack_offset_and_counts(cluster_index: u32) -> vec3<u32> {
#ifdef NO_STORAGE_BUFFERS_SUPPORT
let offset_and_counts = Bindings::cluster_offsets_and_counts.data[cluster_index >> 2u][cluster_index & ((1u << 2u) - 1u)];
// [ 31 .. 18 | 17 .. 9 | 8 .. 0 ]
// [ offset | point light count | spot light count ]
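// With CLUSTER_COUNT_SIZE = 9u the shifts and masks below work out to:
//   offset:            shift 18, mask (1u << 14u) - 1u = 0x3FFFu
//   point light count: shift 9,  mask (1u << 9u) - 1u = 0x1FFu
//   spot light count:  no shift, mask (1u << 9u) - 1u = 0x1FFu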
return vec3<u32>(
(offset_and_counts >> (CLUSTER_COUNT_SIZE * 2u)) & ((1u << (32u - (CLUSTER_COUNT_SIZE * 2u))) - 1u),
(offset_and_counts >> CLUSTER_COUNT_SIZE) & ((1u << CLUSTER_COUNT_SIZE) - 1u),
offset_and_counts & ((1u << CLUSTER_COUNT_SIZE) - 1u),
);
#else
return Bindings::cluster_offsets_and_counts.data[cluster_index].xyz;
#endif
}
fn get_light_id(index: u32) -> u32 {
#ifdef NO_STORAGE_BUFFERS_SUPPORT
// The index is correct but in cluster_light_index_lists we pack 4 u8s into a u32.
// Because the uniform array is declared as array<vec4<u32>>, the containing vec4 sits at
// index / 16 and the u32 within it at (index / 4) % 4.
let indices = Bindings::cluster_light_index_lists.data[index >> 4u][(index >> 2u) & ((1u << 2u) - 1u)];
// And index % 4 gives the sub-index of the u8 within the u32 so we shift by 8 * sub-index
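// e.g. index == 6u reads data[6u >> 4u] = data[0u], component (6u >> 2u) & 3u = 1u (the .y
// u32, holding indices 4..7), and shifts by 8u * (6u & 3u) = 16u to extract bits 23..16.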
return (indices >> (8u * (index & ((1u << 2u) - 1u)))) & ((1u << 8u) - 1u);
#else
return Bindings::cluster_light_index_lists.data[index];
#endif
}
fn cluster_debug_visualization(
input_color: vec4<f32>,
view_z: f32,
is_orthographic: bool,
offset_and_counts: vec3<u32>,
cluster_index: u32,
) -> vec4<f32> {
// NOTE: Function parameters are immutable in WGSL, so copy the input into a local that the
// debug overlays below can assign to.
var output_color = input_color;
// Cluster allocation debug (using 'over' alpha blending)
#ifdef CLUSTERED_FORWARD_DEBUG_Z_SLICES
// NOTE: This debug mode visualises the z-slices
let cluster_overlay_alpha = 0.1;
var z_slice: u32 = view_z_to_z_slice(view_z, is_orthographic);
// A hack to make the colors alternate a bit more
if ((z_slice & 1u) == 1u) {
z_slice = z_slice + Bindings::lights.cluster_dimensions.z / 2u;
}
let slice_color = hsv2rgb(f32(z_slice) / f32(Bindings::lights.cluster_dimensions.z + 1u), 1.0, 0.5);
output_color = vec4<f32>(
(1.0 - cluster_overlay_alpha) * output_color.rgb + cluster_overlay_alpha * slice_color,
output_color.a
);
#endif // CLUSTERED_FORWARD_DEBUG_Z_SLICES
#ifdef CLUSTERED_FORWARD_DEBUG_CLUSTER_LIGHT_COMPLEXITY
// NOTE: This debug mode visualises the number of lights within the cluster that contains
// the fragment. It shows a sort of lighting complexity measure.
let cluster_overlay_alpha = 0.1;
let max_light_complexity_per_cluster = 64.0;
output_color.r = (1.0 - cluster_overlay_alpha) * output_color.r
+ cluster_overlay_alpha * smoothStep(0.0, max_light_complexity_per_cluster, f32(offset_and_counts[1] + offset_and_counts[2]));
output_color.g = (1.0 - cluster_overlay_alpha) * output_color.g
+ cluster_overlay_alpha * (1.0 - smoothStep(0.0, max_light_complexity_per_cluster, f32(offset_and_counts[1] + offset_and_counts[2])));
#endif // CLUSTERED_FORWARD_DEBUG_CLUSTER_LIGHT_COMPLEXITY
#ifdef CLUSTERED_FORWARD_DEBUG_CLUSTER_COHERENCY
// NOTE: Visualizes the cluster to which the fragment belongs
let cluster_overlay_alpha = 0.1;
let cluster_color = hsv2rgb(random1D(f32(cluster_index)), 1.0, 0.5);
output_color = vec4<f32>(
(1.0 - cluster_overlay_alpha) * output_color.rgb + cluster_overlay_alpha * cluster_color,
output_color.a
);
#endif // CLUSTERED_FORWARD_DEBUG_CLUSTER_COHERENCY
return output_color;
}

View File

@@ -0,0 +1,42 @@
#import bevy_pbr::mesh_view_types
#import bevy_pbr::mesh_types
@group(0) @binding(0)
var<uniform> view: View;
@group(1) @binding(0)
var<uniform> mesh: Mesh;
#ifdef SKINNED
@group(1) @binding(1)
var<uniform> joint_matrices: SkinnedMesh;
#import bevy_pbr::skinning
#endif
// NOTE: Bindings must come before functions that use them!
#import bevy_pbr::mesh_functions
struct Vertex {
@location(0) position: vec3<f32>,
#ifdef SKINNED
@location(4) joint_indices: vec4<u32>,
@location(5) joint_weights: vec4<f32>,
#endif
};
struct VertexOutput {
@builtin(position) clip_position: vec4<f32>,
};
@vertex
fn vertex(vertex: Vertex) -> VertexOutput {
#ifdef SKINNED
let model = skin_model(vertex.joint_indices, vertex.joint_weights);
#else
let model = mesh.model;
#endif
var out: VertexOutput;
out.clip_position = mesh_position_local_to_clip(model, vec4<f32>(vertex.position, 1.0));
return out;
}

View File

@@ -0,0 +1,67 @@
#import bevy_pbr::mesh_view_bindings
#import bevy_pbr::mesh_bindings
// NOTE: Bindings must come before functions that use them!
#import bevy_pbr::mesh_functions
struct Vertex {
@location(0) position: vec3<f32>,
@location(1) normal: vec3<f32>,
#ifdef VERTEX_UVS
@location(2) uv: vec2<f32>,
#endif
#ifdef VERTEX_TANGENTS
@location(3) tangent: vec4<f32>,
#endif
#ifdef VERTEX_COLORS
@location(4) color: vec4<f32>,
#endif
#ifdef SKINNED
@location(5) joint_indices: vec4<u32>,
@location(6) joint_weights: vec4<f32>,
#endif
};
struct VertexOutput {
@builtin(position) clip_position: vec4<f32>,
#import bevy_pbr::mesh_vertex_output
};
@vertex
fn vertex(vertex: Vertex) -> VertexOutput {
var out: VertexOutput;
#ifdef SKINNED
var model = skin_model(vertex.joint_indices, vertex.joint_weights);
out.world_normal = skin_normals(model, vertex.normal);
#else
var model = mesh.model;
out.world_normal = mesh_normal_local_to_world(vertex.normal);
#endif
out.world_position = mesh_position_local_to_world(model, vec4<f32>(vertex.position, 1.0));
#ifdef VERTEX_UVS
out.uv = vertex.uv;
#endif
#ifdef VERTEX_TANGENTS
out.world_tangent = mesh_tangent_local_to_world(model, vertex.tangent);
#endif
#ifdef VERTEX_COLORS
out.color = vertex.color;
#endif
out.clip_position = mesh_position_world_to_clip(out.world_position);
return out;
}
struct FragmentInput {
@builtin(front_facing) is_front: bool,
#import bevy_pbr::mesh_vertex_output
};
@fragment
fn fragment(in: FragmentInput) -> @location(0) vec4<f32> {
#ifdef VERTEX_COLORS
return in.color;
#else
return vec4<f32>(1.0, 0.0, 1.0, 1.0);
#endif
}

View File

@@ -0,0 +1,6 @@
#define_import_path bevy_pbr::mesh_bindings
#import bevy_pbr::mesh_types as Types
@group(2) @binding(0)
var<uniform> mesh: Types::Mesh;

View File

@@ -0,0 +1,36 @@
#define_import_path bevy_pbr::mesh_functions
fn mesh_position_local_to_world(model: mat4x4<f32>, vertex_position: vec4<f32>) -> vec4<f32> {
return model * vertex_position;
}
fn mesh_position_world_to_clip(world_position: vec4<f32>) -> vec4<f32> {
return view.view_proj * world_position;
}
// NOTE: The intermediate world_position assignment is important
// for precision purposes when using the 'equals' depth comparison
// function.
fn mesh_position_local_to_clip(model: mat4x4<f32>, vertex_position: vec4<f32>) -> vec4<f32> {
let world_position = mesh_position_local_to_world(model, vertex_position);
return mesh_position_world_to_clip(world_position);
}
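// NOTE: Normals are transformed with the inverse transpose of the model matrix rather than
// the model matrix itself so that non-uniform scaling does not skew them.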
fn mesh_normal_local_to_world(vertex_normal: vec3<f32>) -> vec3<f32> {
return mat3x3<f32>(
mesh.inverse_transpose_model[0].xyz,
mesh.inverse_transpose_model[1].xyz,
mesh.inverse_transpose_model[2].xyz
) * vertex_normal;
}
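// NOTE: Tangents lie in the surface, so they are transformed by the model matrix directly;
// the w component carries the handedness of the tangent basis and is passed through unchanged.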
fn mesh_tangent_local_to_world(model: mat4x4<f32>, vertex_tangent: vec4<f32>) -> vec4<f32> {
return vec4<f32>(
mat3x3<f32>(
model[0].xyz,
model[1].xyz,
model[2].xyz
) * vertex_tangent.xyz,
vertex_tangent.w
);
}

View File

@@ -0,0 +1,16 @@
#define_import_path bevy_pbr::mesh_types
struct Mesh {
model: mat4x4<f32>,
inverse_transpose_model: mat4x4<f32>,
// 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options.
flags: u32,
};
#ifdef SKINNED
struct SkinnedMesh {
data: array<mat4x4<f32>, 256u>,
};
#endif
const MESH_FLAGS_SHADOW_RECEIVER_BIT: u32 = 1u;

View File

@@ -0,0 +1,15 @@
#define_import_path bevy_pbr::mesh_vertex_output
struct MeshVertexOutput {
@location(0) world_position: vec4<f32>,
@location(1) world_normal: vec3<f32>,
#ifdef VERTEX_UVS
@location(2) uv: vec2<f32>,
#endif
#ifdef VERTEX_TANGENTS
@location(3) world_tangent: vec4<f32>,
#endif
#ifdef VERTEX_COLORS
@location(4) color: vec4<f32>,
#endif
}

View File

@@ -0,0 +1,42 @@
#define_import_path bevy_pbr::mesh_view_bindings
#import bevy_pbr::mesh_view_types as Types
@group(0) @binding(0)
var<uniform> view: Types::View;
@group(0) @binding(1)
var<uniform> lights: Types::Lights;
#ifdef NO_ARRAY_TEXTURES_SUPPORT
@group(0) @binding(2)
var point_shadow_textures: texture_depth_cube;
#else
@group(0) @binding(2)
var point_shadow_textures: texture_depth_cube_array;
#endif
@group(0) @binding(3)
var point_shadow_textures_sampler: sampler_comparison;
#ifdef NO_ARRAY_TEXTURES_SUPPORT
@group(0) @binding(4)
var directional_shadow_textures: texture_depth_2d;
#else
@group(0) @binding(4)
var directional_shadow_textures: texture_depth_2d_array;
#endif
@group(0) @binding(5)
var directional_shadow_textures_sampler: sampler_comparison;
#ifdef NO_STORAGE_BUFFERS_SUPPORT
@group(0) @binding(6)
var<uniform> point_lights: Types::PointLights;
@group(0) @binding(7)
var<uniform> cluster_light_index_lists: Types::ClusterLightIndexLists;
@group(0) @binding(8)
var<uniform> cluster_offsets_and_counts: Types::ClusterOffsetsAndCounts;
#else
@group(0) @binding(6)
var<storage> point_lights: Types::PointLights;
@group(0) @binding(7)
var<storage> cluster_light_index_lists: Types::ClusterLightIndexLists;
@group(0) @binding(8)
var<storage> cluster_offsets_and_counts: Types::ClusterOffsetsAndCounts;
#endif

View File

@@ -0,0 +1,87 @@
#define_import_path bevy_pbr::mesh_view_types
struct View {
view_proj: mat4x4<f32>,
inverse_view_proj: mat4x4<f32>,
view: mat4x4<f32>,
inverse_view: mat4x4<f32>,
projection: mat4x4<f32>,
inverse_projection: mat4x4<f32>,
world_position: vec3<f32>,
width: f32,
height: f32,
};
struct PointLight {
// For point lights: the lower-right 2x2 values of the projection matrix [2][2] [2][3] [3][2] [3][3]
// For spot lights: the direction (x,z), spot_scale and spot_offset
light_custom_data: vec4<f32>,
color_inverse_square_range: vec4<f32>,
position_radius: vec4<f32>,
// 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options.
flags: u32,
shadow_depth_bias: f32,
shadow_normal_bias: f32,
spot_light_tan_angle: f32,
};
const POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT: u32 = 1u;
const POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE: u32 = 2u;
struct DirectionalLight {
view_projection: mat4x4<f32>,
color: vec4<f32>,
direction_to_light: vec3<f32>,
// 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options.
flags: u32,
shadow_depth_bias: f32,
shadow_normal_bias: f32,
};
const DIRECTIONAL_LIGHT_FLAGS_SHADOWS_ENABLED_BIT: u32 = 1u;
struct Lights {
// NOTE: this array size must be kept in sync with the constants defined in bevy_pbr/src/render/light.rs
directional_lights: array<DirectionalLight, 1u>,
ambient_color: vec4<f32>,
// x/y/z dimensions and n_clusters in w
cluster_dimensions: vec4<u32>,
// xy are vec2<f32>(cluster_dimensions.xy) / vec2<f32>(view.width, view.height)
//
// For perspective projections:
// z is cluster_dimensions.z / log(far / near)
// w is cluster_dimensions.z * log(near) / log(far / near)
//
// For orthographic projections:
// NOTE: near and far are positive but -z is in front of the camera
// z is -near
// w is cluster_dimensions.z / (-far - -near)
cluster_factors: vec4<f32>,
n_directional_lights: u32,
spot_light_shadowmap_offset: i32,
};
#ifdef NO_STORAGE_BUFFERS_SUPPORT
struct PointLights {
data: array<PointLight, 256u>,
};
struct ClusterLightIndexLists {
// each u32 contains 4 u8 indices into the PointLights array
data: array<vec4<u32>, 1024u>,
};
struct ClusterOffsetsAndCounts {
// each u32 packs an offset into ClusterLightIndexLists in bits 31..18, a point light
// count in bits 17..9 and a spot light count in bits 8..0 (see CLUSTER_COUNT_SIZE)
data: array<vec4<u32>, 1024u>,
};
#else
struct PointLights {
data: array<PointLight>,
};
struct ClusterLightIndexLists {
data: array<u32>,
};
struct ClusterOffsetsAndCounts {
data: array<vec4<u32>>,
};
#endif

View File

@@ -0,0 +1,761 @@
struct mesh_vertex_output__MeshVertexOutput {
@location(0) world_position: vec4<f32>,
@location(1) world_normal: vec3<f32>,
@location(2) uv: vec2<f32>,
}
struct pbr_types__StandardMaterial {
base_color: vec4<f32>,
emissive: vec4<f32>,
perceptual_roughness: f32,
metallic: f32,
reflectance: f32,
flags: u32,
alpha_cutoff: f32,
}
struct mesh_types__Mesh {
model: mat4x4<f32>,
inverse_transpose_model: mat4x4<f32>,
flags: u32,
}
struct mesh_view_types__View {
view_proj: mat4x4<f32>,
inverse_view_proj: mat4x4<f32>,
view: mat4x4<f32>,
inverse_view: mat4x4<f32>,
projection: mat4x4<f32>,
inverse_projection: mat4x4<f32>,
world_position: vec3<f32>,
width: f32,
height: f32,
}
struct mesh_view_types__PointLight {
light_custom_data: vec4<f32>,
color_inverse_square_range: vec4<f32>,
position_radius: vec4<f32>,
flags: u32,
shadow_depth_bias: f32,
shadow_normal_bias: f32,
spot_light_tan_angle: f32,
}
struct mesh_view_types__DirectionalLight {
view_projection: mat4x4<f32>,
color: vec4<f32>,
direction_to_light: vec3<f32>,
flags: u32,
shadow_depth_bias: f32,
shadow_normal_bias: f32,
}
struct mesh_view_types__Lights {
directional_lights: array<mesh_view_types__DirectionalLight,1u>,
ambient_color: vec4<f32>,
cluster_dimensions: vec4<u32>,
cluster_factors: vec4<f32>,
n_directional_lights: u32,
spot_light_shadowmap_offset: i32,
}
struct mesh_view_types__PointLights {
data: array<mesh_view_types__PointLight>,
}
struct mesh_view_types__ClusterLightIndexLists {
data: array<u32>,
}
struct mesh_view_types__ClusterOffsetsAndCounts {
data: array<vec4<u32>>,
}
struct pbr_functions__PbrInput {
material: pbr_types__StandardMaterial,
occlusion: f32,
frag_coord: vec4<f32>,
world_position: vec4<f32>,
world_normal: vec3<f32>,
N: vec3<f32>,
V: vec3<f32>,
is_orthographic: bool,
}
const pbr_types__STANDARD_MATERIAL_FLAGS_EMISSIVE_TEXTURE_BIT: u32 = 2u;
const pbr_types__STANDARD_MATERIAL_FLAGS_UNLIT_BIT: u32 = 32u;
const pbr_types__STANDARD_MATERIAL_FLAGS_ALPHA_MODE_BLEND: u32 = 256u;
const pbr_types__STANDARD_MATERIAL_FLAGS_TWO_COMPONENT_NORMAL_MAP: u32 = 512u;
const pbr_types__STANDARD_MATERIAL_FLAGS_ALPHA_MODE_MASK: u32 = 128u;
const pbr_types__STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT: u32 = 16u;
const pbr_types__STANDARD_MATERIAL_FLAGS_BASE_COLOR_TEXTURE_BIT: u32 = 1u;
const pbr_types__STANDARD_MATERIAL_FLAGS_METALLIC_ROUGHNESS_TEXTURE_BIT: u32 = 4u;
const pbr_types__STANDARD_MATERIAL_FLAGS_OCCLUSION_TEXTURE_BIT: u32 = 8u;
const pbr_types__STANDARD_MATERIAL_FLAGS_ALPHA_MODE_OPAQUE: u32 = 64u;
const pbr_types__STANDARD_MATERIAL_FLAGS_FLIP_NORMAL_MAP_Y: u32 = 1024u;
const mesh_types__MESH_FLAGS_SHADOW_RECEIVER_BIT: u32 = 1u;
const mesh_view_types__POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE: u32 = 2u;
const mesh_view_types__DIRECTIONAL_LIGHT_FLAGS_SHADOWS_ENABLED_BIT: u32 = 1u;
const mesh_view_types__POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT: u32 = 1u;
const utils__PI: f32 = 3.1415927410125732;
const clustered_forward__CLUSTER_COUNT_SIZE: u32 = 9u;
@group(2) @binding(0)
var<uniform> mesh_bindings__mesh: mesh_types__Mesh;
@group(0) @binding(0)
var<uniform> mesh_view_bindings__view: mesh_view_types__View;
@group(0) @binding(2)
var mesh_view_bindings__point_shadow_textures: texture_depth_cube_array;
@group(0) @binding(5)
var mesh_view_bindings__directional_shadow_textures_sampler: sampler_comparison;
@group(0) @binding(6)
var<storage> mesh_view_bindings__point_lights: mesh_view_types__PointLights;
@group(0) @binding(1)
var<uniform> mesh_view_bindings__lights: mesh_view_types__Lights;
@group(0) @binding(3)
var mesh_view_bindings__point_shadow_textures_sampler: sampler_comparison;
@group(0) @binding(4)
var mesh_view_bindings__directional_shadow_textures: texture_depth_2d_array;
@group(0) @binding(7)
var<storage> mesh_view_bindings__cluster_light_index_lists: mesh_view_types__ClusterLightIndexLists;
@group(0) @binding(8)
var<storage> mesh_view_bindings__cluster_offsets_and_counts: mesh_view_types__ClusterOffsetsAndCounts;
@group(1) @binding(8)
var pbr_bindings__occlusion_sampler: sampler;
@group(1) @binding(0)
var<uniform> pbr_bindings__material: pbr_types__StandardMaterial;
@group(1) @binding(3)
var pbr_bindings__emissive_texture: texture_2d<f32>;
@group(1) @binding(1)
var pbr_bindings__base_color_texture: texture_2d<f32>;
@group(1) @binding(5)
var pbr_bindings__metallic_roughness_texture: texture_2d<f32>;
@group(1) @binding(4)
var pbr_bindings__emissive_sampler: sampler;
@group(1) @binding(6)
var pbr_bindings__metallic_roughness_sampler: sampler;
@group(1) @binding(2)
var pbr_bindings__base_color_sampler: sampler;
@group(1) @binding(10)
var pbr_bindings__normal_map_sampler: sampler;
@group(1) @binding(9)
var pbr_bindings__normal_map_texture: texture_2d<f32>;
@group(1) @binding(7)
var pbr_bindings__occlusion_texture: texture_2d<f32>;
fn pbr_types__standard_material_new() -> pbr_types__StandardMaterial {
var material: pbr_types__StandardMaterial;
material.base_color = vec4<f32>(1.0, 1.0, 1.0, 1.0);
material.emissive = vec4<f32>(0.0, 0.0, 0.0, 1.0);
material.perceptual_roughness = 0.08900000154972076;
material.metallic = 0.009999999776482582;
material.reflectance = 0.5;
material.flags = pbr_types__STANDARD_MATERIAL_FLAGS_ALPHA_MODE_OPAQUE;
material.alpha_cutoff = 0.5;
let _e33: pbr_types__StandardMaterial = material;
return _e33;
}
fn utils__saturate(value: f32) -> f32 {
return clamp(value, 0.0, 1.0);
}
fn utils__hsv2rgb(hue: f32, saturation: f32, value_1: f32) -> vec3<f32> {
let rgb: vec3<f32> = clamp((abs((((vec3<f32>((hue * 6.0)) + vec3<f32>(0.0, 4.0, 2.0)) % vec3<f32>(6.0)) - vec3<f32>(3.0))) - vec3<f32>(1.0)), vec3<f32>(0.0), vec3<f32>(1.0));
return (value_1 * mix(vec3<f32>(1.0), rgb, vec3<f32>(saturation)));
}
fn utils__random1D(s: f32) -> f32 {
return fract((sin((s * 12.989800453186035)) * 43758.546875));
}
fn lighting__getDistanceAttenuation(distanceSquare: f32, inverseRangeSquared: f32) -> f32 {
let factor: f32 = (distanceSquare * inverseRangeSquared);
let _e10: f32 = utils__saturate((1.0 - (factor * factor)));
let attenuation: f32 = (_e10 * _e10);
return ((attenuation * 1.0) / max(distanceSquare, 9.999999747378752e-5));
}
fn lighting__D_GGX(roughness: f32, NoH: f32, h: vec3<f32>) -> f32 {
let oneMinusNoHSquared: f32 = (1.0 - (NoH * NoH));
let a: f32 = (NoH * roughness);
let k: f32 = (roughness / (oneMinusNoHSquared + (a * a)));
let d: f32 = ((k * k) * (1.0 / utils__PI));
return d;
}
fn lighting__V_SmithGGXCorrelated(roughness_1: f32, NoV: f32, NoL: f32) -> f32 {
let a2_: f32 = (roughness_1 * roughness_1);
let lambdaV: f32 = (NoL * sqrt((((NoV - (a2_ * NoV)) * NoV) + a2_)));
let lambdaL: f32 = (NoV * sqrt((((NoL - (a2_ * NoL)) * NoL) + a2_)));
let v_1: f32 = (0.5 / (lambdaV + lambdaL));
return v_1;
}
fn lighting__F_Schlick_vec(f0_: vec3<f32>, f90_: f32, VoH: f32) -> vec3<f32> {
return (f0_ + ((vec3<f32>(f90_) - f0_) * pow((1.0 - VoH), 5.0)));
}
fn lighting__F_Schlick(f0_1: f32, f90_1: f32, VoH_1: f32) -> f32 {
return (f0_1 + ((f90_1 - f0_1) * pow((1.0 - VoH_1), 5.0)));
}
fn lighting__fresnel(f0_2: vec3<f32>, LoH: f32) -> vec3<f32> {
let _e11: f32 = utils__saturate(dot(f0_2, vec3<f32>((50.0 * 0.33000001311302185))));
let _e12: vec3<f32> = lighting__F_Schlick_vec(f0_2, _e11, LoH);
return _e12;
}
fn lighting__specular(f0_3: vec3<f32>, roughness_2: f32, h_1: vec3<f32>, NoV_1: f32, NoL_1: f32, NoH_1: f32, LoH_1: f32, specularIntensity: f32) -> vec3<f32> {
let _e12: f32 = lighting__D_GGX(roughness_2, NoH_1, h_1);
let _e13: f32 = lighting__V_SmithGGXCorrelated(roughness_2, NoV_1, NoL_1);
let _e14: vec3<f32> = lighting__fresnel(f0_3, LoH_1);
return (((specularIntensity * _e12) * _e13) * _e14);
}
fn lighting__Fd_Burley(roughness_3: f32, NoV_2: f32, NoL_2: f32, LoH_2: f32) -> f32 {
let f90_2: f32 = (0.5 + (((2.0 * roughness_3) * LoH_2) * LoH_2));
let _e15: f32 = lighting__F_Schlick(1.0, f90_2, NoL_2);
let _e17: f32 = lighting__F_Schlick(1.0, f90_2, NoV_2);
return ((_e15 * _e17) * (1.0 / utils__PI));
}
fn lighting__EnvBRDFApprox(f0_4: vec3<f32>, perceptual_roughness_1: f32, NoV_3: f32) -> vec3<f32> {
let c0_: vec4<f32> = vec4<f32>(-1.0, -0.027499999850988388, -0.5720000267028809, 0.02199999988079071);
let c1_: vec4<f32> = vec4<f32>(1.0, 0.042500000447034836, 1.0399999618530273, -0.03999999910593033);
let r: vec4<f32> = ((perceptual_roughness_1 * c0_) + c1_);
let a004_: f32 = ((min((r.x * r.x), exp2((-9.279999732971191 * NoV_3))) * r.x) + r.y);
let AB: vec2<f32> = ((vec2<f32>(-1.0399999618530273, 1.0399999618530273) * a004_) + r.zw);
return ((f0_4 * AB.x) + vec3<f32>(AB.y));
}
fn lighting__perceptualRoughnessToRoughness(perceptualRoughness: f32) -> f32 {
let clampedPerceptualRoughness: f32 = clamp(perceptualRoughness, 0.08900000154972076, 1.0);
return (clampedPerceptualRoughness * clampedPerceptualRoughness);
}
fn lighting__reinhard(color: vec3<f32>) -> vec3<f32> {
return (color / (vec3<f32>(1.0) + color));
}
fn lighting__reinhard_extended(color_1: vec3<f32>, max_white: f32) -> vec3<f32> {
let numerator: vec3<f32> = (color_1 * (vec3<f32>(1.0) + (color_1 / vec3<f32>((max_white * max_white)))));
return (numerator / (vec3<f32>(1.0) + color_1));
}
fn lighting__luminance(v: vec3<f32>) -> f32 {
return dot(v, vec3<f32>(0.2125999927520752, 0.7152000069618225, 0.0722000002861023));
}
fn lighting__change_luminance(c_in: vec3<f32>, l_out: f32) -> vec3<f32> {
let _e6: f32 = lighting__luminance(c_in);
return (c_in * (l_out / _e6));
}
fn lighting__reinhard_luminance(color_2: vec3<f32>) -> vec3<f32> {
let _e5: f32 = lighting__luminance(color_2);
let l_new: f32 = (_e5 / (1.0 + _e5));
let _e9: vec3<f32> = lighting__change_luminance(color_2, l_new);
return _e9;
}
fn lighting__reinhard_extended_luminance(color_3: vec3<f32>, max_white_l: f32) -> vec3<f32> {
let _e6: f32 = lighting__luminance(color_3);
let numerator_1: f32 = (_e6 * (1.0 + (_e6 / (max_white_l * max_white_l))));
let l_new_1: f32 = (numerator_1 / (1.0 + _e6));
let _e15: vec3<f32> = lighting__change_luminance(color_3, l_new_1);
return _e15;
}
fn lighting__point_light(world_position: vec3<f32>, light: mesh_view_types__PointLight, roughness_4: f32, NdotV: f32, N: vec3<f32>, V: vec3<f32>, R: vec3<f32>, F0_: vec3<f32>, diffuseColor: vec3<f32>) -> vec3<f32> {
var L: vec3<f32>;
var H: vec3<f32>;
var NoL_3: f32;
var NoH_2: f32;
var LoH_3: f32;
let light_to_frag: vec3<f32> = (light.position_radius.xyz - world_position.xyz);
let distance_square: f32 = dot(light_to_frag, light_to_frag);
let _e20: f32 = lighting__getDistanceAttenuation(distance_square, light.color_inverse_square_range.w);
let centerToRay: vec3<f32> = ((dot(light_to_frag, R) * R) - light_to_frag);
let _e29: f32 = utils__saturate((light.position_radius.w * inverseSqrt(dot(centerToRay, centerToRay))));
let closestPoint: vec3<f32> = (light_to_frag + (centerToRay * _e29));
let LspecLengthInverse: f32 = inverseSqrt(dot(closestPoint, closestPoint));
let _e40: f32 = utils__saturate((roughness_4 + ((light.position_radius.w * 0.5) * LspecLengthInverse)));
let normalizationFactor: f32 = (roughness_4 / _e40);
let specularIntensity_1: f32 = (normalizationFactor * normalizationFactor);
L = (closestPoint * LspecLengthInverse);
let _e45: vec3<f32> = L;
H = normalize((_e45 + V));
let _e49: vec3<f32> = L;
let _e51: f32 = utils__saturate(dot(N, _e49));
NoL_3 = _e51;
let _e53: vec3<f32> = H;
let _e55: f32 = utils__saturate(dot(N, _e53));
NoH_2 = _e55;
let _e57: vec3<f32> = L;
let _e58: vec3<f32> = H;
let _e60: f32 = utils__saturate(dot(_e57, _e58));
LoH_3 = _e60;
let _e62: vec3<f32> = H;
let _e63: f32 = NoL_3;
let _e64: f32 = NoH_2;
let _e65: f32 = LoH_3;
let _e66: vec3<f32> = lighting__specular(F0_, roughness_4, _e62, NdotV, _e63, _e64, _e65, specularIntensity_1);
L = normalize(light_to_frag);
let _e68: vec3<f32> = L;
H = normalize((_e68 + V));
let _e71: vec3<f32> = L;
let _e73: f32 = utils__saturate(dot(N, _e71));
NoL_3 = _e73;
let _e74: vec3<f32> = H;
let _e76: f32 = utils__saturate(dot(N, _e74));
NoH_2 = _e76;
let _e77: vec3<f32> = L;
let _e78: vec3<f32> = H;
let _e80: f32 = utils__saturate(dot(_e77, _e78));
LoH_3 = _e80;
let _e81: f32 = NoL_3;
let _e82: f32 = LoH_3;
let _e83: f32 = lighting__Fd_Burley(roughness_4, NdotV, _e81, _e82);
let diffuse: vec3<f32> = (diffuseColor * _e83);
let _e89: f32 = NoL_3;
return (((diffuse + _e66) * light.color_inverse_square_range.xyz) * (_e20 * _e89));
}
fn lighting__spot_light(world_position_1: vec3<f32>, light_1: mesh_view_types__PointLight, roughness_5: f32, NdotV_1: f32, N_1: vec3<f32>, V_1: vec3<f32>, R_1: vec3<f32>, F0_1: vec3<f32>, diffuseColor_1: vec3<f32>) -> vec3<f32> {
var spot_dir: vec3<f32>;
let _e13: vec3<f32> = lighting__point_light(world_position_1, light_1, roughness_5, NdotV_1, N_1, V_1, R_1, F0_1, diffuseColor_1);
spot_dir = vec3<f32>(light_1.light_custom_data.x, 0.0, light_1.light_custom_data.y);
let _e24: f32 = spot_dir.x;
let _e26: f32 = spot_dir.x;
let _e30: f32 = spot_dir.z;
let _e32: f32 = spot_dir.z;
spot_dir.y = sqrt(((1.0 - (_e24 * _e26)) - (_e30 * _e32)));
if ((light_1.flags & mesh_view_types__POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE) != 0u) {
let _e42: f32 = spot_dir.y;
spot_dir.y = -(_e42);
}
let light_to_frag_1: vec3<f32> = (light_1.position_radius.xyz - world_position_1.xyz);
let _e48: vec3<f32> = spot_dir;
let cd: f32 = dot(-(_e48), normalize(light_to_frag_1));
let _e58: f32 = utils__saturate(((cd * light_1.light_custom_data.z) + light_1.light_custom_data.w));
let spot_attenuation: f32 = (_e58 * _e58);
return (_e13 * spot_attenuation);
}
fn lighting__directional_light(light_2: mesh_view_types__DirectionalLight, roughness_6: f32, NdotV_2: f32, normal: vec3<f32>, view: vec3<f32>, R_2: vec3<f32>, F0_2: vec3<f32>, diffuseColor_2: vec3<f32>) -> vec3<f32> {
let incident_light: vec3<f32> = light_2.direction_to_light.xyz;
let half_vector: vec3<f32> = normalize((incident_light + view));
let _e17: f32 = utils__saturate(dot(normal, incident_light));
let _e19: f32 = utils__saturate(dot(normal, half_vector));
let _e21: f32 = utils__saturate(dot(incident_light, half_vector));
let _e22: f32 = lighting__Fd_Burley(roughness_6, NdotV_2, _e17, _e21);
let diffuse_1: vec3<f32> = (diffuseColor_2 * _e22);
let _e25: vec3<f32> = lighting__specular(F0_2, roughness_6, half_vector, NdotV_2, _e17, _e19, _e21, 1.0);
return (((_e25 + diffuse_1) * light_2.color.xyz) * _e17);
}
fn clustered_forward__view_z_to_z_slice(view_z: f32, is_orthographic: bool) -> u32 {
var z_slice: u32 = 0u;
if is_orthographic {
let _e18: f32 = mesh_view_bindings__lights.cluster_factors.z;
let _e22: f32 = mesh_view_bindings__lights.cluster_factors.w;
z_slice = u32(floor(((view_z - _e18) * _e22)));
} else {
let _e30: f32 = mesh_view_bindings__lights.cluster_factors.z;
let _e34: f32 = mesh_view_bindings__lights.cluster_factors.w;
z_slice = u32((((log(-(view_z)) * _e30) - _e34) + 1.0));
}
let _e39: u32 = z_slice;
let _e42: u32 = mesh_view_bindings__lights.cluster_dimensions.z;
return min(_e39, (_e42 - 1u));
}
fn clustered_forward__fragment_cluster_index(frag_coord_1: vec2<f32>, view_z_1: f32, is_orthographic_1: bool) -> u32 {
let _e16: vec4<f32> = mesh_view_bindings__lights.cluster_factors;
let xy: vec2<u32> = vec2<u32>(floor((frag_coord_1 * _e16.xy)));
let _e21: u32 = clustered_forward__view_z_to_z_slice(view_z_1, is_orthographic_1);
let _e25: u32 = mesh_view_bindings__lights.cluster_dimensions.x;
let _e31: u32 = mesh_view_bindings__lights.cluster_dimensions.z;
let _e36: u32 = mesh_view_bindings__lights.cluster_dimensions.w;
return min(((((xy.y * _e25) + xy.x) * _e31) + _e21), (_e36 - 1u));
}
fn clustered_forward__unpack_offset_and_counts(cluster_index: u32) -> vec3<u32> {
let _e16: vec4<u32> = mesh_view_bindings__cluster_offsets_and_counts.data[cluster_index];
return _e16.xyz;
}
fn clustered_forward__get_light_id(index: u32) -> u32 {
let _e16: u32 = mesh_view_bindings__cluster_light_index_lists.data[index];
return _e16;
}
fn clustered_forward__cluster_debug_visualization(output_color_1: vec4<f32>, view_z_2: f32, is_orthographic_2: bool, offset_and_counts: vec3<u32>, cluster_index_1: u32) -> vec4<f32> {
return output_color_1;
}
fn shadows__fetch_point_shadow(light_id: u32, frag_position: vec4<f32>, surface_normal: vec3<f32>) -> f32 {
let light_3: mesh_view_types__PointLight = mesh_view_bindings__point_lights.data[light_id];
let surface_to_light: vec3<f32> = (light_3.position_radius.xyz - frag_position.xyz);
let surface_to_light_abs: vec3<f32> = abs(surface_to_light);
let distance_to_light: f32 = max(surface_to_light_abs.x, max(surface_to_light_abs.y, surface_to_light_abs.z));
let normal_offset: vec3<f32> = ((light_3.shadow_normal_bias * distance_to_light) * surface_normal.xyz);
let depth_offset: vec3<f32> = (light_3.shadow_depth_bias * normalize(surface_to_light.xyz));
let offset_position: vec3<f32> = ((frag_position.xyz + normal_offset) + depth_offset);
let frag_ls: vec3<f32> = (light_3.position_radius.xyz - offset_position.xyz);
let abs_position_ls: vec3<f32> = abs(frag_ls);
let major_axis_magnitude: f32 = max(abs_position_ls.x, max(abs_position_ls.y, abs_position_ls.z));
let zw: vec2<f32> = ((-(major_axis_magnitude) * light_3.light_custom_data.xy) + light_3.light_custom_data.zw);
let depth: f32 = (zw.x / zw.y);
let _e60: f32 = textureSampleCompareLevel(mesh_view_bindings__point_shadow_textures, mesh_view_bindings__point_shadow_textures_sampler, frag_ls, i32(light_id), depth);
return _e60;
}
fn shadows__fetch_spot_shadow(light_id_1: u32, frag_position_1: vec4<f32>, surface_normal_1: vec3<f32>) -> f32 {
var spot_dir_1: vec3<f32>;
var sign: f32 = -1.0;
let light_4: mesh_view_types__PointLight = mesh_view_bindings__point_lights.data[light_id_1];
let surface_to_light_1: vec3<f32> = (light_4.position_radius.xyz - frag_position_1.xyz);
spot_dir_1 = vec3<f32>(light_4.light_custom_data.x, 0.0, light_4.light_custom_data.y);
let _e32: f32 = spot_dir_1.x;
let _e34: f32 = spot_dir_1.x;
let _e38: f32 = spot_dir_1.z;
let _e40: f32 = spot_dir_1.z;
spot_dir_1.y = sqrt(((1.0 - (_e32 * _e34)) - (_e38 * _e40)));
if ((light_4.flags & mesh_view_types__POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE) != 0u) {
let _e50: f32 = spot_dir_1.y;
spot_dir_1.y = -(_e50);
}
let _e52: vec3<f32> = spot_dir_1;
let fwd: vec3<f32> = -(_e52);
let distance_to_light_1: f32 = dot(fwd, surface_to_light_1);
let offset_position_1: vec3<f32> = ((-(surface_to_light_1) + (light_4.shadow_depth_bias * normalize(surface_to_light_1))) + ((surface_normal_1.xyz * light_4.shadow_normal_bias) * distance_to_light_1));
if (fwd.z >= 0.0) {
sign = 1.0;
}
let _e73: f32 = sign;
let a_1: f32 = (-1.0 / (fwd.z + _e73));
let b: f32 = ((fwd.x * fwd.y) * a_1);
let _e81: f32 = sign;
let _e88: f32 = sign;
let _e90: f32 = sign;
let up_dir: vec3<f32> = vec3<f32>((1.0 + (((_e81 * fwd.x) * fwd.x) * a_1)), (_e88 * b), (-(_e90) * fwd.x));
let _e96: f32 = sign;
let right_dir: vec3<f32> = vec3<f32>(-(b), (-(_e96) - ((fwd.y * fwd.y) * a_1)), fwd.y);
let light_inv_rot: mat3x3<f32> = mat3x3<f32>(right_dir, up_dir, fwd);
let projected_position: vec3<f32> = (offset_position_1 * light_inv_rot);
let f_div_minus_z: f32 = (1.0 / (light_4.spot_light_tan_angle * -(projected_position.z)));
let shadow_xy_ndc: vec2<f32> = (projected_position.xy * f_div_minus_z);
let shadow_uv: vec2<f32> = ((shadow_xy_ndc * vec2<f32>(0.5, -0.5)) + vec2<f32>(0.5, 0.5));
let depth_1: f32 = (0.10000000149011612 / -(projected_position.z));
let _e129: i32 = mesh_view_bindings__lights.spot_light_shadowmap_offset;
let _e131: f32 = textureSampleCompareLevel(mesh_view_bindings__directional_shadow_textures, mesh_view_bindings__directional_shadow_textures_sampler, shadow_uv, (i32(light_id_1) + _e129), depth_1);
return _e131;
}
fn shadows__fetch_directional_shadow(light_id_2: u32, frag_position_2: vec4<f32>, surface_normal_2: vec3<f32>) -> f32 {
let light_5: mesh_view_types__DirectionalLight = mesh_view_bindings__lights.directional_lights[light_id_2];
let normal_offset_1: vec3<f32> = (light_5.shadow_normal_bias * surface_normal_2.xyz);
let depth_offset_1: vec3<f32> = (light_5.shadow_depth_bias * light_5.direction_to_light.xyz);
let offset_position_2: vec4<f32> = vec4<f32>(((frag_position_2.xyz + normal_offset_1) + depth_offset_1), frag_position_2.w);
let offset_position_clip: vec4<f32> = (light_5.view_projection * offset_position_2);
if (offset_position_clip.w <= 0.0) {
return 1.0;
}
let offset_position_ndc: vec3<f32> = (offset_position_clip.xyz / vec3<f32>(offset_position_clip.w));
if ((any((offset_position_ndc.xy < vec2<f32>(-1.0))) || (offset_position_ndc.z < 0.0)) || any((offset_position_ndc > vec3<f32>(1.0)))) {
return 1.0;
}
let flip_correction: vec2<f32> = vec2<f32>(0.5, -0.5);
let light_local: vec2<f32> = ((offset_position_ndc.xy * flip_correction) + vec2<f32>(0.5, 0.5));
let depth_2: f32 = offset_position_ndc.z;
let _e66: f32 = textureSampleCompareLevel(mesh_view_bindings__directional_shadow_textures, mesh_view_bindings__directional_shadow_textures_sampler, light_local, i32(light_id_2), depth_2);
return _e66;
}
fn pbr_functions__prepare_normal(standard_material_flags: u32, world_normal: vec3<f32>, uv: vec2<f32>, is_front_1: bool) -> vec3<f32> {
var N_2: vec3<f32>;
N_2 = normalize(world_normal);
if ((standard_material_flags & pbr_types__STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT) != 0u) {
if !(is_front_1) {
let _e37: vec3<f32> = N_2;
N_2 = -(_e37);
}
}
let _e39: vec3<f32> = N_2;
return _e39;
}
fn pbr_functions__calculate_view(world_position_2: vec4<f32>, is_orthographic_3: bool) -> vec3<f32> {
var V_2: vec3<f32>;
if is_orthographic_3 {
let _e34: f32 = mesh_view_bindings__view.view_proj[0][2];
let _e39: f32 = mesh_view_bindings__view.view_proj[1][2];
let _e44: f32 = mesh_view_bindings__view.view_proj[2][2];
V_2 = normalize(vec3<f32>(_e34, _e39, _e44));
} else {
let _e48: vec3<f32> = mesh_view_bindings__view.world_position;
V_2 = normalize((_e48.xyz - world_position_2.xyz));
}
let _e53: vec3<f32> = V_2;
return _e53;
}
fn pbr_functions__pbr_input_new() -> pbr_functions__PbrInput {
var pbr_input_1: pbr_functions__PbrInput;
let _e29: pbr_types__StandardMaterial = pbr_types__standard_material_new();
pbr_input_1.material = _e29;
pbr_input_1.occlusion = 1.0;
pbr_input_1.frag_coord = vec4<f32>(0.0, 0.0, 0.0, 1.0);
pbr_input_1.world_position = vec4<f32>(0.0, 0.0, 0.0, 1.0);
pbr_input_1.world_normal = vec3<f32>(0.0, 0.0, 1.0);
pbr_input_1.is_orthographic = false;
pbr_input_1.N = vec3<f32>(0.0, 0.0, 1.0);
pbr_input_1.V = vec3<f32>(1.0, 0.0, 0.0);
let _e61: pbr_functions__PbrInput = pbr_input_1;
return _e61;
}
fn pbr_functions__pbr(in: pbr_functions__PbrInput) -> vec4<f32> {
var output_color_2: vec4<f32>;
var light_accum: vec3<f32>;
var i: u32;
var shadow: f32;
var i_1: u32;
var shadow_1: f32;
var i_2: u32 = 0u;
var shadow_2: f32;
output_color_2 = in.material.base_color;
let emissive_1: vec4<f32> = in.material.emissive;
let metallic_1: f32 = in.material.metallic;
let perceptual_roughness_2: f32 = in.material.perceptual_roughness;
let _e37: f32 = lighting__perceptualRoughnessToRoughness(perceptual_roughness_2);
let occlusion_1: f32 = in.occlusion;
if ((in.material.flags & pbr_types__STANDARD_MATERIAL_FLAGS_ALPHA_MODE_OPAQUE) != 0u) {
output_color_2.w = 1.0;
} else {
if ((in.material.flags & pbr_types__STANDARD_MATERIAL_FLAGS_ALPHA_MODE_MASK) != 0u) {
let _e52: f32 = output_color_2.w;
if (_e52 >= in.material.alpha_cutoff) {
output_color_2.w = 1.0;
} else {
discard;
}
}
}
let NdotV_3: f32 = max(dot(in.N, in.V), 9.999999747378752e-5);
let reflectance: f32 = in.material.reflectance;
let _e71: vec4<f32> = output_color_2;
let F0_3: vec3<f32> = (vec3<f32>((((0.1599999964237213 * reflectance) * reflectance) * (1.0 - metallic_1))) + (_e71.xyz * metallic_1));
let _e76: vec4<f32> = output_color_2;
let diffuse_color: vec3<f32> = (_e76.xyz * (1.0 - metallic_1));
let R_3: vec3<f32> = reflect(-(in.V), in.N);
light_accum = vec3<f32>(0.0);
let _e92: f32 = mesh_view_bindings__view.inverse_view[0][2];
let _e97: f32 = mesh_view_bindings__view.inverse_view[1][2];
let _e102: f32 = mesh_view_bindings__view.inverse_view[2][2];
let _e107: f32 = mesh_view_bindings__view.inverse_view[3][2];
let view_z_3: f32 = dot(vec4<f32>(_e92, _e97, _e102, _e107), in.world_position);
let _e114: u32 = clustered_forward__fragment_cluster_index(in.frag_coord.xy, view_z_3, in.is_orthographic);
let _e115: vec3<u32> = clustered_forward__unpack_offset_and_counts(_e114);
i = _e115.x;
loop {
let _e119: u32 = i;
if (_e119 < (_e115.x + _e115.y)) {
} else {
break;
}
let _e129: u32 = i;
let _e130: u32 = clustered_forward__get_light_id(_e129);
let light_6: mesh_view_types__PointLight = mesh_view_bindings__point_lights.data[_e130];
shadow = 1.0;
let _e137: u32 = mesh_bindings__mesh.flags;
if (((_e137 & mesh_types__MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u) && ((light_6.flags & mesh_view_types__POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u)) {
let _e148: f32 = shadows__fetch_point_shadow(_e130, in.world_position, in.world_normal);
shadow = _e148;
}
let _e153: vec3<f32> = lighting__point_light(in.world_position.xyz, light_6, _e37, NdotV_3, in.N, in.V, R_3, F0_3, diffuse_color);
let _e154: vec3<f32> = light_accum;
let _e155: f32 = shadow;
light_accum = (_e154 + (_e153 * _e155));
continuing {
let _e126: u32 = i;
i = (_e126 + 1u);
}
}
i_1 = (_e115.x + _e115.y);
loop {
let _e164: u32 = i_1;
if (_e164 < ((_e115.x + _e115.y) + _e115.z)) {
} else {
break;
}
let _e177: u32 = i_1;
let _e178: u32 = clustered_forward__get_light_id(_e177);
let light_7: mesh_view_types__PointLight = mesh_view_bindings__point_lights.data[_e178];
shadow_1 = 1.0;
let _e185: u32 = mesh_bindings__mesh.flags;
if (((_e185 & mesh_types__MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u) && ((light_7.flags & mesh_view_types__POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u)) {
let _e196: f32 = shadows__fetch_spot_shadow(_e178, in.world_position, in.world_normal);
shadow_1 = _e196;
}
let _e201: vec3<f32> = lighting__spot_light(in.world_position.xyz, light_7, _e37, NdotV_3, in.N, in.V, R_3, F0_3, diffuse_color);
let _e202: vec3<f32> = light_accum;
let _e203: f32 = shadow_1;
light_accum = (_e202 + (_e201 * _e203));
continuing {
let _e174: u32 = i_1;
i_1 = (_e174 + 1u);
}
}
let n_directional_lights: u32 = mesh_view_bindings__lights.n_directional_lights;
loop {
let _e210: u32 = i_2;
if (_e210 < n_directional_lights) {
} else {
break;
}
let _e216: u32 = i_2;
let light_8: mesh_view_types__DirectionalLight = mesh_view_bindings__lights.directional_lights[_e216];
shadow_2 = 1.0;
let _e222: u32 = mesh_bindings__mesh.flags;
if (((_e222 & mesh_types__MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u) && ((light_8.flags & mesh_view_types__DIRECTIONAL_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u)) {
let _e231: u32 = i_2;
let _e234: f32 = shadows__fetch_directional_shadow(_e231, in.world_position, in.world_normal);
shadow_2 = _e234;
}
let _e237: vec3<f32> = lighting__directional_light(light_8, _e37, NdotV_3, in.N, in.V, R_3, F0_3, diffuse_color);
let _e238: vec3<f32> = light_accum;
let _e239: f32 = shadow_2;
light_accum = (_e238 + (_e237 * _e239));
continuing {
let _e212: u32 = i_2;
i_2 = (_e212 + 1u);
}
}
let _e243: vec3<f32> = lighting__EnvBRDFApprox(diffuse_color, 1.0, NdotV_3);
let _e244: vec3<f32> = lighting__EnvBRDFApprox(F0_3, perceptual_roughness_2, NdotV_3);
let _e245: vec3<f32> = light_accum;
let _e248: vec4<f32> = mesh_view_bindings__lights.ambient_color;
let _e255: f32 = output_color_2.w;
let _e259: f32 = output_color_2.w;
output_color_2 = vec4<f32>(((_e245 + (((_e243 + _e244) * _e248.xyz) * occlusion_1)) + (emissive_1.xyz * _e255)), _e259);
let _e261: vec4<f32> = output_color_2;
let _e263: vec4<f32> = clustered_forward__cluster_debug_visualization(_e261, view_z_3, in.is_orthographic, _e115, _e114);
output_color_2 = _e263;
let _e264: vec4<f32> = output_color_2;
return _e264;
}
fn pbr_functions__tone_mapping(in_1: vec4<f32>) -> vec4<f32> {
let _e29: vec3<f32> = lighting__reinhard_luminance(in_1.xyz);
return vec4<f32>(_e29, in_1.w);
}
@fragment
fn fragment(mesh: mesh_vertex_output__MeshVertexOutput, @builtin(front_facing) is_front: bool, @builtin(position) frag_coord: vec4<f32>) -> @location(0) vec4<f32> {
var output_color: vec4<f32>;
var pbr_input: pbr_functions__PbrInput;
var emissive: vec4<f32>;
var metallic: f32;
var perceptual_roughness: f32;
var occlusion: f32;
let _e42: vec4<f32> = pbr_bindings__material.base_color;
output_color = _e42;
let _e45: u32 = pbr_bindings__material.flags;
if ((_e45 & pbr_types__STANDARD_MATERIAL_FLAGS_BASE_COLOR_TEXTURE_BIT) != 0u) {
let _e49: vec4<f32> = output_color;
let _e51: vec4<f32> = textureSample(pbr_bindings__base_color_texture, pbr_bindings__base_color_sampler, mesh.uv);
output_color = (_e49 * _e51);
}
let _e54: u32 = pbr_bindings__material.flags;
if ((_e54 & pbr_types__STANDARD_MATERIAL_FLAGS_UNLIT_BIT) == 0u) {
let _e61: vec4<f32> = output_color;
pbr_input.material.base_color = _e61;
let _e65: f32 = pbr_bindings__material.reflectance;
pbr_input.material.reflectance = _e65;
let _e69: u32 = pbr_bindings__material.flags;
pbr_input.material.flags = _e69;
let _e73: f32 = pbr_bindings__material.alpha_cutoff;
pbr_input.material.alpha_cutoff = _e73;
let _e75: vec4<f32> = pbr_bindings__material.emissive;
emissive = _e75;
let _e78: u32 = pbr_bindings__material.flags;
if ((_e78 & pbr_types__STANDARD_MATERIAL_FLAGS_EMISSIVE_TEXTURE_BIT) != 0u) {
let _e82: vec4<f32> = emissive;
let _e85: vec4<f32> = textureSample(pbr_bindings__emissive_texture, pbr_bindings__emissive_sampler, mesh.uv);
emissive = vec4<f32>((_e82.xyz * _e85.xyz), 1.0);
}
let _e92: vec4<f32> = emissive;
pbr_input.material.emissive = _e92;
let _e94: f32 = pbr_bindings__material.metallic;
metallic = _e94;
let _e97: f32 = pbr_bindings__material.perceptual_roughness;
perceptual_roughness = _e97;
let _e100: u32 = pbr_bindings__material.flags;
if ((_e100 & pbr_types__STANDARD_MATERIAL_FLAGS_METALLIC_ROUGHNESS_TEXTURE_BIT) != 0u) {
let metallic_roughness: vec4<f32> = textureSample(pbr_bindings__metallic_roughness_texture, pbr_bindings__metallic_roughness_sampler, mesh.uv);
let _e106: f32 = metallic;
metallic = (_e106 * metallic_roughness.z);
let _e109: f32 = perceptual_roughness;
perceptual_roughness = (_e109 * metallic_roughness.y);
}
let _e114: f32 = metallic;
pbr_input.material.metallic = _e114;
let _e117: f32 = perceptual_roughness;
pbr_input.material.perceptual_roughness = _e117;
occlusion = 1.0;
let _e121: u32 = pbr_bindings__material.flags;
if ((_e121 & pbr_types__STANDARD_MATERIAL_FLAGS_OCCLUSION_TEXTURE_BIT) != 0u) {
let _e126: vec4<f32> = textureSample(pbr_bindings__occlusion_texture, pbr_bindings__occlusion_sampler, mesh.uv);
occlusion = _e126.x;
}
let _e129: f32 = occlusion;
pbr_input.occlusion = _e129;
pbr_input.frag_coord = frag_coord;
pbr_input.world_position = mesh.world_position;
pbr_input.world_normal = mesh.world_normal;
let _e140: f32 = mesh_view_bindings__view.projection[3][3];
pbr_input.is_orthographic = (_e140 == 1.0);
let _e145: u32 = pbr_bindings__material.flags;
let _e148: vec3<f32> = pbr_functions__prepare_normal(_e145, mesh.world_normal, mesh.uv, is_front);
pbr_input.N = _e148;
let _e152: bool = pbr_input.is_orthographic;
let _e153: vec3<f32> = pbr_functions__calculate_view(mesh.world_position, _e152);
pbr_input.V = _e153;
let _e154: pbr_functions__PbrInput = pbr_input;
let _e155: vec4<f32> = pbr_functions__pbr(_e154);
let _e156: vec4<f32> = pbr_functions__tone_mapping(_e155);
output_color = _e156;
}
let _e157: vec4<f32> = output_color;
return _e157;
}

View File

@@ -0,0 +1,90 @@
#import bevy_pbr::mesh_vertex_output as OutputTypes
#import bevy_pbr::pbr_functions as PbrCore
#import bevy_pbr::pbr_bindings as MaterialBindings
#import bevy_pbr::pbr_types as PbrTypes
#import bevy_pbr::mesh_view_bindings as ViewBindings
@fragment
fn fragment(
mesh: OutputTypes::MeshVertexOutput,
@builtin(front_facing) is_front: bool,
@builtin(position) frag_coord: vec4<f32>,
) -> @location(0) vec4<f32> {
var output_color: vec4<f32> = MaterialBindings::material.base_color;
#ifdef VERTEX_COLORS
output_color = output_color * mesh.color;
#endif
#ifdef VERTEX_UVS
if ((MaterialBindings::material.flags & PbrTypes::STANDARD_MATERIAL_FLAGS_BASE_COLOR_TEXTURE_BIT) != 0u) {
output_color = output_color * textureSample(MaterialBindings::base_color_texture, MaterialBindings::base_color_sampler, mesh.uv);
}
#endif
// NOTE: If the unlit bit is not set the flag test == 0u is true, so this branch handles the lit case
if ((MaterialBindings::material.flags & PbrTypes::STANDARD_MATERIAL_FLAGS_UNLIT_BIT) == 0u) {
// Prepare a 'processed' StandardMaterial by sampling all textures to resolve
// the material members
var pbr_input: PbrCore::PbrInput;
pbr_input.material.base_color = output_color;
pbr_input.material.reflectance = MaterialBindings::material.reflectance;
pbr_input.material.flags = MaterialBindings::material.flags;
pbr_input.material.alpha_cutoff = MaterialBindings::material.alpha_cutoff;
// TODO use .a for exposure compensation in HDR
var emissive: vec4<f32> = MaterialBindings::material.emissive;
#ifdef VERTEX_UVS
if ((MaterialBindings::material.flags & PbrTypes::STANDARD_MATERIAL_FLAGS_EMISSIVE_TEXTURE_BIT) != 0u) {
emissive = vec4<f32>(emissive.rgb * textureSample(MaterialBindings::emissive_texture, MaterialBindings::emissive_sampler, mesh.uv).rgb, 1.0);
}
#endif
pbr_input.material.emissive = emissive;
var metallic: f32 = MaterialBindings::material.metallic;
var perceptual_roughness: f32 = MaterialBindings::material.perceptual_roughness;
#ifdef VERTEX_UVS
if ((MaterialBindings::material.flags & PbrTypes::STANDARD_MATERIAL_FLAGS_METALLIC_ROUGHNESS_TEXTURE_BIT) != 0u) {
let metallic_roughness = textureSample(MaterialBindings::metallic_roughness_texture, MaterialBindings::metallic_roughness_sampler, mesh.uv);
// Sampling from GLTF standard channels for now
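// glTF packs roughness in the green channel and metallic in the blue channel of the
// metallic-roughness texture.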
metallic = metallic * metallic_roughness.b;
perceptual_roughness = perceptual_roughness * metallic_roughness.g;
}
#endif
pbr_input.material.metallic = metallic;
pbr_input.material.perceptual_roughness = perceptual_roughness;
var occlusion: f32 = 1.0;
#ifdef VERTEX_UVS
if ((MaterialBindings::material.flags & PbrTypes::STANDARD_MATERIAL_FLAGS_OCCLUSION_TEXTURE_BIT) != 0u) {
occlusion = textureSample(MaterialBindings::occlusion_texture, MaterialBindings::occlusion_sampler, mesh.uv).r;
}
#endif
pbr_input.occlusion = occlusion;
pbr_input.frag_coord = frag_coord;
pbr_input.world_position = mesh.world_position;
pbr_input.world_normal = mesh.world_normal;
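// projection[3].w is 1.0 for an orthographic projection and 0.0 for a perspective one.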
pbr_input.is_orthographic = ViewBindings::view.projection[3].w == 1.0;
pbr_input.N = PbrCore::prepare_normal(
MaterialBindings::material.flags,
mesh.world_normal,
#ifdef VERTEX_TANGENTS
#ifdef STANDARDMATERIAL_NORMAL_MAP
mesh.world_tangent,
#endif
#endif
#ifdef VERTEX_UVS
mesh.uv,
#endif
is_front,
);
pbr_input.V = PbrCore::calculate_view(mesh.world_position, pbr_input.is_orthographic);
output_color = PbrCore::tone_mapping(PbrCore::pbr(pbr_input));
}
return output_color;
}

View File

@@ -0,0 +1,26 @@
#define_import_path bevy_pbr::pbr_bindings
#import bevy_pbr::pbr_types as Types
@group(1) @binding(0)
var<uniform> material: Types::StandardMaterial;
@group(1) @binding(1)
var base_color_texture: texture_2d<f32>;
@group(1) @binding(2)
var base_color_sampler: sampler;
@group(1) @binding(3)
var emissive_texture: texture_2d<f32>;
@group(1) @binding(4)
var emissive_sampler: sampler;
@group(1) @binding(5)
var metallic_roughness_texture: texture_2d<f32>;
@group(1) @binding(6)
var metallic_roughness_sampler: sampler;
@group(1) @binding(7)
var occlusion_texture: texture_2d<f32>;
@group(1) @binding(8)
var occlusion_sampler: sampler;
@group(1) @binding(9)
var normal_map_texture: texture_2d<f32>;
@group(1) @binding(10)
var normal_map_sampler: sampler;

View File

@@ -0,0 +1,252 @@
#define_import_path bevy_pbr::pbr_functions
#import bevy_pbr::pbr_types as PbrTypes
#import bevy_pbr::mesh_types as MeshTypes
#import bevy_pbr::mesh_bindings as MeshBindings
#import bevy_pbr::mesh_view_types as ViewTypes
#import bevy_pbr::mesh_view_bindings as ViewBindings
#import bevy_pbr::lighting as Lighting
#import bevy_pbr::clustered_forward as Clustering
#import bevy_pbr::shadows as Shadows
// NOTE: This ensures that the world_normal is normalized and, if vertex tangents and a
// normal map are available, that normal mapping is applied.
fn prepare_normal(
standard_material_flags: u32,
world_normal: vec3<f32>,
#ifdef VERTEX_TANGENTS
#ifdef STANDARDMATERIAL_NORMAL_MAP
world_tangent: vec4<f32>,
#endif
#endif
#ifdef VERTEX_UVS
uv: vec2<f32>,
#endif
is_front: bool,
) -> vec3<f32> {
var N: vec3<f32> = normalize(world_normal);
#ifdef VERTEX_TANGENTS
#ifdef STANDARDMATERIAL_NORMAL_MAP
// NOTE: The mikktspace method of normal mapping explicitly requires that these NOT be
// normalized nor any Gram-Schmidt applied to ensure the vertex normal is orthogonal to the
// vertex tangent! Do not change this code unless you really know what you are doing.
// http://www.mikktspace.com/
var T: vec3<f32> = world_tangent.xyz;
var B: vec3<f32> = world_tangent.w * cross(N, T);
#endif
#endif
if ((standard_material_flags & PbrTypes::STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT) != 0u) {
if (!is_front) {
N = -N;
#ifdef VERTEX_TANGENTS
#ifdef STANDARDMATERIAL_NORMAL_MAP
T = -T;
B = -B;
#endif
#endif
}
}
#ifdef VERTEX_TANGENTS
#ifdef VERTEX_UVS
#ifdef STANDARDMATERIAL_NORMAL_MAP
// Nt is the tangent-space normal.
var Nt = textureSample(normal_map_texture, normal_map_sampler, uv).rgb;
if ((standard_material_flags & PbrTypes::STANDARD_MATERIAL_FLAGS_TWO_COMPONENT_NORMAL_MAP) != 0u) {
// Only use the xy components and derive z for 2-component normal maps.
Nt = vec3<f32>(Nt.rg * 2.0 - 1.0, 0.0);
Nt.z = sqrt(1.0 - Nt.x * Nt.x - Nt.y * Nt.y);
} else {
Nt = Nt * 2.0 - 1.0;
}
// Normal maps authored for DirectX require flipping the y component
if ((standard_material_flags & PbrTypes::STANDARD_MATERIAL_FLAGS_FLIP_NORMAL_MAP_Y) != 0u) {
Nt.y = -Nt.y;
}
// NOTE: The mikktspace method of normal mapping maps the tangent-space normal from
// the normal map texture in this way to be an EXACT inverse of how the normal map baker
// calculates the normal maps, so there is no error introduced. Do not change this code
// unless you really know what you are doing.
// http://www.mikktspace.com/
N = normalize(Nt.x * T + Nt.y * B + Nt.z * N);
#endif
#endif
#endif
return N;
}
// NOTE: Correctly calculates the view vector depending on whether
// the projection is orthographic or perspective.
fn calculate_view(
world_position: vec4<f32>,
is_orthographic: bool,
) -> vec3<f32> {
var V: vec3<f32>;
if (is_orthographic) {
// Orthographic view vector
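// view_proj is column-major, so the .z components of the first three columns assemble its
// third row, i.e. the world-space direction along which clip-space z increases; for an
// orthographic projection that (constant) direction serves as the view vector.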
V = normalize(vec3<f32>(ViewBindings::view.view_proj[0].z, ViewBindings::view.view_proj[1].z, ViewBindings::view.view_proj[2].z));
} else {
// Only valid for a perspective projection
V = normalize(ViewBindings::view.world_position.xyz - world_position.xyz);
}
return V;
}
struct PbrInput {
material: PbrTypes::StandardMaterial,
occlusion: f32,
frag_coord: vec4<f32>,
world_position: vec4<f32>,
// Normalized world normal used for shadow mapping as normal-mapping is not used for shadow
// mapping
world_normal: vec3<f32>,
// Normalized normal-mapped world normal used for lighting
N: vec3<f32>,
// Normalized view vector in world space, pointing from the fragment world position toward the
// view world position
V: vec3<f32>,
is_orthographic: bool,
};
// Creates a PbrInput with default values
fn pbr_input_new() -> PbrInput {
var pbr_input: PbrInput;
pbr_input.material = PbrTypes::standard_material_new();
pbr_input.occlusion = 1.0;
pbr_input.frag_coord = vec4<f32>(0.0, 0.0, 0.0, 1.0);
pbr_input.world_position = vec4<f32>(0.0, 0.0, 0.0, 1.0);
pbr_input.world_normal = vec3<f32>(0.0, 0.0, 1.0);
pbr_input.is_orthographic = false;
pbr_input.N = vec3<f32>(0.0, 0.0, 1.0);
pbr_input.V = vec3<f32>(1.0, 0.0, 0.0);
return pbr_input;
}
fn pbr(
in: PbrInput,
) -> vec4<f32> {
var output_color: vec4<f32> = in.material.base_color;
// TODO use .a for exposure compensation in HDR
let emissive = in.material.emissive;
// calculate non-linear roughness from linear perceptualRoughness
let metallic = in.material.metallic;
let perceptual_roughness = in.material.perceptual_roughness;
let roughness = Lighting::perceptualRoughnessToRoughness(perceptual_roughness);
let occlusion = in.occlusion;
if ((in.material.flags & PbrTypes::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_OPAQUE) != 0u) {
// NOTE: If rendering as opaque, alpha should be ignored so set to 1.0
output_color.a = 1.0;
} else if ((in.material.flags & PbrTypes::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_MASK) != 0u) {
if (output_color.a >= in.material.alpha_cutoff) {
// NOTE: If rendering as masked alpha and >= the cutoff, render as fully opaque
output_color.a = 1.0;
} else {
// NOTE: output_color.a < in.material.alpha_cutoff means the fragment should not be rendered
// NOTE: This and any other discards mean that early-z testing cannot be done!
discard;
}
}
// Neubelt and Pettineo 2013, "Crafting a Next-gen Material Pipeline for The Order: 1886"
let NdotV = max(dot(in.N, in.V), 0.0001);
// Remapping [0,1] reflectance to F0
// See https://google.github.io/filament/Filament.html#materialsystem/parameterization/remapping
let reflectance = in.material.reflectance;
let F0 = 0.16 * reflectance * reflectance * (1.0 - metallic) + output_color.rgb * metallic;
// Diffuse strength inversely related to metallicity
let diffuse_color = output_color.rgb * (1.0 - metallic);
let R = reflect(-in.V, in.N);
// accumulate color
var light_accum: vec3<f32> = vec3<f32>(0.0);
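// inverse_view transforms world space to view space; the .z components of its columns
// assemble its third row, so this dot product yields the fragment's view-space z.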
let view_z = dot(vec4<f32>(
ViewBindings::view.inverse_view[0].z,
ViewBindings::view.inverse_view[1].z,
ViewBindings::view.inverse_view[2].z,
ViewBindings::view.inverse_view[3].z
), in.world_position);
let cluster_index = Clustering::fragment_cluster_index(in.frag_coord.xy, view_z, in.is_orthographic);
let offset_and_counts = Clustering::unpack_offset_and_counts(cluster_index);
// point lights
for (var i: u32 = offset_and_counts[0]; i < offset_and_counts[0] + offset_and_counts[1]; i = i + 1u) {
let light_id = Clustering::get_light_id(i);
let light = ViewBindings::point_lights.data[light_id];
var shadow: f32 = 1.0;
if ((MeshBindings::mesh.flags & MeshTypes::MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u
&& (light.flags & ViewTypes::POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) {
shadow = Shadows::fetch_point_shadow(light_id, in.world_position, in.world_normal);
}
let light_contrib = Lighting::point_light(in.world_position.xyz, light, roughness, NdotV, in.N, in.V, R, F0, diffuse_color);
light_accum = light_accum + light_contrib * shadow;
}
// spot lights
for (var i: u32 = offset_and_counts[0] + offset_and_counts[1]; i < offset_and_counts[0] + offset_and_counts[1] + offset_and_counts[2]; i = i + 1u) {
let light_id = Clustering::get_light_id(i);
let light = ViewBindings::point_lights.data[light_id];
var shadow: f32 = 1.0;
if ((MeshBindings::mesh.flags & MeshTypes::MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u
&& (light.flags & ViewTypes::POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) {
shadow = Shadows::fetch_spot_shadow(light_id, in.world_position, in.world_normal);
}
let light_contrib = Lighting::spot_light(in.world_position.xyz, light, roughness, NdotV, in.N, in.V, R, F0, diffuse_color);
light_accum = light_accum + light_contrib * shadow;
}
let n_directional_lights = ViewBindings::lights.n_directional_lights;
for (var i: u32 = 0u; i < n_directional_lights; i = i + 1u) {
let light = ViewBindings::lights.directional_lights[i];
var shadow: f32 = 1.0;
if ((MeshBindings::mesh.flags & MeshTypes::MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u
&& (light.flags & ViewTypes::DIRECTIONAL_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) {
shadow = Shadows::fetch_directional_shadow(i, in.world_position, in.world_normal);
}
let light_contrib = Lighting::directional_light(light, roughness, NdotV, in.N, in.V, R, F0, diffuse_color);
light_accum = light_accum + light_contrib * shadow;
}
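// Ambient/environment approximation: EnvBRDFApprox is an analytic fit to the preintegrated
// environment BRDF. Perceptual roughness 1.0 is used for the diffuse ambient lobe and the
// material's perceptual_roughness for the specular ambient lobe.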
let diffuse_ambient = Lighting::EnvBRDFApprox(diffuse_color, 1.0, NdotV);
let specular_ambient = Lighting::EnvBRDFApprox(F0, perceptual_roughness, NdotV);
output_color = vec4<f32>(
light_accum +
(diffuse_ambient + specular_ambient) * ViewBindings::lights.ambient_color.rgb * occlusion +
emissive.rgb * output_color.a,
output_color.a);
output_color = Clustering::cluster_debug_visualization(
output_color,
view_z,
in.is_orthographic,
offset_and_counts,
cluster_index,
);
return output_color;
}
fn tone_mapping(in: vec4<f32>) -> vec4<f32> {
// tone_mapping
return vec4<f32>(Lighting::reinhard_luminance(in.rgb), in.a);
// Gamma correction.
// Not needed with sRGB buffer
// output_color.rgb = pow(output_color.rgb, vec3(1.0 / 2.2));
}
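// Usage sketch (illustrative; how this is wired up is an assumption, not shown in this file):
// the fragment entry point would typically apply it to the final lit color, e.g.
//   output_color = tone_mapping(output_color);
// before returning, so HDR lighting is mapped into [0, 1] for an sRGB target.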

View File

@@ -0,0 +1,283 @@
#define_import_path bevy_pbr::lighting
#import bevy_pbr::utils as Utils
#import bevy_pbr::mesh_view_types as ViewTypes
// From the Filament design doc
// https://google.github.io/filament/Filament.html#table_symbols
// Symbol Definition
// v View unit vector
// l Incident light unit vector
// n Surface normal unit vector
// h Half unit vector between l and v
// f BRDF
// f_d Diffuse component of a BRDF
// f_r Specular component of a BRDF
// α Roughness, remapped from the perceptualRoughness input
// σ Diffuse reflectance
// Ω Spherical domain
// f0 Reflectance at normal incidence
// f90 Reflectance at grazing angle
// χ+(a) Heaviside function (1 if a>0 and 0 otherwise)
// n_ior Index of refraction (IOR) of an interface
// ⟨n⋅l⟩ Dot product clamped to [0..1]
// ⟨a⟩ Saturated value (clamped to [0..1])
// The Bidirectional Reflectance Distribution Function (BRDF) describes the surface response of a standard material
// and consists of two components, the diffuse component (f_d) and the specular component (f_r):
// f(v,l) = f_d(v,l) + f_r(v,l)
//
// The form of the microfacet model is the same for diffuse and specular
// f_r(v,l) = f_d(v,l) = 1 / { |n⋅v||n⋅l| } ∫_Ω D(m,α) G(v,l,m) f_m(v,l,m) (v⋅m) (l⋅m) dm
//
// In which:
// D, also called the Normal Distribution Function (NDF) models the distribution of the microfacets
// G models the visibility (or occlusion or shadow-masking) of the microfacets
// f_m is the microfacet BRDF and differs between specular and diffuse components
//
// The above integration needs to be approximated.
// distanceAttenuation is simply the square falloff of light intensity
// combined with a smooth attenuation at the edge of the light radius
//
// light radius is a non-physical construct for efficiency purposes,
// because otherwise every light affects every fragment in the scene
fn getDistanceAttenuation(distanceSquare: f32, inverseRangeSquared: f32) -> f32 {
let factor = distanceSquare * inverseRangeSquared;
let smoothFactor = Utils::saturate(1.0 - factor * factor);
let attenuation = smoothFactor * smoothFactor;
return attenuation * 1.0 / max(distanceSquare, 0.0001);
}
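// Worked example: for a light with range 10 (inverseRangeSquared = 0.01) and a fragment at
// distance 5 (distanceSquare = 25): factor = 0.25, smoothFactor = 1 - 0.0625 = 0.9375,
// attenuation = 0.9375^2 ≈ 0.879, so the returned value ≈ 0.879 / 25 ≈ 0.035. At distance 10,
// factor = 1 and the smooth term drives the attenuation to exactly 0.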
// Normal distribution function (specular D)
// Based on https://google.github.io/filament/Filament.html#citation-walter07
// D_GGX(h,α) = α^2 / { π ((n⋅h)^2 (α^2 - 1) + 1)^2 }
// Simple implementation, has precision problems when using fp16 instead of fp32
// see https://google.github.io/filament/Filament.html#listing_speculardfp16
fn D_GGX(roughness: f32, NoH: f32, h: vec3<f32>) -> f32 {
let oneMinusNoHSquared = 1.0 - NoH * NoH;
let a = NoH * roughness;
let k = roughness / (oneMinusNoHSquared + a * a);
let d = k * k * (1.0 / Utils::PI);
return d;
}
// Visibility function (Specular G)
// V(v,l,a) = G(v,l,α) / { 4 (n⋅v) (n⋅l) }
// such that f_r becomes
// f_r(v,l) = D(h,α) V(v,l,α) F(v,h,f0)
// where
// V(v,l,α) = 0.5 / { n⋅l sqrt((n⋅v)^2 (1 - α^2) + α^2) + n⋅v sqrt((n⋅l)^2 (1 - α^2) + α^2) }
// Note the two sqrt's; they may be slow on mobile, see https://google.github.io/filament/Filament.html#listing_approximatedspecularv
fn V_SmithGGXCorrelated(roughness: f32, NoV: f32, NoL: f32) -> f32 {
let a2 = roughness * roughness;
let lambdaV = NoL * sqrt((NoV - a2 * NoV) * NoV + a2);
let lambdaL = NoV * sqrt((NoL - a2 * NoL) * NoL + a2);
let v = 0.5 / (lambdaV + lambdaL);
return v;
}
// Fresnel function
// see https://google.github.io/filament/Filament.html#citation-schlick94
// F_Schlick(v,h,f_0,f_90) = f_0 + (f_90 - f_0) (1 - v⋅h)^5
fn F_Schlick_vec(f0: vec3<f32>, f90: f32, VoH: f32) -> vec3<f32> {
// not using mix to keep the vec3 and float versions identical
return f0 + (f90 - f0) * pow(1.0 - VoH, 5.0);
}
fn F_Schlick(f0: f32, f90: f32, VoH: f32) -> f32 {
// not using mix to keep the vec3 and float versions identical
return f0 + (f90 - f0) * pow(1.0 - VoH, 5.0);
}
fn fresnel(f0: vec3<f32>, LoH: f32) -> vec3<f32> {
// f_90 suitable for ambient occlusion
// see https://google.github.io/filament/Filament.html#lighting/occlusion
let f90 = Utils::saturate(dot(f0, vec3<f32>(50.0 * 0.33)));
return F_Schlick_vec(f0, f90, LoH);
}
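// NOTE: dot(f0, vec3(50.0 * 0.33)) ≈ 16.5 * (f0.r + f0.g + f0.b), so any common dielectric or
// metallic f0 saturates f90 to 1.0; only very dark f0 values lower the grazing reflectance,
// which Filament uses as cheap pre-baked micro-occlusion.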
// Specular BRDF
// https://google.github.io/filament/Filament.html#materialsystem/specularbrdf
// Cook-Torrance approximation of the microfacet model integration using Fresnel law F to model f_m
// f_r(v,l) = { D(h,α) G(v,l,α) F(v,h,f0) } / { 4 (n⋅v) (n⋅l) }
fn specular(f0: vec3<f32>, roughness: f32, h: vec3<f32>, NoV: f32, NoL: f32,
NoH: f32, LoH: f32, specularIntensity: f32) -> vec3<f32> {
let D = D_GGX(roughness, NoH, h);
let V = V_SmithGGXCorrelated(roughness, NoV, NoL);
let F = fresnel(f0, LoH);
return (specularIntensity * D * V) * F;
}
// Diffuse BRDF
// https://google.github.io/filament/Filament.html#materialsystem/diffusebrdf
// f_d(v,l) = σ/π * 1 / { |n⋅v||n⋅l| } ∫_Ω D(m,α) G(v,l,m) (v⋅m) (l⋅m) dm
//
// simplest approximation
// float Fd_Lambert() {
// return 1.0 / PI;
// }
//
// vec3 Fd = diffuseColor * Fd_Lambert();
//
// Disney approximation
// See https://google.github.io/filament/Filament.html#citation-burley12
// minimal quality difference
fn Fd_Burley(roughness: f32, NoV: f32, NoL: f32, LoH: f32) -> f32 {
let f90 = 0.5 + 2.0 * roughness * LoH * LoH;
let lightScatter = F_Schlick(1.0, f90, NoL);
let viewScatter = F_Schlick(1.0, f90, NoV);
return lightScatter * viewScatter * (1.0 / Utils::PI);
}
// From https://www.unrealengine.com/en-US/blog/physically-based-shading-on-mobile
fn EnvBRDFApprox(f0: vec3<f32>, perceptual_roughness: f32, NoV: f32) -> vec3<f32> {
let c0 = vec4<f32>(-1.0, -0.0275, -0.572, 0.022);
let c1 = vec4<f32>(1.0, 0.0425, 1.04, -0.04);
let r = perceptual_roughness * c0 + c1;
let a004 = min(r.x * r.x, exp2(-9.28 * NoV)) * r.x + r.y;
let AB = vec2<f32>(-1.04, 1.04) * a004 + r.zw;
return f0 * AB.x + AB.y;
}
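// NOTE: c0/c1 and the exp2 term are Karis' analytic fit to the preintegrated environment BRDF
// (the split-sum DFG lookup): AB.x is the scale applied to f0 and AB.y the bias, so the result
// stands in for an environment BRDF LUT without a texture fetch.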
fn perceptualRoughnessToRoughness(perceptualRoughness: f32) -> f32 {
// clamp perceptual roughness to prevent precision problems
// According to Filament design 0.089 is recommended for mobile
// Filament uses 0.045 for non-mobile
let clampedPerceptualRoughness = clamp(perceptualRoughness, 0.089, 1.0);
return clampedPerceptualRoughness * clampedPerceptualRoughness;
}
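// Worked example: perceptualRoughness = 0.5 maps to roughness = 0.25, while inputs below the
// 0.089 clamp (e.g. 0.0) all map to ≈ 0.0079, avoiding the precision issues noted above.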
// from https://64.github.io/tonemapping/
// reinhard on RGB oversaturates colors
fn reinhard(color: vec3<f32>) -> vec3<f32> {
return color / (1.0 + color);
}
fn reinhard_extended(color: vec3<f32>, max_white: f32) -> vec3<f32> {
let numerator = color * (1.0 + (color / vec3<f32>(max_white * max_white)));
return numerator / (1.0 + color);
}
// luminance coefficients from Rec. 709.
// https://en.wikipedia.org/wiki/Rec._709
fn luminance(v: vec3<f32>) -> f32 {
return dot(v, vec3<f32>(0.2126, 0.7152, 0.0722));
}
fn change_luminance(c_in: vec3<f32>, l_out: f32) -> vec3<f32> {
let l_in = luminance(c_in);
return c_in * (l_out / l_in);
}
fn reinhard_luminance(color: vec3<f32>) -> vec3<f32> {
let l_old = luminance(color);
let l_new = l_old / (1.0 + l_old);
return change_luminance(color, l_new);
}
fn reinhard_extended_luminance(color: vec3<f32>, max_white_l: f32) -> vec3<f32> {
let l_old = luminance(color);
let numerator = l_old * (1.0 + (l_old / (max_white_l * max_white_l)));
let l_new = numerator / (1.0 + l_old);
return change_luminance(color, l_new);
}
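// Worked example for reinhard_luminance: an HDR color of vec3(4.0) has luminance 4.0
// (the Rec. 709 weights sum to 1.0), so l_new = 4 / 5 = 0.8 and the color is rescaled by
// 0.8 / 4 = 0.2 to vec3(0.8), compressing highlights while preserving hue.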
fn point_light(
world_position: vec3<f32>, light: ViewTypes::PointLight, roughness: f32, NdotV: f32, N: vec3<f32>, V: vec3<f32>,
R: vec3<f32>, F0: vec3<f32>, diffuseColor: vec3<f32>
) -> vec3<f32> {
let light_to_frag = light.position_radius.xyz - world_position.xyz;
let distance_square = dot(light_to_frag, light_to_frag);
let rangeAttenuation =
getDistanceAttenuation(distance_square, light.color_inverse_square_range.w);
// Specular.
// Representative Point Area Lights.
// see http://blog.selfshadow.com/publications/s2013-shading-course/karis/s2013_pbs_epic_notes_v2.pdf p14-16
let a = roughness;
let centerToRay = dot(light_to_frag, R) * R - light_to_frag;
let closestPoint = light_to_frag + centerToRay * Utils::saturate(light.position_radius.w * inverseSqrt(dot(centerToRay, centerToRay)));
let LspecLengthInverse = inverseSqrt(dot(closestPoint, closestPoint));
let normalizationFactor = a / Utils::saturate(a + (light.position_radius.w * 0.5 * LspecLengthInverse));
let specularIntensity = normalizationFactor * normalizationFactor;
var L: vec3<f32> = closestPoint * LspecLengthInverse; // equivalent to normalize(closestPoint)
var H: vec3<f32> = normalize(L + V);
var NoL: f32 = Utils::saturate(dot(N, L));
var NoH: f32 = Utils::saturate(dot(N, H));
var LoH: f32 = Utils::saturate(dot(L, H));
let specular_light = specular(F0, roughness, H, NdotV, NoL, NoH, LoH, specularIntensity);
// Diffuse.
// Comes after specular since its NoL is used in the lighting equation.
L = normalize(light_to_frag);
H = normalize(L + V);
NoL = Utils::saturate(dot(N, L));
NoH = Utils::saturate(dot(N, H));
LoH = Utils::saturate(dot(L, H));
let diffuse = diffuseColor * Fd_Burley(roughness, NdotV, NoL, LoH);
// See https://google.github.io/filament/Filament.html#mjx-eqn-pointLightLuminanceEquation
// Lout = f(v,l) Φ / { 4 π d^2 }⟨n⋅l⟩
// where
// f(v,l) = (f_d(v,l) + f_r(v,l)) * light_color
// Φ is luminous power in lumens
// our rangeAttenuation = 1 / d^2 multiplied with an attenuation factor for smoothing at the edge of the non-physical maximum light radius
// For a point light, luminous intensity, I, in lumens per steradian is given by:
// I = Φ / 4 π
// The derivation of this can be seen here: https://google.github.io/filament/Filament.html#mjx-eqn-pointLightLuminousPower
// NOTE: light.color.rgb is premultiplied with light.intensity / 4 π (which would be the luminous intensity) on the CPU
// TODO compensate for energy loss https://google.github.io/filament/Filament.html#materialsystem/improvingthebrdfs/energylossinspecularreflectance
return ((diffuse + specular_light) * light.color_inverse_square_range.rgb) * (rangeAttenuation * NoL);
}
fn spot_light(
world_position: vec3<f32>, light: ViewTypes::PointLight, roughness: f32, NdotV: f32, N: vec3<f32>, V: vec3<f32>,
R: vec3<f32>, F0: vec3<f32>, diffuseColor: vec3<f32>
) -> vec3<f32> {
// reuse the point light calculations
let point_light = point_light(world_position, light, roughness, NdotV, N, V, R, F0, diffuseColor);
// reconstruct spot dir from x/z and y-direction flag
var spot_dir = vec3<f32>(light.light_custom_data.x, 0.0, light.light_custom_data.y);
spot_dir.y = sqrt(1.0 - spot_dir.x * spot_dir.x - spot_dir.z * spot_dir.z);
if ((light.flags & ViewTypes::POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE) != 0u) {
spot_dir.y = -spot_dir.y;
}
let light_to_frag = light.position_radius.xyz - world_position.xyz;
// calculate attenuation based on filament formula https://google.github.io/filament/Filament.html#listing_glslpunctuallight
// spot_scale and spot_offset have been precomputed
// note we normalize here to get "l" from the filament listing. spot_dir is already normalized
let cd = dot(-spot_dir, normalize(light_to_frag));
let attenuation = Utils::saturate(cd * light.light_custom_data.z + light.light_custom_data.w);
let spot_attenuation = attenuation * attenuation;
return point_light * spot_attenuation;
}
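// NOTE on the precomputed terms (a sketch of the CPU side, following the Filament punctual
// light listing; the exact host code lives outside this file): with inner/outer cone angles,
//   spot_scale  = 1.0 / max(cos(inner) - cos(outer), 1e-4)   -> light_custom_data.z
//   spot_offset = -cos(outer) * spot_scale                   -> light_custom_data.w
// so saturate(cd * spot_scale + spot_offset) ramps from 0 at the outer cone to 1 at the inner cone.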
fn directional_light(light: ViewTypes::DirectionalLight, roughness: f32, NdotV: f32, normal: vec3<f32>, view: vec3<f32>, R: vec3<f32>, F0: vec3<f32>, diffuseColor: vec3<f32>) -> vec3<f32> {
let incident_light = light.direction_to_light.xyz;
let half_vector = normalize(incident_light + view);
let NoL = Utils::saturate(dot(normal, incident_light));
let NoH = Utils::saturate(dot(normal, half_vector));
let LoH = Utils::saturate(dot(incident_light, half_vector));
let diffuse = diffuseColor * Fd_Burley(roughness, NdotV, NoL, LoH);
let specularIntensity = 1.0;
let specular_light = specular(F0, roughness, half_vector, NdotV, NoL, NoH, LoH, specularIntensity);
return (specular_light + diffuse) * light.color.rgb * NoL;
}

View File

@@ -0,0 +1,40 @@
#define_import_path bevy_pbr::pbr_types
struct StandardMaterial {
base_color: vec4<f32>,
emissive: vec4<f32>,
perceptual_roughness: f32,
metallic: f32,
reflectance: f32,
// 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options.
flags: u32,
alpha_cutoff: f32,
};
const STANDARD_MATERIAL_FLAGS_BASE_COLOR_TEXTURE_BIT: u32 = 1u;
const STANDARD_MATERIAL_FLAGS_EMISSIVE_TEXTURE_BIT: u32 = 2u;
const STANDARD_MATERIAL_FLAGS_METALLIC_ROUGHNESS_TEXTURE_BIT: u32 = 4u;
const STANDARD_MATERIAL_FLAGS_OCCLUSION_TEXTURE_BIT: u32 = 8u;
const STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT: u32 = 16u;
const STANDARD_MATERIAL_FLAGS_UNLIT_BIT: u32 = 32u;
const STANDARD_MATERIAL_FLAGS_ALPHA_MODE_OPAQUE: u32 = 64u;
const STANDARD_MATERIAL_FLAGS_ALPHA_MODE_MASK: u32 = 128u;
const STANDARD_MATERIAL_FLAGS_ALPHA_MODE_BLEND: u32 = 256u;
const STANDARD_MATERIAL_FLAGS_TWO_COMPONENT_NORMAL_MAP: u32 = 512u;
const STANDARD_MATERIAL_FLAGS_FLIP_NORMAL_MAP_Y: u32 = 1024u;
// Creates a StandardMaterial with default values
fn standard_material_new() -> StandardMaterial {
var material: StandardMaterial;
// NOTE: Keep in-sync with src/pbr_material.rs!
material.base_color = vec4<f32>(1.0, 1.0, 1.0, 1.0);
material.emissive = vec4<f32>(0.0, 0.0, 0.0, 1.0);
material.perceptual_roughness = 0.089;
material.metallic = 0.01;
material.reflectance = 0.5;
material.flags = STANDARD_MATERIAL_FLAGS_ALPHA_MODE_OPAQUE;
material.alpha_cutoff = 0.5;
return material;
}
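// Usage sketch: 'flags' is a bitfield, so options are tested with a bitwise AND against the
// constants above, e.g.
//   if ((material.flags & STANDARD_MATERIAL_FLAGS_UNLIT_BIT) != 0u) { /* skip lighting */ }
// mirroring the alpha-mode checks in the PBR fragment shader.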

View File

@@ -0,0 +1,137 @@
#define_import_path bevy_pbr::shadows
#import bevy_pbr::mesh_view_types as Types
#import bevy_pbr::mesh_view_bindings as Bindings
fn fetch_point_shadow(light_id: u32, frag_position: vec4<f32>, surface_normal: vec3<f32>) -> f32 {
let light = Bindings::point_lights.data[light_id];
// because the shadow maps align with the axes and the frustum planes are at 45 degrees
// we can get the worldspace depth by taking the largest absolute axis
let surface_to_light = light.position_radius.xyz - frag_position.xyz;
let surface_to_light_abs = abs(surface_to_light);
let distance_to_light = max(surface_to_light_abs.x, max(surface_to_light_abs.y, surface_to_light_abs.z));
// The normal bias here is already scaled by the texel size at 1 world unit from the light.
// The texel size increases proportionally with distance from the light so multiplying by
// distance to light scales the normal bias to the texel size at the fragment distance.
let normal_offset = light.shadow_normal_bias * distance_to_light * surface_normal.xyz;
let depth_offset = light.shadow_depth_bias * normalize(surface_to_light.xyz);
let offset_position = frag_position.xyz + normal_offset + depth_offset;
// similar largest-absolute-axis trick as above, but now with the offset fragment position
let frag_ls = light.position_radius.xyz - offset_position.xyz;
let abs_position_ls = abs(frag_ls);
let major_axis_magnitude = max(abs_position_ls.x, max(abs_position_ls.y, abs_position_ls.z));
// NOTE: These simplifications come from multiplying:
// projection * vec4(0, 0, -major_axis_magnitude, 1.0)
// and keeping only the terms that have any impact on the depth.
// Projection-agnostic approach:
let zw = -major_axis_magnitude * light.light_custom_data.xy + light.light_custom_data.zw;
let depth = zw.x / zw.y;
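// NOTE: light.light_custom_data.xy/zw hold the four projection-matrix terms that influence
// clip z and w, so zw above is (clip.z, clip.w) for the point (0, 0, -major_axis_magnitude, 1)
// and depth is the corresponding NDC depth used for the comparison sampler.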
// do the lookup, using HW PCF and comparison
// NOTE: Due to the non-uniform control flow above, we must use the Level variant of
// textureSampleCompare to avoid undefined behaviour due to some of the fragments in
// a quad (2x2 fragments) being processed not being sampled, and this messing with
// mip-mapping functionality. The shadow maps have no mipmaps so Level just samples
// from LOD 0.
#ifdef NO_ARRAY_TEXTURES_SUPPORT
return textureSampleCompare(Bindings::point_shadow_textures, Bindings::point_shadow_textures_sampler, frag_ls, depth);
#else
return textureSampleCompareLevel(Bindings::point_shadow_textures, Bindings::point_shadow_textures_sampler, frag_ls, i32(light_id), depth);
#endif
}
fn fetch_spot_shadow(light_id: u32, frag_position: vec4<f32>, surface_normal: vec3<f32>) -> f32 {
let light = Bindings::point_lights.data[light_id];
let surface_to_light = light.position_radius.xyz - frag_position.xyz;
// construct the light view matrix
var spot_dir = vec3<f32>(light.light_custom_data.x, 0.0, light.light_custom_data.y);
// reconstruct spot dir from x/z and y-direction flag
spot_dir.y = sqrt(1.0 - spot_dir.x * spot_dir.x - spot_dir.z * spot_dir.z);
if ((light.flags & Types::POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE) != 0u) {
spot_dir.y = -spot_dir.y;
}
// view matrix z_axis is the reverse of transform.forward()
let fwd = -spot_dir;
let distance_to_light = dot(fwd, surface_to_light);
let offset_position =
-surface_to_light
+ (light.shadow_depth_bias * normalize(surface_to_light))
+ (surface_normal.xyz * light.shadow_normal_bias) * distance_to_light;
// the construction of the up and right vectors needs to precisely mirror the code
// in render/light.rs:spot_light_view_matrix
var sign = -1.0;
if (fwd.z >= 0.0) {
sign = 1.0;
}
let a = -1.0 / (fwd.z + sign);
let b = fwd.x * fwd.y * a;
let up_dir = vec3<f32>(1.0 + sign * fwd.x * fwd.x * a, sign * b, -sign * fwd.x);
let right_dir = vec3<f32>(-b, -sign - fwd.y * fwd.y * a, fwd.y);
let light_inv_rot = mat3x3<f32>(right_dir, up_dir, fwd);
// because the matrix is a pure rotation matrix, the inverse is just the transpose, and to calculate
// the product of the transpose with a vector we can just post-multiply instead of pre-multiplying.
// this allows us to keep the matrix construction code identical between CPU and GPU.
let projected_position = offset_position * light_inv_rot;
// divide xy by perspective matrix "f" and by -projected.z (projected.z is -projection matrix's w)
// to get ndc coordinates
let f_div_minus_z = 1.0 / (light.spot_light_tan_angle * -projected_position.z);
let shadow_xy_ndc = projected_position.xy * f_div_minus_z;
// convert to uv coordinates
let shadow_uv = shadow_xy_ndc * vec2<f32>(0.5, -0.5) + vec2<f32>(0.5, 0.5);
// 0.1 must match POINT_LIGHT_NEAR_Z
let depth = 0.1 / -projected_position.z;
#ifdef NO_ARRAY_TEXTURES_SUPPORT
return textureSampleCompare(Bindings::directional_shadow_textures, Bindings::directional_shadow_textures_sampler,
shadow_uv, depth);
#else
return textureSampleCompareLevel(Bindings::directional_shadow_textures, Bindings::directional_shadow_textures_sampler,
shadow_uv, i32(light_id) + Bindings::lights.spot_light_shadowmap_offset, depth);
#endif
}
fn fetch_directional_shadow(light_id: u32, frag_position: vec4<f32>, surface_normal: vec3<f32>) -> f32 {
let light = Bindings::lights.directional_lights[light_id];
// The normal bias is scaled to the texel size.
let normal_offset = light.shadow_normal_bias * surface_normal.xyz;
let depth_offset = light.shadow_depth_bias * light.direction_to_light.xyz;
let offset_position = vec4<f32>(frag_position.xyz + normal_offset + depth_offset, frag_position.w);
let offset_position_clip = light.view_projection * offset_position;
if (offset_position_clip.w <= 0.0) {
return 1.0;
}
let offset_position_ndc = offset_position_clip.xyz / offset_position_clip.w;
// No shadow outside the orthographic projection volume
if (any(offset_position_ndc.xy < vec2<f32>(-1.0)) || offset_position_ndc.z < 0.0
|| any(offset_position_ndc > vec3<f32>(1.0))) {
return 1.0;
}
// compute texture coordinates for shadow lookup, compensating for the Y-flip difference
// between the NDC and texture coordinates
let flip_correction = vec2<f32>(0.5, -0.5);
let light_local = offset_position_ndc.xy * flip_correction + vec2<f32>(0.5, 0.5);
let depth = offset_position_ndc.z;
// do the lookup, using HW PCF and comparison
// NOTE: Due to non-uniform control flow above, we must use the level variant of the texture
// sampler to avoid use of implicit derivatives causing possible undefined behavior.
#ifdef NO_ARRAY_TEXTURES_SUPPORT
return textureSampleCompareLevel(Bindings::directional_shadow_textures, Bindings::directional_shadow_textures_sampler, light_local, depth);
#else
return textureSampleCompareLevel(Bindings::directional_shadow_textures, Bindings::directional_shadow_textures_sampler, light_local, i32(light_id), depth);
#endif
}

View File

@@ -0,0 +1,41 @@
#define_import_path bevy_pbr::skinning
#ifdef SKINNED
@group(2) @binding(1)
var<uniform> joint_matrices: SkinnedMesh;
fn skin_model(
indexes: vec4<u32>,
weights: vec4<f32>,
) -> mat4x4<f32> {
return weights.x * joint_matrices.data[indexes.x]
+ weights.y * joint_matrices.data[indexes.y]
+ weights.z * joint_matrices.data[indexes.z]
+ weights.w * joint_matrices.data[indexes.w];
}
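// Computes inverse(in)^T via the cross-product (adjugate) form: the columns of the inverse
// transpose are cross(in[1], in[2]) / det, cross(in[2], in[0]) / det, cross(in[0], in[1]) / det,
// with det = dot(in[2], cross(in[0], in[1])). Used below to transform normals by a skinned model
// matrix without picking up its scale.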
fn inverse_transpose_3x3(in: mat3x3<f32>) -> mat3x3<f32> {
let x = cross(in[1], in[2]);
let y = cross(in[2], in[0]);
let z = cross(in[0], in[1]);
let det = dot(in[2], z);
return mat3x3<f32>(
x / det,
y / det,
z / det
);
}
fn skin_normals(
model: mat4x4<f32>,
normal: vec3<f32>,
) -> vec3<f32> {
return inverse_transpose_3x3(mat3x3<f32>(
model[0].xyz,
model[1].xyz,
model[2].xyz
)) * normal;
}
#endif

View File

@@ -0,0 +1,23 @@
#define_import_path bevy_pbr::utils
const PI: f32 = 3.141592653589793;
fn saturate(value: f32) -> f32 {
return clamp(value, 0.0, 1.0);
}
fn hsv2rgb(hue: f32, saturation: f32, value: f32) -> vec3<f32> {
let rgb = clamp(
abs(
((hue * 6.0 + vec3<f32>(0.0, 4.0, 2.0)) % 6.0) - 3.0
) - 1.0,
vec3<f32>(0.0),
vec3<f32>(1.0)
);
return value * mix( vec3<f32>(1.0), rgb, vec3<f32>(saturation));
}
fn random1D(s: f32) -> f32 {
return fract(sin(s * 12.9898) * 43758.5453123);
}

View File

@@ -0,0 +1,44 @@
#import bevy_pbr::mesh_types
#import bevy_pbr::mesh_view_bindings
@group(1) @binding(0)
var<uniform> mesh: Mesh;
#ifdef SKINNED
@group(1) @binding(1)
var<uniform> joint_matrices: SkinnedMesh;
#import bevy_pbr::skinning
#endif
// NOTE: Bindings must come before functions that use them!
#import bevy_pbr::mesh_functions
struct Vertex {
@location(0) position: vec3<f32>,
#ifdef SKINNED
@location(4) joint_indexes: vec4<u32>,
@location(5) joint_weights: vec4<f32>,
#endif
};
struct VertexOutput {
@builtin(position) clip_position: vec4<f32>,
};
@vertex
fn vertex(vertex: Vertex) -> VertexOutput {
#ifdef SKINNED
let model = skin_model(vertex.joint_indexes, vertex.joint_weights);
#else
let model = mesh.model;
#endif
var out: VertexOutput;
out.clip_position = mesh_position_local_to_clip(model, vec4<f32>(vertex.position, 1.0));
return out;
}
@fragment
fn fragment() -> @location(0) vec4<f32> {
return vec4<f32>(1.0, 1.0, 1.0, 1.0);
}