Vendor dependencies for 0.3.0 release

This commit is contained in:
2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

578
vendor/bevy_pbr/src/ssr/mod.rs vendored Normal file
View File

@@ -0,0 +1,578 @@
//! Screen space reflections implemented via raymarching.
use bevy_app::{App, Plugin};
use bevy_asset::{load_internal_asset, weak_handle, Handle};
use bevy_core_pipeline::{
core_3d::{
graph::{Core3d, Node3d},
DEPTH_TEXTURE_SAMPLING_SUPPORTED,
},
fullscreen_vertex_shader,
prepass::{DeferredPrepass, DepthPrepass, MotionVectorPrepass, NormalPrepass},
};
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::{
component::Component,
entity::Entity,
query::{Has, QueryItem, With},
reflect::ReflectComponent,
resource::Resource,
schedule::IntoScheduleConfigs as _,
system::{lifetimeless::Read, Commands, Query, Res, ResMut},
world::{FromWorld, World},
};
use bevy_image::BevyDefault as _;
use bevy_reflect::{std_traits::ReflectDefault, Reflect};
use bevy_render::render_graph::RenderGraph;
use bevy_render::{
extract_component::{ExtractComponent, ExtractComponentPlugin},
render_graph::{NodeRunError, RenderGraphApp, RenderGraphContext, ViewNode, ViewNodeRunner},
render_resource::{
binding_types, AddressMode, BindGroupEntries, BindGroupLayout, BindGroupLayoutEntries,
CachedRenderPipelineId, ColorTargetState, ColorWrites, DynamicUniformBuffer, FilterMode,
FragmentState, Operations, PipelineCache, RenderPassColorAttachment, RenderPassDescriptor,
RenderPipelineDescriptor, Sampler, SamplerBindingType, SamplerDescriptor, Shader,
ShaderStages, ShaderType, SpecializedRenderPipeline, SpecializedRenderPipelines,
TextureFormat, TextureSampleType,
},
renderer::{RenderAdapter, RenderContext, RenderDevice, RenderQueue},
view::{ExtractedView, Msaa, ViewTarget, ViewUniformOffset},
Render, RenderApp, RenderSet,
};
use bevy_utils::{once, prelude::default};
use tracing::info;
use crate::{
binding_arrays_are_usable, graph::NodePbr, prelude::EnvironmentMapLight,
MeshPipelineViewLayoutKey, MeshPipelineViewLayouts, MeshViewBindGroup, RenderViewLightProbes,
ViewEnvironmentMapUniformOffset, ViewFogUniformOffset, ViewLightProbesUniformOffset,
ViewLightsUniformOffset,
};
/// Weak handle for `ssr.wgsl`, registered in [`ScreenSpaceReflectionsPlugin::build`].
const SSR_SHADER_HANDLE: Handle<Shader> = weak_handle!("0b559df2-0d61-4f53-bf62-aea16cf32787");
/// Weak handle for `raymarch.wgsl`, registered in [`ScreenSpaceReflectionsPlugin::build`].
const RAYMARCH_SHADER_HANDLE: Handle<Shader> = weak_handle!("798cc6fc-6072-4b6c-ab4f-83905fa4a19e");
/// Enables screen-space reflections for a camera.
///
/// Screen-space reflections are currently only supported with deferred rendering.
///
/// This plugin registers the SSR shaders, the [`ScreenSpaceReflections`]
/// extraction, the pipeline/settings preparation systems, and the
/// [`ScreenSpaceReflectionsNode`] render graph node.
pub struct ScreenSpaceReflectionsPlugin;
/// Add this component to a camera to enable *screen-space reflections* (SSR).
///
/// Screen-space reflections currently require deferred rendering in order to
/// appear. Therefore, they also need the [`DepthPrepass`] and [`DeferredPrepass`]
/// components, which are inserted automatically.
///
/// SSR currently performs no roughness filtering for glossy reflections, so
/// only very smooth surfaces will reflect objects in screen space. You can
/// adjust the `perceptual_roughness_threshold` in order to tune the threshold
/// below which screen-space reflections will be traced.
///
/// As with all screen-space techniques, SSR can only reflect objects on screen.
/// When objects leave the camera, they will disappear from reflections.
/// An alternative that doesn't suffer from this problem is the combination of
/// a [`LightProbe`](crate::LightProbe) and [`EnvironmentMapLight`]. The advantage of SSR is
/// that it can reflect all objects, not just static ones.
///
/// SSR is an approximation technique and produces artifacts in some situations.
/// Hand-tuning the settings in this component will likely be useful.
///
/// Screen-space reflections are presently unsupported on WebGL 2 because of a
/// bug whereby Naga doesn't generate correct GLSL when sampling depth buffers,
/// which is required for screen-space raymarching.
#[derive(Clone, Copy, Component, Reflect)]
#[reflect(Component, Default, Clone)]
#[require(DepthPrepass, DeferredPrepass)]
#[doc(alias = "Ssr")]
pub struct ScreenSpaceReflections {
    /// The maximum PBR roughness level that will enable screen space
    /// reflections.
    ///
    /// The default is 0.1.
    pub perceptual_roughness_threshold: f32,
    /// When marching the depth buffer, we only have 2.5D information and don't
    /// know how thick surfaces are. We shall assume that the depth buffer
    /// fragments are cuboids with a constant thickness defined by this
    /// parameter.
    ///
    /// The default is 0.25.
    pub thickness: f32,
    /// The number of steps to be taken at regular intervals to find an initial
    /// intersection. Must not be zero.
    ///
    /// Higher values result in higher-quality reflections, because the
    /// raymarching shader is less likely to miss objects. However, they take
    /// more GPU time.
    ///
    /// The default is 16.
    pub linear_steps: u32,
    /// Exponent to be applied in the linear part of the march.
    ///
    /// A value of 1.0 will result in equidistant steps, and higher values will
    /// compress the earlier steps, and expand the later ones. This might be
    /// desirable in order to get more detail close to objects.
    ///
    /// For optimal performance, this should be a small unsigned integer, such
    /// as 1 or 2.
    ///
    /// The default is 1.0.
    pub linear_march_exponent: f32,
    /// Number of steps in a bisection (binary search) to perform once the
    /// linear search has found an intersection. Helps narrow down the hit,
    /// increasing the chance of the secant method finding an accurate hit
    /// point.
    ///
    /// The default is 4.
    pub bisection_steps: u32,
    /// Approximate the root position using the secant method—by solving for
    /// line-line intersection between the ray approach rate and the surface
    /// gradient.
    ///
    /// The default is `true`.
    pub use_secant: bool,
}
/// A version of [`ScreenSpaceReflections`] for upload to the GPU.
///
/// Produced from [`ScreenSpaceReflections`] during extraction (see the
/// [`ExtractComponent`] impl and the `From` conversion below) and written into
/// the [`ScreenSpaceReflectionsBuffer`] by `prepare_ssr_settings`.
///
/// For more information on these fields, see the corresponding documentation in
/// [`ScreenSpaceReflections`].
#[derive(Clone, Copy, Component, ShaderType)]
pub struct ScreenSpaceReflectionsUniform {
    perceptual_roughness_threshold: f32,
    thickness: f32,
    linear_steps: u32,
    linear_march_exponent: f32,
    bisection_steps: u32,
    /// A boolean converted to a `u32`.
    use_secant: u32,
}
/// The node in the render graph that traces screen space reflections.
///
/// Implemented as a fullscreen post-process pass over the view target; see the
/// [`ViewNode`] impl below.
#[derive(Default)]
pub struct ScreenSpaceReflectionsNode;
/// Identifies which screen space reflections render pipeline a view needs.
///
/// Inserted on each applicable view entity by `prepare_ssr_pipelines`.
#[derive(Component, Deref, DerefMut)]
pub struct ScreenSpaceReflectionsPipelineId(pub CachedRenderPipelineId);
/// Information relating to the render pipeline for the screen space reflections
/// shader.
#[derive(Resource)]
pub struct ScreenSpaceReflectionsPipeline {
    /// Cached mesh view bind group layouts (bind group 0), keyed by the view's
    /// prepass configuration.
    mesh_view_layouts: MeshPipelineViewLayouts,
    /// Bilinear sampler for the post-process source color texture.
    color_sampler: Sampler,
    /// Bilinear sampler for the depth prepass texture.
    depth_linear_sampler: Sampler,
    /// Nearest-neighbor sampler for the depth prepass texture.
    depth_nearest_sampler: Sampler,
    /// Layout of bind group 1: the source color texture followed by the three
    /// samplers above.
    bind_group_layout: BindGroupLayout,
    /// Whether binding arrays are usable on this device/adapter; enables the
    /// `MULTIPLE_LIGHT_PROBES_IN_ARRAY` shader define when specializing.
    binding_arrays_are_usable: bool,
}
/// A GPU buffer that stores the screen space reflection settings for each view.
///
/// Filled every frame by `prepare_ssr_settings`.
#[derive(Resource, Default, Deref, DerefMut)]
pub struct ScreenSpaceReflectionsBuffer(pub DynamicUniformBuffer<ScreenSpaceReflectionsUniform>);
/// A component that stores the offset within the
/// [`ScreenSpaceReflectionsBuffer`] for each view.
///
/// Written by `prepare_ssr_settings`; views with no SSR uniform receive
/// offset 0.
#[derive(Component, Default, Deref, DerefMut)]
pub struct ViewScreenSpaceReflectionsUniformOffset(u32);
/// Identifies a specific configuration of the SSR pipeline shader.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct ScreenSpaceReflectionsPipelineKey {
    /// Selects the mesh view bind group layout (bind group 0).
    mesh_pipeline_view_key: MeshPipelineViewLayoutKey,
    /// Whether the view target is HDR; selects the color target format.
    is_hdr: bool,
    /// Whether the view has environment-map light probes; enables the
    /// `ENVIRONMENT_MAP` shader define.
    has_environment_maps: bool,
}
impl Plugin for ScreenSpaceReflectionsPlugin {
    fn build(&self, app: &mut App) {
        // Register the two SSR shaders under their weak handles.
        load_internal_asset!(app, SSR_SHADER_HANDLE, "ssr.wgsl", Shader::from_wgsl);
        load_internal_asset!(
            app,
            RAYMARCH_SHADER_HANDLE,
            "raymarch.wgsl",
            Shader::from_wgsl
        );

        // Extract `ScreenSpaceReflections` into the render world (converted to
        // `ScreenSpaceReflectionsUniform`; see the `ExtractComponent` impl).
        app.register_type::<ScreenSpaceReflections>()
            .add_plugins(ExtractComponentPlugin::<ScreenSpaceReflections>::default());

        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };

        render_app
            .init_resource::<ScreenSpaceReflectionsBuffer>()
            .add_systems(Render, prepare_ssr_pipelines.in_set(RenderSet::Prepare))
            .add_systems(
                Render,
                prepare_ssr_settings.in_set(RenderSet::PrepareResources),
            )
            .add_render_graph_node::<ViewNodeRunner<ScreenSpaceReflectionsNode>>(
                Core3d,
                NodePbr::ScreenSpaceReflections,
            );
    }

    fn finish(&self, app: &mut App) {
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };

        // Pipeline resources are created here rather than in `build` because
        // `ScreenSpaceReflectionsPipeline::from_world` needs the
        // `RenderDevice` and `RenderAdapter` resources.
        render_app
            .init_resource::<ScreenSpaceReflectionsPipeline>()
            .init_resource::<SpecializedRenderPipelines<ScreenSpaceReflectionsPipeline>>();

        // only reference the default deferred lighting pass
        // if it has been added
        let has_default_deferred_lighting_pass = render_app
            .world_mut()
            .resource_mut::<RenderGraph>()
            .sub_graph(Core3d)
            .get_node_state(NodePbr::DeferredLightingPass)
            .is_ok();

        // Schedule SSR after deferred lighting (when that node exists) and
        // before the main opaque pass.
        if has_default_deferred_lighting_pass {
            render_app.add_render_graph_edges(
                Core3d,
                (
                    NodePbr::DeferredLightingPass,
                    NodePbr::ScreenSpaceReflections,
                    Node3d::MainOpaquePass,
                ),
            );
        } else {
            render_app.add_render_graph_edges(
                Core3d,
                (NodePbr::ScreenSpaceReflections, Node3d::MainOpaquePass),
            );
        }
    }
}
impl Default for ScreenSpaceReflections {
// Reasonable default values.
//
// These are from
// <https://gist.github.com/h3r2tic/9c8356bdaefbe80b1a22ae0aaee192db?permalink_comment_id=4552149#gistcomment-4552149>.
fn default() -> Self {
Self {
perceptual_roughness_threshold: 0.1,
linear_steps: 16,
bisection_steps: 4,
use_secant: true,
thickness: 0.25,
linear_march_exponent: 1.0,
}
}
}
impl ViewNode for ScreenSpaceReflectionsNode {
    // Per-view data needed by the SSR pass: the render target, every dynamic
    // uniform offset used by mesh view bind group 0, the prebuilt mesh view
    // bind group, and the pipeline ID recorded by `prepare_ssr_pipelines`.
    type ViewQuery = (
        Read<ViewTarget>,
        Read<ViewUniformOffset>,
        Read<ViewLightsUniformOffset>,
        Read<ViewFogUniformOffset>,
        Read<ViewLightProbesUniformOffset>,
        Read<ViewScreenSpaceReflectionsUniformOffset>,
        Read<ViewEnvironmentMapUniformOffset>,
        Read<MeshViewBindGroup>,
        Read<ScreenSpaceReflectionsPipelineId>,
    );

    fn run<'w>(
        &self,
        _: &mut RenderGraphContext,
        render_context: &mut RenderContext<'w>,
        (
            view_target,
            view_uniform_offset,
            view_lights_offset,
            view_fog_offset,
            view_light_probes_offset,
            view_ssr_offset,
            view_environment_map_offset,
            view_bind_group,
            ssr_pipeline_id,
        ): QueryItem<'w, Self::ViewQuery>,
        world: &'w World,
    ) -> Result<(), NodeRunError> {
        // Grab the render pipeline. If it hasn't finished compiling yet, skip
        // the pass for this frame.
        let pipeline_cache = world.resource::<PipelineCache>();
        let Some(render_pipeline) = pipeline_cache.get_render_pipeline(**ssr_pipeline_id) else {
            return Ok(());
        };

        // Set up a standard pair of postprocessing textures: the scene color
        // is read from `source` and the reflected result is written to
        // `destination`.
        let postprocess = view_target.post_process_write();

        // Create the bind group for this view (group 1: the source color
        // texture plus the color and depth samplers).
        let ssr_pipeline = world.resource::<ScreenSpaceReflectionsPipeline>();
        let ssr_bind_group = render_context.render_device().create_bind_group(
            "SSR bind group",
            &ssr_pipeline.bind_group_layout,
            &BindGroupEntries::sequential((
                postprocess.source,
                &ssr_pipeline.color_sampler,
                &ssr_pipeline.depth_linear_sampler,
                &ssr_pipeline.depth_nearest_sampler,
            )),
        );

        // Build the SSR render pass.
        let mut render_pass = render_context.begin_tracked_render_pass(RenderPassDescriptor {
            label: Some("SSR pass"),
            color_attachments: &[Some(RenderPassColorAttachment {
                view: postprocess.destination,
                resolve_target: None,
                ops: Operations::default(),
            })],
            depth_stencil_attachment: None,
            timestamp_writes: None,
            occlusion_query_set: None,
        });

        // Set bind groups. The dynamic offsets are supplied in the order the
        // mesh view layout declares its dynamic uniform bindings.
        render_pass.set_render_pipeline(render_pipeline);
        render_pass.set_bind_group(
            0,
            &view_bind_group.value,
            &[
                view_uniform_offset.offset,
                view_lights_offset.offset,
                view_fog_offset.offset,
                **view_light_probes_offset,
                **view_ssr_offset,
                **view_environment_map_offset,
            ],
        );

        // Perform the SSR render pass: a single fullscreen triangle.
        render_pass.set_bind_group(1, &ssr_bind_group, &[]);
        render_pass.draw(0..3, 0..1);

        Ok(())
    }
}
impl FromWorld for ScreenSpaceReflectionsPipeline {
    fn from_world(world: &mut World) -> Self {
        let mesh_view_layouts = world.resource::<MeshPipelineViewLayouts>().clone();
        let render_device = world.resource::<RenderDevice>();
        let render_adapter = world.resource::<RenderAdapter>();

        // Create the bind group layout (group 1 of the SSR shader): the
        // post-process source color texture followed by the three samplers
        // created below, bound sequentially at bindings 0..=3.
        let bind_group_layout = render_device.create_bind_group_layout(
            "SSR bind group layout",
            &BindGroupLayoutEntries::sequential(
                ShaderStages::FRAGMENT,
                (
                    binding_types::texture_2d(TextureSampleType::Float { filterable: true }),
                    binding_types::sampler(SamplerBindingType::Filtering),
                    binding_types::sampler(SamplerBindingType::Filtering),
                    binding_types::sampler(SamplerBindingType::NonFiltering),
                ),
            ),
        );

        // Create the samplers we need.

        // Bilinear sampler for reading the source color texture.
        let color_sampler = render_device.create_sampler(&SamplerDescriptor {
            label: "SSR color sampler".into(),
            address_mode_u: AddressMode::ClampToEdge,
            address_mode_v: AddressMode::ClampToEdge,
            mag_filter: FilterMode::Linear,
            min_filter: FilterMode::Linear,
            ..default()
        });

        // Bilinear depth sampler; the raymarch shader takes both filtered and
        // unfiltered depth taps (see `raymarch.wgsl`).
        let depth_linear_sampler = render_device.create_sampler(&SamplerDescriptor {
            label: "SSR depth linear sampler".into(),
            address_mode_u: AddressMode::ClampToEdge,
            address_mode_v: AddressMode::ClampToEdge,
            mag_filter: FilterMode::Linear,
            min_filter: FilterMode::Linear,
            ..default()
        });

        // Nearest-neighbor depth sampler, matching the `NonFiltering` layout
        // entry above.
        let depth_nearest_sampler = render_device.create_sampler(&SamplerDescriptor {
            label: "SSR depth nearest sampler".into(),
            address_mode_u: AddressMode::ClampToEdge,
            address_mode_v: AddressMode::ClampToEdge,
            mag_filter: FilterMode::Nearest,
            min_filter: FilterMode::Nearest,
            ..default()
        });

        Self {
            mesh_view_layouts,
            color_sampler,
            depth_linear_sampler,
            depth_nearest_sampler,
            bind_group_layout,
            binding_arrays_are_usable: binding_arrays_are_usable(render_device, render_adapter),
        }
    }
}
/// Sets up screen space reflection pipelines for each applicable view.
///
/// A view is applicable when it has an extracted
/// [`ScreenSpaceReflectionsUniform`] as well as the depth and deferred
/// prepasses. For each such view this specializes the SSR pipeline and records
/// the resulting ID in a [`ScreenSpaceReflectionsPipelineId`] component on the
/// view entity.
pub fn prepare_ssr_pipelines(
    mut commands: Commands,
    pipeline_cache: Res<PipelineCache>,
    mut pipelines: ResMut<SpecializedRenderPipelines<ScreenSpaceReflectionsPipeline>>,
    ssr_pipeline: Res<ScreenSpaceReflectionsPipeline>,
    views: Query<
        (
            Entity,
            &ExtractedView,
            Has<RenderViewLightProbes<EnvironmentMapLight>>,
            Has<NormalPrepass>,
            Has<MotionVectorPrepass>,
        ),
        (
            With<ScreenSpaceReflectionsUniform>,
            With<DepthPrepass>,
            With<DeferredPrepass>,
        ),
    >,
) {
    for (
        entity,
        extracted_view,
        has_environment_maps,
        has_normal_prepass,
        has_motion_vector_prepass,
    ) in &views
    {
        // SSR is only supported in the deferred pipeline, which has no MSAA
        // support. Thus we can assume MSAA is off.
        let mut mesh_pipeline_view_key = MeshPipelineViewLayoutKey::from(Msaa::Off)
            | MeshPipelineViewLayoutKey::DEPTH_PREPASS
            | MeshPipelineViewLayoutKey::DEFERRED_PREPASS;

        // Mirror the view's optional prepasses in the layout key so the
        // selected mesh view layout matches the view's bind group.
        mesh_pipeline_view_key.set(
            MeshPipelineViewLayoutKey::NORMAL_PREPASS,
            has_normal_prepass,
        );
        mesh_pipeline_view_key.set(
            MeshPipelineViewLayoutKey::MOTION_VECTOR_PREPASS,
            has_motion_vector_prepass,
        );

        // Build the pipeline.
        let pipeline_id = pipelines.specialize(
            &pipeline_cache,
            &ssr_pipeline,
            ScreenSpaceReflectionsPipelineKey {
                mesh_pipeline_view_key,
                is_hdr: extracted_view.hdr,
                has_environment_maps,
            },
        );

        // Note which pipeline ID was used.
        commands
            .entity(entity)
            .insert(ScreenSpaceReflectionsPipelineId(pipeline_id));
    }
}
/// Gathers up screen space reflection settings for each applicable view and
/// writes them into a GPU buffer.
pub fn prepare_ssr_settings(
    mut commands: Commands,
    views: Query<(Entity, Option<&ScreenSpaceReflectionsUniform>), With<ExtractedView>>,
    mut ssr_settings_buffer: ResMut<ScreenSpaceReflectionsBuffer>,
    render_device: Res<RenderDevice>,
    render_queue: Res<RenderQueue>,
) {
    // Size the buffer for the worst case of one uniform per extracted view;
    // bail out entirely if there is nothing to write.
    let view_count = views.iter().len();
    let Some(mut writer) =
        ssr_settings_buffer.get_writer(view_count, &render_device, &render_queue)
    else {
        return;
    };

    for (view_entity, maybe_uniform) in &views {
        // Views without SSR settings still receive an offset component; they
        // simply point at offset 0.
        let offset = maybe_uniform.map_or(0, |uniform| writer.write(uniform));
        commands
            .entity(view_entity)
            .insert(ViewScreenSpaceReflectionsUniformOffset(offset));
    }
}
impl ExtractComponent for ScreenSpaceReflections {
    type QueryData = Read<ScreenSpaceReflections>;

    type QueryFilter = ();

    // The render world receives the GPU-ready uniform form of the component.
    type Out = ScreenSpaceReflectionsUniform;

    fn extract_component(settings: QueryItem<'_, Self::QueryData>) -> Option<Self::Out> {
        // SSR requires sampling the depth texture while raymarching, which is
        // unsupported on some platforms (see the docs on
        // `ScreenSpaceReflections`). Extracting nothing disables the effect.
        if !DEPTH_TEXTURE_SAMPLING_SUPPORTED {
            // `once!` logs the message a single time instead of every frame.
            once!(info!(
                "Disabling screen-space reflections on this platform because depth textures \
                aren't supported correctly"
            ));
            return None;
        }

        Some((*settings).into())
    }
}
impl SpecializedRenderPipeline for ScreenSpaceReflectionsPipeline {
    type Key = ScreenSpaceReflectionsPipelineKey;

    /// Builds the SSR render pipeline descriptor for the given key.
    ///
    /// The pipeline is a fullscreen pass: the shared fullscreen vertex shader
    /// plus the SSR fragment shader, writing to the view target.
    fn specialize(&self, key: Self::Key) -> RenderPipelineDescriptor {
        // Bind group 0: the mesh view layout matching this view's prepasses.
        let mesh_view_layout = self
            .mesh_view_layouts
            .get_view_layout(key.mesh_pipeline_view_key);

        // SSR always requires the depth and deferred prepasses; the remaining
        // defines are enabled per key / device capability.
        let mut shader_defs = vec![
            "DEPTH_PREPASS".into(),
            "DEFERRED_PREPASS".into(),
            "SCREEN_SPACE_REFLECTIONS".into(),
        ];
        if key.has_environment_maps {
            shader_defs.push("ENVIRONMENT_MAP".into());
        }
        if self.binding_arrays_are_usable {
            shader_defs.push("MULTIPLE_LIGHT_PROBES_IN_ARRAY".into());
        }

        // Render into the view target's format, which depends on whether the
        // camera is HDR.
        let target_format = if key.is_hdr {
            ViewTarget::TEXTURE_FORMAT_HDR
        } else {
            TextureFormat::bevy_default()
        };

        RenderPipelineDescriptor {
            label: Some("SSR pipeline".into()),
            layout: vec![mesh_view_layout.clone(), self.bind_group_layout.clone()],
            vertex: fullscreen_vertex_shader::fullscreen_shader_vertex_state(),
            fragment: Some(FragmentState {
                shader: SSR_SHADER_HANDLE,
                shader_defs,
                entry_point: "fragment".into(),
                targets: vec![Some(ColorTargetState {
                    format: target_format,
                    blend: None,
                    write_mask: ColorWrites::ALL,
                })],
            }),
            push_constant_ranges: vec![],
            primitive: default(),
            depth_stencil: None,
            multisample: default(),
            zero_initialize_workgroup_memory: false,
        }
    }
}
impl From<ScreenSpaceReflections> for ScreenSpaceReflectionsUniform {
fn from(settings: ScreenSpaceReflections) -> Self {
Self {
perceptual_roughness_threshold: settings.perceptual_roughness_threshold,
thickness: settings.thickness,
linear_steps: settings.linear_steps,
linear_march_exponent: settings.linear_march_exponent,
bisection_steps: settings.bisection_steps,
use_secant: settings.use_secant as u32,
}
}
}

511
vendor/bevy_pbr/src/ssr/raymarch.wgsl vendored Normal file
View File

@@ -0,0 +1,511 @@
// Copyright (c) 2023 Tomasz Stachowiak
//
// This contribution is dual licensed under EITHER OF
//
// Apache License, Version 2.0, (http://www.apache.org/licenses/LICENSE-2.0)
// MIT license (http://opensource.org/licenses/MIT)
//
// at your option.
//
// This is a port of the original [`raymarch.hlsl`] to WGSL. It's deliberately
// kept as close as possible so that patches to the original `raymarch.hlsl`
// have the greatest chances of applying to this version.
//
// [`raymarch.hlsl`]:
// https://gist.github.com/h3r2tic/9c8356bdaefbe80b1a22ae0aaee192db
#define_import_path bevy_pbr::raymarch
#import bevy_pbr::mesh_view_bindings::depth_prepass_texture
#import bevy_pbr::view_transformations::{
direction_world_to_clip,
ndc_to_uv,
perspective_camera_near,
position_world_to_ndc,
}
// Allows us to sample from the depth buffer with bilinear filtering.
@group(1) @binding(2) var depth_linear_sampler: sampler;
// Allows us to sample from the depth buffer with nearest-neighbor filtering.
@group(1) @binding(3) var depth_nearest_sampler: sampler;
// Main code
// State for the hybrid root finder (linear march, then bisection, then an
// optional secant step) used by the depth ray march below.
struct HybridRootFinder {
    // Number of taps in the initial linear search.
    linear_steps: u32,
    // Number of binary-search refinement steps once a hit is bracketed.
    bisection_steps: u32,
    // Whether to finish with one application of the secant method.
    use_secant: bool,
    // Exponent applied to the normalized step position; 1.0 = equidistant steps.
    linear_march_exponent: f32,
    // Jitter for the first linear step, in units of one step; 1.0 = no jitter.
    jitter: f32,
    // Parametric range of the march along the `start..end` segment.
    min_t: f32,
    max_t: f32,
}
// Creates a root finder with `v` linear steps and defaults for everything
// else: no bisection, no secant refinement, equidistant steps (exponent 1.0),
// no jitter (1.0), and the full [0, 1] parametric range.
fn hybrid_root_finder_new_with_linear_steps(v: u32) -> HybridRootFinder {
    return HybridRootFinder(
        v,     // linear_steps
        0u,    // bisection_steps
        false, // use_secant
        1.0,   // linear_march_exponent
        1.0,   // jitter
        0.0,   // min_t
        1.0,   // max_t
    );
}
// Searches for a root of `distance_fn` along the segment `start..end`.
//
// Runs the linear march; when an intersection is bracketed, refines it with
// bisection and optionally one secant step. Returns true on an intersection.
// Outputs:
// * `hit_t`:  parametric position of the hit (or the conservative miss point)
// * `miss_t`: the last parametric position confirmed to be unoccluded
// * `hit_d`:  the distance sample at the hit (only written when intersected)
fn hybrid_root_finder_find_root(
    root_finder: ptr<function, HybridRootFinder>,
    start: vec3<f32>,
    end: vec3<f32>,
    distance_fn: ptr<function, DepthRaymarchDistanceFn>,
    hit_t: ptr<function, f32>,
    miss_t: ptr<function, f32>,
    hit_d: ptr<function, DistanceWithPenetration>,
) -> bool {
    let dir = end - start;

    var min_t = (*root_finder).min_t;
    var max_t = (*root_finder).max_t;

    var min_d = DistanceWithPenetration(0.0, false, 0.0);
    var max_d = DistanceWithPenetration(0.0, false, 0.0);

    // NOTE(review): `step_size` is unused; presumably kept to stay close to
    // the original `raymarch.hlsl`.
    let step_size = (max_t - min_t) / f32((*root_finder).linear_steps);

    var intersected = false;

    //
    // Ray march using linear steps

    if ((*root_finder).linear_steps > 0u) {
        // First tap (step 0), offset only by the jitter.
        let candidate_t = mix(
            min_t,
            max_t,
            pow(
                (*root_finder).jitter / f32((*root_finder).linear_steps),
                (*root_finder).linear_march_exponent
            )
        );

        let candidate = start + dir * candidate_t;
        let candidate_d = depth_raymarch_distance_fn_evaluate(distance_fn, candidate);
        // Negative distance means we're below the surface; `valid` filters out
        // samples that penetrate deeper than the thickness heuristic allows.
        intersected = candidate_d.distance < 0.0 && candidate_d.valid;

        if (intersected) {
            max_t = candidate_t;
            max_d = candidate_d;
            // The `[min_t .. max_t]` interval contains an intersection. End the linear search.
        } else {
            // No intersection yet. Carry on.
            min_t = candidate_t;
            min_d = candidate_d;

            for (var step = 1u; step < (*root_finder).linear_steps; step += 1u) {
                let candidate_t = mix(
                    (*root_finder).min_t,
                    (*root_finder).max_t,
                    pow(
                        (f32(step) + (*root_finder).jitter) / f32((*root_finder).linear_steps),
                        (*root_finder).linear_march_exponent
                    )
                );

                let candidate = start + dir * candidate_t;
                let candidate_d = depth_raymarch_distance_fn_evaluate(distance_fn, candidate);
                intersected = candidate_d.distance < 0.0 && candidate_d.valid;

                if (intersected) {
                    max_t = candidate_t;
                    max_d = candidate_d;
                    // The `[min_t .. max_t]` interval contains an intersection.
                    // End the linear search.
                    break;
                } else {
                    // No intersection yet. Carry on.
                    min_t = candidate_t;
                    min_d = candidate_d;
                }
            }
        }
    }

    // `min_t` is the last point confirmed to be unoccluded.
    *miss_t = min_t;
    *hit_t = min_t;

    //
    // Refine the hit using bisection

    if (intersected) {
        for (var step = 0u; step < (*root_finder).bisection_steps; step += 1u) {
            let mid_t = (min_t + max_t) * 0.5;
            let candidate = start + dir * mid_t;
            let candidate_d = depth_raymarch_distance_fn_evaluate(distance_fn, candidate);

            if (candidate_d.distance < 0.0 && candidate_d.valid) {
                // Intersection at the mid point. Refine the first half.
                max_t = mid_t;
                max_d = candidate_d;
            } else {
                // No intersection yet at the mid point. Refine the second half.
                min_t = mid_t;
                min_d = candidate_d;
            }
        }

        if ((*root_finder).use_secant) {
            // Finish with one application of the secant method: estimate the
            // zero crossing of the line through (min_t, min_d) and (max_t, max_d).
            let total_d = min_d.distance + -max_d.distance;

            let mid_t = mix(min_t, max_t, min_d.distance / total_d);
            let candidate = start + dir * mid_t;
            let candidate_d = depth_raymarch_distance_fn_evaluate(distance_fn, candidate);

            // Only accept the result of the secant method if it improves upon
            // the previous result.
            //
            // Technically this should be `abs(candidate_d.distance) <
            // min(min_d.distance, -max_d.distance) * frac`, but this seems
            // sufficient.
            if (abs(candidate_d.distance) < min_d.distance * 0.9 && candidate_d.valid) {
                *hit_t = mid_t;
                *hit_d = candidate_d;
            } else {
                *hit_t = max_t;
                *hit_d = max_d;
            }

            return true;
        } else {
            *hit_t = max_t;
            *hit_d = max_d;
            return true;
        }
    } else {
        // Mark the conservative miss distance.
        *hit_t = min_t;
        return false;
    }
}
/// Result of a single `depth_raymarch_distance_fn_evaluate` sample.
struct DistanceWithPenetration {
    /// Distance to the surface of which a root we're trying to find
    /// (negative when the sample point lies below the surface).
    distance: f32,

    /// Whether to consider this sample valid for intersection.
    /// Mostly relevant for allowing the ray marcher to travel behind surfaces,
    /// as it will mark surfaces it travels under as invalid.
    valid: bool,

    /// Conservative estimate of depth to which the ray penetrates the marched surface.
    penetration: f32,
}
/// Parameters of the depth-buffer "distance function" sampled by the hybrid
/// root finder.
struct DepthRaymarchDistanceFn {
    /// Size of the depth texture, in pixels.
    depth_tex_size: vec2<f32>,

    /// If true, samples that penetrate deeper than `depth_thickness` are
    /// marked invalid rather than counting as hits, letting the march
    /// continue behind surfaces.
    march_behind_surfaces: bool,

    /// Assumed surface thickness, in the scaled linear-depth space used by
    /// the march.
    depth_thickness: f32,

    /// If true, only nearest-neighbor depth taps are taken (see the long
    /// comment in `depth_raymarch_distance_fn_evaluate`).
    use_sloppy_march: bool,
}
// Evaluates the signed distance between a ray point and the depth-buffer
// surface, together with validity and penetration information
// (see `DistanceWithPenetration`).
//
// `ray_point_cs` is a clip-space position (w = 1).
fn depth_raymarch_distance_fn_evaluate(
    distance_fn: ptr<function, DepthRaymarchDistanceFn>,
    ray_point_cs: vec3<f32>,
) -> DistanceWithPenetration {
    let interp_uv = ndc_to_uv(ray_point_cs.xy);

    // NOTE(review): comparisons below happen in reciprocal-NDC-depth space
    // (1/z); this assumes a reversed-Z projection — confirm against the view
    // transformations in use.
    let ray_depth = 1.0 / ray_point_cs.z;

    // We're using both point-sampled and bilinear-filtered values from the depth buffer.
    //
    // That's really stupid but works like magic. For samples taken near the ray origin,
    // the discrete nature of the depth buffer becomes a problem. It's not a land of continuous surfaces,
    // but a bunch of stacked duplo bricks.
    //
    // Technically we should be taking discrete steps in this duplo land, but then we're at the mercy
    // of arbitrary quantization of our directions -- and sometimes we'll take a step which would
    // claim that the ray is occluded -- even though the underlying smooth surface wouldn't occlude it.
    //
    // If we instead take linear taps from the depth buffer, we reconstruct the linear surface.
    // That fixes acne, but introduces false shadowing near object boundaries, as we now pretend
    // that everything is shrink-wrapped by a continuous 2.5D surface, and our depth thickness
    // heuristic ends up falling apart.
    //
    // The fix is to consider both the smooth and the discrete surfaces, and only claim occlusion
    // when the ray descends below both.
    //
    // The two approaches end up fixing each other's artifacts:
    // * The false occlusions due to duplo land are rejected because the ray stays above the smooth surface.
    // * The shrink-wrap surface is no longer continuous, so it's possible for rays to miss it.
    let linear_depth =
        1.0 / textureSampleLevel(depth_prepass_texture, depth_linear_sampler, interp_uv, 0u);
    let unfiltered_depth =
        1.0 / textureSampleLevel(depth_prepass_texture, depth_nearest_sampler, interp_uv, 0u);

    var max_depth: f32;
    var min_depth: f32;

    if ((*distance_fn).use_sloppy_march) {
        // Sloppy mode: only the nearest-neighbor tap is considered.
        max_depth = unfiltered_depth;
        min_depth = unfiltered_depth;
    } else {
        max_depth = max(linear_depth, unfiltered_depth);
        min_depth = min(linear_depth, unfiltered_depth);
    }

    // Small relative bias applied to the sampled surface depth.
    let bias = 0.000002;

    var res: DistanceWithPenetration;
    res.distance = max_depth * (1.0 + bias) - ray_depth;

    // This will be used at the end of the ray march to potentially discard the hit.
    res.penetration = ray_depth - min_depth;

    if ((*distance_fn).march_behind_surfaces) {
        res.valid = res.penetration < (*distance_fn).depth_thickness;
    } else {
        res.valid = true;
    }

    return res;
}
/// Output of `depth_ray_march_march`.
struct DepthRayMarchResult {
    /// True if the raymarch hit something.
    hit: bool,

    /// In case of a hit, the normalized distance to it.
    ///
    /// In case of a miss, the furthest the ray managed to travel, which could either be
    /// exceeding the max range, or getting behind a surface further than the depth thickness.
    ///
    /// Range: `0..=1` as a lerp factor over `ray_start_cs..=ray_end_cs`.
    hit_t: f32,

    /// UV corresponding to `hit_t`.
    hit_uv: vec2<f32>,

    /// The distance that the hit point penetrates into the hit surface.
    /// Will normally be non-zero due to limited precision of the ray march.
    ///
    /// In case of a miss: undefined.
    hit_penetration: f32,

    /// Ditto, within the range `0..DepthRayMarch::depth_thickness_linear_z`
    ///
    /// In case of a miss: undefined.
    hit_penetration_frac: f32,
}
/// Configuration and state for a depth-buffer ray march.
///
/// Initialize with `depth_ray_march_new_from_depth`, set the ray with
/// `depth_ray_march_from_cs` plus one of the `depth_ray_march_to_*` functions,
/// then call `depth_ray_march_march`.
struct DepthRayMarch {
    /// Number of steps to be taken at regular intervals to find an initial intersection.
    /// Must not be zero.
    linear_steps: u32,

    /// Exponent to be applied in the linear part of the march.
    ///
    /// A value of 1.0 will result in equidistant steps, and higher values will compress
    /// the earlier steps, and expand the later ones. This might be desirable in order
    /// to get more detail close to objects in SSR or SSGI.
    ///
    /// For optimal performance, this should be a small compile-time unsigned integer,
    /// such as 1 or 2.
    linear_march_exponent: f32,

    /// Number of steps in a bisection (binary search) to perform once the linear search
    /// has found an intersection. Helps narrow down the hit, increasing the chance of
    /// the secant method finding an accurate hit point.
    ///
    /// Useful when sampling color, e.g. SSR or SSGI, but pointless for contact shadows.
    bisection_steps: u32,

    /// Approximate the root position using the secant method -- by solving for line-line
    /// intersection between the ray approach rate and the surface gradient.
    ///
    /// Useful when sampling color, e.g. SSR or SSGI, but pointless for contact shadows.
    use_secant: bool,

    /// Jitter to apply to the first step of the linear search; 0..=1 range, mapping
    /// to the extent of a single linear step in the first phase of the search.
    /// Use 1.0 if you don't want jitter.
    jitter: f32,

    /// Clip space coordinates (w=1) of the ray.
    ray_start_cs: vec3<f32>,
    ray_end_cs: vec3<f32>,

    /// Should be used for contact shadows, but not for any color bounce, e.g. SSR.
    ///
    /// For SSR etc. this can easily create leaks, but with contact shadows it allows the rays
    /// to pass over invalid occlusions (due to thickness), and find potentially valid ones ahead.
    ///
    /// Note that this will cause the linear search to potentially miss surfaces,
    /// because when the ray overshoots and ends up penetrating a surface further than
    /// `depth_thickness_linear_z`, the ray marcher will just carry on.
    ///
    /// For this reason, this may require a lot of samples, or high depth thickness,
    /// so that `depth_thickness_linear_z >= world space ray length / linear_steps`.
    march_behind_surfaces: bool,

    /// If `true`, the ray marcher only performs nearest lookups of the depth buffer,
    /// resulting in aliasing and false occlusion when marching tiny detail.
    /// It should work fine for longer traces with fewer rays though.
    use_sloppy_march: bool,

    /// When marching the depth buffer, we only have 2.5D information, and don't know how
    /// thick surfaces are. We shall assume that the depth buffer fragments are little squares
    /// with a constant thickness defined by this parameter.
    depth_thickness_linear_z: f32,

    /// Size of the depth buffer we're marching in, in pixels.
    depth_tex_size: vec2<f32>,
}
// Creates a ray march over the given depth buffer with default parameters.
//
// The ray endpoints (`ray_start_cs`/`ray_end_cs`) are left at their
// zero-initialized values; set them via `depth_ray_march_from_cs` and one of
// the `depth_ray_march_to_*` functions before marching.
fn depth_ray_march_new_from_depth(depth_tex_size: vec2<f32>) -> DepthRayMarch {
    var res: DepthRayMarch;

    // March parameters: a few linear taps, no bisection or secant refinement,
    // equidistant steps, and no jitter.
    res.linear_steps = 4u;
    res.bisection_steps = 0u;
    res.linear_march_exponent = 1.0;
    res.jitter = 1.0;

    // Depth-buffer parameters.
    res.depth_tex_size = depth_tex_size;
    res.depth_thickness_linear_z = 1.0;
    res.use_sloppy_march = false;
    res.march_behind_surfaces = false;

    return res;
}
// Shared implementation of the `depth_ray_march_to_*` functions: computes
// `ray_end_cs` from `ray_start_cs` plus a clip-space direction, clipping both
// endpoints to the view frustum. When `infinite` is true the ray is extended
// all the way to the frustum edge.
fn depth_ray_march_to_cs_dir_impl(
    raymarch: ptr<function, DepthRayMarch>,
    dir_cs: vec4<f32>,
    infinite: bool,
) {
    var end_cs = vec4((*raymarch).ray_start_cs, 1.0) + dir_cs;

    // Perform perspective division, but avoid dividing by zero for rays
    // heading directly towards the eye.
    end_cs /= select(-1.0, 1.0, end_cs.w >= 0.0) * max(1e-10, abs(end_cs.w));

    // Clip ray start to the view frustum
    // (per axis, pick the boundary the ray is moving away from; NDC x/y are
    // in [-1, 1] and z in [0, 1]).
    var delta_cs = end_cs.xyz - (*raymarch).ray_start_cs;
    let near_edge = select(vec3(-1.0, -1.0, 0.0), vec3(1.0, 1.0, 1.0), delta_cs < vec3(0.0));
    let dist_to_near_edge = (near_edge - (*raymarch).ray_start_cs) / delta_cs;
    let max_dist_to_near_edge = max(dist_to_near_edge.x, dist_to_near_edge.y);
    (*raymarch).ray_start_cs += delta_cs * max(0.0, max_dist_to_near_edge);

    // Clip ray end to the view frustum
    delta_cs = end_cs.xyz - (*raymarch).ray_start_cs;
    let far_edge = select(vec3(-1.0, -1.0, 0.0), vec3(1.0, 1.0, 1.0), delta_cs >= vec3(0.0));
    let dist_to_far_edge = (far_edge - (*raymarch).ray_start_cs) / delta_cs;
    let min_dist_to_far_edge = min(
        min(dist_to_far_edge.x, dist_to_far_edge.y),
        dist_to_far_edge.z
    );

    if (infinite) {
        // Extend the ray to the frustum edge.
        delta_cs *= min_dist_to_far_edge;
    } else {
        // Bounded ray: keep the requested length but never march past the
        // frustum edge. If unbounded, would make the ray reach the end of the frustum
        delta_cs *= min(1.0, min_dist_to_far_edge);
    }

    (*raymarch).ray_end_cs = (*raymarch).ray_start_cs + delta_cs;
}
/// March from a clip-space position (w = 1)
///
/// Sets the ray origin; call this before any of the `depth_ray_march_to_*`
/// functions.
fn depth_ray_march_from_cs(raymarch: ptr<function, DepthRayMarch>, v: vec3<f32>) {
    (*raymarch).ray_start_cs = v;
}
/// March to a clip-space position (w = 1)
///
/// Must be called after `from_cs`, as it will clip the world-space ray to the view frustum.
fn depth_ray_march_to_cs(raymarch: ptr<function, DepthRayMarch>, end_cs: vec3<f32>) {
    // NOTE(review): the `sign(end_cs.z)` factor flips (or zeroes, at z == 0)
    // the direction for end points with non-positive NDC z — presumably to
    // handle targets behind the camera; confirm against the original
    // `raymarch.hlsl`.
    let dir = vec4(end_cs - (*raymarch).ray_start_cs, 0.0) * sign(end_cs.z);
    depth_ray_march_to_cs_dir_impl(raymarch, dir, false);
}
/// March towards a clip-space direction. Infinite (ray is extended to cover the whole view frustum).
///
/// Must be called after `from_cs`, as it will clip the world-space ray to the view frustum.
fn depth_ray_march_to_cs_dir(raymarch: ptr<function, DepthRayMarch>, dir: vec4<f32>) {
    // Delegates to the shared implementation with `infinite = true`.
    depth_ray_march_to_cs_dir_impl(raymarch, dir, true);
}
/// March to a world-space position.
///
/// Must be called after `from_cs`, as it will clip the world-space ray to the view frustum.
fn depth_ray_march_to_ws(raymarch: ptr<function, DepthRayMarch>, end: vec3<f32>) {
    // Project the world-space end point to NDC, then reuse the clip-space path.
    depth_ray_march_to_cs(raymarch, position_world_to_ndc(end));
}
/// March towards a world-space direction. Infinite (ray is extended to cover the whole view frustum).
///
/// Must be called after `from_cs`, as it will clip the world-space ray to the view frustum.
fn depth_ray_march_to_ws_dir(raymarch: ptr<function, DepthRayMarch>, dir: vec3<f32>) {
    // Transform the world-space direction to clip space, then extend the ray
    // to the frustum edge.
    depth_ray_march_to_cs_dir_impl(raymarch, direction_world_to_clip(dir), true);
}
/// Perform the ray march.
///
/// Walks from `ray_start_cs` to `ray_end_cs` with a hybrid root finder
/// (coarse linear steps refined by bisection, and optionally a secant step),
/// testing the ray's depth against the depth buffer at each sample. Returns a
/// `DepthRayMarchResult` whose `hit` flag reports whether the ray struck a
/// surface within the configured depth thickness.
fn depth_ray_march_march(raymarch: ptr<function, DepthRayMarch>) -> DepthRayMarchResult {
    // Default result: a miss at t = 0.
    var res = DepthRayMarchResult(false, 0.0, vec2(0.0), 0.0, 0.0);

    let ray_start_uv = ndc_to_uv((*raymarch).ray_start_cs.xy);
    let ray_end_uv = ndc_to_uv((*raymarch).ray_end_cs.xy);
    let ray_uv_delta = ray_end_uv - ray_start_uv;

    // Screen-space length of the ray in pixels: used to cap the step count so
    // we never take more than one linear step per `min_px_per_step` pixels
    // (short rays don't need the full configured step budget).
    let ray_len_px = ray_uv_delta * (*raymarch).depth_tex_size;
    let min_px_per_step = 1u;
    let step_count = max(
        2,
        min(i32((*raymarch).linear_steps), i32(floor(length(ray_len_px) / f32(min_px_per_step))))
    );

    // Rescale linear depth by the near-plane distance so the thickness test
    // below operates in near-plane-relative units.
    let linear_z_to_scaled_linear_z = 1.0 / perspective_camera_near();
    let depth_thickness = (*raymarch).depth_thickness_linear_z * linear_z_to_scaled_linear_z;

    // Distance function the root finder evaluates at each candidate point.
    var distance_fn: DepthRaymarchDistanceFn;
    distance_fn.depth_tex_size = (*raymarch).depth_tex_size;
    distance_fn.march_behind_surfaces = (*raymarch).march_behind_surfaces;
    distance_fn.depth_thickness = depth_thickness;
    distance_fn.use_sloppy_march = (*raymarch).use_sloppy_march;

    var hit: DistanceWithPenetration;
    var hit_t = 0.0;
    var miss_t = 0.0;

    // Configure the root finder from the raymarch parameters.
    var root_finder = hybrid_root_finder_new_with_linear_steps(u32(step_count));
    root_finder.bisection_steps = (*raymarch).bisection_steps;
    root_finder.use_secant = (*raymarch).use_secant;
    root_finder.linear_march_exponent = (*raymarch).linear_march_exponent;
    root_finder.jitter = (*raymarch).jitter;

    let intersected = hybrid_root_finder_find_root(
        &root_finder,
        (*raymarch).ray_start_cs,
        (*raymarch).ray_end_cs,
        &distance_fn,
        &hit_t,
        &miss_t,
        &hit
    );

    res.hit_t = hit_t;

    // Only report a hit if the ray actually stopped inside the surface slab:
    // both the remaining distance and the penetration must be within the
    // depth thickness.
    if (intersected && hit.penetration < depth_thickness && hit.distance < depth_thickness) {
        res.hit = true;
        res.hit_uv = mix(ray_start_uv, ray_end_uv, res.hit_t);
        // Convert penetration back from near-plane-scaled units to linear Z.
        res.hit_penetration = hit.penetration / linear_z_to_scaled_linear_z;
        res.hit_penetration_frac = hit.penetration / depth_thickness;
        return res;
    }

    // Miss: report the furthest point the root finder reached before it gave
    // up, so callers can still use the UV for fallback shading.
    res.hit_t = miss_t;
    res.hit_uv = mix(ray_start_uv, ray_end_uv, res.hit_t);
    return res;
}

194
vendor/bevy_pbr/src/ssr/ssr.wgsl vendored Normal file
View File

@@ -0,0 +1,194 @@
// A postprocessing pass that performs screen-space reflections.
#define_import_path bevy_pbr::ssr
#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput
#import bevy_pbr::{
clustered_forward,
lighting,
lighting::{LAYER_BASE, LAYER_CLEARCOAT},
mesh_view_bindings::{view, depth_prepass_texture, deferred_prepass_texture, ssr_settings},
pbr_deferred_functions::pbr_input_from_deferred_gbuffer,
pbr_deferred_types,
pbr_functions,
prepass_utils,
raymarch::{
depth_ray_march_from_cs,
depth_ray_march_march,
depth_ray_march_new_from_depth,
depth_ray_march_to_ws_dir,
},
utils,
view_transformations::{
depth_ndc_to_view_z,
frag_coord_to_ndc,
ndc_to_frag_coord,
ndc_to_uv,
position_view_to_ndc,
position_world_to_ndc,
position_world_to_view,
},
}
#import bevy_render::view::View
#ifdef ENVIRONMENT_MAP
#import bevy_pbr::environment_map
#endif
// The texture representing the color framebuffer: read both via `textureLoad`
// (to pass through untouched pixels) and via `textureSampleLevel` (to fetch
// the reflected color when a ray hits).
@group(1) @binding(0) var color_texture: texture_2d<f32>;
// The sampler that lets us sample from the color framebuffer.
@group(1) @binding(1) var color_sampler: sampler;
// Group 1, bindings 2 and 3 are in `raymarch.wgsl`.
// Returns the reflected color in the RGB channel and the specular occlusion in
// the alpha channel (1.0 when the ray missed, 0.0 when it hit).
//
// The general approach here is similar to [1]. We first project the reflection
// ray into screen space. Then we perform uniform steps along that screen-space
// reflected ray, converting each step to view space.
//
// The arguments are:
//
// * `R_world`: The reflection vector in world space.
//
// * `P_world`: The current position in world space.
//
// [1]: https://lettier.github.io/3d-game-shaders-for-beginners/screen-space-reflection.html
fn evaluate_ssr(R_world: vec3<f32>, P_world: vec3<f32>) -> vec4<f32> {
    let depth_buffer_size = vec2<f32>(textureDimensions(depth_prepass_texture));

    // Build the raymarch and configure it from the SSR settings uniform.
    var march = depth_ray_march_new_from_depth(depth_buffer_size);
    depth_ray_march_from_cs(&march, position_world_to_ndc(P_world));
    depth_ray_march_to_ws_dir(&march, normalize(R_world));
    march.linear_steps = ssr_settings.linear_steps;
    march.bisection_steps = ssr_settings.bisection_steps;
    march.use_secant = ssr_settings.use_secant != 0u;
    march.depth_thickness_linear_z = ssr_settings.thickness;
    march.jitter = 1.0; // Disable jitter for now.
    march.march_behind_surfaces = false;

    let result = depth_ray_march_march(&march);

    // On a miss, return black with full specular occlusion so the caller can
    // fall back to the environment map.
    if (!result.hit) {
        return vec4(0.0, 0.0, 0.0, 1.0);
    }

    // On a hit, fetch the reflected framebuffer color and zero the occlusion.
    let reflected_color =
        textureSampleLevel(color_texture, color_sampler, result.hit_uv, 0.0).rgb;
    return vec4(reflected_color, 0.0);
}
// Screen-space reflections fragment entry point.
//
// Runs as a fullscreen pass over the deferred G-buffer: it reconstructs the
// PBR surface at each pixel, raymarches a single specular reflection ray, and
// composites the result (plus the environment map, if enabled) over the
// framebuffer color.
@fragment
fn fragment(in: FullscreenVertexOutput) -> @location(0) vec4<f32> {
    // Sample the depth.
    var frag_coord = in.position;
    frag_coord.z = prepass_utils::prepass_depth(in.position, 0u);

    // Load the G-buffer data.
    let fragment = textureLoad(color_texture, vec2<i32>(frag_coord.xy), 0);
    let gbuffer = textureLoad(deferred_prepass_texture, vec2<i32>(frag_coord.xy), 0);
    let pbr_input = pbr_input_from_deferred_gbuffer(frag_coord, gbuffer);

    // Don't do anything if the surface is too rough, since we can't blur or do
    // temporal accumulation yet. Pass the framebuffer color through unchanged.
    let perceptual_roughness = pbr_input.material.perceptual_roughness;
    if (perceptual_roughness > ssr_settings.perceptual_roughness_threshold) {
        return fragment;
    }

    // Unpack the PBR input.
    var specular_occlusion = pbr_input.specular_occlusion;
    let world_position = pbr_input.world_position.xyz;
    let N = pbr_input.N;
    let V = pbr_input.V;

    // Calculate the reflection vector.
    let R = reflect(-V, N);

    // Do the raymarching. `evaluate_ssr` returns the reflected color in RGB
    // and the specular occlusion in alpha (0.0 on a hit, 1.0 on a miss), so a
    // hit suppresses the environment map's specular contribution below.
    let ssr_specular = evaluate_ssr(R, world_position);
    var indirect_light = ssr_specular.rgb;
    specular_occlusion *= ssr_specular.a;

    // Sample the environment map if necessary.
    //
    // This will take the specular part of the environment map into account if
    // the ray missed. Otherwise, it only takes the diffuse part.
    //
    // TODO: Merge this with the duplicated code in `apply_pbr_lighting`.
#ifdef ENVIRONMENT_MAP
    // Unpack values required for environment mapping.
    let base_color = pbr_input.material.base_color.rgb;
    let metallic = pbr_input.material.metallic;
    let reflectance = pbr_input.material.reflectance;
    let specular_transmission = pbr_input.material.specular_transmission;
    let diffuse_transmission = pbr_input.material.diffuse_transmission;
    let diffuse_occlusion = pbr_input.diffuse_occlusion;

#ifdef STANDARD_MATERIAL_CLEARCOAT
    // Do the above calculations again for the clearcoat layer. Remember that
    // the clearcoat can have its own roughness and its own normal.
    let clearcoat = pbr_input.material.clearcoat;
    let clearcoat_perceptual_roughness = pbr_input.material.clearcoat_perceptual_roughness;
    let clearcoat_roughness = lighting::perceptualRoughnessToRoughness(clearcoat_perceptual_roughness);
    let clearcoat_N = pbr_input.clearcoat_N;
    let clearcoat_NdotV = max(dot(clearcoat_N, pbr_input.V), 0.0001);
    let clearcoat_R = reflect(-pbr_input.V, clearcoat_N);
#endif // STANDARD_MATERIAL_CLEARCOAT

    // Calculate various other values needed for environment mapping.
    let roughness = lighting::perceptualRoughnessToRoughness(perceptual_roughness);
    let diffuse_color = pbr_functions::calculate_diffuse_color(
        base_color,
        metallic,
        specular_transmission,
        diffuse_transmission
    );
    // Clamp NdotV away from zero to avoid division artifacts at grazing angles.
    let NdotV = max(dot(N, V), 0.0001);
    let F_ab = lighting::F_AB(perceptual_roughness, NdotV);
    let F0 = pbr_functions::calculate_F0(base_color, metallic, reflectance);

    // Pack all the values into a structure.
    var lighting_input: lighting::LightingInput;
    lighting_input.layers[LAYER_BASE].NdotV = NdotV;
    lighting_input.layers[LAYER_BASE].N = N;
    lighting_input.layers[LAYER_BASE].R = R;
    lighting_input.layers[LAYER_BASE].perceptual_roughness = perceptual_roughness;
    lighting_input.layers[LAYER_BASE].roughness = roughness;
    lighting_input.P = world_position.xyz;
    lighting_input.V = V;
    lighting_input.diffuse_color = diffuse_color;
    lighting_input.F0_ = F0;
    lighting_input.F_ab = F_ab;
#ifdef STANDARD_MATERIAL_CLEARCOAT
    lighting_input.layers[LAYER_CLEARCOAT].NdotV = clearcoat_NdotV;
    lighting_input.layers[LAYER_CLEARCOAT].N = clearcoat_N;
    lighting_input.layers[LAYER_CLEARCOAT].R = clearcoat_R;
    lighting_input.layers[LAYER_CLEARCOAT].perceptual_roughness = clearcoat_perceptual_roughness;
    lighting_input.layers[LAYER_CLEARCOAT].roughness = clearcoat_roughness;
    lighting_input.clearcoat_strength = clearcoat;
#endif // STANDARD_MATERIAL_CLEARCOAT

    // Determine which cluster we're in. We'll need this to find the right
    // reflection probe.
    let cluster_index = clustered_forward::fragment_cluster_index(
        frag_coord.xy, frag_coord.z, false);
    var clusterable_object_index_ranges =
        clustered_forward::unpack_clusterable_object_index_ranges(cluster_index);

    // Sample the environment map.
    let environment_light = environment_map::environment_map_light(
        &lighting_input, &clusterable_object_index_ranges, false);

    // Accumulate the environment map light, attenuating the specular term by
    // the (possibly SSR-zeroed) specular occlusion.
    indirect_light += view.exposure *
        (environment_light.diffuse * diffuse_occlusion +
        environment_light.specular * specular_occlusion);
#endif

    // Write the results.
    return vec4(fragment.rgb + indirect_light, 1.0);
}