Vendor dependencies for 0.3.0 release

2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions


@@ -0,0 +1,408 @@
use crate::{
render_resource::{SurfaceTexture, TextureView},
renderer::{RenderAdapter, RenderDevice, RenderInstance},
Extract, ExtractSchedule, Render, RenderApp, RenderSet, WgpuWrapper,
};
use bevy_app::{App, Plugin};
use bevy_ecs::{entity::EntityHashMap, prelude::*};
use bevy_platform::collections::HashSet;
use bevy_utils::default;
use bevy_window::{
CompositeAlphaMode, PresentMode, PrimaryWindow, RawHandleWrapper, Window, WindowClosing,
};
use core::{
num::NonZero,
ops::{Deref, DerefMut},
};
use tracing::{debug, warn};
use wgpu::{
SurfaceConfiguration, SurfaceTargetUnsafe, TextureFormat, TextureUsages, TextureViewDescriptor,
};
pub mod screenshot;
use screenshot::{ScreenshotPlugin, ScreenshotToScreenPipeline};
pub struct WindowRenderPlugin;
impl Plugin for WindowRenderPlugin {
fn build(&self, app: &mut App) {
app.add_plugins(ScreenshotPlugin);
if let Some(render_app) = app.get_sub_app_mut(RenderApp) {
render_app
.init_resource::<ExtractedWindows>()
.init_resource::<WindowSurfaces>()
.add_systems(ExtractSchedule, extract_windows)
.add_systems(
Render,
create_surfaces
.run_if(need_surface_configuration)
.before(prepare_windows),
)
.add_systems(Render, prepare_windows.in_set(RenderSet::ManageViews));
}
}
fn finish(&self, app: &mut App) {
if let Some(render_app) = app.get_sub_app_mut(RenderApp) {
render_app.init_resource::<ScreenshotToScreenPipeline>();
}
}
}
pub struct ExtractedWindow {
/// An entity that contains the components in [`Window`].
pub entity: Entity,
pub handle: RawHandleWrapper,
pub physical_width: u32,
pub physical_height: u32,
pub present_mode: PresentMode,
pub desired_maximum_frame_latency: Option<NonZero<u32>>,
/// Note: this will not always be the swap chain texture view. When taking a screenshot,
/// this will point to an alternative texture instead to allow for copying the render result
/// to CPU memory.
pub swap_chain_texture_view: Option<TextureView>,
pub swap_chain_texture: Option<SurfaceTexture>,
pub swap_chain_texture_format: Option<TextureFormat>,
pub size_changed: bool,
pub present_mode_changed: bool,
pub alpha_mode: CompositeAlphaMode,
}
impl ExtractedWindow {
fn set_swapchain_texture(&mut self, frame: wgpu::SurfaceTexture) {
let texture_view_descriptor = TextureViewDescriptor {
format: Some(frame.texture.format().add_srgb_suffix()),
..default()
};
self.swap_chain_texture_view = Some(TextureView::from(
frame.texture.create_view(&texture_view_descriptor),
));
self.swap_chain_texture = Some(SurfaceTexture::from(frame));
}
}
#[derive(Default, Resource)]
pub struct ExtractedWindows {
pub primary: Option<Entity>,
pub windows: EntityHashMap<ExtractedWindow>,
}
impl Deref for ExtractedWindows {
type Target = EntityHashMap<ExtractedWindow>;
fn deref(&self) -> &Self::Target {
&self.windows
}
}
impl DerefMut for ExtractedWindows {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.windows
}
}
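/// Extracts window data from the main world into [`ExtractedWindows`], recording size and
/// present-mode changes and removing entries for closing or despawned windows.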
fn extract_windows(
mut extracted_windows: ResMut<ExtractedWindows>,
mut closing: Extract<EventReader<WindowClosing>>,
windows: Extract<Query<(Entity, &Window, &RawHandleWrapper, Option<&PrimaryWindow>)>>,
mut removed: Extract<RemovedComponents<RawHandleWrapper>>,
mut window_surfaces: ResMut<WindowSurfaces>,
) {
for (entity, window, handle, primary) in windows.iter() {
if primary.is_some() {
extracted_windows.primary = Some(entity);
}
let (new_width, new_height) = (
window.resolution.physical_width().max(1),
window.resolution.physical_height().max(1),
);
let extracted_window = extracted_windows.entry(entity).or_insert(ExtractedWindow {
entity,
handle: handle.clone(),
physical_width: new_width,
physical_height: new_height,
present_mode: window.present_mode,
desired_maximum_frame_latency: window.desired_maximum_frame_latency,
swap_chain_texture: None,
swap_chain_texture_view: None,
size_changed: false,
swap_chain_texture_format: None,
present_mode_changed: false,
alpha_mode: window.composite_alpha_mode,
});
// NOTE: Drop the swap chain frame here
extracted_window.swap_chain_texture_view = None;
extracted_window.size_changed = new_width != extracted_window.physical_width
|| new_height != extracted_window.physical_height;
extracted_window.present_mode_changed =
window.present_mode != extracted_window.present_mode;
if extracted_window.size_changed {
debug!(
"Window size changed from {}x{} to {}x{}",
extracted_window.physical_width,
extracted_window.physical_height,
new_width,
new_height
);
extracted_window.physical_width = new_width;
extracted_window.physical_height = new_height;
}
if extracted_window.present_mode_changed {
debug!(
"Window Present Mode changed from {:?} to {:?}",
extracted_window.present_mode, window.present_mode
);
extracted_window.present_mode = window.present_mode;
}
}
for closing_window in closing.read() {
extracted_windows.remove(&closing_window.window);
window_surfaces.remove(&closing_window.window);
}
for removed_window in removed.read() {
extracted_windows.remove(&removed_window);
window_surfaces.remove(&removed_window);
}
}
struct SurfaceData {
// TODO: what lifetime should this be?
surface: WgpuWrapper<wgpu::Surface<'static>>,
configuration: SurfaceConfiguration,
}
#[derive(Resource, Default)]
pub struct WindowSurfaces {
surfaces: EntityHashMap<SurfaceData>,
/// List of windows for which the initial `configure_surface` has already been called
configured_windows: HashSet<Entity>,
}
impl WindowSurfaces {
fn remove(&mut self, window: &Entity) {
self.surfaces.remove(window);
self.configured_windows.remove(window);
}
}
/// (Re)configures window surfaces and obtains a swap chain texture for rendering.
///
/// NOTE: `get_current_texture` in `prepare_windows` can take a long time if the GPU workload is
/// the performance bottleneck. This can be seen in profiles as multiple prepare-set systems all
/// taking an unusually long time to complete, and all finishing at about the same time as the
/// `prepare_windows` system. Improvements in Bevy are planned to avoid this happening when it
/// should not, but it will still happen, as it is easy for a user to create a GPU workload that is
/// large relative to the GPU's performance and/or the CPU workload.
/// Common causes include:
/// - GPU workload is more than your current GPU can manage
/// - Error / performance bug in your custom shaders
/// - wgpu was unable to detect a proper GPU hardware-accelerated device given the chosen
/// [`Backends`](crate::settings::Backends), [`WgpuLimits`](crate::settings::WgpuLimits),
/// and/or [`WgpuFeatures`](crate::settings::WgpuFeatures). For example, on Windows currently
/// `DirectX 11` is not supported by wgpu 0.12 and so if your GPU/drivers do not support Vulkan,
/// it may be that a software renderer called "Microsoft Basic Render Driver" using `DirectX 12`
/// will be chosen and performance will be very poor. This is visible in a log message that is
/// output during renderer initialization. Future versions of wgpu will support `DirectX 11`, but
/// another alternative is to try to use [`ANGLE`](https://github.com/gfx-rs/wgpu#angle) and
/// [`Backends::GL`](crate::settings::Backends::GL) if your GPU/drivers support `OpenGL 4.3` / `OpenGL ES 3.0` or
/// later.
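///
/// If wgpu ends up on a software adapter, one workaround is to request the GL backend
/// explicitly when adding the render plugin. A minimal sketch, assuming the top-level
/// `bevy` facade crate (plugin field names may differ between versions):
///
/// ```ignore
/// use bevy::prelude::*;
/// use bevy::render::{
///     settings::{Backends, RenderCreation, WgpuSettings},
///     RenderPlugin,
/// };
///
/// App::new()
///     .add_plugins(DefaultPlugins.set(RenderPlugin {
///         render_creation: RenderCreation::Automatic(WgpuSettings {
///             // Ask wgpu for an OpenGL / GLES adapter instead of Vulkan or DirectX 12.
///             backends: Some(Backends::GL),
///             ..Default::default()
///         }),
///         ..Default::default()
///     }))
///     .run();
/// ```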
pub fn prepare_windows(
mut windows: ResMut<ExtractedWindows>,
mut window_surfaces: ResMut<WindowSurfaces>,
render_device: Res<RenderDevice>,
#[cfg(target_os = "linux")] render_instance: Res<RenderInstance>,
) {
for window in windows.windows.values_mut() {
let window_surfaces = window_surfaces.deref_mut();
let Some(surface_data) = window_surfaces.surfaces.get(&window.entity) else {
continue;
};
// A recurring issue is hitting `wgpu::SurfaceError::Timeout` on certain Linux
// mesa driver implementations. This seems to be a quirk of some drivers.
// We'd rather keep panicking when not on Linux Mesa because, in those cases,
// the `Timeout` is most likely a symptom of a degraded, unrecoverable
// application state.
// see https://github.com/bevyengine/bevy/pull/5957
// and https://github.com/gfx-rs/wgpu/issues/1218
#[cfg(target_os = "linux")]
let may_erroneously_timeout = || {
render_instance
.enumerate_adapters(wgpu::Backends::VULKAN)
.iter()
.any(|adapter| {
let name = adapter.get_info().name;
name.starts_with("Radeon")
|| name.starts_with("AMD")
|| name.starts_with("Intel")
})
};
let surface = &surface_data.surface;
match surface.get_current_texture() {
Ok(frame) => {
window.set_swapchain_texture(frame);
}
Err(wgpu::SurfaceError::Outdated) => {
render_device.configure_surface(surface, &surface_data.configuration);
let frame = match surface.get_current_texture() {
Ok(frame) => frame,
Err(err) => {
// This is a common occurrence on X11 and Xwayland with NVIDIA drivers
// when opening and resizing the window.
warn!("Couldn't get swap chain texture after configuring. Cause: '{err}'");
continue;
}
};
window.set_swapchain_texture(frame);
}
#[cfg(target_os = "linux")]
Err(wgpu::SurfaceError::Timeout) if may_erroneously_timeout() => {
tracing::trace!(
"Couldn't get swap chain texture. This is probably a quirk \
of your Linux GPU driver, so it can be safely ignored."
);
}
Err(err) => {
panic!("Couldn't get swap chain texture, operation unrecoverable: {err}");
}
}
window.swap_chain_texture_format = Some(surface_data.configuration.format);
}
}
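/// Returns `true` if any extracted window has no configured surface yet, changed size, or
/// changed present mode, signalling that [`create_surfaces`] needs to run this frame.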
pub fn need_surface_configuration(
windows: Res<ExtractedWindows>,
window_surfaces: Res<WindowSurfaces>,
) -> bool {
for window in windows.windows.values() {
if !window_surfaces.configured_windows.contains(&window.entity)
|| window.size_changed
|| window.present_mode_changed
{
return true;
}
}
false
}
// 2 is wgpu's default and what we've been using so far.
// 1 is the minimum, but it may cause lower framerates: the CPU has to wait for the GPU to
// finish all work for the previous frame before starting work on the next frame, which in
// turn means the GPU has to wait for the CPU before it can begin the next frame.
const DEFAULT_DESIRED_MAXIMUM_FRAME_LATENCY: u32 = 2;
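// A `Window` can override this through its `desired_maximum_frame_latency` field
// (an `Option<NonZero<u32>>`); when that field is `None`, this default is used in
// `create_surfaces` below.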
/// Creates window surfaces.
pub fn create_surfaces(
// By accessing a NonSend resource, we tell the scheduler to put this system on the main thread,
// which is necessary on some OSes.
#[cfg(any(target_os = "macos", target_os = "ios"))] _marker: Option<
NonSend<bevy_app::NonSendMarker>,
>,
windows: Res<ExtractedWindows>,
mut window_surfaces: ResMut<WindowSurfaces>,
render_instance: Res<RenderInstance>,
render_adapter: Res<RenderAdapter>,
render_device: Res<RenderDevice>,
) {
for window in windows.windows.values() {
let data = window_surfaces
.surfaces
.entry(window.entity)
.or_insert_with(|| {
let surface_target = SurfaceTargetUnsafe::RawHandle {
raw_display_handle: window.handle.get_display_handle(),
raw_window_handle: window.handle.get_window_handle(),
};
// SAFETY: The window handles in ExtractedWindows will always be valid objects to create surfaces on
let surface = unsafe {
// NOTE: On some OSes this MUST be called from the main thread.
// As of wgpu 0.15, this is only fallible if the given window is an HTML canvas and obtaining a WebGPU or WebGL2 context fails.
render_instance
.create_surface_unsafe(surface_target)
.expect("Failed to create wgpu surface")
};
let caps = surface.get_capabilities(&render_adapter);
let formats = caps.formats;
// For future HDR output support, we'll need to request a format that supports HDR,
// but as of wgpu 0.15 that is not yet supported.
// Prefer sRGB formats for surfaces, but fall back to the first available format if no sRGB formats are available.
let mut format = *formats.first().expect("No supported formats for surface");
for available_format in formats {
// Rgba8UnormSrgb and Bgra8UnormSrgb are the only sRGB formats wgpu exposes that we can use for surfaces.
if available_format == TextureFormat::Rgba8UnormSrgb
|| available_format == TextureFormat::Bgra8UnormSrgb
{
format = available_format;
break;
}
}
let configuration = SurfaceConfiguration {
format,
width: window.physical_width,
height: window.physical_height,
usage: TextureUsages::RENDER_ATTACHMENT,
present_mode: match window.present_mode {
PresentMode::Fifo => wgpu::PresentMode::Fifo,
PresentMode::FifoRelaxed => wgpu::PresentMode::FifoRelaxed,
PresentMode::Mailbox => wgpu::PresentMode::Mailbox,
PresentMode::Immediate => wgpu::PresentMode::Immediate,
PresentMode::AutoVsync => wgpu::PresentMode::AutoVsync,
PresentMode::AutoNoVsync => wgpu::PresentMode::AutoNoVsync,
},
desired_maximum_frame_latency: window
.desired_maximum_frame_latency
.map(NonZero::<u32>::get)
.unwrap_or(DEFAULT_DESIRED_MAXIMUM_FRAME_LATENCY),
alpha_mode: match window.alpha_mode {
CompositeAlphaMode::Auto => wgpu::CompositeAlphaMode::Auto,
CompositeAlphaMode::Opaque => wgpu::CompositeAlphaMode::Opaque,
CompositeAlphaMode::PreMultiplied => {
wgpu::CompositeAlphaMode::PreMultiplied
}
CompositeAlphaMode::PostMultiplied => {
wgpu::CompositeAlphaMode::PostMultiplied
}
CompositeAlphaMode::Inherit => wgpu::CompositeAlphaMode::Inherit,
},
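// Swap chain texture views are created with the sRGB variant of this format (see
// `ExtractedWindow::set_swapchain_texture`), so when the base format is not already
// sRGB, register the sRGB variant as an allowed view format.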
view_formats: if !format.is_srgb() {
vec![format.add_srgb_suffix()]
} else {
vec![]
},
};
render_device.configure_surface(&surface, &configuration);
SurfaceData {
surface: WgpuWrapper::new(surface),
configuration,
}
});
if window.size_changed || window.present_mode_changed {
data.configuration.width = window.physical_width;
data.configuration.height = window.physical_height;
data.configuration.present_mode = match window.present_mode {
PresentMode::Fifo => wgpu::PresentMode::Fifo,
PresentMode::FifoRelaxed => wgpu::PresentMode::FifoRelaxed,
PresentMode::Mailbox => wgpu::PresentMode::Mailbox,
PresentMode::Immediate => wgpu::PresentMode::Immediate,
PresentMode::AutoVsync => wgpu::PresentMode::AutoVsync,
PresentMode::AutoNoVsync => wgpu::PresentMode::AutoNoVsync,
};
render_device.configure_surface(&data.surface, &data.configuration);
}
window_surfaces.configured_windows.insert(window.entity);
}
}


@@ -0,0 +1,695 @@
use super::ExtractedWindows;
use crate::{
camera::{ManualTextureViewHandle, ManualTextureViews, NormalizedRenderTarget, RenderTarget},
gpu_readback,
prelude::Shader,
render_asset::{RenderAssetUsages, RenderAssets},
render_resource::{
binding_types::texture_2d, BindGroup, BindGroupEntries, BindGroupLayout,
BindGroupLayoutEntries, Buffer, BufferUsages, CachedRenderPipelineId, FragmentState,
PipelineCache, RenderPipelineDescriptor, SpecializedRenderPipeline,
SpecializedRenderPipelines, Texture, TextureUsages, TextureView, VertexState,
},
renderer::RenderDevice,
texture::{GpuImage, OutputColorAttachment},
view::{prepare_view_attachments, prepare_view_targets, ViewTargetAttachments, WindowSurfaces},
ExtractSchedule, MainWorld, Render, RenderApp, RenderSet,
};
use alloc::{borrow::Cow, sync::Arc};
use bevy_app::{First, Plugin, Update};
use bevy_asset::{load_internal_asset, weak_handle, Handle};
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::{
entity::EntityHashMap, event::event_update_system, prelude::*, system::SystemState,
};
use bevy_image::{Image, TextureFormatPixelInfo};
use bevy_platform::collections::HashSet;
use bevy_reflect::Reflect;
use bevy_tasks::AsyncComputeTaskPool;
use bevy_utils::default;
use bevy_window::{PrimaryWindow, WindowRef};
use core::ops::Deref;
use std::{
path::Path,
sync::{
mpsc::{Receiver, Sender},
Mutex,
},
};
use tracing::{error, info, warn};
use wgpu::{CommandEncoder, Extent3d, TextureFormat};
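/// An observer event carrying the captured screenshot as an [`Image`], triggered on the
/// [`Screenshot`] entity once the capture has been read back from the GPU.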
#[derive(Event, Deref, DerefMut, Reflect, Debug)]
#[reflect(Debug)]
pub struct ScreenshotCaptured(pub Image);
/// A component that signals to the renderer to capture a screenshot this frame.
///
/// This component should be spawned on a new entity with an observer that will trigger
/// with [`ScreenshotCaptured`] when the screenshot is ready.
///
/// Screenshots are captured asynchronously and may not be available immediately after the frame
/// in which the component is spawned. The observer should be used to handle the screenshot when it
/// is ready.
///
/// Note that the screenshot entity will be despawned after the screenshot is captured and the
/// observer is triggered.
///
/// # Usage
///
/// ```
/// # use bevy_ecs::prelude::*;
/// # use bevy_render::view::screenshot::{save_to_disk, Screenshot};
///
/// fn take_screenshot(mut commands: Commands) {
/// commands.spawn(Screenshot::primary_window())
/// .observe(save_to_disk("screenshot.png"));
/// }
/// ```
#[derive(Component, Deref, DerefMut, Reflect, Debug)]
#[reflect(Component, Debug)]
pub struct Screenshot(pub RenderTarget);
/// A marker component that indicates that a screenshot is currently being captured.
#[derive(Component, Default)]
pub struct Capturing;
/// A marker component that indicates that a screenshot has been captured, the image is ready, and
/// the screenshot entity can be despawned.
#[derive(Component, Default)]
pub struct Captured;
impl Screenshot {
/// Capture a screenshot of the provided window entity.
pub fn window(window: Entity) -> Self {
Self(RenderTarget::Window(WindowRef::Entity(window)))
}
/// Capture a screenshot of the primary window, if one exists.
pub fn primary_window() -> Self {
Self(RenderTarget::Window(WindowRef::Primary))
}
/// Capture a screenshot of the provided render target image.
pub fn image(image: Handle<Image>) -> Self {
Self(RenderTarget::Image(image.into()))
}
/// Capture a screenshot of the provided manual texture view.
pub fn texture_view(texture_view: ManualTextureViewHandle) -> Self {
Self(RenderTarget::TextureView(texture_view))
}
}
struct ScreenshotPreparedState {
pub texture: Texture,
pub buffer: Buffer,
pub bind_group: BindGroup,
pub pipeline_id: CachedRenderPipelineId,
pub size: Extent3d,
}
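/// Receiving end of the channel used to move captured screenshots from the render world
/// back to the main world; drained by [`trigger_screenshots`].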
#[derive(Resource, Deref, DerefMut)]
pub struct CapturedScreenshots(pub Arc<Mutex<Receiver<(Entity, Image)>>>);
#[derive(Resource, Deref, DerefMut, Default)]
struct RenderScreenshotTargets(EntityHashMap<NormalizedRenderTarget>);
#[derive(Resource, Deref, DerefMut, Default)]
struct RenderScreenshotsPrepared(EntityHashMap<ScreenshotPreparedState>);
#[derive(Resource, Deref, DerefMut)]
struct RenderScreenshotsSender(Sender<(Entity, Image)>);
/// Saves the captured screenshot to disk at the provided path.
pub fn save_to_disk(path: impl AsRef<Path>) -> impl FnMut(Trigger<ScreenshotCaptured>) {
let path = path.as_ref().to_owned();
move |trigger| {
let img = trigger.event().deref().clone();
match img.try_into_dynamic() {
Ok(dyn_img) => match image::ImageFormat::from_path(&path) {
Ok(format) => {
// Discard the alpha channel, which stores brightness values when HDR is enabled, to make
// sure the screenshot looks right.
let img = dyn_img.to_rgb8();
#[cfg(not(target_arch = "wasm32"))]
match img.save_with_format(&path, format) {
Ok(_) => info!("Screenshot saved to {}", path.display()),
Err(e) => error!("Cannot save screenshot, IO error: {e}"),
}
#[cfg(target_arch = "wasm32")]
{
let save_screenshot = || {
use image::EncodableLayout;
use wasm_bindgen::{JsCast, JsValue};
let mut image_buffer = std::io::Cursor::new(Vec::new());
img.write_to(&mut image_buffer, format)
.map_err(|e| JsValue::from_str(&format!("{e}")))?;
// SAFETY: `image_buffer` only exists in this closure, and is not used after this line
let parts = js_sys::Array::of1(&unsafe {
js_sys::Uint8Array::view(image_buffer.into_inner().as_bytes())
.into()
});
let blob = web_sys::Blob::new_with_u8_array_sequence(&parts)?;
let url = web_sys::Url::create_object_url_with_blob(&blob)?;
let window = web_sys::window().unwrap();
let document = window.document().unwrap();
let link = document.create_element("a")?;
link.set_attribute("href", &url)?;
link.set_attribute(
"download",
path.file_name()
.and_then(|filename| filename.to_str())
.ok_or_else(|| JsValue::from_str("Invalid filename"))?,
)?;
let html_element = link.dyn_into::<web_sys::HtmlElement>()?;
html_element.click();
web_sys::Url::revoke_object_url(&url)?;
Ok::<(), JsValue>(())
};
match (save_screenshot)() {
Ok(_) => info!("Screenshot saved to {}", path.display()),
Err(e) => error!("Cannot save screenshot, error: {e:?}"),
};
}
}
Err(e) => error!("Cannot save screenshot, requested format not recognized: {e}"),
},
Err(e) => error!("Cannot save screenshot, screen format cannot be understood: {e}"),
}
}
}
fn clear_screenshots(mut commands: Commands, screenshots: Query<Entity, With<Captured>>) {
for entity in screenshots.iter() {
commands.entity(entity).despawn();
}
}
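/// Drains screenshots received from the render world, marks their entities as [`Captured`],
/// and triggers [`ScreenshotCaptured`] observers with the resulting image.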
pub fn trigger_screenshots(
mut commands: Commands,
captured_screenshots: ResMut<CapturedScreenshots>,
) {
let captured_screenshots = captured_screenshots.lock().unwrap();
while let Ok((entity, image)) = captured_screenshots.try_recv() {
commands.entity(entity).insert(Captured);
commands.trigger_targets(ScreenshotCaptured(image), entity);
}
}
fn extract_screenshots(
mut targets: ResMut<RenderScreenshotTargets>,
mut main_world: ResMut<MainWorld>,
mut system_state: Local<
Option<
SystemState<(
Commands,
Query<Entity, With<PrimaryWindow>>,
Query<(Entity, &Screenshot), Without<Capturing>>,
)>,
>,
>,
mut seen_targets: Local<HashSet<NormalizedRenderTarget>>,
) {
if system_state.is_none() {
*system_state = Some(SystemState::new(&mut main_world));
}
let system_state = system_state.as_mut().unwrap();
let (mut commands, primary_window, screenshots) = system_state.get_mut(&mut main_world);
targets.clear();
seen_targets.clear();
let primary_window = primary_window.iter().next();
for (entity, screenshot) in screenshots.iter() {
let render_target = screenshot.0.clone();
let Some(render_target) = render_target.normalize(primary_window) else {
warn!(
"Unknown render target for screenshot, skipping: {:?}",
render_target
);
continue;
};
if seen_targets.contains(&render_target) {
warn!(
"Duplicate render target for screenshot, skipping entity {}: {:?}",
entity, render_target
);
// If we don't despawn the entity here, it will be captured again in the next frame
commands.entity(entity).despawn();
continue;
}
seen_targets.insert(render_target.clone());
targets.insert(entity, render_target);
commands.entity(entity).insert(Capturing);
}
system_state.apply(&mut main_world);
}
fn prepare_screenshots(
targets: Res<RenderScreenshotTargets>,
mut prepared: ResMut<RenderScreenshotsPrepared>,
window_surfaces: Res<WindowSurfaces>,
render_device: Res<RenderDevice>,
screenshot_pipeline: Res<ScreenshotToScreenPipeline>,
pipeline_cache: Res<PipelineCache>,
mut pipelines: ResMut<SpecializedRenderPipelines<ScreenshotToScreenPipeline>>,
images: Res<RenderAssets<GpuImage>>,
manual_texture_views: Res<ManualTextureViews>,
mut view_target_attachments: ResMut<ViewTargetAttachments>,
) {
prepared.clear();
for (entity, target) in targets.iter() {
match target {
NormalizedRenderTarget::Window(window) => {
let window = window.entity();
let Some(surface_data) = window_surfaces.surfaces.get(&window) else {
warn!("Unknown window for screenshot, skipping: {}", window);
continue;
};
let format = surface_data.configuration.format.add_srgb_suffix();
let size = Extent3d {
width: surface_data.configuration.width,
height: surface_data.configuration.height,
..default()
};
let (texture_view, state) = prepare_screenshot_state(
size,
format,
&render_device,
&screenshot_pipeline,
&pipeline_cache,
&mut pipelines,
);
prepared.insert(*entity, state);
view_target_attachments.insert(
target.clone(),
OutputColorAttachment::new(texture_view.clone(), format.add_srgb_suffix()),
);
}
NormalizedRenderTarget::Image(image) => {
let Some(gpu_image) = images.get(&image.handle) else {
warn!("Unknown image for screenshot, skipping: {:?}", image);
continue;
};
let format = gpu_image.texture_format;
let (texture_view, state) = prepare_screenshot_state(
gpu_image.size,
format,
&render_device,
&screenshot_pipeline,
&pipeline_cache,
&mut pipelines,
);
prepared.insert(*entity, state);
view_target_attachments.insert(
target.clone(),
OutputColorAttachment::new(texture_view.clone(), format.add_srgb_suffix()),
);
}
NormalizedRenderTarget::TextureView(texture_view) => {
let Some(manual_texture_view) = manual_texture_views.get(texture_view) else {
warn!(
"Unknown manual texture view for screenshot, skipping: {:?}",
texture_view
);
continue;
};
let format = manual_texture_view.format;
let size = Extent3d {
width: manual_texture_view.size.x,
height: manual_texture_view.size.y,
..default()
};
let (texture_view, state) = prepare_screenshot_state(
size,
format,
&render_device,
&screenshot_pipeline,
&pipeline_cache,
&mut pipelines,
);
prepared.insert(*entity, state);
view_target_attachments.insert(
target.clone(),
OutputColorAttachment::new(texture_view.clone(), format.add_srgb_suffix()),
);
}
}
}
}
fn prepare_screenshot_state(
size: Extent3d,
format: TextureFormat,
render_device: &RenderDevice,
pipeline: &ScreenshotToScreenPipeline,
pipeline_cache: &PipelineCache,
pipelines: &mut SpecializedRenderPipelines<ScreenshotToScreenPipeline>,
) -> (TextureView, ScreenshotPreparedState) {
let texture = render_device.create_texture(&wgpu::TextureDescriptor {
label: Some("screenshot-capture-rendertarget"),
size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format,
usage: TextureUsages::RENDER_ATTACHMENT
| TextureUsages::COPY_SRC
| TextureUsages::TEXTURE_BINDING,
view_formats: &[],
});
let texture_view = texture.create_view(&Default::default());
let buffer = render_device.create_buffer(&wgpu::BufferDescriptor {
label: Some("screenshot-transfer-buffer"),
size: gpu_readback::get_aligned_size(size, format.pixel_size() as u32) as u64,
usage: BufferUsages::MAP_READ | BufferUsages::COPY_DST,
mapped_at_creation: false,
});
let bind_group = render_device.create_bind_group(
"screenshot-to-screen-bind-group",
&pipeline.bind_group_layout,
&BindGroupEntries::single(&texture_view),
);
let pipeline_id = pipelines.specialize(pipeline_cache, pipeline, format);
(
texture_view,
ScreenshotPreparedState {
texture,
buffer,
bind_group,
pipeline_id,
size,
},
)
}
pub struct ScreenshotPlugin;
const SCREENSHOT_SHADER_HANDLE: Handle<Shader> =
weak_handle!("c31753d6-326a-47cb-a359-65c97a471fda");
impl Plugin for ScreenshotPlugin {
fn build(&self, app: &mut bevy_app::App) {
app.add_systems(
First,
clear_screenshots
.after(event_update_system)
.before(ApplyDeferred),
)
.register_type::<Screenshot>()
.register_type::<ScreenshotCaptured>();
load_internal_asset!(
app,
SCREENSHOT_SHADER_HANDLE,
"screenshot.wgsl",
Shader::from_wgsl
);
}
fn finish(&self, app: &mut bevy_app::App) {
let (tx, rx) = std::sync::mpsc::channel();
app.add_systems(Update, trigger_screenshots)
.insert_resource(CapturedScreenshots(Arc::new(Mutex::new(rx))));
if let Some(render_app) = app.get_sub_app_mut(RenderApp) {
render_app
.insert_resource(RenderScreenshotsSender(tx))
.init_resource::<RenderScreenshotTargets>()
.init_resource::<RenderScreenshotsPrepared>()
.init_resource::<SpecializedRenderPipelines<ScreenshotToScreenPipeline>>()
.add_systems(ExtractSchedule, extract_screenshots.ambiguous_with_all())
.add_systems(
Render,
prepare_screenshots
.after(prepare_view_attachments)
.before(prepare_view_targets)
.in_set(RenderSet::ManageViews),
);
}
}
}
#[derive(Resource)]
pub struct ScreenshotToScreenPipeline {
pub bind_group_layout: BindGroupLayout,
}
impl FromWorld for ScreenshotToScreenPipeline {
fn from_world(render_world: &mut World) -> Self {
let device = render_world.resource::<RenderDevice>();
let bind_group_layout = device.create_bind_group_layout(
"screenshot-to-screen-bgl",
&BindGroupLayoutEntries::single(
wgpu::ShaderStages::FRAGMENT,
texture_2d(wgpu::TextureSampleType::Float { filterable: false }),
),
);
Self { bind_group_layout }
}
}
impl SpecializedRenderPipeline for ScreenshotToScreenPipeline {
type Key = TextureFormat;
fn specialize(&self, key: Self::Key) -> RenderPipelineDescriptor {
RenderPipelineDescriptor {
label: Some(Cow::Borrowed("screenshot-to-screen")),
layout: vec![self.bind_group_layout.clone()],
vertex: VertexState {
buffers: vec![],
shader_defs: vec![],
entry_point: Cow::Borrowed("vs_main"),
shader: SCREENSHOT_SHADER_HANDLE,
},
primitive: wgpu::PrimitiveState {
cull_mode: Some(wgpu::Face::Back),
..Default::default()
},
depth_stencil: None,
multisample: Default::default(),
fragment: Some(FragmentState {
shader: SCREENSHOT_SHADER_HANDLE,
entry_point: Cow::Borrowed("fs_main"),
shader_defs: vec![],
targets: vec![Some(wgpu::ColorTargetState {
format: key,
blend: None,
write_mask: wgpu::ColorWrites::ALL,
})],
}),
push_constant_ranges: Vec::new(),
zero_initialize_workgroup_memory: false,
}
}
}
pub(crate) fn submit_screenshot_commands(world: &World, encoder: &mut CommandEncoder) {
let targets = world.resource::<RenderScreenshotTargets>();
let prepared = world.resource::<RenderScreenshotsPrepared>();
let pipelines = world.resource::<PipelineCache>();
let gpu_images = world.resource::<RenderAssets<GpuImage>>();
let windows = world.resource::<ExtractedWindows>();
let manual_texture_views = world.resource::<ManualTextureViews>();
for (entity, render_target) in targets.iter() {
match render_target {
NormalizedRenderTarget::Window(window) => {
let window = window.entity();
let Some(window) = windows.get(&window) else {
continue;
};
let width = window.physical_width;
let height = window.physical_height;
let Some(texture_format) = window.swap_chain_texture_format else {
continue;
};
let Some(swap_chain_texture) = window.swap_chain_texture.as_ref() else {
continue;
};
let texture_view = swap_chain_texture.texture.create_view(&Default::default());
render_screenshot(
encoder,
prepared,
pipelines,
entity,
width,
height,
texture_format,
&texture_view,
);
}
NormalizedRenderTarget::Image(image) => {
let Some(gpu_image) = gpu_images.get(&image.handle) else {
warn!("Unknown image for screenshot, skipping: {:?}", image);
continue;
};
let width = gpu_image.size.width;
let height = gpu_image.size.height;
let texture_format = gpu_image.texture_format;
let texture_view = gpu_image.texture_view.deref();
render_screenshot(
encoder,
prepared,
pipelines,
entity,
width,
height,
texture_format,
texture_view,
);
}
NormalizedRenderTarget::TextureView(texture_view) => {
let Some(texture_view) = manual_texture_views.get(texture_view) else {
warn!(
"Unknown manual texture view for screenshot, skipping: {:?}",
texture_view
);
continue;
};
let width = texture_view.size.x;
let height = texture_view.size.y;
let texture_format = texture_view.format;
let texture_view = texture_view.texture_view.deref();
render_screenshot(
encoder,
prepared,
pipelines,
entity,
width,
height,
texture_format,
texture_view,
);
}
};
}
}
fn render_screenshot(
encoder: &mut CommandEncoder,
prepared: &RenderScreenshotsPrepared,
pipelines: &PipelineCache,
entity: &Entity,
width: u32,
height: u32,
texture_format: TextureFormat,
texture_view: &wgpu::TextureView,
) {
if let Some(prepared_state) = prepared.get(entity) {
let extent = Extent3d {
width,
height,
depth_or_array_layers: 1,
};
encoder.copy_texture_to_buffer(
prepared_state.texture.as_image_copy(),
wgpu::TexelCopyBufferInfo {
buffer: &prepared_state.buffer,
layout: gpu_readback::layout_data(extent, texture_format),
},
extent,
);
if let Some(pipeline) = pipelines.get_render_pipeline(prepared_state.pipeline_id) {
let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some("screenshot_to_screen_pass"),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: texture_view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Load,
store: wgpu::StoreOp::Store,
},
})],
depth_stencil_attachment: None,
timestamp_writes: None,
occlusion_query_set: None,
});
pass.set_pipeline(pipeline);
pass.set_bind_group(0, &prepared_state.bind_group, &[]);
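// Draw the single fullscreen triangle emitted by `screenshot.wgsl` (no vertex buffer needed).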
pass.draw(0..3, 0..1);
}
}
}
pub(crate) fn collect_screenshots(world: &mut World) {
#[cfg(feature = "trace")]
let _span = tracing::info_span!("collect_screenshots").entered();
let sender = world.resource::<RenderScreenshotsSender>().deref().clone();
let prepared = world.resource::<RenderScreenshotsPrepared>();
for (entity, prepared) in prepared.iter() {
let entity = *entity;
let sender = sender.clone();
let width = prepared.size.width;
let height = prepared.size.height;
let texture_format = prepared.texture.format();
let pixel_size = texture_format.pixel_size();
let buffer = prepared.buffer.clone();
let finish = async move {
let (tx, rx) = async_channel::bounded(1);
let buffer_slice = buffer.slice(..);
// The polling for this map call is done every frame when the command queue is submitted.
buffer_slice.map_async(wgpu::MapMode::Read, move |result| {
let err = result.err();
if err.is_some() {
panic!("{}", err.unwrap().to_string());
}
tx.try_send(()).unwrap();
});
rx.recv().await.unwrap();
let data = buffer_slice.get_mapped_range();
// we immediately move the data to CPU memory to avoid holding the mapped view for long
let mut result = Vec::from(&*data);
drop(data);
if result.len() != ((width * height) as usize * pixel_size) {
// Our buffer has been padded because we needed to align to a multiple of 256.
// We remove this padding here
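// Each buffered row occupies `align_byte_size(width * pixel_size)` bytes (rounded up to
// wgpu's 256-byte copy alignment), while a packed row only needs `width * pixel_size`
// bytes. For example, a hypothetical 300-pixel-wide RGBA8 image packs to 1200 bytes per
// row but is buffered as 1280 bytes per row. Row 0 is already in place; every later row
// is copied from its padded offset down to its packed offset, and the padded tail is
// truncated afterwards.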
let initial_row_bytes = width as usize * pixel_size;
let buffered_row_bytes =
gpu_readback::align_byte_size(width * pixel_size as u32) as usize;
let mut take_offset = buffered_row_bytes;
let mut place_offset = initial_row_bytes;
for _ in 1..height {
result.copy_within(take_offset..take_offset + buffered_row_bytes, place_offset);
take_offset += buffered_row_bytes;
place_offset += initial_row_bytes;
}
result.truncate(initial_row_bytes * height as usize);
}
if let Err(e) = sender.send((
entity,
Image::new(
Extent3d {
width,
height,
depth_or_array_layers: 1,
},
wgpu::TextureDimension::D2,
result,
texture_format,
RenderAssetUsages::RENDER_WORLD,
),
)) {
error!("Failed to send screenshot: {}", e);
}
};
AsyncComputeTaskPool::get().spawn(finish).detach();
}
}


@@ -0,0 +1,16 @@
// This vertex shader will create a triangle that will cover the entire screen
// with minimal effort, avoiding the need for a vertex buffer etc.
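// For vertex_index 0, 1 and 2 it outputs clip-space positions (-1, -1), (3, -1) and (-1, 3):
// a single oversized triangle whose visible portion covers the whole [-1, 1] viewport.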
@vertex
fn vs_main(@builtin(vertex_index) in_vertex_index: u32) -> @builtin(position) vec4<f32> {
let x = f32((in_vertex_index & 1u) << 2u);
let y = f32((in_vertex_index & 2u) << 1u);
return vec4<f32>(x - 1.0, y - 1.0, 0.0, 1.0);
}
@group(0) @binding(0) var t: texture_2d<f32>;
@fragment
fn fs_main(@builtin(position) pos: vec4<f32>) -> @location(0) vec4<f32> {
let coords = floor(pos.xy);
return textureLoad(t, vec2<i32>(coords), 0i);
}