fix resizing snapshot stretch bug

2025-03-31 23:27:20 -04:00
parent d859412e88
commit bd0c8690b4
9 changed files with 22 additions and 76 deletions

View File

@@ -35,7 +35,6 @@ Snapshots will copy the current texture and let you view it as the new one gener
## Known Bugs
- It crashes when you zoom out too far (relative to the starting view) because the renderer is currently only designed for zooming in, not out. This doesn't matter at all for fractals like the Mandelbrot set, where all the detail is confined to a small circle (radius 2). An easy fix would be to cap the zoom (see the sketch below), but I'm more interested in having the precision expand in the opposite direction as well, for equations that require it (no clue if those exist).
- The snapshot texture gets stretched in some way when you resize the window. I don't know exactly why, because that code was trial-and-errored (I just wanted it to work), but I think I know where the fix would be and it shouldn't be complicated. It's not a critical issue though; it only affects the previously taken snapshot.
## TODO List
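
For reference, the "cap the zoom" workaround mentioned above could look roughly like the sketch below. This is only an illustration: the `Zoom` fields (`level: i32`, `exp: f32`) are taken from the diff further down in this commit, but `MIN_LEVEL` and the `add_assign` body are assumptions, not the project's actual code.

```rust
use std::ops::AddAssign;

const MIN_LEVEL: i32 = 0; // hypothetical cap: never zoom out past the starting view

struct Zoom {
    level: i32, // whole zoom steps
    exp: f32,   // fractional part of the current step
}

impl AddAssign<f32> for Zoom {
    fn add_assign(&mut self, delta: f32) {
        self.exp += delta;
        // carry whole steps of `exp` into `level`
        let carry = self.exp.floor() as i32;
        self.exp -= carry as f32;
        self.level += carry;
        // the cap: refuse to drop below the starting level
        if self.level < MIN_LEVEL {
            self.level = MIN_LEVEL;
            self.exp = 0.0;
        }
    }
}

fn main() {
    let mut z = Zoom { level: 3, exp: 0.0 };
    z += -5.25; // try to zoom far out...
    assert_eq!(z.level, MIN_LEVEL); // ...but the cap holds
}
```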

View File

@@ -1,5 +1,5 @@
use nalgebra::Vector2;
use std::ops::{AddAssign, Neg};
use std::ops::AddAssign;
use crate::util::FixedDec;
@@ -88,9 +88,6 @@ impl Zoom {
pub fn level(&self) -> i32 {
self.level
}
pub fn rel_zoom(&self) -> f32 {
self.exp.exp2()
}
}
impl AddAssign<f32> for Zoom {

View File

@@ -10,7 +10,6 @@ use super::{
pub struct Layout {
bind_layout: wgpu::BindGroupLayout,
pipeline_layout: wgpu::PipelineLayout,
format: wgpu::TextureFormat,
pub output: Texture,
pub view: Storage,
pub work: ArrayBuffer<u32>,
@@ -81,7 +80,6 @@ impl Layout {
bind_layout,
pipeline_layout,
work,
format: config.format,
}
}

View File

@@ -132,7 +132,7 @@ impl Renderer<'_> {
camera,
self.len,
);
self.chunk_view.update(camera, &self.size, snapshot);
self.chunk_view.update(camera, snapshot);
self.render_pipeline.update(
&self.device,
&mut self.encoder,
@@ -168,7 +168,7 @@ impl Renderer<'_> {
self.compute_pipeline
.resize(&self.device, self.size, self.len);
self.render_pipeline
.resize(&self.device, self.size, &self.compute_pipeline.output);
.resize(&self.device, &self.compute_pipeline.output);
}
fn create_encoder(device: &wgpu::Device) -> wgpu::CommandEncoder {

View File

@@ -1,13 +1,12 @@
use nalgebra::Vector2;
use super::{Camera, CHUNK_WIDTH};
use super::Camera;
#[repr(C, align(8))]
#[derive(Clone, Copy, Default, PartialEq)]
pub struct WindowView {
pub stretch: Vector2<f32>,
pub pos: Vector2<f32>,
pub rendered_chunks: Vector2<u32>,
pub snapshot: u32,
}
@@ -15,40 +14,16 @@ unsafe impl bytemuck::Pod for WindowView {}
unsafe impl bytemuck::Zeroable for WindowView {}
impl WindowView {
pub fn from_camera_size(
camera: &Camera,
ss_cam: Option<&Camera>,
size: &Vector2<u32>,
snapshot: bool,
) -> Self {
// TODO: most of this is useless and just preparation for chunked textures if I add them
let visible_chunks = (size * 2 / CHUNK_WIDTH).add_scalar(1);
let rendered_chunks = Vector2::new(
visible_chunks.x.next_power_of_two(),
visible_chunks.y.next_power_of_two(),
);
// let adj_zoom = camera.zoom.level() - CHUNK_POW as i32;
// let pos = camera.pos.zip_map(&rendered_chunks, |pos, rc| {
// let p = (pos << adj_zoom).with_lens(1, 1);
// let (pw, pd) = p.split_whole_dec();
// let mut chunk = (pw.parts().first().unwrap_or(&0) & (rc - 1)) as f32;
// if pw.is_neg() {
// chunk = rc as f32 - chunk;
// }
// let dec = f32::from(pd);
// chunk + dec
// });
//
// let stretch = size.cast::<f32>() * camera.zoom.rel_zoom() / (CHUNK_WIDTH as f32);
pub fn from_camera(camera: &Camera, ss_cam: Option<&Camera>, snapshot: bool) -> Self {
let (pos, stretch) = if let Some(ss_cam) = ss_cam {
let aspect = camera.inv_stretch() * 2.0;
let s = camera.zoom.mult() * ss_cam.zoom.inv_mult();
let s_mult = camera.stretch().component_div(&ss_cam.stretch());
let aspect = camera.inv_stretch().component_mul(&s_mult) * 2.0;
let s = s_mult * f32::from(camera.zoom.mult() * ss_cam.zoom.inv_mult());
(
((&camera.pos - &ss_cam.pos) * ss_cam.zoom.inv_mult().clone())
.map(f32::from)
.component_mul(&aspect),
Vector2::from_element(f32::from(s)),
s,
)
} else {
(Vector2::default(), Vector2::default())
@@ -57,7 +32,6 @@ impl WindowView {
Self {
pos,
stretch,
rendered_chunks,
snapshot: snapshot as u32,
}
}
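
As an aside, the core of the fix in this hunk can be summarized in a small standalone sketch: the snapshot quad's stretch is the component-wise ratio of the current window stretch to the stretch at snapshot time, scaled by the zoom ratio, so resizing the window after a snapshot no longer distorts the stored texture. The free function and the demo values below are hypothetical; only the `component_div` formula mirrors what `from_camera` does above.

```rust
use nalgebra::Vector2;

// stretch = (current window stretch / stretch at snapshot time) * zoom ratio
fn snapshot_stretch(
    cur_stretch: Vector2<f32>, // cf. camera.stretch() now
    ss_stretch: Vector2<f32>,  // cf. camera.stretch() when the snapshot was taken
    zoom_ratio: f32,           // cf. camera.zoom.mult() * ss_cam.zoom.inv_mult()
) -> Vector2<f32> {
    cur_stretch.component_div(&ss_stretch) * zoom_ratio
}

fn main() {
    // Square window at snapshot time, later resized to a 2:1 aspect ratio:
    // the snapshot quad gets stretched twice as much on x to compensate.
    let s = snapshot_stretch(Vector2::new(2.0, 1.0), Vector2::new(1.0, 1.0), 1.0);
    assert_eq!(s, Vector2::new(2.0, 1.0));
}
```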

View File

@@ -1,5 +1,3 @@
use nalgebra::Vector2;
use crate::client::render::util::Texture;
use super::{
@@ -22,7 +20,6 @@ impl Layout {
pub fn init(
device: &wgpu::Device,
config: &wgpu::SurfaceConfiguration,
input: &Texture,
) -> Self {
let view = Storage::init_with(device, "view", bytemuck::bytes_of(&WindowView::default()));

View File

@@ -12,7 +12,6 @@ pub struct RenderPipeline {
layout: Layout,
pipeline: wgpu::RenderPipeline,
bind_group: wgpu::BindGroup,
size: Vector2<u32>,
}
const SHADER: wgpu::ShaderModuleDescriptor<'_> = include_wgsl!("shader.wgsl");
@@ -23,12 +22,11 @@ impl RenderPipeline {
config: &wgpu::SurfaceConfiguration,
input: &Texture,
) -> Self {
let layout = Layout::init(device, config, input);
let layout = Layout::init(device, config);
let shader = device.create_shader_module(SHADER);
Self {
pipeline: layout.pipeline(device, &shader),
bind_group: layout.bind_group(device, input),
size: Vector2::zeros(),
layout,
}
}
@@ -92,7 +90,7 @@ impl RenderPipeline {
render_pass.draw(0..4, 0..1);
}
pub fn resize(&mut self, device: &wgpu::Device, size: Vector2<u32>, input: &Texture) {
pub fn resize(&mut self, device: &wgpu::Device, input: &Texture) {
self.bind_group = self.layout.bind_group(device, input);
}
}

View File

@@ -1,7 +1,6 @@
struct View {
stretch: vec2<f32>,
pos: vec2<f32>,
rendered_chunks: vec2<u32>,
ss_stretch: vec2<f32>,
ss_pos: vec2<f32>,
snapshot: u32,
}
@@ -32,18 +31,15 @@ fn vs_main(
) -> VertexOutput {
var out: VertexOutput;
let pos = vec2<f32>(
f32(vi % 2u),
f32(1 - vi / 2u),
) * 2.0 - 1.0;
out.vertex_pos = vec4<f32>(pos.x, pos.y, 0.0, 1.0);
let pos2 = vec2<f32>(
let tpos = vec2<f32>(
f32(vi % 2u),
f32(1 - vi / 2u),
);
out.tex_pos = pos2;
out.ss_pos = pos * view.stretch + view.pos;
let vpos = tpos * 2.0 - 1.0;
out.vertex_pos = vec4<f32>(vpos, 0.0, 1.0);
out.tex_pos = tpos;
out.ss_pos = vpos * view.ss_stretch + view.ss_pos;
out.ss_pos = (out.ss_pos + 1.0) / 2.0;
return out;
@@ -63,13 +59,3 @@ fn fs_main(
}
}
fn div_euclid(x: i32, y: i32) -> i32 {
if x < 0 {
return -((-x - 1) / y) - 1;
}
return x / y;
}
fn rem_euclid(x: i32, y: i32) -> i32 {
return x - div_euclid(x, y) * y;
}

View File

@@ -1,11 +1,8 @@
use std::collections::HashSet;
use nalgebra::Vector2;
use crate::client::camera::Camera;
use super::output::WindowView;
// TODO: move this out; this is not needed rn
#[derive(Default)]
pub struct ChunkView {
pub render: WindowView,
@@ -17,11 +14,11 @@ impl ChunkView {
Self::default()
}
pub fn update(&mut self, camera: &Camera, size: &Vector2<u32>, snapshot: bool) {
pub fn update(&mut self, camera: &Camera, snapshot: bool) {
if snapshot {
self.snapshot = Some(camera.clone());
}
let render = WindowView::from_camera_size(camera, self.snapshot.as_ref(), size, snapshot);
let render = WindowView::from_camera(camera, self.snapshot.as_ref(), snapshot);
if self.render == render {
return;