moved renderer to separate thread, add evenio and start structure, make it so you can add stuff

2024-06-11 01:47:05 -04:00
parent 7d48ac5a9c
commit 149c5a2659
35 changed files with 1125 additions and 487 deletions

View File

@@ -1,8 +1,183 @@
mod buf;
mod instance;
mod renderer;
mod storage;
mod thread;
mod util;
pub mod voxel;
mod uniform;
pub use renderer::*;
pub use thread::*;
use super::camera::Camera;
use crate::client::rsc::{CLEAR_COLOR, FRAME_TIME};
use nalgebra::Vector2;
use smaa::{SmaaMode, SmaaTarget};
use std::{
sync::Arc,
time::{Duration, Instant},
};
use voxel::VoxelPipeline;
use winit::{
dpi::PhysicalSize,
window::{Fullscreen, Window},
};
pub struct Renderer<'a> {
size: Vector2<u32>,
surface: wgpu::Surface<'a>,
device: wgpu::Device,
queue: wgpu::Queue,
config: wgpu::SurfaceConfiguration,
staging_belt: wgpu::util::StagingBelt,
voxel_pipeline: VoxelPipeline,
smaa_target: SmaaTarget,
camera: Camera,
frame_time: Duration,
target: Instant,
}
impl<'a> Renderer<'a> {
pub fn new(window: Arc<Window>) -> Self {
let fullscreen = false;
if fullscreen {
window.set_fullscreen(Some(Fullscreen::Borderless(None)));
}
let size = window.inner_size();
let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
backends: wgpu::Backends::PRIMARY,
..Default::default()
});
let surface = instance
.create_surface(window)
.expect("Could not create window surface!");
let adapter = pollster::block_on(instance.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::default(),
compatible_surface: Some(&surface),
force_fallback_adapter: false,
}))
.expect("Could not get adapter!");
let (device, queue) = pollster::block_on(adapter.request_device(
&wgpu::DeviceDescriptor {
label: None,
required_features: wgpu::Features::empty(),
required_limits: wgpu::Limits::default(),
},
None, // Trace path
))
.expect("Could not get device!");
// TODO: use a logger
let info = adapter.get_info();
println!("Adapter: {}", info.name);
println!("Backend: {:?}", info.backend);
let surface_caps = surface.get_capabilities(&adapter);
// Set surface format to sRGB
let surface_format = surface_caps
.formats
.iter()
.copied()
.find(|f| f.is_srgb())
.unwrap_or(surface_caps.formats[0]);
// create surface config
let config = wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format: surface_format,
width: size.width,
height: size.height,
present_mode: surface_caps.present_modes[0],
alpha_mode: surface_caps.alpha_modes[0],
view_formats: vec![],
desired_maximum_frame_latency: 2,
};
surface.configure(&device, &config);
// Not exactly sure what this number should be; it doesn't affect
// performance much and depends on the "normal" zoom.
let staging_belt = wgpu::util::StagingBelt::new(4096 * 4);
let smaa_target = SmaaTarget::new(
&device,
&queue,
size.width,
size.height,
surface_format,
SmaaMode::Smaa1X,
);
Self {
camera: Camera::default(),
size: Vector2::new(size.width, size.height),
voxel_pipeline: VoxelPipeline::new(&device, &config.format),
staging_belt,
surface,
device,
config,
queue,
smaa_target,
frame_time: FRAME_TIME,
target: Instant::now(),
}
}
fn create_encoder(&mut self) -> wgpu::CommandEncoder {
self.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
})
}
pub fn draw(&mut self, encoder: &mut wgpu::CommandEncoder) {
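// Take the encoder that has accumulated staging-belt copies and buffer updates,
// leave a fresh one in its place, and record this frame's render pass into the
// accumulated encoder so everything is submitted together.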
let mut encoder = std::mem::replace(encoder, self.create_encoder());
let output = self.surface.get_current_texture().unwrap();
let view = output
.texture
.create_view(&wgpu::TextureViewDescriptor::default());
let smaa_frame = self
.smaa_target
.start_frame(&self.device, &self.queue, &view);
{
let render_pass = &mut encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some("Render Pass"),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: &smaa_frame,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(CLEAR_COLOR),
store: wgpu::StoreOp::Store,
},
})],
depth_stencil_attachment: None,
timestamp_writes: None,
occlusion_query_set: None,
});
self.voxel_pipeline.draw(render_pass);
}
smaa_frame.resolve();
self.staging_belt.finish();
self.queue.submit(std::iter::once(encoder.finish()));
output.present();
self.staging_belt.recall();
}
pub fn resize(&mut self, size: PhysicalSize<u32>, encoder: &mut wgpu::CommandEncoder) {
self.size = Vector2::new(size.width, size.height);
self.config.width = size.width;
self.config.height = size.height;
self.surface.configure(&self.device, &self.config);
self.smaa_target
.resize(&self.device, size.width, size.height);
self.voxel_pipeline.update_view(
&self.device,
encoder,
&mut self.staging_belt,
self.size,
&self.camera,
);
}
}

View File

@@ -1,184 +0,0 @@
use smaa::{SmaaTarget, SmaaMode};
use super::voxel::VoxelPipeline;
use crate::client::{rsc::CLEAR_COLOR, ClientState};
use std::sync::Arc;
use winit::{
dpi::PhysicalSize,
window::{Fullscreen, Window},
};
pub struct Renderer<'a> {
size: PhysicalSize<u32>,
surface: wgpu::Surface<'a>,
device: wgpu::Device,
queue: wgpu::Queue,
config: wgpu::SurfaceConfiguration,
adapter: wgpu::Adapter,
encoder: Option<wgpu::CommandEncoder>,
staging_belt: wgpu::util::StagingBelt,
voxel_pipeline: VoxelPipeline,
smaa_target: SmaaTarget,
}
impl<'a> Renderer<'a> {
pub fn new(window: Arc<Window>, fullscreen: bool) -> Self {
if fullscreen {
window.set_fullscreen(Some(Fullscreen::Borderless(None)));
}
let size = window.inner_size();
let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
backends: wgpu::Backends::PRIMARY,
..Default::default()
});
let surface = instance
.create_surface(window)
.expect("Could not create window surface!");
let adapter = pollster::block_on(instance.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::default(),
compatible_surface: Some(&surface),
force_fallback_adapter: false,
}))
.expect("Could not get adapter!");
let (device, queue) = pollster::block_on(adapter.request_device(
&wgpu::DeviceDescriptor {
label: None,
required_features: wgpu::Features::empty(),
required_limits: wgpu::Limits::default(),
},
None, // Trace path
))
.expect("Could not get device!");
// TODO: use a logger
let info = adapter.get_info();
println!("Adapter: {}", info.name);
println!("Backend: {:?}", info.backend);
let surface_caps = surface.get_capabilities(&adapter);
// Set surface format to sRGB
let surface_format = surface_caps
.formats
.iter()
.copied()
.find(|f| f.is_srgb())
.unwrap_or(surface_caps.formats[0]);
// create surface config
let config = wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format: surface_format,
width: size.width,
height: size.height,
present_mode: surface_caps.present_modes[0],
alpha_mode: surface_caps.alpha_modes[0],
view_formats: vec![],
desired_maximum_frame_latency: 2,
};
surface.configure(&device, &config);
// Not exactly sure what this number should be; it doesn't affect
// performance much and depends on the "normal" zoom.
let staging_belt = wgpu::util::StagingBelt::new(4096 * 4);
let smaa_target = SmaaTarget::new(
&device,
&queue,
size.width,
size.height,
surface_format,
SmaaMode::Smaa1X,
);
Self {
size,
voxel_pipeline: VoxelPipeline::new(&device, &config.format),
encoder: None,
staging_belt,
surface,
device,
adapter,
config,
queue,
smaa_target,
}
}
fn create_encoder(&mut self) -> wgpu::CommandEncoder {
self.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
})
}
pub fn draw(&mut self) {
let output = self.surface.get_current_texture().unwrap();
let view = output
.texture
.create_view(&wgpu::TextureViewDescriptor::default());
let mut encoder = self.encoder.take().unwrap_or(self.create_encoder());
let smaa_frame = self.smaa_target.start_frame(&self.device, &self.queue, &view);
{
let render_pass = &mut encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some("Render Pass"),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: &smaa_frame,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(CLEAR_COLOR),
store: wgpu::StoreOp::Store,
},
})],
depth_stencil_attachment: None,
timestamp_writes: None,
occlusion_query_set: None,
});
self.voxel_pipeline.draw(render_pass);
}
smaa_frame.resolve();
self.staging_belt.finish();
self.queue.submit(std::iter::once(encoder.finish()));
output.present();
self.staging_belt.recall();
}
pub fn update(&mut self, state: &ClientState) {
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
self.voxel_pipeline.update(
&self.device,
&mut encoder,
&mut self.staging_belt,
&RenderUpdateData {
state,
size: &self.size,
},
);
self.encoder = Some(encoder);
}
pub fn resize(&mut self, size: PhysicalSize<u32>) {
self.size = size;
self.config.width = size.width;
self.config.height = size.height;
self.surface.configure(&self.device, &self.config);
self.smaa_target.resize(&self.device, size.width, size.height);
}
pub fn size(&self) -> &PhysicalSize<u32> {
&self.size
}
}
pub struct RenderUpdateData<'a> {
pub state: &'a ClientState,
pub size: &'a PhysicalSize<u32>,
}

View File

@@ -0,0 +1,91 @@
use crate::client::camera::Camera;
use super::{voxel::VoxelColor, Renderer};
use nalgebra::{Rotation3, Vector3};
use std::{
sync::{
mpsc::{channel, Receiver, Sender},
Arc,
},
thread::JoinHandle,
time::Instant,
};
use winit::{dpi::PhysicalSize, window::Window};
#[derive(Debug)]
pub enum RenderMessage {
Resize(PhysicalSize<u32>),
Draw,
CreateVoxelGrid(CreateVoxelGrid),
ViewUpdate(Camera),
Exit,
}
pub type RendererChannel = Sender<RenderMessage>;
#[derive(Debug)]
pub struct CreateVoxelGrid {
pub pos: Vector3<f32>,
pub orientation: Rotation3<f32>,
pub dimensions: Vector3<usize>,
pub grid: Vec<VoxelColor>,
}
impl Renderer<'_> {
pub fn spawn(window: Arc<Window>) -> (RendererChannel, JoinHandle<()>) {
let (s, mut r) = channel();
(
s,
std::thread::spawn(move || {
Self::new(window.clone()).start(&mut r);
}),
)
}
pub fn start(&mut self, receiver: &mut Receiver<RenderMessage>) {
let mut encoder = self.create_encoder();
let mut new_camera = false;
'main: loop {
let now = Instant::now();
while let Ok(msg) = receiver.try_recv() {
match msg {
RenderMessage::CreateVoxelGrid(desc) => {
self.voxel_pipeline.add_group(
&self.device,
&mut encoder,
&mut self.staging_belt,
desc,
);
}
RenderMessage::Draw => {
self.draw(&mut encoder);
}
RenderMessage::Resize(size) => {
self.resize(size, &mut encoder);
}
RenderMessage::Exit => {
break 'main;
}
RenderMessage::ViewUpdate(camera) => {
new_camera = true;
self.camera = camera;
}
}
}
if now >= self.target {
self.target = now + self.frame_time;
if new_camera {
self.voxel_pipeline.update_view(
&self.device,
&mut encoder,
&mut self.staging_belt,
self.size,
&self.camera,
);
new_camera = false;
}
self.draw(&mut encoder);
}
}
}
}
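For context, a minimal sketch of how the window/event-loop side might drive this channel; the function and the render_tx name are assumptions, only Renderer::spawn and RenderMessage come from this commit:

use std::sync::Arc;
use winit::window::Window;

// hypothetical caller on the main/window thread
fn run_windowed(window: Arc<Window>) {
    // spawn the render thread; it owns all wgpu state from here on
    let (render_tx, render_thread) = Renderer::spawn(window.clone());

    // forward window events as messages instead of touching the renderer directly
    render_tx.send(RenderMessage::Resize(window.inner_size())).unwrap();
    render_tx.send(RenderMessage::Draw).unwrap();

    // shut down: break the render loop, then join the thread
    render_tx.send(RenderMessage::Exit).unwrap();
    render_thread.join().unwrap();
}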

View File

@@ -53,13 +53,11 @@ impl<T: bytemuck::Pod> ArrBuf<T> {
&self.buffer,
(update.offset * std::mem::size_of::<T>()) as BufferAddress,
unsafe {
std::num::NonZeroU64::new_unchecked(
(update.data.len() * std::mem::size_of::<T>()) as u64,
)
std::num::NonZeroU64::new_unchecked(std::mem::size_of_val(update.data) as u64)
},
device,
);
view.copy_from_slice(bytemuck::cast_slice(&update.data));
view.copy_from_slice(bytemuck::cast_slice(update.data));
}
resized
}
@@ -100,11 +98,15 @@ impl<T: bytemuck::Pod> ArrBuf<T> {
pub fn mov(&mut self, mov: BufMove) {
self.moves.push(mov);
}
pub fn len(&self) -> usize {
self.len
}
}
pub struct ArrBufUpdate<T> {
pub struct ArrBufUpdate<'a, T> {
pub offset: usize,
pub data: Vec<T>,
pub data: &'a [T],
}
#[derive(Clone, Copy, Debug)]
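Since ArrBufUpdate<'a, T> now borrows its data instead of owning a Vec<T>, updates can be built straight from an existing slice; a minimal sketch (the helper name is hypothetical, only the struct shape comes from this commit):

// build a one-element update batch that borrows the caller's slice;
// ArrBuf::update sizes the staging-belt write with size_of_val(update.data)
fn stage_colors<'a>(colors: &'a [VoxelColor]) -> [ArrBufUpdate<'a, VoxelColor>; 1] {
    [ArrBufUpdate {
        offset: 0,     // element offset into the GPU-side buffer
        data: colors,  // borrowed for the duration of the update call
    }]
}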

View File

@@ -0,0 +1,9 @@
mod buf;
mod instance;
mod storage;
mod uniform;
pub use buf::*;
pub use instance::*;
pub use storage::*;
pub use uniform::*;

View File

@@ -52,4 +52,8 @@ impl<T: PartialEq + bytemuck::Pod> Storage<T> {
pub fn mov(&mut self, mov: BufMove) {
self.buf.mov(mov);
}
pub fn len(&mut self) -> usize {
self.buf.len()
}
}

View File

@@ -1,33 +1,28 @@
use std::marker::PhantomData;
use wgpu::util::DeviceExt;
use super::RenderUpdateData;
pub trait UniformData {
fn update(&mut self, data: &RenderUpdateData) -> bool;
}
pub struct Uniform<T: bytemuck::Pod + PartialEq + UniformData> {
data: T,
pub struct Uniform<T: bytemuck::Pod> {
buffer: wgpu::Buffer,
binding: u32,
ty: PhantomData<T>,
}
impl<T: Default + PartialEq + bytemuck::Pod + UniformData> Uniform<T> {
impl<T: Default + bytemuck::Pod> Uniform<T> {
pub fn init(device: &wgpu::Device, name: &str, binding: u32) -> Self {
let data = T::default();
Self {
data,
buffer: device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(&(name.to_owned() + " Uniform Buf")),
contents: bytemuck::cast_slice(&[data]),
contents: bytemuck::cast_slice(&[T::default()]),
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
}),
binding,
ty: PhantomData,
}
}
}
impl<T: PartialEq + bytemuck::Pod + UniformData> Uniform<T> {
impl<T: PartialEq + bytemuck::Pod> Uniform<T> {
pub fn bind_group_layout_entry(&self) -> wgpu::BindGroupLayoutEntry {
wgpu::BindGroupLayoutEntry {
binding: self.binding,
@@ -51,22 +46,18 @@ impl<T: PartialEq + bytemuck::Pod + UniformData> Uniform<T> {
device: &wgpu::Device,
encoder: &mut wgpu::CommandEncoder,
belt: &mut wgpu::util::StagingBelt,
update_data: &RenderUpdateData,
data: T,
) {
if self.data.update(update_data) {
let slice = &[self.data];
let mut view = belt.write_buffer(
encoder,
&self.buffer,
0,
unsafe {
std::num::NonZeroU64::new_unchecked(
(slice.len() * std::mem::size_of::<T>()) as u64,
)
},
device,
);
view.copy_from_slice(bytemuck::cast_slice(slice));
}
let slice = &[data];
let mut view = belt.write_buffer(
encoder,
&self.buffer,
0,
unsafe {
std::num::NonZeroU64::new_unchecked((slice.len() * std::mem::size_of::<T>()) as u64)
},
device,
);
view.copy_from_slice(bytemuck::cast_slice(slice));
}
}
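With UniformData gone, change detection moves to the caller (the new_camera flag in the render loop) and update simply writes whatever value it is handed; a minimal sketch with a hypothetical Pod type, assuming update keeps the (device, encoder, belt, data) parameters shown above:

// any Default + PartialEq + Pod value can back a Uniform now; no trait impl needed
#[repr(C)]
#[derive(Clone, Copy, PartialEq, Default, bytemuck::Zeroable, bytemuck::Pod)]
struct TimeUniform {
    seconds: f32,
    _pad: [f32; 3], // keep the uniform block 16-byte aligned
}

// the Uniform is created elsewhere with Uniform::<TimeUniform>::init(&device, "time", binding)
fn push_time(
    time: &mut Uniform<TimeUniform>,
    device: &wgpu::Device,
    encoder: &mut wgpu::CommandEncoder,
    belt: &mut wgpu::util::StagingBelt,
    seconds: f32,
) {
    // the caller decides when the value changed; update writes it wholesale
    time.update(device, encoder, belt, TimeUniform { seconds, _pad: [0.0; 3] });
}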

View File

@@ -1,7 +1,189 @@
mod grid;
mod view;
mod pipeline;
mod color;
mod group;
pub use pipeline::*;
pub use color::*;
use nalgebra::{Projective3, Transform3, Translation3, Vector2};
use {group::VoxelGroup, view::View};
use crate::client::{
camera::Camera,
render::{
util::{ArrBufUpdate, Storage, Uniform},
CreateVoxelGrid,
},
};
pub struct VoxelPipeline {
pipeline: wgpu::RenderPipeline,
view: Uniform<View>,
bind_group_layout: wgpu::BindGroupLayout,
bind_group: wgpu::BindGroup,
voxel_groups: Storage<VoxelGroup>,
voxels: Storage<VoxelColor>,
}
impl VoxelPipeline {
pub fn new(device: &wgpu::Device, format: &wgpu::TextureFormat) -> Self {
// shaders
let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("Tile Shader"),
source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()),
});
let view = Uniform::<View>::init(device, "view", 0);
let voxels = Storage::init(device, "voxels", 1);
let voxel_groups = Storage::init(device, "voxel groups", 2);
// bind groups
let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
view.bind_group_layout_entry(),
voxels.bind_group_layout_entry(),
voxel_groups.bind_group_layout_entry(),
],
label: Some("tile_bind_group_layout"),
});
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &bind_group_layout,
entries: &[
view.bind_group_entry(),
voxels.bind_group_entry(),
voxel_groups.bind_group_entry(),
],
label: Some("tile_bind_group"),
});
// pipeline
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Tile Pipeline Layout"),
bind_group_layouts: &[&bind_group_layout],
push_constant_ranges: &[],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Voxel Pipeline"),
layout: Some(&render_pipeline_layout),
vertex: wgpu::VertexState {
module: &shader,
entry_point: "vs_main",
buffers: &[],
compilation_options: wgpu::PipelineCompilationOptions::default(),
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: "fs_main",
targets: &[Some(wgpu::ColorTargetState {
format: *format,
blend: Some(wgpu::BlendState::REPLACE),
write_mask: wgpu::ColorWrites::ALL,
})],
compilation_options: wgpu::PipelineCompilationOptions::default(),
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleStrip,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: None,
polygon_mode: wgpu::PolygonMode::Fill,
unclipped_depth: false,
conservative: false,
},
depth_stencil: None,
multisample: wgpu::MultisampleState {
count: 1,
mask: !0,
alpha_to_coverage_enabled: true,
},
multiview: None,
});
Self {
pipeline: render_pipeline,
view,
bind_group,
bind_group_layout,
voxels,
voxel_groups,
}
}
pub fn add_group(
&mut self,
device: &wgpu::Device,
encoder: &mut wgpu::CommandEncoder,
belt: &mut wgpu::util::StagingBelt,
CreateVoxelGrid {
pos,
orientation,
dimensions,
grid,
}: CreateVoxelGrid,
) {
let offset = self.voxels.len();
let updates = [ArrBufUpdate {
offset,
data: &grid,
}];
let size = offset + grid.len();
self.voxels.update(device, encoder, belt, size, &updates);
let proj = Projective3::identity()
* Translation3::from(pos)
* orientation
* Translation3::from(-dimensions.cast() / 2.0);
let group = VoxelGroup {
transform: proj,
transform_inv: proj.inverse(),
dimensions: dimensions.cast(),
offset: offset as u32,
};
let updates = [ArrBufUpdate {
offset: self.voxel_groups.len(),
data: &[group],
}];
let size = self.voxel_groups.len() + 1;
self.voxel_groups
.update(device, encoder, belt, size, &updates);
self.bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &self.bind_group_layout,
entries: &[
self.view.bind_group_entry(),
self.voxels.bind_group_entry(),
self.voxel_groups.bind_group_entry(),
],
label: Some("tile_bind_group"),
});
}
pub fn update_view(
&mut self,
device: &wgpu::Device,
encoder: &mut wgpu::CommandEncoder,
belt: &mut wgpu::util::StagingBelt,
size: Vector2<u32>,
camera: &Camera,
) {
let transform =
Transform3::identity() * Translation3::from(camera.pos) * camera.orientation;
let data = View {
width: size.x,
height: size.y,
zoom: camera.scale,
padding: 0,
transform,
};
self.view.update(device, encoder, belt, data)
}
pub fn draw<'a>(&'a self, render_pass: &mut wgpu::RenderPass<'a>) {
render_pass.set_pipeline(&self.pipeline);
render_pass.set_bind_group(0, &self.bind_group, &[]);
render_pass.draw(0..4, 0..1);
}
}

View File

@@ -167,7 +167,7 @@ fn apply_group(
var depth = 0u;
var prev_a = 0.0;
loop {
let i = u32(vox_pos.x + vox_pos.y * dim_i.x + vox_pos.z * dim_i.x * dim_i.y) + group.offset;
let i = u32(vox_pos.x * dim_i.y * dim_i.z + vox_pos.y * dim_i.z + vox_pos.z) + group.offset;
var vcolor = unpack4x8unorm(voxels[i]);
let normal = next_normal;
@@ -204,7 +204,7 @@ fn apply_group(
// lighting
let light = trace_light(full_pos);
let diffuse = max(dot(norm_light, normal) * 1.3 + 0.1, 0.0);
let diffuse = max(dot(norm_light, normal) * ((dot(dir_view.xyz, normal) + 1.0) / 2.0 * .7 + .3) * 1.3 + 0.1, 0.0);
let ambient = 0.2;
let specular = (exp(max(
-(dot(reflect(dir_view.xyz, normal), norm_light) + 0.90) * 4.0, 0.0
@@ -297,7 +297,7 @@ fn trace_one(gi: u32, pos_view: vec4<f32>, dir_view: vec4<f32>) -> vec4<f32> {
var next_t = inc_t * abs(pos - corner);
var color = vec4<f32>(0.0);
loop {
let i = u32(vox_pos.x + vox_pos.y * dim_i.x + vox_pos.z * dim_i.x * dim_i.y) + group.offset;
let i = u32(vox_pos.x * dim_i.y * dim_i.z + vox_pos.y * dim_i.z + vox_pos.z) + group.offset;
var vcolor = unpack4x8unorm(voxels[i]);
// select next voxel to move to next based on least time
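The index change in both loops flips the flattening from x-fastest (x + y*dx + z*dx*dy) to z-fastest (x*dy*dz + y*dz + z), so the grid handed to CreateVoxelGrid has to be laid out the same way; a minimal CPU-side sketch, where the helper and fill pattern are illustrative and only the index formula comes from the shader:

use nalgebra::Vector3;

// z varies fastest, then y, then x: index = x*dy*dz + y*dz + z
fn solid_grid(dimensions: Vector3<usize>, color: VoxelColor) -> Vec<VoxelColor> {
    let (dx, dy, dz) = (dimensions.x, dimensions.y, dimensions.z);
    let mut grid = vec![VoxelColor::none(); dx * dy * dz];
    for x in 0..dx {
        for y in 0..dy {
            for z in 0..dz {
                grid[x * dy * dz + y * dz + z] = color;
            }
        }
    }
    grid
}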

View File

@@ -1,6 +1,4 @@
use nalgebra::{Transform3, Translation3};
use crate::client::render::uniform::UniformData;
use nalgebra::Transform3;
#[repr(C, align(16))]
#[derive(Clone, Copy, PartialEq, bytemuck::Zeroable)]
@@ -25,26 +23,3 @@ impl Default for View {
}
}
}
impl UniformData for View {
fn update(&mut self, data: &crate::client::render::RenderUpdateData) -> bool {
let camera = data.state.camera;
let new = Transform3::identity() * Translation3::from(camera.pos) * camera.orientation;
if new == self.transform
&& data.size.width == self.width
&& data.size.height == self.height
&& camera.scale == self.zoom
{
false
} else {
*self = Self {
width: data.size.width,
height: data.size.height,
zoom: camera.scale,
padding: 0,
transform: new,
};
true
}
}
}

View File

@@ -0,0 +1,48 @@
use rand::distributions::{Distribution, Standard};
#[repr(C)]
#[derive(Debug, Clone, Copy, PartialEq, bytemuck::Zeroable, bytemuck::Pod)]
pub struct VoxelColor {
pub r: u8,
pub g: u8,
pub b: u8,
pub a: u8,
}
impl VoxelColor {
pub fn none() -> Self {
Self {
r: 0,
g: 0,
b: 0,
a: 0,
}
}
pub fn black() -> Self {
Self {
r: 0,
g: 0,
b: 0,
a: 255,
}
}
pub fn white() -> Self {
Self {
r: 255,
g: 255,
b: 255,
a: 255,
}
}
}
impl Distribution<VoxelColor> for Standard {
fn sample<R: rand::prelude::Rng + ?Sized>(&self, rng: &mut R) -> VoxelColor {
VoxelColor {
r: rng.gen(),
g: rng.gen(),
b: rng.gen(),
a: rng.gen(),
}
}
}
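With Distribution<VoxelColor> implemented for Standard, rng.gen() can produce voxels directly; a minimal sketch that fills a grid and ships it to the render thread (the function and the render_tx parameter are assumptions, the types come from this commit):

use nalgebra::{Rotation3, Vector3};
use rand::Rng;

fn send_random_grid(render_tx: &RendererChannel) {
    let dimensions = Vector3::new(16usize, 16, 16);
    let mut rng = rand::thread_rng();
    // rng.gen() resolves to VoxelColor through the Distribution impl above
    let grid: Vec<VoxelColor> = (0..dimensions.x * dimensions.y * dimensions.z)
        .map(|_| rng.gen())
        .collect();
    render_tx
        .send(RenderMessage::CreateVoxelGrid(CreateVoxelGrid {
            pos: Vector3::zeros(),
            orientation: Rotation3::identity(),
            dimensions,
            grid,
        }))
        .unwrap();
}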

View File

@@ -1,9 +1,10 @@
use nalgebra::{Projective3, Rotation3, Transform3, Translation3, UnitVector3, Vector3};
use super::uniform::Uniform;
use nalgebra::{Projective3, Translation3, Rotation3};
use super::{color::VoxelColor, group::VoxelGroup, view::View};
use crate::client::render::{
buf::ArrBufUpdate, storage::Storage, uniform::Uniform, RenderUpdateData,
};
mod view;
mod color;
mod vertex;
mod square;
pub struct VoxelPipeline {
pipeline: wgpu::RenderPipeline,

View File

@@ -0,0 +1,69 @@
// Vertex shader
struct VertexOutput {
@builtin(position) clip_position: vec4<f32>,
@location(0) tex_coords: vec2<f32>,
};
struct View {
transform: mat4x4<f32>,
width: u32,
height: u32,
zoom: f32,
};
struct VoxelGroup {
transform: mat4x4<f32>,
transform_inv: mat4x4<f32>,
dimensions: vec3<u32>,
offset: u32,
};
@group(0) @binding(0)
var<uniform> view: View;
@group(0) @binding(1)
var<storage, read> voxels: array<u32>;
@group(0) @binding(2)
var<storage, read> voxel_groups: array<VoxelGroup>;
@vertex
fn vs_main(
@builtin(vertex_index) vi: u32,
@builtin(instance_index) ii: u32,
) -> VertexOutput {
var out: VertexOutput;
var pos = vec2<f32>(
f32(vi % 2u) * 2.0 - 1.0,
f32(vi / 2u) * 2.0 - 1.0,
);
out.clip_position = vec4<f32>(pos.x, pos.y, 0.0, 1.0);
out.tex_coords = pos;
return out;
}
// Fragment shader
@fragment
fn fs_main(
in: VertexOutput,
) -> @location(0) vec4<f32> {
// get position of the pixel; eye at origin, pixel on plane z = 1
let win_dim = vec2<f32>(f32(view.width), f32(view.height));
let aspect = win_dim.y / win_dim.x;
let pixel_pos = vec3<f32>(
(in.clip_position.xy / win_dim - vec2<f32>(0.5)) * vec2<f32>(2.0, -2.0 * aspect),
1.0
);
// move to position in world
let pos = view.transform * vec4<f32>(pixel_pos, 1.0);
let dir = view.transform * vec4<f32>(normalize(pixel_pos), 0.0);
var color = trace_full(pos, dir);
let light_mult = clamp((-dot(dir.xyz, normalize(GLOBAL_LIGHT)) - 0.99) * 200.0, 0.0, 1.0);
let sky_color = light_mult * vec3<f32>(1.0, 1.0, 1.0);
color += vec4<f32>(sky_color * (1.0 - color.a), 1.0 - color.a);
color.a = 1.0;
return color;
}

View File

View File

@@ -0,0 +1,6 @@
#[repr(C)]
#[derive(Copy, Clone, Debug)]
struct Vertex {
position: [f32; 3],
color: [f32; 3],
}

View File

@@ -0,0 +1,47 @@
use nalgebra::{Transform3, Translation3};
use crate::client::render::uniform::UniformData;
#[repr(C, align(16))]
#[derive(Clone, Copy, PartialEq, bytemuck::Zeroable)]
pub struct View {
pub transform: Transform3<f32>,
pub width: u32,
pub height: u32,
pub zoom: f32,
}
unsafe impl bytemuck::Pod for View {}
impl Default for View {
fn default() -> Self {
Self {
width: 1,
height: 1,
zoom: 1.0,
transform: Transform3::identity(),
}
}
}
impl UniformData for View {
fn update(&mut self, data: &crate::client::render::RenderUpdateData) -> bool {
let camera = data.state.camera;
let new = Transform3::identity() * Translation3::from(camera.pos) * camera.orientation;
if new == self.transform
&& data.size.width == self.width
&& data.size.height == self.height
&& camera.scale == self.zoom
{
false
} else {
*self = Self {
width: data.size.width,
height: data.size.height,
zoom: camera.scale,
transform: new,
};
true
}
}
}