square-based pyramid

aiden 2023-05-13 10:41:36 +01:00
commit fce1df2a7d
Signed by: aiden
GPG Key ID: EFA9C74AEBF806E0
5 changed files with 570 additions and 0 deletions

Cargo.toml (Normal file, 13 lines)

@@ -0,0 +1,13 @@
[package]
name = "game"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bytemuck = { version = "1.13.1", features = ["derive"] }
cgmath = "0.18.0"
futures = "0.3.28"
wgpu = "0.16.0"
winit = "0.28.4"

src/camera.rs (Normal file, 116 lines)

@@ -0,0 +1,116 @@
use cgmath::{Vector3, Point3, Rad, InnerSpace, Deg};
use crate::Input;
#[derive(Debug)]
pub struct Camera {
pub position: Point3<f32>,
pub yaw: Rad<f32>,
pub pitch: Rad<f32>,
aspect: f32,
fovy: Rad<f32>,
znear: f32,
zfar: f32,
}
impl Camera {
pub fn new<
V: Into<Point3<f32>>,
Y: Into<Rad<f32>>,
P: Into<Rad<f32>>,
>(
dimensions: winit::dpi::PhysicalSize<u32>,
position: V,
yaw: Y,
pitch: P
) -> Self {
Self {
position: position.into(),
yaw: yaw.into(),
pitch: pitch.into(),
aspect: dimensions.width as f32 / dimensions.height as f32,
fovy: Deg(45.0).into(),
znear: 0.1,
zfar: 100.0,
}
}
pub fn reconfigure(&mut self, dimensions: winit::dpi::PhysicalSize<u32>) {
self.aspect = dimensions.width as f32 / dimensions.height as f32;
}
pub fn update(&mut self, input: &Input, dt: std::time::Duration) {
let dt = dt.as_secs_f32();
// Move forward/backward and left/right
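// (Forward and right are built from yaw only, so looking up or down
// does not affect movement along the ground plane.)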
let (yaw_sin, yaw_cos) = self.yaw.0.sin_cos();
let forward = Vector3::new(yaw_cos, 0.0, yaw_sin).normalize();
let right = Vector3::new(-yaw_sin, 0.0, yaw_cos).normalize();
self.position += forward * (input.amount_forward - input.amount_backward) * input.speed * dt;
self.position += right * (input.amount_right - input.amount_left) * input.speed * dt;
// Move up/down. Since we don't use roll, we can just
// modify the y coordinate directly.
self.position.y += (input.amount_up - input.amount_down) * input.speed * dt;
// Rotate
self.yaw += Rad(input.rotate_horizontal) * input.sensitivity * dt;
self.pitch += Rad(-input.rotate_vertical) * input.sensitivity * dt;
// Keep the camera's pitch from going past straight up or straight down.
if self.pitch < -Rad(std::f32::consts::FRAC_PI_2 - 0.0001) {
self.pitch = -Rad(std::f32::consts::FRAC_PI_2 - 0.0001);
} else if self.pitch > Rad(std::f32::consts::FRAC_PI_2 - 0.0001) {
self.pitch = Rad(std::f32::consts::FRAC_PI_2 - 0.0001);
}
}
}
pub struct CameraUniform {
buffer: wgpu::Buffer,
}
impl CameraUniform {
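// cgmath::perspective produces OpenGL-style clip coordinates with z in [-1, 1];
// wgpu expects z in [0, 1], so this matrix rescales and offsets the z axis.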
const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.5, 0.0,
0.0, 0.0, 0.5, 1.0,
);
const SIZE: usize = std::mem::size_of::<[[f32; 4]; 4]>();
pub fn new(device: &wgpu::Device) -> Self {
Self {
buffer: device.create_buffer(
&(wgpu::BufferDescriptor {
label: Some("Camera Buffer"),
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
size: Self::SIZE as u64,
mapped_at_creation: false,
})
),
}
}
pub fn set_view_projection_matrix(&self, queue: &wgpu::Queue, camera: &Camera) {
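// Derive the look direction from yaw and pitch, then upload
// OPENGL_TO_WGPU_MATRIX * projection * view to the uniform buffer.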
let (sin_pitch, cos_pitch) = camera.pitch.0.sin_cos();
let (sin_yaw, cos_yaw) = camera.yaw.0.sin_cos();
let target = Vector3::new(
cos_pitch * cos_yaw,
sin_pitch,
cos_pitch * sin_yaw
).normalize();
let view = cgmath::Matrix4::look_to_rh(camera.position, target, Vector3::unit_y());
let proj = cgmath::perspective(camera.fovy, camera.aspect, camera.znear, camera.zfar);
let transformed_proj: [[f32; 4]; 4] = (Self::OPENGL_TO_WGPU_MATRIX * proj * view).into();
queue.write_buffer(&(self.buffer), 0, bytemuck::cast_slice(&(transformed_proj)));
}
pub fn as_entire_binding(&self) -> wgpu::BindingResource {
self.buffer.as_entire_binding()
}
}

src/main.rs (Normal file, 152 lines)

@@ -0,0 +1,152 @@
// based on https://sotrh.github.io/learn-wgpu/
use {winit::{event as Event, event_loop::{ControlFlow, EventLoop}, window::WindowBuilder}, std::process::ExitCode};
mod state;
use state::State;
mod camera;
use camera::Camera;
use winit::event::{VirtualKeyCode, ElementState, KeyboardInput, WindowEvent};
#[derive(Debug)]
pub struct Input {
amount_left: f32,
amount_right: f32,
amount_forward: f32,
amount_backward: f32,
amount_up: f32,
amount_down: f32,
rotate_horizontal: f32,
rotate_vertical: f32,
speed: f32,
sensitivity: f32,
}
impl Input {
pub fn new(speed: f32, sensitivity: f32) -> Self {
Self {
amount_left: 0.0,
amount_right: 0.0,
amount_forward: 0.0,
amount_backward: 0.0,
amount_up: 0.0,
amount_down: 0.0,
rotate_horizontal: 0.0,
rotate_vertical: 0.0,
speed,
sensitivity,
}
}
pub fn process_keyboard(&mut self, key: VirtualKeyCode, state: ElementState) -> bool {
let amount = if state == ElementState::Pressed { 1.0 } else { 0.0 };
match key {
VirtualKeyCode::I | VirtualKeyCode::Up => {
self.amount_forward = amount;
true
}
VirtualKeyCode::J | VirtualKeyCode::Left => {
self.amount_left = amount;
true
}
VirtualKeyCode::K | VirtualKeyCode::Down => {
self.amount_backward = amount;
true
}
VirtualKeyCode::L | VirtualKeyCode::Right => {
self.amount_right = amount;
true
}
VirtualKeyCode::Space => {
self.amount_up = amount;
true
}
VirtualKeyCode::Semicolon => {
self.amount_down = amount;
true
}
_ => false,
}
}
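// Store the raw mouse deltas; the event loop zeroes them again after each
// update, so rotation is only applied while the mouse is actually moving.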
pub fn process_mouse(&mut self, (dx, dy): (f64, f64)) {
self.rotate_horizontal = dx as f32;
self.rotate_vertical = dy as f32;
}
}
fn real_main() -> Result<(), &'static str> {
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.with_title("game")
.build(&(event_loop))
.map_err(|_| "failed to create window")?;
let mut state = State::new(window)?;
let mut input = Input::new(1.0, 8.0);
let mut last_render_time = std::time::Instant::now();
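// winit's EventLoop::run never returns; per-frame update and render work
// happens in the MainEventsCleared arm below.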
event_loop.run(move |event, _, flow| {
*flow = ControlFlow::Poll;
let window = state.window();
window.set_cursor_grab(winit::window::CursorGrabMode::Confined).expect("failed to lock cursor");
match event {
Event::Event::DeviceEvent {
event: Event::DeviceEvent::MouseMotion { delta, },
..
} => {
input.process_mouse(delta);
}
Event::Event::WindowEvent { window_id, event } if window_id == window.id() => match event {
WindowEvent::KeyboardInput {
input:
KeyboardInput {
virtual_keycode: Some(key),
state,
..
},
..
} => { input.process_keyboard(key, state); }
WindowEvent::CloseRequested => {
*flow = ControlFlow::Exit;
},
WindowEvent::Resized(physical_size) => {
state.reconfigure(Some(physical_size));
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
state.reconfigure(Some(*new_inner_size));
}
_ => (),
}
Event::Event::MainEventsCleared => {
let now = std::time::Instant::now();
let dt = now - last_render_time;
last_render_time = now;
state.update(&(input), dt);
input.rotate_horizontal = 0.0;
input.rotate_vertical = 0.0;
match state.render() {
Err(wgpu::SurfaceError::Lost) => state.reconfigure(None),
Err(wgpu::SurfaceError::OutOfMemory) => *flow = ControlFlow::ExitWithCode(1),
Err(e) => eprintln!("{:?}", e),
_ => (),
};
}
_ => (),
};
return;
});
}
fn main() -> ExitCode {
let r: Result<(), &'static str> = real_main();
if let Err(e) = r {
eprintln!("{e}");
return ExitCode::FAILURE;
}
return ExitCode::SUCCESS;
}

src/shader.wgsl (Normal file, 31 lines)

@@ -0,0 +1,31 @@
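// view-projection matrix uploaded by CameraUniform (bind group 0, binding 0)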
@group(0) @binding(0)
var<uniform> camera: mat4x4<f32>;
// vertex shader
struct VertexInput {
@location(0) position: vec3<f32>,
@location(1) color: vec3<f32>,
};
struct VertexOutput {
@builtin(position) clip_position: vec4<f32>,
@location(0) color: vec3<f32>,
};
@vertex
fn vs_main(
model: VertexInput,
) -> VertexOutput {
var out: VertexOutput;
out.clip_position = camera * vec4<f32>(model.position, 1.0);
out.color = model.color;
return out;
}
// fragment shader (takes in the output of the vertex shader)
@fragment
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
return vec4<f32>(in.color, 1.0);
}

src/state.rs (Normal file, 258 lines)

@@ -0,0 +1,258 @@
use std::time::Duration;
use cgmath::Deg;
use winit::window::Window;
use crate::Input;
use wgpu::{util::DeviceExt};
use crate::{Camera, camera::CameraUniform};
pub struct State {
window: Window,
size: winit::dpi::PhysicalSize<u32>,
surface: wgpu::Surface,
device: wgpu::Device,
queue: wgpu::Queue,
config: wgpu::SurfaceConfiguration,
render_pipeline: wgpu::RenderPipeline,
vertex_buffer: wgpu::Buffer,
camera: Camera,
camera_uniform: CameraUniform,
camera_bind_group: wgpu::BindGroup,
}
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
struct Vertex {
position: [f32; 3],
color: [f32; 3],
}
impl State {
pub fn new(window: Window) -> Result<Self, &'static str> {
let size = window.inner_size();
let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
backends: wgpu::Backends::all(),
..Default::default()
});
let surface = unsafe { instance.create_surface(&(window)) }.map_err(|_| "create_surface failed")?;
// adapter: a handle to a physical GPU, used to request the device and queue below
let adapter = futures::executor::block_on(instance.request_adapter(
&(wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::default(),
compatible_surface: Some(&(surface)),
force_fallback_adapter: false,
}),
)).ok_or("request_adapter failed")?;
// the logical device (our connection to the GPU) and its command queue
let (device, queue) = futures::executor::block_on(adapter.request_device(
&(wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
label: None,
}),
None,
)).map_err(|_| "request_device failed")?;
let config = {
let caps = surface.get_capabilities(&(adapter));
wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format: caps
.formats
.into_iter()
.find(|f| f.is_srgb())
.ok_or("no srgb surface")?,
width: size.width,
height: size.height,
present_mode: caps.present_modes[0],
alpha_mode: caps.alpha_modes[0],
view_formats: vec![],
}
};
surface.configure(&(device), &(config));
let shader = device.create_shader_module(wgpu::include_wgsl!("shader.wgsl"));
let vertex_buffer = device.create_buffer_init(
&(wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(&[
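// The four triangular side faces of the square-based pyramid, three vertices
// each (the square base itself is not drawn).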
Vertex { position: [0.0, 0.5, -0.5], color: [0.0, 0.0, 0.1] }, // top
Vertex { position: [-0.5, -0.5, -1.0], color: [0.0, 1.0, 0.0] }, // left
Vertex { position: [-0.5, -0.5, 0.0], color: [1.0, 0.0, 0.0] }, // right
Vertex { position: [0.0, 0.5, -0.5], color: [1.0, 0.0, 0.0] }, // top
Vertex { position: [-0.5, -0.5, 0.0], color: [0.0, 1.0, 0.0] }, // left
Vertex { position: [0.5, -0.5, 0.0], color: [0.0, 0.0, 1.0] }, // right
Vertex { position: [0.0, 0.5, -0.5], color: [0.0, 1.0, 0.0] }, // top
Vertex { position: [0.5, -0.5, 0.0], color: [1.0, 0.0, 0.0] }, // left
Vertex { position: [0.5, -0.5, -1.0], color: [0.0, 0.0, 1.0] }, // right
Vertex { position: [0.0, 0.5, -0.5], color: [1.0, 0.0, 0.0] }, // top
Vertex { position: [0.5, -0.5, -1.0], color: [0.0, 0.0, 1.0] }, // left
Vertex { position: [-0.5, -0.5, -1.0], color: [0.0, 1.0, 0.0] }, // right
]),
usage: wgpu::BufferUsages::VERTEX,
})
);
let camera = Camera::new(size, (0.0, 0.0, 0.0), Deg(-90.0), Deg(-20.0));
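// A single uniform buffer at binding 0, visible to the vertex stage; this
// matches @group(0) @binding(0) in shader.wgsl.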
let camera_bind_group_layout = &(device.create_bind_group_layout(&(wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::VERTEX,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
}
],
label: Some("camera_bind_group_layout"),
})));
let render_pipeline_layout = device.create_pipeline_layout(&(wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[camera_bind_group_layout],
push_constant_ranges: &[],
}));
let render_pipeline =
device.create_render_pipeline(&(wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&(render_pipeline_layout)),
vertex: wgpu::VertexState {
module: &(shader),
entry_point: "vs_main",
buffers: &[
// index 0
wgpu::VertexBufferLayout {
array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::VertexStepMode::Vertex,
attributes: &(wgpu::vertex_attr_array![0 => Float32x3, 1 => Float32x3]),
},
],
},
fragment: Some(wgpu::FragmentState {
module: &(shader),
entry_point: "fs_main",
targets: &[Some(wgpu::ColorTargetState {
format: config.format,
blend: Some(wgpu::BlendState::REPLACE),
write_mask: wgpu::ColorWrites::ALL,
})],
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
polygon_mode: wgpu::PolygonMode::Fill,
unclipped_depth: false,
conservative: false,
},
depth_stencil: None,
multisample: wgpu::MultisampleState {
count: 1,
mask: !0,
alpha_to_coverage_enabled: false,
},
multiview: None,
}));
let camera_uniform = CameraUniform::new(&(device));
camera_uniform.set_view_projection_matrix(&(queue), &(camera));
let camera_bind_group = device.create_bind_group(&(wgpu::BindGroupDescriptor {
layout: camera_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: camera_uniform.as_entire_binding(),
}
],
label: Some("camera_bind_group"),
}));
return Ok(Self {
window,
size,
surface,
device,
queue,
config,
render_pipeline,
vertex_buffer,
camera,
camera_uniform,
camera_bind_group,
});
}
pub fn window(&self) -> &Window {
return &(self.window);
}
pub fn reconfigure(&mut self, new_size: Option<winit::dpi::PhysicalSize<u32>>) {
let new_size = new_size.unwrap_or(self.size);
// A zero-sized surface is invalid in wgpu (e.g. while the window is minimised), so skip reconfiguring.
if new_size.width == 0 || new_size.height == 0 {
return;
}
self.size = new_size;
self.config.width = new_size.width;
self.config.height = new_size.height;
self.surface.configure(&(self.device), &(self.config));
self.camera.reconfigure(new_size);
}
pub fn update(&mut self, input: &Input, dt: Duration) {
self.camera.update(input, dt);
self.camera_uniform.set_view_projection_matrix(&(self.queue), &(self.camera));
}
pub fn render(&mut self) -> Result<(), wgpu::SurfaceError> {
let output = self.surface.get_current_texture()?;
let view = output.texture.create_view(&(wgpu::TextureViewDescriptor::default()));
let mut encoder = self.device.create_command_encoder(&(wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
}));
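// Scope the render pass so its mutable borrow of the encoder ends before
// encoder.finish() is called below.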
{
let mut render_pass = encoder.begin_render_pass(&(wgpu::RenderPassDescriptor {
label: Some("Render Pass"),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: &(view),
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(Default::default()),
store: true,
},
})],
depth_stencil_attachment: None,
}));
render_pass.set_pipeline(&(self.render_pipeline));
render_pass.set_bind_group(0, &(self.camera_bind_group), &[]);
render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
render_pass.draw(0..12, 0..1);
}
// submit will accept anything that implements IntoIterator
self.queue.submit(std::iter::once(encoder.finish()));
output.present();
return Ok(());
}
}