almost render mesh sorta

aiden 2023-06-20 19:16:20 +01:00
parent d927bd06c2
commit 18ce5cde06
Signed by: aiden
GPG Key ID: EFA9C74AEBF806E0
8 changed files with 281 additions and 85 deletions

Cargo.toml

@@ -9,6 +9,7 @@ edition = "2021"
bytemuck = { version = "1.13.1", features = ["derive"] }
cgmath = "0.18.0"
futures = "0.3.28"
image = "0.24.6"
tobj = "4.0.0"
wgpu = "0.16.0"
winit = { path = "../winit" }

src/camera.rs

@@ -73,11 +73,12 @@ pub struct CameraUniform {
buffer: wgpu::Buffer,
}
impl<'a> CameraUniform {
// https://github.com/sotrh/learn-wgpu/issues/478
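// remaps z from OpenGL-style [-1, 1] NDC to wgpu's [0, 1] depth range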
const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.5, 0.0,
0.0, 0.0, 0.5, 1.0,
0.0, 0.0, 0.5, 0.5,
0.0, 0.0, 0.0, 1.0,
);
const SIZE: usize = std::mem::size_of::<[[f32; 4]; 4]>();

src/main.rs

@@ -54,6 +54,8 @@ mod state;
mod camera;
mod input;
mod player;
mod texture;
mod obj;
use input::Input;
use state::State;

src/obj.rs (new file, 141 lines)

@@ -0,0 +1,141 @@
use std::{io::{Cursor, BufReader}, path::Path};
use crate::texture;
use image;
use wgpu::util::DeviceExt;
fn load_bytes<T: AsRef<Path>>(file_name: T) -> Vec<u8> {
return std::fs::read(file_name.as_ref()).unwrap();
}
fn load_string<T: AsRef<Path>>(file_name: T) -> String {
return std::fs::read_to_string(file_name.as_ref()).unwrap();
}
fn load_texture(
file_name: &str,
device: &wgpu::Device,
queue: &wgpu::Queue,
) -> texture::Texture {
let bytes = load_bytes(file_name);
return texture::Texture::from_image(device, queue, &(image::load_from_memory(&(bytes)).unwrap()), Some(file_name));
}
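// `repr(C)` plus the bytemuck derives give this struct a stable layout, so a
// `Vec<ModelVertex>` can be uploaded directly via `bytemuck::cast_slice` below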
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
pub struct ModelVertex {
pub position: [f32; 3],
pub tex_coords: [f32; 2],
pub normal: [f32; 3],
}
pub struct Material {
pub name: String,
pub diffuse_texture: texture::Texture,
pub bind_group: wgpu::BindGroup,
}
pub struct Mesh {
pub name: String,
pub vertex_buffer: wgpu::Buffer,
pub index_buffer: wgpu::Buffer,
pub num_elements: u32,
pub material: usize,
}
pub struct Model {
pub meshes: Vec<Mesh>,
pub materials: Vec<Material>,
}
pub fn load_obj(
file_name: &str,
device: &wgpu::Device,
queue: &wgpu::Queue,
layout: &wgpu::BindGroupLayout,
) -> Model {
let obj_text = load_string(file_name);
let obj_cursor = Cursor::new(obj_text);
let mut obj_reader = BufReader::new(obj_cursor);
let (models, obj_materials) = tobj::load_obj_buf(
&mut(obj_reader),
&(tobj::LoadOptions {
triangulate: true,
single_index: true,
..Default::default()
}),
|p| {
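// tobj calls this closure for every `mtllib` the OBJ references; it must
// load and parse that .mtl file (here resolved relative to the models directory)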
let p = format!("models/{}", p.to_str().unwrap()); // fixme
let mat_text = load_string(p);
tobj::load_mtl_buf(&mut(BufReader::new(Cursor::new(mat_text))))
}
).unwrap();
let mut materials = Vec::new();
for m in obj_materials.unwrap() {
let x = format!("models/ruby/{}", m.diffuse_texture.unwrap()); // fixme
let diffuse_texture = load_texture(&(x), device, queue);
let bind_group = device.create_bind_group(&(wgpu::BindGroupDescriptor {
layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&(diffuse_texture.view)),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&(diffuse_texture.sampler)),
},
],
label: None,
}));
materials.push(Material {
name: m.name,
diffuse_texture,
bind_group,
})
}
let meshes = models
.into_iter()
.map(|m| {
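// with `single_index: true`, positions, texcoords and normals are aligned
// per vertex, so vertex `i` is assembled from index `i` of each array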
let vertices = (0..m.mesh.positions.len() / 3)
.map(|i| ModelVertex {
position: [
m.mesh.positions[i * 3],
m.mesh.positions[i * 3 + 1],
m.mesh.positions[i * 3 + 2],
],
tex_coords: [m.mesh.texcoords[i * 2], m.mesh.texcoords[i * 2 + 1]],
normal: [
m.mesh.normals[i * 3],
m.mesh.normals[i * 3 + 1],
m.mesh.normals[i * 3 + 2],
],
})
.collect::<Vec<_>>();
let vertex_buffer = device.create_buffer_init(&(wgpu::util::BufferInitDescriptor {
label: Some(&(format!("{:?} Vertex Buffer", file_name))),
contents: bytemuck::cast_slice(&(vertices)),
usage: wgpu::BufferUsages::VERTEX,
}));
let index_buffer = device.create_buffer_init(&(wgpu::util::BufferInitDescriptor {
label: Some(&(format!("{:?} Index Buffer", file_name))),
contents: bytemuck::cast_slice(&(m.mesh.indices)),
usage: wgpu::BufferUsages::INDEX,
}));
Mesh {
name: file_name.to_string(),
vertex_buffer,
index_buffer,
num_elements: m.mesh.indices.len() as u32,
material: m.mesh.material_id.unwrap_or(0),
}
})
.collect::<Vec<_>>();
Model { meshes, materials }
}

src/player.rs

@@ -4,8 +4,6 @@ use crate::input::Input;
pub struct Player {
pub position: Point3<f32>,
pub rot_x: Deg<f32>,
pub buffer: wgpu::Buffer,
}
impl Player {

src/shader.wgsl

@@ -1,16 +1,21 @@
@group(0) @binding(0)
var<uniform> camera: mat4x4<f32>;
@group(1) @binding(0)
var t_diffuse: texture_2d<f32>;
@group(1) @binding(1)
var s_diffuse: sampler;
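// group 1 is the per-material texture bind group bound in state.rs (texture_bind_group_layout)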
// vertex shader
struct VertexInput {
@location(0) position: vec3<f32>,
@location(1) color: vec3<f32>,
@location(1) tex_coords: vec2<f32>,
};
struct VertexOutput {
@builtin(position) clip_position: vec4<f32>,
@location(0) color: vec3<f32>,
@location(0) tex_coords: vec2<f32>,
};
@vertex
@@ -19,7 +24,7 @@ fn vs_main(
) -> VertexOutput {
var out: VertexOutput;
out.clip_position = camera * vec4<f32>(model.position, 1.0);
out.color = model.color;
out.tex_coords = model.tex_coords;
return out;
}
@@ -27,5 +32,5 @@ fn vs_main(
@fragment
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
return vec4<f32>(in.color, 1.0);
return textureSample(t_diffuse, s_diffuse, in.tex_coords);
}

src/state.rs

@@ -1,8 +1,7 @@
use cgmath::{Deg, Rad, Point3, EuclideanSpace};
use cgmath::{Deg, Rad, Point3};
use winit::window::Window;
use wgpu::{util::DeviceExt, BufferAddress};
use crate::{camera::*, Input, player::Player};
use crate::{camera::*, Input, player::Player, obj::{self, ModelVertex}};
pub struct State {
pub input: Input,
@@ -18,22 +17,14 @@ pub struct State {
config: wgpu::SurfaceConfiguration,
render_pipeline: wgpu::RenderPipeline,
plane_buffer: wgpu::Buffer,
build_buffer: wgpu::Buffer,
player: Player,
camera: Camera,
camera_uniform: CameraUniform,
camera_bind_group: wgpu::BindGroup,
depth_view: wgpu::TextureView,
}
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
struct Vertex {
position: [f32; 3],
color: [f32; 3],
skin: obj::Model,
}
fn depth_view(device: &wgpu::Device, config: &wgpu::SurfaceConfiguration) -> wgpu::TextureView {
@@ -106,7 +97,7 @@ impl State {
let shader = device.create_shader_module(wgpu::include_wgsl!("shader.wgsl"));
let plane_buffer = device.create_buffer_init(
/*let plane_buffer = device.create_buffer_init(
&(wgpu::util::BufferInitDescriptor {
label: Some("plane_buffer"),
usage: wgpu::BufferUsages::VERTEX,
@@ -126,17 +117,11 @@ impl State {
usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
size: std::mem::size_of::<Vertex>() as u64 * 6,
mapped_at_creation: false,
}));
}));*/
let player = Player {
position: (1.0, 0.25, -1.0).into(),
rot_x: Deg(0.0),
buffer: device.create_buffer(&(wgpu::BufferDescriptor {
label: Some("player_buffer"),
usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
size: std::mem::size_of::<Vertex>() as u64 * 6,
mapped_at_creation: false,
})),
};
let mut camera = Camera::new(size, Deg(0.0));
camera.update_pos(&(player));
@@ -157,9 +142,49 @@ impl State {
label: Some("camera_bind_group_layout"),
})));
let depth_view = depth_view(&(device), &(config));
let camera_uniform = CameraUniform::new(&(device));
camera_uniform.set_view_projection_matrix(&(queue), &(camera));
let camera_bind_group = device.create_bind_group(&(wgpu::BindGroupDescriptor {
layout: camera_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: camera_uniform.as_entire_binding(),
}
],
label: Some("camera_bind_group"),
}));
let texture_bind_group_layout = &(device.create_bind_group_layout(&(wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Texture {
multisampled: false,
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float { filterable: true },
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
count: None,
},
],
label: Some("texture_bind_group_layout"),
})));
let skin = obj::load_obj("models/skin.obj", &(device), &(queue), &(texture_bind_group_layout));
let render_pipeline_layout = device.create_pipeline_layout(&(wgpu::PipelineLayoutDescriptor {
label: Some("render_pipeline_layout"),
bind_group_layouts: &[camera_bind_group_layout],
bind_group_layouts: &[camera_bind_group_layout, texture_bind_group_layout],
push_constant_ranges: &[],
}));
let render_pipeline = device.create_render_pipeline(&(wgpu::RenderPipelineDescriptor {
@@ -171,9 +196,9 @@ impl State {
buffers: &[
// index 0
wgpu::VertexBufferLayout {
array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
array_stride: std::mem::size_of::<ModelVertex>() as wgpu::BufferAddress,
step_mode: wgpu::VertexStepMode::Vertex,
attributes: &(wgpu::vertex_attr_array![0 => Float32x3, 1 => Float32x3]),
attributes: &(wgpu::vertex_attr_array![0 => Float32x3, 1 => Float32x2, 2 => Float32x3]),
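// locations 0/1/2 map to ModelVertex's position, tex_coords and normal;
// the shader currently consumes only locations 0 and 1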
},
],
},
@@ -191,7 +216,7 @@ impl State {
topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: None, // should use Some(wgpu::Face::Back) once i 3d objects (rather than 2d ones) in space
cull_mode: None, // should use Some(wgpu::Face::Back) once i have 3d objects (rather than 2d ones) in space
polygon_mode: wgpu::PolygonMode::Fill,
unclipped_depth: false,
conservative: false,
@@ -210,23 +235,6 @@ impl State {
},
multiview: None,
}));
let depth_view = depth_view(&(device), &(config));
let camera_uniform = CameraUniform::new(&(device));
camera_uniform.set_view_projection_matrix(&(queue), &(camera));
let camera_bind_group = device.create_bind_group(&(wgpu::BindGroupDescriptor {
layout: camera_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: camera_uniform.as_entire_binding(),
}
],
label: Some("camera_bind_group"),
}));
return Ok(Self {
input,
@@ -241,15 +249,14 @@ impl State {
config,
render_pipeline,
plane_buffer,
build_buffer,
player,
camera,
camera_uniform,
camera_bind_group,
depth_view,
skin,
});
}
@@ -367,7 +374,8 @@ impl State {
render_pass.set_bind_group(0, &(self.camera_bind_group), &[]);
// player
fn rot_rect(w: f32, h: f32, r: Rad<f32>) -> [Point3<f32>; 6] {
let skin = &(self.skin);
fn _rot_rect(w: f32, h: f32, r: Rad<f32>) -> [Point3<f32>; 6] {
use cgmath::Transform;
let hw = w / 2.0;
@@ -396,40 +404,18 @@ impl State {
vertices[0],
];
}
// (USED TO) render the player in-front of the camera
// this should really be done the other way
// around though; the camera should be placed
// *behind the player*, and the camera should
// be affected by collision to prevent it going
// inside of walls, etc.
// the camera should be placed behind the player,
// and it should be affected by collision to prevent
// it going inside of walls, etc.
// (the camera should rotate around the player,
// and the player should also rotate so that
// its back is facing the camera.)
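// hypothetical sketch, not part of this commit: one way to do the above is to
// offset the camera along the player's back vector, clamped by a collision
// query so the camera never ends up inside a wall.
fn _camera_behind_player(player: Point3<f32>, yaw: Rad<f32>, distance: f32) -> Point3<f32> {
// assumed yaw-around-+Y convention; the exact signs depend on how rot_x is defined
let back = cgmath::Vector3::new(-yaw.0.sin(), 0.0, -yaw.0.cos());
// a fuller version would raycast from the player along `back` and shorten
// `distance` on a hit before computing the final position
player + back * distance + cgmath::Vector3::new(0.0, 0.25, 0.0)
}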
self.queue.write_buffer(&(self.player.buffer), 0, bytemuck::cast_slice(&(rot_rect(
0.2, 0.5, Rad::from(self.player.rot_x)
).map(|point| Vertex { position: (self.player.position + point.to_vec()).into(), color: [1.0, 1.0, 1.0] }))));
render_pass.set_vertex_buffer(0, self.player.buffer.slice(..));
render_pass.draw(0..6, 0..1);
// build grid
self.queue.write_buffer(&(self.build_buffer), 0, bytemuck::cast_slice(&[
Vertex { position: [0.0, 1.0, 0.0], color: [1.0, 0.0, 0.0] },
Vertex { position: [0.0, 0.0, 0.0], color: [1.0, 0.0, 0.0] },
Vertex { position: [1.0, 0.0, 0.0], color: [1.0, 0.0, 0.0] },
Vertex { position: [0.0, 1.0, 0.0], color: [1.0, 0.0, 0.0] },
Vertex { position: [1.0, 0.0, 0.0], color: [1.0, 0.0, 0.0] },
Vertex { position: [1.0, 1.0, 0.0], color: [1.0, 0.0, 0.0] },
]));
render_pass.set_vertex_buffer(0, self.build_buffer.slice(..));
render_pass.draw(0..6, 0..1);
// plane
render_pass.set_vertex_buffer(0, self.plane_buffer.slice(..));
let n_vertices = (
self.plane_buffer.size() / std::mem::size_of::<Vertex>() as BufferAddress
) as u32;
render_pass.draw(0..n_vertices, 0..1);
render_pass.set_pipeline(&(self.render_pipeline));
let mesh = &(skin.meshes[1]);
render_pass.set_bind_group(1, &(skin.materials[mesh.material].bind_group), &[]);
render_pass.set_vertex_buffer(0, mesh.vertex_buffer.slice(..));
render_pass.set_index_buffer(mesh.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
render_pass.draw_indexed(0..mesh.num_elements, 0, 0..1);
drop(render_pass);

src/texture.rs (new file, 62 lines)

@@ -0,0 +1,62 @@
pub struct Texture {
pub texture: wgpu::Texture,
pub view: wgpu::TextureView,
pub sampler: wgpu::Sampler,
}
impl Texture {
pub fn from_image(
device: &wgpu::Device,
queue: &wgpu::Queue,
img: &image::DynamicImage,
label: Option<&str>
) -> Self {
let rgba = img.to_rgba8();
let dimensions = rgba.dimensions();
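// `to_rgba8` yields tightly packed RGBA data: 4 bytes per texel, `4 * width` bytes per row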
let size = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
depth_or_array_layers: 1,
};
let texture = device.create_texture(&(wgpu::TextureDescriptor {
label,
size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb, // matches the RGBA8 data written below
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
view_formats: &[],
}));
queue.write_texture(
wgpu::ImageCopyTexture {
aspect: wgpu::TextureAspect::All,
texture: &(texture),
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
},
&(rgba),
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: Some(dimensions.0 * 4),
rows_per_image: Some(dimensions.1),
},
size
);
let view = texture.create_view(&(wgpu::TextureViewDescriptor::default()));
let sampler = device.create_sampler(&(wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
}));
return Self { texture, view, sampler };
}
}