indexed rendering
@@ -73,9 +73,11 @@ const DeviceDispatch = vk.DeviceWrapper(.{
     .cmdEndRenderPass = true,
     .cmdBindPipeline = true,
     .cmdDraw = true,
+    .cmdDrawIndexed = true,
     .cmdSetViewport = true,
     .cmdSetScissor = true,
     .cmdBindVertexBuffers = true,
+    .cmdBindIndexBuffer = true,
     .cmdCopyBuffer = true,
 });
 
src/main.zig
@@ -8,7 +8,7 @@ const Allocator = std.mem.Allocator;
 
 const app_name = "vulkan-zig triangle example";
 
-const Vertex = struct {
+const Vertex = extern struct {
     const binding_description = vk.VertexInputBindingDescription{
         .binding = 0,
         .stride = @sizeOf(Vertex),
@@ -35,11 +35,20 @@ const Vertex = struct {
 };
 
 const vertices = [_]Vertex{
-    .{ .pos = .{ 0, -0.5 }, .color = .{ 1, 0, 0 } },
-    .{ .pos = .{ 0.5, 0.5 }, .color = .{ 0, 1, 0 } },
-    .{ .pos = .{ -0.5, 0.5 }, .color = .{ 0, 0, 1 } },
+    // .{ .pos = .{ -0.5, -0.5 }, .color = .{ 1, 0, 0 } },
+    // .{ .pos = .{ 0.5, 0.5 }, .color = .{ 0, 1, 0 } },
+    // .{ .pos = .{ -0.5, 0.5 }, .color = .{ 0, 0, 1 } },
+
+    .{ .pos = .{ -0.5, -0.5 }, .color = .{ 1, 0, 0 } },
+    .{ .pos = .{ -0.5, 0.5 }, .color = .{ 0, 1, 0 } },
+    .{ .pos = .{ 0.5, -0.5 }, .color = .{ 0, 0, 1 } },
+    .{ .pos = .{ 0.5, 0.5 }, .color = .{ 1, 1, 0 } },
 };
 
+const Index = u16;
+
+const indices = [_]Index{ 0, 2, 1, 1, 2, 3 };
+
 pub fn main() !void {
     if (c.glfwInit() != c.GLFW_TRUE) return error.GlfwInitFailed;
     defer c.glfwTerminate();
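Note: the six indices { 0, 2, 1, 1, 2, 3 } draw the quad as two triangles that share two of the four vertices. A small illustrative snippet (not part of the commit) showing the non-indexed vertex stream those indices stand in for:

    // Hypothetical expansion of the index list back into a plain vertex
    // stream: the shared corners (vertices 1 and 2) must be duplicated.
    const expanded = [_]Vertex{
        vertices[0], vertices[2], vertices[1], // first triangle
        vertices[1], vertices[2], vertices[3], // second triangle
    };

With indexed drawing that duplication disappears: four vertices plus six 16-bit indices instead of six full Vertex records.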
@@ -96,24 +105,38 @@ pub fn main() !void {
     }, null);
     defer gc.vkd.destroyCommandPool(gc.dev, pool, null);
 
-    const buffer = try gc.vkd.createBuffer(gc.dev, &.{
+    const vertex_buffer = try gc.vkd.createBuffer(gc.dev, &.{
         .size = @sizeOf(@TypeOf(vertices)),
         .usage = .{ .transfer_dst_bit = true, .vertex_buffer_bit = true },
         .sharing_mode = .exclusive,
     }, null);
-    defer gc.vkd.destroyBuffer(gc.dev, buffer, null);
-    const mem_reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, buffer);
-    const memory = try gc.allocate(mem_reqs, .{ .device_local_bit = true });
-    defer gc.vkd.freeMemory(gc.dev, memory, null);
-    try gc.vkd.bindBufferMemory(gc.dev, buffer, memory, 0);
+    defer gc.vkd.destroyBuffer(gc.dev, vertex_buffer, null);
+    const vertex_mem_reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, vertex_buffer);
+    const vertex_memory = try gc.allocate(vertex_mem_reqs, .{ .device_local_bit = true });
+    defer gc.vkd.freeMemory(gc.dev, vertex_memory, null);
+    try gc.vkd.bindBufferMemory(gc.dev, vertex_buffer, vertex_memory, 0);
 
-    try uploadVertices(&gc, pool, buffer);
+    try uploadData(Vertex, &gc, pool, vertex_buffer, &vertices);
+
+    const index_buffer = try gc.vkd.createBuffer(gc.dev, &.{
+        .size = @sizeOf(@TypeOf(indices)),
+        .usage = .{ .transfer_dst_bit = true, .index_buffer_bit = true },
+        .sharing_mode = .exclusive,
+    }, null);
+    defer gc.vkd.destroyBuffer(gc.dev, index_buffer, null);
+    const index_mem_reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, index_buffer);
+    const index_memory = try gc.allocate(index_mem_reqs, .{ .device_local_bit = true });
+    defer gc.vkd.freeMemory(gc.dev, index_memory, null);
+    try gc.vkd.bindBufferMemory(gc.dev, index_buffer, index_memory, 0);
+
+    try uploadData(Index, &gc, pool, index_buffer, &indices);
 
     var cmdbufs = try createCommandBuffers(
         &gc,
         pool,
         allocator,
-        buffer,
+        vertex_buffer,
+        index_buffer,
         swapchain.extent,
         render_pass,
         pipeline,
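Note: the vertex- and index-buffer setup above repeats the same create / query requirements / allocate / bind sequence. A hypothetical helper (not in this commit; the name and return type are invented) that factors out that sequence using only calls already present in the file:

    const DeviceLocalBuffer = struct { buffer: vk.Buffer, memory: vk.DeviceMemory };

    /// Create a device-local buffer and bind freshly allocated memory to it.
    /// The caller owns both handles and must destroy/free them itself.
    fn createDeviceLocalBuffer(
        gc: *const GraphicsContext,
        size: vk.DeviceSize,
        usage: vk.BufferUsageFlags,
    ) !DeviceLocalBuffer {
        const buffer = try gc.vkd.createBuffer(gc.dev, &.{
            .size = size,
            .usage = usage,
            .sharing_mode = .exclusive,
        }, null);
        errdefer gc.vkd.destroyBuffer(gc.dev, buffer, null);

        const mem_reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, buffer);
        const memory = try gc.allocate(mem_reqs, .{ .device_local_bit = true });
        errdefer gc.vkd.freeMemory(gc.dev, memory, null);

        try gc.vkd.bindBufferMemory(gc.dev, buffer, memory, 0);
        return .{ .buffer = buffer, .memory = memory };
    }

main() would then call it twice, e.g. createDeviceLocalBuffer(&gc, @sizeOf(@TypeOf(indices)), .{ .transfer_dst_bit = true, .index_buffer_bit = true }), and keep the existing defer destroy/free pairs.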
@@ -152,7 +175,8 @@ pub fn main() !void {
                 &gc,
                 pool,
                 allocator,
-                buffer,
+                vertex_buffer,
+                index_buffer,
                 swapchain.extent,
                 render_pass,
                 pipeline,
@@ -167,27 +191,36 @@ pub fn main() !void {
     try gc.vkd.deviceWaitIdle(gc.dev);
 }
 
-fn uploadVertices(gc: *const GraphicsContext, pool: vk.CommandPool, buffer: vk.Buffer) !void {
+fn uploadData(comptime T: type, gc: *const GraphicsContext, pool: vk.CommandPool, buffer: vk.Buffer, source: []const T) !void {
+    // if (@typeInfo(T) == .Struct and @typeInfo(T).Struct.layout == .auto) @compileError("Requires defined T layout");
+
+    const size = @sizeOf(T) * source.len;
+
     const staging_buffer = try gc.vkd.createBuffer(gc.dev, &.{
-        .size = @sizeOf(@TypeOf(vertices)),
+        .size = size,
         .usage = .{ .transfer_src_bit = true },
         .sharing_mode = .exclusive,
     }, null);
     defer gc.vkd.destroyBuffer(gc.dev, staging_buffer, null);
+
     const mem_reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, staging_buffer);
-    const staging_memory = try gc.allocate(mem_reqs, .{ .host_visible_bit = true, .host_coherent_bit = true });
+    const staging_memory = try gc.allocate(mem_reqs, .{
+        .host_visible_bit = true,
+        .host_coherent_bit = true,
+    });
     defer gc.vkd.freeMemory(gc.dev, staging_memory, null);
+
     try gc.vkd.bindBufferMemory(gc.dev, staging_buffer, staging_memory, 0);
 
     {
         const data = try gc.vkd.mapMemory(gc.dev, staging_memory, 0, vk.WHOLE_SIZE, .{});
         defer gc.vkd.unmapMemory(gc.dev, staging_memory);
 
-        const gpu_vertices: [*]Vertex = @ptrCast(@alignCast(data));
-        @memcpy(gpu_vertices, vertices[0..]);
+        const dest: [*]T = @ptrCast(@alignCast(data));
+        @memcpy(dest, source);
     }
 
-    try copyBuffer(gc, pool, buffer, staging_buffer, @sizeOf(@TypeOf(vertices)));
+    try copyBuffer(gc, pool, buffer, staging_buffer, size);
 }
 
 fn copyBuffer(gc: *const GraphicsContext, pool: vk.CommandPool, dst: vk.Buffer, src: vk.Buffer, size: vk.DeviceSize) !void {
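Note: the commented-out line at the top of uploadData points at why Vertex became an extern struct: the staging copy is a raw @memcpy, so T needs a defined in-memory layout that matches the pipeline's declared vertex input. A sketch of that guard as a standalone check, using the same std.builtin.Type spellings as the comment (these names differ across Zig versions):

    /// Comptime check sketched from the commented-out line in uploadData.
    /// Auto-layout structs have compiler-chosen field order and padding, so a
    /// byte-for-byte upload could disagree with the declared vertex layout;
    /// extern/packed structs and plain scalars such as u16 pass.
    fn assertDefinedLayout(comptime T: type) void {
        if (@typeInfo(T) == .Struct and @typeInfo(T).Struct.layout == .auto)
            @compileError(@typeName(T) ++ " needs an extern or packed layout to be uploaded");
    }

uploadData could start with comptime assertDefinedLayout(T); both Vertex (extern struct) and Index (u16) would satisfy it.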
@@ -217,6 +250,9 @@ fn copyBuffer(gc: *const GraphicsContext, pool: vk.CommandPool, dst: vk.Buffer,
         .p_command_buffers = @ptrCast(&cmdbuf),
         .p_wait_dst_stage_mask = undefined,
     };
+    // creating and submitting a queue for every copy operation seems a bad idea for "streamed" data
+    // gonna want a way to send a copy operation WITH SYNCHRONIZATION PRIMITIVES on a particular queue
+    // see https://stackoverflow.com/a/62183243
     try gc.vkd.queueSubmit(gc.graphics_queue.handle, 1, @ptrCast(&si), .null_handle);
     try gc.vkd.queueWaitIdle(gc.graphics_queue.handle);
 }
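Note: the new comments call out queueSubmit + queueWaitIdle per copy as a stopgap. A smaller first step would be to wait on a fence for just this submission rather than draining the whole graphics queue; a sketch, assuming createFence, destroyFence and waitForFences are also enabled in the DeviceDispatch wrapper:

    // Signal a fence from this one submission and block only on it,
    // instead of waiting for the whole queue to go idle.
    const fence = try gc.vkd.createFence(gc.dev, &.{ .flags = .{} }, null);
    defer gc.vkd.destroyFence(gc.dev, fence, null);

    try gc.vkd.queueSubmit(gc.graphics_queue.handle, 1, @ptrCast(&si), fence);
    _ = try gc.vkd.waitForFences(gc.dev, 1, @ptrCast(&fence), vk.TRUE, std.math.maxInt(u64));

For truly streamed uploads the comment's point still stands: the copy would be recorded alongside the frame's other work and ordered with semaphores or barriers rather than blocked on at all.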
@@ -225,7 +261,8 @@ fn createCommandBuffers(
     gc: *const GraphicsContext,
     pool: vk.CommandPool,
     allocator: Allocator,
-    buffer: vk.Buffer,
+    vertex_buffer: vk.Buffer,
+    index_buffer: vk.Buffer,
     extent: vk.Extent2D,
     render_pass: vk.RenderPass,
     pipeline: vk.Pipeline,
@@ -281,8 +318,10 @@ fn createCommandBuffers(
 
         gc.vkd.cmdBindPipeline(cmdbuf, .graphics, pipeline);
         const offset = [_]vk.DeviceSize{0};
-        gc.vkd.cmdBindVertexBuffers(cmdbuf, 0, 1, @ptrCast(&buffer), &offset);
-        gc.vkd.cmdDraw(cmdbuf, vertices.len, 1, 0, 0);
+        gc.vkd.cmdBindVertexBuffers(cmdbuf, 0, 1, @ptrCast(&vertex_buffer), &offset);
+        gc.vkd.cmdBindIndexBuffer(cmdbuf, index_buffer, 0, .uint16);
+        gc.vkd.cmdDrawIndexed(cmdbuf, indices.len, 1, 0, 0, 0);
+        // gc.vkd.cmdDraw(cmdbuf, vertices.len, 1, 0, 0);
 
         gc.vkd.cmdEndRenderPass(cmdbuf);
         try gc.vkd.endCommandBuffer(cmdbuf);
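Note: the .uint16 passed to cmdBindIndexBuffer has to be kept in sync with const Index = u16 by hand. A small sketch (not in the commit) that derives the Vulkan enum from the alias instead:

    // Map the index alias onto vk.IndexType at comptime, so changing Index
    // to u32 cannot silently disagree with the cmdBindIndexBuffer call.
    const index_type: vk.IndexType = switch (Index) {
        u16 => .uint16,
        u32 => .uint32,
        else => @compileError("unsupported index type"),
    };

cmdBindIndexBuffer(cmdbuf, index_buffer, 0, index_type) would then follow the alias automatically.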