Incrementally remove GraphicsContext

David Allemang
2024-03-30 00:41:48 -04:00
parent 5a48cdd936
commit 2402f29742
2 changed files with 198 additions and 155 deletions

View File

@@ -2,8 +2,9 @@ const std = @import("std");
 const vk = @import("vk");
 const c = @import("c.zig");
 const shaders = @import("shaders");
-const GraphicsContext = @import("graphics_context.zig").GraphicsContext;
+// const GraphicsContext = @import("graphics_context.zig").GraphicsContext;
 const Swapchain = @import("swapchain.zig").Swapchain;
+const Context = @import("swapchain.zig").Context;
 const Allocator = std.mem.Allocator;
 const gfx = @import("gfx.zig");
@@ -100,7 +101,7 @@ fn create_window(extent: vk.Extent2D, title: [*:0]const u8) !*c.GLFWwindow {
     ) orelse error.WindowInitFailed;
 }

-const DevicePair = std.meta.Tuple(&.{ vk.PhysicalDevice, vk.Device, gfx.DeviceDispatch, vk.Queue });
+const DevicePair = std.meta.Tuple(&.{ vk.PhysicalDevice, vk.Device, gfx.DeviceDispatch, u32 });

 /// note: destroy with vkd.destroyDevice(dev, null)
 fn create_device(
@@ -195,9 +196,7 @@ fn create_device(
         const vkd = try gfx.DeviceDispatch.load(dev, vki.dispatch.vkGetDeviceProcAddr);
         errdefer vkd.destroyDevice(dev, null);

-        const queue = vkd.getDeviceQueue(dev, graphics_family, 0);
-
-        return .{ pdev, dev, vkd, queue };
+        return .{ pdev, dev, vkd, graphics_family };
     }

     return error.NoSuitableDevice;
@@ -229,142 +228,154 @@ pub fn main() !void {
     const surface = try create_surface(instance, window);
     defer vki.destroySurfaceKHR(instance, surface, null);

-    const pdev: vk.PhysicalDevice, const dev: vk.Device, const vkd: gfx.DeviceDispatch, const queue: vk.Queue =
+    const pdev: vk.PhysicalDevice, const dev: vk.Device, const vkd: gfx.DeviceDispatch, const family: u32 =
         try create_device(ally, instance, surface, vki);
     defer vkd.destroyDevice(dev, null);

-    var swapchain: vk.SwapchainKHR = .null_handle;
-    defer vkd.destroySwapchainKHR(dev, swapchain, null);
-
-    swapchain = try vkd.createSwapchainKHR(dev, &.{
-        .surface = surface,
-        .min_image_count = 3, // todo compute
-        .image_format = .r8g8b8a8_sint, // todo compute
-        // .image_format = .r8g8b8a8_sint, // todo compute
-        .image_color_space = .srgb_nonlinear_khr, // todo compute
-        .image_extent = extent, // todo compute
-        .image_array_layers = 1,
-        .image_usage = .{ .color_attachment_bit = true, .transfer_dst_bit = true },
-        .image_sharing_mode = .exclusive, // since we only choose one queue family
-        .pre_transform = .{ .identity_bit_khr = true }, // todo compute
-        .composite_alpha = .{ .opaque_bit_khr = true },
-        .present_mode = .mailbox_khr, // todo compute
-        .clipped = vk.TRUE,
-        .old_swapchain = swapchain,
-    }, null);
-
-    _ = try vkd.queuePresentKHR(queue, &.{
-        .wait_semaphore_count = 0,
-        .swapchain_count = 1,
-        .p_swapchains = &[_]vk.SwapchainKHR{swapchain},
-        .p_image_indices = &[_]u32{0},
-    });
-
-    try vkd.deviceWaitIdle(dev);
-
-    _ = pdev;
-    extent = undefined;
-
-    // var swapchain = try Swapchain.init(&gc, ally, extent);
-    // defer swapchain.deinit();
-    // const pipeline_layout = try gc.vkd.createPipelineLayout(gc.dev, &.{
-    //     .flags = .{},
-    //     .set_layout_count = 0,
-    //     .p_set_layouts = undefined,
-    //     .push_constant_range_count = 0,
-    //     .p_push_constant_ranges = undefined,
-    // }, null);
-    // defer gc.vkd.destroyPipelineLayout(gc.dev, pipeline_layout, null);
-    //
-    // const pipeline = try createPipeline(&gc, pipeline_layout, swapchain);
-    // defer gc.vkd.destroyPipeline(gc.dev, pipeline, null);
-    //
-    // const pool = try gc.vkd.createCommandPool(gc.dev, &.{
-    //     .queue_family_index = gc.graphics_queue.family,
-    // }, null);
-    // defer gc.vkd.destroyCommandPool(gc.dev, pool, null);
-    //
-    // const vertex_buffer = try gc.vkd.createBuffer(gc.dev, &.{
-    //     .size = @sizeOf(@TypeOf(vertices)),
-    //     .usage = .{ .transfer_dst_bit = true, .vertex_buffer_bit = true },
-    //     .sharing_mode = .exclusive,
-    // }, null);
-    // defer gc.vkd.destroyBuffer(gc.dev, vertex_buffer, null);
-    // const vertex_mem_reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, vertex_buffer);
-    // const vertex_memory = try gc.allocate(vertex_mem_reqs, .{ .device_local_bit = true });
-    // defer gc.vkd.freeMemory(gc.dev, vertex_memory, null);
-    // try gc.vkd.bindBufferMemory(gc.dev, vertex_buffer, vertex_memory, 0);
-    //
-    // try uploadData(Vertex, &gc, pool, vertex_buffer, &vertices);
-    //
-    // const index_buffer = try gc.vkd.createBuffer(gc.dev, &.{
-    //     .size = @sizeOf(@TypeOf(indices)),
-    //     .usage = .{ .transfer_dst_bit = true, .index_buffer_bit = true },
-    //     .sharing_mode = .exclusive,
-    // }, null);
-    // defer gc.vkd.destroyBuffer(gc.dev, index_buffer, null);
-    // const index_mem_reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, index_buffer);
-    // const index_memory = try gc.allocate(index_mem_reqs, .{ .device_local_bit = true });
-    // defer gc.vkd.freeMemory(gc.dev, index_memory, null);
-    // try gc.vkd.bindBufferMemory(gc.dev, index_buffer, index_memory, 0);
-    //
-    // try uploadData(Index, &gc, pool, index_buffer, &indices);
-    //
-    // var cmdbufs = try createCommandBuffers(
-    //     &gc,
-    //     pool,
-    //     ally,
-    //     vertex_buffer,
-    //     index_buffer,
-    //     pipeline,
-    //     swapchain,
-    // );
-    // defer destroyCommandBuffers(&gc, pool, ally, cmdbufs);
-    //
-    // while (c.glfwWindowShouldClose(window) == c.GLFW_FALSE) {
-    //     var w: c_int = undefined;
-    //     var h: c_int = undefined;
-    //     c.glfwGetFramebufferSize(window, &w, &h);
-    //
-    //     // Don't present or resize swapchain while the window is minimized
-    //     if (w == 0 or h == 0) {
-    //         c.glfwPollEvents();
-    //         continue;
-    //     }
-    //
-    //     const cmdbuf = cmdbufs[swapchain.image_index];
-    //
-    //     const state = swapchain.present(cmdbuf) catch |err| switch (err) {
-    //         error.OutOfDateKHR => Swapchain.PresentState.suboptimal,
-    //         else => |narrow| return narrow,
-    //     };
-    //
-    //     if (state == .suboptimal or extent.width != @as(u32, @intCast(w)) or extent.height != @as(u32, @intCast(h))) {
-    //         extent.width = @intCast(w);
-    //         extent.height = @intCast(h);
-    //         try swapchain.recreate(extent);
-    //
-    //         destroyCommandBuffers(&gc, pool, ally, cmdbufs);
-    //         cmdbufs = try createCommandBuffers(
-    //             &gc,
-    //             pool,
-    //             ally,
-    //             vertex_buffer,
-    //             index_buffer,
-    //             pipeline,
-    //             swapchain,
-    //         );
-    //     }
-    //
-    //     c.glfwPollEvents();
-    // }
-    //
-    // try swapchain.waitForAllFences();
-    // try gc.vkd.deviceWaitIdle(gc.dev);
+    const queue = vkd.getDeviceQueue(dev, family, 0);
+
+    // var swapchain: vk.SwapchainKHR = .null_handle;
+    // defer vkd.destroySwapchainKHR(dev, swapchain, null);
+    //
+    // swapchain = try vkd.createSwapchainKHR(dev, &.{
+    //     .surface = surface,
+    //     .min_image_count = 3, // todo compute
+    //     .image_format = .r8g8b8a8_sint, // todo compute
+    //     // .image_format = .r8g8b8a8_sint, // todo compute
+    //     .image_color_space = .srgb_nonlinear_khr, // todo compute
+    //     .image_extent = extent, // todo compute
+    //     .image_array_layers = 1,
+    //     .image_usage = .{ .color_attachment_bit = true, .transfer_dst_bit = true },
+    //     .image_sharing_mode = .exclusive, // since we only choose one queue family
+    //     .pre_transform = .{ .identity_bit_khr = true }, // todo compute
+    //     .composite_alpha = .{ .opaque_bit_khr = true },
+    //     .present_mode = .mailbox_khr, // todo compute
+    //     .clipped = vk.TRUE,
+    //     .old_swapchain = swapchain,
+    // }, null);
+    //
+    // _ = try vkd.queuePresentKHR(queue, &.{
+    //     .wait_semaphore_count = 0,
+    //     .swapchain_count = 1,
+    //     .p_swapchains = &[_]vk.SwapchainKHR{swapchain},
+    //     .p_image_indices = &[_]u32{0},
+    // });
+    //
+    // try vkd.deviceWaitIdle(dev);
+    //
+    // _ = pdev;
+    // extent = undefined;
+
+    const gc: Context = .{
+        .vki = vki,
+        .vkd = vkd,
+        .pdev = pdev,
+        .dev = dev,
+        .surface = surface,
+        .queue = queue,
+        .family = family,
+    };
+
+    var swapchain = try Swapchain.init(&gc, ally, extent);
+    defer swapchain.deinit();
+    const pipeline_layout = try gc.vkd.createPipelineLayout(gc.dev, &.{
+        .flags = .{},
+        .set_layout_count = 0,
+        .p_set_layouts = undefined,
+        .push_constant_range_count = 0,
+        .p_push_constant_ranges = undefined,
+    }, null);
+    defer gc.vkd.destroyPipelineLayout(gc.dev, pipeline_layout, null);
+
+    const pipeline = try createPipeline(&gc, pipeline_layout, swapchain);
+    defer gc.vkd.destroyPipeline(gc.dev, pipeline, null);
+
+    const pool = try gc.vkd.createCommandPool(gc.dev, &.{
+        .queue_family_index = family,
+    }, null);
+    defer gc.vkd.destroyCommandPool(gc.dev, pool, null);
+
+    const vertex_buffer = try gc.vkd.createBuffer(gc.dev, &.{
+        .size = @sizeOf(@TypeOf(vertices)),
+        .usage = .{ .transfer_dst_bit = true, .vertex_buffer_bit = true },
+        .sharing_mode = .exclusive,
+    }, null);
+    defer gc.vkd.destroyBuffer(gc.dev, vertex_buffer, null);
+    const vertex_mem_reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, vertex_buffer);
+    const vertex_memory = try gc.allocate(vertex_mem_reqs, .{ .device_local_bit = true });
+    defer gc.vkd.freeMemory(gc.dev, vertex_memory, null);
+    try gc.vkd.bindBufferMemory(gc.dev, vertex_buffer, vertex_memory, 0);
+
+    try uploadData(Vertex, &gc, pool, vertex_buffer, &vertices);
+
+    const index_buffer = try gc.vkd.createBuffer(gc.dev, &.{
+        .size = @sizeOf(@TypeOf(indices)),
+        .usage = .{ .transfer_dst_bit = true, .index_buffer_bit = true },
+        .sharing_mode = .exclusive,
+    }, null);
+    defer gc.vkd.destroyBuffer(gc.dev, index_buffer, null);
+    const index_mem_reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, index_buffer);
+    const index_memory = try gc.allocate(index_mem_reqs, .{ .device_local_bit = true });
+    defer gc.vkd.freeMemory(gc.dev, index_memory, null);
+    try gc.vkd.bindBufferMemory(gc.dev, index_buffer, index_memory, 0);
+
+    try uploadData(Index, &gc, pool, index_buffer, &indices);
+
+    var cmdbufs = try createCommandBuffers(
+        &gc,
+        pool,
+        ally,
+        vertex_buffer,
+        index_buffer,
+        pipeline,
+        swapchain,
+    );
+    defer destroyCommandBuffers(&gc, pool, ally, cmdbufs);
+
+    while (c.glfwWindowShouldClose(window) == c.GLFW_FALSE) {
+        var w: c_int = undefined;
+        var h: c_int = undefined;
+        c.glfwGetFramebufferSize(window, &w, &h);
+
+        // Don't present or resize swapchain while the window is minimized
+        if (w == 0 or h == 0) {
+            c.glfwPollEvents();
+            continue;
+        }
+
+        const cmdbuf = cmdbufs[swapchain.image_index];
+
+        const state = swapchain.present(cmdbuf) catch |err| switch (err) {
+            error.OutOfDateKHR => Swapchain.PresentState.suboptimal,
+            else => |narrow| return narrow,
+        };
+
+        if (state == .suboptimal or extent.width != @as(u32, @intCast(w)) or extent.height != @as(u32, @intCast(h))) {
+            extent.width = @intCast(w);
+            extent.height = @intCast(h);
+            try swapchain.recreate(extent);
+
+            destroyCommandBuffers(&gc, pool, ally, cmdbufs);
+            cmdbufs = try createCommandBuffers(
+                &gc,
+                pool,
+                ally,
+                vertex_buffer,
+                index_buffer,
+                pipeline,
+                swapchain,
+            );
+        }
+
+        c.glfwPollEvents();
+    }
+
+    try swapchain.waitForAllFences();
+    try gc.vkd.deviceWaitIdle(gc.dev);
 }

-fn uploadData(comptime T: type, gc: *const GraphicsContext, pool: vk.CommandPool, buffer: vk.Buffer, source: []const T) !void {
+fn uploadData(comptime T: type, gc: *const Context, pool: vk.CommandPool, buffer: vk.Buffer, source: []const T) !void {
     // if (@typeInfo(T) == .Struct and @typeInfo(T).Struct.layout == .auto) @compileError("Requires defined T layout");
     const size = @sizeOf(T) * source.len;
@@ -396,7 +407,7 @@ fn uploadData(comptime T: type, gc: *const GraphicsContext, pool: vk.CommandPool
     try copyBuffer(gc, pool, buffer, staging_buffer, size);
 }

-fn copyBuffer(gc: *const GraphicsContext, pool: vk.CommandPool, dst: vk.Buffer, src: vk.Buffer, size: vk.DeviceSize) !void {
+fn copyBuffer(gc: *const Context, pool: vk.CommandPool, dst: vk.Buffer, src: vk.Buffer, size: vk.DeviceSize) !void {
     var cmdbuf: vk.CommandBuffer = undefined;
     try gc.vkd.allocateCommandBuffers(gc.dev, &.{
         .command_pool = pool,
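Note on the hunk above: uploadData hands copyBuffer a staging_buffer that is created outside the changed lines. A minimal sketch of what that staging path typically looks like against the new Context, using the dispatch calls already visible in this diff plus mapMemory/unmapMemory from the same vulkan-zig style table; the function name uploadDataSketch and the exact body are assumptions for illustration, not the file's actual code:

fn uploadDataSketch(comptime T: type, gc: *const Context, pool: vk.CommandPool, buffer: vk.Buffer, source: []const T) !void {
    const size = @sizeOf(T) * source.len;

    // Host-visible staging buffer (the real file creates something equivalent
    // before the copyBuffer call shown above).
    const staging_buffer = try gc.vkd.createBuffer(gc.dev, &.{
        .size = size,
        .usage = .{ .transfer_src_bit = true },
        .sharing_mode = .exclusive,
    }, null);
    defer gc.vkd.destroyBuffer(gc.dev, staging_buffer, null);

    const reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, staging_buffer);
    const staging_memory = try gc.allocate(reqs, .{ .host_visible_bit = true, .host_coherent_bit = true });
    defer gc.vkd.freeMemory(gc.dev, staging_memory, null);
    try gc.vkd.bindBufferMemory(gc.dev, staging_buffer, staging_memory, 0);

    // Fill the staging memory from the CPU, then let copyBuffer move it to the
    // device-local destination on the graphics queue.
    {
        const data = try gc.vkd.mapMemory(gc.dev, staging_memory, 0, vk.WHOLE_SIZE, .{});
        defer gc.vkd.unmapMemory(gc.dev, staging_memory);

        const mapped: [*]T = @ptrCast(@alignCast(data));
        @memcpy(mapped[0..source.len], source);
    }

    try copyBuffer(gc, pool, buffer, staging_buffer, size);
}

Keeping the staging allocation host_visible and host_coherent avoids an explicit flush before the GPU-side copy.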
@@ -426,12 +437,12 @@ fn copyBuffer(gc: *const GraphicsContext, pool: vk.CommandPool, dst: vk.Buffer,
     // creating and submitting a queue for every copy operation seems a bad idea for "streamed" data
     // gonna want a way to send a copy operation WITH SYNCHRONIZATION PRIMITIVES on a particular queue
     // see https://stackoverflow.com/a/62183243
-    try gc.vkd.queueSubmit(gc.graphics_queue.handle, 1, @ptrCast(&si), .null_handle);
-    try gc.vkd.queueWaitIdle(gc.graphics_queue.handle);
+    try gc.vkd.queueSubmit(gc.queue, 1, @ptrCast(&si), .null_handle);
+    try gc.vkd.queueWaitIdle(gc.queue);
 }

 fn createCommandBuffers(
-    gc: *const GraphicsContext,
+    gc: *const Context,
     pool: vk.CommandPool,
     allocator: Allocator,
     vertex_buffer: vk.Buffer,
@@ -512,12 +523,12 @@ fn createCommandBuffers(
     return cmdbufs;
 }

-fn destroyCommandBuffers(gc: *const GraphicsContext, pool: vk.CommandPool, allocator: Allocator, cmdbufs: []vk.CommandBuffer) void {
+fn destroyCommandBuffers(gc: *const Context, pool: vk.CommandPool, allocator: Allocator, cmdbufs: []vk.CommandBuffer) void {
     gc.vkd.freeCommandBuffers(gc.dev, pool, @truncate(cmdbufs.len), cmdbufs.ptr);
     allocator.free(cmdbufs);
 }

-fn createPipeline(gc: *const GraphicsContext, layout: vk.PipelineLayout, swapchain: Swapchain) !vk.Pipeline {
+fn createPipeline(gc: *const Context, layout: vk.PipelineLayout, swapchain: Swapchain) !vk.Pipeline {
     const vert = try gc.vkd.createShaderModule(gc.dev, &.{
         .code_size = shaders.triangle_vert.len,
         .p_code = @as([*]const u32, @ptrCast(&shaders.triangle_vert)),
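The vertex and index buffer setup in main now runs the same four-step sequence twice (createBuffer, getBufferMemoryRequirements, Context.allocate, bindBufferMemory). A small sketch of how that pattern could be factored through the new Context; the helper name createDeviceLocalBuffer and its return struct are hypothetical and not something this commit adds:

const DeviceLocalBuffer = struct {
    buffer: vk.Buffer,
    memory: vk.DeviceMemory,
};

// Hypothetical helper: create a buffer and bind freshly allocated
// device-local memory to it via Context.allocate.
fn createDeviceLocalBuffer(gc: *const Context, size: vk.DeviceSize, usage: vk.BufferUsageFlags) !DeviceLocalBuffer {
    const buffer = try gc.vkd.createBuffer(gc.dev, &.{
        .size = size,
        .usage = usage,
        .sharing_mode = .exclusive,
    }, null);
    errdefer gc.vkd.destroyBuffer(gc.dev, buffer, null);

    const reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, buffer);
    const memory = try gc.allocate(reqs, .{ .device_local_bit = true });
    errdefer gc.vkd.freeMemory(gc.dev, memory, null);

    try gc.vkd.bindBufferMemory(gc.dev, buffer, memory, 0);
    return .{ .buffer = buffer, .memory = memory };
}

Call sites would then reduce to createDeviceLocalBuffer(&gc, @sizeOf(@TypeOf(vertices)), .{ .transfer_dst_bit = true, .vertex_buffer_bit = true }) plus the matching defers for destroyBuffer and freeMemory.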

View File

@@ -1,15 +1,55 @@
 const std = @import("std");
 const vk = @import("vk");
-const GraphicsContext = @import("graphics_context.zig").GraphicsContext;
+const gfx = @import("gfx.zig");
 const Allocator = std.mem.Allocator;

+pub const Context = struct {
+    vki: gfx.InstanceDispatch,
+    vkd: gfx.DeviceDispatch,
+
+    pdev: vk.PhysicalDevice,
+    dev: vk.Device,
+    surface: vk.SurfaceKHR,
+
+    queue: vk.Queue,
+    family: u32,
+
+    pub fn findMemoryTypeIndex(
+        self: @This(),
+        memory_type_bits: u32,
+        flags: vk.MemoryPropertyFlags,
+    ) !u32 {
+        const mem_props = self.vki.getPhysicalDeviceMemoryProperties(self.pdev);
+
+        for (mem_props.memory_types[0..mem_props.memory_type_count], 0..) |mem_type, i| {
+            if (memory_type_bits & (@as(u32, 1) << @truncate(i)) != 0 and mem_type.property_flags.contains(flags)) {
+                return @truncate(i);
+            }
+        }
+
+        return error.NoSuitableMemoryType;
+    }
+
+    pub fn allocate(
+        self: @This(),
+        requirements: vk.MemoryRequirements,
+        flags: vk.MemoryPropertyFlags,
+    ) !vk.DeviceMemory {
+        return try self.vkd.allocateMemory(self.dev, &.{
+            .allocation_size = requirements.size,
+            .memory_type_index = try self.findMemoryTypeIndex(requirements.memory_type_bits, flags),
+        }, null);
+    }
+};
+
 pub const Swapchain = struct {
     pub const PresentState = enum {
         optimal,
         suboptimal,
     };

-    gc: *const GraphicsContext,
+    gc: *const Context,
     allocator: Allocator,

     surface_format: vk.SurfaceFormatKHR,
@@ -21,11 +61,11 @@ pub const Swapchain = struct {
     image_index: u32,
     next_image_acquired: vk.Semaphore,

-    pub fn init(gc: *const GraphicsContext, allocator: Allocator, extent: vk.Extent2D) !Swapchain {
+    pub fn init(gc: *const Context, allocator: Allocator, extent: vk.Extent2D) !Swapchain {
         return try initRecycle(gc, allocator, extent, .null_handle);
     }

-    pub fn initRecycle(gc: *const GraphicsContext, allocator: Allocator, extent: vk.Extent2D, old_handle: vk.SwapchainKHR) !Swapchain {
+    pub fn initRecycle(gc: *const Context, allocator: Allocator, extent: vk.Extent2D, old_handle: vk.SwapchainKHR) !Swapchain {
         const caps = try gc.vki.getPhysicalDeviceSurfaceCapabilitiesKHR(gc.pdev, gc.surface);
         const actual_extent = findActualExtent(caps, extent);
         if (actual_extent.width == 0 or actual_extent.height == 0) {
@@ -40,12 +80,6 @@ pub const Swapchain = struct {
             image_count = @min(image_count, caps.max_image_count);
         }

-        const qfi = [_]u32{ gc.graphics_queue.family, gc.present_queue.family };
-        const sharing_mode: vk.SharingMode = if (gc.graphics_queue.family != gc.present_queue.family)
-            .concurrent
-        else
-            .exclusive;
-
         const handle = try gc.vkd.createSwapchainKHR(gc.dev, &.{
             .surface = gc.surface,
             .min_image_count = image_count,
@@ -54,9 +88,7 @@ pub const Swapchain = struct {
             .image_extent = actual_extent,
             .image_array_layers = 1,
             .image_usage = .{ .color_attachment_bit = true, .transfer_dst_bit = true },
-            .image_sharing_mode = sharing_mode,
-            .queue_family_index_count = qfi.len,
-            .p_queue_family_indices = &qfi,
+            .image_sharing_mode = .exclusive,
             .pre_transform = caps.current_transform,
             .composite_alpha = .{ .opaque_bit_khr = true },
             .present_mode = present_mode,
@@ -154,7 +186,7 @@ pub const Swapchain = struct {
         // Step 2: Submit the command buffer
         const wait_stage = [_]vk.PipelineStageFlags{.{ .top_of_pipe_bit = true }};
-        try self.gc.vkd.queueSubmit(self.gc.graphics_queue.handle, 1, &[_]vk.SubmitInfo{.{
+        try self.gc.vkd.queueSubmit(self.gc.queue, 1, &[_]vk.SubmitInfo{.{
             .wait_semaphore_count = 1,
             .p_wait_semaphores = @ptrCast(&current.image_acquired),
             .p_wait_dst_stage_mask = &wait_stage,
@@ -165,7 +197,7 @@ pub const Swapchain = struct {
         }}, current.frame_fence);

         // Step 3: Present the current frame
-        _ = try self.gc.vkd.queuePresentKHR(self.gc.present_queue.handle, &.{
+        _ = try self.gc.vkd.queuePresentKHR(self.gc.queue, &.{
             .wait_semaphore_count = 1,
             .p_wait_semaphores = @as([*]const vk.Semaphore, @ptrCast(&current.render_finished)),
             .swapchain_count = 1,
@@ -200,7 +232,7 @@ const SwapImage = struct {
     render_finished: vk.Semaphore,
     frame_fence: vk.Fence,

-    fn init(gc: *const GraphicsContext, image: vk.Image, format: vk.Format) !SwapImage {
+    fn init(gc: *const Context, image: vk.Image, format: vk.Format) !SwapImage {
         const view = try gc.vkd.createImageView(gc.dev, &.{
             .image = image,
             .view_type = .@"2d",
@@ -234,7 +266,7 @@ const SwapImage = struct {
         };
     }

-    fn deinit(self: SwapImage, gc: *const GraphicsContext) void {
+    fn deinit(self: SwapImage, gc: *const Context) void {
         self.waitForFence(gc) catch return;
         gc.vkd.destroyImageView(gc.dev, self.view, null);
         gc.vkd.destroySemaphore(gc.dev, self.image_acquired, null);
@@ -242,12 +274,12 @@ const SwapImage = struct {
         gc.vkd.destroyFence(gc.dev, self.frame_fence, null);
     }

-    fn waitForFence(self: SwapImage, gc: *const GraphicsContext) !void {
+    fn waitForFence(self: SwapImage, gc: *const Context) !void {
         _ = try gc.vkd.waitForFences(gc.dev, 1, @ptrCast(&self.frame_fence), vk.TRUE, std.math.maxInt(u64));
     }
 };

-fn initSwapchainImages(gc: *const GraphicsContext, swapchain: vk.SwapchainKHR, format: vk.Format, allocator: Allocator) ![]SwapImage {
+fn initSwapchainImages(gc: *const Context, swapchain: vk.SwapchainKHR, format: vk.Format, allocator: Allocator) ![]SwapImage {
     var count: u32 = undefined;
     _ = try gc.vkd.getSwapchainImagesKHR(gc.dev, swapchain, &count, null);
     const images = try allocator.alloc(vk.Image, count);
@@ -268,7 +300,7 @@ fn initSwapchainImages(gc: *const GraphicsContext, swapchain: vk.SwapchainKHR, f
     return swap_images;
 }

-fn findSurfaceFormat(gc: *const GraphicsContext, allocator: Allocator) !vk.SurfaceFormatKHR {
+fn findSurfaceFormat(gc: *const Context, allocator: Allocator) !vk.SurfaceFormatKHR {
     const preferred = vk.SurfaceFormatKHR{
         .format = .b8g8r8a8_srgb,
         .color_space = .srgb_nonlinear_khr,
@@ -289,7 +321,7 @@ fn findSurfaceFormat(gc: *const GraphicsContext, allocator: Allocator) !vk.Surfa
     return surface_formats[0]; // There must always be at least one supported surface format
 }

-fn findPresentMode(gc: *const GraphicsContext, allocator: Allocator) !vk.PresentModeKHR {
+fn findPresentMode(gc: *const Context, allocator: Allocator) !vk.PresentModeKHR {
     var count: u32 = undefined;
     _ = try gc.vki.getPhysicalDeviceSurfacePresentModesKHR(gc.pdev, gc.surface, &count, null);
     const present_modes = try allocator.alloc(vk.PresentModeKHR, count);