diff --git a/src/inspect.zig b/src/inspect.zig
index 9aead49..66cc397 100644
--- a/src/inspect.zig
+++ b/src/inspect.zig
@@ -93,6 +93,8 @@ pub fn main() !void {
 
     std.debug.print("type: {any}\n", .{props.device_type}); // props.device_type
 
+    std.debug.print("max_push_constants_size: {d}\n", .{props.limits.max_push_constants_size});
+
     var family_count: u32 = undefined;
     vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, null);
     const families = try ally.alloc(vk.QueueFamilyProperties, family_count);
diff --git a/src/main.zig b/src/main.zig
index 303939e..3addfe1 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -164,7 +164,7 @@ fn create_device(
     pdev_search: for (pdevs) |pdev| {
         const props = vki.getPhysicalDeviceProperties(pdev);
 
-        // if (props.device_type != .discrete_gpu) continue :pdev_search;
+        if (props.device_type != .discrete_gpu) continue :pdev_search;
 
         var format_count: u32 = undefined;
         _ = try vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &format_count, null);
@@ -348,6 +348,7 @@ pub fn main() !void {
     defer destroyCommandBuffers(&gc, pool, ally, cmdbufs);
 
     while (c.glfwWindowShouldClose(window) == c.GLFW_FALSE) {
+        std.log.debug("new frame", .{ });
         var w: c_int = undefined;
         var h: c_int = undefined;
         c.glfwGetFramebufferSize(window, &w, &h);
@@ -364,13 +365,19 @@ pub fn main() !void {
             error.OutOfDateKHR => Swapchain.PresentState.suboptimal,
             else => |narrow| return narrow,
         };
+
+        std.log.debug("state: {}", .{state});
 
         if (state == .suboptimal or extent.width != @as(u32, @intCast(w)) or extent.height != @as(u32, @intCast(h))) {
             extent.width = @intCast(w);
             extent.height = @intCast(h);
+            std.log.debug("about to recreate", .{ });
             try swapchain.recreate(extent);
 
+            std.log.debug("about to destroy command buffers", .{ });
             destroyCommandBuffers(&gc, pool, ally, cmdbufs);
+
+            std.log.debug("about to create command buffers", .{ });
             cmdbufs = try createCommandBuffers(
                 &gc,
                 pool,
@@ -499,8 +506,17 @@ fn createCommandBuffers(
     for (cmdbufs, swapchain.swap_images) |cmdbuf, image| {
         try gc.vkd.beginCommandBuffer(cmdbuf, &.{});
 
-        const pre_render_barriers: []const vk.ImageMemoryBarrier = &.{
-            vk.ImageMemoryBarrier{
+        gc.vkd.cmdPipelineBarrier(
+            cmdbuf,
+            .{ .top_of_pipe_bit = true },
+            .{ .color_attachment_output_bit = true },
+            .{},
+            0,
+            null,
+            0,
+            null,
+            1,
+            @ptrCast(&vk.ImageMemoryBarrier{
                 .src_access_mask = .{},
                 .dst_access_mask = .{ .color_attachment_write_bit = true },
                 .old_layout = .undefined,
@@ -515,19 +531,7 @@ fn createCommandBuffers(
                     .base_array_layer = 0,
                     .layer_count = 1,
                 },
-            },
-        };
-        gc.vkd.cmdPipelineBarrier(
-            cmdbuf,
-            .{ .top_of_pipe_bit = true },
-            .{ .color_attachment_output_bit = true },
-            .{},
-            0,
-            null,
-            0,
-            null,
-            @intCast(pre_render_barriers.len),
-            pre_render_barriers.ptr,
+            }),
         );
 
         gc.vkd.cmdSetViewport(cmdbuf, 0, 1, @ptrCast(&viewport));
@@ -564,8 +568,17 @@ fn createCommandBuffers(
 
         gc.vkd.cmdEndRenderingKHR(cmdbuf);
 
-        const post_render_barriers: []const vk.ImageMemoryBarrier = &.{
-            vk.ImageMemoryBarrier{
+        gc.vkd.cmdPipelineBarrier(
+            cmdbuf,
+            .{ .color_attachment_output_bit = true },
+            .{ .bottom_of_pipe_bit = true },
+            .{},
+            0,
+            null,
+            0,
+            null,
+            1,
+            @ptrCast(&vk.ImageMemoryBarrier{
                 .src_access_mask = .{ .color_attachment_write_bit = true },
                 .dst_access_mask = .{},
                 .old_layout = .color_attachment_optimal,
@@ -580,19 +593,7 @@ fn createCommandBuffers(
                     .base_array_layer = 0,
                     .layer_count = 1,
                 },
-            },
-        };
-        gc.vkd.cmdPipelineBarrier(
-            cmdbuf,
-            .{ .color_attachment_output_bit = true },
-            .{ .bottom_of_pipe_bit = true },
-            .{},
-            0,
-            null,
-            0,
-            null,
-            @intCast(post_render_barriers.len),
-            post_render_barriers.ptr,
+            }),
         );
 
         try gc.vkd.endCommandBuffer(cmdbuf);
diff --git a/src/swapchain.zig b/src/swapchain.zig
index 13d2ce0..e250fde 100644
--- a/src/swapchain.zig
+++ b/src/swapchain.zig
@@ -111,9 +111,18 @@ pub const Swapchain = struct {
         var next_image_acquired = try gc.vkd.createSemaphore(gc.dev, &.{}, null);
         errdefer gc.vkd.destroySemaphore(gc.dev, next_image_acquired, null);
 
-        const result = try gc.vkd.acquireNextImageKHR(gc.dev, handle, std.math.maxInt(u64), next_image_acquired, .null_handle);
-        if (result.result != .success) {
-            return error.ImageAcquireFailed;
+        const result = try gc.vkd.acquireNextImageKHR(
+            gc.dev,
+            handle,
+            std.math.maxInt(u64),
+            next_image_acquired,
+            .null_handle,
+        );
+        switch (result.result) {
+            vk.Result.success, vk.Result.suboptimal_khr => {},
+            vk.Result.timeout => return error.Timeout,
+            vk.Result.not_ready => return error.NotReady,
+            else => unreachable,
         }
 
         std.mem.swap(vk.Semaphore, &swap_images[result.image_index].image_acquired, &next_image_acquired);
@@ -267,6 +276,15 @@ const SwapImage = struct {
     }
 
     fn deinit(self: SwapImage, gc: *const Context) void {
+        // todo critical: this waitForFence deadlocks when recreating the swapchain on nvidia.
+        // Something about the main "present" loop is broken. Can't just ignore the fence; the validation layers report
+        // errors about destroying a fence while a queue still depends on it (details may be wrong... I don't fully understand).
+        // I suspect this is more an issue with the vulkan-zig example, so I probably need to revisit vulkan-tutorial or
+        // try to rebuild the swapchain infrastructure myself.
+        //
+        // I do think it's clunky how the swapchain is created and recreated; there's duplicate logic in creation and
+        // recreation that could maybe be avoided with .null_handle? Maybe there's a more straightforward way to handle it.
+
         self.waitForFence(gc) catch return;
         gc.vkd.destroyImageView(gc.dev, self.view, null);
         gc.vkd.destroySemaphore(gc.dev, self.image_acquired, null);
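
Note on the todo above: a rough sketch of the .null_handle idea, not a tested fix. Assumptions that are not in this repo: a Swapchain that stores gc, ally, and handle; a deinitExceptSwapchain helper; and a createFromOld helper standing in for the existing creation code with .old_swapchain = old_handle set on the SwapchainCreateInfoKHR it already builds. It also sketches waiting for the whole device to go idle before tearing the old images down, instead of waiting on each image's fence in deinit.

// Sketch only. createFromOld, deinitExceptSwapchain, self.gc, self.ally are hypothetical names.
pub fn init(gc: *const Context, ally: std.mem.Allocator, extent: vk.Extent2D) !Swapchain {
    // First creation is just "recreation" with no previous swapchain to recycle.
    return createFromOld(gc, ally, extent, .null_handle);
}

pub fn recreate(self: *Swapchain, new_extent: vk.Extent2D) !void {
    const gc = self.gc;
    const ally = self.ally;
    const old_handle = self.handle;

    // Blunt alternative to waiting on per-image fences in deinit: once the device is idle,
    // no queue can still be using the old images, their semaphores, or their fences.
    try gc.vkd.deviceWaitIdle(gc.dev);

    // Destroy views/semaphores/fences but keep the old handle alive so it can be recycled.
    self.deinitExceptSwapchain();
    self.* = try createFromOld(gc, ally, new_extent, old_handle);

    // Passing old_handle as old_swapchain retires it, but it still has to be destroyed explicitly.
    gc.vkd.destroySwapchainKHR(gc.dev, old_handle, null);
}

deviceWaitIdle on every recreate is heavier than per-image fences, but resizes are rare enough that it may be an acceptable way to sidestep the deadlock until the real cause is understood, and routing both init and recreate through one creation path removes the duplicated setup logic the comment complains about.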