diff --git a/build.zig b/build.zig index 1826f14..bded306 100644 --- a/build.zig +++ b/build.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const vkgen = @import("vulkan-zig"); pub fn build(b: *std.Build) void { const target = b.standardTargetOptions(.{}); @@ -17,6 +18,15 @@ pub fn build(b: *std.Build) void { .optimize = optimize, }); + const shaders = vkgen.ShaderCompileStep.create( + b, + &[_][]const u8{ "glslc", "--target-env=vulkan1.3" }, + "-o", + ); + shaders.add("triangle_vert", "src/shaders/triangle.vert", .{}); + shaders.add("triangle_frag", "src/shaders/triangle.frag", .{}); + exe.root_module.addImport("shaders", shaders.getModule()); + // this requires PKG_CONFIG_PATH to be set. something like: // ~/.local/lib/pkgconfig/ exe.linkSystemLibrary2("glfw3", .{ diff --git a/src/graphics_context.zig b/src/graphics_context.zig new file mode 100644 index 0000000..18ee6d9 --- /dev/null +++ b/src/graphics_context.zig @@ -0,0 +1,344 @@ +const std = @import("std"); +const vk = @import("vk"); +const c = @import("c.zig"); +const Allocator = std.mem.Allocator; + +const required_device_extensions = [_][*:0]const u8{vk.extension_info.khr_swapchain.name}; + +const BaseDispatch = vk.BaseWrapper(.{ + .createInstance = true, + .getInstanceProcAddr = true, +}); + +const InstanceDispatch = vk.InstanceWrapper(.{ + .destroyInstance = true, + .createDevice = true, + .destroySurfaceKHR = true, + .enumeratePhysicalDevices = true, + .getPhysicalDeviceProperties = true, + .enumerateDeviceExtensionProperties = true, + .getPhysicalDeviceSurfaceFormatsKHR = true, + .getPhysicalDeviceSurfacePresentModesKHR = true, + .getPhysicalDeviceSurfaceCapabilitiesKHR = true, + .getPhysicalDeviceQueueFamilyProperties = true, + .getPhysicalDeviceSurfaceSupportKHR = true, + .getPhysicalDeviceMemoryProperties = true, + .getDeviceProcAddr = true, +}); + +const DeviceDispatch = vk.DeviceWrapper(.{ + .destroyDevice = true, + .getDeviceQueue = true, + .createSemaphore = true, + .createFence = true, + .createImageView = true, + .destroyImageView = true, + .destroySemaphore = true, + .destroyFence = true, + .getSwapchainImagesKHR = true, + .createSwapchainKHR = true, + .destroySwapchainKHR = true, + .acquireNextImageKHR = true, + .deviceWaitIdle = true, + .waitForFences = true, + .resetFences = true, + .queueSubmit = true, + .queuePresentKHR = true, + .createCommandPool = true, + .destroyCommandPool = true, + .allocateCommandBuffers = true, + .freeCommandBuffers = true, + .queueWaitIdle = true, + .createShaderModule = true, + .destroyShaderModule = true, + .createPipelineLayout = true, + .destroyPipelineLayout = true, + .createRenderPass = true, + .destroyRenderPass = true, + .createGraphicsPipelines = true, + .destroyPipeline = true, + .createFramebuffer = true, + .destroyFramebuffer = true, + .beginCommandBuffer = true, + .endCommandBuffer = true, + .allocateMemory = true, + .freeMemory = true, + .createBuffer = true, + .destroyBuffer = true, + .getBufferMemoryRequirements = true, + .mapMemory = true, + .unmapMemory = true, + .bindBufferMemory = true, + .cmdBeginRenderPass = true, + .cmdEndRenderPass = true, + .cmdBindPipeline = true, + .cmdDraw = true, + .cmdSetViewport = true, + .cmdSetScissor = true, + .cmdBindVertexBuffers = true, + .cmdCopyBuffer = true, +}); + +pub const GraphicsContext = struct { + vkb: BaseDispatch, + vki: InstanceDispatch, + vkd: DeviceDispatch, + + instance: vk.Instance, + surface: vk.SurfaceKHR, + pdev: vk.PhysicalDevice, + props: vk.PhysicalDeviceProperties, + mem_props: 
vk.PhysicalDeviceMemoryProperties, + + dev: vk.Device, + graphics_queue: Queue, + present_queue: Queue, + + pub fn init(allocator: Allocator, app_name: [*:0]const u8, window: *c.GLFWwindow) !GraphicsContext { + var self: GraphicsContext = undefined; + self.vkb = try BaseDispatch.load(c.glfwGetInstanceProcAddress); + + var glfw_exts_count: u32 = 0; + const glfw_exts = c.glfwGetRequiredInstanceExtensions(&glfw_exts_count); + + const app_info = vk.ApplicationInfo{ + .p_application_name = app_name, + .application_version = vk.makeApiVersion(0, 0, 0, 0), + .p_engine_name = app_name, + .engine_version = vk.makeApiVersion(0, 0, 0, 0), + .api_version = vk.API_VERSION_1_2, + }; + + self.instance = try self.vkb.createInstance(&.{ + .p_application_info = &app_info, + .enabled_extension_count = glfw_exts_count, + .pp_enabled_extension_names = @as([*]const [*:0]const u8, @ptrCast(glfw_exts)), + }, null); + + self.vki = try InstanceDispatch.load(self.instance, self.vkb.dispatch.vkGetInstanceProcAddr); + errdefer self.vki.destroyInstance(self.instance, null); + + self.surface = try createSurface(self.instance, window); + errdefer self.vki.destroySurfaceKHR(self.instance, self.surface, null); + + const candidate = try pickPhysicalDevice(self.vki, self.instance, allocator, self.surface); + self.pdev = candidate.pdev; + self.props = candidate.props; + self.dev = try initializeCandidate(self.vki, candidate); + self.vkd = try DeviceDispatch.load(self.dev, self.vki.dispatch.vkGetDeviceProcAddr); + errdefer self.vkd.destroyDevice(self.dev, null); + + self.graphics_queue = Queue.init(self.vkd, self.dev, candidate.queues.graphics_family); + self.present_queue = Queue.init(self.vkd, self.dev, candidate.queues.present_family); + + self.mem_props = self.vki.getPhysicalDeviceMemoryProperties(self.pdev); + + return self; + } + + pub fn deinit(self: GraphicsContext) void { + self.vkd.destroyDevice(self.dev, null); + self.vki.destroySurfaceKHR(self.instance, self.surface, null); + self.vki.destroyInstance(self.instance, null); + } + + pub fn deviceName(self: *const GraphicsContext) []const u8 { + return std.mem.sliceTo(&self.props.device_name, 0); + } + + pub fn findMemoryTypeIndex(self: GraphicsContext, memory_type_bits: u32, flags: vk.MemoryPropertyFlags) !u32 { + for (self.mem_props.memory_types[0..self.mem_props.memory_type_count], 0..) 
|mem_type, i| { + if (memory_type_bits & (@as(u32, 1) << @truncate(i)) != 0 and mem_type.property_flags.contains(flags)) { + return @truncate(i); + } + } + + return error.NoSuitableMemoryType; + } + + pub fn allocate(self: GraphicsContext, requirements: vk.MemoryRequirements, flags: vk.MemoryPropertyFlags) !vk.DeviceMemory { + return try self.vkd.allocateMemory(self.dev, &.{ + .allocation_size = requirements.size, + .memory_type_index = try self.findMemoryTypeIndex(requirements.memory_type_bits, flags), + }, null); + } +}; + +pub const Queue = struct { + handle: vk.Queue, + family: u32, + + fn init(vkd: DeviceDispatch, dev: vk.Device, family: u32) Queue { + return .{ + .handle = vkd.getDeviceQueue(dev, family, 0), + .family = family, + }; + } +}; + +fn createSurface(instance: vk.Instance, window: *c.GLFWwindow) !vk.SurfaceKHR { + var surface: vk.SurfaceKHR = undefined; + if (c.glfwCreateWindowSurface(instance, window, null, &surface) != .success) { + return error.SurfaceInitFailed; + } + + return surface; +} + +fn initializeCandidate(vki: InstanceDispatch, candidate: DeviceCandidate) !vk.Device { + const priority = [_]f32{1}; + const qci = [_]vk.DeviceQueueCreateInfo{ + .{ + .queue_family_index = candidate.queues.graphics_family, + .queue_count = 1, + .p_queue_priorities = &priority, + }, + .{ + .queue_family_index = candidate.queues.present_family, + .queue_count = 1, + .p_queue_priorities = &priority, + }, + }; + + const queue_count: u32 = if (candidate.queues.graphics_family == candidate.queues.present_family) + 1 + else + 2; + + return try vki.createDevice(candidate.pdev, &.{ + .queue_create_info_count = queue_count, + .p_queue_create_infos = &qci, + .enabled_extension_count = required_device_extensions.len, + .pp_enabled_extension_names = @as([*]const [*:0]const u8, @ptrCast(&required_device_extensions)), + }, null); +} + +const DeviceCandidate = struct { + pdev: vk.PhysicalDevice, + props: vk.PhysicalDeviceProperties, + queues: QueueAllocation, +}; + +const QueueAllocation = struct { + graphics_family: u32, + present_family: u32, +}; + +fn pickPhysicalDevice( + vki: InstanceDispatch, + instance: vk.Instance, + allocator: Allocator, + surface: vk.SurfaceKHR, +) !DeviceCandidate { + var device_count: u32 = undefined; + _ = try vki.enumeratePhysicalDevices(instance, &device_count, null); + + const pdevs = try allocator.alloc(vk.PhysicalDevice, device_count); + defer allocator.free(pdevs); + + _ = try vki.enumeratePhysicalDevices(instance, &device_count, pdevs.ptr); + + for (pdevs) |pdev| { + if (try checkSuitable(vki, pdev, allocator, surface)) |candidate| { + return candidate; + } + } + + return error.NoSuitableDevice; +} + +fn checkSuitable( + vki: InstanceDispatch, + pdev: vk.PhysicalDevice, + allocator: Allocator, + surface: vk.SurfaceKHR, +) !?DeviceCandidate { + const props = vki.getPhysicalDeviceProperties(pdev); + + if (!try checkExtensionSupport(vki, pdev, allocator)) { + return null; + } + + if (!try checkSurfaceSupport(vki, pdev, surface)) { + return null; + } + + if (try allocateQueues(vki, pdev, allocator, surface)) |allocation| { + return DeviceCandidate{ + .pdev = pdev, + .props = props, + .queues = allocation, + }; + } + + return null; +} + +fn allocateQueues(vki: InstanceDispatch, pdev: vk.PhysicalDevice, allocator: Allocator, surface: vk.SurfaceKHR) !?QueueAllocation { + var family_count: u32 = undefined; + vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, null); + + const families = try allocator.alloc(vk.QueueFamilyProperties, family_count); + defer 
allocator.free(families); + vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, families.ptr); + + var graphics_family: ?u32 = null; + var present_family: ?u32 = null; + + for (families, 0..) |properties, i| { + const family: u32 = @intCast(i); + + if (graphics_family == null and properties.queue_flags.graphics_bit) { + graphics_family = family; + } + + if (present_family == null and (try vki.getPhysicalDeviceSurfaceSupportKHR(pdev, family, surface)) == vk.TRUE) { + present_family = family; + } + } + + if (graphics_family != null and present_family != null) { + return QueueAllocation{ + .graphics_family = graphics_family.?, + .present_family = present_family.?, + }; + } + + return null; +} + +fn checkSurfaceSupport(vki: InstanceDispatch, pdev: vk.PhysicalDevice, surface: vk.SurfaceKHR) !bool { + var format_count: u32 = undefined; + _ = try vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &format_count, null); + + var present_mode_count: u32 = undefined; + _ = try vki.getPhysicalDeviceSurfacePresentModesKHR(pdev, surface, &present_mode_count, null); + + return format_count > 0 and present_mode_count > 0; +} + +fn checkExtensionSupport( + vki: InstanceDispatch, + pdev: vk.PhysicalDevice, + allocator: Allocator, +) !bool { + var count: u32 = undefined; + _ = try vki.enumerateDeviceExtensionProperties(pdev, null, &count, null); + + const propsv = try allocator.alloc(vk.ExtensionProperties, count); + defer allocator.free(propsv); + + _ = try vki.enumerateDeviceExtensionProperties(pdev, null, &count, propsv.ptr); + + for (required_device_extensions) |ext| { + for (propsv) |props| { + if (std.mem.eql(u8, std.mem.span(ext), std.mem.sliceTo(&props.extension_name, 0))) { + break; + } + } else { + return false; + } + } + + return true; +} diff --git a/src/main.zig b/src/main.zig index 1aa6e33..f17842b 100644 --- a/src/main.zig +++ b/src/main.zig @@ -1,29 +1,483 @@ const std = @import("std"); -const c = @import("c.zig"); const vk = @import("vk"); +const c = @import("c.zig"); +const shaders = @import("shaders"); +const GraphicsContext = @import("graphics_context.zig").GraphicsContext; +const Swapchain = @import("swapchain.zig").Swapchain; +const Allocator = std.mem.Allocator; -const Context = @import("Context.zig"); +const app_name = "vulkan-zig triangle example"; + +const Vertex = struct { + const binding_description = vk.VertexInputBindingDescription{ + .binding = 0, + .stride = @sizeOf(Vertex), + .input_rate = .vertex, + }; + + const attribute_description = [_]vk.VertexInputAttributeDescription{ + .{ + .binding = 0, + .location = 0, + .format = .r32g32_sfloat, + .offset = @offsetOf(Vertex, "pos"), + }, + .{ + .binding = 0, + .location = 1, + .format = .r32g32b32_sfloat, + .offset = @offsetOf(Vertex, "color"), + }, + }; + + pos: [2]f32, + color: [3]f32, +}; + +const vertices = [_]Vertex{ + .{ .pos = .{ 0, -0.5 }, .color = .{ 1, 0, 0 } }, + .{ .pos = .{ 0.5, 0.5 }, .color = .{ 0, 1, 0 } }, + .{ .pos = .{ -0.5, 0.5 }, .color = .{ 0, 0, 1 } }, +}; pub fn main() !void { - if (c.glfwInit() != c.GLFW_TRUE) { - return error.GlfwInitFailed; - } + if (c.glfwInit() != c.GLFW_TRUE) return error.GlfwInitFailed; defer c.glfwTerminate(); + if (c.glfwVulkanSupported() != c.GLFW_TRUE) { + std.log.err("GLFW could not find libvulkan", .{}); + return error.NoVulkan; + } + + var extent = vk.Extent2D{ .width = 800, .height = 600 }; + c.glfwWindowHint(c.GLFW_CLIENT_API, c.GLFW_NO_API); const window = c.glfwCreateWindow( - 720, - 1280, - "Hello World!", + @intCast(extent.width), + @intCast(extent.height), 
+ app_name, null, null, - ) orelse return error.glfwCreateWindowFailed; + ) orelse return error.WindowInitFailed; defer c.glfwDestroyWindow(window); - const ctx = try Context.init(window); - defer ctx.deinit(); + var gpa = std.heap.GeneralPurposeAllocator(.{}){}; + defer _ = gpa.deinit(); + const allocator = gpa.allocator(); - while (c.glfwWindowShouldClose(window) == 0) : (c.glfwPollEvents()) { - c.glfwSwapBuffers(window); + const gc = try GraphicsContext.init(allocator, app_name, window); + defer gc.deinit(); + + std.log.debug("Using device: {s}", .{gc.deviceName()}); + + var swapchain = try Swapchain.init(&gc, allocator, extent); + defer swapchain.deinit(); + + const pipeline_layout = try gc.vkd.createPipelineLayout(gc.dev, &.{ + .flags = .{}, + .set_layout_count = 0, + .p_set_layouts = undefined, + .push_constant_range_count = 0, + .p_push_constant_ranges = undefined, + }, null); + defer gc.vkd.destroyPipelineLayout(gc.dev, pipeline_layout, null); + + const render_pass = try createRenderPass(&gc, swapchain); + defer gc.vkd.destroyRenderPass(gc.dev, render_pass, null); + + const pipeline = try createPipeline(&gc, pipeline_layout, render_pass); + defer gc.vkd.destroyPipeline(gc.dev, pipeline, null); + + var framebuffers = try createFramebuffers(&gc, allocator, render_pass, swapchain); + defer destroyFramebuffers(&gc, allocator, framebuffers); + + const pool = try gc.vkd.createCommandPool(gc.dev, &.{ + .queue_family_index = gc.graphics_queue.family, + }, null); + defer gc.vkd.destroyCommandPool(gc.dev, pool, null); + + const buffer = try gc.vkd.createBuffer(gc.dev, &.{ + .size = @sizeOf(@TypeOf(vertices)), + .usage = .{ .transfer_dst_bit = true, .vertex_buffer_bit = true }, + .sharing_mode = .exclusive, + }, null); + defer gc.vkd.destroyBuffer(gc.dev, buffer, null); + const mem_reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, buffer); + const memory = try gc.allocate(mem_reqs, .{ .device_local_bit = true }); + defer gc.vkd.freeMemory(gc.dev, memory, null); + try gc.vkd.bindBufferMemory(gc.dev, buffer, memory, 0); + + try uploadVertices(&gc, pool, buffer); + + var cmdbufs = try createCommandBuffers( + &gc, + pool, + allocator, + buffer, + swapchain.extent, + render_pass, + pipeline, + framebuffers, + ); + defer destroyCommandBuffers(&gc, pool, allocator, cmdbufs); + + while (c.glfwWindowShouldClose(window) == c.GLFW_FALSE) { + var w: c_int = undefined; + var h: c_int = undefined; + c.glfwGetFramebufferSize(window, &w, &h); + + // Don't present or resize swapchain while the window is minimized + if (w == 0 or h == 0) { + c.glfwPollEvents(); + continue; + } + + const cmdbuf = cmdbufs[swapchain.image_index]; + + const state = swapchain.present(cmdbuf) catch |err| switch (err) { + error.OutOfDateKHR => Swapchain.PresentState.suboptimal, + else => |narrow| return narrow, + }; + + if (state == .suboptimal or extent.width != @as(u32, @intCast(w)) or extent.height != @as(u32, @intCast(h))) { + extent.width = @intCast(w); + extent.height = @intCast(h); + try swapchain.recreate(extent); + + destroyFramebuffers(&gc, allocator, framebuffers); + framebuffers = try createFramebuffers(&gc, allocator, render_pass, swapchain); + + destroyCommandBuffers(&gc, pool, allocator, cmdbufs); + cmdbufs = try createCommandBuffers( + &gc, + pool, + allocator, + buffer, + swapchain.extent, + render_pass, + pipeline, + framebuffers, + ); + } + + c.glfwPollEvents(); } + + try swapchain.waitForAllFences(); + try gc.vkd.deviceWaitIdle(gc.dev); +} + +fn uploadVertices(gc: *const GraphicsContext, pool: vk.CommandPool, 
buffer: vk.Buffer) !void { + const staging_buffer = try gc.vkd.createBuffer(gc.dev, &.{ + .size = @sizeOf(@TypeOf(vertices)), + .usage = .{ .transfer_src_bit = true }, + .sharing_mode = .exclusive, + }, null); + defer gc.vkd.destroyBuffer(gc.dev, staging_buffer, null); + const mem_reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, staging_buffer); + const staging_memory = try gc.allocate(mem_reqs, .{ .host_visible_bit = true, .host_coherent_bit = true }); + defer gc.vkd.freeMemory(gc.dev, staging_memory, null); + try gc.vkd.bindBufferMemory(gc.dev, staging_buffer, staging_memory, 0); + + { + const data = try gc.vkd.mapMemory(gc.dev, staging_memory, 0, vk.WHOLE_SIZE, .{}); + defer gc.vkd.unmapMemory(gc.dev, staging_memory); + + const gpu_vertices: [*]Vertex = @ptrCast(@alignCast(data)); + @memcpy(gpu_vertices, vertices[0..]); + } + + try copyBuffer(gc, pool, buffer, staging_buffer, @sizeOf(@TypeOf(vertices))); +} + +fn copyBuffer(gc: *const GraphicsContext, pool: vk.CommandPool, dst: vk.Buffer, src: vk.Buffer, size: vk.DeviceSize) !void { + var cmdbuf: vk.CommandBuffer = undefined; + try gc.vkd.allocateCommandBuffers(gc.dev, &.{ + .command_pool = pool, + .level = .primary, + .command_buffer_count = 1, + }, @ptrCast(&cmdbuf)); + defer gc.vkd.freeCommandBuffers(gc.dev, pool, 1, @ptrCast(&cmdbuf)); + + try gc.vkd.beginCommandBuffer(cmdbuf, &.{ + .flags = .{ .one_time_submit_bit = true }, + }); + + const region = vk.BufferCopy{ + .src_offset = 0, + .dst_offset = 0, + .size = size, + }; + gc.vkd.cmdCopyBuffer(cmdbuf, src, dst, 1, @ptrCast(®ion)); + + try gc.vkd.endCommandBuffer(cmdbuf); + + const si = vk.SubmitInfo{ + .command_buffer_count = 1, + .p_command_buffers = @ptrCast(&cmdbuf), + .p_wait_dst_stage_mask = undefined, + }; + try gc.vkd.queueSubmit(gc.graphics_queue.handle, 1, @ptrCast(&si), .null_handle); + try gc.vkd.queueWaitIdle(gc.graphics_queue.handle); +} + +fn createCommandBuffers( + gc: *const GraphicsContext, + pool: vk.CommandPool, + allocator: Allocator, + buffer: vk.Buffer, + extent: vk.Extent2D, + render_pass: vk.RenderPass, + pipeline: vk.Pipeline, + framebuffers: []vk.Framebuffer, +) ![]vk.CommandBuffer { + const cmdbufs = try allocator.alloc(vk.CommandBuffer, framebuffers.len); + errdefer allocator.free(cmdbufs); + + try gc.vkd.allocateCommandBuffers(gc.dev, &.{ + .command_pool = pool, + .level = .primary, + .command_buffer_count = @as(u32, @truncate(cmdbufs.len)), + }, cmdbufs.ptr); + errdefer gc.vkd.freeCommandBuffers(gc.dev, pool, @truncate(cmdbufs.len), cmdbufs.ptr); + + const clear = vk.ClearValue{ + .color = .{ .float_32 = .{ 0, 0, 0, 1 } }, + }; + + const viewport = vk.Viewport{ + .x = 0, + .y = 0, + .width = @as(f32, @floatFromInt(extent.width)), + .height = @as(f32, @floatFromInt(extent.height)), + .min_depth = 0, + .max_depth = 1, + }; + + const scissor = vk.Rect2D{ + .offset = .{ .x = 0, .y = 0 }, + .extent = extent, + }; + + for (cmdbufs, framebuffers) |cmdbuf, framebuffer| { + try gc.vkd.beginCommandBuffer(cmdbuf, &.{}); + + gc.vkd.cmdSetViewport(cmdbuf, 0, 1, @ptrCast(&viewport)); + gc.vkd.cmdSetScissor(cmdbuf, 0, 1, @ptrCast(&scissor)); + + // This needs to be a separate definition - see https://github.com/ziglang/zig/issues/7627. 
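+ // (In practice that means hoisting the rect into a named local instead of writing
+ // the literal inline in the cmdBeginRenderPass call below.)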
+ const render_area = vk.Rect2D{ + .offset = .{ .x = 0, .y = 0 }, + .extent = extent, + }; + + gc.vkd.cmdBeginRenderPass(cmdbuf, &.{ + .render_pass = render_pass, + .framebuffer = framebuffer, + .render_area = render_area, + .clear_value_count = 1, + .p_clear_values = @as([*]const vk.ClearValue, @ptrCast(&clear)), + }, .@"inline"); + + gc.vkd.cmdBindPipeline(cmdbuf, .graphics, pipeline); + const offset = [_]vk.DeviceSize{0}; + gc.vkd.cmdBindVertexBuffers(cmdbuf, 0, 1, @ptrCast(&buffer), &offset); + gc.vkd.cmdDraw(cmdbuf, vertices.len, 1, 0, 0); + + gc.vkd.cmdEndRenderPass(cmdbuf); + try gc.vkd.endCommandBuffer(cmdbuf); + } + + return cmdbufs; +} + +fn destroyCommandBuffers(gc: *const GraphicsContext, pool: vk.CommandPool, allocator: Allocator, cmdbufs: []vk.CommandBuffer) void { + gc.vkd.freeCommandBuffers(gc.dev, pool, @truncate(cmdbufs.len), cmdbufs.ptr); + allocator.free(cmdbufs); +} + +fn createFramebuffers(gc: *const GraphicsContext, allocator: Allocator, render_pass: vk.RenderPass, swapchain: Swapchain) ![]vk.Framebuffer { + const framebuffers = try allocator.alloc(vk.Framebuffer, swapchain.swap_images.len); + errdefer allocator.free(framebuffers); + + var i: usize = 0; + errdefer for (framebuffers[0..i]) |fb| gc.vkd.destroyFramebuffer(gc.dev, fb, null); + + for (framebuffers) |*fb| { + fb.* = try gc.vkd.createFramebuffer(gc.dev, &.{ + .render_pass = render_pass, + .attachment_count = 1, + .p_attachments = @as([*]const vk.ImageView, @ptrCast(&swapchain.swap_images[i].view)), + .width = swapchain.extent.width, + .height = swapchain.extent.height, + .layers = 1, + }, null); + i += 1; + } + + return framebuffers; +} + +fn destroyFramebuffers(gc: *const GraphicsContext, allocator: Allocator, framebuffers: []const vk.Framebuffer) void { + for (framebuffers) |fb| gc.vkd.destroyFramebuffer(gc.dev, fb, null); + allocator.free(framebuffers); +} + +fn createRenderPass(gc: *const GraphicsContext, swapchain: Swapchain) !vk.RenderPass { + const color_attachment = vk.AttachmentDescription{ + .format = swapchain.surface_format.format, + .samples = .{ .@"1_bit" = true }, + .load_op = .clear, + .store_op = .store, + .stencil_load_op = .dont_care, + .stencil_store_op = .dont_care, + .initial_layout = .undefined, + .final_layout = .present_src_khr, + }; + + const color_attachment_ref = vk.AttachmentReference{ + .attachment = 0, + .layout = .color_attachment_optimal, + }; + + const subpass = vk.SubpassDescription{ + .pipeline_bind_point = .graphics, + .color_attachment_count = 1, + .p_color_attachments = @ptrCast(&color_attachment_ref), + }; + + return try gc.vkd.createRenderPass(gc.dev, &.{ + .attachment_count = 1, + .p_attachments = @as([*]const vk.AttachmentDescription, @ptrCast(&color_attachment)), + .subpass_count = 1, + .p_subpasses = @as([*]const vk.SubpassDescription, @ptrCast(&subpass)), + }, null); +} + +fn createPipeline( + gc: *const GraphicsContext, + layout: vk.PipelineLayout, + render_pass: vk.RenderPass, +) !vk.Pipeline { + const vert = try gc.vkd.createShaderModule(gc.dev, &.{ + .code_size = shaders.triangle_vert.len, + .p_code = @as([*]const u32, @ptrCast(&shaders.triangle_vert)), + }, null); + defer gc.vkd.destroyShaderModule(gc.dev, vert, null); + + const frag = try gc.vkd.createShaderModule(gc.dev, &.{ + .code_size = shaders.triangle_frag.len, + .p_code = @as([*]const u32, @ptrCast(&shaders.triangle_frag)), + }, null); + defer gc.vkd.destroyShaderModule(gc.dev, frag, null); + + const pssci = [_]vk.PipelineShaderStageCreateInfo{ + .{ + .stage = .{ .vertex_bit = true }, + .module = 
vert, + .p_name = "main", + }, + .{ + .stage = .{ .fragment_bit = true }, + .module = frag, + .p_name = "main", + }, + }; + + const pvisci = vk.PipelineVertexInputStateCreateInfo{ + .vertex_binding_description_count = 1, + .p_vertex_binding_descriptions = @ptrCast(&Vertex.binding_description), + .vertex_attribute_description_count = Vertex.attribute_description.len, + .p_vertex_attribute_descriptions = &Vertex.attribute_description, + }; + + const piasci = vk.PipelineInputAssemblyStateCreateInfo{ + .topology = .triangle_list, + .primitive_restart_enable = vk.FALSE, + }; + + const pvsci = vk.PipelineViewportStateCreateInfo{ + .viewport_count = 1, + .p_viewports = undefined, // set in createCommandBuffers with cmdSetViewport + .scissor_count = 1, + .p_scissors = undefined, // set in createCommandBuffers with cmdSetScissor + }; + + const prsci = vk.PipelineRasterizationStateCreateInfo{ + .depth_clamp_enable = vk.FALSE, + .rasterizer_discard_enable = vk.FALSE, + .polygon_mode = .fill, + .cull_mode = .{ .back_bit = true }, + .front_face = .clockwise, + .depth_bias_enable = vk.FALSE, + .depth_bias_constant_factor = 0, + .depth_bias_clamp = 0, + .depth_bias_slope_factor = 0, + .line_width = 1, + }; + + const pmsci = vk.PipelineMultisampleStateCreateInfo{ + .rasterization_samples = .{ .@"1_bit" = true }, + .sample_shading_enable = vk.FALSE, + .min_sample_shading = 1, + .alpha_to_coverage_enable = vk.FALSE, + .alpha_to_one_enable = vk.FALSE, + }; + + const pcbas = vk.PipelineColorBlendAttachmentState{ + .blend_enable = vk.FALSE, + .src_color_blend_factor = .one, + .dst_color_blend_factor = .zero, + .color_blend_op = .add, + .src_alpha_blend_factor = .one, + .dst_alpha_blend_factor = .zero, + .alpha_blend_op = .add, + .color_write_mask = .{ .r_bit = true, .g_bit = true, .b_bit = true, .a_bit = true }, + }; + + const pcbsci = vk.PipelineColorBlendStateCreateInfo{ + .logic_op_enable = vk.FALSE, + .logic_op = .copy, + .attachment_count = 1, + .p_attachments = @ptrCast(&pcbas), + .blend_constants = [_]f32{ 0, 0, 0, 0 }, + }; + + const dynstate = [_]vk.DynamicState{ .viewport, .scissor }; + const pdsci = vk.PipelineDynamicStateCreateInfo{ + .flags = .{}, + .dynamic_state_count = dynstate.len, + .p_dynamic_states = &dynstate, + }; + + const gpci = vk.GraphicsPipelineCreateInfo{ + .flags = .{}, + .stage_count = 2, + .p_stages = &pssci, + .p_vertex_input_state = &pvisci, + .p_input_assembly_state = &piasci, + .p_tessellation_state = null, + .p_viewport_state = &pvsci, + .p_rasterization_state = &prsci, + .p_multisample_state = &pmsci, + .p_depth_stencil_state = null, + .p_color_blend_state = &pcbsci, + .p_dynamic_state = &pdsci, + .layout = layout, + .render_pass = render_pass, + .subpass = 0, + .base_pipeline_handle = .null_handle, + .base_pipeline_index = -1, + }; + + var pipeline: vk.Pipeline = undefined; + _ = try gc.vkd.createGraphicsPipelines( + gc.dev, + .null_handle, + 1, + @ptrCast(&gpci), + null, + @ptrCast(&pipeline), + ); + return pipeline; } diff --git a/src/shaders/triangle.frag b/src/shaders/triangle.frag new file mode 100644 index 0000000..8c952fe --- /dev/null +++ b/src/shaders/triangle.frag @@ -0,0 +1,9 @@ +#version 450 + +layout(location = 0) in vec3 v_color; + +layout(location = 0) out vec4 f_color; + +void main() { + f_color = vec4(v_color, 1.0); +} diff --git a/src/shaders/triangle.vert b/src/shaders/triangle.vert new file mode 100644 index 0000000..2b8dfa5 --- /dev/null +++ b/src/shaders/triangle.vert @@ -0,0 +1,11 @@ +#version 450 + +layout(location = 0) in vec2 a_pos; 
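+// location 1 carries the per-vertex color and matches the second entry of Vertex.attribute_description in main.zig.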
+layout(location = 1) in vec3 a_color; + +layout(location = 0) out vec3 v_color; + +void main() { + gl_Position = vec4(a_pos, 0.0, 1.0); + v_color = a_color; +} diff --git a/src/swapchain.zig b/src/swapchain.zig new file mode 100644 index 0000000..09519ff --- /dev/null +++ b/src/swapchain.zig @@ -0,0 +1,322 @@ +const std = @import("std"); +const vk = @import("vk"); +const GraphicsContext = @import("graphics_context.zig").GraphicsContext; +const Allocator = std.mem.Allocator; + +pub const Swapchain = struct { + pub const PresentState = enum { + optimal, + suboptimal, + }; + + gc: *const GraphicsContext, + allocator: Allocator, + + surface_format: vk.SurfaceFormatKHR, + present_mode: vk.PresentModeKHR, + extent: vk.Extent2D, + handle: vk.SwapchainKHR, + + swap_images: []SwapImage, + image_index: u32, + next_image_acquired: vk.Semaphore, + + pub fn init(gc: *const GraphicsContext, allocator: Allocator, extent: vk.Extent2D) !Swapchain { + return try initRecycle(gc, allocator, extent, .null_handle); + } + + pub fn initRecycle(gc: *const GraphicsContext, allocator: Allocator, extent: vk.Extent2D, old_handle: vk.SwapchainKHR) !Swapchain { + const caps = try gc.vki.getPhysicalDeviceSurfaceCapabilitiesKHR(gc.pdev, gc.surface); + const actual_extent = findActualExtent(caps, extent); + if (actual_extent.width == 0 or actual_extent.height == 0) { + return error.InvalidSurfaceDimensions; + } + + const surface_format = try findSurfaceFormat(gc, allocator); + const present_mode = try findPresentMode(gc, allocator); + + var image_count = caps.min_image_count + 1; + if (caps.max_image_count > 0) { + image_count = @min(image_count, caps.max_image_count); + } + + const qfi = [_]u32{ gc.graphics_queue.family, gc.present_queue.family }; + const sharing_mode: vk.SharingMode = if (gc.graphics_queue.family != gc.present_queue.family) + .concurrent + else + .exclusive; + + const handle = try gc.vkd.createSwapchainKHR(gc.dev, &.{ + .surface = gc.surface, + .min_image_count = image_count, + .image_format = surface_format.format, + .image_color_space = surface_format.color_space, + .image_extent = actual_extent, + .image_array_layers = 1, + .image_usage = .{ .color_attachment_bit = true, .transfer_dst_bit = true }, + .image_sharing_mode = sharing_mode, + .queue_family_index_count = qfi.len, + .p_queue_family_indices = &qfi, + .pre_transform = caps.current_transform, + .composite_alpha = .{ .opaque_bit_khr = true }, + .present_mode = present_mode, + .clipped = vk.TRUE, + .old_swapchain = old_handle, + }, null); + errdefer gc.vkd.destroySwapchainKHR(gc.dev, handle, null); + + if (old_handle != .null_handle) { + // Apparently, the old swapchain handle still needs to be destroyed after recreating. 
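+ // (Passing .old_swapchain to createSwapchainKHR only retires the old swapchain;
+ // the retired handle is still owned by the application and must be destroyed
+ // with destroySwapchainKHR.)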
+ gc.vkd.destroySwapchainKHR(gc.dev, old_handle, null); + } + + const swap_images = try initSwapchainImages(gc, handle, surface_format.format, allocator); + errdefer { + for (swap_images) |si| si.deinit(gc); + allocator.free(swap_images); + } + + var next_image_acquired = try gc.vkd.createSemaphore(gc.dev, &.{}, null); + errdefer gc.vkd.destroySemaphore(gc.dev, next_image_acquired, null); + + const result = try gc.vkd.acquireNextImageKHR(gc.dev, handle, std.math.maxInt(u64), next_image_acquired, .null_handle); + if (result.result != .success) { + return error.ImageAcquireFailed; + } + + std.mem.swap(vk.Semaphore, &swap_images[result.image_index].image_acquired, &next_image_acquired); + return Swapchain{ + .gc = gc, + .allocator = allocator, + .surface_format = surface_format, + .present_mode = present_mode, + .extent = actual_extent, + .handle = handle, + .swap_images = swap_images, + .image_index = result.image_index, + .next_image_acquired = next_image_acquired, + }; + } + + fn deinitExceptSwapchain(self: Swapchain) void { + for (self.swap_images) |si| si.deinit(self.gc); + self.allocator.free(self.swap_images); + self.gc.vkd.destroySemaphore(self.gc.dev, self.next_image_acquired, null); + } + + pub fn waitForAllFences(self: Swapchain) !void { + for (self.swap_images) |si| si.waitForFence(self.gc) catch {}; + } + + pub fn deinit(self: Swapchain) void { + self.deinitExceptSwapchain(); + self.gc.vkd.destroySwapchainKHR(self.gc.dev, self.handle, null); + } + + pub fn recreate(self: *Swapchain, new_extent: vk.Extent2D) !void { + const gc = self.gc; + const allocator = self.allocator; + const old_handle = self.handle; + self.deinitExceptSwapchain(); + self.* = try initRecycle(gc, allocator, new_extent, old_handle); + } + + pub fn currentImage(self: Swapchain) vk.Image { + return self.swap_images[self.image_index].image; + } + + pub fn currentSwapImage(self: Swapchain) *const SwapImage { + return &self.swap_images[self.image_index]; + } + + pub fn present(self: *Swapchain, cmdbuf: vk.CommandBuffer) !PresentState { + // Simple method: + // 1) Acquire next image + // 2) Wait for and reset fence of the acquired image + // 3) Submit command buffer with fence of acquired image, + // dependendent on the semaphore signalled by the first step. + // 4) Present current frame, dependent on semaphore signalled by previous step + // Problem: This way we can't reference the current image while rendering. + // Better method: Shuffle the steps around such that acquire next image is the last step, + // leaving the swapchain in a state with the current image. + // 1) Wait for and reset fence of current image + // 2) Submit command buffer, signalling fence of current image and dependent on + // the semaphore signalled by step 4. 
+ // 3) Present current frame, dependent on semaphore signalled by the submit + // 4) Acquire next image, signalling its semaphore + // One problem that arises is that we can't know beforehand which semaphore to signal, + // so we keep an extra auxilery semaphore that is swapped around + + // Step 1: Make sure the current frame has finished rendering + const current = self.currentSwapImage(); + try current.waitForFence(self.gc); + try self.gc.vkd.resetFences(self.gc.dev, 1, @ptrCast(¤t.frame_fence)); + + // Step 2: Submit the command buffer + const wait_stage = [_]vk.PipelineStageFlags{.{ .top_of_pipe_bit = true }}; + try self.gc.vkd.queueSubmit(self.gc.graphics_queue.handle, 1, &[_]vk.SubmitInfo{.{ + .wait_semaphore_count = 1, + .p_wait_semaphores = @ptrCast(¤t.image_acquired), + .p_wait_dst_stage_mask = &wait_stage, + .command_buffer_count = 1, + .p_command_buffers = @ptrCast(&cmdbuf), + .signal_semaphore_count = 1, + .p_signal_semaphores = @ptrCast(¤t.render_finished), + }}, current.frame_fence); + + // Step 3: Present the current frame + _ = try self.gc.vkd.queuePresentKHR(self.gc.present_queue.handle, &.{ + .wait_semaphore_count = 1, + .p_wait_semaphores = @as([*]const vk.Semaphore, @ptrCast(¤t.render_finished)), + .swapchain_count = 1, + .p_swapchains = @as([*]const vk.SwapchainKHR, @ptrCast(&self.handle)), + .p_image_indices = @as([*]const u32, @ptrCast(&self.image_index)), + }); + + // Step 4: Acquire next frame + const result = try self.gc.vkd.acquireNextImageKHR( + self.gc.dev, + self.handle, + std.math.maxInt(u64), + self.next_image_acquired, + .null_handle, + ); + + std.mem.swap(vk.Semaphore, &self.swap_images[result.image_index].image_acquired, &self.next_image_acquired); + self.image_index = result.image_index; + + return switch (result.result) { + .success => .optimal, + .suboptimal_khr => .suboptimal, + else => unreachable, + }; + } +}; + +const SwapImage = struct { + image: vk.Image, + view: vk.ImageView, + image_acquired: vk.Semaphore, + render_finished: vk.Semaphore, + frame_fence: vk.Fence, + + fn init(gc: *const GraphicsContext, image: vk.Image, format: vk.Format) !SwapImage { + const view = try gc.vkd.createImageView(gc.dev, &.{ + .image = image, + .view_type = .@"2d", + .format = format, + .components = .{ .r = .identity, .g = .identity, .b = .identity, .a = .identity }, + .subresource_range = .{ + .aspect_mask = .{ .color_bit = true }, + .base_mip_level = 0, + .level_count = 1, + .base_array_layer = 0, + .layer_count = 1, + }, + }, null); + errdefer gc.vkd.destroyImageView(gc.dev, view, null); + + const image_acquired = try gc.vkd.createSemaphore(gc.dev, &.{}, null); + errdefer gc.vkd.destroySemaphore(gc.dev, image_acquired, null); + + const render_finished = try gc.vkd.createSemaphore(gc.dev, &.{}, null); + errdefer gc.vkd.destroySemaphore(gc.dev, render_finished, null); + + const frame_fence = try gc.vkd.createFence(gc.dev, &.{ .flags = .{ .signaled_bit = true } }, null); + errdefer gc.vkd.destroyFence(gc.dev, frame_fence, null); + + return SwapImage{ + .image = image, + .view = view, + .image_acquired = image_acquired, + .render_finished = render_finished, + .frame_fence = frame_fence, + }; + } + + fn deinit(self: SwapImage, gc: *const GraphicsContext) void { + self.waitForFence(gc) catch return; + gc.vkd.destroyImageView(gc.dev, self.view, null); + gc.vkd.destroySemaphore(gc.dev, self.image_acquired, null); + gc.vkd.destroySemaphore(gc.dev, self.render_finished, null); + gc.vkd.destroyFence(gc.dev, self.frame_fence, null); + } + + fn waitForFence(self: 
SwapImage, gc: *const GraphicsContext) !void { + _ = try gc.vkd.waitForFences(gc.dev, 1, @ptrCast(&self.frame_fence), vk.TRUE, std.math.maxInt(u64)); + } +}; + +fn initSwapchainImages(gc: *const GraphicsContext, swapchain: vk.SwapchainKHR, format: vk.Format, allocator: Allocator) ![]SwapImage { + var count: u32 = undefined; + _ = try gc.vkd.getSwapchainImagesKHR(gc.dev, swapchain, &count, null); + const images = try allocator.alloc(vk.Image, count); + defer allocator.free(images); + _ = try gc.vkd.getSwapchainImagesKHR(gc.dev, swapchain, &count, images.ptr); + + const swap_images = try allocator.alloc(SwapImage, count); + errdefer allocator.free(swap_images); + + var i: usize = 0; + errdefer for (swap_images[0..i]) |si| si.deinit(gc); + + for (images) |image| { + swap_images[i] = try SwapImage.init(gc, image, format); + i += 1; + } + + return swap_images; +} + +fn findSurfaceFormat(gc: *const GraphicsContext, allocator: Allocator) !vk.SurfaceFormatKHR { + const preferred = vk.SurfaceFormatKHR{ + .format = .b8g8r8a8_srgb, + .color_space = .srgb_nonlinear_khr, + }; + + var count: u32 = undefined; + _ = try gc.vki.getPhysicalDeviceSurfaceFormatsKHR(gc.pdev, gc.surface, &count, null); + const surface_formats = try allocator.alloc(vk.SurfaceFormatKHR, count); + defer allocator.free(surface_formats); + _ = try gc.vki.getPhysicalDeviceSurfaceFormatsKHR(gc.pdev, gc.surface, &count, surface_formats.ptr); + + for (surface_formats) |sfmt| { + if (std.meta.eql(sfmt, preferred)) { + return preferred; + } + } + + return surface_formats[0]; // There must always be at least one supported surface format +} + +fn findPresentMode(gc: *const GraphicsContext, allocator: Allocator) !vk.PresentModeKHR { + var count: u32 = undefined; + _ = try gc.vki.getPhysicalDeviceSurfacePresentModesKHR(gc.pdev, gc.surface, &count, null); + const present_modes = try allocator.alloc(vk.PresentModeKHR, count); + defer allocator.free(present_modes); + _ = try gc.vki.getPhysicalDeviceSurfacePresentModesKHR(gc.pdev, gc.surface, &count, present_modes.ptr); + + const preferred = [_]vk.PresentModeKHR{ + .mailbox_khr, + .immediate_khr, + }; + + for (preferred) |mode| { + if (std.mem.indexOfScalar(vk.PresentModeKHR, present_modes, mode) != null) { + return mode; + } + } + + return .fifo_khr; +} + +fn findActualExtent(caps: vk.SurfaceCapabilitiesKHR, extent: vk.Extent2D) vk.Extent2D { + if (caps.current_extent.width != 0xFFFF_FFFF) { + return caps.current_extent; + } else { + return .{ + .width = std.math.clamp(extent.width, caps.min_image_extent.width, caps.max_image_extent.width), + .height = std.math.clamp(extent.height, caps.min_image_extent.height, caps.max_image_extent.height), + }; + } +}