//! zig-experiments/src/gfx.zig

const std = @import("std");
const builtin = @import("builtin");
const vk = @import("vk");
const c = @import("c.zig");
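// validation layers and the debug messenger are only compiled in for safe build modes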
pub const use_debug_messenger = switch (builtin.mode) {
.Debug, .ReleaseSafe => true,
.ReleaseSmall, .ReleaseFast => false,
};
const InstancePair = std.meta.Tuple(&.{ vk.Instance, InstanceDispatch, vk.DebugUtilsMessengerEXT });
/// note: destroy with vki.destroyDebugUtilsMessengerEXT(instance, messenger, null) (when
/// use_debug_messenger) followed by vki.destroyInstance(instance, null)
pub fn create_instance(vkb: BaseDispatch, app_name: [*:0]const u8) !InstancePair {
var exts = std.BoundedArray([*:0]const u8, 32){};
var layers = std.BoundedArray([*:0]const u8, 32){};
if (use_debug_messenger) {
try exts.appendSlice(&.{
vk.extension_info.ext_debug_utils.name,
});
try layers.appendSlice(&.{
"VK_LAYER_KHRONOS_validation",
});
}
var glfw_exts_count: u32 = 0;
const glfw_exts: [*]const [*:0]const u8 =
@ptrCast(c.glfwGetRequiredInstanceExtensions(&glfw_exts_count));
try exts.appendSlice(glfw_exts[0..glfw_exts_count]);
const dumci: vk.DebugUtilsMessengerCreateInfoEXT = .{
.message_severity = .{
.error_bit_ext = true,
.info_bit_ext = true,
.verbose_bit_ext = true,
.warning_bit_ext = true,
},
.message_type = .{
.device_address_binding_bit_ext = true,
.general_bit_ext = false,
.performance_bit_ext = true,
.validation_bit_ext = true,
},
.pfn_user_callback = &debug_callback,
.p_user_data = null,
};
const instance = try vkb.createInstance(&vk.InstanceCreateInfo{
.p_application_info = &vk.ApplicationInfo{
.p_application_name = app_name,
.application_version = vk.makeApiVersion(0, 0, 0, 0),
.p_engine_name = app_name,
.engine_version = vk.makeApiVersion(0, 0, 0, 0),
.api_version = vk.API_VERSION_1_3,
},
.enabled_extension_count = @intCast(exts.len),
.pp_enabled_extension_names = &exts.buffer,
.enabled_layer_count = @intCast(layers.len),
.pp_enabled_layer_names = &layers.buffer,
.p_next = if (use_debug_messenger) &dumci else null,
}, null);
const vki = try InstanceDispatch.load(instance, vkb.dispatch.vkGetInstanceProcAddr);
errdefer vki.destroyInstance(instance, null);
const messenger: vk.DebugUtilsMessengerEXT = if (use_debug_messenger)
try vki.createDebugUtilsMessengerEXT(instance, &dumci, null)
else
.null_handle;
errdefer if (use_debug_messenger)
vki.destroyDebugUtilsMessengerEXT(instance, messenger, null);
return .{ instance, vki, messenger };
}
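
// Rough usage sketch (assumes c.zig exposes glfwGetInstanceProcAddress with a
// vulkan-zig-compatible signature; "my-app" and `pair` are illustrative only):
//
//     const vkb = try BaseDispatch.load(c.glfwGetInstanceProcAddress);
//     const pair = try create_instance(vkb, "my-app");
//     const instance = pair[0];
//     const vki = pair[1];
//     const messenger = pair[2];
//     defer vki.destroyInstance(instance, null);
//     defer if (use_debug_messenger)
//         vki.destroyDebugUtilsMessengerEXT(instance, messenger, null);
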
/// note: destroy with vki.destroySurfaceKHR(instance, surface, null)
pub fn create_surface(instance: vk.Instance, window: *c.GLFWwindow) !vk.SurfaceKHR {
var surface: vk.SurfaceKHR = undefined;
if (c.glfwCreateWindowSurface(instance, window, null, &surface) != .success) {
return error.SurfaceInitFailed;
}
return surface;
}
/// note: destroy with c.glfwDestroyWindow(window)
pub fn create_window(extent: vk.Extent2D, title: [*:0]const u8) !*c.GLFWwindow {
c.glfwWindowHintString(c.GLFW_X11_CLASS_NAME, "floating_window");
c.glfwWindowHintString(c.GLFW_X11_INSTANCE_NAME, "floating_window");
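// GLFW_NO_API: don't create an OpenGL/GLES context, the surface comes from Vulkan
// (the X11 class/instance hints above are presumably set so tiling WMs float the window)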
c.glfwWindowHint(c.GLFW_CLIENT_API, c.GLFW_NO_API);
return c.glfwCreateWindow(
@intCast(extent.width),
@intCast(extent.height),
title,
null,
null,
) orelse error.WindowInitFailed;
}
const DevicePair = std.meta.Tuple(&.{ vk.PhysicalDevice, vk.Device, DeviceDispatch, u32 });
/// note: destroy with vkd.destroyDevice(dev, null)
pub fn create_device(
ally: std.mem.Allocator,
instance: vk.Instance,
surface: vk.SurfaceKHR,
vki: InstanceDispatch,
) !DevicePair {
const required_device_extensions: []const [*:0]const u8 = &.{
vk.extension_info.khr_swapchain.name,
vk.extension_info.khr_dynamic_rendering.name,
};
var pdev_count: u32 = undefined;
_ = try vki.enumeratePhysicalDevices(instance, &pdev_count, null);
const pdevs = try ally.alloc(vk.PhysicalDevice, pdev_count);
defer ally.free(pdevs);
_ = try vki.enumeratePhysicalDevices(instance, &pdev_count, pdevs.ptr);
pdev_search: for (pdevs) |pdev| {
const props = vki.getPhysicalDeviceProperties(pdev);
if (props.device_type != .discrete_gpu) continue :pdev_search;
var format_count: u32 = undefined;
_ = try vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &format_count, null);
if (format_count == 0) continue :pdev_search;
var mode_count: u32 = undefined;
_ = try vki.getPhysicalDeviceSurfacePresentModesKHR(pdev, surface, &mode_count, null);
if (mode_count == 0) continue :pdev_search;
var ext_count: u32 = undefined;
_ = try vki.enumerateDeviceExtensionProperties(pdev, null, &ext_count, null);
const exts = try ally.alloc(vk.ExtensionProperties, ext_count);
defer ally.free(exts);
_ = try vki.enumerateDeviceExtensionProperties(pdev, null, &ext_count, exts.ptr);
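// for/else: the inner loop breaks as soon as the extension is found; if it runs to
// completion, the else branch fires and we skip this physical device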
for (required_device_extensions) |name| {
for (exts) |ext| {
if (std.mem.eql(
u8,
std.mem.span(name),
std.mem.sliceTo(&ext.extension_name, 0),
)) {
break;
}
} else {
continue :pdev_search;
}
}
var family_count: u32 = undefined;
vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, null);
const families = try ally.alloc(vk.QueueFamilyProperties, family_count);
defer ally.free(families);
vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, families.ptr);
// just find one family that does both graphics and present, so we can use exclusive
// sharing on the swapchain. apparently most hardware supports this, and it keeps the
// logic for queue allocation and swapchain creation much simpler: with concurrent
// sharing, swapchain creation needs the list of queue family indices that will access
// the images, and there's a performance penalty for allowing that concurrent access.
//
// multiple _queues_ may still use the images under exclusive sharing, but only if
// they're in the same family. (see the swapchain sketch after this function.)
const graphics_family: u32 = for (families, 0..) |family, idx| {
const graphics = family.queue_flags.graphics_bit;
const present = try vki.getPhysicalDeviceSurfaceSupportKHR(pdev, @intCast(idx), surface) == vk.TRUE;
if (graphics and present) {
break @intCast(idx);
}
} else {
continue :pdev_search;
};
std.log.debug("selecting device {s}", .{std.mem.sliceTo(&props.device_name, 0)});
const qci: []const vk.DeviceQueueCreateInfo = &.{
vk.DeviceQueueCreateInfo{
.queue_family_index = graphics_family,
.queue_count = 1,
.p_queue_priorities = &[_]f32{1.0},
},
};
const dev = try vki.createDevice(pdev, &.{
.queue_create_info_count = @intCast(qci.len),
.p_queue_create_infos = qci.ptr,
.enabled_extension_count = @intCast(required_device_extensions.len),
.pp_enabled_extension_names = required_device_extensions.ptr,
.p_next = &vk.PhysicalDeviceDynamicRenderingFeaturesKHR{
.dynamic_rendering = vk.TRUE,
},
}, null);
const vkd = try DeviceDispatch.load(dev, vki.dispatch.vkGetDeviceProcAddr);
errdefer vkd.destroyDevice(dev, null);
return .{ pdev, dev, vkd, graphics_family };
}
return error.NoSuitableDevice;
}
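
// Rough sketch of how the returned graphics_family / exclusive sharing choice shows up
// in swapchain creation elsewhere (format, extent, image count, present mode, and caps
// are illustrative values chosen by the caller, not decided in this file):
//
//     const swapchain = try vkd.createSwapchainKHR(dev, &.{
//         .surface = surface,
//         .min_image_count = image_count,
//         .image_format = surface_format.format,
//         .image_color_space = surface_format.color_space,
//         .image_extent = extent,
//         .image_array_layers = 1,
//         .image_usage = .{ .color_attachment_bit = true },
//         // exclusive sharing is what the single graphics+present family buys us:
//         // no queue family index list and no concurrent-access penalty
//         .image_sharing_mode = .exclusive,
//         .pre_transform = caps.current_transform,
//         .composite_alpha = .{ .opaque_bit_khr = true },
//         .present_mode = present_mode,
//         .clipped = vk.TRUE,
//     }, null);
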
pub fn find_surface_format(
pdev: vk.PhysicalDevice,
vki: InstanceDispatch,
surface: vk.SurfaceKHR,
preferred: vk.SurfaceFormatKHR,
) !vk.SurfaceFormatKHR {
var formats_buf: [64]vk.SurfaceFormatKHR = undefined;
var formats_count: u32 = @intCast(formats_buf.len);
_ = try vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &formats_count, &formats_buf);
const formats = formats_buf[0..formats_count];
for (formats) |format| {
if (std.meta.eql(format, preferred)) {
return format;
}
}
return formats[0];
}
pub fn find_present_mode(
pdev: vk.PhysicalDevice,
vki: InstanceDispatch,
surface: vk.SurfaceKHR,
preferred: vk.PresentModeKHR,
) !vk.PresentModeKHR {
var modes_buf: [8]vk.PresentModeKHR = undefined;
var modes_count: u32 = @intCast(modes_buf.len);
_ = try vki.getPhysicalDeviceSurfacePresentModesKHR(pdev, surface, &modes_count, &modes_buf);
const modes = modes_buf[0..modes_count];
for (modes) |mode| {
if (std.meta.eql(mode, preferred)) {
return mode;
}
}
// fifo is the only present mode the spec guarantees, so fall back to it
return .fifo_khr;
}
pub fn find_swap_extent(
pdev: vk.PhysicalDevice,
vki: InstanceDispatch,
surface: vk.SurfaceKHR,
window: *c.GLFWwindow,
) !vk.Extent2D {
const caps = try vki.getPhysicalDeviceSurfaceCapabilitiesKHR(pdev, surface);
var extent = caps.current_extent;
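// 0xFFFF_FFFF is the Vulkan sentinel meaning "extent is determined by the swapchain",
// so fall back to the framebuffer size, clamped to the surface capabilities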
if (extent.width == std.math.maxInt(u32)) {
c.glfwGetFramebufferSize(window, @ptrCast(&extent.width), @ptrCast(&extent.height));
extent.width = std.math.clamp(extent.width, caps.min_image_extent.width, caps.max_image_extent.width);
extent.height = std.math.clamp(extent.height, caps.min_image_extent.height, caps.max_image_extent.height);
}
return extent;
}
pub fn find_swap_image_count(
pdev: vk.PhysicalDevice,
vki: InstanceDispatch,
surface: vk.SurfaceKHR,
) !u32 {
const caps = try vki.getPhysicalDeviceSurfaceCapabilitiesKHR(pdev, surface);
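// request one image more than the minimum (and at least 3) so we aren't left waiting
// on the driver to release an image; clamp to the maximum when one is advertised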
var count = @max(3, caps.min_image_count + 1);
if (caps.max_image_count > 0) {
count = @min(count, caps.max_image_count);
}
return count;
}
pub fn uploadData(
comptime T: type,
pdev: vk.PhysicalDevice,
vki: InstanceDispatch,
dev: vk.Device,
vkd: DeviceDispatch,
queue: vk.Queue,
pool: vk.CommandPool,
buffer: vk.Buffer,
source: []const T,
) !void {
// if (@typeInfo(T) == .Struct and @typeInfo(T).Struct.layout == .auto) @compileError("Requires defined T layout");
const size = @sizeOf(T) * source.len;
const staging_buffer = try vkd.createBuffer(dev, &.{
.size = size,
.usage = .{ .transfer_src_bit = true },
.sharing_mode = .exclusive,
}, null);
defer vkd.destroyBuffer(dev, staging_buffer, null);
const mem_reqs = vkd.getBufferMemoryRequirements(dev, staging_buffer);
const staging_memory = try allocate(pdev, vki, dev, vkd, mem_reqs, .{
.host_visible_bit = true,
.host_coherent_bit = true,
});
defer vkd.freeMemory(dev, staging_memory, null);
try vkd.bindBufferMemory(dev, staging_buffer, staging_memory, 0);
{
const data = try vkd.mapMemory(dev, staging_memory, 0, vk.WHOLE_SIZE, .{});
defer vkd.unmapMemory(dev, staging_memory);
const dest: [*]T = @ptrCast(@alignCast(data));
@memcpy(dest, source);
}
try copyBuffer(dev, queue, pool, buffer, staging_buffer, size, vkd);
}
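
// Rough usage sketch (the device-local destination buffer is created by the caller;
// Vertex, vertices, queue, and pool are illustrative names, not part of this file):
//
//     const buffer = try vkd.createBuffer(dev, &.{
//         .size = @sizeOf(Vertex) * vertices.len,
//         .usage = .{ .vertex_buffer_bit = true, .transfer_dst_bit = true },
//         .sharing_mode = .exclusive,
//     }, null);
//     const reqs = vkd.getBufferMemoryRequirements(dev, buffer);
//     const memory = try allocate(pdev, vki, dev, vkd, reqs, .{ .device_local_bit = true });
//     try vkd.bindBufferMemory(dev, buffer, memory, 0);
//     try uploadData(Vertex, pdev, vki, dev, vkd, queue, pool, buffer, vertices);
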
pub fn copyBuffer(
dev: vk.Device,
queue: vk.Queue,
pool: vk.CommandPool,
dst: vk.Buffer,
src: vk.Buffer,
size: vk.DeviceSize,
vkd: DeviceDispatch,
) !void {
var cmdbuf: vk.CommandBuffer = undefined;
try vkd.allocateCommandBuffers(dev, &.{
.command_pool = pool,
.level = .primary,
.command_buffer_count = 1,
}, @ptrCast(&cmdbuf));
defer vkd.freeCommandBuffers(dev, pool, 1, @ptrCast(&cmdbuf));
try vkd.beginCommandBuffer(cmdbuf, &.{
.flags = .{ .one_time_submit_bit = true },
});
const region = vk.BufferCopy{
.src_offset = 0,
.dst_offset = 0,
.size = size,
};
vkd.cmdCopyBuffer(cmdbuf, src, dst, 1, @ptrCast(&region));
try vkd.endCommandBuffer(cmdbuf);
const si = vk.SubmitInfo{
.command_buffer_count = 1,
.p_command_buffers = @ptrCast(&cmdbuf),
.p_wait_dst_stage_mask = undefined,
};
// allocating, submitting, and idle-waiting a command buffer for every copy operation
// seems like a bad idea for "streamed" data. gonna want a way to send a copy operation
// WITH SYNCHRONIZATION PRIMITIVES (e.g. a fence, see the sketch after this function)
// on a particular queue. see https://stackoverflow.com/a/62183243
//
// this may be a misunderstanding of how submission works...
try vkd.queueSubmit(queue, 1, @ptrCast(&si), .null_handle);
try vkd.queueWaitIdle(queue);
}
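
// Rough sketch of the fence-based variant mentioned above: submit with a fence so the
// caller can overlap other work and wait only when it needs the result, instead of
// idling the whole queue (names are illustrative):
//
//     const fence = try vkd.createFence(dev, &.{}, null);
//     defer vkd.destroyFence(dev, fence, null);
//     try vkd.queueSubmit(queue, 1, @ptrCast(&si), fence);
//     // ... record/submit other work here ...
//     _ = try vkd.waitForFences(dev, 1, @ptrCast(&fence), vk.TRUE, std.math.maxInt(u64));
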
pub fn findMemoryTypeIndex(
pdev: vk.PhysicalDevice,
memory_type_bits: u32,
flags: vk.MemoryPropertyFlags,
vki: InstanceDispatch,
) !u32 {
const mem_props = vki.getPhysicalDeviceMemoryProperties(pdev);
for (mem_props.memory_types[0..mem_props.memory_type_count], 0..) |mem_type, i| {
if (memory_type_bits & (@as(u32, 1) << @truncate(i)) != 0 and mem_type.property_flags.contains(flags)) {
return @truncate(i);
}
}
return error.NoSuitableMemoryType;
}
pub fn allocate(
pdev: vk.PhysicalDevice,
vki: InstanceDispatch,
dev: vk.Device,
vkd: DeviceDispatch,
requirements: vk.MemoryRequirements,
flags: vk.MemoryPropertyFlags,
) !vk.DeviceMemory {
return try vkd.allocateMemory(dev, &.{
.allocation_size = requirements.size,
.memory_type_index = try findMemoryTypeIndex(pdev, requirements.memory_type_bits, flags, vki),
}, null);
}
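// vulkan-zig dispatch tables: each wrapper lists the entry points loaded at that level
// (base, before an instance exists; instance-level; device-level)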
pub const BaseDispatch = vk.BaseWrapper(.{
.createInstance = true,
.getInstanceProcAddr = true,
});
pub const InstanceDispatch = vk.InstanceWrapper(.{
.destroyInstance = true,
.createDevice = true,
.destroySurfaceKHR = true,
.enumeratePhysicalDevices = true,
.getPhysicalDeviceProperties = true,
.enumerateDeviceExtensionProperties = true,
.getPhysicalDeviceSurfaceFormatsKHR = true,
.getPhysicalDeviceSurfacePresentModesKHR = true,
.getPhysicalDeviceSurfaceCapabilitiesKHR = true,
.getPhysicalDeviceQueueFamilyProperties = true,
.getPhysicalDeviceSurfaceSupportKHR = true,
.getPhysicalDeviceMemoryProperties = true,
.getDeviceProcAddr = true,
.createDebugUtilsMessengerEXT = use_debug_messenger,
.destroyDebugUtilsMessengerEXT = use_debug_messenger,
});
pub const DeviceDispatch = vk.DeviceWrapper(.{
.destroyDevice = true,
.getDeviceQueue = true,
.createSemaphore = true,
.createFence = true,
.createImageView = true,
.destroyImageView = true,
.destroySemaphore = true,
.destroyFence = true,
.getSwapchainImagesKHR = true,
.createSwapchainKHR = true,
.destroySwapchainKHR = true,
.acquireNextImageKHR = true,
.deviceWaitIdle = true,
.waitForFences = true,
.resetFences = true,
.queueSubmit = true,
.queuePresentKHR = true,
.createCommandPool = true,
.destroyCommandPool = true,
.allocateCommandBuffers = true,
.freeCommandBuffers = true,
.queueWaitIdle = true,
.createShaderModule = true,
.destroyShaderModule = true,
.createPipelineLayout = true,
.destroyPipelineLayout = true,
.createGraphicsPipelines = true,
.destroyPipeline = true,
.beginCommandBuffer = true,
.endCommandBuffer = true,
.allocateMemory = true,
.freeMemory = true,
.createBuffer = true,
.destroyBuffer = true,
.getBufferMemoryRequirements = true,
.mapMemory = true,
.unmapMemory = true,
.bindBufferMemory = true,
.cmdBeginRenderPass = true,
.cmdEndRenderPass = true,
.cmdBindPipeline = true,
.cmdDraw = true,
.cmdDrawIndexed = true,
.cmdSetViewport = true,
.cmdSetScissor = true,
.cmdBindVertexBuffers = true,
.cmdBindIndexBuffer = true,
.cmdCopyBuffer = true,
.cmdBeginRenderingKHR = true,
.cmdEndRenderingKHR = true,
.cmdPipelineBarrier = true,
});
pub fn debug_callback(
msg_severity: vk.DebugUtilsMessageSeverityFlagsEXT,
msg_type: vk.DebugUtilsMessageTypeFlagsEXT,
p_data: ?*const vk.DebugUtilsMessengerCallbackDataEXT,
_: ?*anyopaque,
) callconv(vk.vulkan_call_conv) vk.Bool32 {
// ripped from std.log.defaultLog
const data = p_data orelse return vk.FALSE;
const message = data.p_message orelse return vk.FALSE;
const severity_prefix = if (msg_severity.verbose_bit_ext)
"verbose:"
else if (msg_severity.info_bit_ext)
"info:"
else if (msg_severity.warning_bit_ext)
"warning:"
else if (msg_severity.error_bit_ext)
"error:"
else
"?:";
const type_prefix = if (msg_type.general_bit_ext)
""
else if (msg_type.validation_bit_ext)
"validation:"
else if (msg_type.performance_bit_ext)
"performance:"
else if (msg_type.device_address_binding_bit_ext)
"device_address_binding:"
else
"?:";
const stderr = std.io.getStdErr().writer();
var bw = std.io.bufferedWriter(stderr);
const writer = bw.writer();
std.debug.getStderrMutex().lock();
defer std.debug.getStderrMutex().unlock();
nosuspend {
writer.print("vk-{s}{s} {s}\n", .{ severity_prefix, type_prefix, message }) catch return vk.FALSE;
bw.flush() catch return vk.FALSE;
}
return vk.FALSE;
}