move noise to gfx.zig
src/gfx.zig (384 changed lines)
@@ -1,12 +1,396 @@
 const std = @import("std");
 const builtin = @import("builtin");
 
 const vk = @import("vk");
+const c = @import("c.zig");
 
 pub const use_debug_messenger = switch (builtin.mode) {
     .Debug, .ReleaseSafe => true,
     .ReleaseSmall, .ReleaseFast => false,
 };
+
+const InstancePair = std.meta.Tuple(&.{ vk.Instance, InstanceDispatch, vk.DebugUtilsMessengerEXT });
+
+/// note: destroy with vki.destroyInstance(instance, null)
+pub fn create_instance(vkb: BaseDispatch, app_name: [*:0]const u8) !InstancePair {
+    var exts = std.BoundedArray([*:0]const u8, 32){};
+    var layers = std.BoundedArray([*:0]const u8, 32){};
+
+    if (use_debug_messenger) {
+        try exts.appendSlice(&.{
+            vk.extension_info.ext_debug_utils.name,
+        });
+
+        try layers.appendSlice(&.{
+            "VK_LAYER_KHRONOS_validation",
+        });
+    }
+
+    var glfw_exts_count: u32 = 0;
+    const glfw_exts: [*]const [*:0]const u8 =
+        @ptrCast(c.glfwGetRequiredInstanceExtensions(&glfw_exts_count));
+    try exts.appendSlice(glfw_exts[0..glfw_exts_count]);
+
+    const dumci: vk.DebugUtilsMessengerCreateInfoEXT = .{
+        .message_severity = .{
+            .error_bit_ext = true,
+            .info_bit_ext = true,
+            .verbose_bit_ext = true,
+            .warning_bit_ext = true,
+        },
+        .message_type = .{
+            .device_address_binding_bit_ext = true,
+            .general_bit_ext = false,
+            .performance_bit_ext = true,
+            .validation_bit_ext = true,
+        },
+        .pfn_user_callback = &debug_callback,
+        .p_user_data = null,
+    };
+
+    const instance = try vkb.createInstance(&vk.InstanceCreateInfo{
+        .p_application_info = &vk.ApplicationInfo{
+            .p_application_name = app_name,
+            .application_version = vk.makeApiVersion(0, 0, 0, 0),
+            .p_engine_name = app_name,
+            .engine_version = vk.makeApiVersion(0, 0, 0, 0),
+            .api_version = vk.API_VERSION_1_3,
+        },
+        .enabled_extension_count = @intCast(exts.len),
+        .pp_enabled_extension_names = &exts.buffer,
+        .enabled_layer_count = @intCast(layers.len),
+        .pp_enabled_layer_names = &layers.buffer,
+        .p_next = if (use_debug_messenger) &dumci else null,
+    }, null);
+    const vki = try InstanceDispatch.load(instance, vkb.dispatch.vkGetInstanceProcAddr);
+    errdefer vki.destroyInstance(instance, null);
+
+    const messenger: vk.DebugUtilsMessengerEXT = if (use_debug_messenger)
+        try vki.createDebugUtilsMessengerEXT(instance, &dumci, null)
+    else
+        .null_handle;
+    errdefer if (use_debug_messenger)
+        vki.destroyDebugUtilsMessengerEXT(instance, messenger, null);
+
+    return .{ instance, vki, messenger };
+}
+
+/// note: destroy with vki.destroySurfaceKHR(instance, surface, null)
+pub fn create_surface(instance: vk.Instance, window: *c.GLFWwindow) !vk.SurfaceKHR {
+    var surface: vk.SurfaceKHR = undefined;
+    if (c.glfwCreateWindowSurface(instance, window, null, &surface) != .success) {
+        return error.SurfaceInitFailed;
+    }
+    return surface;
+}
+
+/// note: destroy with c.glfwDestroyWindow(window)
+pub fn create_window(extent: vk.Extent2D, title: [*:0]const u8) !*c.GLFWwindow {
+    c.glfwWindowHintString(c.GLFW_X11_CLASS_NAME, "floating_window");
+    c.glfwWindowHintString(c.GLFW_X11_INSTANCE_NAME, "floating_window");
+    c.glfwWindowHint(c.GLFW_CLIENT_API, c.GLFW_NO_API);
+
+    return c.glfwCreateWindow(
+        @intCast(extent.width),
+        @intCast(extent.height),
+        title,
+        null,
+        null,
+    ) orelse error.WindowInitFailed;
+}
+
+const DevicePair = std.meta.Tuple(&.{ vk.PhysicalDevice, vk.Device, DeviceDispatch, u32 });
+
+/// note: destroy with vkd.destroyDevice(dev, null)
+pub fn create_device(
+    ally: std.mem.Allocator,
+    instance: vk.Instance,
+    surface: vk.SurfaceKHR,
+    vki: InstanceDispatch,
+) !DevicePair {
+    const required_device_extensions: []const [*:0]const u8 = &.{
+        vk.extension_info.khr_swapchain.name,
+        vk.extension_info.khr_dynamic_rendering.name,
+    };
+
+    var pdev_count: u32 = undefined;
+    _ = try vki.enumeratePhysicalDevices(instance, &pdev_count, null);
+    const pdevs = try ally.alloc(vk.PhysicalDevice, pdev_count);
+    defer ally.free(pdevs);
+    _ = try vki.enumeratePhysicalDevices(instance, &pdev_count, pdevs.ptr);
+
+    pdev_search: for (pdevs) |pdev| {
+        const props = vki.getPhysicalDeviceProperties(pdev);
+        if (props.device_type != .discrete_gpu) continue :pdev_search;
+
+        var format_count: u32 = undefined;
+        _ = try vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &format_count, null);
+        if (format_count == 0) continue :pdev_search;
+
+        var mode_count: u32 = undefined;
+        _ = try vki.getPhysicalDeviceSurfacePresentModesKHR(pdev, surface, &mode_count, null);
+        if (mode_count == 0) continue :pdev_search;
+
+        var ext_count: u32 = undefined;
+        _ = try vki.enumerateDeviceExtensionProperties(pdev, null, &ext_count, null);
+        const exts = try ally.alloc(vk.ExtensionProperties, ext_count);
+        defer ally.free(exts);
+        _ = try vki.enumerateDeviceExtensionProperties(pdev, null, &ext_count, exts.ptr);
+
+        for (required_device_extensions) |name| {
+            for (exts) |ext| {
+                if (std.mem.eql(
+                    u8,
+                    std.mem.span(name),
+                    std.mem.sliceTo(&ext.extension_name, 0),
+                )) {
+                    break;
+                }
+            } else {
+                continue :pdev_search;
+            }
+        }
+
+        var family_count: u32 = undefined;
+        vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, null);
+        const families = try ally.alloc(vk.QueueFamilyProperties, family_count);
+        defer ally.free(families);
+        vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, families.ptr);
+
+        // just find one family that does graphics and present, so we can use exclusive sharing
+        // on the swapchain. apparently most hardware supports this. logic for queue allocation
+        // and swapchain creation is so much simpler this way. swapchain creation needs to know
+        // the list of queue family indices which will have access to the images, and there's a
+        // performance penalty to allow concurrent access to multiple queue families.
+        //
+        // multiple _queues_ may have exclusive access, but only if they're in the same family.
+
+        const graphics_family: u32 = for (families, 0..) |family, idx| {
+            const graphics = family.queue_flags.graphics_bit;
+            const present = try vki.getPhysicalDeviceSurfaceSupportKHR(pdev, @intCast(idx), surface) == vk.TRUE;
+            if (graphics and present) {
+                break @intCast(idx);
+            }
+        } else {
+            continue :pdev_search;
+        };
+
+        std.log.debug("selecting device {s}", .{std.mem.sliceTo(&props.device_name, 0)});
+
+        const qci: []const vk.DeviceQueueCreateInfo = &.{
+            vk.DeviceQueueCreateInfo{
+                .queue_family_index = graphics_family,
+                .queue_count = 1,
+                .p_queue_priorities = &[_]f32{1.0},
+            },
+        };
+
+        const dev = try vki.createDevice(pdev, &.{
+            .queue_create_info_count = @intCast(qci.len),
+            .p_queue_create_infos = qci.ptr,
+            .enabled_extension_count = @intCast(required_device_extensions.len),
+            .pp_enabled_extension_names = required_device_extensions.ptr,
+            .p_next = &vk.PhysicalDeviceDynamicRenderingFeaturesKHR{
+                .dynamic_rendering = vk.TRUE,
+            },
+        }, null);
+        const vkd = try DeviceDispatch.load(dev, vki.dispatch.vkGetDeviceProcAddr);
+        errdefer vkd.destroyDevice(dev, null);
+
+        return .{ pdev, dev, vkd, graphics_family };
+    }
+
+    return error.NoSuitableDevice;
+}
+
+pub fn find_surface_format(
+    pdev: vk.PhysicalDevice,
+    vki: InstanceDispatch,
+    surface: vk.SurfaceKHR,
+    preferred: vk.SurfaceFormatKHR,
+) !vk.SurfaceFormatKHR {
+    var formats_buf: [64]vk.SurfaceFormatKHR = undefined;
+    var formats_count: u32 = @intCast(formats_buf.len);
+    _ = try vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &formats_count, &formats_buf);
+    const formats = formats_buf[0..formats_count];
+
+    for (formats) |format| {
+        if (std.meta.eql(format, preferred)) {
+            return format;
+        }
+    }
+
+    return formats[0];
+}
+
+pub fn find_present_mode(
+    pdev: vk.PhysicalDevice,
+    vki: InstanceDispatch,
+    surface: vk.SurfaceKHR,
+    preferred: vk.PresentModeKHR,
+) !vk.PresentModeKHR {
+    var modes_buf: [8]vk.PresentModeKHR = undefined;
+    var modes_count: u32 = @intCast(modes_buf.len);
+    _ = try vki.getPhysicalDeviceSurfacePresentModesKHR(pdev, surface, &modes_count, &modes_buf);
+    const modes = modes_buf[0..modes_count];
+
+    for (modes) |mode| {
+        if (std.meta.eql(mode, preferred)) {
+            return mode;
+        }
+    }
+
+    return .mailbox_khr;
+}
+
+pub fn find_swap_extent(
+    pdev: vk.PhysicalDevice,
+    vki: InstanceDispatch,
+    surface: vk.SurfaceKHR,
+    window: *c.GLFWwindow,
+) !vk.Extent2D {
+    const caps = try vki.getPhysicalDeviceSurfaceCapabilitiesKHR(pdev, surface);
+    var extent = caps.current_extent;
+
+    if (extent.width == std.math.maxInt(u32)) {
+        c.glfwGetFramebufferSize(window, @ptrCast(&extent.width), @ptrCast(&extent.height));
+        extent.width = std.math.clamp(extent.width, caps.min_image_extent.width, caps.max_image_extent.width);
+        extent.height = std.math.clamp(extent.height, caps.min_image_extent.height, caps.max_image_extent.height);
+    }
+
+    return extent;
+}
+
+pub fn find_swap_image_count(
+    pdev: vk.PhysicalDevice,
+    vki: InstanceDispatch,
+    surface: vk.SurfaceKHR,
+) !u32 {
+    const caps = try vki.getPhysicalDeviceSurfaceCapabilitiesKHR(pdev, surface);
+    var count = caps.min_image_count + 1;
+    if (caps.max_image_count > 0) {
+        count = @min(count, caps.max_image_count);
+    }
+    return count;
+}
+
+pub fn uploadData(
+    comptime T: type,
+    pdev: vk.PhysicalDevice,
+    vki: InstanceDispatch,
+    dev: vk.Device,
+    vkd: DeviceDispatch,
+    queue: vk.Queue,
+    pool: vk.CommandPool,
+    buffer: vk.Buffer,
+    source: []const T,
+) !void {
+    // if (@typeInfo(T) == .Struct and @typeInfo(T).Struct.layout == .auto) @compileError("Requires defined T layout");
+
+    const size = @sizeOf(T) * source.len;
+
+    const staging_buffer = try vkd.createBuffer(dev, &.{
+        .size = size,
+        .usage = .{ .transfer_src_bit = true },
+        .sharing_mode = .exclusive,
+    }, null);
+    defer vkd.destroyBuffer(dev, staging_buffer, null);
+
+    const mem_reqs = vkd.getBufferMemoryRequirements(dev, staging_buffer);
+    const staging_memory = try allocate(pdev, vki, dev, vkd, mem_reqs, .{
+        .host_visible_bit = true,
+        .host_coherent_bit = true,
+    });
+    defer vkd.freeMemory(dev, staging_memory, null);
+
+    try vkd.bindBufferMemory(dev, staging_buffer, staging_memory, 0);
+
+    {
+        const data = try vkd.mapMemory(dev, staging_memory, 0, vk.WHOLE_SIZE, .{});
+        defer vkd.unmapMemory(dev, staging_memory);
+
+        const dest: [*]T = @ptrCast(@alignCast(data));
+        @memcpy(dest, source);
+    }
+
+    try copyBuffer(dev, queue, pool, buffer, staging_buffer, size, vkd);
+}
+
+pub fn copyBuffer(
+    dev: vk.Device,
+    queue: vk.Queue,
+    pool: vk.CommandPool,
+    dst: vk.Buffer,
+    src: vk.Buffer,
+    size: vk.DeviceSize,
+    vkd: DeviceDispatch,
+) !void {
+    var cmdbuf: vk.CommandBuffer = undefined;
+    try vkd.allocateCommandBuffers(dev, &.{
+        .command_pool = pool,
+        .level = .primary,
+        .command_buffer_count = 1,
+    }, @ptrCast(&cmdbuf));
+    defer vkd.freeCommandBuffers(dev, pool, 1, @ptrCast(&cmdbuf));
+
+    try vkd.beginCommandBuffer(cmdbuf, &.{
+        .flags = .{ .one_time_submit_bit = true },
+    });
+
+    const region = vk.BufferCopy{
+        .src_offset = 0,
+        .dst_offset = 0,
+        .size = size,
+    };
+    vkd.cmdCopyBuffer(cmdbuf, src, dst, 1, @ptrCast(&region));
+
+    try vkd.endCommandBuffer(cmdbuf);
+
+    const si = vk.SubmitInfo{
+        .command_buffer_count = 1,
+        .p_command_buffers = @ptrCast(&cmdbuf),
+        .p_wait_dst_stage_mask = undefined,
+    };
+    // creating and submitting a queue for every copy operation seems a bad idea for "streamed" data
+    // gonna want a way to send a copy operation WITH SYNCHRONIZATION PRIMITIVES on a particular queue
+    // see https://stackoverflow.com/a/62183243
+    //
+    // this may be a misunderstanding on how submission works...
+    try vkd.queueSubmit(queue, 1, @ptrCast(&si), .null_handle);
+    try vkd.queueWaitIdle(queue);
+}
+
+pub fn findMemoryTypeIndex(
+    pdev: vk.PhysicalDevice,
+    memory_type_bits: u32,
+    flags: vk.MemoryPropertyFlags,
+    vki: InstanceDispatch,
+) !u32 {
+    const mem_props = vki.getPhysicalDeviceMemoryProperties(pdev);
+
+    for (mem_props.memory_types[0..mem_props.memory_type_count], 0..) |mem_type, i| {
+        if (memory_type_bits & (@as(u32, 1) << @truncate(i)) != 0 and mem_type.property_flags.contains(flags)) {
+            return @truncate(i);
+        }
+    }
+
+    return error.NoSuitableMemoryType;
+}
+
+pub fn allocate(
+    pdev: vk.PhysicalDevice,
+    vki: InstanceDispatch,
+    dev: vk.Device,
+    vkd: DeviceDispatch,
+    requirements: vk.MemoryRequirements,
+    flags: vk.MemoryPropertyFlags,
+) !vk.DeviceMemory {
+    return try vkd.allocateMemory(dev, &.{
+        .allocation_size = requirements.size,
+        .memory_type_index = try findMemoryTypeIndex(pdev, requirements.memory_type_bits, flags, vki),
+    }, null);
+}
+
 pub const BaseDispatch = vk.BaseWrapper(.{
     .createInstance = true,
     .getInstanceProcAddr = true,
src/main.zig (566 changed lines)
@@ -50,271 +50,6 @@ const vertices = [_]Vertex{
 
 const indices = [_]Index{ 4, 5, 6, 6, 5, 7 };
-
-const InstancePair = std.meta.Tuple(&.{ vk.Instance, gfx.InstanceDispatch, vk.DebugUtilsMessengerEXT });
-
-/// note: destroy with vki.destroyInstance(instance, null)
-fn create_instance(vkb: gfx.BaseDispatch) !InstancePair {
-    var exts = std.BoundedArray([*:0]const u8, 32){};
-    var layers = std.BoundedArray([*:0]const u8, 32){};
-
-    if (gfx.use_debug_messenger) {
-        try exts.appendSlice(&.{
-            vk.extension_info.ext_debug_utils.name,
-        });
-
-        try layers.appendSlice(&.{
-            "VK_LAYER_KHRONOS_validation",
-        });
-    }
-
-    var glfw_exts_count: u32 = 0;
-    const glfw_exts: [*]const [*:0]const u8 =
-        @ptrCast(c.glfwGetRequiredInstanceExtensions(&glfw_exts_count));
-    try exts.appendSlice(glfw_exts[0..glfw_exts_count]);
-
-    const dumci: vk.DebugUtilsMessengerCreateInfoEXT = .{
-        .message_severity = .{
-            .error_bit_ext = true,
-            .info_bit_ext = true,
-            .verbose_bit_ext = true,
-            .warning_bit_ext = true,
-        },
-        .message_type = .{
-            .device_address_binding_bit_ext = true,
-            .general_bit_ext = false,
-            .performance_bit_ext = true,
-            .validation_bit_ext = true,
-        },
-        .pfn_user_callback = &gfx.debug_callback,
-        .p_user_data = null,
-    };
-
-    const instance = try vkb.createInstance(&vk.InstanceCreateInfo{
-        .p_application_info = &vk.ApplicationInfo{
-            .p_application_name = app_name,
-            .application_version = vk.makeApiVersion(0, 0, 0, 0),
-            .p_engine_name = app_name,
-            .engine_version = vk.makeApiVersion(0, 0, 0, 0),
-            .api_version = vk.API_VERSION_1_3,
-        },
-        .enabled_extension_count = @intCast(exts.len),
-        .pp_enabled_extension_names = &exts.buffer,
-        .enabled_layer_count = @intCast(layers.len),
-        .pp_enabled_layer_names = &layers.buffer,
-        .p_next = if (gfx.use_debug_messenger) &dumci else null,
-    }, null);
-    const vki = try gfx.InstanceDispatch.load(instance, vkb.dispatch.vkGetInstanceProcAddr);
-    errdefer vki.destroyInstance(instance, null);
-
-    const messenger: vk.DebugUtilsMessengerEXT = if (gfx.use_debug_messenger)
-        try vki.createDebugUtilsMessengerEXT(instance, &dumci, null)
-    else
-        .null_handle;
-    errdefer if (gfx.use_debug_messenger)
-        vki.destroyDebugUtilsMessengerEXT(instance, messenger, null);
-
-    return .{ instance, vki, messenger };
-}
-
-/// note: destroy with vki.destroySurfaceKHR(instance, surface, null)
-fn create_surface(instance: vk.Instance, window: *c.GLFWwindow) !vk.SurfaceKHR {
-    var surface: vk.SurfaceKHR = undefined;
-    if (c.glfwCreateWindowSurface(instance, window, null, &surface) != .success) {
-        return error.SurfaceInitFailed;
-    }
-    return surface;
-}
-
-/// note: destroy with c.glfwDestroyWindow(window)
-fn create_window(extent: vk.Extent2D, title: [*:0]const u8) !*c.GLFWwindow {
-    c.glfwWindowHintString(c.GLFW_X11_CLASS_NAME, "floating_window");
-    c.glfwWindowHintString(c.GLFW_X11_INSTANCE_NAME, "floating_window");
-    c.glfwWindowHint(c.GLFW_CLIENT_API, c.GLFW_NO_API);
-
-    return c.glfwCreateWindow(
-        @intCast(extent.width),
-        @intCast(extent.height),
-        title,
-        null,
-        null,
-    ) orelse error.WindowInitFailed;
-}
-
-const DevicePair = std.meta.Tuple(&.{ vk.PhysicalDevice, vk.Device, gfx.DeviceDispatch, u32 });
-
-/// note: destroy with vkd.destroyDevice(dev, null)
-fn create_device(
-    ally: std.mem.Allocator,
-    instance: vk.Instance,
-    surface: vk.SurfaceKHR,
-    vki: gfx.InstanceDispatch,
-) !DevicePair {
-    const required_device_extensions: []const [*:0]const u8 = &.{
-        vk.extension_info.khr_swapchain.name,
-        vk.extension_info.khr_dynamic_rendering.name,
-    };
-
-    var pdev_count: u32 = undefined;
-    _ = try vki.enumeratePhysicalDevices(instance, &pdev_count, null);
-    const pdevs = try ally.alloc(vk.PhysicalDevice, pdev_count);
-    defer ally.free(pdevs);
-    _ = try vki.enumeratePhysicalDevices(instance, &pdev_count, pdevs.ptr);
-
-    pdev_search: for (pdevs) |pdev| {
-        const props = vki.getPhysicalDeviceProperties(pdev);
-        if (props.device_type != .discrete_gpu) continue :pdev_search;
-
-        var format_count: u32 = undefined;
-        _ = try vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &format_count, null);
-        if (format_count == 0) continue :pdev_search;
-
-        var mode_count: u32 = undefined;
-        _ = try vki.getPhysicalDeviceSurfacePresentModesKHR(pdev, surface, &mode_count, null);
-        if (mode_count == 0) continue :pdev_search;
-
-        var ext_count: u32 = undefined;
-        _ = try vki.enumerateDeviceExtensionProperties(pdev, null, &ext_count, null);
-        const exts = try ally.alloc(vk.ExtensionProperties, ext_count);
-        defer ally.free(exts);
-        _ = try vki.enumerateDeviceExtensionProperties(pdev, null, &ext_count, exts.ptr);
-
-        for (required_device_extensions) |name| {
-            for (exts) |ext| {
-                if (std.mem.eql(
-                    u8,
-                    std.mem.span(name),
-                    std.mem.sliceTo(&ext.extension_name, 0),
-                )) {
-                    break;
-                }
-            } else {
-                continue :pdev_search;
-            }
-        }
-
-        var family_count: u32 = undefined;
-        vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, null);
-        const families = try ally.alloc(vk.QueueFamilyProperties, family_count);
-        defer ally.free(families);
-        vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, families.ptr);
-
-        // just find one family that does graphics and present, so we can use exclusive sharing
-        // on the swapchain. apparently most hardware supports this. logic for queue allocation
-        // and swapchain creation is so much simpler this way. swapchain creation needs to know
-        // the list of queue family indices which will have access to the images, and there's a
-        // performance penalty to allow concurrent access to multiple queue families.
-        //
-        // multiple _queues_ may have exclusive access, but only if they're in the same family.
-
-        const graphics_family: u32 = for (families, 0..) |family, idx| {
-            const graphics = family.queue_flags.graphics_bit;
-            const present = try vki.getPhysicalDeviceSurfaceSupportKHR(pdev, @intCast(idx), surface) == vk.TRUE;
-            if (graphics and present) {
-                break @intCast(idx);
-            }
-        } else {
-            continue :pdev_search;
-        };
-
-        std.log.debug("selecting device {s}", .{std.mem.sliceTo(&props.device_name, 0)});
-
-        const qci: []const vk.DeviceQueueCreateInfo = &.{
-            vk.DeviceQueueCreateInfo{
-                .queue_family_index = graphics_family,
-                .queue_count = 1,
-                .p_queue_priorities = &[_]f32{1.0},
-            },
-        };
-
-        const dev = try vki.createDevice(pdev, &.{
-            .queue_create_info_count = @intCast(qci.len),
-            .p_queue_create_infos = qci.ptr,
-            .enabled_extension_count = @intCast(required_device_extensions.len),
-            .pp_enabled_extension_names = required_device_extensions.ptr,
-            .p_next = &vk.PhysicalDeviceDynamicRenderingFeaturesKHR{
-                .dynamic_rendering = vk.TRUE,
-            },
-        }, null);
-        const vkd = try gfx.DeviceDispatch.load(dev, vki.dispatch.vkGetDeviceProcAddr);
-        errdefer vkd.destroyDevice(dev, null);
-
-        return .{ pdev, dev, vkd, graphics_family };
-    }
-
-    return error.NoSuitableDevice;
-}
-
-fn find_surface_format(
-    pdev: vk.PhysicalDevice,
-    vki: gfx.InstanceDispatch,
-    surface: vk.SurfaceKHR,
-    preferred: vk.SurfaceFormatKHR,
-) !vk.SurfaceFormatKHR {
-    var formats_buf: [64]vk.SurfaceFormatKHR = undefined;
-    var formats_count: u32 = @intCast(formats_buf.len);
-    _ = try vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &formats_count, &formats_buf);
-    const formats = formats_buf[0..formats_count];
-
-    for (formats) |format| {
-        if (std.meta.eql(format, preferred)) {
-            return format;
-        }
-    }
-
-    return formats[0];
-}
-
-fn find_present_mode(
-    pdev: vk.PhysicalDevice,
-    vki: gfx.InstanceDispatch,
-    surface: vk.SurfaceKHR,
-    preferred: vk.PresentModeKHR,
-) !vk.PresentModeKHR {
-    var modes_buf: [8]vk.PresentModeKHR = undefined;
-    var modes_count: u32 = @intCast(modes_buf.len);
-    _ = try vki.getPhysicalDeviceSurfacePresentModesKHR(pdev, surface, &modes_count, &modes_buf);
-    const modes = modes_buf[0..modes_count];
-
-    for (modes) |mode| {
-        if (std.meta.eql(mode, preferred)) {
-            return mode;
-        }
-    }
-
-    return .mailbox_khr;
-}
-
-fn find_swap_extent(
-    pdev: vk.PhysicalDevice,
-    vki: gfx.InstanceDispatch,
-    surface: vk.SurfaceKHR,
-    window: *c.GLFWwindow,
-) !vk.Extent2D {
-    const caps = try vki.getPhysicalDeviceSurfaceCapabilitiesKHR(pdev, surface);
-    var extent = caps.current_extent;
-
-    if (extent.width == std.math.maxInt(u32)) {
-        c.glfwGetFramebufferSize(window, @ptrCast(&extent.width), @ptrCast(&extent.height));
-        extent.width = std.math.clamp(extent.width, caps.min_image_extent.width, caps.max_image_extent.width);
-        extent.height = std.math.clamp(extent.height, caps.min_image_extent.height, caps.max_image_extent.height);
-    }
-
-    return extent;
-}
-
-fn find_swap_image_count(
-    pdev: vk.PhysicalDevice,
-    vki: gfx.InstanceDispatch,
-    surface: vk.SurfaceKHR,
-) !u32 {
-    const caps = try vki.getPhysicalDeviceSurfaceCapabilitiesKHR(pdev, surface);
-    var count = caps.min_image_count + 1;
-    if (caps.max_image_count > 0) {
-        count = @min(count, caps.max_image_count);
-    }
-    return count;
-}
-
 pub fn main() !void {
     var gpa = std.heap.GeneralPurposeAllocator(.{}){};
     defer _ = gpa.deinit();
@@ -330,21 +65,21 @@ pub fn main() !void {
 
     var extent = vk.Extent2D{ .width = 800, .height = 600 };
 
-    const window = try create_window(extent, app_name);
+    const window = try gfx.create_window(extent, app_name);
     defer c.glfwDestroyWindow(window);
 
     const vkb = try gfx.BaseDispatch.load(c.glfwGetInstanceProcAddress);
 
-    const instance, const vki, const messenger = try create_instance(vkb);
+    const instance, const vki, const messenger = try gfx.create_instance(vkb, app_name);
     defer vki.destroyInstance(instance, null);
     defer if (gfx.use_debug_messenger)
         vki.destroyDebugUtilsMessengerEXT(instance, messenger, null);
 
-    const surface = try create_surface(instance, window);
+    const surface = try gfx.create_surface(instance, window);
     defer vki.destroySurfaceKHR(instance, surface, null);
 
     const pdev: vk.PhysicalDevice, const dev: vk.Device, const vkd: gfx.DeviceDispatch, const family: u32 =
-        try create_device(ally, instance, surface, vki);
+        try gfx.create_device(ally, instance, surface, vki);
     defer vkd.destroyDevice(dev, null);
 
     const queue = vkd.getDeviceQueue(dev, family, 0);
@@ -353,12 +88,12 @@ pub fn main() !void {
         .format = .b8g8r8a8_srgb,
         .color_space = .srgb_nonlinear_khr,
     };
-    const format = try find_surface_format(pdev, vki, surface, preferred_format);
-    extent = try find_swap_extent(pdev, vki, surface, window);
+    const format = try gfx.find_surface_format(pdev, vki, surface, preferred_format);
+    extent = try gfx.find_swap_extent(pdev, vki, surface, window);
 
-    const present_mode = try find_present_mode(pdev, vki, surface, .mailbox_khr);
+    const present_mode = try gfx.find_present_mode(pdev, vki, surface, .mailbox_khr);
 
-    const swap_image_count = try find_swap_image_count(pdev, vki, surface);
+    const swap_image_count = try gfx.find_swap_image_count(pdev, vki, surface);
 
     var swapchain: vk.SwapchainKHR = .null_handle;
     defer vkd.destroySwapchainKHR(dev, swapchain, null);
@@ -432,11 +167,11 @@ pub fn main() !void {
     }, null);
     defer vkd.destroyBuffer(dev, vertex_buffer, null);
     const vertex_mem_reqs = vkd.getBufferMemoryRequirements(dev, vertex_buffer);
-    const vertex_memory = try allocate(pdev, vki, dev, vkd, vertex_mem_reqs, .{ .device_local_bit = true });
+    const vertex_memory = try gfx.allocate(pdev, vki, dev, vkd, vertex_mem_reqs, .{ .device_local_bit = true });
     defer vkd.freeMemory(dev, vertex_memory, null);
     try vkd.bindBufferMemory(dev, vertex_buffer, vertex_memory, 0);
 
-    try uploadData(Vertex, pdev, vki, dev, vkd, queue, pool, vertex_buffer, &vertices);
+    try gfx.uploadData(Vertex, pdev, vki, dev, vkd, queue, pool, vertex_buffer, &vertices);
 
     const index_buffer = try vkd.createBuffer(dev, &.{
         .size = @sizeOf(@TypeOf(indices)),
@@ -445,11 +180,11 @@ pub fn main() !void {
     }, null);
     defer vkd.destroyBuffer(dev, index_buffer, null);
     const index_mem_reqs = vkd.getBufferMemoryRequirements(dev, index_buffer);
-    const index_memory = try allocate(pdev, vki, dev, vkd, index_mem_reqs, .{ .device_local_bit = true });
+    const index_memory = try gfx.allocate(pdev, vki, dev, vkd, index_mem_reqs, .{ .device_local_bit = true });
     defer vkd.freeMemory(dev, index_memory, null);
     try vkd.bindBufferMemory(dev, index_buffer, index_memory, 0);
 
-    try uploadData(Index, pdev, vki, dev, vkd, queue, pool, index_buffer, &indices);
+    try gfx.uploadData(Index, pdev, vki, dev, vkd, queue, pool, index_buffer, &indices);
 
     // var cmdbufs = try createCommandBuffers(
     //     &gc,
@@ -505,92 +240,6 @@ pub fn main() !void {
     try vkd.deviceWaitIdle(dev);
 }
 
-fn uploadData(
-    comptime T: type,
-    pdev: vk.PhysicalDevice,
-    vki: gfx.InstanceDispatch,
-    dev: vk.Device,
-    vkd: gfx.DeviceDispatch,
-    queue: vk.Queue,
-    pool: vk.CommandPool,
-    buffer: vk.Buffer,
-    source: []const T,
-) !void {
-    // if (@typeInfo(T) == .Struct and @typeInfo(T).Struct.layout == .auto) @compileError("Requires defined T layout");
-
-    const size = @sizeOf(T) * source.len;
-
-    const staging_buffer = try vkd.createBuffer(dev, &.{
-        .size = size,
-        .usage = .{ .transfer_src_bit = true },
-        .sharing_mode = .exclusive,
-    }, null);
-    defer vkd.destroyBuffer(dev, staging_buffer, null);
-
-    const mem_reqs = vkd.getBufferMemoryRequirements(dev, staging_buffer);
-    const staging_memory = try allocate(pdev, vki, dev, vkd, mem_reqs, .{
-        .host_visible_bit = true,
-        .host_coherent_bit = true,
-    });
-    defer vkd.freeMemory(dev, staging_memory, null);
-
-    try vkd.bindBufferMemory(dev, staging_buffer, staging_memory, 0);
-
-    {
-        const data = try vkd.mapMemory(dev, staging_memory, 0, vk.WHOLE_SIZE, .{});
-        defer vkd.unmapMemory(dev, staging_memory);
-
-        const dest: [*]T = @ptrCast(@alignCast(data));
-        @memcpy(dest, source);
-    }
-
-    try copyBuffer(dev, queue, pool, buffer, staging_buffer, size, vkd);
-}
-
-fn copyBuffer(
-    dev: vk.Device,
-    queue: vk.Queue,
-    pool: vk.CommandPool,
-    dst: vk.Buffer,
-    src: vk.Buffer,
-    size: vk.DeviceSize,
-    vkd: gfx.DeviceDispatch,
-) !void {
-    var cmdbuf: vk.CommandBuffer = undefined;
-    try vkd.allocateCommandBuffers(dev, &.{
-        .command_pool = pool,
-        .level = .primary,
-        .command_buffer_count = 1,
-    }, @ptrCast(&cmdbuf));
-    defer vkd.freeCommandBuffers(dev, pool, 1, @ptrCast(&cmdbuf));
-
-    try vkd.beginCommandBuffer(cmdbuf, &.{
-        .flags = .{ .one_time_submit_bit = true },
-    });
-
-    const region = vk.BufferCopy{
-        .src_offset = 0,
-        .dst_offset = 0,
-        .size = size,
-    };
-    vkd.cmdCopyBuffer(cmdbuf, src, dst, 1, @ptrCast(&region));
-
-    try vkd.endCommandBuffer(cmdbuf);
-
-    const si = vk.SubmitInfo{
-        .command_buffer_count = 1,
-        .p_command_buffers = @ptrCast(&cmdbuf),
-        .p_wait_dst_stage_mask = undefined,
-    };
-    // creating and submitting a queue for every copy operation seems a bad idea for "streamed" data
-    // gonna want a way to send a copy operation WITH SYNCHRONIZATION PRIMITIVES on a particular queue
-    // see https://stackoverflow.com/a/62183243
-    //
-    // this may be a misunderstanding on how submission works...
-    try vkd.queueSubmit(queue, 1, @ptrCast(&si), .null_handle);
-    try vkd.queueWaitIdle(queue);
-}
-
 fn createCommandBuffers(
     views: []const vk.Image,
     images: []const vk.ImageView,
@@ -767,118 +416,95 @@ fn createPipeline(dev: vk.Device, layout: vk.PipelineLayout, format: vk.SurfaceF
         },
     };
 
-    const pcbas = vk.PipelineColorBlendAttachmentState{
-        .blend_enable = vk.FALSE,
-        .src_color_blend_factor = .one,
-        .dst_color_blend_factor = .zero,
-        .color_blend_op = .add,
-        .src_alpha_blend_factor = .one,
-        .dst_alpha_blend_factor = .zero,
-        .alpha_blend_op = .add,
-        .color_write_mask = .{ .r_bit = true, .g_bit = true, .b_bit = true, .a_bit = true },
+    const color_blend_attachment_states = [_]vk.PipelineColorBlendAttachmentState{
+        vk.PipelineColorBlendAttachmentState{
+            .blend_enable = vk.FALSE,
+            .src_color_blend_factor = .one,
+            .dst_color_blend_factor = .zero,
+            .color_blend_op = .add,
+            .src_alpha_blend_factor = .one,
+            .dst_alpha_blend_factor = .zero,
+            .alpha_blend_op = .add,
+            .color_write_mask = .{ .r_bit = true, .g_bit = true, .b_bit = true, .a_bit = true },
+        },
     };
 
-    const dynstate = [_]vk.DynamicState{ .viewport, .scissor };
+    const dynamic_states = [_]vk.DynamicState{
+        .viewport,
+        .scissor,
+    };
 
-    const gpci = vk.GraphicsPipelineCreateInfo{
-        .flags = .{},
-        .stage_count = @intCast(pssci.len),
-        .p_stages = &pssci,
-        .p_vertex_input_state = &vk.PipelineVertexInputStateCreateInfo{
-            .vertex_binding_description_count = 1,
-            .p_vertex_binding_descriptions = @ptrCast(&Vertex.binding_description),
-            .vertex_attribute_description_count = Vertex.attribute_description.len,
-            .p_vertex_attribute_descriptions = &Vertex.attribute_description,
-        },
-        .p_input_assembly_state = &vk.PipelineInputAssemblyStateCreateInfo{
-            .topology = .triangle_list,
-            .primitive_restart_enable = vk.FALSE,
-        },
-        .p_tessellation_state = null,
-        .p_viewport_state = &vk.PipelineViewportStateCreateInfo{
-            .viewport_count = 1,
-            .p_viewports = undefined, // set in createCommandBuffers with cmdSetViewport
-            .scissor_count = 1,
-            .p_scissors = undefined, // set in createCommandBuffers with cmdSetScissor
-        },
-        .p_rasterization_state = &vk.PipelineRasterizationStateCreateInfo{
-            .depth_clamp_enable = vk.FALSE,
-            .rasterizer_discard_enable = vk.FALSE,
-            .polygon_mode = .fill,
-            .cull_mode = .{ .back_bit = true },
-            .front_face = .counter_clockwise,
-            .depth_bias_enable = vk.FALSE,
-            .depth_bias_constant_factor = 0,
-            .depth_bias_clamp = 0,
-            .depth_bias_slope_factor = 0,
-            .line_width = 1,
-        },
-        .p_multisample_state = &vk.PipelineMultisampleStateCreateInfo{
-            .rasterization_samples = .{ .@"1_bit" = true },
-            .sample_shading_enable = vk.FALSE,
-            .min_sample_shading = 1,
-            .alpha_to_coverage_enable = vk.FALSE,
-            .alpha_to_one_enable = vk.FALSE,
-        },
-        .p_depth_stencil_state = null,
-        .p_color_blend_state = &vk.PipelineColorBlendStateCreateInfo{
-            .logic_op_enable = vk.FALSE,
-            .logic_op = .copy,
-            .attachment_count = 1,
-            .p_attachments = @ptrCast(&pcbas),
-            .blend_constants = [_]f32{ 0, 0, 0, 0 },
-        },
-        .p_dynamic_state = &vk.PipelineDynamicStateCreateInfo{
-            .flags = .{},
-            .dynamic_state_count = dynstate.len,
-            .p_dynamic_states = &dynstate,
-        },
-        .layout = layout,
-        .render_pass = .null_handle,
-        .subpass = 0,
-        .base_pipeline_handle = .null_handle,
-        .base_pipeline_index = -1,
-        .p_next = &vk.PipelineRenderingCreateInfoKHR{
-            .color_attachment_count = 1,
-            .p_color_attachment_formats = @ptrCast(&format),
-            .depth_attachment_format = .undefined,
-            .stencil_attachment_format = .undefined,
-            .view_mask = 0,
+    const create_infos = [_]vk.GraphicsPipelineCreateInfo{
+        .{
+            .flags = .{},
+            .stage_count = @intCast(pssci.len),
+            .p_stages = &pssci,
+            .p_vertex_input_state = &vk.PipelineVertexInputStateCreateInfo{
+                .vertex_binding_description_count = 1,
+                .p_vertex_binding_descriptions = @ptrCast(&Vertex.binding_description),
+                .vertex_attribute_description_count = Vertex.attribute_description.len,
+                .p_vertex_attribute_descriptions = &Vertex.attribute_description,
+            },
+            .p_input_assembly_state = &vk.PipelineInputAssemblyStateCreateInfo{
+                .topology = .triangle_list,
+                .primitive_restart_enable = vk.FALSE,
+            },
+            .p_tessellation_state = null,
+            .p_viewport_state = &vk.PipelineViewportStateCreateInfo{
+                .viewport_count = 1,
+                .p_viewports = undefined, // set in createCommandBuffers with cmdSetViewport
+                .scissor_count = 1,
+                .p_scissors = undefined, // set in createCommandBuffers with cmdSetScissor
+            },
+            .p_rasterization_state = &vk.PipelineRasterizationStateCreateInfo{
+                .depth_clamp_enable = vk.FALSE,
+                .rasterizer_discard_enable = vk.FALSE,
+                .polygon_mode = .fill,
+                .cull_mode = .{ .back_bit = true },
+                .front_face = .counter_clockwise,
+                .depth_bias_enable = vk.FALSE,
+                .depth_bias_constant_factor = 0,
+                .depth_bias_clamp = 0,
+                .depth_bias_slope_factor = 0,
+                .line_width = 1,
+            },
+            .p_multisample_state = &vk.PipelineMultisampleStateCreateInfo{
+                .rasterization_samples = .{ .@"1_bit" = true },
+                .sample_shading_enable = vk.FALSE,
+                .min_sample_shading = 1,
+                .alpha_to_coverage_enable = vk.FALSE,
+                .alpha_to_one_enable = vk.FALSE,
+            },
+            .p_depth_stencil_state = null,
+            .p_color_blend_state = &vk.PipelineColorBlendStateCreateInfo{
+                .logic_op_enable = vk.FALSE,
+                .logic_op = .copy,
+                .attachment_count = @intCast(color_blend_attachment_states.len),
+                .p_attachments = &color_blend_attachment_states,
+                .blend_constants = [_]f32{ 0, 0, 0, 0 },
+            },
+            .p_dynamic_state = &vk.PipelineDynamicStateCreateInfo{
+                .flags = .{},
+                .dynamic_state_count = @intCast(dynamic_states.len),
+                .p_dynamic_states = &dynamic_states,
+            },
+            .layout = layout,
+            .render_pass = .null_handle,
+            .subpass = 0,
+            .base_pipeline_handle = .null_handle,
+            .base_pipeline_index = -1,
+            .p_next = &vk.PipelineRenderingCreateInfoKHR{
+                .color_attachment_count = 1,
+                .p_color_attachment_formats = @ptrCast(&format),
+                .depth_attachment_format = .undefined,
+                .stencil_attachment_format = .undefined,
+                .view_mask = 0,
+            },
         },
     };
 
-    var pipeline: vk.Pipeline = undefined;
-    _ = try vkd.createGraphicsPipelines(dev, .null_handle, 1, @ptrCast(&gpci), null, @ptrCast(&pipeline));
-    return pipeline;
-}
-
-pub fn findMemoryTypeIndex(
-    pdev: vk.PhysicalDevice,
-    memory_type_bits: u32,
-    flags: vk.MemoryPropertyFlags,
-    vki: gfx.InstanceDispatch,
-) !u32 {
-    const mem_props = vki.getPhysicalDeviceMemoryProperties(pdev);
-
-    for (mem_props.memory_types[0..mem_props.memory_type_count], 0..) |mem_type, i| {
-        if (memory_type_bits & (@as(u32, 1) << @truncate(i)) != 0 and mem_type.property_flags.contains(flags)) {
-            return @truncate(i);
-        }
-    }
-
-    return error.NoSuitableMemoryType;
-}
-
-pub fn allocate(
-    pdev: vk.PhysicalDevice,
-    vki: gfx.InstanceDispatch,
-    dev: vk.Device,
-    vkd: gfx.DeviceDispatch,
-    requirements: vk.MemoryRequirements,
-    flags: vk.MemoryPropertyFlags,
-) !vk.DeviceMemory {
-    return try vkd.allocateMemory(dev, &.{
-        .allocation_size = requirements.size,
-        .memory_type_index = try findMemoryTypeIndex(pdev, requirements.memory_type_bits, flags, vki),
-    }, null);
+    var pipelines: [create_infos.len]vk.Pipeline = undefined;
+    _ = try vkd.createGraphicsPipelines(dev, .null_handle, @intCast(create_infos.len), &create_infos, null, &pipelines);
+    std.debug.assert(pipelines.len == 1);
+    return pipelines[0];
 }
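
For reference, a minimal sketch of how the relocated gfx helpers are meant to be driven from the application side, based on the call sites in src/main.zig above. The @import("gfx.zig") path, the demo function, and the "demo" application name are illustrative assumptions rather than part of this commit; error handling and the swapchain, buffer, and pipeline setup that follow in main.zig are omitted.

const std = @import("std");
const vk = @import("vk");
const c = @import("c.zig");
const gfx = @import("gfx.zig"); // assumed import path for the new module

fn demo(ally: std.mem.Allocator) !void {
    // window + instance bring-up, mirroring the call sites in main()
    const extent = vk.Extent2D{ .width = 800, .height = 600 };
    const window = try gfx.create_window(extent, "demo");
    defer c.glfwDestroyWindow(window);

    const vkb = try gfx.BaseDispatch.load(c.glfwGetInstanceProcAddress);
    const instance, const vki, const messenger = try gfx.create_instance(vkb, "demo");
    defer vki.destroyInstance(instance, null);
    defer if (gfx.use_debug_messenger)
        vki.destroyDebugUtilsMessengerEXT(instance, messenger, null);

    const surface = try gfx.create_surface(instance, window);
    defer vki.destroySurfaceKHR(instance, surface, null);

    // device selection and queue retrieval
    const pdev, const dev, const vkd, const family = try gfx.create_device(ally, instance, surface, vki);
    defer vkd.destroyDevice(dev, null);
    const queue = vkd.getDeviceQueue(dev, family, 0);
    _ = pdev;
    _ = queue;
}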