Merge pull request #136 from Snektron/proxy

Proxy
This commit is contained in:
Robin Voetter
2024-04-29 23:36:53 +02:00
committed by GitHub
6 changed files with 455 additions and 193 deletions

View File

@@ -143,12 +143,27 @@ For each function, a wrapper is generated into one of three structs:
* InstanceWrapper. This contains wrappers for functions which are otherwise loaded by `vkGetInstanceProcAddr`.
* DeviceWrapper. This contains wrappers for functions which are loaded by `vkGetDeviceProcAddr`.
Each wrapper struct can be called with an array of the appropriate enums:
To create a wrapper type, an "api specification" should be passed to it. This is a list of `ApiInfo` structs, which allows one to specify the functions that should be made available. An `ApiInfo` structure is initialized with 3 optional fields, `base_commands`, `instance_commands`, and `device_commands`. Each of these takes a set of the vulkan functions that should be made available for that category, for example, setting `.createInstance = true` in `base_commands` makes the `createInstance` function available (loaded from `vkCreateInstance`). An entire feature level or extension can be pulled in at once too, for example, `vk.features.version_1_0` contains all functions for Vulkan 1.0. `vk.extensions.khr_surface` contains all functions for the `VK_KHR_surface` extension.
```zig
const vk = @import("vulkan");
const BaseDispatch = vk.BaseWrapper(.{
/// To construct base, instance and device wrappers for vulkan-zig, you need to pass a list of 'apis' to it.
const apis: []const vk.ApiInfo = &.{
    // You can either add individual functions by manually creating an 'api'
.{
.base_commands = .{
.createInstance = true,
});
},
.instance_commands = .{
.createDevice = true,
},
},
// Or you can add entire feature sets or extensions
vk.features.version_1_0,
vk.extensions.khr_surface,
vk.extensions.khr_swapchain,
};
const BaseDispatch = vk.BaseWrapper(apis);
```
The wrapper struct then provides wrapper functions for each function pointer in the dispatch struct:
```zig
@@ -232,6 +247,23 @@ By default, wrapper `load` functions return `error.CommandLoadFailure` if a call
One can access the underlying unwrapped C functions by doing `wrapper.dispatch.vkFuncYouWant(..)`.
#### Proxying Wrappers
Proxying wrappers wrap a wrapper and a pointer to the associated handle in a single struct, and automatically pass this handle to commands as appropriate. Besides the proxying wrappers for instances and devices, there are also proxying wrappers for queues and command buffers. Proxying wrapper types are constructed in the same way as a regular wrapper, by passing an api specification to them. To initialize a proxying wrapper, it must be passed a handle and a pointer to an appropriate wrapper. For queue and command buffer proxying wrappers, a pointer to a device wrapper must be passed.
```zig
// Create the dispatch tables
const InstanceDispatch = vk.InstanceWrapper(apis);
const Instance = vk.InstanceProxy(apis);
const instance_handle = try vkb.createInstance(...);
const vki = try InstanceDispatch.load(instance_handle, vkb.vkGetInstanceProcAddr);
const instance = Instance.load(instance_handle, &vki);
defer instance.destroyInstance(null);
```
For queue and command buffer proxying wrappers, the `queue` and `cmd` prefixes are removed for functions where appropriate. Note that the device proxying wrappers also have the queue and command buffer functions made available for convenience, but there the prefix is not stripped.
### Bitflags
Packed structs of bools are used for bit flags in vulkan-zig, instead of both a `FlagBits` and `Flags` variant. Places where either of these variants are used are both replaced by this packed struct instead. This means that even in places where just one flag would normally be accepted, the packed struct is accepted. The programmer is responsible for only enabling a single bit.

View File

@@ -27,23 +27,30 @@ const BaseDispatch = vk.BaseWrapper(apis);
const InstanceDispatch = vk.InstanceWrapper(apis);
const DeviceDispatch = vk.DeviceWrapper(apis);
pub const GraphicsContext = struct {
vkb: BaseDispatch,
vki: InstanceDispatch,
vkd: DeviceDispatch,
// Also create some proxying wrappers, which also have the respective handles
const Instance = vk.InstanceProxy(apis);
const Device = vk.DeviceProxy(apis);
instance: vk.Instance,
pub const GraphicsContext = struct {
pub const CommandBuffer = vk.CommandBufferProxy(apis);
allocator: Allocator,
vkb: BaseDispatch,
instance: Instance,
surface: vk.SurfaceKHR,
pdev: vk.PhysicalDevice,
props: vk.PhysicalDeviceProperties,
mem_props: vk.PhysicalDeviceMemoryProperties,
dev: vk.Device,
dev: Device,
graphics_queue: Queue,
present_queue: Queue,
pub fn init(allocator: Allocator, app_name: [*:0]const u8, window: *c.GLFWwindow) !GraphicsContext {
var self: GraphicsContext = undefined;
self.allocator = allocator;
self.vkb = try BaseDispatch.load(c.glfwGetInstanceProcAddress);
var glfw_exts_count: u32 = 0;
@@ -57,37 +64,49 @@ pub const GraphicsContext = struct {
.api_version = vk.API_VERSION_1_2,
};
self.instance = try self.vkb.createInstance(&.{
const instance = try self.vkb.createInstance(&.{
.p_application_info = &app_info,
.enabled_extension_count = glfw_exts_count,
.pp_enabled_extension_names = @as([*]const [*:0]const u8, @ptrCast(glfw_exts)),
.pp_enabled_extension_names = @ptrCast(glfw_exts),
}, null);
self.vki = try InstanceDispatch.load(self.instance, self.vkb.dispatch.vkGetInstanceProcAddr);
errdefer self.vki.destroyInstance(self.instance, null);
const vki = try allocator.create(InstanceDispatch);
errdefer allocator.destroy(vki);
vki.* = try InstanceDispatch.load(instance, self.vkb.dispatch.vkGetInstanceProcAddr);
self.instance = Instance.init(instance, vki);
errdefer self.instance.destroyInstance(null);
self.surface = try createSurface(self.instance, window);
errdefer self.vki.destroySurfaceKHR(self.instance, self.surface, null);
errdefer self.instance.destroySurfaceKHR(self.surface, null);
const candidate = try pickPhysicalDevice(self.vki, self.instance, allocator, self.surface);
const candidate = try pickPhysicalDevice(self.instance, allocator, self.surface);
self.pdev = candidate.pdev;
self.props = candidate.props;
self.dev = try initializeCandidate(self.vki, candidate);
self.vkd = try DeviceDispatch.load(self.dev, self.vki.dispatch.vkGetDeviceProcAddr);
errdefer self.vkd.destroyDevice(self.dev, null);
self.graphics_queue = Queue.init(self.vkd, self.dev, candidate.queues.graphics_family);
self.present_queue = Queue.init(self.vkd, self.dev, candidate.queues.present_family);
const dev = try initializeCandidate(self.instance, candidate);
self.mem_props = self.vki.getPhysicalDeviceMemoryProperties(self.pdev);
const vkd = try allocator.create(DeviceDispatch);
errdefer allocator.destroy(vkd);
vkd.* = try DeviceDispatch.load(dev, self.instance.wrapper.dispatch.vkGetDeviceProcAddr);
self.dev = Device.init(dev, vkd);
errdefer self.dev.destroyDevice(null);
self.graphics_queue = Queue.init(self.dev, candidate.queues.graphics_family);
self.present_queue = Queue.init(self.dev, candidate.queues.present_family);
self.mem_props = self.instance.getPhysicalDeviceMemoryProperties(self.pdev);
return self;
}
pub fn deinit(self: GraphicsContext) void {
self.vkd.destroyDevice(self.dev, null);
self.vki.destroySurfaceKHR(self.instance, self.surface, null);
self.vki.destroyInstance(self.instance, null);
self.dev.destroyDevice(null);
self.instance.destroySurfaceKHR(self.surface, null);
self.instance.destroyInstance(null);
// Don't forget to free the tables to prevent a memory leak.
self.allocator.destroy(self.dev.wrapper);
self.allocator.destroy(self.instance.wrapper);
}
pub fn deviceName(self: *const GraphicsContext) []const u8 {
@@ -105,7 +124,7 @@ pub const GraphicsContext = struct {
}
pub fn allocate(self: GraphicsContext, requirements: vk.MemoryRequirements, flags: vk.MemoryPropertyFlags) !vk.DeviceMemory {
return try self.vkd.allocateMemory(self.dev, &.{
return try self.dev.allocateMemory(&.{
.allocation_size = requirements.size,
.memory_type_index = try self.findMemoryTypeIndex(requirements.memory_type_bits, flags),
}, null);
@@ -116,24 +135,24 @@ pub const Queue = struct {
handle: vk.Queue,
family: u32,
fn init(vkd: DeviceDispatch, dev: vk.Device, family: u32) Queue {
fn init(device: Device, family: u32) Queue {
return .{
.handle = vkd.getDeviceQueue(dev, family, 0),
.handle = device.getDeviceQueue(family, 0),
.family = family,
};
}
};
fn createSurface(instance: vk.Instance, window: *c.GLFWwindow) !vk.SurfaceKHR {
fn createSurface(instance: Instance, window: *c.GLFWwindow) !vk.SurfaceKHR {
var surface: vk.SurfaceKHR = undefined;
if (c.glfwCreateWindowSurface(instance, window, null, &surface) != .success) {
if (c.glfwCreateWindowSurface(instance.handle, window, null, &surface) != .success) {
return error.SurfaceInitFailed;
}
return surface;
}
fn initializeCandidate(vki: InstanceDispatch, candidate: DeviceCandidate) !vk.Device {
fn initializeCandidate(instance: Instance, candidate: DeviceCandidate) !vk.Device {
const priority = [_]f32{1};
const qci = [_]vk.DeviceQueueCreateInfo{
.{
@@ -153,11 +172,11 @@ fn initializeCandidate(vki: InstanceDispatch, candidate: DeviceCandidate) !vk.De
else
2;
return try vki.createDevice(candidate.pdev, &.{
return try instance.createDevice(candidate.pdev, &.{
.queue_create_info_count = queue_count,
.p_queue_create_infos = &qci,
.enabled_extension_count = required_device_extensions.len,
.pp_enabled_extension_names = @as([*]const [*:0]const u8, @ptrCast(&required_device_extensions)),
.pp_enabled_extension_names = @ptrCast(&required_device_extensions),
}, null);
}
@@ -173,21 +192,20 @@ const QueueAllocation = struct {
};
fn pickPhysicalDevice(
vki: InstanceDispatch,
instance: vk.Instance,
instance: Instance,
allocator: Allocator,
surface: vk.SurfaceKHR,
) !DeviceCandidate {
var device_count: u32 = undefined;
_ = try vki.enumeratePhysicalDevices(instance, &device_count, null);
_ = try instance.enumeratePhysicalDevices(&device_count, null);
const pdevs = try allocator.alloc(vk.PhysicalDevice, device_count);
defer allocator.free(pdevs);
_ = try vki.enumeratePhysicalDevices(instance, &device_count, pdevs.ptr);
_ = try instance.enumeratePhysicalDevices(&device_count, pdevs.ptr);
for (pdevs) |pdev| {
if (try checkSuitable(vki, pdev, allocator, surface)) |candidate| {
if (try checkSuitable(instance, pdev, allocator, surface)) |candidate| {
return candidate;
}
}
@@ -196,22 +214,21 @@ fn pickPhysicalDevice(
}
fn checkSuitable(
vki: InstanceDispatch,
instance: Instance,
pdev: vk.PhysicalDevice,
allocator: Allocator,
surface: vk.SurfaceKHR,
) !?DeviceCandidate {
const props = vki.getPhysicalDeviceProperties(pdev);
if (!try checkExtensionSupport(vki, pdev, allocator)) {
if (!try checkExtensionSupport(instance, pdev, allocator)) {
return null;
}
if (!try checkSurfaceSupport(vki, pdev, surface)) {
if (!try checkSurfaceSupport(instance, pdev, surface)) {
return null;
}
if (try allocateQueues(vki, pdev, allocator, surface)) |allocation| {
if (try allocateQueues(instance, pdev, allocator, surface)) |allocation| {
const props = instance.getPhysicalDeviceProperties(pdev);
return DeviceCandidate{
.pdev = pdev,
.props = props,
@@ -222,13 +239,13 @@ fn checkSuitable(
return null;
}
fn allocateQueues(vki: InstanceDispatch, pdev: vk.PhysicalDevice, allocator: Allocator, surface: vk.SurfaceKHR) !?QueueAllocation {
fn allocateQueues(instance: Instance, pdev: vk.PhysicalDevice, allocator: Allocator, surface: vk.SurfaceKHR) !?QueueAllocation {
var family_count: u32 = undefined;
vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, null);
instance.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, null);
const families = try allocator.alloc(vk.QueueFamilyProperties, family_count);
defer allocator.free(families);
vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, families.ptr);
instance.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, families.ptr);
var graphics_family: ?u32 = null;
var present_family: ?u32 = null;
@@ -240,7 +257,7 @@ fn allocateQueues(vki: InstanceDispatch, pdev: vk.PhysicalDevice, allocator: All
graphics_family = family;
}
if (present_family == null and (try vki.getPhysicalDeviceSurfaceSupportKHR(pdev, family, surface)) == vk.TRUE) {
if (present_family == null and (try instance.getPhysicalDeviceSurfaceSupportKHR(pdev, family, surface)) == vk.TRUE) {
present_family = family;
}
}
@@ -255,28 +272,28 @@ fn allocateQueues(vki: InstanceDispatch, pdev: vk.PhysicalDevice, allocator: All
return null;
}
fn checkSurfaceSupport(vki: InstanceDispatch, pdev: vk.PhysicalDevice, surface: vk.SurfaceKHR) !bool {
fn checkSurfaceSupport(instance: Instance, pdev: vk.PhysicalDevice, surface: vk.SurfaceKHR) !bool {
var format_count: u32 = undefined;
_ = try vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &format_count, null);
_ = try instance.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &format_count, null);
var present_mode_count: u32 = undefined;
_ = try vki.getPhysicalDeviceSurfacePresentModesKHR(pdev, surface, &present_mode_count, null);
_ = try instance.getPhysicalDeviceSurfacePresentModesKHR(pdev, surface, &present_mode_count, null);
return format_count > 0 and present_mode_count > 0;
}
fn checkExtensionSupport(
vki: InstanceDispatch,
instance: Instance,
pdev: vk.PhysicalDevice,
allocator: Allocator,
) !bool {
var count: u32 = undefined;
_ = try vki.enumerateDeviceExtensionProperties(pdev, null, &count, null);
_ = try instance.enumerateDeviceExtensionProperties(pdev, null, &count, null);
const propsv = try allocator.alloc(vk.ExtensionProperties, count);
defer allocator.free(propsv);
_ = try vki.enumerateDeviceExtensionProperties(pdev, null, &count, propsv.ptr);
_ = try instance.enumerateDeviceExtensionProperties(pdev, null, &count, propsv.ptr);
for (required_device_extensions) |ext| {
for (propsv) |props| {

View File

@@ -26,7 +26,7 @@ pub const Swapchain = struct {
}
pub fn initRecycle(gc: *const GraphicsContext, allocator: Allocator, extent: vk.Extent2D, old_handle: vk.SwapchainKHR) !Swapchain {
const caps = try gc.vki.getPhysicalDeviceSurfaceCapabilitiesKHR(gc.pdev, gc.surface);
const caps = try gc.instance.getPhysicalDeviceSurfaceCapabilitiesKHR(gc.pdev, gc.surface);
const actual_extent = findActualExtent(caps, extent);
if (actual_extent.width == 0 or actual_extent.height == 0) {
return error.InvalidSurfaceDimensions;
@@ -46,7 +46,7 @@ pub const Swapchain = struct {
else
.exclusive;
const handle = try gc.vkd.createSwapchainKHR(gc.dev, &.{
const handle = try gc.dev.createSwapchainKHR(&.{
.surface = gc.surface,
.min_image_count = image_count,
.image_format = surface_format.format,
@@ -63,11 +63,11 @@ pub const Swapchain = struct {
.clipped = vk.TRUE,
.old_swapchain = old_handle,
}, null);
errdefer gc.vkd.destroySwapchainKHR(gc.dev, handle, null);
errdefer gc.dev.destroySwapchainKHR(handle, null);
if (old_handle != .null_handle) {
// Apparently, the old swapchain handle still needs to be destroyed after recreating.
gc.vkd.destroySwapchainKHR(gc.dev, old_handle, null);
gc.dev.destroySwapchainKHR(old_handle, null);
}
const swap_images = try initSwapchainImages(gc, handle, surface_format.format, allocator);
@@ -76,10 +76,10 @@ pub const Swapchain = struct {
allocator.free(swap_images);
}
var next_image_acquired = try gc.vkd.createSemaphore(gc.dev, &.{}, null);
errdefer gc.vkd.destroySemaphore(gc.dev, next_image_acquired, null);
var next_image_acquired = try gc.dev.createSemaphore(&.{}, null);
errdefer gc.dev.destroySemaphore(next_image_acquired, null);
const result = try gc.vkd.acquireNextImageKHR(gc.dev, handle, std.math.maxInt(u64), next_image_acquired, .null_handle);
const result = try gc.dev.acquireNextImageKHR(handle, std.math.maxInt(u64), next_image_acquired, .null_handle);
if (result.result != .success) {
return error.ImageAcquireFailed;
}
@@ -101,7 +101,7 @@ pub const Swapchain = struct {
fn deinitExceptSwapchain(self: Swapchain) void {
for (self.swap_images) |si| si.deinit(self.gc);
self.allocator.free(self.swap_images);
self.gc.vkd.destroySemaphore(self.gc.dev, self.next_image_acquired, null);
self.gc.dev.destroySemaphore(self.next_image_acquired, null);
}
pub fn waitForAllFences(self: Swapchain) !void {
@@ -110,7 +110,7 @@ pub const Swapchain = struct {
pub fn deinit(self: Swapchain) void {
self.deinitExceptSwapchain();
self.gc.vkd.destroySwapchainKHR(self.gc.dev, self.handle, null);
self.gc.dev.destroySwapchainKHR(self.handle, null);
}
pub fn recreate(self: *Swapchain, new_extent: vk.Extent2D) !void {
@@ -150,11 +150,11 @@ pub const Swapchain = struct {
// Step 1: Make sure the current frame has finished rendering
const current = self.currentSwapImage();
try current.waitForFence(self.gc);
try self.gc.vkd.resetFences(self.gc.dev, 1, @ptrCast(&current.frame_fence));
try self.gc.dev.resetFences(1, @ptrCast(&current.frame_fence));
// Step 2: Submit the command buffer
const wait_stage = [_]vk.PipelineStageFlags{.{ .top_of_pipe_bit = true }};
try self.gc.vkd.queueSubmit(self.gc.graphics_queue.handle, 1, &[_]vk.SubmitInfo{.{
try self.gc.dev.queueSubmit(self.gc.graphics_queue.handle, 1, &[_]vk.SubmitInfo{.{
.wait_semaphore_count = 1,
.p_wait_semaphores = @ptrCast(&current.image_acquired),
.p_wait_dst_stage_mask = &wait_stage,
@@ -165,17 +165,16 @@ pub const Swapchain = struct {
}}, current.frame_fence);
// Step 3: Present the current frame
_ = try self.gc.vkd.queuePresentKHR(self.gc.present_queue.handle, &.{
_ = try self.gc.dev.queuePresentKHR(self.gc.present_queue.handle, &.{
.wait_semaphore_count = 1,
.p_wait_semaphores = @as([*]const vk.Semaphore, @ptrCast(&current.render_finished)),
.p_wait_semaphores = @ptrCast(&current.render_finished),
.swapchain_count = 1,
.p_swapchains = @as([*]const vk.SwapchainKHR, @ptrCast(&self.handle)),
.p_image_indices = @as([*]const u32, @ptrCast(&self.image_index)),
.p_swapchains = @ptrCast(&self.handle),
.p_image_indices = @ptrCast(&self.image_index),
});
// Step 4: Acquire next frame
const result = try self.gc.vkd.acquireNextImageKHR(
self.gc.dev,
const result = try self.gc.dev.acquireNextImageKHR(
self.handle,
std.math.maxInt(u64),
self.next_image_acquired,
@@ -201,7 +200,7 @@ const SwapImage = struct {
frame_fence: vk.Fence,
fn init(gc: *const GraphicsContext, image: vk.Image, format: vk.Format) !SwapImage {
const view = try gc.vkd.createImageView(gc.dev, &.{
const view = try gc.dev.createImageView(&.{
.image = image,
.view_type = .@"2d",
.format = format,
@@ -214,16 +213,16 @@ const SwapImage = struct {
.layer_count = 1,
},
}, null);
errdefer gc.vkd.destroyImageView(gc.dev, view, null);
errdefer gc.dev.destroyImageView(view, null);
const image_acquired = try gc.vkd.createSemaphore(gc.dev, &.{}, null);
errdefer gc.vkd.destroySemaphore(gc.dev, image_acquired, null);
const image_acquired = try gc.dev.createSemaphore(&.{}, null);
errdefer gc.dev.destroySemaphore(image_acquired, null);
const render_finished = try gc.vkd.createSemaphore(gc.dev, &.{}, null);
errdefer gc.vkd.destroySemaphore(gc.dev, render_finished, null);
const render_finished = try gc.dev.createSemaphore(&.{}, null);
errdefer gc.dev.destroySemaphore(render_finished, null);
const frame_fence = try gc.vkd.createFence(gc.dev, &.{ .flags = .{ .signaled_bit = true } }, null);
errdefer gc.vkd.destroyFence(gc.dev, frame_fence, null);
const frame_fence = try gc.dev.createFence(&.{ .flags = .{ .signaled_bit = true } }, null);
errdefer gc.dev.destroyFence(frame_fence, null);
return SwapImage{
.image = image,
@@ -236,23 +235,23 @@ const SwapImage = struct {
fn deinit(self: SwapImage, gc: *const GraphicsContext) void {
self.waitForFence(gc) catch return;
gc.vkd.destroyImageView(gc.dev, self.view, null);
gc.vkd.destroySemaphore(gc.dev, self.image_acquired, null);
gc.vkd.destroySemaphore(gc.dev, self.render_finished, null);
gc.vkd.destroyFence(gc.dev, self.frame_fence, null);
gc.dev.destroyImageView(self.view, null);
gc.dev.destroySemaphore(self.image_acquired, null);
gc.dev.destroySemaphore(self.render_finished, null);
gc.dev.destroyFence(self.frame_fence, null);
}
fn waitForFence(self: SwapImage, gc: *const GraphicsContext) !void {
_ = try gc.vkd.waitForFences(gc.dev, 1, @ptrCast(&self.frame_fence), vk.TRUE, std.math.maxInt(u64));
_ = try gc.dev.waitForFences(1, @ptrCast(&self.frame_fence), vk.TRUE, std.math.maxInt(u64));
}
};
fn initSwapchainImages(gc: *const GraphicsContext, swapchain: vk.SwapchainKHR, format: vk.Format, allocator: Allocator) ![]SwapImage {
var count: u32 = undefined;
_ = try gc.vkd.getSwapchainImagesKHR(gc.dev, swapchain, &count, null);
_ = try gc.dev.getSwapchainImagesKHR(swapchain, &count, null);
const images = try allocator.alloc(vk.Image, count);
defer allocator.free(images);
_ = try gc.vkd.getSwapchainImagesKHR(gc.dev, swapchain, &count, images.ptr);
_ = try gc.dev.getSwapchainImagesKHR(swapchain, &count, images.ptr);
const swap_images = try allocator.alloc(SwapImage, count);
errdefer allocator.free(swap_images);
@@ -275,10 +274,10 @@ fn findSurfaceFormat(gc: *const GraphicsContext, allocator: Allocator) !vk.Surfa
};
var count: u32 = undefined;
_ = try gc.vki.getPhysicalDeviceSurfaceFormatsKHR(gc.pdev, gc.surface, &count, null);
_ = try gc.instance.getPhysicalDeviceSurfaceFormatsKHR(gc.pdev, gc.surface, &count, null);
const surface_formats = try allocator.alloc(vk.SurfaceFormatKHR, count);
defer allocator.free(surface_formats);
_ = try gc.vki.getPhysicalDeviceSurfaceFormatsKHR(gc.pdev, gc.surface, &count, surface_formats.ptr);
_ = try gc.instance.getPhysicalDeviceSurfaceFormatsKHR(gc.pdev, gc.surface, &count, surface_formats.ptr);
for (surface_formats) |sfmt| {
if (std.meta.eql(sfmt, preferred)) {
@@ -291,10 +290,10 @@ fn findSurfaceFormat(gc: *const GraphicsContext, allocator: Allocator) !vk.Surfa
fn findPresentMode(gc: *const GraphicsContext, allocator: Allocator) !vk.PresentModeKHR {
var count: u32 = undefined;
_ = try gc.vki.getPhysicalDeviceSurfacePresentModesKHR(gc.pdev, gc.surface, &count, null);
_ = try gc.instance.getPhysicalDeviceSurfacePresentModesKHR(gc.pdev, gc.surface, &count, null);
const present_modes = try allocator.alloc(vk.PresentModeKHR, count);
defer allocator.free(present_modes);
_ = try gc.vki.getPhysicalDeviceSurfacePresentModesKHR(gc.pdev, gc.surface, &count, present_modes.ptr);
_ = try gc.instance.getPhysicalDeviceSurfacePresentModesKHR(gc.pdev, gc.surface, &count, present_modes.ptr);
const preferred = [_]vk.PresentModeKHR{
.mailbox_khr,

View File

@@ -73,39 +73,39 @@ pub fn main() !void {
var swapchain = try Swapchain.init(&gc, allocator, extent);
defer swapchain.deinit();
const pipeline_layout = try gc.vkd.createPipelineLayout(gc.dev, &.{
const pipeline_layout = try gc.dev.createPipelineLayout(&.{
.flags = .{},
.set_layout_count = 0,
.p_set_layouts = undefined,
.push_constant_range_count = 0,
.p_push_constant_ranges = undefined,
}, null);
defer gc.vkd.destroyPipelineLayout(gc.dev, pipeline_layout, null);
defer gc.dev.destroyPipelineLayout(pipeline_layout, null);
const render_pass = try createRenderPass(&gc, swapchain);
defer gc.vkd.destroyRenderPass(gc.dev, render_pass, null);
defer gc.dev.destroyRenderPass(render_pass, null);
const pipeline = try createPipeline(&gc, pipeline_layout, render_pass);
defer gc.vkd.destroyPipeline(gc.dev, pipeline, null);
defer gc.dev.destroyPipeline(pipeline, null);
var framebuffers = try createFramebuffers(&gc, allocator, render_pass, swapchain);
defer destroyFramebuffers(&gc, allocator, framebuffers);
const pool = try gc.vkd.createCommandPool(gc.dev, &.{
const pool = try gc.dev.createCommandPool(&.{
.queue_family_index = gc.graphics_queue.family,
}, null);
defer gc.vkd.destroyCommandPool(gc.dev, pool, null);
defer gc.dev.destroyCommandPool(pool, null);
const buffer = try gc.vkd.createBuffer(gc.dev, &.{
const buffer = try gc.dev.createBuffer(&.{
.size = @sizeOf(@TypeOf(vertices)),
.usage = .{ .transfer_dst_bit = true, .vertex_buffer_bit = true },
.sharing_mode = .exclusive,
}, null);
defer gc.vkd.destroyBuffer(gc.dev, buffer, null);
const mem_reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, buffer);
defer gc.dev.destroyBuffer(buffer, null);
const mem_reqs = gc.dev.getBufferMemoryRequirements(buffer);
const memory = try gc.allocate(mem_reqs, .{ .device_local_bit = true });
defer gc.vkd.freeMemory(gc.dev, memory, null);
try gc.vkd.bindBufferMemory(gc.dev, buffer, memory, 0);
defer gc.dev.freeMemory(memory, null);
try gc.dev.bindBufferMemory(buffer, memory, 0);
try uploadVertices(&gc, pool, buffer);
@@ -164,24 +164,24 @@ pub fn main() !void {
}
try swapchain.waitForAllFences();
try gc.vkd.deviceWaitIdle(gc.dev);
try gc.dev.deviceWaitIdle();
}
fn uploadVertices(gc: *const GraphicsContext, pool: vk.CommandPool, buffer: vk.Buffer) !void {
const staging_buffer = try gc.vkd.createBuffer(gc.dev, &.{
const staging_buffer = try gc.dev.createBuffer(&.{
.size = @sizeOf(@TypeOf(vertices)),
.usage = .{ .transfer_src_bit = true },
.sharing_mode = .exclusive,
}, null);
defer gc.vkd.destroyBuffer(gc.dev, staging_buffer, null);
const mem_reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, staging_buffer);
defer gc.dev.destroyBuffer(staging_buffer, null);
const mem_reqs = gc.dev.getBufferMemoryRequirements(staging_buffer);
const staging_memory = try gc.allocate(mem_reqs, .{ .host_visible_bit = true, .host_coherent_bit = true });
defer gc.vkd.freeMemory(gc.dev, staging_memory, null);
try gc.vkd.bindBufferMemory(gc.dev, staging_buffer, staging_memory, 0);
defer gc.dev.freeMemory(staging_memory, null);
try gc.dev.bindBufferMemory(staging_buffer, staging_memory, 0);
{
const data = try gc.vkd.mapMemory(gc.dev, staging_memory, 0, vk.WHOLE_SIZE, .{});
defer gc.vkd.unmapMemory(gc.dev, staging_memory);
const data = try gc.dev.mapMemory(staging_memory, 0, vk.WHOLE_SIZE, .{});
defer gc.dev.unmapMemory(staging_memory);
const gpu_vertices: [*]Vertex = @ptrCast(@alignCast(data));
@memcpy(gpu_vertices, vertices[0..]);
@@ -191,15 +191,17 @@ fn uploadVertices(gc: *const GraphicsContext, pool: vk.CommandPool, buffer: vk.B
}
fn copyBuffer(gc: *const GraphicsContext, pool: vk.CommandPool, dst: vk.Buffer, src: vk.Buffer, size: vk.DeviceSize) !void {
var cmdbuf: vk.CommandBuffer = undefined;
try gc.vkd.allocateCommandBuffers(gc.dev, &.{
var cmdbuf_handle: vk.CommandBuffer = undefined;
try gc.dev.allocateCommandBuffers(&.{
.command_pool = pool,
.level = .primary,
.command_buffer_count = 1,
}, @ptrCast(&cmdbuf));
defer gc.vkd.freeCommandBuffers(gc.dev, pool, 1, @ptrCast(&cmdbuf));
}, @ptrCast(&cmdbuf_handle));
defer gc.dev.freeCommandBuffers(pool, 1, @ptrCast(&cmdbuf_handle));
try gc.vkd.beginCommandBuffer(cmdbuf, &.{
const cmdbuf = GraphicsContext.CommandBuffer.init(cmdbuf_handle, gc.dev.wrapper);
try cmdbuf.beginCommandBuffer(&.{
.flags = .{ .one_time_submit_bit = true },
});
@@ -208,17 +210,17 @@ fn copyBuffer(gc: *const GraphicsContext, pool: vk.CommandPool, dst: vk.Buffer,
.dst_offset = 0,
.size = size,
};
gc.vkd.cmdCopyBuffer(cmdbuf, src, dst, 1, @ptrCast(&region));
cmdbuf.copyBuffer(src, dst, 1, @ptrCast(&region));
try gc.vkd.endCommandBuffer(cmdbuf);
try cmdbuf.endCommandBuffer();
const si = vk.SubmitInfo{
.command_buffer_count = 1,
.p_command_buffers = @ptrCast(&cmdbuf),
.p_command_buffers = (&cmdbuf.handle)[0..1],
.p_wait_dst_stage_mask = undefined,
};
try gc.vkd.queueSubmit(gc.graphics_queue.handle, 1, @ptrCast(&si), .null_handle);
try gc.vkd.queueWaitIdle(gc.graphics_queue.handle);
try gc.dev.queueSubmit(gc.graphics_queue.handle, 1, @ptrCast(&si), .null_handle);
try gc.dev.queueWaitIdle(gc.graphics_queue.handle);
}
fn createCommandBuffers(
@@ -234,12 +236,12 @@ fn createCommandBuffers(
const cmdbufs = try allocator.alloc(vk.CommandBuffer, framebuffers.len);
errdefer allocator.free(cmdbufs);
try gc.vkd.allocateCommandBuffers(gc.dev, &.{
try gc.dev.allocateCommandBuffers(&.{
.command_pool = pool,
.level = .primary,
.command_buffer_count = @as(u32, @truncate(cmdbufs.len)),
.command_buffer_count = @intCast(cmdbufs.len),
}, cmdbufs.ptr);
errdefer gc.vkd.freeCommandBuffers(gc.dev, pool, @truncate(cmdbufs.len), cmdbufs.ptr);
errdefer gc.dev.freeCommandBuffers(pool, @intCast(cmdbufs.len), cmdbufs.ptr);
const clear = vk.ClearValue{
.color = .{ .float_32 = .{ 0, 0, 0, 1 } },
@@ -248,8 +250,8 @@ fn createCommandBuffers(
const viewport = vk.Viewport{
.x = 0,
.y = 0,
.width = @as(f32, @floatFromInt(extent.width)),
.height = @as(f32, @floatFromInt(extent.height)),
.width = @floatFromInt(extent.width),
.height = @floatFromInt(extent.height),
.min_depth = 0,
.max_depth = 1,
};
@@ -260,10 +262,10 @@ fn createCommandBuffers(
};
for (cmdbufs, framebuffers) |cmdbuf, framebuffer| {
try gc.vkd.beginCommandBuffer(cmdbuf, &.{});
try gc.dev.beginCommandBuffer(cmdbuf, &.{});
gc.vkd.cmdSetViewport(cmdbuf, 0, 1, @ptrCast(&viewport));
gc.vkd.cmdSetScissor(cmdbuf, 0, 1, @ptrCast(&scissor));
gc.dev.cmdSetViewport(cmdbuf, 0, 1, @ptrCast(&viewport));
gc.dev.cmdSetScissor(cmdbuf, 0, 1, @ptrCast(&scissor));
// This needs to be a separate definition - see https://github.com/ziglang/zig/issues/7627.
const render_area = vk.Rect2D{
@@ -271,28 +273,28 @@ fn createCommandBuffers(
.extent = extent,
};
gc.vkd.cmdBeginRenderPass(cmdbuf, &.{
gc.dev.cmdBeginRenderPass(cmdbuf, &.{
.render_pass = render_pass,
.framebuffer = framebuffer,
.render_area = render_area,
.clear_value_count = 1,
.p_clear_values = @as([*]const vk.ClearValue, @ptrCast(&clear)),
.p_clear_values = @ptrCast(&clear),
}, .@"inline");
gc.vkd.cmdBindPipeline(cmdbuf, .graphics, pipeline);
gc.dev.cmdBindPipeline(cmdbuf, .graphics, pipeline);
const offset = [_]vk.DeviceSize{0};
gc.vkd.cmdBindVertexBuffers(cmdbuf, 0, 1, @ptrCast(&buffer), &offset);
gc.vkd.cmdDraw(cmdbuf, vertices.len, 1, 0, 0);
gc.dev.cmdBindVertexBuffers(cmdbuf, 0, 1, @ptrCast(&buffer), &offset);
gc.dev.cmdDraw(cmdbuf, vertices.len, 1, 0, 0);
gc.vkd.cmdEndRenderPass(cmdbuf);
try gc.vkd.endCommandBuffer(cmdbuf);
gc.dev.cmdEndRenderPass(cmdbuf);
try gc.dev.endCommandBuffer(cmdbuf);
}
return cmdbufs;
}
fn destroyCommandBuffers(gc: *const GraphicsContext, pool: vk.CommandPool, allocator: Allocator, cmdbufs: []vk.CommandBuffer) void {
gc.vkd.freeCommandBuffers(gc.dev, pool, @truncate(cmdbufs.len), cmdbufs.ptr);
gc.dev.freeCommandBuffers(pool, @truncate(cmdbufs.len), cmdbufs.ptr);
allocator.free(cmdbufs);
}
@@ -301,13 +303,13 @@ fn createFramebuffers(gc: *const GraphicsContext, allocator: Allocator, render_p
errdefer allocator.free(framebuffers);
var i: usize = 0;
errdefer for (framebuffers[0..i]) |fb| gc.vkd.destroyFramebuffer(gc.dev, fb, null);
errdefer for (framebuffers[0..i]) |fb| gc.dev.destroyFramebuffer(fb, null);
for (framebuffers) |*fb| {
fb.* = try gc.vkd.createFramebuffer(gc.dev, &.{
fb.* = try gc.dev.createFramebuffer(&.{
.render_pass = render_pass,
.attachment_count = 1,
.p_attachments = @as([*]const vk.ImageView, @ptrCast(&swapchain.swap_images[i].view)),
.p_attachments = @ptrCast(&swapchain.swap_images[i].view),
.width = swapchain.extent.width,
.height = swapchain.extent.height,
.layers = 1,
@@ -319,7 +321,7 @@ fn createFramebuffers(gc: *const GraphicsContext, allocator: Allocator, render_p
}
fn destroyFramebuffers(gc: *const GraphicsContext, allocator: Allocator, framebuffers: []const vk.Framebuffer) void {
for (framebuffers) |fb| gc.vkd.destroyFramebuffer(gc.dev, fb, null);
for (framebuffers) |fb| gc.dev.destroyFramebuffer(fb, null);
allocator.free(framebuffers);
}
@@ -346,11 +348,11 @@ fn createRenderPass(gc: *const GraphicsContext, swapchain: Swapchain) !vk.Render
.p_color_attachments = @ptrCast(&color_attachment_ref),
};
return try gc.vkd.createRenderPass(gc.dev, &.{
return try gc.dev.createRenderPass(&.{
.attachment_count = 1,
.p_attachments = @as([*]const vk.AttachmentDescription, @ptrCast(&color_attachment)),
.p_attachments = @ptrCast(&color_attachment),
.subpass_count = 1,
.p_subpasses = @as([*]const vk.SubpassDescription, @ptrCast(&subpass)),
.p_subpasses = @ptrCast(&subpass),
}, null);
}
@@ -359,17 +361,17 @@ fn createPipeline(
layout: vk.PipelineLayout,
render_pass: vk.RenderPass,
) !vk.Pipeline {
const vert = try gc.vkd.createShaderModule(gc.dev, &.{
const vert = try gc.dev.createShaderModule(&.{
.code_size = shaders.triangle_vert.len,
.p_code = @as([*]const u32, @ptrCast(&shaders.triangle_vert)),
.p_code = @ptrCast(&shaders.triangle_vert),
}, null);
defer gc.vkd.destroyShaderModule(gc.dev, vert, null);
defer gc.dev.destroyShaderModule(vert, null);
const frag = try gc.vkd.createShaderModule(gc.dev, &.{
const frag = try gc.dev.createShaderModule(&.{
.code_size = shaders.triangle_frag.len,
.p_code = @as([*]const u32, @ptrCast(&shaders.triangle_frag)),
.p_code = @ptrCast(&shaders.triangle_frag),
}, null);
defer gc.vkd.destroyShaderModule(gc.dev, frag, null);
defer gc.dev.destroyShaderModule(frag, null);
const pssci = [_]vk.PipelineShaderStageCreateInfo{
.{
@@ -471,8 +473,7 @@ fn createPipeline(
};
var pipeline: vk.Pipeline = undefined;
_ = try gc.vkd.createGraphicsPipelines(
gc.dev,
_ = try gc.dev.createGraphicsPipelines(
.null_handle,
1,
@ptrCast(&gpci),

View File

@@ -11,6 +11,21 @@ fn invalidUsage(prog_name: []const u8, comptime fmt: []const u8, args: anytype)
std.process.exit(1);
}
/// Print the parse errors of `tree` to stderr, one per error, in the form:
///   (vulkan-zig error):LINE:COL: error: MESSAGE
/// followed by the offending source line and a caret under the column.
fn reportParseErrors(tree: std.zig.Ast) !void {
    const stderr = std.io.getStdErr().writer();

    for (tree.errors) |parse_error| {
        const location = tree.tokenLocation(0, parse_error.token);
        // Lines and columns are reported 1-based to the user.
        try stderr.print("(vulkan-zig error):{}:{}: error: ", .{ location.line + 1, location.column + 1 });
        try tree.renderError(parse_error, stderr);
        try stderr.print("\n{s}\n", .{tree.source[location.line_start..location.line_end]});
        // Pad up to the error column, then point at it.
        var col: usize = 0;
        while (col < location.column) : (col += 1) {
            try stderr.writeAll(" ");
        }
        try stderr.writeAll("^\n");
    }
}
pub fn main() void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
@@ -23,6 +38,7 @@ pub fn main() void {
var maybe_xml_path: ?[]const u8 = null;
var maybe_out_path: ?[]const u8 = null;
var debug: bool = false;
var api = generator.Api.vulkan;
while (args.next()) |arg| {
@@ -40,6 +56,7 @@ pub fn main() void {
\\Options:
\\-h --help show this message and exit.
\\-a --api <api> Generate API for 'vulkan' or 'vulkansc'. Defaults to 'vulkan'.
\\--debug Write out unformatted source if does not parse correctly.
\\
,
.{prog_name},
@@ -59,6 +76,8 @@ pub fn main() void {
maybe_xml_path = arg;
} else if (maybe_out_path == null) {
maybe_out_path = arg;
} else if (std.mem.eql(u8, arg, "--debug")) {
debug = true;
} else {
invalidUsage(prog_name, "superficial argument '{s}'", .{arg});
}
@@ -106,18 +125,24 @@ pub fn main() void {
error.OutOfMemory => @panic("oom"),
};
if (tree.errors.len > 0) {
const formatted = if (tree.errors.len > 0) blk: {
std.log.err("generated invalid zig code", .{});
std.log.err("this is a bug in vulkan-zig", .{});
std.log.err("please make a bug report at https://github.com/Snektron/vulkan-zig/issues/", .{});
std.log.err("or run with --debug to write out unformatted source", .{});
reportParseErrors(tree) catch |err| {
std.log.err("failed to dump ast errors: {s}", .{@errorName(err)});
std.process.exit(1);
}
};
const formatted = tree.render(allocator) catch |err| switch (err) {
if (debug) {
break :blk src;
}
std.process.exit(1);
} else tree.render(allocator) catch |err| switch (err) {
error.OutOfMemory => @panic("oom"),
};
defer allocator.free(formatted);
if (std.fs.path.dirname(out_path)) |dir| {
cwd.makePath(dir) catch |err| {

View File

@@ -144,6 +144,53 @@ const foreign_types = std.StaticStringMap([]const u8).initComptime(.{
.{ "IDirectFBSurface", "opaque {}" },
});
/// The dispatch category of a Vulkan command. This decides which wrapper
/// (Base, Instance, or Device) a generated function is placed into.
const CommandDispatchType = enum {
    base,
    instance,
    device,

    /// Capitalized variant name, used in generated type names
    /// such as "BaseWrapper".
    fn name(self: CommandDispatchType) []const u8 {
        switch (self) {
            .base => return "Base",
            .instance => return "Instance",
            .device => return "Device",
        }
    }

    /// Lowercase variant name. The enum tags are already spelled in
    /// lowercase, so the tag name can be returned directly.
    fn nameLower(self: CommandDispatchType) []const u8 {
        return @tagName(self);
    }
};
/// Dispatchable handle types mapped to the dispatch category of commands
/// that take them as their first parameter. Used both to classify commands
/// (see `classifyCommandDispatch`) and to detect dispatch-handle parameters.
const dispatchable_handles = std.StaticStringMap(CommandDispatchType).initComptime(.{
.{ "VkDevice", .device },
.{ "VkCommandBuffer", .device },
.{ "VkQueue", .device },
.{ "VkInstance", .instance },
});
/// Extra command-name prefixes associated with certain handles. These are
/// stripped from method names on the corresponding proxy type, e.g. the
/// "Cmd" in vkCmdDraw for command buffer proxies.
const additional_namespaces = std.StaticStringMap([]const u8).initComptime(.{
// vkCmdBegin...
.{ "VkCommandBuffer", "Cmd" },
// vkQueueSubmit...
.{ "VkQueue", "Queue" },
});
/// Commands whose dispatch category cannot be derived from their first
/// parameter; their classification follows the spec section linked below.
const dispatch_override_functions = std.StaticStringMap(CommandDispatchType).initComptime(.{
// See https://registry.khronos.org/vulkan/specs/1.3-extensions/html/vkspec.html#initialization-functionpointers
.{ "vkGetInstanceProcAddr", .base },
.{ "vkGetDeviceProcAddr", .instance },
.{ "vkEnumerateInstanceVersion", .base },
.{ "vkEnumerateInstanceExtensionProperties", .base },
.{ "vkEnumerateInstanceLayerProperties", .base },
.{ "vkCreateInstance", .base },
});
fn eqlIgnoreCase(lhs: []const u8, rhs: []const u8) bool {
if (lhs.len != rhs.len) {
return false;
@@ -194,6 +241,7 @@ fn Renderer(comptime WriterType: type) type {
bitflags,
mut_buffer_len,
buffer_len,
dispatch_handle,
other,
};
@@ -206,12 +254,6 @@ fn Renderer(comptime WriterType: type) type {
},
};
const CommandDispatchType = enum {
base,
instance,
device,
};
writer: WriterType,
allocator: Allocator,
registry: *const reg.Registry,
@@ -417,6 +459,10 @@ fn Renderer(comptime WriterType: type) type {
}
},
.name => |name| {
if (dispatchable_handles.get(name) != null) {
return .dispatch_handle;
}
if ((try self.extractBitflagName(name)) != null or self.isFlags(name)) {
return .bitflags;
}
@@ -432,29 +478,14 @@ fn Renderer(comptime WriterType: type) type {
}
fn classifyCommandDispatch(name: []const u8, command: reg.Command) CommandDispatchType {
const device_handles = std.StaticStringMap(void).initComptime(.{
.{ "VkDevice", {} },
.{ "VkCommandBuffer", {} },
.{ "VkQueue", {} },
});
const override_functions = std.StaticStringMap(CommandDispatchType).initComptime(.{
.{ "vkGetInstanceProcAddr", .base },
.{ "vkCreateInstance", .base },
.{ "vkEnumerateInstanceLayerProperties", .base },
.{ "vkEnumerateInstanceExtensionProperties", .base },
.{ "vkEnumerateInstanceVersion", .base },
.{ "vkGetDeviceProcAddr", .instance },
});
if (override_functions.get(name)) |dispatch_type| {
if (dispatch_override_functions.get(name)) |dispatch_type| {
return dispatch_type;
}
switch (command.params[0].param_type) {
.name => |first_param_type_name| {
if (device_handles.get(first_param_type_name)) |_| {
return .device;
if (dispatchable_handles.get(first_param_type_name)) |dispatch_type| {
return dispatch_type;
}
},
else => {},
@@ -478,6 +509,7 @@ fn Renderer(comptime WriterType: type) type {
try self.renderFeatureInfo();
try self.renderExtensionInfo();
try self.renderWrappers();
try self.renderProxies();
}
fn renderApiConstant(self: *Self, api_constant: reg.ApiConstant) !void {
@@ -1109,6 +1141,8 @@ fn Renderer(comptime WriterType: type) type {
\\
);
// The commands in an extension are not pre-sorted based on if they are instance or device functions.
var base_commands = std.BufSet.init(self.allocator);
defer base_commands.deinit();
var instance_commands = std.BufSet.init(self.allocator);
defer instance_commands.deinit();
var device_commands = std.BufSet.init(self.allocator);
@@ -1131,8 +1165,9 @@ fn Renderer(comptime WriterType: type) type {
};
const class = classifyCommandDispatch(command_name, command);
switch (class) {
// Vulkan extensions cannot add base functions.
.base => return error.InvalidRegistry,
.base => {
try base_commands.insert(command_name);
},
.instance => {
try instance_commands.insert(command_name);
},
@@ -1143,6 +1178,10 @@ fn Renderer(comptime WriterType: type) type {
}
}
// and write them out
try self.writer.writeAll(".base_commands = ");
try self.renderCommandFlags(&base_commands);
base_commands.hash_map.clearRetainingCapacity();
try self.writer.writeAll(".instance_commands = ");
try self.renderCommandFlags(&instance_commands);
instance_commands.hash_map.clearRetainingCapacity();
@@ -1223,11 +1262,8 @@ fn Renderer(comptime WriterType: type) type {
}
fn renderWrappersOfDispatchType(self: *Self, dispatch_type: CommandDispatchType) !void {
const name, const name_lower = switch (dispatch_type) {
.base => .{ "Base", "base" },
.instance => .{ "Instance", "instance" },
.device => .{ "Device", "device" },
};
const name = dispatch_type.name();
const name_lower = dispatch_type.nameLower();
try self.writer.print(
\\pub const {0s}CommandFlags = packed struct {{
@@ -1264,7 +1300,7 @@ fn Renderer(comptime WriterType: type) type {
};
if (classifyCommandDispatch(decl.name, command) == dispatch_type) {
try self.writer.writeAll((" " ** 8) ++ ".");
try self.writer.writeByte('.');
try self.writeIdentifierWithCase(.camel, trimVkNamespace(decl.name));
try self.writer.writeAll(" => ");
try self.renderCommandPtrName(decl.name);
@@ -1288,7 +1324,7 @@ fn Renderer(comptime WriterType: type) type {
};
if (classifyCommandDispatch(decl.name, command) == dispatch_type) {
try self.writer.writeAll((" " ** 8) ++ ".");
try self.writer.writeByte('.');
try self.writeIdentifierWithCase(.camel, trimVkNamespace(decl.name));
try self.writer.print(
\\ => "{s}",
@@ -1416,6 +1452,126 @@ fn Renderer(comptime WriterType: type) type {
, .{ .params = params, .first_arg = loader_first_arg });
}
/// Render the proxy types (InstanceProxy, DeviceProxy, CommandBufferProxy,
/// QueueProxy). Each bundles a dispatchable handle with a pointer to the
/// wrapper that holds the loaded function pointers.
fn renderProxies(self: *Self) !void {
// The third argument controls whether commands whose first parameter is a
// *different* handle of the same dispatch type are also included: instance
// and device proxies get them, command buffer and queue proxies do not.
try self.renderProxy(.instance, "VkInstance", true);
try self.renderProxy(.device, "VkDevice", true);
try self.renderProxy(.device, "VkCommandBuffer", false);
try self.renderProxy(.device, "VkQueue", false);
}
/// Render one proxy type for `dispatch_handle` (e.g. "VkInstance").
/// The generated type stores the handle and a wrapper pointer, and exposes
/// a method for every command of `dispatch_type`. When
/// `also_add_other_commands` is set, commands of this dispatch type whose
/// first parameter is a different handle are included as well.
fn renderProxy(
self: *Self,
dispatch_type: CommandDispatchType,
dispatch_handle: []const u8,
also_add_other_commands: bool,
) !void {
const loader_name = dispatch_type.name();
// Emit the head of the proxy struct: the handle and wrapper fields plus
// an init() constructor. {0s} is the trimmed handle name, {1s} selects
// the matching wrapper type.
try self.writer.print(
\\pub fn {0s}Proxy(comptime apis: []const ApiInfo) type {{
\\ return struct {{
\\ const Self = @This();
\\ pub const Wrapper = {1s}Wrapper(apis);
\\
\\ handle: {0s},
\\ wrapper: *const Wrapper,
\\
\\ pub fn init(handle: {0s}, wrapper: *const Wrapper) Self {{
\\ return .{{
\\ .handle = handle,
\\ .wrapper = wrapper,
\\ }};
\\ }}
, .{ trimVkNamespace(dispatch_handle), loader_name });
// Emit a forwarding method for each command that belongs to this proxy.
for (self.registry.decls) |decl| {
// Aliases that cannot be resolved are skipped.
const decl_type = self.resolveAlias(decl.decl_type) catch continue;
const command = switch (decl_type) {
.command => |cmd| cmd,
else => continue,
};
if (classifyCommandDispatch(decl.name, command) != dispatch_type) {
continue;
}
switch (command.params[0].param_type) {
.name => |name| {
// Keep commands dispatched on this exact handle; for other
// handles of the same dispatch type, only keep them when
// also_add_other_commands is set.
const skip = blk: {
if (mem.eql(u8, name, dispatch_handle)) {
break :blk false;
}
break :blk !also_add_other_commands;
};
if (skip) continue;
},
else => continue, // Not a dispatchable handle
}
try self.renderProxyCommand(decl.name, command, dispatch_handle);
}
// Close the inner struct and the Proxy function.
try self.writer.writeAll(
\\ };
\\}
);
}
/// Render one proxy method for command `name`: a thin forwarder that calls
/// the wrapper's function, passing `self.handle` in place of the
/// `dispatch_handle` parameter.
fn renderProxyCommand(self: *Self, name: []const u8, command: reg.Command, dispatch_handle: []const u8) !void {
const returns_vk_result = command.return_type.* == .name and mem.eql(u8, command.return_type.name, "VkResult");
const returns = try self.extractReturns(command);
// Re-export the wrapper's error set under the same name so callers can
// reference it through the proxy type.
if (returns_vk_result) {
try self.writer.writeAll("pub const ");
try self.renderErrorSetName(name);
try self.writer.writeAll(" = Wrapper.");
try self.renderErrorSetName(name);
try self.writer.writeAll(";\n");
}
// Likewise re-export the multi-value return struct, if the command has one.
if (returns.len > 1) {
try self.writer.writeAll("pub const ");
try self.renderReturnStructName(name);
try self.writer.writeAll(" = Wrapper.");
try self.renderReturnStructName(name);
try self.writer.writeAll(";\n");
}
// The .proxy kind omits the dispatch-handle parameter from the prototype.
try self.renderWrapperPrototype(name, command, returns, dispatch_handle, .proxy);
try self.writer.writeAll(
\\{
\\return self.wrapper.
);
try self.writeIdentifierWithCase(.camel, trimVkNamespace(name));
try self.writer.writeByte('(');
// Forward the arguments. Out-pointers are skipped (the wrapper's
// signature does not take them either), and the dispatch handle itself is
// replaced by self.handle; any other dispatch handle is passed through.
for (command.params) |param| {
switch (try self.classifyParam(param)) {
.out_pointer => continue,
.dispatch_handle => {
if (mem.eql(u8, param.param_type.name, dispatch_handle)) {
try self.writer.writeAll("self.handle");
} else {
try self.writeIdentifierWithCase(.snake, param.name);
}
},
else => {
try self.writeIdentifierWithCase(.snake, param.name);
},
}
try self.writer.writeAll(", ");
}
try self.writer.writeAll(
\\);
\\}
\\
);
}
fn derefName(name: []const u8) []const u8 {
var it = id_render.SegmentIterator.init(name);
return if (mem.eql(u8, it.next().?, "p"))
@@ -1424,14 +1580,46 @@ fn Renderer(comptime WriterType: type) type {
name;
}
fn renderWrapperPrototype(self: *Self, name: []const u8, command: reg.Command, returns: []const ReturnValue) !void {
const WrapperKind = enum {
wrapper,
proxy,
};
fn renderWrapperPrototype(
self: *Self,
name: []const u8,
command: reg.Command,
returns: []const ReturnValue,
dispatch_handle: []const u8,
kind: WrapperKind,
) !void {
try self.writer.writeAll("pub fn ");
try self.writeIdentifierWithCase(.camel, trimVkNamespace(name));
const trimmed_name = switch (kind) {
.wrapper => trimVkNamespace(name),
.proxy => blk: {
// Strip additional namespaces: queue for VkQueue and cmd for VkCommandBuffer
const no_vk = trimVkNamespace(name);
const additional_namespace = additional_namespaces.get(dispatch_handle) orelse break :blk no_vk;
if (std.mem.startsWith(u8, no_vk, additional_namespace)) {
break :blk no_vk[additional_namespace.len..];
}
break :blk no_vk;
},
};
try self.writeIdentifierWithCase(.camel, trimmed_name);
try self.writer.writeAll("(self: Self, ");
for (command.params) |param| {
const class = try self.classifyParam(param);
// Skip the dispatch type for proxying wrappers
if (kind == .proxy and class == .dispatch_handle and mem.eql(u8, param.param_type.name, dispatch_handle)) {
continue;
}
// This parameter is returned instead.
if ((try self.classifyParam(param)) == .out_pointer) {
if (class == .out_pointer) {
continue;
}
@@ -1479,7 +1667,7 @@ fn Renderer(comptime WriterType: type) type {
try self.writeIdentifierWithCase(.snake, derefName(param.name));
}
},
.bitflags, .in_pointer, .in_out_pointer, .buffer_len, .mut_buffer_len, .other => {
else => {
try self.writeIdentifierWithCase(.snake, param.name);
},
}
@@ -1570,7 +1758,7 @@ fn Renderer(comptime WriterType: type) type {
try self.writer.writeAll(";\n");
}
try self.renderWrapperPrototype(name, command, returns);
try self.renderWrapperPrototype(name, command, returns, "", .wrapper);
if (returns.len == 1 and returns[0].origin == .inner_return_value) {
try self.writer.writeAll("{\n\n");