From 90f3ecda90625302e60bc1bf4596546ebfb09631 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Wed, 20 Mar 2024 17:09:27 -0400 Subject: [PATCH 001/113] minimal vulkan + glfw --- .gitignore | 44 + build.zig | 53 + build.zig.zon | 15 + reg/vk.xml | 26856 ++++++++++++++++++++++++++++++++++++++++++++++++ src/c.zig | 25 + src/main.zig | 75 + 6 files changed, 27068 insertions(+) create mode 100644 .gitignore create mode 100644 build.zig create mode 100644 build.zig.zon create mode 100644 reg/vk.xml create mode 100644 src/c.zig create mode 100644 src/main.zig diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..7ac1866 --- /dev/null +++ b/.gitignore @@ -0,0 +1,44 @@ +*.d +*.slo +*.lo +*.o +*.obj +*.gch +*.pch +*.so +*.dylib +*.dll +*.mod +*.smod +*.lai +*.la +*.a +*.lib +*.exe +*.out +*.app +*.ko +*.elf +*.ilk +*.map +*.exp +*.so.* +*.i*86 +*.x86_64 +*.hex +*.dSYM/ +*.su +*.idb +*.pdb +*.mod* +*.cmd +.tmp_versions/ +modules.order +Module.symvers +Mkfile.old +dkms.conf +zig-cache/ +zig-out/ +build/ +build-*/ +docgen_tmp/ diff --git a/build.zig b/build.zig new file mode 100644 index 0000000..574ad5b --- /dev/null +++ b/build.zig @@ -0,0 +1,53 @@ +const std = @import("std"); + +pub fn build(b: *std.Build) void { + const target = b.standardTargetOptions(.{}); + + const optimize = b.standardOptimizeOption(.{}); + + const vk = b.dependency("vulkan-zig", .{ + .registry = @as([]const u8, b.pathFromRoot("reg/vk.xml")), + }); + const vkmod = vk.module("vulkan-zig"); + + const exe = b.addExecutable(.{ + .name = "scratchzig", + .root_source_file = .{ .path = "src/main.zig" }, + .target = target, + .optimize = optimize, + }); + + // this requires PKG_CONFIG_PATH to be set. something like: + // ~/.local/lib/pkgconfig/ + exe.linkSystemLibrary2("glfw3", .{ + .needed = true, + .preferred_link_mode = .static, + .use_pkg_config = .force, + }); + exe.linkLibC(); + + b.installArtifact(exe); + exe.root_module.addImport("vk", vkmod); + + const run_cmd = b.addRunArtifact(exe); + + run_cmd.step.dependOn(b.getInstallStep()); + + if (b.args) |args| { + run_cmd.addArgs(args); + } + + const run_step = b.step("run", "Run the app"); + run_step.dependOn(&run_cmd.step); + + const exe_unit_tests = b.addTest(.{ + .root_source_file = .{ .path = "src/main.zig" }, + .target = target, + .optimize = optimize, + }); + + const run_exe_unit_tests = b.addRunArtifact(exe_unit_tests); + + const test_step = b.step("test", "Run unit tests"); + test_step.dependOn(&run_exe_unit_tests.step); +} diff --git a/build.zig.zon b/build.zig.zon new file mode 100644 index 0000000..bac4b3c --- /dev/null +++ b/build.zig.zon @@ -0,0 +1,15 @@ +.{ + .name = "scratchzig", + .version = "0.0.0", + + .dependencies = .{ + .@"vulkan-zig" = .{ + .url = "https://github.com/Snektron/vulkan-zig/archive/ac4103a733c479b599aae8d42c08cabd7d5cf48a.tar.gz", + .hash = "122085abbcfa0328f5f6e0e702d25ee0a61bb92d0ce9ba415a2fea1d33f43129cb66", + }, + }, + + .paths = .{ + "", + }, +} diff --git a/reg/vk.xml b/reg/vk.xml new file mode 100644 index 0000000..7b2fd07 --- /dev/null +++ b/reg/vk.xml @@ -0,0 +1,26856 @@ + + + +Copyright 2015-2024 The Khronos Group Inc. + +SPDX-License-Identifier: Apache-2.0 OR MIT + + + +This file, vk.xml, is the Vulkan API Registry. 
It is a critically important
and normative part of the Vulkan Specification, including a canonical
machine-readable definition of the API, parameter and member validation
language incorporated into the Specification and reference pages, and other
material which is registered by Khronos, such as tags used by extension and
layer authors. The authoritative public version of vk.xml is maintained in
the default branch (currently named main) of the Khronos Vulkan GitHub
project. The authoritative private version is maintained in the default
branch of the member gitlab server.

[reg/vk.xml: vendored Khronos Vulkan API Registry, VK_HEADER_VERSION 280.
The rest of the file is the registry data itself: platform and version
defines (the VK_MAKE_API_VERSION family), dispatchable and non-dispatchable
handle macros, VkFlags/VkFlags64 bitmask typedefs, and the struct, enum, and
command definitions that vulkan-zig reads (via the .registry option in
build.zig) to generate the vk module. The remainder of the vendored file,
and the src/c.zig and src/main.zig hunks listed in the diffstat, are not
shown here.]
framebuffer + VkSampleCountFlags framebufferColorSampleCountssupported color sample counts for a framebuffer + VkSampleCountFlags framebufferDepthSampleCountssupported depth sample counts for a framebuffer + VkSampleCountFlags framebufferStencilSampleCountssupported stencil sample counts for a framebuffer + VkSampleCountFlags framebufferNoAttachmentsSampleCountssupported sample counts for a subpass which uses no attachments + uint32_t maxColorAttachmentsmax number of color attachments per subpass + VkSampleCountFlags sampledImageColorSampleCountssupported color sample counts for a non-integer sampled image + VkSampleCountFlags sampledImageIntegerSampleCountssupported sample counts for an integer image + VkSampleCountFlags sampledImageDepthSampleCountssupported depth sample counts for a sampled image + VkSampleCountFlags sampledImageStencilSampleCountssupported stencil sample counts for a sampled image + VkSampleCountFlags storageImageSampleCountssupported sample counts for a storage image + uint32_t maxSampleMaskWordsmax number of sample mask words + VkBool32 timestampComputeAndGraphicstimestamps on graphics and compute queues + float timestampPeriodnumber of nanoseconds it takes for timestamp query value to increment by 1 + uint32_t maxClipDistancesmax number of clip distances + uint32_t maxCullDistancesmax number of cull distances + uint32_t maxCombinedClipAndCullDistancesmax combined number of user clipping + uint32_t discreteQueuePrioritiesdistinct queue priorities available + float pointSizeRange[2]range (min,max) of supported point sizes + float lineWidthRange[2]range (min,max) of supported line widths + float pointSizeGranularitygranularity of supported point sizes + float lineWidthGranularitygranularity of supported line widths + VkBool32 strictLinesline rasterization follows preferred rules + VkBool32 standardSampleLocationssupports standard sample locations for all supported sample counts + VkDeviceSize optimalBufferCopyOffsetAlignmentoptimal offset of buffer copies + VkDeviceSize optimalBufferCopyRowPitchAlignmentoptimal pitch of buffer copies + VkDeviceSize nonCoherentAtomSizeminimum size and alignment for non-coherent host-mapped device memory access + + + VkStructureType sType + const void* pNext + VkSemaphoreCreateFlags flagsSemaphore creation flags + + + VkStructureType sType + const void* pNext + VkQueryPoolCreateFlags flags + VkQueryType queryType + uint32_t queryCount + VkQueryPipelineStatisticFlags pipelineStatisticsOptional + + + VkStructureType sType + const void* pNext + VkFramebufferCreateFlags flags + VkRenderPass renderPass + uint32_t attachmentCount + const VkImageView* pAttachments + uint32_t width + uint32_t height + uint32_t layers + + + uint32_t vertexCount + uint32_t instanceCount + uint32_t firstVertex + uint32_t firstInstance + + + uint32_t indexCount + uint32_t instanceCount + uint32_t firstIndex + int32_t vertexOffset + uint32_t firstInstance + + + uint32_t x + uint32_t y + uint32_t z + + + uint32_t firstVertex + uint32_t vertexCount + + + uint32_t firstIndex + uint32_t indexCount + int32_t vertexOffset + + + VkStructureType sType + const void* pNext + uint32_t waitSemaphoreCount + const VkSemaphore* pWaitSemaphores + const VkPipelineStageFlags* pWaitDstStageMask + uint32_t commandBufferCount + const VkCommandBuffer* pCommandBuffers + uint32_t signalSemaphoreCount + const VkSemaphore* pSignalSemaphores + + WSI extensions + + VkDisplayKHR displayHandle of the display object + const char* displayNameName of the display + VkExtent2D physicalDimensionsIn 
millimeters? + VkExtent2D physicalResolutionMax resolution for CRT? + VkSurfaceTransformFlagsKHR supportedTransformsone or more bits from VkSurfaceTransformFlagsKHR + VkBool32 planeReorderPossibleVK_TRUE if the overlay plane's z-order can be changed on this display. + VkBool32 persistentContentVK_TRUE if this is a "smart" display that supports self-refresh/internal buffering. + + + VkDisplayKHR currentDisplayDisplay the plane is currently associated with. Will be VK_NULL_HANDLE if the plane is not in use. + uint32_t currentStackIndexCurrent z-order of the plane. + + + VkExtent2D visibleRegionVisible scanout region. + uint32_t refreshRateNumber of times per second the display is updated. + + + VkDisplayModeKHR displayModeHandle of this display mode. + VkDisplayModeParametersKHR parametersThe parameters this mode uses. + + + VkStructureType sType + const void* pNext + VkDisplayModeCreateFlagsKHR flags + VkDisplayModeParametersKHR parametersThe parameters this mode uses. + + + VkDisplayPlaneAlphaFlagsKHR supportedAlphaTypes of alpha blending supported, if any. + VkOffset2D minSrcPositionDoes the plane have any position and extent restrictions? + VkOffset2D maxSrcPosition + VkExtent2D minSrcExtent + VkExtent2D maxSrcExtent + VkOffset2D minDstPosition + VkOffset2D maxDstPosition + VkExtent2D minDstExtent + VkExtent2D maxDstExtent + + + VkStructureType sType + const void* pNext + VkDisplaySurfaceCreateFlagsKHR flags + VkDisplayModeKHR displayModeThe mode to use when displaying this surface + uint32_t planeIndexThe plane on which this surface appears. Must be between 0 and the value returned by vkGetPhysicalDeviceDisplayPlanePropertiesKHR() in pPropertyCount. + uint32_t planeStackIndexThe z-order of the plane. + VkSurfaceTransformFlagBitsKHR transformTransform to apply to the images as part of the scanout operation + float globalAlphaGlobal alpha value. Must be between 0 and 1, inclusive. Ignored if alphaMode is not VK_DISPLAY_PLANE_ALPHA_GLOBAL_BIT_KHR + VkDisplayPlaneAlphaFlagBitsKHR alphaModeWhat type of alpha blending to use. Must be a bit from vkGetDisplayPlanePropertiesKHR::supportedAlpha. + VkExtent2D imageExtentsize of the images to use with this surface + + + VkStructureType sType + const void* pNext + VkRect2D srcRectRectangle within the presentable image to read pixel data from when presenting to the display. + VkRect2D dstRectRectangle within the current display mode's visible region to display srcRectangle in. + VkBool32 persistentFor smart displays, use buffered mode. If the display properties member "persistentMode" is VK_FALSE, this member must always be VK_FALSE. 
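The registry entries above describe the VK_KHR_display structures (VkDisplayPropertiesKHR, VkDisplayModePropertiesKHR, VkDisplaySurfaceCreateInfoKHR, VkDisplayPresentInfoKHR). As a rough illustration of how an application consumes them, the C sketch below enumerates displays with the usual count-then-fill pattern. The helper name pick_first_display is hypothetical, and the code assumes a VkInstance created with VK_KHR_display enabled and a valid VkPhysicalDevice; it is a sketch, not part of this patch.

#include <vulkan/vulkan.h>
#include <stdlib.h>

/* Hypothetical helper: enumerate the displays exposed through VK_KHR_display and
 * return the first display handle, or VK_NULL_HANDLE if there is none.
 * 'instance' must have been created with the VK_KHR_display extension enabled. */
static VkDisplayKHR pick_first_display(VkInstance instance, VkPhysicalDevice phys)
{
    PFN_vkGetPhysicalDeviceDisplayPropertiesKHR get_props =
        (PFN_vkGetPhysicalDeviceDisplayPropertiesKHR)
            vkGetInstanceProcAddr(instance, "vkGetPhysicalDeviceDisplayPropertiesKHR");
    if (!get_props)
        return VK_NULL_HANDLE;

    /* Two-call pattern: first query the count, then fill the array. */
    uint32_t count = 0;
    if (get_props(phys, &count, NULL) != VK_SUCCESS || count == 0)
        return VK_NULL_HANDLE;

    VkDisplayPropertiesKHR *props = calloc(count, sizeof(*props));
    if (!props)
        return VK_NULL_HANDLE;

    VkDisplayKHR display = VK_NULL_HANDLE;
    VkResult r = get_props(phys, &count, props);
    if ((r == VK_SUCCESS || r == VK_INCOMPLETE) && count > 0)
        display = props[0].display;   /* handle of the display object */

    free(props);
    return display;
}

The returned VkDisplayKHR would then feed vkGetDisplayModePropertiesKHR and a VkDisplaySurfaceCreateInfoKHR to build a display surface.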
+ + + uint32_t minImageCountSupported minimum number of images for the surface + uint32_t maxImageCountSupported maximum number of images for the surface, 0 for unlimited + VkExtent2D currentExtentCurrent image width and height for the surface, (0, 0) if undefined + VkExtent2D minImageExtentSupported minimum image width and height for the surface + VkExtent2D maxImageExtentSupported maximum image width and height for the surface + uint32_t maxImageArrayLayersSupported maximum number of image layers for the surface + VkSurfaceTransformFlagsKHR supportedTransforms1 or more bits representing the transforms supported + VkSurfaceTransformFlagBitsKHR currentTransformThe surface's current transform relative to the device's natural orientation + VkCompositeAlphaFlagsKHR supportedCompositeAlpha1 or more bits representing the alpha compositing modes supported + VkImageUsageFlags supportedUsageFlagsSupported image usage flags for the surface + + + VkStructureType sType + const void* pNext + VkAndroidSurfaceCreateFlagsKHR flags + struct ANativeWindow* window + + + VkStructureType sType + const void* pNext + VkViSurfaceCreateFlagsNN flags + void* window + + + VkStructureType sType + const void* pNext + VkWaylandSurfaceCreateFlagsKHR flags + struct wl_display* display + struct wl_surface* surface + + + VkStructureType sType + const void* pNext + VkWin32SurfaceCreateFlagsKHR flags + HINSTANCE hinstance + HWND hwnd + + + VkStructureType sType + const void* pNext + VkXlibSurfaceCreateFlagsKHR flags + Display* dpy + Window window + + + VkStructureType sType + const void* pNext + VkXcbSurfaceCreateFlagsKHR flags + xcb_connection_t* connection + xcb_window_t window + + + VkStructureType sType + const void* pNext + VkDirectFBSurfaceCreateFlagsEXT flags + IDirectFB* dfb + IDirectFBSurface* surface + + + VkStructureType sType + const void* pNext + VkImagePipeSurfaceCreateFlagsFUCHSIA flags + zx_handle_t imagePipeHandle + + + VkStructureType sType + const void* pNext + VkStreamDescriptorSurfaceCreateFlagsGGP flags + GgpStreamDescriptor streamDescriptor + + + VkStructureType sType + const void* pNext + VkScreenSurfaceCreateFlagsQNX flags + struct _screen_context* context + struct _screen_window* window + + + VkFormat formatSupported pair of rendering format + VkColorSpaceKHR colorSpaceand color space for the surface + + + VkStructureType sType + const void* pNext + VkSwapchainCreateFlagsKHR flags + VkSurfaceKHR surfaceThe swapchain's target surface + uint32_t minImageCountMinimum number of presentation images the application needs + VkFormat imageFormatFormat of the presentation images + VkColorSpaceKHR imageColorSpaceColorspace of the presentation images + VkExtent2D imageExtentDimensions of the presentation images + uint32_t imageArrayLayersDetermines the number of views for multiview/stereo presentation + VkImageUsageFlags imageUsageBits indicating how the presentation images will be used + VkSharingMode imageSharingModeSharing mode used for the presentation images + uint32_t queueFamilyIndexCountNumber of queue families having access to the images in case of concurrent sharing mode + const uint32_t* pQueueFamilyIndicesArray of queue family indices having access to the images in case of concurrent sharing mode + VkSurfaceTransformFlagBitsKHR preTransformThe transform, relative to the device's natural orientation, applied to the image content prior to presentation + VkCompositeAlphaFlagBitsKHR compositeAlphaThe alpha blending mode used when compositing this surface with other surfaces in the window system + 
VkPresentModeKHR presentModeWhich presentation mode to use for presents on this swap chain + VkBool32 clippedSpecifies whether presentable images may be affected by window clip regions + VkSwapchainKHR oldSwapchainExisting swap chain to replace, if any + VkSwapchainKHR oldSwapchainExisting swap chain to replace, if any + + + VkStructureType sType + const void* pNext + uint32_t waitSemaphoreCountNumber of semaphores to wait for before presenting + const VkSemaphore* pWaitSemaphoresSemaphores to wait for before presenting + uint32_t swapchainCountNumber of swapchains to present in this call + const VkSwapchainKHR* pSwapchainsSwapchains to present an image from + const uint32_t* pImageIndicesIndices of which presentable images to present + VkResult* pResultsOptional (i.e. if non-NULL) VkResult for each swapchain + + + VkStructureType sType + const void* pNext + VkDebugReportFlagsEXT flagsIndicates which events call this callback + PFN_vkDebugReportCallbackEXT pfnCallbackFunction pointer of a callback function + void* pUserDataUser data provided to callback function + + + VkStructureType sTypeMust be VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT + const void* pNext + uint32_t disabledValidationCheckCountNumber of validation checks to disable + const VkValidationCheckEXT* pDisabledValidationChecksValidation checks to disable + + + VkStructureType sTypeMust be VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT + const void* pNext + uint32_t enabledValidationFeatureCountNumber of validation features to enable + const VkValidationFeatureEnableEXT* pEnabledValidationFeaturesValidation features to enable + uint32_t disabledValidationFeatureCountNumber of validation features to disable + const VkValidationFeatureDisableEXT* pDisabledValidationFeaturesValidation features to disable + + + VkStructureType sTypeMust be VK_STRUCTURE_TYPE_LAYER_SETTINGS_CREATE_INFO_EXT + const void* pNext + uint32_t settingCountNumber of settings to configure + const VkLayerSettingEXT* pSettingsValidation features to enable + + + const char* pLayerName + const char* pSettingName + VkLayerSettingTypeEXT typeThe type of the object + uint32_t valueCountNumber of values of the setting + const void* pValuesValues to pass for a setting + + + VkStructureType sType + const void* pNext + uint32_t vendorID + uint32_t deviceID + uint32_t key + uint64_t value + + + VkStructureType sType + const void* pNext + VkRasterizationOrderAMD rasterizationOrderRasterization order to use for the pipeline + + + VkStructureType sType + const void* pNext + VkDebugReportObjectTypeEXT objectTypeThe type of the object + uint64_t objectThe handle of the object, cast to uint64_t + const char* pObjectNameName to apply to the object + + + VkStructureType sType + const void* pNext + VkDebugReportObjectTypeEXT objectTypeThe type of the object + uint64_t objectThe handle of the object, cast to uint64_t + uint64_t tagNameThe name of the tag to set on the object + size_t tagSizeThe length in bytes of the tag data + const void* pTagTag data to attach to the object + + + VkStructureType sType + const void* pNext + const char* pMarkerNameName of the debug marker + float color[4]Optional color for debug marker + + + VkStructureType sType + const void* pNext + VkBool32 dedicatedAllocationWhether this image uses a dedicated allocation + + + VkStructureType sType + const void* pNext + VkBool32 dedicatedAllocationWhether this buffer uses a dedicated allocation + + + VkStructureType sType + const void* pNext + VkImage imageImage that this allocation will be bound to + VkBuffer 
bufferBuffer that this allocation will be bound to + + + VkImageFormatProperties imageFormatProperties + VkExternalMemoryFeatureFlagsNV externalMemoryFeatures + VkExternalMemoryHandleTypeFlagsNV exportFromImportedHandleTypes + VkExternalMemoryHandleTypeFlagsNV compatibleHandleTypes + + + VkStructureType sType + const void* pNext + VkExternalMemoryHandleTypeFlagsNV handleTypes + + + VkStructureType sType + const void* pNext + VkExternalMemoryHandleTypeFlagsNV handleTypes + + + VkStructureType sType + const void* pNext + VkExternalMemoryHandleTypeFlagsNV handleType + HANDLE handle + + + VkStructureType sType + const void* pNext + const SECURITY_ATTRIBUTES* pAttributes + DWORD dwAccess + + + VkStructureType sType + const void* pNext + NvSciBufAttrList pAttributes + + + VkStructureType sType + const void* pNext + VkExternalMemoryHandleTypeFlagBits handleType + NvSciBufObj handle + + + VkStructureType sType + const void* pNext + VkDeviceMemory memory + VkExternalMemoryHandleTypeFlagBits handleType + + + VkStructureType sType + const void* pNext + uint32_t memoryTypeBits + + + VkStructureType sType + void* pNext + VkBool32 sciBufImport + VkBool32 sciBufExport + + + + VkStructureType sType + const void* pNext + uint32_t acquireCount + const VkDeviceMemory* pAcquireSyncs + const uint64_t* pAcquireKeys + const uint32_t* pAcquireTimeoutMilliseconds + uint32_t releaseCount + const VkDeviceMemory* pReleaseSyncs + const uint64_t* pReleaseKeys + + + VkStructureType sType + void* pNext + VkBool32 deviceGeneratedCommands + + + VkStructureType sType + void* pNext + VkBool32 deviceGeneratedCompute + VkBool32 deviceGeneratedComputePipelines + VkBool32 deviceGeneratedComputeCaptureReplay + + + VkStructureType sType + const void* pNext + uint32_t privateDataSlotRequestCount + + + + VkStructureType sType + const void* pNext + VkPrivateDataSlotCreateFlags flags + + + + VkStructureType sType + void* pNext + VkBool32 privateData + + + + VkStructureType sType + void* pNext + uint32_t maxGraphicsShaderGroupCount + uint32_t maxIndirectSequenceCount + uint32_t maxIndirectCommandsTokenCount + uint32_t maxIndirectCommandsStreamCount + uint32_t maxIndirectCommandsTokenOffset + uint32_t maxIndirectCommandsStreamStride + uint32_t minSequencesCountBufferOffsetAlignment + uint32_t minSequencesIndexBufferOffsetAlignment + uint32_t minIndirectCommandsBufferOffsetAlignment + + + VkStructureType sType + void* pNext + uint32_t maxMultiDrawCount + + + VkStructureType sType + const void* pNext + uint32_t stageCount + const VkPipelineShaderStageCreateInfo* pStages + const VkPipelineVertexInputStateCreateInfo* pVertexInputState + const VkPipelineTessellationStateCreateInfo* pTessellationState + + + VkStructureType sType + const void* pNext + uint32_t groupCount + const VkGraphicsShaderGroupCreateInfoNV* pGroups + uint32_t pipelineCount + const VkPipeline* pPipelines + + + uint32_t groupIndex + + + VkDeviceAddress bufferAddress + uint32_t size + VkIndexType indexType + + + VkDeviceAddress bufferAddress + uint32_t size + uint32_t stride + + + uint32_t data + + + VkBuffer buffer + VkDeviceSize offset + + + VkStructureType sType + const void* pNext + VkIndirectCommandsTokenTypeNV tokenType + uint32_t stream + uint32_t offset + uint32_t vertexBindingUnit + VkBool32 vertexDynamicStride + VkPipelineLayout pushconstantPipelineLayout + VkShaderStageFlags pushconstantShaderStageFlags + uint32_t pushconstantOffset + uint32_t pushconstantSize + VkIndirectStateFlagsNV indirectStateFlags + uint32_t indexTypeCount + const VkIndexType* pIndexTypes + 
const uint32_t* pIndexTypeValues + + + VkStructureType sType + const void* pNext + VkIndirectCommandsLayoutUsageFlagsNV flags + VkPipelineBindPoint pipelineBindPoint + uint32_t tokenCount + const VkIndirectCommandsLayoutTokenNV* pTokens + uint32_t streamCount + const uint32_t* pStreamStrides + + + VkStructureType sType + const void* pNext + VkPipelineBindPoint pipelineBindPoint + VkPipeline pipeline + VkIndirectCommandsLayoutNV indirectCommandsLayout + uint32_t streamCount + const VkIndirectCommandsStreamNV* pStreams + uint32_t sequencesCount + VkBuffer preprocessBuffer + VkDeviceSize preprocessOffset + VkDeviceSize preprocessSize + VkBuffer sequencesCountBuffer + VkDeviceSize sequencesCountOffset + VkBuffer sequencesIndexBuffer + VkDeviceSize sequencesIndexOffset + + + VkStructureType sType + const void* pNext + VkPipelineBindPoint pipelineBindPoint + VkPipeline pipeline + VkIndirectCommandsLayoutNV indirectCommandsLayout + uint32_t maxSequencesCount + + + VkStructureType sType + const void* pNext + VkPipelineBindPoint pipelineBindPoint + VkPipeline pipeline + + + VkDeviceAddress pipelineAddress + + + VkStructureType sType + void* pNext + VkPhysicalDeviceFeatures features + + + + VkStructureType sType + void* pNext + VkPhysicalDeviceProperties properties + + + + VkStructureType sType + void* pNext + VkFormatProperties formatProperties + + + + VkStructureType sType + void* pNext + VkImageFormatProperties imageFormatProperties + + + + VkStructureType sType + const void* pNext + VkFormat format + VkImageType type + VkImageTiling tiling + VkImageUsageFlags usage + VkImageCreateFlags flags + + + + VkStructureType sType + void* pNext + VkQueueFamilyProperties queueFamilyProperties + + + + VkStructureType sType + void* pNext + VkPhysicalDeviceMemoryProperties memoryProperties + + + + VkStructureType sType + void* pNext + VkSparseImageFormatProperties properties + + + + VkStructureType sType + const void* pNext + VkFormat format + VkImageType type + VkSampleCountFlagBits samples + VkImageUsageFlags usage + VkImageTiling tiling + + + + VkStructureType sType + void* pNext + uint32_t maxPushDescriptors + + + uint8_t major + uint8_t minor + uint8_t subminor + uint8_t patch + + + + VkStructureType sType + void* pNext + VkDriverId driverID + char driverName[VK_MAX_DRIVER_NAME_SIZE] + char driverInfo[VK_MAX_DRIVER_INFO_SIZE] + VkConformanceVersion conformanceVersion + + + + VkStructureType sType + const void* pNext + uint32_t swapchainCountCopy of VkPresentInfoKHR::swapchainCount + const VkPresentRegionKHR* pRegionsThe regions that have changed + + + uint32_t rectangleCountNumber of rectangles in pRectangles + const VkRectLayerKHR* pRectanglesArray of rectangles that have changed in a swapchain's image(s) + + + VkOffset2D offsetupper-left corner of a rectangle that has not changed, in pixels of a presentation images + VkExtent2D extentDimensions of a rectangle that has not changed, in pixels of a presentation images + uint32_t layerLayer of a swapchain's image(s), for stereoscopic-3D images + + + VkStructureType sType + void* pNext + VkBool32 variablePointersStorageBuffer + VkBool32 variablePointers + + + + + + VkExternalMemoryFeatureFlags externalMemoryFeatures + VkExternalMemoryHandleTypeFlags exportFromImportedHandleTypes + VkExternalMemoryHandleTypeFlags compatibleHandleTypes + + + + VkStructureType sType + const void* pNext + VkExternalMemoryHandleTypeFlagBits handleType + + + + VkStructureType sType + void* pNext + VkExternalMemoryProperties externalMemoryProperties + + + + VkStructureType sType + 
const void* pNext + VkBufferCreateFlags flags + VkBufferUsageFlags usage + VkExternalMemoryHandleTypeFlagBits handleType + + + + VkStructureType sType + void* pNext + VkExternalMemoryProperties externalMemoryProperties + + + + VkStructureType sType + void* pNext + uint8_t deviceUUID[VK_UUID_SIZE] + uint8_t driverUUID[VK_UUID_SIZE] + uint8_t deviceLUID[VK_LUID_SIZE] + uint32_t deviceNodeMask + VkBool32 deviceLUIDValid + + + + VkStructureType sType + const void* pNext + VkExternalMemoryHandleTypeFlags handleTypes + + + + VkStructureType sType + const void* pNext + VkExternalMemoryHandleTypeFlags handleTypes + + + + VkStructureType sType + const void* pNext + VkExternalMemoryHandleTypeFlags handleTypes + + + + VkStructureType sType + const void* pNext + VkExternalMemoryHandleTypeFlagBits handleType + HANDLE handle + LPCWSTR name + + + VkStructureType sType + const void* pNext + const SECURITY_ATTRIBUTES* pAttributes + DWORD dwAccess + LPCWSTR name + + + VkStructureType sType + const void* pNext + VkExternalMemoryHandleTypeFlagBits handleType + zx_handle_t handle + + + VkStructureType sType + void* pNext + uint32_t memoryTypeBits + + + VkStructureType sType + const void* pNext + VkDeviceMemory memory + VkExternalMemoryHandleTypeFlagBits handleType + + + VkStructureType sType + void* pNext + uint32_t memoryTypeBits + + + VkStructureType sType + const void* pNext + VkDeviceMemory memory + VkExternalMemoryHandleTypeFlagBits handleType + + + VkStructureType sType + const void* pNext + VkExternalMemoryHandleTypeFlagBits handleType + int fd + + + VkStructureType sType + void* pNext + uint32_t memoryTypeBits + + + VkStructureType sType + const void* pNext + VkDeviceMemory memory + VkExternalMemoryHandleTypeFlagBits handleType + + + VkStructureType sType + const void* pNext + uint32_t acquireCount + const VkDeviceMemory* pAcquireSyncs + const uint64_t* pAcquireKeys + const uint32_t* pAcquireTimeouts + uint32_t releaseCount + const VkDeviceMemory* pReleaseSyncs + const uint64_t* pReleaseKeys + + + VkStructureType sType + const void* pNext + VkExternalSemaphoreHandleTypeFlagBits handleType + + + + VkStructureType sType + void* pNext + VkExternalSemaphoreHandleTypeFlags exportFromImportedHandleTypes + VkExternalSemaphoreHandleTypeFlags compatibleHandleTypes + VkExternalSemaphoreFeatureFlags externalSemaphoreFeatures + + + + VkStructureType sType + const void* pNext + VkExternalSemaphoreHandleTypeFlags handleTypes + + + + VkStructureType sType + const void* pNext + VkSemaphore semaphore + VkSemaphoreImportFlags flags + VkExternalSemaphoreHandleTypeFlagBits handleType + HANDLE handle + LPCWSTR name + + + VkStructureType sType + const void* pNext + const SECURITY_ATTRIBUTES* pAttributes + DWORD dwAccess + LPCWSTR name + + + VkStructureType sType + const void* pNext + uint32_t waitSemaphoreValuesCount + const uint64_t* pWaitSemaphoreValues + uint32_t signalSemaphoreValuesCount + const uint64_t* pSignalSemaphoreValues + + + VkStructureType sType + const void* pNext + VkSemaphore semaphore + VkExternalSemaphoreHandleTypeFlagBits handleType + + + VkStructureType sType + const void* pNext + VkSemaphore semaphore + VkSemaphoreImportFlags flags + VkExternalSemaphoreHandleTypeFlagBits handleType + int fd + + + VkStructureType sType + const void* pNext + VkSemaphore semaphore + VkExternalSemaphoreHandleTypeFlagBits handleType + + + VkStructureType sType + const void* pNext + VkSemaphore semaphore + VkSemaphoreImportFlags flags + VkExternalSemaphoreHandleTypeFlagBits handleType + zx_handle_t zirconHandle + + + 
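The external-handle structures listed here (the VkExport*CreateInfo, VkImport*HandleInfo and Vk*GetFdInfoKHR families) all follow the same export/import pattern: tag the object as exportable at creation time via pNext, then retrieve a handle of the requested type. A minimal C sketch, assuming a VkDevice created with VK_KHR_external_semaphore_fd enabled; create_exportable_semaphore is an illustrative helper name, not part of the API.

#include <vulkan/vulkan.h>

/* Hypothetical helper: create a semaphore that can be exported as an opaque POSIX fd
 * (requires VK_KHR_external_semaphore_fd to be enabled on 'device'). */
static VkResult create_exportable_semaphore(VkDevice device, VkSemaphore *out_sem, int *out_fd)
{
    VkExportSemaphoreCreateInfo export_info = {
        .sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO,
        .handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT,
    };
    VkSemaphoreCreateInfo sem_info = {
        .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
        .pNext = &export_info,   /* mark the semaphore as exportable */
    };
    VkResult r = vkCreateSemaphore(device, &sem_info, NULL, out_sem);
    if (r != VK_SUCCESS)
        return r;

    PFN_vkGetSemaphoreFdKHR get_fd =
        (PFN_vkGetSemaphoreFdKHR)vkGetDeviceProcAddr(device, "vkGetSemaphoreFdKHR");
    if (!get_fd)
        return VK_ERROR_EXTENSION_NOT_PRESENT;

    VkSemaphoreGetFdInfoKHR get_info = {
        .sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR,
        .semaphore = *out_sem,
        .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT,
    };
    /* The fd can be handed to another process or API and imported there via
     * VkImportSemaphoreFdInfoKHR / vkImportSemaphoreFdKHR. */
    return get_fd(device, &get_info, out_fd);
}

The same shape applies to memory (VkMemoryGetFdInfoKHR) and fences (VkFenceGetFdInfoKHR), with the Win32 and Zircon variants substituting their own handle types.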
VkStructureType sType + const void* pNext + VkSemaphore semaphore + VkExternalSemaphoreHandleTypeFlagBits handleType + + + VkStructureType sType + const void* pNext + VkExternalFenceHandleTypeFlagBits handleType + + + + VkStructureType sType + void* pNext + VkExternalFenceHandleTypeFlags exportFromImportedHandleTypes + VkExternalFenceHandleTypeFlags compatibleHandleTypes + VkExternalFenceFeatureFlags externalFenceFeatures + + + + VkStructureType sType + const void* pNext + VkExternalFenceHandleTypeFlags handleTypes + + + + VkStructureType sType + const void* pNext + VkFence fence + VkFenceImportFlags flags + VkExternalFenceHandleTypeFlagBits handleType + HANDLE handle + LPCWSTR name + + + VkStructureType sType + const void* pNext + const SECURITY_ATTRIBUTES* pAttributes + DWORD dwAccess + LPCWSTR name + + + VkStructureType sType + const void* pNext + VkFence fence + VkExternalFenceHandleTypeFlagBits handleType + + + VkStructureType sType + const void* pNext + VkFence fence + VkFenceImportFlags flags + VkExternalFenceHandleTypeFlagBits handleType + int fd + + + VkStructureType sType + const void* pNext + VkFence fence + VkExternalFenceHandleTypeFlagBits handleType + + + VkStructureType sType + const void* pNext + NvSciSyncAttrList pAttributes + + + VkStructureType sType + const void* pNext + VkFence fence + VkExternalFenceHandleTypeFlagBits handleType + void* handle + + + VkStructureType sType + const void* pNext + VkFence fence + VkExternalFenceHandleTypeFlagBits handleType + + + VkStructureType sType + const void* pNext + NvSciSyncAttrList pAttributes + + + VkStructureType sType + const void* pNext + VkSemaphore semaphore + VkExternalSemaphoreHandleTypeFlagBits handleType + void* handle + + + VkStructureType sType + const void* pNext + VkSemaphore semaphore + VkExternalSemaphoreHandleTypeFlagBits handleType + + + VkStructureType sType + const void* pNext + VkSciSyncClientTypeNV clientType + VkSciSyncPrimitiveTypeNV primitiveType + + + VkStructureType sType + void* pNext + VkBool32 sciSyncFence + VkBool32 sciSyncSemaphore + VkBool32 sciSyncImport + VkBool32 sciSyncExport + + + VkStructureType sType + void* pNext + VkBool32 sciSyncFence + VkBool32 sciSyncSemaphore2 + VkBool32 sciSyncImport + VkBool32 sciSyncExport + + + VkStructureType sType + const void* pNext + NvSciSyncObj handle + + + VkStructureType sType + const void* pNext + VkSemaphoreSciSyncPoolNV semaphorePool + const NvSciSyncFence* pFence + + + VkStructureType sType + const void* pNext + uint32_t semaphoreSciSyncPoolRequestCount + + + VkStructureType sType + void* pNext + VkBool32 multiviewMultiple views in a renderpass + VkBool32 multiviewGeometryShaderMultiple views in a renderpass w/ geometry shader + VkBool32 multiviewTessellationShaderMultiple views in a renderpass w/ tessellation shader + + + + VkStructureType sType + void* pNext + uint32_t maxMultiviewViewCountmax number of views in a subpass + uint32_t maxMultiviewInstanceIndexmax instance index for a draw in a multiview subpass + + + + VkStructureType sType + const void* pNext + uint32_t subpassCount + const uint32_t* pViewMasks + uint32_t dependencyCount + const int32_t* pViewOffsets + uint32_t correlationMaskCount + const uint32_t* pCorrelationMasks + + + + VkStructureType sType + void* pNext + uint32_t minImageCountSupported minimum number of images for the surface + uint32_t maxImageCountSupported maximum number of images for the surface, 0 for unlimited + VkExtent2D currentExtentCurrent image width and height for the surface, (0, 0) if undefined + VkExtent2D 
minImageExtentSupported minimum image width and height for the surface + VkExtent2D maxImageExtentSupported maximum image width and height for the surface + uint32_t maxImageArrayLayersSupported maximum number of image layers for the surface + VkSurfaceTransformFlagsKHR supportedTransforms1 or more bits representing the transforms supported + VkSurfaceTransformFlagBitsKHR currentTransformThe surface's current transform relative to the device's natural orientation + VkCompositeAlphaFlagsKHR supportedCompositeAlpha1 or more bits representing the alpha compositing modes supported + VkImageUsageFlags supportedUsageFlagsSupported image usage flags for the surface + VkSurfaceCounterFlagsEXT supportedSurfaceCounters + + + VkStructureType sType + const void* pNext + VkDisplayPowerStateEXT powerState + + + VkStructureType sType + const void* pNext + VkDeviceEventTypeEXT deviceEvent + + + VkStructureType sType + const void* pNext + VkDisplayEventTypeEXT displayEvent + + + VkStructureType sType + const void* pNext + VkSurfaceCounterFlagsEXT surfaceCounters + + + VkStructureType sType + void* pNext + uint32_t physicalDeviceCount + VkPhysicalDevice physicalDevices[VK_MAX_DEVICE_GROUP_SIZE] + VkBool32 subsetAllocation + + + + VkStructureType sType + const void* pNext + VkMemoryAllocateFlags flags + uint32_t deviceMask + + + + VkStructureType sType + const void* pNext + VkBuffer buffer + VkDeviceMemory memory + VkDeviceSize memoryOffset + + + + VkStructureType sType + const void* pNext + uint32_t deviceIndexCount + const uint32_t* pDeviceIndices + + + + VkStructureType sType + const void* pNext + VkImage image + VkDeviceMemory memory + VkDeviceSize memoryOffset + + + + VkStructureType sType + const void* pNext + uint32_t deviceIndexCount + const uint32_t* pDeviceIndices + uint32_t splitInstanceBindRegionCount + const VkRect2D* pSplitInstanceBindRegions + + + + VkStructureType sType + const void* pNext + uint32_t deviceMask + uint32_t deviceRenderAreaCount + const VkRect2D* pDeviceRenderAreas + + + + VkStructureType sType + const void* pNext + uint32_t deviceMask + + + + VkStructureType sType + const void* pNext + uint32_t waitSemaphoreCount + const uint32_t* pWaitSemaphoreDeviceIndices + uint32_t commandBufferCount + const uint32_t* pCommandBufferDeviceMasks + uint32_t signalSemaphoreCount + const uint32_t* pSignalSemaphoreDeviceIndices + + + + VkStructureType sType + const void* pNext + uint32_t resourceDeviceIndex + uint32_t memoryDeviceIndex + + + + VkStructureType sType + void* pNext + uint32_t presentMask[VK_MAX_DEVICE_GROUP_SIZE] + VkDeviceGroupPresentModeFlagsKHR modes + + + VkStructureType sType + const void* pNext + VkSwapchainKHR swapchain + + + VkStructureType sType + const void* pNext + VkSwapchainKHR swapchain + uint32_t imageIndex + + + VkStructureType sType + const void* pNext + VkSwapchainKHR swapchain + uint64_t timeout + VkSemaphore semaphore + VkFence fence + uint32_t deviceMask + + + VkStructureType sType + const void* pNext + uint32_t swapchainCount + const uint32_t* pDeviceMasks + VkDeviceGroupPresentModeFlagBitsKHR mode + + + VkStructureType sType + const void* pNext + uint32_t physicalDeviceCount + const VkPhysicalDevice* pPhysicalDevices + + + + VkStructureType sType + const void* pNext + VkDeviceGroupPresentModeFlagsKHR modes + + + uint32_t dstBindingBinding within the destination descriptor set to write + uint32_t dstArrayElementArray element within the destination binding to write + uint32_t descriptorCountNumber of descriptors to write + VkDescriptorType 
descriptorTypeDescriptor type to write + size_t offsetOffset into pData where the descriptors to update are stored + size_t strideStride between two descriptors in pData when writing more than one descriptor + + + + VkStructureType sType + const void* pNext + VkDescriptorUpdateTemplateCreateFlags flags + uint32_t descriptorUpdateEntryCountNumber of descriptor update entries to use for the update template + const VkDescriptorUpdateTemplateEntry* pDescriptorUpdateEntriesDescriptor update entries for the template + VkDescriptorUpdateTemplateType templateType + VkDescriptorSetLayout descriptorSetLayout + VkPipelineBindPoint pipelineBindPoint + VkPipelineLayout pipelineLayoutIf used for push descriptors, this is the only allowed layout + uint32_t set + + + + float x + float y + + + VkStructureType sType + void* pNext + VkBool32 presentIdPresent ID in VkPresentInfoKHR + + + VkStructureType sType + const void* pNext + uint32_t swapchainCountCopy of VkPresentInfoKHR::swapchainCount + const uint64_t* pPresentIdsPresent ID values for each swapchain + + + VkStructureType sType + void* pNext + VkBool32 presentWaitvkWaitForPresentKHR is supported + + + Display primary in chromaticity coordinates + VkStructureType sType + const void* pNext + From SMPTE 2086 + VkXYColorEXT displayPrimaryRedDisplay primary's Red + VkXYColorEXT displayPrimaryGreenDisplay primary's Green + VkXYColorEXT displayPrimaryBlueDisplay primary's Blue + VkXYColorEXT whitePointDisplay primary's Blue + float maxLuminanceDisplay maximum luminance + float minLuminanceDisplay minimum luminance + From CTA 861.3 + float maxContentLightLevelContent maximum luminance + float maxFrameAverageLightLevel + + + VkStructureType sType + void* pNext + VkBool32 localDimmingSupport + + + VkStructureType sType + const void* pNext + VkBool32 localDimmingEnable + + + uint64_t refreshDurationNumber of nanoseconds from the start of one refresh cycle to the next + + + uint32_t presentIDApplication-provided identifier, previously given to vkQueuePresentKHR + uint64_t desiredPresentTimeEarliest time an image should have been presented, previously given to vkQueuePresentKHR + uint64_t actualPresentTimeTime the image was actually displayed + uint64_t earliestPresentTimeEarliest time the image could have been displayed + uint64_t presentMarginHow early vkQueuePresentKHR was processed vs. 
how soon it needed to be and make earliestPresentTime + + + VkStructureType sType + const void* pNext + uint32_t swapchainCountCopy of VkPresentInfoKHR::swapchainCount + const VkPresentTimeGOOGLE* pTimesThe earliest times to present images + + + uint32_t presentIDApplication-provided identifier + uint64_t desiredPresentTimeEarliest time an image should be presented + + + VkStructureType sType + const void* pNext + VkIOSSurfaceCreateFlagsMVK flags + const void* pView + + + VkStructureType sType + const void* pNext + VkMacOSSurfaceCreateFlagsMVK flags + const void* pView + + + VkStructureType sType + const void* pNext + VkMetalSurfaceCreateFlagsEXT flags + const CAMetalLayer* pLayer + + + float xcoeff + float ycoeff + + + VkStructureType sType + const void* pNext + VkBool32 viewportWScalingEnable + uint32_t viewportCount + const VkViewportWScalingNV* pViewportWScalings + + + VkViewportCoordinateSwizzleNV x + VkViewportCoordinateSwizzleNV y + VkViewportCoordinateSwizzleNV z + VkViewportCoordinateSwizzleNV w + + + VkStructureType sType + const void* pNext + VkPipelineViewportSwizzleStateCreateFlagsNV flags + uint32_t viewportCount + const VkViewportSwizzleNV* pViewportSwizzles + + + VkStructureType sType + void* pNext + uint32_t maxDiscardRectanglesmax number of active discard rectangles + + + VkStructureType sType + const void* pNext + VkPipelineDiscardRectangleStateCreateFlagsEXT flags + VkDiscardRectangleModeEXT discardRectangleMode + uint32_t discardRectangleCount + const VkRect2D* pDiscardRectangles + + + VkStructureType sType + void* pNext + VkBool32 perViewPositionAllComponents + + + uint32_t subpass + uint32_t inputAttachmentIndex + VkImageAspectFlags aspectMask + + + + VkStructureType sType + const void* pNext + uint32_t aspectReferenceCount + const VkInputAttachmentAspectReference* pAspectReferences + + + + VkStructureType sType + const void* pNext + VkSurfaceKHR surface + + + VkStructureType sType + void* pNext + VkSurfaceCapabilitiesKHR surfaceCapabilities + + + VkStructureType sType + void* pNext + VkSurfaceFormatKHR surfaceFormat + + + VkStructureType sType + void* pNext + VkDisplayPropertiesKHR displayProperties + + + VkStructureType sType + void* pNext + VkDisplayPlanePropertiesKHR displayPlaneProperties + + + VkStructureType sType + void* pNext + VkDisplayModePropertiesKHR displayModeProperties + + + VkStructureType sType + const void* pNext + VkDisplayModeKHR mode + uint32_t planeIndex + + + VkStructureType sType + void* pNext + VkDisplayPlaneCapabilitiesKHR capabilities + + + VkStructureType sType + void* pNext + VkImageUsageFlags sharedPresentSupportedUsageFlagsSupported image usage flags if swapchain created using a shared present mode + + + VkStructureType sType + void* pNext + VkBool32 storageBuffer16BitAccess16-bit integer/floating-point variables supported in BufferBlock + VkBool32 uniformAndStorageBuffer16BitAccess16-bit integer/floating-point variables supported in BufferBlock and Block + VkBool32 storagePushConstant1616-bit integer/floating-point variables supported in PushConstant + VkBool32 storageInputOutput1616-bit integer/floating-point variables supported in shader inputs and outputs + + + + VkStructureType sType + void* pNext + uint32_t subgroupSizeThe size of a subgroup for this queue. + VkShaderStageFlags supportedStagesBitfield of what shader stages support subgroup operations + VkSubgroupFeatureFlags supportedOperationsBitfield of what subgroup operations are supported. 
+ VkBool32 quadOperationsInAllStagesFlag to specify whether quad operations are available in all stages. + + + VkStructureType sType + void* pNext + VkBool32 shaderSubgroupExtendedTypesFlag to specify whether subgroup operations with extended types are supported + + + + VkStructureType sType + const void* pNext + VkBuffer buffer + + + + VkStructureType sType + const void* pNext + const VkBufferCreateInfo* pCreateInfo + + + + VkStructureType sType + const void* pNext + VkImage image + + + + VkStructureType sType + const void* pNext + VkImage image + + + + VkStructureType sType + const void* pNext + const VkImageCreateInfo* pCreateInfo + VkImageAspectFlagBits planeAspect + + + + VkStructureType sType + void* pNext + VkMemoryRequirements memoryRequirements + + + + VkStructureType sType + void* pNext + VkSparseImageMemoryRequirements memoryRequirements + + + + VkStructureType sType + void* pNext + VkPointClippingBehavior pointClippingBehavior + + + + VkStructureType sType + void* pNext + VkBool32 prefersDedicatedAllocation + VkBool32 requiresDedicatedAllocation + + + + VkStructureType sType + const void* pNext + VkImage imageImage that this allocation will be bound to + VkBuffer bufferBuffer that this allocation will be bound to + + + + VkStructureType sType + const void* pNext + VkImageUsageFlags usage + + + VkStructureType sType + const void* pNext + uint32_t sliceOffset + uint32_t sliceCount + + + + VkStructureType sType + const void* pNext + VkTessellationDomainOrigin domainOrigin + + + + VkStructureType sType + const void* pNext + VkSamplerYcbcrConversion conversion + + + + VkStructureType sType + const void* pNext + VkFormat format + VkSamplerYcbcrModelConversion ycbcrModel + VkSamplerYcbcrRange ycbcrRange + VkComponentMapping components + VkChromaLocation xChromaOffset + VkChromaLocation yChromaOffset + VkFilter chromaFilter + VkBool32 forceExplicitReconstruction + + + + VkStructureType sType + const void* pNext + VkImageAspectFlagBits planeAspect + + + + VkStructureType sType + const void* pNext + VkImageAspectFlagBits planeAspect + + + + VkStructureType sType + void* pNext + VkBool32 samplerYcbcrConversionSampler color conversion supported + + + + VkStructureType sType + void* pNext + uint32_t combinedImageSamplerDescriptorCount + + + + VkStructureType sType + void* pNext + VkBool32 supportsTextureGatherLODBiasAMD + + + VkStructureType sType + const void* pNext + VkBuffer buffer + VkDeviceSize offset + VkConditionalRenderingFlagsEXT flags + + + VkStructureType sType + const void* pNext + VkBool32 protectedSubmitSubmit protected command buffers + + + VkStructureType sType + void* pNext + VkBool32 protectedMemory + + + VkStructureType sType + void* pNext + VkBool32 protectedNoFault + + + VkStructureType sType + const void* pNext + VkDeviceQueueCreateFlags flags + uint32_t queueFamilyIndex + uint32_t queueIndex + + + VkStructureType sType + const void* pNext + VkPipelineCoverageToColorStateCreateFlagsNV flags + VkBool32 coverageToColorEnable + uint32_t coverageToColorLocation + + + VkStructureType sType + void* pNext + VkBool32 filterMinmaxSingleComponentFormats + VkBool32 filterMinmaxImageComponentMapping + + + + float x + float y + + + VkStructureType sType + const void* pNext + VkSampleCountFlagBits sampleLocationsPerPixel + VkExtent2D sampleLocationGridSize + uint32_t sampleLocationsCount + const VkSampleLocationEXT* pSampleLocations + + + uint32_t attachmentIndex + VkSampleLocationsInfoEXT sampleLocationsInfo + + + uint32_t subpassIndex + VkSampleLocationsInfoEXT sampleLocationsInfo 
+ + + VkStructureType sType + const void* pNext + uint32_t attachmentInitialSampleLocationsCount + const VkAttachmentSampleLocationsEXT* pAttachmentInitialSampleLocations + uint32_t postSubpassSampleLocationsCount + const VkSubpassSampleLocationsEXT* pPostSubpassSampleLocations + + + VkStructureType sType + const void* pNext + VkBool32 sampleLocationsEnable + VkSampleLocationsInfoEXT sampleLocationsInfo + + + VkStructureType sType + void* pNext + VkSampleCountFlags sampleLocationSampleCounts + VkExtent2D maxSampleLocationGridSize + float sampleLocationCoordinateRange[2] + uint32_t sampleLocationSubPixelBits + VkBool32 variableSampleLocations + + + VkStructureType sType + void* pNext + VkExtent2D maxSampleLocationGridSize + + + VkStructureType sType + const void* pNext + VkSamplerReductionMode reductionMode + + + + VkStructureType sType + void* pNext + VkBool32 advancedBlendCoherentOperations + + + VkStructureType sType + void* pNext + VkBool32 multiDraw + + + VkStructureType sType + void* pNext + uint32_t advancedBlendMaxColorAttachments + VkBool32 advancedBlendIndependentBlend + VkBool32 advancedBlendNonPremultipliedSrcColor + VkBool32 advancedBlendNonPremultipliedDstColor + VkBool32 advancedBlendCorrelatedOverlap + VkBool32 advancedBlendAllOperations + + + VkStructureType sType + const void* pNext + VkBool32 srcPremultiplied + VkBool32 dstPremultiplied + VkBlendOverlapEXT blendOverlap + + + VkStructureType sType + void* pNext + VkBool32 inlineUniformBlock + VkBool32 descriptorBindingInlineUniformBlockUpdateAfterBind + + + + VkStructureType sType + void* pNext + uint32_t maxInlineUniformBlockSize + uint32_t maxPerStageDescriptorInlineUniformBlocks + uint32_t maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks + uint32_t maxDescriptorSetInlineUniformBlocks + uint32_t maxDescriptorSetUpdateAfterBindInlineUniformBlocks + + + + VkStructureType sType + const void* pNext + uint32_t dataSize + const void* pData + + + + VkStructureType sType + const void* pNext + uint32_t maxInlineUniformBlockBindings + + + + VkStructureType sType + const void* pNext + VkPipelineCoverageModulationStateCreateFlagsNV flags + VkCoverageModulationModeNV coverageModulationMode + VkBool32 coverageModulationTableEnable + uint32_t coverageModulationTableCount + const float* pCoverageModulationTable + + + VkStructureType sType + const void* pNext + uint32_t viewFormatCount + const VkFormat* pViewFormats + + + + VkStructureType sType + const void* pNext + VkValidationCacheCreateFlagsEXT flags + size_t initialDataSize + const void* pInitialData + + + VkStructureType sType + const void* pNext + VkValidationCacheEXT validationCache + + + VkStructureType sType + void* pNext + uint32_t maxPerSetDescriptors + VkDeviceSize maxMemoryAllocationSize + + + + VkStructureType sType + void* pNext + VkBool32 maintenance4 + + + + VkStructureType sType + void* pNext + VkDeviceSize maxBufferSize + + + + VkStructureType sType + void* pNext + VkBool32 maintenance5 + + + VkStructureType sType + void* pNext + VkBool32 earlyFragmentMultisampleCoverageAfterSampleCounting + VkBool32 earlyFragmentSampleMaskTestBeforeSampleCounting + VkBool32 depthStencilSwizzleOneSupport + VkBool32 polygonModePointSize + VkBool32 nonStrictSinglePixelWideLinesUseParallelogram + VkBool32 nonStrictWideLinesUseParallelogram + + + VkStructureType sType + void* pNext + VkBool32 maintenance6 + + + VkStructureType sType + void* pNext + VkBool32 blockTexelViewCompatibleMultipleLayers + uint32_t maxCombinedImageSamplerDescriptorCount + VkBool32 
[reg/vk.xml, continued: Vulkan API Registry structure and member definitions, including the float-controls and descriptor-indexing properties, render pass 2 attachment/subpass descriptions, timeline semaphore create/submit/wait/signal info, debug utils messenger and label structures, transform feedback, mesh shading, ray tracing pipelines and acceleration structures, buffer device address, performance query, fragment shading rate, extended dynamic state, synchronization2 barriers and submit info, host image copy, and the Vulkan SC fault and pipeline-pool structures.]
pPipelinePoolSizes + uint32_t semaphoreRequestCount + uint32_t commandBufferRequestCount + uint32_t fenceRequestCount + uint32_t deviceMemoryRequestCount + uint32_t bufferRequestCount + uint32_t imageRequestCount + uint32_t eventRequestCount + uint32_t queryPoolRequestCount + uint32_t bufferViewRequestCount + uint32_t imageViewRequestCount + uint32_t layeredImageViewRequestCount + uint32_t pipelineCacheRequestCount + uint32_t pipelineLayoutRequestCount + uint32_t renderPassRequestCount + uint32_t graphicsPipelineRequestCount + uint32_t computePipelineRequestCount + uint32_t descriptorSetLayoutRequestCount + uint32_t samplerRequestCount + uint32_t descriptorPoolRequestCount + uint32_t descriptorSetRequestCount + uint32_t framebufferRequestCount + uint32_t commandPoolRequestCount + uint32_t samplerYcbcrConversionRequestCount + uint32_t surfaceRequestCount + uint32_t swapchainRequestCount + uint32_t displayModeRequestCount + uint32_t subpassDescriptionRequestCount + uint32_t attachmentDescriptionRequestCount + uint32_t descriptorSetLayoutBindingRequestCount + uint32_t descriptorSetLayoutBindingLimit + uint32_t maxImageViewMipLevels + uint32_t maxImageViewArrayLayers + uint32_t maxLayeredImageViewMipLevels + uint32_t maxOcclusionQueriesPerPool + uint32_t maxPipelineStatisticsQueriesPerPool + uint32_t maxTimestampQueriesPerPool + uint32_t maxImmutableSamplersPerDescriptorSetLayout + + + VkStructureType sType + const void* pNext + VkDeviceSize commandPoolReservedSize + uint32_t commandPoolMaxCommandBuffers + + + VkStructureType sType + void* pNext + VkDeviceSize commandPoolAllocated + VkDeviceSize commandPoolReservedSize + VkDeviceSize commandBufferAllocated + + + VkStructureType sType + void* pNext + VkBool32 shaderAtomicInstructions + + + VkStructureType sType + void* pNext + VkBool32 primitivesGeneratedQuery + VkBool32 primitivesGeneratedQueryWithRasterizerDiscard + VkBool32 primitivesGeneratedQueryWithNonZeroStreams + + + VkStructureType sType + void* pNext + VkBool32 legacyDithering + + + VkStructureType sType + void* pNext + VkBool32 multisampledRenderToSingleSampled + + + VkStructureType sType + void* pNext + VkBool32 optimal + + + VkStructureType sType + const void* pNext + VkBool32 multisampledRenderToSingleSampledEnable + VkSampleCountFlagBits rasterizationSamples + + + VkStructureType sType + void* pNext + VkBool32 pipelineProtectedAccess + + + VkStructureType sType + void* pNext + VkVideoCodecOperationFlagsKHR videoCodecOperations + + + VkStructureType sType + void* pNext + VkBool32 queryResultStatusSupport + + + VkStructureType sType + const void* pNext + uint32_t profileCount + const VkVideoProfileInfoKHR* pProfiles + + + VkStructureType sType + const void* pNext + VkImageUsageFlags imageUsage + + + VkStructureType sType + void* pNext + VkFormat format + VkComponentMapping componentMapping + VkImageCreateFlags imageCreateFlags + VkImageType imageType + VkImageTiling imageTiling + VkImageUsageFlags imageUsageFlags + + + VkStructureType sType + const void* pNext + VkVideoCodecOperationFlagBitsKHR videoCodecOperation + VkVideoChromaSubsamplingFlagsKHR chromaSubsampling + VkVideoComponentBitDepthFlagsKHR lumaBitDepth + VkVideoComponentBitDepthFlagsKHR chromaBitDepth + + + VkStructureType sType + void* pNext + VkVideoCapabilityFlagsKHR flags + VkDeviceSize minBitstreamBufferOffsetAlignment + VkDeviceSize minBitstreamBufferSizeAlignment + VkExtent2D pictureAccessGranularity + VkExtent2D minCodedExtent + VkExtent2D maxCodedExtent + uint32_t maxDpbSlots + uint32_t 
maxActiveReferencePictures + VkExtensionProperties stdHeaderVersion + + + VkStructureType sType + void* pNext + uint32_t memoryBindIndex + VkMemoryRequirements memoryRequirements + + + VkStructureType sType + const void* pNext + uint32_t memoryBindIndex + VkDeviceMemory memory + VkDeviceSize memoryOffset + VkDeviceSize memorySize + + + VkStructureType sType + const void* pNext + VkOffset2D codedOffsetThe offset to be used for the picture resource, currently only used in field mode + VkExtent2D codedExtentThe extent to be used for the picture resource + uint32_t baseArrayLayerThe first array layer to be accessed for the Decode or Encode Operations + VkImageView imageViewBindingThe ImageView binding of the resource + + + VkStructureType sType + const void* pNext + int32_t slotIndexThe reference slot index + const VkVideoPictureResourceInfoKHR* pPictureResourceThe reference picture resource + + + VkStructureType sType + void* pNext + VkVideoDecodeCapabilityFlagsKHR flags + + + VkStructureType sType + const void* pNext + VkVideoDecodeUsageFlagsKHR videoUsageHints + + + VkStructureType sType + const void* pNext + VkVideoDecodeFlagsKHR flags + VkBuffer srcBuffer + VkDeviceSize srcBufferOffset + VkDeviceSize srcBufferRange + VkVideoPictureResourceInfoKHR dstPictureResource + const VkVideoReferenceSlotInfoKHR* pSetupReferenceSlot + uint32_t referenceSlotCount + const VkVideoReferenceSlotInfoKHR* pReferenceSlots + + + VkStructureType sType + void* pNext + VkBool32 videoMaintenance1 + + + VkStructureType sType + const void* pNext + VkQueryPool queryPool + uint32_t firstQuery + uint32_t queryCount + + Video Decode Codec Standard specific structures + #include "vk_video/vulkan_video_codec_h264std.h" + + + + + + + + + + + + + + + + + + + #include "vk_video/vulkan_video_codec_h264std_decode.h" + + + + + + VkStructureType sType + const void* pNext + StdVideoH264ProfileIdc stdProfileIdc + VkVideoDecodeH264PictureLayoutFlagBitsKHR pictureLayout + + + VkStructureType sType + void* pNext + StdVideoH264LevelIdc maxLevelIdc + VkOffset2D fieldOffsetGranularity + + + + + VkStructureType sType + const void* pNext + uint32_t stdSPSCount + const StdVideoH264SequenceParameterSet* pStdSPSs + uint32_t stdPPSCount + const StdVideoH264PictureParameterSet* pStdPPSsList of Picture Parameters associated with the spsStd, above + + + VkStructureType sType + const void* pNext + uint32_t maxStdSPSCount + uint32_t maxStdPPSCount + const VkVideoDecodeH264SessionParametersAddInfoKHR* pParametersAddInfo + + + VkStructureType sType + const void* pNext + const StdVideoDecodeH264PictureInfo* pStdPictureInfo + uint32_t sliceCount + const uint32_t* pSliceOffsets + + + VkStructureType sType + const void* pNext + const StdVideoDecodeH264ReferenceInfo* pStdReferenceInfo + + #include "vk_video/vulkan_video_codec_h265std.h" + + + + + + + + + + + + + + + + + + + #include "vk_video/vulkan_video_codec_h265std_decode.h" + + + + + + VkStructureType sType + const void* pNext + StdVideoH265ProfileIdc stdProfileIdc + + + VkStructureType sType + void* pNext + StdVideoH265LevelIdc maxLevelIdc + + + VkStructureType sType + const void* pNext + uint32_t stdVPSCount + const StdVideoH265VideoParameterSet* pStdVPSs + uint32_t stdSPSCount + const StdVideoH265SequenceParameterSet* pStdSPSs + uint32_t stdPPSCount + const StdVideoH265PictureParameterSet* pStdPPSsList of Picture Parameters associated with the spsStd, above + + + VkStructureType sType + const void* pNext + uint32_t maxStdVPSCount + uint32_t maxStdSPSCount + uint32_t maxStdPPSCount + const 
VkVideoDecodeH265SessionParametersAddInfoKHR* pParametersAddInfo + + + VkStructureType sType + const void* pNext + const StdVideoDecodeH265PictureInfo* pStdPictureInfo + uint32_t sliceSegmentCount + const uint32_t* pSliceSegmentOffsets + + + VkStructureType sType + const void* pNext + const StdVideoDecodeH265ReferenceInfo* pStdReferenceInfo + + #include "vk_video/vulkan_video_codec_av1std.h" + + + + #include "vk_video/vulkan_video_codec_av1std_decode.h" + + + + VkStructureType sType + const void* pNext + StdVideoAV1Profile stdProfile + VkBool32 filmGrainSupport + + + VkStructureType sType + void* pNext + StdVideoAV1Level maxLevel + + + VkStructureType sType + const void* pNext + const StdVideoAV1SequenceHeader* pStdSequenceHeader + + + VkStructureType sType + const void* pNext + const StdVideoDecodeAV1PictureInfo* pStdPictureInfo + int32_t referenceNameSlotIndices[VK_MAX_VIDEO_AV1_REFERENCES_PER_FRAME_KHR] + uint32_t frameHeaderOffset + uint32_t tileCount + const uint32_t* pTileOffsets + const uint32_t* pTileSizes + + + VkStructureType sType + const void* pNext + const StdVideoDecodeAV1ReferenceInfo* pStdReferenceInfo + + + VkStructureType sType + const void* pNext + uint32_t queueFamilyIndex + VkVideoSessionCreateFlagsKHR flags + const VkVideoProfileInfoKHR* pVideoProfile + VkFormat pictureFormat + VkExtent2D maxCodedExtent + VkFormat referencePictureFormat + uint32_t maxDpbSlots + uint32_t maxActiveReferencePictures + const VkExtensionProperties* pStdHeaderVersion + + + VkStructureType sType + const void* pNext + VkVideoSessionParametersCreateFlagsKHR flags + VkVideoSessionParametersKHR videoSessionParametersTemplate + VkVideoSessionKHR videoSession + + + VkStructureType sType + const void* pNext + uint32_t updateSequenceCount + + + VkStructureType sType + const void* pNext + VkVideoSessionParametersKHR videoSessionParameters + + + VkStructureType sType + void* pNext + VkBool32 hasOverrides + + + VkStructureType sType + const void* pNext + VkVideoBeginCodingFlagsKHR flags + VkVideoSessionKHR videoSession + VkVideoSessionParametersKHR videoSessionParameters + uint32_t referenceSlotCount + const VkVideoReferenceSlotInfoKHR* pReferenceSlots + + + VkStructureType sType + const void* pNext + VkVideoEndCodingFlagsKHR flags + + + VkStructureType sType + const void* pNext + VkVideoCodingControlFlagsKHR flags + + + VkStructureType sType + const void* pNext + VkVideoEncodeUsageFlagsKHR videoUsageHints + VkVideoEncodeContentFlagsKHR videoContentHints + VkVideoEncodeTuningModeKHR tuningMode + + + VkStructureType sType + const void* pNext + VkVideoEncodeFlagsKHR flags + VkBuffer dstBuffer + VkDeviceSize dstBufferOffset + VkDeviceSize dstBufferRange + VkVideoPictureResourceInfoKHR srcPictureResource + const VkVideoReferenceSlotInfoKHR* pSetupReferenceSlot + uint32_t referenceSlotCount + const VkVideoReferenceSlotInfoKHR* pReferenceSlots + uint32_t precedingExternallyEncodedBytes + + + VkStructureType sType + const void* pNext + VkVideoEncodeFeedbackFlagsKHR encodeFeedbackFlags + + + VkStructureType sType + const void* pNext + uint32_t qualityLevel + + + VkStructureType sType + const void* pNext + const VkVideoProfileInfoKHR* pVideoProfile + uint32_t qualityLevel + + + VkStructureType sType + void* pNext + VkVideoEncodeRateControlModeFlagBitsKHR preferredRateControlMode + uint32_t preferredRateControlLayerCount + + + VkStructureType sType + const void* pNext + VkVideoEncodeRateControlFlagsKHR flags + VkVideoEncodeRateControlModeFlagBitsKHR rateControlMode + uint32_t layerCount + const 
VkVideoEncodeRateControlLayerInfoKHR* pLayers + uint32_t virtualBufferSizeInMs + uint32_t initialVirtualBufferSizeInMs + + + VkStructureType sType + const void* pNext + uint64_t averageBitrate + uint64_t maxBitrate + uint32_t frameRateNumerator + uint32_t frameRateDenominator + + + VkStructureType sType + void* pNext + VkVideoEncodeCapabilityFlagsKHR flags + VkVideoEncodeRateControlModeFlagsKHR rateControlModes + uint32_t maxRateControlLayers + uint64_t maxBitrate + uint32_t maxQualityLevels + VkExtent2D encodeInputPictureGranularity + VkVideoEncodeFeedbackFlagsKHR supportedEncodeFeedbackFlags + + + VkStructureType sType + void* pNext + VkVideoEncodeH264CapabilityFlagsKHR flags + StdVideoH264LevelIdc maxLevelIdc + uint32_t maxSliceCount + uint32_t maxPPictureL0ReferenceCount + uint32_t maxBPictureL0ReferenceCount + uint32_t maxL1ReferenceCount + uint32_t maxTemporalLayerCount + VkBool32 expectDyadicTemporalLayerPattern + int32_t minQp + int32_t maxQp + VkBool32 prefersGopRemainingFrames + VkBool32 requiresGopRemainingFrames + VkVideoEncodeH264StdFlagsKHR stdSyntaxFlags + + + VkStructureType sType + void* pNext + VkVideoEncodeH264RateControlFlagsKHR preferredRateControlFlags + uint32_t preferredGopFrameCount + uint32_t preferredIdrPeriod + uint32_t preferredConsecutiveBFrameCount + uint32_t preferredTemporalLayerCount + VkVideoEncodeH264QpKHR preferredConstantQp + uint32_t preferredMaxL0ReferenceCount + uint32_t preferredMaxL1ReferenceCount + VkBool32 preferredStdEntropyCodingModeFlag + + #include "vk_video/vulkan_video_codec_h264std_encode.h" + + + + + + + + + + + + VkStructureType sType + const void* pNext + VkBool32 useMaxLevelIdc + StdVideoH264LevelIdc maxLevelIdc + + + VkStructureType sType + const void* pNext + uint32_t stdSPSCount + const StdVideoH264SequenceParameterSet* pStdSPSs + uint32_t stdPPSCount + const StdVideoH264PictureParameterSet* pStdPPSsList of Picture Parameters associated with the spsStd, above + + + VkStructureType sType + const void* pNext + uint32_t maxStdSPSCount + uint32_t maxStdPPSCount + const VkVideoEncodeH264SessionParametersAddInfoKHR* pParametersAddInfo + + + VkStructureType sType + const void* pNext + VkBool32 writeStdSPS + VkBool32 writeStdPPS + uint32_t stdSPSId + uint32_t stdPPSId + + + VkStructureType sType + void* pNext + VkBool32 hasStdSPSOverrides + VkBool32 hasStdPPSOverrides + + + VkStructureType sType + const void* pNext + const StdVideoEncodeH264ReferenceInfo* pStdReferenceInfo + + + VkStructureType sType + const void* pNext + uint32_t naluSliceEntryCount + const VkVideoEncodeH264NaluSliceInfoKHR* pNaluSliceEntries + const StdVideoEncodeH264PictureInfo* pStdPictureInfo + VkBool32 generatePrefixNalu + + + VkStructureType sType + const void* pNext + StdVideoH264ProfileIdc stdProfileIdc + + + VkStructureType sType + const void* pNext + int32_t constantQp + const StdVideoEncodeH264SliceHeader* pStdSliceHeader + + + VkStructureType sType + const void* pNext + VkVideoEncodeH264RateControlFlagsKHR flags + uint32_t gopFrameCount + uint32_t idrPeriod + uint32_t consecutiveBFrameCount + uint32_t temporalLayerCount + + + int32_t qpI + int32_t qpP + int32_t qpB + + + uint32_t frameISize + uint32_t framePSize + uint32_t frameBSize + + + VkStructureType sType + const void* pNext + VkBool32 useGopRemainingFrames + uint32_t gopRemainingI + uint32_t gopRemainingP + uint32_t gopRemainingB + + + VkStructureType sType + const void* pNext + VkBool32 useMinQp + VkVideoEncodeH264QpKHR minQp + VkBool32 useMaxQp + VkVideoEncodeH264QpKHR maxQp + VkBool32 useMaxFrameSize 
+ VkVideoEncodeH264FrameSizeKHR maxFrameSize + + + VkStructureType sType + void* pNext + VkVideoEncodeH265CapabilityFlagsKHR flags + StdVideoH265LevelIdc maxLevelIdc + uint32_t maxSliceSegmentCount + VkExtent2D maxTiles + VkVideoEncodeH265CtbSizeFlagsKHR ctbSizes + VkVideoEncodeH265TransformBlockSizeFlagsKHR transformBlockSizes + uint32_t maxPPictureL0ReferenceCount + uint32_t maxBPictureL0ReferenceCount + uint32_t maxL1ReferenceCount + uint32_t maxSubLayerCount + VkBool32 expectDyadicTemporalSubLayerPattern + int32_t minQp + int32_t maxQp + VkBool32 prefersGopRemainingFrames + VkBool32 requiresGopRemainingFrames + VkVideoEncodeH265StdFlagsKHR stdSyntaxFlags + + + VkStructureType sType + void* pNext + VkVideoEncodeH265RateControlFlagsKHR preferredRateControlFlags + uint32_t preferredGopFrameCount + uint32_t preferredIdrPeriod + uint32_t preferredConsecutiveBFrameCount + uint32_t preferredSubLayerCount + VkVideoEncodeH265QpKHR preferredConstantQp + uint32_t preferredMaxL0ReferenceCount + uint32_t preferredMaxL1ReferenceCount + + #include "vk_video/vulkan_video_codec_h265std_encode.h" + + + + + + + + + + VkStructureType sType + const void* pNext + VkBool32 useMaxLevelIdc + StdVideoH265LevelIdc maxLevelIdc + + + VkStructureType sType + const void* pNext + uint32_t stdVPSCount + const StdVideoH265VideoParameterSet* pStdVPSs + uint32_t stdSPSCount + const StdVideoH265SequenceParameterSet* pStdSPSs + uint32_t stdPPSCount + const StdVideoH265PictureParameterSet* pStdPPSsList of Picture Parameters associated with the spsStd, above + + + VkStructureType sType + const void* pNext + uint32_t maxStdVPSCount + uint32_t maxStdSPSCount + uint32_t maxStdPPSCount + const VkVideoEncodeH265SessionParametersAddInfoKHR* pParametersAddInfo + + + VkStructureType sType + const void* pNext + VkBool32 writeStdVPS + VkBool32 writeStdSPS + VkBool32 writeStdPPS + uint32_t stdVPSId + uint32_t stdSPSId + uint32_t stdPPSId + + + VkStructureType sType + void* pNext + VkBool32 hasStdVPSOverrides + VkBool32 hasStdSPSOverrides + VkBool32 hasStdPPSOverrides + + + VkStructureType sType + const void* pNext + uint32_t naluSliceSegmentEntryCount + const VkVideoEncodeH265NaluSliceSegmentInfoKHR* pNaluSliceSegmentEntries + const StdVideoEncodeH265PictureInfo* pStdPictureInfo + + + VkStructureType sType + const void* pNext + int32_t constantQp + const StdVideoEncodeH265SliceSegmentHeader* pStdSliceSegmentHeader + + + VkStructureType sType + const void* pNext + VkVideoEncodeH265RateControlFlagsKHR flags + uint32_t gopFrameCount + uint32_t idrPeriod + uint32_t consecutiveBFrameCount + uint32_t subLayerCount + + + int32_t qpI + int32_t qpP + int32_t qpB + + + uint32_t frameISize + uint32_t framePSize + uint32_t frameBSize + + + VkStructureType sType + const void* pNext + VkBool32 useGopRemainingFrames + uint32_t gopRemainingI + uint32_t gopRemainingP + uint32_t gopRemainingB + + + VkStructureType sType + const void* pNext + VkBool32 useMinQp + VkVideoEncodeH265QpKHR minQp + VkBool32 useMaxQp + VkVideoEncodeH265QpKHR maxQp + VkBool32 useMaxFrameSize + VkVideoEncodeH265FrameSizeKHR maxFrameSize + + + VkStructureType sType + const void* pNext + StdVideoH265ProfileIdc stdProfileIdc + + + VkStructureType sType + const void* pNext + const StdVideoEncodeH265ReferenceInfo* pStdReferenceInfo + + + VkStructureType sType + void* pNext + VkBool32 inheritedViewportScissor2D + + + VkStructureType sType + const void* pNext + VkBool32 viewportScissor2D + uint32_t viewportDepthCount + const VkViewport* pViewportDepths + + + VkStructureType sType + 
void* pNext + VkBool32 ycbcr2plane444Formats + + + VkStructureType sType + void* pNext + VkBool32 provokingVertexLast + VkBool32 transformFeedbackPreservesProvokingVertex + + + VkStructureType sType + void* pNext + VkBool32 provokingVertexModePerPipeline + VkBool32 transformFeedbackPreservesTriangleFanProvokingVertex + + + VkStructureType sType + const void* pNext + VkProvokingVertexModeEXT provokingVertexMode + + + VkStructureType sType + const void* pNext + size_t dataSize + const void* pData + + + VkStructureType sType + const void* pNext + VkCuModuleNVX module + const char* pName + + + VkStructureType sType + const void* pNext + VkCuFunctionNVX function + uint32_t gridDimX + uint32_t gridDimY + uint32_t gridDimZ + uint32_t blockDimX + uint32_t blockDimY + uint32_t blockDimZ + uint32_t sharedMemBytes + size_t paramCount + const void* const * pParams + size_t extraCount + const void* const * pExtras + + + VkStructureType sType + void* pNext + VkBool32 descriptorBuffer + VkBool32 descriptorBufferCaptureReplay + VkBool32 descriptorBufferImageLayoutIgnored + VkBool32 descriptorBufferPushDescriptors + + + VkStructureType sType + void* pNext + VkBool32 combinedImageSamplerDescriptorSingleArray + VkBool32 bufferlessPushDescriptors + VkBool32 allowSamplerImageViewPostSubmitCreation + VkDeviceSize descriptorBufferOffsetAlignment + uint32_t maxDescriptorBufferBindings + uint32_t maxResourceDescriptorBufferBindings + uint32_t maxSamplerDescriptorBufferBindings + uint32_t maxEmbeddedImmutableSamplerBindings + uint32_t maxEmbeddedImmutableSamplers + size_t bufferCaptureReplayDescriptorDataSize + size_t imageCaptureReplayDescriptorDataSize + size_t imageViewCaptureReplayDescriptorDataSize + size_t samplerCaptureReplayDescriptorDataSize + size_t accelerationStructureCaptureReplayDescriptorDataSize + size_t samplerDescriptorSize + size_t combinedImageSamplerDescriptorSize + size_t sampledImageDescriptorSize + size_t storageImageDescriptorSize + size_t uniformTexelBufferDescriptorSize + size_t robustUniformTexelBufferDescriptorSize + size_t storageTexelBufferDescriptorSize + size_t robustStorageTexelBufferDescriptorSize + size_t uniformBufferDescriptorSize + size_t robustUniformBufferDescriptorSize + size_t storageBufferDescriptorSize + size_t robustStorageBufferDescriptorSize + size_t inputAttachmentDescriptorSize + size_t accelerationStructureDescriptorSize + VkDeviceSize maxSamplerDescriptorBufferRange + VkDeviceSize maxResourceDescriptorBufferRange + VkDeviceSize samplerDescriptorBufferAddressSpaceSize + VkDeviceSize resourceDescriptorBufferAddressSpaceSize + VkDeviceSize descriptorBufferAddressSpaceSize + + + VkStructureType sType + void* pNext + size_t combinedImageSamplerDensityMapDescriptorSize + + + VkStructureType sType + void* pNext + VkDeviceAddress address + VkDeviceSize range + VkFormat format + + + VkStructureType sType + void* pNext + VkDeviceAddress address + VkBufferUsageFlags usage + + + VkStructureType sType + void* pNext + VkBuffer buffer + + + const VkSampler* pSampler + const VkDescriptorImageInfo* pCombinedImageSampler + const VkDescriptorImageInfo* pInputAttachmentImage + const VkDescriptorImageInfo* pSampledImage + const VkDescriptorImageInfo* pStorageImage + const VkDescriptorAddressInfoEXT* pUniformTexelBuffer + const VkDescriptorAddressInfoEXT* pStorageTexelBuffer + const VkDescriptorAddressInfoEXT* pUniformBuffer + const VkDescriptorAddressInfoEXT* pStorageBuffer + VkDeviceAddress accelerationStructure + + + VkStructureType sType + const void* pNext + VkDescriptorType type 
+ VkDescriptorDataEXT data + + + VkStructureType sType + const void* pNext + VkBuffer buffer + + + VkStructureType sType + const void* pNext + VkImage image + + + VkStructureType sType + const void* pNext + VkImageView imageView + + + VkStructureType sType + const void* pNext + VkSampler sampler + + + VkStructureType sType + const void* pNext + VkAccelerationStructureKHR accelerationStructure + VkAccelerationStructureNV accelerationStructureNV + + + VkStructureType sType + const void* pNext + const void* opaqueCaptureDescriptorData + + + VkStructureType sType + void* pNext + VkBool32 shaderIntegerDotProduct + + + + VkStructureType sType + void* pNext + VkBool32 integerDotProduct8BitUnsignedAccelerated + VkBool32 integerDotProduct8BitSignedAccelerated + VkBool32 integerDotProduct8BitMixedSignednessAccelerated + VkBool32 integerDotProduct4x8BitPackedUnsignedAccelerated + VkBool32 integerDotProduct4x8BitPackedSignedAccelerated + VkBool32 integerDotProduct4x8BitPackedMixedSignednessAccelerated + VkBool32 integerDotProduct16BitUnsignedAccelerated + VkBool32 integerDotProduct16BitSignedAccelerated + VkBool32 integerDotProduct16BitMixedSignednessAccelerated + VkBool32 integerDotProduct32BitUnsignedAccelerated + VkBool32 integerDotProduct32BitSignedAccelerated + VkBool32 integerDotProduct32BitMixedSignednessAccelerated + VkBool32 integerDotProduct64BitUnsignedAccelerated + VkBool32 integerDotProduct64BitSignedAccelerated + VkBool32 integerDotProduct64BitMixedSignednessAccelerated + VkBool32 integerDotProductAccumulatingSaturating8BitUnsignedAccelerated + VkBool32 integerDotProductAccumulatingSaturating8BitSignedAccelerated + VkBool32 integerDotProductAccumulatingSaturating8BitMixedSignednessAccelerated + VkBool32 integerDotProductAccumulatingSaturating4x8BitPackedUnsignedAccelerated + VkBool32 integerDotProductAccumulatingSaturating4x8BitPackedSignedAccelerated + VkBool32 integerDotProductAccumulatingSaturating4x8BitPackedMixedSignednessAccelerated + VkBool32 integerDotProductAccumulatingSaturating16BitUnsignedAccelerated + VkBool32 integerDotProductAccumulatingSaturating16BitSignedAccelerated + VkBool32 integerDotProductAccumulatingSaturating16BitMixedSignednessAccelerated + VkBool32 integerDotProductAccumulatingSaturating32BitUnsignedAccelerated + VkBool32 integerDotProductAccumulatingSaturating32BitSignedAccelerated + VkBool32 integerDotProductAccumulatingSaturating32BitMixedSignednessAccelerated + VkBool32 integerDotProductAccumulatingSaturating64BitUnsignedAccelerated + VkBool32 integerDotProductAccumulatingSaturating64BitSignedAccelerated + VkBool32 integerDotProductAccumulatingSaturating64BitMixedSignednessAccelerated + + + + VkStructureType sType + void* pNext + VkBool32 hasPrimary + VkBool32 hasRender + int64_t primaryMajor + int64_t primaryMinor + int64_t renderMajor + int64_t renderMinor + + + VkStructureType sType + void* pNext + VkBool32 fragmentShaderBarycentric + + + VkStructureType sType + void* pNext + VkBool32 triStripVertexOrderIndependentOfProvokingVertex + + + VkStructureType sType + void* pNext + VkBool32 rayTracingMotionBlur + VkBool32 rayTracingMotionBlurPipelineTraceRaysIndirect + + + VkStructureType sType + void* pNext + VkBool32 rayTracingValidation + + + + VkStructureType sType + const void* pNext + VkDeviceOrHostAddressConstKHR vertexData + + + VkStructureType sType + const void* pNext + uint32_t maxInstances + VkAccelerationStructureMotionInfoFlagsNV flags + + + float sx + float a + float b + float pvx + float sy + float c + float pvy + float sz + float pvz + float qx 
+ float qy + float qz + float qw + float tx + float ty + float tz + + + The bitfields in this structure are non-normative since bitfield ordering is implementation-defined in C. The specification defines the normative layout. + VkSRTDataNV transformT0 + VkSRTDataNV transformT1 + uint32_t instanceCustomIndex:24 + uint32_t mask:8 + uint32_t instanceShaderBindingTableRecordOffset:24 + VkGeometryInstanceFlagsKHR flags:8 + uint64_t accelerationStructureReference + + + The bitfields in this structure are non-normative since bitfield ordering is implementation-defined in C. The specification defines the normative layout. + VkTransformMatrixKHR transformT0 + VkTransformMatrixKHR transformT1 + uint32_t instanceCustomIndex:24 + uint32_t mask:8 + uint32_t instanceShaderBindingTableRecordOffset:24 + VkGeometryInstanceFlagsKHR flags:8 + uint64_t accelerationStructureReference + + + VkAccelerationStructureInstanceKHR staticInstance + VkAccelerationStructureMatrixMotionInstanceNV matrixMotionInstance + VkAccelerationStructureSRTMotionInstanceNV srtMotionInstance + + + VkAccelerationStructureMotionInstanceTypeNV type + VkAccelerationStructureMotionInstanceFlagsNV flags + VkAccelerationStructureMotionInstanceDataNV data + + typedef void* VkRemoteAddressNV; + + VkStructureType sType + const void* pNext + VkDeviceMemory memory + VkExternalMemoryHandleTypeFlagBits handleType + + + VkStructureType sType + const void* pNext + VkBufferCollectionFUCHSIA collection + uint32_t index + + + VkStructureType sType + const void* pNext + VkBufferCollectionFUCHSIA collection + uint32_t index + + + VkStructureType sType + const void* pNext + VkBufferCollectionFUCHSIA collection + uint32_t index + + + VkStructureType sType + const void* pNext + zx_handle_t collectionToken + + + VkStructureType sType + void* pNext + uint32_t memoryTypeBits + uint32_t bufferCount + uint32_t createInfoIndex + uint64_t sysmemPixelFormat + VkFormatFeatureFlags formatFeatures + VkSysmemColorSpaceFUCHSIA sysmemColorSpaceIndex + VkComponentMapping samplerYcbcrConversionComponents + VkSamplerYcbcrModelConversion suggestedYcbcrModel + VkSamplerYcbcrRange suggestedYcbcrRange + VkChromaLocation suggestedXChromaOffset + VkChromaLocation suggestedYChromaOffset + + + VkStructureType sType + const void* pNext + VkBufferCreateInfo createInfo + VkFormatFeatureFlags requiredFormatFeatures + VkBufferCollectionConstraintsInfoFUCHSIA bufferCollectionConstraints + + + VkStructureType sType + const void* pNext + uint32_t colorSpace + + + VkStructureType sType + const void* pNext + VkImageCreateInfo imageCreateInfo + VkFormatFeatureFlags requiredFormatFeatures + VkImageFormatConstraintsFlagsFUCHSIA flags + uint64_t sysmemPixelFormat + uint32_t colorSpaceCount + const VkSysmemColorSpaceFUCHSIA* pColorSpaces + + + VkStructureType sType + const void* pNext + uint32_t formatConstraintsCount + const VkImageFormatConstraintsInfoFUCHSIA* pFormatConstraints + VkBufferCollectionConstraintsInfoFUCHSIA bufferCollectionConstraints + VkImageConstraintsInfoFlagsFUCHSIA flags + + + VkStructureType sType + const void* pNext + uint32_t minBufferCount + uint32_t maxBufferCount + uint32_t minBufferCountForCamping + uint32_t minBufferCountForDedicatedSlack + uint32_t minBufferCountForSharedSlack + + VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkCudaModuleNV) + VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkCudaFunctionNV) + + VkStructureType sType + const void* pNext + size_t dataSize + const void* pData + + + VkStructureType sType + const void* pNext + VkCudaModuleNV module + const char* pName + + + 
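Illustrative note, not part of vk.xml or of this patch: the motion-blur instance records described above (VkAccelerationStructureMatrixMotionInstanceNV, VkAccelerationStructureSRTMotionInstanceNV, and the VkAccelerationStructureMotionInstanceNV wrapper with its union data member) pack several fields into 24- and 8-bit C bitfields, which the registry explicitly marks as non-normative layout. A hedged C sketch of filling one matrix-motion instance follows; the helper name make_matrix_motion_instance and its parameters are assumed for illustration only.

    #include <vulkan/vulkan.h>

    /* Build one matrix-motion TLAS instance (VK_NV_ray_tracing_motion_blur).
     * The :24 / :8 bitfields mirror the (non-normative) C layout noted above. */
    static VkAccelerationStructureMotionInstanceNV
    make_matrix_motion_instance(uint64_t blas_address,
                                VkTransformMatrixKHR t0,
                                VkTransformMatrixKHR t1)
    {
        const VkAccelerationStructureMatrixMotionInstanceNV motion = {
            .transformT0 = t0,            /* object-to-world transform at time 0 */
            .transformT1 = t1,            /* object-to-world transform at time 1 */
            .instanceCustomIndex = 0,     /* 24-bit field */
            .mask = 0xFF,                 /* 8-bit visibility mask */
            .instanceShaderBindingTableRecordOffset = 0,
            .flags = 0,                   /* VkGeometryInstanceFlagsKHR packed into 8 bits */
            .accelerationStructureReference = blas_address,
        };
        const VkAccelerationStructureMotionInstanceNV inst = {
            .type  = VK_ACCELERATION_STRUCTURE_MOTION_INSTANCE_TYPE_MATRIX_MOTION_NV,
            .flags = 0,
            .data  = { .matrixMotionInstance = motion },
        };
        return inst;
    }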
VkStructureType sType + const void* pNext + VkCudaFunctionNV function + uint32_t gridDimX + uint32_t gridDimY + uint32_t gridDimZ + uint32_t blockDimX + uint32_t blockDimY + uint32_t blockDimZ + uint32_t sharedMemBytes + size_t paramCount + const void* const * pParams + size_t extraCount + const void* const * pExtras + + + VkStructureType sType + void* pNext + VkBool32 formatRgba10x6WithoutYCbCrSampler + + + VkStructureType sType + void* pNext + VkFormatFeatureFlags2 linearTilingFeatures + VkFormatFeatureFlags2 optimalTilingFeatures + VkFormatFeatureFlags2 bufferFeatures + + + + VkStructureType sType + void* pNext + uint32_t drmFormatModifierCount + VkDrmFormatModifierProperties2EXT* pDrmFormatModifierProperties + + + uint64_t drmFormatModifier + uint32_t drmFormatModifierPlaneCount + VkFormatFeatureFlags2 drmFormatModifierTilingFeatures + + + VkStructureType sType + void* pNext + VkFormat format + uint64_t externalFormat + VkFormatFeatureFlags2 formatFeatures + VkComponentMapping samplerYcbcrConversionComponents + VkSamplerYcbcrModelConversion suggestedYcbcrModel + VkSamplerYcbcrRange suggestedYcbcrRange + VkChromaLocation suggestedXChromaOffset + VkChromaLocation suggestedYChromaOffset + + + VkStructureType sType + const void* pNext + uint32_t viewMask + uint32_t colorAttachmentCount + const VkFormat* pColorAttachmentFormats + VkFormat depthAttachmentFormat + VkFormat stencilAttachmentFormat + + + + VkStructureType sType + const void* pNext + VkRenderingFlags flags + VkRect2D renderArea + uint32_t layerCount + uint32_t viewMask + uint32_t colorAttachmentCount + const VkRenderingAttachmentInfo* pColorAttachments + const VkRenderingAttachmentInfo* pDepthAttachment + const VkRenderingAttachmentInfo* pStencilAttachment + + + + VkStructureType sType + const void* pNext + VkImageView imageView + VkImageLayout imageLayout + VkResolveModeFlagBits resolveMode + VkImageView resolveImageView + VkImageLayout resolveImageLayout + VkAttachmentLoadOp loadOp + VkAttachmentStoreOp storeOp + VkClearValue clearValue + + + + VkStructureType sType + const void* pNext + VkImageView imageView + VkImageLayout imageLayout + VkExtent2D shadingRateAttachmentTexelSize + + + VkStructureType sType + const void* pNext + VkImageView imageView + VkImageLayout imageLayout + + + VkStructureType sType + void* pNext + VkBool32 dynamicRendering + + + + VkStructureType sType + const void* pNext + VkRenderingFlags flags + uint32_t viewMask + uint32_t colorAttachmentCount + uint32_t colorAttachmentCount + const VkFormat* pColorAttachmentFormats + VkFormat depthAttachmentFormat + VkFormat stencilAttachmentFormat + VkSampleCountFlagBits rasterizationSamples + + + + VkStructureType sType + const void* pNext + uint32_t colorAttachmentCount + const VkSampleCountFlagBits* pColorAttachmentSamples + VkSampleCountFlagBits depthStencilAttachmentSamples + + + + VkStructureType sType + const void* pNext + VkBool32 perViewAttributes + VkBool32 perViewAttributesPositionXOnly + + + VkStructureType sType + void* pNext + VkBool32 minLod + + + VkStructureType sType + const void* pNext + float minLod + + + VkStructureType sType + void* pNext + VkBool32 rasterizationOrderColorAttachmentAccess + VkBool32 rasterizationOrderDepthAttachmentAccess + VkBool32 rasterizationOrderStencilAttachmentAccess + + + + VkStructureType sType + void* pNext + VkBool32 linearColorAttachment + + + VkStructureType sType + void* pNext + VkBool32 graphicsPipelineLibrary + + + VkStructureType sType + void* pNext + VkBool32 graphicsPipelineLibraryFastLinking + VkBool32 
graphicsPipelineLibraryIndependentInterpolationDecoration + + + VkStructureType sType + const void* pNext + VkGraphicsPipelineLibraryFlagsEXT flags + + + VkStructureType sType + void* pNext + VkBool32 descriptorSetHostMapping + + + VkStructureType sType + const void* pNext + VkDescriptorSetLayout descriptorSetLayout + uint32_t binding + + + VkStructureType sType + void* pNext + size_t descriptorOffset + uint32_t descriptorSize + + + VkStructureType sType + void* pNext + VkBool32 nestedCommandBuffer + VkBool32 nestedCommandBufferRendering + VkBool32 nestedCommandBufferSimultaneousUse + + + VkStructureType sType + void* pNext + uint32_t maxCommandBufferNestingLevel + + + VkStructureType sType + void* pNext + VkBool32 shaderModuleIdentifier + + + VkStructureType sType + void* pNext + uint8_t shaderModuleIdentifierAlgorithmUUID[VK_UUID_SIZE] + + + VkStructureType sType + const void* pNext + uint32_t identifierSize + const uint8_t* pIdentifier + + + VkStructureType sType + void* pNext + uint32_t identifierSize + uint8_t identifier[VK_MAX_SHADER_MODULE_IDENTIFIER_SIZE_EXT] + + + VkStructureType sType + const void* pNext + VkImageCompressionFlagsEXT flags + uint32_t compressionControlPlaneCount + VkImageCompressionFixedRateFlagsEXT* pFixedRateFlags + + + VkStructureType sType + void* pNext + VkBool32 imageCompressionControl + + + VkStructureType sType + void* pNext + VkImageCompressionFlagsEXT imageCompressionFlags + VkImageCompressionFixedRateFlagsEXT imageCompressionFixedRateFlags + + + VkStructureType sType + void* pNext + VkBool32 imageCompressionControlSwapchain + + + VkStructureType sType + void* pNext + VkImageSubresource imageSubresource + + + + VkStructureType sType + void* pNext + VkSubresourceLayout subresourceLayout + + + + VkStructureType sType + const void* pNext + VkBool32 disallowMerging + + + uint32_t postMergeSubpassCount + + + VkStructureType sType + const void* pNext + VkRenderPassCreationFeedbackInfoEXT* pRenderPassFeedback + + + VkSubpassMergeStatusEXT subpassMergeStatus + char description[VK_MAX_DESCRIPTION_SIZE] + uint32_t postMergeIndex + + + VkStructureType sType + const void* pNext + VkRenderPassSubpassFeedbackInfoEXT* pSubpassFeedback + + + VkStructureType sType + void* pNext + VkBool32 subpassMergeFeedback + + + VkStructureType sType + const void* pNext + VkMicromapTypeEXT type + VkBuildMicromapFlagsEXT flags + VkBuildMicromapModeEXT mode + VkMicromapEXT dstMicromap + uint32_t usageCountsCount + const VkMicromapUsageEXT* pUsageCounts + const VkMicromapUsageEXT* const* ppUsageCounts + VkDeviceOrHostAddressConstKHR data + VkDeviceOrHostAddressKHR scratchData + VkDeviceOrHostAddressConstKHR triangleArray + VkDeviceSize triangleArrayStride + + + VkStructureType sType + const void* pNext + VkMicromapCreateFlagsEXT createFlags + VkBuffer buffer + VkDeviceSize offsetSpecified in bytes + VkDeviceSize size + VkMicromapTypeEXT type + VkDeviceAddress deviceAddress + + + VkStructureType sType + const void* pNext + const uint8_t* pVersionData + + + VkStructureType sType + const void* pNext + VkMicromapEXT src + VkMicromapEXT dst + VkCopyMicromapModeEXT mode + + + VkStructureType sType + const void* pNext + VkMicromapEXT src + VkDeviceOrHostAddressKHR dst + VkCopyMicromapModeEXT mode + + + VkStructureType sType + const void* pNext + VkDeviceOrHostAddressConstKHR src + VkMicromapEXT dst + VkCopyMicromapModeEXT mode + + + VkStructureType sType + const void* pNext + VkDeviceSize micromapSize + VkDeviceSize buildScratchSize + VkBool32 discardable + + + uint32_t count + uint32_t 
subdivisionLevel + uint32_t formatInterpretation depends on parent type + + + uint32_t dataOffsetSpecified in bytes + uint16_t subdivisionLevel + uint16_t format + + + VkStructureType sType + void* pNext + VkBool32 micromap + VkBool32 micromapCaptureReplay + VkBool32 micromapHostCommands + + + VkStructureType sType + void* pNext + uint32_t maxOpacity2StateSubdivisionLevel + uint32_t maxOpacity4StateSubdivisionLevel + + + VkStructureType sType + void* pNext + VkIndexType indexType + VkDeviceOrHostAddressConstKHR indexBuffer + VkDeviceSize indexStride + uint32_t baseTriangle + uint32_t usageCountsCount + const VkMicromapUsageEXT* pUsageCounts + const VkMicromapUsageEXT* const* ppUsageCounts + VkMicromapEXT micromap + + + VkStructureType sType + void* pNext + VkBool32 displacementMicromap + + + VkStructureType sType + void* pNext + uint32_t maxDisplacementMicromapSubdivisionLevel + + + VkStructureType sType + void* pNext + + VkFormat displacementBiasAndScaleFormat + VkFormat displacementVectorFormat + + VkDeviceOrHostAddressConstKHR displacementBiasAndScaleBuffer + VkDeviceSize displacementBiasAndScaleStride + VkDeviceOrHostAddressConstKHR displacementVectorBuffer + VkDeviceSize displacementVectorStride + VkDeviceOrHostAddressConstKHR displacedMicromapPrimitiveFlags + VkDeviceSize displacedMicromapPrimitiveFlagsStride + VkIndexType indexType + VkDeviceOrHostAddressConstKHR indexBuffer + VkDeviceSize indexStride + + uint32_t baseTriangle + + uint32_t usageCountsCount + const VkMicromapUsageEXT* pUsageCounts + const VkMicromapUsageEXT* const* ppUsageCounts + + VkMicromapEXT micromap + + + VkStructureType sType + void* pNext + uint8_t pipelineIdentifier[VK_UUID_SIZE] + + + VkStructureType sType + void* pNext + VkBool32 pipelinePropertiesIdentifier + + + VkStructureType sType + void* pNext + VkBool32 shaderEarlyAndLateFragmentTests + + + VkStructureType sType + const void* pNext + VkBool32 acquireUnmodifiedMemory + + + VkStructureType sType + const void* pNext + VkExportMetalObjectTypeFlagBitsEXT exportObjectType + + + VkStructureType sType + const void* pNext + + + VkStructureType sType + const void* pNext + MTLDevice_id mtlDevice + + + VkStructureType sType + const void* pNext + VkQueue queue + MTLCommandQueue_id mtlCommandQueue + + + VkStructureType sType + const void* pNext + VkDeviceMemory memory + MTLBuffer_id mtlBuffer + + + VkStructureType sType + const void* pNext + MTLBuffer_id mtlBuffer + + + VkStructureType sType + const void* pNext + VkImage image + VkImageView imageView + VkBufferView bufferView + VkImageAspectFlagBits plane + MTLTexture_id mtlTexture + + + VkStructureType sType + const void* pNext + VkImageAspectFlagBits plane + MTLTexture_id mtlTexture + + + VkStructureType sType + const void* pNext + VkImage image + IOSurfaceRef ioSurface + + + VkStructureType sType + const void* pNext + IOSurfaceRef ioSurface + + + VkStructureType sType + const void* pNext + VkSemaphore semaphore + VkEvent event + MTLSharedEvent_id mtlSharedEvent + + + VkStructureType sType + const void* pNext + MTLSharedEvent_id mtlSharedEvent + + + VkStructureType sType + void* pNext + VkBool32 nonSeamlessCubeMap + + + VkStructureType sType + void* pNext + VkBool32 pipelineRobustness + + + VkStructureType sType + const void* pNext + VkPipelineRobustnessBufferBehaviorEXT storageBuffers + VkPipelineRobustnessBufferBehaviorEXT uniformBuffers + VkPipelineRobustnessBufferBehaviorEXT vertexInputs + VkPipelineRobustnessImageBehaviorEXT images + + + VkStructureType sType + void* pNext + 
VkPipelineRobustnessBufferBehaviorEXT defaultRobustnessStorageBuffers + VkPipelineRobustnessBufferBehaviorEXT defaultRobustnessUniformBuffers + VkPipelineRobustnessBufferBehaviorEXT defaultRobustnessVertexInputs + VkPipelineRobustnessImageBehaviorEXT defaultRobustnessImages + + + VkStructureType sType + const void* pNext + VkOffset2D filterCenter + VkExtent2D filterSize + uint32_t numPhases + + + VkStructureType sType + void* pNext + VkBool32 textureSampleWeighted + VkBool32 textureBoxFilter + VkBool32 textureBlockMatch + + + VkStructureType sType + void* pNext + uint32_t maxWeightFilterPhases + VkExtent2D maxWeightFilterDimension + VkExtent2D maxBlockMatchRegion + VkExtent2D maxBoxFilterBlockSize + + + VkStructureType sType + void* pNext + VkBool32 tileProperties + + + VkStructureType sType + void* pNext + VkExtent3D tileSize + VkExtent2D apronSize + VkOffset2D origin + + + VkStructureType sType + void* pNext + VkBool32 amigoProfiling + + + VkStructureType sType + const void* pNext + uint64_t firstDrawTimestamp + uint64_t swapBufferTimestamp + + + VkStructureType sType + void* pNext + VkBool32 attachmentFeedbackLoopLayout + + + VkStructureType sType + void* pNext + VkBool32 depthClampZeroOne + + + VkStructureType sType + void* pNext + VkBool32 reportAddressBinding + + + VkStructureType sType + void* pNext + VkDeviceAddressBindingFlagsEXT flags + VkDeviceAddress baseAddress + VkDeviceSize size + VkDeviceAddressBindingTypeEXT bindingType + + + VkStructureType sType + void* pNext + VkBool32 opticalFlow + + + VkStructureType sType + void* pNext + VkOpticalFlowGridSizeFlagsNV supportedOutputGridSizes + VkOpticalFlowGridSizeFlagsNV supportedHintGridSizes + VkBool32 hintSupported + VkBool32 costSupported + VkBool32 bidirectionalFlowSupported + VkBool32 globalFlowSupported + uint32_t minWidth + uint32_t minHeight + uint32_t maxWidth + uint32_t maxHeight + uint32_t maxNumRegionsOfInterest + + + VkStructureType sType + const void* pNext + VkOpticalFlowUsageFlagsNV usage + + + VkStructureType sType + const void* pNext + VkFormat format + + + VkStructureType sType + void* pNext + uint32_t width + uint32_t height + VkFormat imageFormat + VkFormat flowVectorFormat + VkFormat costFormat + VkOpticalFlowGridSizeFlagsNV outputGridSize + VkOpticalFlowGridSizeFlagsNV hintGridSize + VkOpticalFlowPerformanceLevelNV performanceLevel + VkOpticalFlowSessionCreateFlagsNV flags + + NV internal use only + VkStructureType sType + void* pNext + uint32_t id + uint32_t size + const void* pPrivateData + + + VkStructureType sType + void* pNext + VkOpticalFlowExecuteFlagsNV flags + uint32_t regionCount + const VkRect2D* pRegions + + + VkStructureType sType + void* pNext + VkBool32 deviceFault + VkBool32 deviceFaultVendorBinary + + + VkDeviceFaultAddressTypeEXT addressType + VkDeviceAddress reportedAddress + VkDeviceSize addressPrecision + + + char description[VK_MAX_DESCRIPTION_SIZE]Free-form description of the fault + uint64_t vendorFaultCode + uint64_t vendorFaultData + + + VkStructureType sType + void* pNext + uint32_t addressInfoCount + uint32_t vendorInfoCount + VkDeviceSize vendorBinarySizeSpecified in bytes + + + VkStructureType sType + void* pNext + char description[VK_MAX_DESCRIPTION_SIZE]Free-form description of the fault + VkDeviceFaultAddressInfoEXT* pAddressInfos + VkDeviceFaultVendorInfoEXT* pVendorInfos + void* pVendorBinaryData + + + The fields in this structure are non-normative since structure packing is implementation-defined in C. The specification defines the normative layout. 
+ uint32_t headerSize + VkDeviceFaultVendorBinaryHeaderVersionEXT headerVersion + uint32_t vendorID + uint32_t deviceID + uint32_t driverVersion + uint8_t pipelineCacheUUID[VK_UUID_SIZE] + uint32_t applicationNameOffset + uint32_t applicationVersion + uint32_t engineNameOffset + uint32_t engineVersion + uint32_t apiVersion + + + VkStructureType sType + void* pNext + VkBool32 pipelineLibraryGroupHandles + + + VkStructureType sType + const void* pNext + float depthBiasConstantFactor + float depthBiasClamp + float depthBiasSlopeFactor + + + VkStructureType sType + const void* pNext + VkDepthBiasRepresentationEXT depthBiasRepresentation + VkBool32 depthBiasExact + + + VkDeviceAddress srcAddress + VkDeviceAddress dstAddress + VkDeviceSize compressedSizeSpecified in bytes + VkDeviceSize decompressedSizeSpecified in bytes + VkMemoryDecompressionMethodFlagsNV decompressionMethod + + + VkStructureType sType + void* pNext + uint64_t shaderCoreMask + uint32_t shaderCoreCount + uint32_t shaderWarpsPerCore + + + VkStructureType sType + void* pNext + VkBool32 shaderCoreBuiltins + + + VkStructureType sType + const void* pNext + VkFrameBoundaryFlagsEXT flags + uint64_t frameID + uint32_t imageCount + const VkImage* pImages + uint32_t bufferCount + const VkBuffer* pBuffers + uint64_t tagName + size_t tagSize + const void* pTag + + + VkStructureType sType + void* pNext + VkBool32 frameBoundary + + + VkStructureType sType + void* pNext + VkBool32 dynamicRenderingUnusedAttachments + + + VkStructureType sType + void* pNext + VkPresentModeKHR presentMode + + + VkStructureType sType + void* pNext + VkPresentScalingFlagsEXT supportedPresentScaling + VkPresentGravityFlagsEXT supportedPresentGravityX + VkPresentGravityFlagsEXT supportedPresentGravityY + VkExtent2D minScaledImageExtentSupported minimum image width and height for the surface when scaling is used + VkExtent2D maxScaledImageExtentSupported maximum image width and height for the surface when scaling is used + + + VkStructureType sType + void* pNext + uint32_t presentModeCount + VkPresentModeKHR* pPresentModesOutput list of present modes compatible with the one specified in VkSurfacePresentModeEXT + + + VkStructureType sType + void* pNext + VkBool32 swapchainMaintenance1 + + + VkStructureType sType + const void* pNext + uint32_t swapchainCountCopy of VkPresentInfoKHR::swapchainCount + const VkFence* pFencesFence to signal for each swapchain + + + VkStructureType sType + const void* pNext + uint32_t presentModeCountLength of the pPresentModes array + const VkPresentModeKHR* pPresentModesPresentation modes which will be usable with this swapchain + + + VkStructureType sType + const void* pNext + uint32_t swapchainCountCopy of VkPresentInfoKHR::swapchainCount + const VkPresentModeKHR* pPresentModesPresentation mode for each swapchain + + + VkStructureType sType + const void* pNext + VkPresentScalingFlagsEXT scalingBehavior + VkPresentGravityFlagsEXT presentGravityX + VkPresentGravityFlagsEXT presentGravityY + + + VkStructureType sType + const void* pNext + VkSwapchainKHR swapchainSwapchain for which images are being released + uint32_t imageIndexCountNumber of indices to release + const uint32_t* pImageIndicesIndices of which presentable images to release + + + VkStructureType sType + void* pNext + VkBool32 depthBiasControl + VkBool32 leastRepresentableValueForceUnormRepresentation + VkBool32 floatRepresentation + VkBool32 depthBiasExact + + + VkStructureType sType + void* pNext + VkBool32 rayTracingInvocationReorder + + + VkStructureType sType + void* 
pNext + VkRayTracingInvocationReorderModeNV rayTracingInvocationReorderReorderingHint + + + VkStructureType sType + void* pNext + VkBool32 extendedSparseAddressSpace + + + VkStructureType sType + void* pNext + VkDeviceSize extendedSparseAddressSpaceSizeTotal address space available for extended sparse allocations (bytes) + VkImageUsageFlags extendedSparseImageUsageFlagsBitfield of which image usages are supported for extended sparse allocations + VkBufferUsageFlags extendedSparseBufferUsageFlagsBitfield of which buffer usages are supported for extended sparse allocations + + + VkStructureType sType + void* pNext + VkDirectDriverLoadingFlagsLUNARG flags + PFN_vkGetInstanceProcAddrLUNARG pfnGetInstanceProcAddr + + + VkStructureType sType + const void* pNext + VkDirectDriverLoadingModeLUNARG mode + uint32_t driverCount + const VkDirectDriverLoadingInfoLUNARG* pDrivers + + + VkStructureType sType + void* pNext + VkBool32 multiviewPerViewViewports + + + VkStructureType sType + void* pNext + VkBool32 rayTracingPositionFetch + + + VkStructureType sType + const void* pNext + const VkImageCreateInfo* pCreateInfo + const VkImageSubresource2KHR* pSubresource + + + VkStructureType sType + void* pNext + uint32_t pixelRate + uint32_t texelRate + uint32_t fmaRate + + + VkStructureType sType + void* pNext + VkBool32 multiviewPerViewRenderAreas + + + VkStructureType sType + const void* pNext + uint32_t perViewRenderAreaCount + const VkRect2D* pPerViewRenderAreas + + + VkStructureType sType + const void* pNext + void* pQueriedLowLatencyData + + + VkStructureType sType + const void* pNext + VkMemoryMapFlags flags + VkDeviceMemory memory + VkDeviceSize offset + VkDeviceSize size + + + VkStructureType sType + const void* pNext + VkMemoryUnmapFlagsKHR flags + VkDeviceMemory memory + + + VkStructureType sType + void* pNext + VkBool32 shaderObject + + + VkStructureType sType + void* pNext + uint8_t shaderBinaryUUID[VK_UUID_SIZE] + uint32_t shaderBinaryVersion + + + VkStructureType sType + const void* pNext + VkShaderCreateFlagsEXT flags + VkShaderStageFlagBits stage + VkShaderStageFlags nextStage + VkShaderCodeTypeEXT codeType + size_t codeSize + const void* pCode + const char* pName + uint32_t setLayoutCount + const VkDescriptorSetLayout* pSetLayouts + uint32_t pushConstantRangeCount + const VkPushConstantRange* pPushConstantRanges + const VkSpecializationInfo* pSpecializationInfo + + + VkStructureType sType + void* pNext + VkBool32 shaderTileImageColorReadAccess + VkBool32 shaderTileImageDepthReadAccess + VkBool32 shaderTileImageStencilReadAccess + + + VkStructureType sType + void* pNext + VkBool32 shaderTileImageCoherentReadAccelerated + VkBool32 shaderTileImageReadSampleFromPixelRateInvocation + VkBool32 shaderTileImageReadFromHelperInvocation + + + VkStructureType sType + const void* pNext + struct _screen_buffer* buffer + + + VkStructureType sType + void* pNext + VkDeviceSize allocationSize + uint32_t memoryTypeBits + + + VkStructureType sType + void* pNext + VkFormat format + uint64_t externalFormat + uint64_t screenUsage + VkFormatFeatureFlags formatFeatures + VkComponentMapping samplerYcbcrConversionComponents + VkSamplerYcbcrModelConversion suggestedYcbcrModel + VkSamplerYcbcrRange suggestedYcbcrRange + VkChromaLocation suggestedXChromaOffset + VkChromaLocation suggestedYChromaOffset + + + VkStructureType sType + void* pNext + uint64_t externalFormat + + + VkStructureType sType + void* pNext + VkBool32 screenBufferImport + + + VkStructureType sType + void* pNext + VkBool32 cooperativeMatrix + VkBool32 
cooperativeMatrixRobustBufferAccess + + + VkStructureType sType + void* pNext + uint32_t MSize + uint32_t NSize + uint32_t KSize + VkComponentTypeKHR AType + VkComponentTypeKHR BType + VkComponentTypeKHR CType + VkComponentTypeKHR ResultType + VkBool32 saturatingAccumulation + VkScopeKHR scope + + + VkStructureType sType + void* pNext + VkShaderStageFlags cooperativeMatrixSupportedStages + + + VkStructureType sType + void* pNext + uint32_t maxExecutionGraphDepth + uint32_t maxExecutionGraphShaderOutputNodes + uint32_t maxExecutionGraphShaderPayloadSize + uint32_t maxExecutionGraphShaderPayloadCount + uint32_t executionGraphDispatchAddressAlignment + + + VkStructureType sType + void* pNext + VkBool32 shaderEnqueue + + + VkStructureType sType + const void* pNext + VkPipelineCreateFlags flags + uint32_t stageCount + const VkPipelineShaderStageCreateInfo* pStages + const VkPipelineLibraryCreateInfoKHR* pLibraryInfo + VkPipelineLayout layout + VkPipeline basePipelineHandle + int32_t basePipelineIndex + + + VkStructureType sType + const void* pNext + const char* pName + uint32_t index + + + VkStructureType sType + void* pNext + VkDeviceSize size + + + uint32_t nodeIndex + uint32_t payloadCount + VkDeviceOrHostAddressConstAMDX payloads + uint64_t payloadStride + + + uint32_t count + VkDeviceOrHostAddressConstAMDX infos + uint64_t stride + + + VkStructureType sType + const void* pNext + VkResult* pResult + + + VkStructureType sType + const void* pNext + VkShaderStageFlags stageFlags + VkPipelineLayout layout + uint32_t firstSet + uint32_t descriptorSetCount + const VkDescriptorSet* pDescriptorSets + uint32_t dynamicOffsetCount + const uint32_t* pDynamicOffsets + + + VkStructureType sType + const void* pNext + VkPipelineLayout layout + VkShaderStageFlags stageFlags + uint32_t offset + uint32_t size + const void* pValues + + + VkStructureType sType + const void* pNext + VkShaderStageFlags stageFlags + VkPipelineLayout layout + uint32_t set + uint32_t descriptorWriteCount + const VkWriteDescriptorSet* pDescriptorWrites + + + VkStructureType sType + const void* pNext + VkDescriptorUpdateTemplate descriptorUpdateTemplate + VkPipelineLayout layout + uint32_t set + const void* pData + + + VkStructureType sType + const void* pNext + VkShaderStageFlags stageFlags + VkPipelineLayout layout + uint32_t firstSet + uint32_t setCount + const uint32_t* pBufferIndices + const VkDeviceSize* pOffsets + + + VkStructureType sType + const void* pNext + VkShaderStageFlags stageFlags + VkPipelineLayout layout + uint32_t set + + + VkStructureType sType + void* pNext + VkBool32 cubicRangeClamp + + + VkStructureType sType + void* pNext + VkBool32 ycbcrDegamma + + + VkStructureType sType + void* pNext + VkBool32 enableYDegamma + VkBool32 enableCbCrDegamma + + + VkStructureType sType + void* pNext + VkBool32 selectableCubicWeights + + + VkStructureType sType + const void* pNext + VkCubicFilterWeightsQCOM cubicWeights + + + VkStructureType sType + const void* pNext + VkCubicFilterWeightsQCOM cubicWeights + + + VkStructureType sType + void* pNext + VkBool32 textureBlockMatch2 + + + VkStructureType sType + void* pNext + VkExtent2D maxBlockMatchWindow + + + VkStructureType sType + const void* pNext + VkExtent2D windowExtent + VkBlockMatchWindowCompareModeQCOM windowCompareMode + + + VkStructureType sType + void* pNext + VkBool32 descriptorPoolOverallocation + + + VkStructureType sType + void* pNext + VkLayeredDriverUnderlyingApiMSFT underlyingAPI + + + VkStructureType sType + void* pNext + VkBool32 perStageDescriptorSet + 
VkBool32 dynamicPipelineLayout + + + VkStructureType sType + void* pNext + VkBool32 externalFormatResolve + + + VkStructureType sType + void* pNext + VkBool32 nullColorAttachmentWithExternalFormatResolve + VkChromaLocation externalFormatResolveChromaOffsetX + VkChromaLocation externalFormatResolveChromaOffsetY + + + VkStructureType sType + void* pNext + VkFormat colorAttachmentFormat + + + VkStructureType sType + const void* pNext + VkBool32 lowLatencyMode + VkBool32 lowLatencyBoost + uint32_t minimumIntervalUs + + + VkStructureType sType + const void* pNext + VkSemaphore signalSemaphore + uint64_t value + + + VkStructureType sType + const void* pNext + uint64_t presentID + VkLatencyMarkerNV marker + + + VkStructureType sType + const void* pNext + uint32_t timingCount + VkLatencyTimingsFrameReportNV* pTimings + + + VkStructureType sType + const void* pNext + uint64_t presentID + uint64_t inputSampleTimeUs + uint64_t simStartTimeUs + uint64_t simEndTimeUs + uint64_t renderSubmitStartTimeUs + uint64_t renderSubmitEndTimeUs + uint64_t presentStartTimeUs + uint64_t presentEndTimeUs + uint64_t driverStartTimeUs + uint64_t driverEndTimeUs + uint64_t osRenderQueueStartTimeUs + uint64_t osRenderQueueEndTimeUs + uint64_t gpuRenderStartTimeUs + uint64_t gpuRenderEndTimeUs + + + VkStructureType sType + const void* pNext + VkOutOfBandQueueTypeNV queueType + + + VkStructureType sType + const void* pNext + uint64_t presentID + + + VkStructureType sType + const void* pNext + VkBool32 latencyModeEnable + + + VkStructureType sType + const void* pNext + uint32_t presentModeCount + VkPresentModeKHR* pPresentModes + + + VkStructureType sType + void* pNext + VkBool32 cudaKernelLaunchFeatures + + + VkStructureType sType + void* pNext + uint32_t computeCapabilityMinor + uint32_t computeCapabilityMajor + + + VkStructureType sType + void* pNext + uint32_t shaderCoreCount + + + VkStructureType sType + void* pNext + VkBool32 schedulingControls + + + VkStructureType sType + void* pNext + VkPhysicalDeviceSchedulingControlsFlagsARM schedulingControlsFlags + + + VkStructureType sType + void* pNext + VkBool32 relaxedLineRasterization + + + VkStructureType sType + void* pNext + VkBool32 renderPassStriped + + + VkStructureType sType + void* pNext + VkExtent2D renderPassStripeGranularity + uint32_t maxRenderPassStripes + + + VkStructureType sType + const void* pNext + VkRect2D stripeArea + + + VkStructureType sType + const void* pNext + uint32_t stripeInfoCount + const VkRenderPassStripeInfoARM* pStripeInfos + + + VkStructureType sType + const void* pNext + uint32_t stripeSemaphoreInfoCount + const VkSemaphoreSubmitInfo* pStripeSemaphoreInfos + + + VkStructureType sType + void* pNext + VkBool32 shaderMaximalReconvergence + + + VkStructureType sType + void* pNext + VkBool32 shaderSubgroupRotate + VkBool32 shaderSubgroupRotateClustered + + + VkStructureType sType + void* pNext + VkBool32 shaderExpectAssume + + + VkStructureType sType + void* pNext + VkBool32 shaderFloatControls2 + + + VkStructureType sType + void* pNext + VkBool32 dynamicRenderingLocalRead + + + VkStructureType sType + const void* pNext + uint32_t colorAttachmentCount + const uint32_t* pColorAttachmentLocations + + + VkStructureType sType + const void* pNext + uint32_t colorAttachmentCount + const uint32_t* pColorAttachmentInputIndices + const uint32_t* pDepthInputAttachmentIndex + const uint32_t* pStencilInputAttachmentIndex + + + VkStructureType sType + void* pNext + VkBool32 shaderQuadControl + + + VkStructureType sType + void* pNext + VkBool32 
shaderFloat16VectorAtomics + + + VkStructureType sType + void* pNext + VkBool32 memoryMapPlaced + VkBool32 memoryMapRangePlaced + VkBool32 memoryUnmapReserve + + + VkStructureType sType + void* pNext + VkDeviceSize minPlacedMemoryMapAlignment + + + VkStructureType sType + const void* pNext + void* pPlacedAddress + + + VkStructureType sType + void* pNext + VkBool32 shaderRawAccessChains + + + + + Vulkan enumerant (token) definitions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Unlike OpenGL, most tokens in Vulkan are actual typed enumerants in + their own numeric namespaces. The "name" attribute is the C enum + type name, and is pulled in from a type tag definition above + (slightly clunky, but retains the type / enum distinction). "type" + attributes of "enum" or "bitmask" indicate that these values should + be generated inside an appropriate definition. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + value="4" reserved for VK_KHR_sampler_mirror_clamp_to_edge + enum VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; do not + alias! + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Return codes (positive values) + + + + + + + Error codes (negative values) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Flags + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + WSI Extensions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + NVX_device_generated_commands formerly used these enum values, but that extension has been removed + value 31 / name VK_DEBUG_REPORT_OBJECT_TYPE_OBJECT_TABLE_NVX_EXT + value 32 / name VK_DEBUG_REPORT_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX_EXT + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
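The VkResult groups above separate return codes (positive values) from error codes (negative values). A minimal sketch in C of how callers commonly branch on that sign convention, assuming only the standard vulkan.h definitions; the helper name vk_check is illustrative and not part of the registry:

#include <vulkan/vulkan.h>
#include <stdio.h>

/* Illustrative helper (not defined by the registry): VkResult error codes are
 * negative, while VK_SUCCESS (0) and other non-error status codes such as
 * VK_SUBOPTIMAL_KHR are non-negative, so a sign check separates the two groups. */
static int vk_check(VkResult result, const char *what)
{
    if (result < 0) {
        fprintf(stderr, "%s failed (VkResult %d)\n", what, (int)result);
        return 0; /* error code */
    }
    return 1; /* success or non-error status code */
}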
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Vendor IDs are now represented as enums instead of the old + <vendorids> tag, allowing them to be included in the + API headers. + + + + + + + + + + + Driver IDs are now represented as enums instead of the old + <driverids> tag, allowing them to be included in the + API headers. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + bitpos 17-31 are specified by extensions to the original VkAccessFlagBits enum + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + bitpos 17-31 are specified by extensions to the original VkPipelineStageFlagBits enum + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + VkResult vkCreateInstance + const VkInstanceCreateInfo* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkInstance* pInstance + + + void vkDestroyInstance + VkInstance instance + const VkAllocationCallbacks* pAllocator + + all sname:VkPhysicalDevice objects enumerated from pname:instance + + + + VkResult vkEnumeratePhysicalDevices + VkInstance instance + uint32_t* pPhysicalDeviceCount + VkPhysicalDevice* pPhysicalDevices + + + PFN_vkVoidFunction vkGetDeviceProcAddr + VkDevice device + const char* pName + + + PFN_vkVoidFunction vkGetInstanceProcAddr + VkInstance instance + const char* pName + + + void vkGetPhysicalDeviceProperties + VkPhysicalDevice physicalDevice + VkPhysicalDeviceProperties* pProperties + + + void vkGetPhysicalDeviceQueueFamilyProperties + VkPhysicalDevice physicalDevice + uint32_t* pQueueFamilyPropertyCount + VkQueueFamilyProperties* pQueueFamilyProperties + + + void vkGetPhysicalDeviceMemoryProperties + VkPhysicalDevice physicalDevice + VkPhysicalDeviceMemoryProperties* pMemoryProperties + + + void vkGetPhysicalDeviceFeatures + VkPhysicalDevice physicalDevice + VkPhysicalDeviceFeatures* pFeatures + + + void 
vkGetPhysicalDeviceFormatProperties + VkPhysicalDevice physicalDevice + VkFormat format + VkFormatProperties* pFormatProperties + + + VkResult vkGetPhysicalDeviceImageFormatProperties + VkPhysicalDevice physicalDevice + VkFormat format + VkImageType type + VkImageTiling tiling + VkImageUsageFlags usage + VkImageCreateFlags flags + VkImageFormatProperties* pImageFormatProperties + + + VkResult vkCreateDevice + VkPhysicalDevice physicalDevice + const VkDeviceCreateInfo* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkDevice* pDevice + + + VkResult vkCreateDevice + VkPhysicalDevice physicalDevice + const VkDeviceCreateInfo* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkDevice* pDevice + + + void vkDestroyDevice + VkDevice device + const VkAllocationCallbacks* pAllocator + + all sname:VkQueue objects created from pname:device + + + + VkResult vkEnumerateInstanceVersion + uint32_t* pApiVersion + + + VkResult vkEnumerateInstanceLayerProperties + uint32_t* pPropertyCount + VkLayerProperties* pProperties + + + VkResult vkEnumerateInstanceExtensionProperties + const char* pLayerName + uint32_t* pPropertyCount + VkExtensionProperties* pProperties + + + VkResult vkEnumerateDeviceLayerProperties + VkPhysicalDevice physicalDevice + uint32_t* pPropertyCount + VkLayerProperties* pProperties + + + VkResult vkEnumerateDeviceLayerProperties + VkPhysicalDevice physicalDevice + uint32_t* pPropertyCount + VkLayerProperties* pProperties + + + + VkResult vkEnumerateDeviceExtensionProperties + VkPhysicalDevice physicalDevice + const char* pLayerName + uint32_t* pPropertyCount + VkExtensionProperties* pProperties + + + void vkGetDeviceQueue + VkDevice device + uint32_t queueFamilyIndex + uint32_t queueIndex + VkQueue* pQueue + + + VkResult vkQueueSubmit + VkQueue queue + uint32_t submitCount + const VkSubmitInfo* pSubmits + VkFence fence + + + VkResult vkQueueWaitIdle + VkQueue queue + + + VkResult vkDeviceWaitIdle + VkDevice device + + all sname:VkQueue objects created from pname:device + + + + VkResult vkAllocateMemory + VkDevice device + const VkMemoryAllocateInfo* pAllocateInfo + const VkAllocationCallbacks* pAllocator + VkDeviceMemory* pMemory + + + void vkFreeMemory + VkDevice device + VkDeviceMemory memory + const VkAllocationCallbacks* pAllocator + + + VkResult vkMapMemory + VkDevice device + VkDeviceMemory memory + VkDeviceSize offset + VkDeviceSize size + VkMemoryMapFlags flags + void** ppData + + + void vkUnmapMemory + VkDevice device + VkDeviceMemory memory + + + VkResult vkFlushMappedMemoryRanges + VkDevice device + uint32_t memoryRangeCount + const VkMappedMemoryRange* pMemoryRanges + + + VkResult vkInvalidateMappedMemoryRanges + VkDevice device + uint32_t memoryRangeCount + const VkMappedMemoryRange* pMemoryRanges + + + void vkGetDeviceMemoryCommitment + VkDevice device + VkDeviceMemory memory + VkDeviceSize* pCommittedMemoryInBytes + + + void vkGetBufferMemoryRequirements + VkDevice device + VkBuffer buffer + VkMemoryRequirements* pMemoryRequirements + + + VkResult vkBindBufferMemory + VkDevice device + VkBuffer buffer + VkDeviceMemory memory + VkDeviceSize memoryOffset + + + void vkGetImageMemoryRequirements + VkDevice device + VkImage image + VkMemoryRequirements* pMemoryRequirements + + + VkResult vkBindImageMemory + VkDevice device + VkImage image + VkDeviceMemory memory + VkDeviceSize memoryOffset + + + void vkGetImageSparseMemoryRequirements + VkDevice device + VkImage image + uint32_t* pSparseMemoryRequirementCount + VkSparseImageMemoryRequirements* 
pSparseMemoryRequirements + + + void vkGetPhysicalDeviceSparseImageFormatProperties + VkPhysicalDevice physicalDevice + VkFormat format + VkImageType type + VkSampleCountFlagBits samples + VkImageUsageFlags usage + VkImageTiling tiling + uint32_t* pPropertyCount + VkSparseImageFormatProperties* pProperties + + + VkResult vkQueueBindSparse + VkQueue queue + uint32_t bindInfoCount + const VkBindSparseInfo* pBindInfo + VkFence fence + + + VkResult vkCreateFence + VkDevice device + const VkFenceCreateInfo* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkFence* pFence + + + void vkDestroyFence + VkDevice device + VkFence fence + const VkAllocationCallbacks* pAllocator + + + VkResult vkResetFences + VkDevice device + uint32_t fenceCount + const VkFence* pFences + + + VkResult vkGetFenceStatus + VkDevice device + VkFence fence + + + VkResult vkWaitForFences + VkDevice device + uint32_t fenceCount + const VkFence* pFences + VkBool32 waitAll + uint64_t timeout + + + VkResult vkCreateSemaphore + VkDevice device + const VkSemaphoreCreateInfo* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkSemaphore* pSemaphore + + + void vkDestroySemaphore + VkDevice device + VkSemaphore semaphore + const VkAllocationCallbacks* pAllocator + + + VkResult vkCreateEvent + VkDevice device + const VkEventCreateInfo* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkEvent* pEvent + + + void vkDestroyEvent + VkDevice device + VkEvent event + const VkAllocationCallbacks* pAllocator + + + VkResult vkGetEventStatus + VkDevice device + VkEvent event + + + VkResult vkSetEvent + VkDevice device + VkEvent event + + + VkResult vkResetEvent + VkDevice device + VkEvent event + + + VkResult vkCreateQueryPool + VkDevice device + const VkQueryPoolCreateInfo* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkQueryPool* pQueryPool + + + void vkDestroyQueryPool + VkDevice device + VkQueryPool queryPool + const VkAllocationCallbacks* pAllocator + + + VkResult vkGetQueryPoolResults + VkDevice device + VkQueryPool queryPool + uint32_t firstQuery + uint32_t queryCount + size_t dataSize + void* pData + VkDeviceSize stride + VkQueryResultFlags flags + + + void vkResetQueryPool + VkDevice device + VkQueryPool queryPool + uint32_t firstQuery + uint32_t queryCount + + + + VkResult vkCreateBuffer + VkDevice device + const VkBufferCreateInfo* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkBuffer* pBuffer + + + void vkDestroyBuffer + VkDevice device + VkBuffer buffer + const VkAllocationCallbacks* pAllocator + + + VkResult vkCreateBufferView + VkDevice device + const VkBufferViewCreateInfo* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkBufferView* pView + + + void vkDestroyBufferView + VkDevice device + VkBufferView bufferView + const VkAllocationCallbacks* pAllocator + + + VkResult vkCreateImage + VkDevice device + const VkImageCreateInfo* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkImage* pImage + + + void vkDestroyImage + VkDevice device + VkImage image + const VkAllocationCallbacks* pAllocator + + + void vkGetImageSubresourceLayout + VkDevice device + VkImage image + const VkImageSubresource* pSubresource + VkSubresourceLayout* pLayout + + + VkResult vkCreateImageView + VkDevice device + const VkImageViewCreateInfo* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkImageView* pView + + + void vkDestroyImageView + VkDevice device + VkImageView imageView + const VkAllocationCallbacks* pAllocator + + + VkResult vkCreateShaderModule + VkDevice device + const 
VkShaderModuleCreateInfo* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkShaderModule* pShaderModule + + + void vkDestroyShaderModule + VkDevice device + VkShaderModule shaderModule + const VkAllocationCallbacks* pAllocator + + + VkResult vkCreatePipelineCache + VkDevice device + const VkPipelineCacheCreateInfo* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkPipelineCache* pPipelineCache + + + VkResult vkCreatePipelineCache + VkDevice device + const VkPipelineCacheCreateInfo* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkPipelineCache* pPipelineCache + + + void vkDestroyPipelineCache + VkDevice device + VkPipelineCache pipelineCache + const VkAllocationCallbacks* pAllocator + + + VkResult vkGetPipelineCacheData + VkDevice device + VkPipelineCache pipelineCache + size_t* pDataSize + void* pData + + + VkResult vkMergePipelineCaches + VkDevice device + VkPipelineCache dstCache + uint32_t srcCacheCount + const VkPipelineCache* pSrcCaches + + + VkResult vkCreateGraphicsPipelines + VkDevice device + VkPipelineCache pipelineCache + uint32_t createInfoCount + const VkGraphicsPipelineCreateInfo* pCreateInfos + const VkAllocationCallbacks* pAllocator + VkPipeline* pPipelines + + + VkResult vkCreateGraphicsPipelines + VkDevice device + VkPipelineCache pipelineCache + uint32_t createInfoCount + const VkGraphicsPipelineCreateInfo* pCreateInfos + const VkAllocationCallbacks* pAllocator + VkPipeline* pPipelines + + + VkResult vkCreateComputePipelines + VkDevice device + VkPipelineCache pipelineCache + uint32_t createInfoCount + const VkComputePipelineCreateInfo* pCreateInfos + const VkAllocationCallbacks* pAllocator + VkPipeline* pPipelines + + + VkResult vkCreateComputePipelines + VkDevice device + VkPipelineCache pipelineCache + uint32_t createInfoCount + const VkComputePipelineCreateInfo* pCreateInfos + const VkAllocationCallbacks* pAllocator + VkPipeline* pPipelines + + + VkResult vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI + VkDevice device + VkRenderPass renderpass + VkExtent2D* pMaxWorkgroupSize + + + void vkDestroyPipeline + VkDevice device + VkPipeline pipeline + const VkAllocationCallbacks* pAllocator + + + VkResult vkCreatePipelineLayout + VkDevice device + const VkPipelineLayoutCreateInfo* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkPipelineLayout* pPipelineLayout + + + void vkDestroyPipelineLayout + VkDevice device + VkPipelineLayout pipelineLayout + const VkAllocationCallbacks* pAllocator + + + VkResult vkCreateSampler + VkDevice device + const VkSamplerCreateInfo* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkSampler* pSampler + + + void vkDestroySampler + VkDevice device + VkSampler sampler + const VkAllocationCallbacks* pAllocator + + + VkResult vkCreateDescriptorSetLayout + VkDevice device + const VkDescriptorSetLayoutCreateInfo* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkDescriptorSetLayout* pSetLayout + + + void vkDestroyDescriptorSetLayout + VkDevice device + VkDescriptorSetLayout descriptorSetLayout + const VkAllocationCallbacks* pAllocator + + + VkResult vkCreateDescriptorPool + VkDevice device + const VkDescriptorPoolCreateInfo* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkDescriptorPool* pDescriptorPool + + + void vkDestroyDescriptorPool + VkDevice device + VkDescriptorPool descriptorPool + const VkAllocationCallbacks* pAllocator + + + VkResult vkResetDescriptorPool + VkDevice device + VkDescriptorPool descriptorPool + VkDescriptorPoolResetFlags flags + + any sname:VkDescriptorSet objects 
allocated from pname:descriptorPool + + + + VkResult vkAllocateDescriptorSets + VkDevice device + const VkDescriptorSetAllocateInfo* pAllocateInfo + VkDescriptorSet* pDescriptorSets + + + VkResult vkFreeDescriptorSets + VkDevice device + VkDescriptorPool descriptorPool + uint32_t descriptorSetCount + const VkDescriptorSet* pDescriptorSets + + + void vkUpdateDescriptorSets + VkDevice device + uint32_t descriptorWriteCount + const VkWriteDescriptorSet* pDescriptorWrites + uint32_t descriptorCopyCount + const VkCopyDescriptorSet* pDescriptorCopies + + + VkResult vkCreateFramebuffer + VkDevice device + const VkFramebufferCreateInfo* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkFramebuffer* pFramebuffer + + + void vkDestroyFramebuffer + VkDevice device + VkFramebuffer framebuffer + const VkAllocationCallbacks* pAllocator + + + VkResult vkCreateRenderPass + VkDevice device + const VkRenderPassCreateInfo* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkRenderPass* pRenderPass + + + void vkDestroyRenderPass + VkDevice device + VkRenderPass renderPass + const VkAllocationCallbacks* pAllocator + + + void vkGetRenderAreaGranularity + VkDevice device + VkRenderPass renderPass + VkExtent2D* pGranularity + + + void vkGetRenderingAreaGranularityKHR + VkDevice device + const VkRenderingAreaInfoKHR* pRenderingAreaInfo + VkExtent2D* pGranularity + + + VkResult vkCreateCommandPool + VkDevice device + const VkCommandPoolCreateInfo* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkCommandPool* pCommandPool + + + void vkDestroyCommandPool + VkDevice device + VkCommandPool commandPool + const VkAllocationCallbacks* pAllocator + + + VkResult vkResetCommandPool + VkDevice device + VkCommandPool commandPool + VkCommandPoolResetFlags flags + + + VkResult vkAllocateCommandBuffers + VkDevice device + const VkCommandBufferAllocateInfo* pAllocateInfo + VkCommandBuffer* pCommandBuffers + + + void vkFreeCommandBuffers + VkDevice device + VkCommandPool commandPool + uint32_t commandBufferCount + const VkCommandBuffer* pCommandBuffers + + + VkResult vkBeginCommandBuffer + VkCommandBuffer commandBuffer + const VkCommandBufferBeginInfo* pBeginInfo + + the sname:VkCommandPool that pname:commandBuffer was allocated from + + + + VkResult vkEndCommandBuffer + VkCommandBuffer commandBuffer + + the sname:VkCommandPool that pname:commandBuffer was allocated from + + + + VkResult vkResetCommandBuffer + VkCommandBuffer commandBuffer + VkCommandBufferResetFlags flags + + the sname:VkCommandPool that pname:commandBuffer was allocated from + + + + void vkCmdBindPipeline + VkCommandBuffer commandBuffer + VkPipelineBindPoint pipelineBindPoint + VkPipeline pipeline + + + void vkCmdSetAttachmentFeedbackLoopEnableEXT + VkCommandBuffer commandBuffer + VkImageAspectFlags aspectMask + + + void vkCmdSetViewport + VkCommandBuffer commandBuffer + uint32_t firstViewport + uint32_t viewportCount + const VkViewport* pViewports + + + void vkCmdSetScissor + VkCommandBuffer commandBuffer + uint32_t firstScissor + uint32_t scissorCount + const VkRect2D* pScissors + + + void vkCmdSetLineWidth + VkCommandBuffer commandBuffer + float lineWidth + + + void vkCmdSetDepthBias + VkCommandBuffer commandBuffer + float depthBiasConstantFactor + float depthBiasClamp + float depthBiasSlopeFactor + + + void vkCmdSetBlendConstants + VkCommandBuffer commandBuffer + const float blendConstants[4] + + + void vkCmdSetDepthBounds + VkCommandBuffer commandBuffer + float minDepthBounds + float maxDepthBounds + + + void vkCmdSetStencilCompareMask 
+ VkCommandBuffer commandBuffer + VkStencilFaceFlags faceMask + uint32_t compareMask + + + void vkCmdSetStencilWriteMask + VkCommandBuffer commandBuffer + VkStencilFaceFlags faceMask + uint32_t writeMask + + + void vkCmdSetStencilReference + VkCommandBuffer commandBuffer + VkStencilFaceFlags faceMask + uint32_t reference + + + void vkCmdBindDescriptorSets + VkCommandBuffer commandBuffer + VkPipelineBindPoint pipelineBindPoint + VkPipelineLayout layout + uint32_t firstSet + uint32_t descriptorSetCount + const VkDescriptorSet* pDescriptorSets + uint32_t dynamicOffsetCount + const uint32_t* pDynamicOffsets + + + void vkCmdBindIndexBuffer + VkCommandBuffer commandBuffer + VkBuffer buffer + VkDeviceSize offset + VkIndexType indexType + + + void vkCmdBindVertexBuffers + VkCommandBuffer commandBuffer + uint32_t firstBinding + uint32_t bindingCount + const VkBuffer* pBuffers + const VkDeviceSize* pOffsets + + + void vkCmdDraw + VkCommandBuffer commandBuffer + uint32_t vertexCount + uint32_t instanceCount + uint32_t firstVertex + uint32_t firstInstance + + + void vkCmdDrawIndexed + VkCommandBuffer commandBuffer + uint32_t indexCount + uint32_t instanceCount + uint32_t firstIndex + int32_t vertexOffset + uint32_t firstInstance + + + void vkCmdDrawMultiEXT + VkCommandBuffer commandBuffer + uint32_t drawCount + const VkMultiDrawInfoEXT* pVertexInfo + uint32_t instanceCount + uint32_t firstInstance + uint32_t stride + + + void vkCmdDrawMultiIndexedEXT + VkCommandBuffer commandBuffer + uint32_t drawCount + const VkMultiDrawIndexedInfoEXT* pIndexInfo + uint32_t instanceCount + uint32_t firstInstance + uint32_t stride + const int32_t* pVertexOffset + + + void vkCmdDrawIndirect + VkCommandBuffer commandBuffer + VkBuffer buffer + VkDeviceSize offset + uint32_t drawCount + uint32_t stride + + + void vkCmdDrawIndexedIndirect + VkCommandBuffer commandBuffer + VkBuffer buffer + VkDeviceSize offset + uint32_t drawCount + uint32_t stride + + + void vkCmdDispatch + VkCommandBuffer commandBuffer + uint32_t groupCountX + uint32_t groupCountY + uint32_t groupCountZ + + + void vkCmdDispatchIndirect + VkCommandBuffer commandBuffer + VkBuffer buffer + VkDeviceSize offset + + + void vkCmdSubpassShadingHUAWEI + VkCommandBuffer commandBuffer + + + void vkCmdDrawClusterHUAWEI + VkCommandBuffer commandBuffer + uint32_t groupCountX + uint32_t groupCountY + uint32_t groupCountZ + + + void vkCmdDrawClusterIndirectHUAWEI + VkCommandBuffer commandBuffer + VkBuffer buffer + VkDeviceSize offset + + + void vkCmdUpdatePipelineIndirectBufferNV + VkCommandBuffer commandBuffer + VkPipelineBindPoint pipelineBindPoint + VkPipeline pipeline + + + void vkCmdCopyBuffer + VkCommandBuffer commandBuffer + VkBuffer srcBuffer + VkBuffer dstBuffer + uint32_t regionCount + const VkBufferCopy* pRegions + + + void vkCmdCopyImage + VkCommandBuffer commandBuffer + VkImage srcImage + VkImageLayout srcImageLayout + VkImage dstImage + VkImageLayout dstImageLayout + uint32_t regionCount + const VkImageCopy* pRegions + + + void vkCmdBlitImage + VkCommandBuffer commandBuffer + VkImage srcImage + VkImageLayout srcImageLayout + VkImage dstImage + VkImageLayout dstImageLayout + uint32_t regionCount + const VkImageBlit* pRegions + VkFilter filter + + + void vkCmdCopyBufferToImage + VkCommandBuffer commandBuffer + VkBuffer srcBuffer + VkImage dstImage + VkImageLayout dstImageLayout + uint32_t regionCount + const VkBufferImageCopy* pRegions + + + void vkCmdCopyImageToBuffer + VkCommandBuffer commandBuffer + VkImage srcImage + VkImageLayout srcImageLayout + 
VkBuffer dstBuffer + uint32_t regionCount + const VkBufferImageCopy* pRegions + + + void vkCmdCopyMemoryIndirectNV + VkCommandBuffer commandBuffer + VkDeviceAddress copyBufferAddress + uint32_t copyCount + uint32_t stride + + + void vkCmdCopyMemoryToImageIndirectNV + VkCommandBuffer commandBuffer + VkDeviceAddress copyBufferAddress + uint32_t copyCount + uint32_t stride + VkImage dstImage + VkImageLayout dstImageLayout + const VkImageSubresourceLayers* pImageSubresources + + + void vkCmdUpdateBuffer + VkCommandBuffer commandBuffer + VkBuffer dstBuffer + VkDeviceSize dstOffset + VkDeviceSize dataSize + const void* pData + + + void vkCmdFillBuffer + VkCommandBuffer commandBuffer + VkBuffer dstBuffer + VkDeviceSize dstOffset + VkDeviceSize size + uint32_t data + + + void vkCmdClearColorImage + VkCommandBuffer commandBuffer + VkImage image + VkImageLayout imageLayout + const VkClearColorValue* pColor + uint32_t rangeCount + const VkImageSubresourceRange* pRanges + + + void vkCmdClearDepthStencilImage + VkCommandBuffer commandBuffer + VkImage image + VkImageLayout imageLayout + const VkClearDepthStencilValue* pDepthStencil + uint32_t rangeCount + const VkImageSubresourceRange* pRanges + + + void vkCmdClearAttachments + VkCommandBuffer commandBuffer + uint32_t attachmentCount + const VkClearAttachment* pAttachments + uint32_t rectCount + const VkClearRect* pRects + + + void vkCmdResolveImage + VkCommandBuffer commandBuffer + VkImage srcImage + VkImageLayout srcImageLayout + VkImage dstImage + VkImageLayout dstImageLayout + uint32_t regionCount + const VkImageResolve* pRegions + + + void vkCmdSetEvent + VkCommandBuffer commandBuffer + VkEvent event + VkPipelineStageFlags stageMask + + + void vkCmdResetEvent + VkCommandBuffer commandBuffer + VkEvent event + VkPipelineStageFlags stageMask + + + void vkCmdWaitEvents + VkCommandBuffer commandBuffer + uint32_t eventCount + const VkEvent* pEvents + VkPipelineStageFlags srcStageMask + VkPipelineStageFlags dstStageMask + uint32_t memoryBarrierCount + const VkMemoryBarrier* pMemoryBarriers + uint32_t bufferMemoryBarrierCount + const VkBufferMemoryBarrier* pBufferMemoryBarriers + uint32_t imageMemoryBarrierCount + const VkImageMemoryBarrier* pImageMemoryBarriers + + + void vkCmdPipelineBarrier + VkCommandBuffer commandBuffer + VkPipelineStageFlags srcStageMask + VkPipelineStageFlags dstStageMask + VkDependencyFlags dependencyFlags + uint32_t memoryBarrierCount + const VkMemoryBarrier* pMemoryBarriers + uint32_t bufferMemoryBarrierCount + const VkBufferMemoryBarrier* pBufferMemoryBarriers + uint32_t imageMemoryBarrierCount + const VkImageMemoryBarrier* pImageMemoryBarriers + + + void vkCmdBeginQuery + VkCommandBuffer commandBuffer + VkQueryPool queryPool + uint32_t query + VkQueryControlFlags flags + + + void vkCmdEndQuery + VkCommandBuffer commandBuffer + VkQueryPool queryPool + uint32_t query + + + void vkCmdBeginConditionalRenderingEXT + VkCommandBuffer commandBuffer + const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin + + + void vkCmdEndConditionalRenderingEXT + VkCommandBuffer commandBuffer + + + void vkCmdResetQueryPool + VkCommandBuffer commandBuffer + VkQueryPool queryPool + uint32_t firstQuery + uint32_t queryCount + + + void vkCmdWriteTimestamp + VkCommandBuffer commandBuffer + VkPipelineStageFlagBits pipelineStage + VkQueryPool queryPool + uint32_t query + + + void vkCmdCopyQueryPoolResults + VkCommandBuffer commandBuffer + VkQueryPool queryPool + uint32_t firstQuery + uint32_t queryCount + VkBuffer dstBuffer + VkDeviceSize 
dstOffset + VkDeviceSize stride + VkQueryResultFlags flags + + + void vkCmdPushConstants + VkCommandBuffer commandBuffer + VkPipelineLayout layout + VkShaderStageFlags stageFlags + uint32_t offset + uint32_t size + const void* pValues + + + void vkCmdBeginRenderPass + VkCommandBuffer commandBuffer + const VkRenderPassBeginInfo* pRenderPassBegin + VkSubpassContents contents + + + void vkCmdNextSubpass + VkCommandBuffer commandBuffer + VkSubpassContents contents + + + void vkCmdEndRenderPass + VkCommandBuffer commandBuffer + + + void vkCmdExecuteCommands + VkCommandBuffer commandBuffer + uint32_t commandBufferCount + const VkCommandBuffer* pCommandBuffers + + + VkResult vkCreateAndroidSurfaceKHR + VkInstance instance + const VkAndroidSurfaceCreateInfoKHR* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkSurfaceKHR* pSurface + + + VkResult vkGetPhysicalDeviceDisplayPropertiesKHR + VkPhysicalDevice physicalDevice + uint32_t* pPropertyCount + VkDisplayPropertiesKHR* pProperties + + + VkResult vkGetPhysicalDeviceDisplayPlanePropertiesKHR + VkPhysicalDevice physicalDevice + uint32_t* pPropertyCount + VkDisplayPlanePropertiesKHR* pProperties + + + VkResult vkGetDisplayPlaneSupportedDisplaysKHR + VkPhysicalDevice physicalDevice + uint32_t planeIndex + uint32_t* pDisplayCount + VkDisplayKHR* pDisplays + + + VkResult vkGetDisplayModePropertiesKHR + VkPhysicalDevice physicalDevice + VkDisplayKHR display + uint32_t* pPropertyCount + VkDisplayModePropertiesKHR* pProperties + + + VkResult vkCreateDisplayModeKHR + VkPhysicalDevice physicalDevice + VkDisplayKHR display + const VkDisplayModeCreateInfoKHR* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkDisplayModeKHR* pMode + + + VkResult vkGetDisplayPlaneCapabilitiesKHR + VkPhysicalDevice physicalDevice + VkDisplayModeKHR mode + uint32_t planeIndex + VkDisplayPlaneCapabilitiesKHR* pCapabilities + + + VkResult vkCreateDisplayPlaneSurfaceKHR + VkInstance instance + const VkDisplaySurfaceCreateInfoKHR* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkSurfaceKHR* pSurface + + + VkResult vkCreateSharedSwapchainsKHR + VkDevice device + uint32_t swapchainCount + const VkSwapchainCreateInfoKHR* pCreateInfos + const VkSwapchainCreateInfoKHR* pCreateInfos + const VkAllocationCallbacks* pAllocator + VkSwapchainKHR* pSwapchains + + + void vkDestroySurfaceKHR + VkInstance instance + VkSurfaceKHR surface + const VkAllocationCallbacks* pAllocator + + + VkResult vkGetPhysicalDeviceSurfaceSupportKHR + VkPhysicalDevice physicalDevice + uint32_t queueFamilyIndex + VkSurfaceKHR surface + VkBool32* pSupported + + + VkResult vkGetPhysicalDeviceSurfaceCapabilitiesKHR + VkPhysicalDevice physicalDevice + VkSurfaceKHR surface + VkSurfaceCapabilitiesKHR* pSurfaceCapabilities + + + VkResult vkGetPhysicalDeviceSurfaceFormatsKHR + VkPhysicalDevice physicalDevice + VkSurfaceKHR surface + uint32_t* pSurfaceFormatCount + VkSurfaceFormatKHR* pSurfaceFormats + + + VkResult vkGetPhysicalDeviceSurfacePresentModesKHR + VkPhysicalDevice physicalDevice + VkSurfaceKHR surface + uint32_t* pPresentModeCount + VkPresentModeKHR* pPresentModes + + + VkResult vkCreateSwapchainKHR + VkDevice device + const VkSwapchainCreateInfoKHR* pCreateInfo + const VkSwapchainCreateInfoKHR* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkSwapchainKHR* pSwapchain + + + void vkDestroySwapchainKHR + VkDevice device + VkSwapchainKHR swapchain + const VkAllocationCallbacks* pAllocator + + + VkResult vkGetSwapchainImagesKHR + VkDevice device + VkSwapchainKHR swapchain + uint32_t* 
pSwapchainImageCount + VkImage* pSwapchainImages + + + VkResult vkAcquireNextImageKHR + VkDevice device + VkSwapchainKHR swapchain + uint64_t timeout + VkSemaphore semaphore + VkFence fence + uint32_t* pImageIndex + + + VkResult vkQueuePresentKHR + VkQueue queue + const VkPresentInfoKHR* pPresentInfo + + + VkResult vkCreateViSurfaceNN + VkInstance instance + const VkViSurfaceCreateInfoNN* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkSurfaceKHR* pSurface + + + VkResult vkCreateWaylandSurfaceKHR + VkInstance instance + const VkWaylandSurfaceCreateInfoKHR* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkSurfaceKHR* pSurface + + + VkBool32 vkGetPhysicalDeviceWaylandPresentationSupportKHR + VkPhysicalDevice physicalDevice + uint32_t queueFamilyIndex + struct wl_display* display + + + VkResult vkCreateWin32SurfaceKHR + VkInstance instance + const VkWin32SurfaceCreateInfoKHR* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkSurfaceKHR* pSurface + + + VkBool32 vkGetPhysicalDeviceWin32PresentationSupportKHR + VkPhysicalDevice physicalDevice + uint32_t queueFamilyIndex + + + VkResult vkCreateXlibSurfaceKHR + VkInstance instance + const VkXlibSurfaceCreateInfoKHR* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkSurfaceKHR* pSurface + + + VkBool32 vkGetPhysicalDeviceXlibPresentationSupportKHR + VkPhysicalDevice physicalDevice + uint32_t queueFamilyIndex + Display* dpy + VisualID visualID + + + VkResult vkCreateXcbSurfaceKHR + VkInstance instance + const VkXcbSurfaceCreateInfoKHR* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkSurfaceKHR* pSurface + + + VkBool32 vkGetPhysicalDeviceXcbPresentationSupportKHR + VkPhysicalDevice physicalDevice + uint32_t queueFamilyIndex + xcb_connection_t* connection + xcb_visualid_t visual_id + + + VkResult vkCreateDirectFBSurfaceEXT + VkInstance instance + const VkDirectFBSurfaceCreateInfoEXT* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkSurfaceKHR* pSurface + + + VkBool32 vkGetPhysicalDeviceDirectFBPresentationSupportEXT + VkPhysicalDevice physicalDevice + uint32_t queueFamilyIndex + IDirectFB* dfb + + + VkResult vkCreateImagePipeSurfaceFUCHSIA + VkInstance instance + const VkImagePipeSurfaceCreateInfoFUCHSIA* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkSurfaceKHR* pSurface + + + VkResult vkCreateStreamDescriptorSurfaceGGP + VkInstance instance + const VkStreamDescriptorSurfaceCreateInfoGGP* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkSurfaceKHR* pSurface + + + VkResult vkCreateScreenSurfaceQNX + VkInstance instance + const VkScreenSurfaceCreateInfoQNX* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkSurfaceKHR* pSurface + + + VkBool32 vkGetPhysicalDeviceScreenPresentationSupportQNX + VkPhysicalDevice physicalDevice + uint32_t queueFamilyIndex + struct _screen_window* window + + + VkResult vkCreateDebugReportCallbackEXT + VkInstance instance + const VkDebugReportCallbackCreateInfoEXT* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkDebugReportCallbackEXT* pCallback + + + void vkDestroyDebugReportCallbackEXT + VkInstance instance + VkDebugReportCallbackEXT callback + const VkAllocationCallbacks* pAllocator + + + void vkDebugReportMessageEXT + VkInstance instance + VkDebugReportFlagsEXT flags + VkDebugReportObjectTypeEXT objectType + uint64_t object + size_t location + int32_t messageCode + const char* pLayerPrefix + const char* pMessage + + + VkResult vkDebugMarkerSetObjectNameEXT + VkDevice device + const VkDebugMarkerObjectNameInfoEXT* pNameInfo + + + 
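The WSI entries above (vkCreateSwapchainKHR, vkGetSwapchainImagesKHR, vkAcquireNextImageKHR together with vkQueueSubmit and vkQueuePresentKHR) define the usual per-frame flow. A minimal C sketch of that flow, assuming the device, swapchain, queue, semaphores, fence, and a pre-recorded command buffer were created elsewhere; all parameter names here are illustrative, not taken from the registry:

#include <vulkan/vulkan.h>

/* One frame: acquire a swapchain image, submit work that waits on the acquire
 * semaphore and signals the render-finished semaphore, then present the image.
 * Assumes cmd was recorded against the image selected by imageIndex (a real
 * application typically keeps one command buffer per swapchain image). */
static VkResult draw_frame(VkDevice device, VkSwapchainKHR swapchain, VkQueue queue,
                           VkCommandBuffer cmd, VkSemaphore imageAvailable,
                           VkSemaphore renderFinished, VkFence inFlight)
{
    uint32_t imageIndex;
    VkResult result = vkAcquireNextImageKHR(device, swapchain, UINT64_MAX,
                                            imageAvailable, VK_NULL_HANDLE, &imageIndex);
    if (result < 0)
        return result; /* e.g. VK_ERROR_OUT_OF_DATE_KHR: recreate the swapchain */

    VkPipelineStageFlags waitStage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkSubmitInfo submit = {
        .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
        .waitSemaphoreCount = 1,
        .pWaitSemaphores = &imageAvailable,
        .pWaitDstStageMask = &waitStage,
        .commandBufferCount = 1,
        .pCommandBuffers = &cmd,
        .signalSemaphoreCount = 1,
        .pSignalSemaphores = &renderFinished,
    };
    result = vkQueueSubmit(queue, 1, &submit, inFlight);
    if (result < 0)
        return result;

    VkPresentInfoKHR present = {
        .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
        .waitSemaphoreCount = 1,
        .pWaitSemaphores = &renderFinished,
        .swapchainCount = 1,
        .pSwapchains = &swapchain,
        .pImageIndices = &imageIndex,
    };
    return vkQueuePresentKHR(queue, &present);
}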
VkResult vkDebugMarkerSetObjectTagEXT + VkDevice device + const VkDebugMarkerObjectTagInfoEXT* pTagInfo + + + void vkCmdDebugMarkerBeginEXT + VkCommandBuffer commandBuffer + const VkDebugMarkerMarkerInfoEXT* pMarkerInfo + + + void vkCmdDebugMarkerEndEXT + VkCommandBuffer commandBuffer + + + void vkCmdDebugMarkerInsertEXT + VkCommandBuffer commandBuffer + const VkDebugMarkerMarkerInfoEXT* pMarkerInfo + + + VkResult vkGetPhysicalDeviceExternalImageFormatPropertiesNV + VkPhysicalDevice physicalDevice + VkFormat format + VkImageType type + VkImageTiling tiling + VkImageUsageFlags usage + VkImageCreateFlags flags + VkExternalMemoryHandleTypeFlagsNV externalHandleType + VkExternalImageFormatPropertiesNV* pExternalImageFormatProperties + + + VkResult vkGetMemoryWin32HandleNV + VkDevice device + VkDeviceMemory memory + VkExternalMemoryHandleTypeFlagsNV handleType + HANDLE* pHandle + + + void vkCmdExecuteGeneratedCommandsNV + VkCommandBuffer commandBuffer + VkBool32 isPreprocessed + const VkGeneratedCommandsInfoNV* pGeneratedCommandsInfo + + + void vkCmdPreprocessGeneratedCommandsNV + VkCommandBuffer commandBuffer + const VkGeneratedCommandsInfoNV* pGeneratedCommandsInfo + + + void vkCmdBindPipelineShaderGroupNV + VkCommandBuffer commandBuffer + VkPipelineBindPoint pipelineBindPoint + VkPipeline pipeline + uint32_t groupIndex + + + void vkGetGeneratedCommandsMemoryRequirementsNV + VkDevice device + const VkGeneratedCommandsMemoryRequirementsInfoNV* pInfo + VkMemoryRequirements2* pMemoryRequirements + + + VkResult vkCreateIndirectCommandsLayoutNV + VkDevice device + const VkIndirectCommandsLayoutCreateInfoNV* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkIndirectCommandsLayoutNV* pIndirectCommandsLayout + + + void vkDestroyIndirectCommandsLayoutNV + VkDevice device + VkIndirectCommandsLayoutNV indirectCommandsLayout + const VkAllocationCallbacks* pAllocator + + + void vkGetPhysicalDeviceFeatures2 + VkPhysicalDevice physicalDevice + VkPhysicalDeviceFeatures2* pFeatures + + + + void vkGetPhysicalDeviceProperties2 + VkPhysicalDevice physicalDevice + VkPhysicalDeviceProperties2* pProperties + + + + void vkGetPhysicalDeviceFormatProperties2 + VkPhysicalDevice physicalDevice + VkFormat format + VkFormatProperties2* pFormatProperties + + + + VkResult vkGetPhysicalDeviceImageFormatProperties2 + VkPhysicalDevice physicalDevice + const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo + VkImageFormatProperties2* pImageFormatProperties + + + + void vkGetPhysicalDeviceQueueFamilyProperties2 + VkPhysicalDevice physicalDevice + uint32_t* pQueueFamilyPropertyCount + VkQueueFamilyProperties2* pQueueFamilyProperties + + + + void vkGetPhysicalDeviceMemoryProperties2 + VkPhysicalDevice physicalDevice + VkPhysicalDeviceMemoryProperties2* pMemoryProperties + + + + void vkGetPhysicalDeviceSparseImageFormatProperties2 + VkPhysicalDevice physicalDevice + const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo + uint32_t* pPropertyCount + VkSparseImageFormatProperties2* pProperties + + + + void vkCmdPushDescriptorSetKHR + VkCommandBuffer commandBuffer + VkPipelineBindPoint pipelineBindPoint + VkPipelineLayout layout + uint32_t set + uint32_t descriptorWriteCount + const VkWriteDescriptorSet* pDescriptorWrites + + + void vkTrimCommandPool + VkDevice device + VkCommandPool commandPool + VkCommandPoolTrimFlags flags + + + + void vkGetPhysicalDeviceExternalBufferProperties + VkPhysicalDevice physicalDevice + const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo + VkExternalBufferProperties* 
pExternalBufferProperties + + + + VkResult vkGetMemoryWin32HandleKHR + VkDevice device + const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo + HANDLE* pHandle + + + VkResult vkGetMemoryWin32HandlePropertiesKHR + VkDevice device + VkExternalMemoryHandleTypeFlagBits handleType + HANDLE handle + VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties + + + VkResult vkGetMemoryFdKHR + VkDevice device + const VkMemoryGetFdInfoKHR* pGetFdInfo + int* pFd + + + VkResult vkGetMemoryFdPropertiesKHR + VkDevice device + VkExternalMemoryHandleTypeFlagBits handleType + int fd + VkMemoryFdPropertiesKHR* pMemoryFdProperties + + + VkResult vkGetMemoryZirconHandleFUCHSIA + VkDevice device + const VkMemoryGetZirconHandleInfoFUCHSIA* pGetZirconHandleInfo + zx_handle_t* pZirconHandle + + + VkResult vkGetMemoryZirconHandlePropertiesFUCHSIA + VkDevice device + VkExternalMemoryHandleTypeFlagBits handleType + zx_handle_t zirconHandle + VkMemoryZirconHandlePropertiesFUCHSIA* pMemoryZirconHandleProperties + + + VkResult vkGetMemoryRemoteAddressNV + VkDevice device + const VkMemoryGetRemoteAddressInfoNV* pMemoryGetRemoteAddressInfo + VkRemoteAddressNV* pAddress + + + VkResult vkGetMemorySciBufNV + VkDevice device + const VkMemoryGetSciBufInfoNV* pGetSciBufInfo + NvSciBufObj* pHandle + + + VkResult vkGetPhysicalDeviceExternalMemorySciBufPropertiesNV + VkPhysicalDevice physicalDevice + VkExternalMemoryHandleTypeFlagBits handleType + NvSciBufObj handle + VkMemorySciBufPropertiesNV* pMemorySciBufProperties + + + VkResult vkGetPhysicalDeviceSciBufAttributesNV + VkPhysicalDevice physicalDevice + NvSciBufAttrList pAttributes + + + void vkGetPhysicalDeviceExternalSemaphoreProperties + VkPhysicalDevice physicalDevice + const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo + VkExternalSemaphoreProperties* pExternalSemaphoreProperties + + + + VkResult vkGetSemaphoreWin32HandleKHR + VkDevice device + const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo + HANDLE* pHandle + + + VkResult vkImportSemaphoreWin32HandleKHR + VkDevice device + const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo + + + VkResult vkGetSemaphoreFdKHR + VkDevice device + const VkSemaphoreGetFdInfoKHR* pGetFdInfo + int* pFd + + + VkResult vkImportSemaphoreFdKHR + VkDevice device + const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo + + + VkResult vkGetSemaphoreZirconHandleFUCHSIA + VkDevice device + const VkSemaphoreGetZirconHandleInfoFUCHSIA* pGetZirconHandleInfo + zx_handle_t* pZirconHandle + + + VkResult vkImportSemaphoreZirconHandleFUCHSIA + VkDevice device + const VkImportSemaphoreZirconHandleInfoFUCHSIA* pImportSemaphoreZirconHandleInfo + + + void vkGetPhysicalDeviceExternalFenceProperties + VkPhysicalDevice physicalDevice + const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo + VkExternalFenceProperties* pExternalFenceProperties + + + + VkResult vkGetFenceWin32HandleKHR + VkDevice device + const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo + HANDLE* pHandle + + + VkResult vkImportFenceWin32HandleKHR + VkDevice device + const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo + + + VkResult vkGetFenceFdKHR + VkDevice device + const VkFenceGetFdInfoKHR* pGetFdInfo + int* pFd + + + VkResult vkImportFenceFdKHR + VkDevice device + const VkImportFenceFdInfoKHR* pImportFenceFdInfo + + + VkResult vkGetFenceSciSyncFenceNV + VkDevice device + const VkFenceGetSciSyncInfoNV* pGetSciSyncHandleInfo + void* pHandle + + + VkResult vkGetFenceSciSyncObjNV + VkDevice device + const 
VkFenceGetSciSyncInfoNV* pGetSciSyncHandleInfo + void* pHandle + + + VkResult vkImportFenceSciSyncFenceNV + VkDevice device + const VkImportFenceSciSyncInfoNV* pImportFenceSciSyncInfo + + + VkResult vkImportFenceSciSyncObjNV + VkDevice device + const VkImportFenceSciSyncInfoNV* pImportFenceSciSyncInfo + + + VkResult vkGetSemaphoreSciSyncObjNV + VkDevice device + const VkSemaphoreGetSciSyncInfoNV* pGetSciSyncInfo + void* pHandle + + + VkResult vkImportSemaphoreSciSyncObjNV + VkDevice device + const VkImportSemaphoreSciSyncInfoNV* pImportSemaphoreSciSyncInfo + + + VkResult vkGetPhysicalDeviceSciSyncAttributesNV + VkPhysicalDevice physicalDevice + const VkSciSyncAttributesInfoNV* pSciSyncAttributesInfo + NvSciSyncAttrList pAttributes + + + VkResult vkCreateSemaphoreSciSyncPoolNV + VkDevice device + const VkSemaphoreSciSyncPoolCreateInfoNV* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkSemaphoreSciSyncPoolNV* pSemaphorePool + + + void vkDestroySemaphoreSciSyncPoolNV + VkDevice device + VkSemaphoreSciSyncPoolNV semaphorePool + const VkAllocationCallbacks* pAllocator + + + VkResult vkReleaseDisplayEXT + VkPhysicalDevice physicalDevice + VkDisplayKHR display + + + VkResult vkAcquireXlibDisplayEXT + VkPhysicalDevice physicalDevice + Display* dpy + VkDisplayKHR display + + + VkResult vkGetRandROutputDisplayEXT + VkPhysicalDevice physicalDevice + Display* dpy + RROutput rrOutput + VkDisplayKHR* pDisplay + + + VkResult vkAcquireWinrtDisplayNV + VkPhysicalDevice physicalDevice + VkDisplayKHR display + + + VkResult vkGetWinrtDisplayNV + VkPhysicalDevice physicalDevice + uint32_t deviceRelativeId + VkDisplayKHR* pDisplay + + + VkResult vkDisplayPowerControlEXT + VkDevice device + VkDisplayKHR display + const VkDisplayPowerInfoEXT* pDisplayPowerInfo + + + VkResult vkRegisterDeviceEventEXT + VkDevice device + const VkDeviceEventInfoEXT* pDeviceEventInfo + const VkAllocationCallbacks* pAllocator + VkFence* pFence + + + VkResult vkRegisterDisplayEventEXT + VkDevice device + VkDisplayKHR display + const VkDisplayEventInfoEXT* pDisplayEventInfo + const VkAllocationCallbacks* pAllocator + VkFence* pFence + + + VkResult vkGetSwapchainCounterEXT + VkDevice device + VkSwapchainKHR swapchain + VkSurfaceCounterFlagBitsEXT counter + uint64_t* pCounterValue + + + VkResult vkGetPhysicalDeviceSurfaceCapabilities2EXT + VkPhysicalDevice physicalDevice + VkSurfaceKHR surface + VkSurfaceCapabilities2EXT* pSurfaceCapabilities + + + VkResult vkEnumeratePhysicalDeviceGroups + VkInstance instance + uint32_t* pPhysicalDeviceGroupCount + VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties + + + + void vkGetDeviceGroupPeerMemoryFeatures + VkDevice device + uint32_t heapIndex + uint32_t localDeviceIndex + uint32_t remoteDeviceIndex + VkPeerMemoryFeatureFlags* pPeerMemoryFeatures + + + + VkResult vkBindBufferMemory2 + VkDevice device + uint32_t bindInfoCount + const VkBindBufferMemoryInfo* pBindInfos + + + + VkResult vkBindImageMemory2 + VkDevice device + uint32_t bindInfoCount + const VkBindImageMemoryInfo* pBindInfos + + + + void vkCmdSetDeviceMask + VkCommandBuffer commandBuffer + uint32_t deviceMask + + + + VkResult vkGetDeviceGroupPresentCapabilitiesKHR + VkDevice device + VkDeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities + + + VkResult vkGetDeviceGroupSurfacePresentModesKHR + VkDevice device + VkSurfaceKHR surface + VkDeviceGroupPresentModeFlagsKHR* pModes + + + VkResult vkAcquireNextImage2KHR + VkDevice device + const VkAcquireNextImageInfoKHR* pAcquireInfo + uint32_t* pImageIndex + 
+ + void vkCmdDispatchBase + VkCommandBuffer commandBuffer + uint32_t baseGroupX + uint32_t baseGroupY + uint32_t baseGroupZ + uint32_t groupCountX + uint32_t groupCountY + uint32_t groupCountZ + + + + VkResult vkGetPhysicalDevicePresentRectanglesKHR + VkPhysicalDevice physicalDevice + VkSurfaceKHR surface + uint32_t* pRectCount + VkRect2D* pRects + + + VkResult vkCreateDescriptorUpdateTemplate + VkDevice device + const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate + + + + void vkDestroyDescriptorUpdateTemplate + VkDevice device + VkDescriptorUpdateTemplate descriptorUpdateTemplate + const VkAllocationCallbacks* pAllocator + + + + void vkUpdateDescriptorSetWithTemplate + VkDevice device + VkDescriptorSet descriptorSet + VkDescriptorUpdateTemplate descriptorUpdateTemplate + const void* pData + + + + void vkCmdPushDescriptorSetWithTemplateKHR + VkCommandBuffer commandBuffer + VkDescriptorUpdateTemplate descriptorUpdateTemplate + VkPipelineLayout layout + uint32_t set + const void* pData + + + void vkSetHdrMetadataEXT + VkDevice device + uint32_t swapchainCount + const VkSwapchainKHR* pSwapchains + const VkHdrMetadataEXT* pMetadata + + + VkResult vkGetSwapchainStatusKHR + VkDevice device + VkSwapchainKHR swapchain + + + VkResult vkGetRefreshCycleDurationGOOGLE + VkDevice device + VkSwapchainKHR swapchain + VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties + + + VkResult vkGetPastPresentationTimingGOOGLE + VkDevice device + VkSwapchainKHR swapchain + uint32_t* pPresentationTimingCount + VkPastPresentationTimingGOOGLE* pPresentationTimings + + + VkResult vkCreateIOSSurfaceMVK + VkInstance instance + const VkIOSSurfaceCreateInfoMVK* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkSurfaceKHR* pSurface + + + VkResult vkCreateMacOSSurfaceMVK + VkInstance instance + const VkMacOSSurfaceCreateInfoMVK* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkSurfaceKHR* pSurface + + + VkResult vkCreateMetalSurfaceEXT + VkInstance instance + const VkMetalSurfaceCreateInfoEXT* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkSurfaceKHR* pSurface + + + void vkCmdSetViewportWScalingNV + VkCommandBuffer commandBuffer + uint32_t firstViewport + uint32_t viewportCount + const VkViewportWScalingNV* pViewportWScalings + + + void vkCmdSetDiscardRectangleEXT + VkCommandBuffer commandBuffer + uint32_t firstDiscardRectangle + uint32_t discardRectangleCount + const VkRect2D* pDiscardRectangles + + + void vkCmdSetDiscardRectangleEnableEXT + VkCommandBuffer commandBuffer + VkBool32 discardRectangleEnable + + + void vkCmdSetDiscardRectangleModeEXT + VkCommandBuffer commandBuffer + VkDiscardRectangleModeEXT discardRectangleMode + + + void vkCmdSetSampleLocationsEXT + VkCommandBuffer commandBuffer + const VkSampleLocationsInfoEXT* pSampleLocationsInfo + + + void vkGetPhysicalDeviceMultisamplePropertiesEXT + VkPhysicalDevice physicalDevice + VkSampleCountFlagBits samples + VkMultisamplePropertiesEXT* pMultisampleProperties + + + VkResult vkGetPhysicalDeviceSurfaceCapabilities2KHR + VkPhysicalDevice physicalDevice + const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo + VkSurfaceCapabilities2KHR* pSurfaceCapabilities + + + VkResult vkGetPhysicalDeviceSurfaceFormats2KHR + VkPhysicalDevice physicalDevice + const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo + uint32_t* pSurfaceFormatCount + VkSurfaceFormat2KHR* pSurfaceFormats + + + VkResult vkGetPhysicalDeviceDisplayProperties2KHR + VkPhysicalDevice 
physicalDevice + uint32_t* pPropertyCount + VkDisplayProperties2KHR* pProperties + + + VkResult vkGetPhysicalDeviceDisplayPlaneProperties2KHR + VkPhysicalDevice physicalDevice + uint32_t* pPropertyCount + VkDisplayPlaneProperties2KHR* pProperties + + + VkResult vkGetDisplayModeProperties2KHR + VkPhysicalDevice physicalDevice + VkDisplayKHR display + uint32_t* pPropertyCount + VkDisplayModeProperties2KHR* pProperties + + + VkResult vkGetDisplayPlaneCapabilities2KHR + VkPhysicalDevice physicalDevice + const VkDisplayPlaneInfo2KHR* pDisplayPlaneInfo + VkDisplayPlaneCapabilities2KHR* pCapabilities + + + void vkGetBufferMemoryRequirements2 + VkDevice device + const VkBufferMemoryRequirementsInfo2* pInfo + VkMemoryRequirements2* pMemoryRequirements + + + + void vkGetImageMemoryRequirements2 + VkDevice device + const VkImageMemoryRequirementsInfo2* pInfo + VkMemoryRequirements2* pMemoryRequirements + + + + void vkGetImageSparseMemoryRequirements2 + VkDevice device + const VkImageSparseMemoryRequirementsInfo2* pInfo + uint32_t* pSparseMemoryRequirementCount + VkSparseImageMemoryRequirements2* pSparseMemoryRequirements + + + + void vkGetDeviceBufferMemoryRequirements + VkDevice device + const VkDeviceBufferMemoryRequirements* pInfo + VkMemoryRequirements2* pMemoryRequirements + + + + void vkGetDeviceImageMemoryRequirements + VkDevice device + const VkDeviceImageMemoryRequirements* pInfo + VkMemoryRequirements2* pMemoryRequirements + + + + void vkGetDeviceImageSparseMemoryRequirements + VkDevice device + const VkDeviceImageMemoryRequirements* pInfo + uint32_t* pSparseMemoryRequirementCount + VkSparseImageMemoryRequirements2* pSparseMemoryRequirements + + + + VkResult vkCreateSamplerYcbcrConversion + VkDevice device + const VkSamplerYcbcrConversionCreateInfo* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkSamplerYcbcrConversion* pYcbcrConversion + + + + void vkDestroySamplerYcbcrConversion + VkDevice device + VkSamplerYcbcrConversion ycbcrConversion + const VkAllocationCallbacks* pAllocator + + + + void vkGetDeviceQueue2 + VkDevice device + const VkDeviceQueueInfo2* pQueueInfo + VkQueue* pQueue + + + VkResult vkCreateValidationCacheEXT + VkDevice device + const VkValidationCacheCreateInfoEXT* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkValidationCacheEXT* pValidationCache + + + void vkDestroyValidationCacheEXT + VkDevice device + VkValidationCacheEXT validationCache + const VkAllocationCallbacks* pAllocator + + + VkResult vkGetValidationCacheDataEXT + VkDevice device + VkValidationCacheEXT validationCache + size_t* pDataSize + void* pData + + + VkResult vkMergeValidationCachesEXT + VkDevice device + VkValidationCacheEXT dstCache + uint32_t srcCacheCount + const VkValidationCacheEXT* pSrcCaches + + + void vkGetDescriptorSetLayoutSupport + VkDevice device + const VkDescriptorSetLayoutCreateInfo* pCreateInfo + VkDescriptorSetLayoutSupport* pSupport + + + + VkResult vkGetSwapchainGrallocUsageANDROID + VkDevice device + VkFormat format + VkImageUsageFlags imageUsage + int* grallocUsage + + + VkResult vkGetSwapchainGrallocUsage2ANDROID + VkDevice device + VkFormat format + VkImageUsageFlags imageUsage + VkSwapchainImageUsageFlagsANDROID swapchainImageUsage + uint64_t* grallocConsumerUsage + uint64_t* grallocProducerUsage + + + VkResult vkAcquireImageANDROID + VkDevice device + VkImage image + int nativeFenceFd + VkSemaphore semaphore + VkFence fence + + + VkResult vkQueueSignalReleaseImageANDROID + VkQueue queue + uint32_t waitSemaphoreCount + const VkSemaphore* pWaitSemaphores + 
VkImage image + int* pNativeFenceFd + + + VkResult vkGetShaderInfoAMD + VkDevice device + VkPipeline pipeline + VkShaderStageFlagBits shaderStage + VkShaderInfoTypeAMD infoType + size_t* pInfoSize + void* pInfo + + + void vkSetLocalDimmingAMD + VkDevice device + VkSwapchainKHR swapChain + VkBool32 localDimmingEnable + + + VkResult vkGetPhysicalDeviceCalibrateableTimeDomainsKHR + VkPhysicalDevice physicalDevice + uint32_t* pTimeDomainCount + VkTimeDomainKHR* pTimeDomains + + + + VkResult vkGetCalibratedTimestampsKHR + VkDevice device + uint32_t timestampCount + const VkCalibratedTimestampInfoKHR* pTimestampInfos + uint64_t* pTimestamps + uint64_t* pMaxDeviation + + + + VkResult vkSetDebugUtilsObjectNameEXT + VkDevice device + const VkDebugUtilsObjectNameInfoEXT* pNameInfo + + + VkResult vkSetDebugUtilsObjectTagEXT + VkDevice device + const VkDebugUtilsObjectTagInfoEXT* pTagInfo + + + void vkQueueBeginDebugUtilsLabelEXT + VkQueue queue + const VkDebugUtilsLabelEXT* pLabelInfo + + + void vkQueueEndDebugUtilsLabelEXT + VkQueue queue + + + void vkQueueInsertDebugUtilsLabelEXT + VkQueue queue + const VkDebugUtilsLabelEXT* pLabelInfo + + + void vkCmdBeginDebugUtilsLabelEXT + VkCommandBuffer commandBuffer + const VkDebugUtilsLabelEXT* pLabelInfo + + + void vkCmdEndDebugUtilsLabelEXT + VkCommandBuffer commandBuffer + + + void vkCmdInsertDebugUtilsLabelEXT + VkCommandBuffer commandBuffer + const VkDebugUtilsLabelEXT* pLabelInfo + + + VkResult vkCreateDebugUtilsMessengerEXT + VkInstance instance + const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkDebugUtilsMessengerEXT* pMessenger + + + void vkDestroyDebugUtilsMessengerEXT + VkInstance instance + VkDebugUtilsMessengerEXT messenger + const VkAllocationCallbacks* pAllocator + + + void vkSubmitDebugUtilsMessageEXT + VkInstance instance + VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity + VkDebugUtilsMessageTypeFlagsEXT messageTypes + const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData + + + VkResult vkGetMemoryHostPointerPropertiesEXT + VkDevice device + VkExternalMemoryHandleTypeFlagBits handleType + const void* pHostPointer + VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties + + + void vkCmdWriteBufferMarkerAMD + VkCommandBuffer commandBuffer + VkPipelineStageFlagBits pipelineStage + VkBuffer dstBuffer + VkDeviceSize dstOffset + uint32_t marker + + + VkResult vkCreateRenderPass2 + VkDevice device + const VkRenderPassCreateInfo2* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkRenderPass* pRenderPass + + + + void vkCmdBeginRenderPass2 + VkCommandBuffer commandBuffer + const VkRenderPassBeginInfo* pRenderPassBegin + const VkSubpassBeginInfo* pSubpassBeginInfo + + + + void vkCmdNextSubpass2 + VkCommandBuffer commandBuffer + const VkSubpassBeginInfo* pSubpassBeginInfo + const VkSubpassEndInfo* pSubpassEndInfo + + + + void vkCmdEndRenderPass2 + VkCommandBuffer commandBuffer + const VkSubpassEndInfo* pSubpassEndInfo + + + + VkResult vkGetSemaphoreCounterValue + VkDevice device + VkSemaphore semaphore + uint64_t* pValue + + + + VkResult vkWaitSemaphores + VkDevice device + const VkSemaphoreWaitInfo* pWaitInfo + uint64_t timeout + + + + VkResult vkSignalSemaphore + VkDevice device + const VkSemaphoreSignalInfo* pSignalInfo + + + + VkResult vkGetAndroidHardwareBufferPropertiesANDROID + VkDevice device + const struct AHardwareBuffer* buffer + VkAndroidHardwareBufferPropertiesANDROID* pProperties + + + VkResult vkGetMemoryAndroidHardwareBufferANDROID + VkDevice device + const 
VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo + struct AHardwareBuffer** pBuffer + + + void vkCmdDrawIndirectCount + VkCommandBuffer commandBuffer + VkBuffer buffer + VkDeviceSize offset + VkBuffer countBuffer + VkDeviceSize countBufferOffset + uint32_t maxDrawCount + uint32_t stride + + + + + void vkCmdDrawIndexedIndirectCount + VkCommandBuffer commandBuffer + VkBuffer buffer + VkDeviceSize offset + VkBuffer countBuffer + VkDeviceSize countBufferOffset + uint32_t maxDrawCount + uint32_t stride + + + + + void vkCmdSetCheckpointNV + VkCommandBuffer commandBuffer + const void* pCheckpointMarker + + + void vkGetQueueCheckpointDataNV + VkQueue queue + uint32_t* pCheckpointDataCount + VkCheckpointDataNV* pCheckpointData + + + void vkCmdBindTransformFeedbackBuffersEXT + VkCommandBuffer commandBuffer + uint32_t firstBinding + uint32_t bindingCount + const VkBuffer* pBuffers + const VkDeviceSize* pOffsets + const VkDeviceSize* pSizes + + + void vkCmdBeginTransformFeedbackEXT + VkCommandBuffer commandBuffer + uint32_t firstCounterBuffer + uint32_t counterBufferCount + const VkBuffer* pCounterBuffers + const VkDeviceSize* pCounterBufferOffsets + + + void vkCmdEndTransformFeedbackEXT + VkCommandBuffer commandBuffer + uint32_t firstCounterBuffer + uint32_t counterBufferCount + const VkBuffer* pCounterBuffers + const VkDeviceSize* pCounterBufferOffsets + + + void vkCmdBeginQueryIndexedEXT + VkCommandBuffer commandBuffer + VkQueryPool queryPool + uint32_t query + VkQueryControlFlags flags + uint32_t index + + + void vkCmdEndQueryIndexedEXT + VkCommandBuffer commandBuffer + VkQueryPool queryPool + uint32_t query + uint32_t index + + + void vkCmdDrawIndirectByteCountEXT + VkCommandBuffer commandBuffer + uint32_t instanceCount + uint32_t firstInstance + VkBuffer counterBuffer + VkDeviceSize counterBufferOffset + uint32_t counterOffset + uint32_t vertexStride + + + void vkCmdSetExclusiveScissorNV + VkCommandBuffer commandBuffer + uint32_t firstExclusiveScissor + uint32_t exclusiveScissorCount + const VkRect2D* pExclusiveScissors + + + void vkCmdSetExclusiveScissorEnableNV + VkCommandBuffer commandBuffer + uint32_t firstExclusiveScissor + uint32_t exclusiveScissorCount + const VkBool32* pExclusiveScissorEnables + + + void vkCmdBindShadingRateImageNV + VkCommandBuffer commandBuffer + VkImageView imageView + VkImageLayout imageLayout + + + void vkCmdSetViewportShadingRatePaletteNV + VkCommandBuffer commandBuffer + uint32_t firstViewport + uint32_t viewportCount + const VkShadingRatePaletteNV* pShadingRatePalettes + + + void vkCmdSetCoarseSampleOrderNV + VkCommandBuffer commandBuffer + VkCoarseSampleOrderTypeNV sampleOrderType + uint32_t customSampleOrderCount + const VkCoarseSampleOrderCustomNV* pCustomSampleOrders + + + void vkCmdDrawMeshTasksNV + VkCommandBuffer commandBuffer + uint32_t taskCount + uint32_t firstTask + + + void vkCmdDrawMeshTasksIndirectNV + VkCommandBuffer commandBuffer + VkBuffer buffer + VkDeviceSize offset + uint32_t drawCount + uint32_t stride + + + void vkCmdDrawMeshTasksIndirectCountNV + VkCommandBuffer commandBuffer + VkBuffer buffer + VkDeviceSize offset + VkBuffer countBuffer + VkDeviceSize countBufferOffset + uint32_t maxDrawCount + uint32_t stride + + + void vkCmdDrawMeshTasksEXT + VkCommandBuffer commandBuffer + uint32_t groupCountX + uint32_t groupCountY + uint32_t groupCountZ + + + void vkCmdDrawMeshTasksIndirectEXT + VkCommandBuffer commandBuffer + VkBuffer buffer + VkDeviceSize offset + uint32_t drawCount + uint32_t stride + + + void vkCmdDrawMeshTasksIndirectCountEXT + 
VkCommandBuffer commandBuffer + VkBuffer buffer + VkDeviceSize offset + VkBuffer countBuffer + VkDeviceSize countBufferOffset + uint32_t maxDrawCount + uint32_t stride + + + VkResult vkCompileDeferredNV + VkDevice device + VkPipeline pipeline + uint32_t shader + + + VkResult vkCreateAccelerationStructureNV + VkDevice device + const VkAccelerationStructureCreateInfoNV* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkAccelerationStructureNV* pAccelerationStructure + + + void vkCmdBindInvocationMaskHUAWEI + VkCommandBuffer commandBuffer + VkImageView imageView + VkImageLayout imageLayout + + + void vkDestroyAccelerationStructureKHR + VkDevice device + VkAccelerationStructureKHR accelerationStructure + const VkAllocationCallbacks* pAllocator + + + void vkDestroyAccelerationStructureNV + VkDevice device + VkAccelerationStructureNV accelerationStructure + const VkAllocationCallbacks* pAllocator + + + void vkGetAccelerationStructureMemoryRequirementsNV + VkDevice device + const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo + VkMemoryRequirements2KHR* pMemoryRequirements + + + VkResult vkBindAccelerationStructureMemoryNV + VkDevice device + uint32_t bindInfoCount + const VkBindAccelerationStructureMemoryInfoNV* pBindInfos + + + void vkCmdCopyAccelerationStructureNV + VkCommandBuffer commandBuffer + VkAccelerationStructureNV dst + VkAccelerationStructureNV src + VkCopyAccelerationStructureModeKHR mode + + + void vkCmdCopyAccelerationStructureKHR + VkCommandBuffer commandBuffer + const VkCopyAccelerationStructureInfoKHR* pInfo + + + VkResult vkCopyAccelerationStructureKHR + VkDevice device + VkDeferredOperationKHR deferredOperation + const VkCopyAccelerationStructureInfoKHR* pInfo + + + void vkCmdCopyAccelerationStructureToMemoryKHR + VkCommandBuffer commandBuffer + const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo + + + VkResult vkCopyAccelerationStructureToMemoryKHR + VkDevice device + VkDeferredOperationKHR deferredOperation + const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo + + + void vkCmdCopyMemoryToAccelerationStructureKHR + VkCommandBuffer commandBuffer + const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo + + + VkResult vkCopyMemoryToAccelerationStructureKHR + VkDevice device + VkDeferredOperationKHR deferredOperation + const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo + + + void vkCmdWriteAccelerationStructuresPropertiesKHR + VkCommandBuffer commandBuffer + uint32_t accelerationStructureCount + const VkAccelerationStructureKHR* pAccelerationStructures + VkQueryType queryType + VkQueryPool queryPool + uint32_t firstQuery + + + void vkCmdWriteAccelerationStructuresPropertiesNV + VkCommandBuffer commandBuffer + uint32_t accelerationStructureCount + const VkAccelerationStructureNV* pAccelerationStructures + VkQueryType queryType + VkQueryPool queryPool + uint32_t firstQuery + + + void vkCmdBuildAccelerationStructureNV + VkCommandBuffer commandBuffer + const VkAccelerationStructureInfoNV* pInfo + VkBuffer instanceData + VkDeviceSize instanceOffset + VkBool32 update + VkAccelerationStructureNV dst + VkAccelerationStructureNV src + VkBuffer scratch + VkDeviceSize scratchOffset + + + VkResult vkWriteAccelerationStructuresPropertiesKHR + VkDevice device + uint32_t accelerationStructureCount + const VkAccelerationStructureKHR* pAccelerationStructures + VkQueryType queryType + size_t dataSize + void* pData + size_t stride + + + void vkCmdTraceRaysKHR + VkCommandBuffer commandBuffer + const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable + const 
VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable + const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable + const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable + uint32_t width + uint32_t height + uint32_t depth + + + void vkCmdTraceRaysNV + VkCommandBuffer commandBuffer + VkBuffer raygenShaderBindingTableBuffer + VkDeviceSize raygenShaderBindingOffset + VkBuffer missShaderBindingTableBuffer + VkDeviceSize missShaderBindingOffset + VkDeviceSize missShaderBindingStride + VkBuffer hitShaderBindingTableBuffer + VkDeviceSize hitShaderBindingOffset + VkDeviceSize hitShaderBindingStride + VkBuffer callableShaderBindingTableBuffer + VkDeviceSize callableShaderBindingOffset + VkDeviceSize callableShaderBindingStride + uint32_t width + uint32_t height + uint32_t depth + + + VkResult vkGetRayTracingShaderGroupHandlesKHR + VkDevice device + VkPipeline pipeline + uint32_t firstGroup + uint32_t groupCount + size_t dataSize + void* pData + + + + VkResult vkGetRayTracingCaptureReplayShaderGroupHandlesKHR + VkDevice device + VkPipeline pipeline + uint32_t firstGroup + uint32_t groupCount + size_t dataSize + void* pData + + + VkResult vkGetAccelerationStructureHandleNV + VkDevice device + VkAccelerationStructureNV accelerationStructure + size_t dataSize + void* pData + + + VkResult vkCreateRayTracingPipelinesNV + VkDevice device + VkPipelineCache pipelineCache + uint32_t createInfoCount + const VkRayTracingPipelineCreateInfoNV* pCreateInfos + const VkAllocationCallbacks* pAllocator + VkPipeline* pPipelines + + + VkResult vkCreateRayTracingPipelinesNV + VkDevice device + VkPipelineCache pipelineCache + uint32_t createInfoCount + const VkRayTracingPipelineCreateInfoNV* pCreateInfos + const VkAllocationCallbacks* pAllocator + VkPipeline* pPipelines + + + VkResult vkCreateRayTracingPipelinesKHR + VkDevice device + VkDeferredOperationKHR deferredOperation + VkPipelineCache pipelineCache + uint32_t createInfoCount + const VkRayTracingPipelineCreateInfoKHR* pCreateInfos + const VkAllocationCallbacks* pAllocator + VkPipeline* pPipelines + + + VkResult vkCreateRayTracingPipelinesKHR + VkDevice device + VkDeferredOperationKHR deferredOperation + VkPipelineCache pipelineCache + uint32_t createInfoCount + const VkRayTracingPipelineCreateInfoKHR* pCreateInfos + const VkAllocationCallbacks* pAllocator + VkPipeline* pPipelines + + + VkResult vkGetPhysicalDeviceCooperativeMatrixPropertiesNV + VkPhysicalDevice physicalDevice + uint32_t* pPropertyCount + VkCooperativeMatrixPropertiesNV* pProperties + + + void vkCmdTraceRaysIndirectKHR + VkCommandBuffer commandBuffer + const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable + const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable + const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable + const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable + VkDeviceAddress indirectDeviceAddress + + + void vkCmdTraceRaysIndirect2KHR + VkCommandBuffer commandBuffer + VkDeviceAddress indirectDeviceAddress + + + void vkGetDeviceAccelerationStructureCompatibilityKHR + VkDevice device + const VkAccelerationStructureVersionInfoKHR* pVersionInfo + VkAccelerationStructureCompatibilityKHR* pCompatibility + + + VkDeviceSize vkGetRayTracingShaderGroupStackSizeKHR + VkDevice device + VkPipeline pipeline + uint32_t group + VkShaderGroupShaderKHR groupShader + + + void vkCmdSetRayTracingPipelineStackSizeKHR + VkCommandBuffer commandBuffer + uint32_t pipelineStackSize + + + uint32_t vkGetImageViewHandleNVX + VkDevice device + const 
VkImageViewHandleInfoNVX* pInfo + + + VkResult vkGetImageViewAddressNVX + VkDevice device + VkImageView imageView + VkImageViewAddressPropertiesNVX* pProperties + + + VkResult vkGetPhysicalDeviceSurfacePresentModes2EXT + VkPhysicalDevice physicalDevice + const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo + uint32_t* pPresentModeCount + VkPresentModeKHR* pPresentModes + + + VkResult vkGetDeviceGroupSurfacePresentModes2EXT + VkDevice device + const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo + VkDeviceGroupPresentModeFlagsKHR* pModes + + + VkResult vkAcquireFullScreenExclusiveModeEXT + VkDevice device + VkSwapchainKHR swapchain + + + VkResult vkReleaseFullScreenExclusiveModeEXT + VkDevice device + VkSwapchainKHR swapchain + + + VkResult vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR + VkPhysicalDevice physicalDevice + uint32_t queueFamilyIndex + uint32_t* pCounterCount + VkPerformanceCounterKHR* pCounters + VkPerformanceCounterDescriptionKHR* pCounterDescriptions + + + void vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR + VkPhysicalDevice physicalDevice + const VkQueryPoolPerformanceCreateInfoKHR* pPerformanceQueryCreateInfo + uint32_t* pNumPasses + + + VkResult vkAcquireProfilingLockKHR + VkDevice device + const VkAcquireProfilingLockInfoKHR* pInfo + + + void vkReleaseProfilingLockKHR + VkDevice device + + + VkResult vkGetImageDrmFormatModifierPropertiesEXT + VkDevice device + VkImage image + VkImageDrmFormatModifierPropertiesEXT* pProperties + + + uint64_t vkGetBufferOpaqueCaptureAddress + VkDevice device + const VkBufferDeviceAddressInfo* pInfo + + + + VkDeviceAddress vkGetBufferDeviceAddress + VkDevice device + const VkBufferDeviceAddressInfo* pInfo + + + + + VkResult vkCreateHeadlessSurfaceEXT + VkInstance instance + const VkHeadlessSurfaceCreateInfoEXT* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkSurfaceKHR* pSurface + + + VkResult vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV + VkPhysicalDevice physicalDevice + uint32_t* pCombinationCount + VkFramebufferMixedSamplesCombinationNV* pCombinations + + + VkResult vkInitializePerformanceApiINTEL + VkDevice device + const VkInitializePerformanceApiInfoINTEL* pInitializeInfo + + + void vkUninitializePerformanceApiINTEL + VkDevice device + + + VkResult vkCmdSetPerformanceMarkerINTEL + VkCommandBuffer commandBuffer + const VkPerformanceMarkerInfoINTEL* pMarkerInfo + + + VkResult vkCmdSetPerformanceStreamMarkerINTEL + VkCommandBuffer commandBuffer + const VkPerformanceStreamMarkerInfoINTEL* pMarkerInfo + + + VkResult vkCmdSetPerformanceOverrideINTEL + VkCommandBuffer commandBuffer + const VkPerformanceOverrideInfoINTEL* pOverrideInfo + + + VkResult vkAcquirePerformanceConfigurationINTEL + VkDevice device + const VkPerformanceConfigurationAcquireInfoINTEL* pAcquireInfo + VkPerformanceConfigurationINTEL* pConfiguration + + + VkResult vkReleasePerformanceConfigurationINTEL + VkDevice device + VkPerformanceConfigurationINTEL configuration + + + VkResult vkQueueSetPerformanceConfigurationINTEL + VkQueue queue + VkPerformanceConfigurationINTEL configuration + + + VkResult vkGetPerformanceParameterINTEL + VkDevice device + VkPerformanceParameterTypeINTEL parameter + VkPerformanceValueINTEL* pValue + + + uint64_t vkGetDeviceMemoryOpaqueCaptureAddress + VkDevice device + const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo + + + + VkResult vkGetPipelineExecutablePropertiesKHR + VkDevice device + const VkPipelineInfoKHR* pPipelineInfo + uint32_t* pExecutableCount + 
VkPipelineExecutablePropertiesKHR* pProperties + + + VkResult vkGetPipelineExecutableStatisticsKHR + VkDevice device + const VkPipelineExecutableInfoKHR* pExecutableInfo + uint32_t* pStatisticCount + VkPipelineExecutableStatisticKHR* pStatistics + + + VkResult vkGetPipelineExecutableInternalRepresentationsKHR + VkDevice device + const VkPipelineExecutableInfoKHR* pExecutableInfo + uint32_t* pInternalRepresentationCount + VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations + + + void vkCmdSetLineStippleKHR + VkCommandBuffer commandBuffer + uint32_t lineStippleFactor + uint16_t lineStipplePattern + + + + VkResult vkGetFaultData + VkDevice device + VkFaultQueryBehavior faultQueryBehavior + VkBool32* pUnrecordedFaults + uint32_t* pFaultCount + VkFaultData* pFaults + + + VkResult vkGetPhysicalDeviceToolProperties + VkPhysicalDevice physicalDevice + uint32_t* pToolCount + VkPhysicalDeviceToolProperties* pToolProperties + + + + VkResult vkCreateAccelerationStructureKHR + VkDevice device + const VkAccelerationStructureCreateInfoKHR* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkAccelerationStructureKHR* pAccelerationStructure + + + void vkCmdBuildAccelerationStructuresKHR + VkCommandBuffer commandBuffer + uint32_t infoCount + const VkAccelerationStructureBuildGeometryInfoKHR* pInfos + const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos + + + void vkCmdBuildAccelerationStructuresIndirectKHR + VkCommandBuffer commandBuffer + uint32_t infoCount + const VkAccelerationStructureBuildGeometryInfoKHR* pInfos + const VkDeviceAddress* pIndirectDeviceAddresses + const uint32_t* pIndirectStrides + const uint32_t* const* ppMaxPrimitiveCounts + + + VkResult vkBuildAccelerationStructuresKHR + VkDevice device + VkDeferredOperationKHR deferredOperation + uint32_t infoCount + const VkAccelerationStructureBuildGeometryInfoKHR* pInfos + const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos + + + VkDeviceAddress vkGetAccelerationStructureDeviceAddressKHR + VkDevice device + const VkAccelerationStructureDeviceAddressInfoKHR* pInfo + + + VkResult vkCreateDeferredOperationKHR + VkDevice device + const VkAllocationCallbacks* pAllocator + VkDeferredOperationKHR* pDeferredOperation + + + void vkDestroyDeferredOperationKHR + VkDevice device + VkDeferredOperationKHR operation + const VkAllocationCallbacks* pAllocator + + + uint32_t vkGetDeferredOperationMaxConcurrencyKHR + VkDevice device + VkDeferredOperationKHR operation + + + VkResult vkGetDeferredOperationResultKHR + VkDevice device + VkDeferredOperationKHR operation + + + VkResult vkDeferredOperationJoinKHR + VkDevice device + VkDeferredOperationKHR operation + + + void vkGetPipelineIndirectMemoryRequirementsNV + VkDevice device + const VkComputePipelineCreateInfo* pCreateInfo + VkMemoryRequirements2* pMemoryRequirements + + + VkDeviceAddress vkGetPipelineIndirectDeviceAddressNV + VkDevice device + const VkPipelineIndirectDeviceAddressInfoNV* pInfo + + + void vkCmdSetCullMode + VkCommandBuffer commandBuffer + VkCullModeFlags cullMode + + + + void vkCmdSetFrontFace + VkCommandBuffer commandBuffer + VkFrontFace frontFace + + + + void vkCmdSetPrimitiveTopology + VkCommandBuffer commandBuffer + VkPrimitiveTopology primitiveTopology + + + + void vkCmdSetViewportWithCount + VkCommandBuffer commandBuffer + uint32_t viewportCount + const VkViewport* pViewports + + + + void vkCmdSetScissorWithCount + VkCommandBuffer commandBuffer + uint32_t scissorCount + const VkRect2D* pScissors + + + + void 
vkCmdBindIndexBuffer2KHR + VkCommandBuffer commandBuffer + VkBuffer buffer + VkDeviceSize offset + VkDeviceSize size + VkIndexType indexType + + + void vkCmdBindVertexBuffers2 + VkCommandBuffer commandBuffer + uint32_t firstBinding + uint32_t bindingCount + const VkBuffer* pBuffers + const VkDeviceSize* pOffsets + const VkDeviceSize* pSizes + const VkDeviceSize* pStrides + + + + void vkCmdSetDepthTestEnable + VkCommandBuffer commandBuffer + VkBool32 depthTestEnable + + + + void vkCmdSetDepthWriteEnable + VkCommandBuffer commandBuffer + VkBool32 depthWriteEnable + + + + void vkCmdSetDepthCompareOp + VkCommandBuffer commandBuffer + VkCompareOp depthCompareOp + + + + void vkCmdSetDepthBoundsTestEnable + VkCommandBuffer commandBuffer + VkBool32 depthBoundsTestEnable + + + + void vkCmdSetStencilTestEnable + VkCommandBuffer commandBuffer + VkBool32 stencilTestEnable + + + + void vkCmdSetStencilOp + VkCommandBuffer commandBuffer + VkStencilFaceFlags faceMask + VkStencilOp failOp + VkStencilOp passOp + VkStencilOp depthFailOp + VkCompareOp compareOp + + + + void vkCmdSetPatchControlPointsEXT + VkCommandBuffer commandBuffer + uint32_t patchControlPoints + + + void vkCmdSetRasterizerDiscardEnable + VkCommandBuffer commandBuffer + VkBool32 rasterizerDiscardEnable + + + + void vkCmdSetDepthBiasEnable + VkCommandBuffer commandBuffer + VkBool32 depthBiasEnable + + + + void vkCmdSetLogicOpEXT + VkCommandBuffer commandBuffer + VkLogicOp logicOp + + + void vkCmdSetPrimitiveRestartEnable + VkCommandBuffer commandBuffer + VkBool32 primitiveRestartEnable + + + + void vkCmdSetTessellationDomainOriginEXT + VkCommandBuffer commandBuffer + VkTessellationDomainOrigin domainOrigin + + + void vkCmdSetDepthClampEnableEXT + VkCommandBuffer commandBuffer + VkBool32 depthClampEnable + + + void vkCmdSetPolygonModeEXT + VkCommandBuffer commandBuffer + VkPolygonMode polygonMode + + + void vkCmdSetRasterizationSamplesEXT + VkCommandBuffer commandBuffer + VkSampleCountFlagBits rasterizationSamples + + + void vkCmdSetSampleMaskEXT + VkCommandBuffer commandBuffer + VkSampleCountFlagBits samples + const VkSampleMask* pSampleMask + + + void vkCmdSetAlphaToCoverageEnableEXT + VkCommandBuffer commandBuffer + VkBool32 alphaToCoverageEnable + + + void vkCmdSetAlphaToOneEnableEXT + VkCommandBuffer commandBuffer + VkBool32 alphaToOneEnable + + + void vkCmdSetLogicOpEnableEXT + VkCommandBuffer commandBuffer + VkBool32 logicOpEnable + + + void vkCmdSetColorBlendEnableEXT + VkCommandBuffer commandBuffer + uint32_t firstAttachment + uint32_t attachmentCount + const VkBool32* pColorBlendEnables + + + void vkCmdSetColorBlendEquationEXT + VkCommandBuffer commandBuffer + uint32_t firstAttachment + uint32_t attachmentCount + const VkColorBlendEquationEXT* pColorBlendEquations + + + void vkCmdSetColorWriteMaskEXT + VkCommandBuffer commandBuffer + uint32_t firstAttachment + uint32_t attachmentCount + const VkColorComponentFlags* pColorWriteMasks + + + void vkCmdSetRasterizationStreamEXT + VkCommandBuffer commandBuffer + uint32_t rasterizationStream + + + void vkCmdSetConservativeRasterizationModeEXT + VkCommandBuffer commandBuffer + VkConservativeRasterizationModeEXT conservativeRasterizationMode + + + void vkCmdSetExtraPrimitiveOverestimationSizeEXT + VkCommandBuffer commandBuffer + float extraPrimitiveOverestimationSize + + + void vkCmdSetDepthClipEnableEXT + VkCommandBuffer commandBuffer + VkBool32 depthClipEnable + + + void vkCmdSetSampleLocationsEnableEXT + VkCommandBuffer commandBuffer + VkBool32 sampleLocationsEnable + + + void 
vkCmdSetColorBlendAdvancedEXT + VkCommandBuffer commandBuffer + uint32_t firstAttachment + uint32_t attachmentCount + const VkColorBlendAdvancedEXT* pColorBlendAdvanced + + + void vkCmdSetProvokingVertexModeEXT + VkCommandBuffer commandBuffer + VkProvokingVertexModeEXT provokingVertexMode + + + void vkCmdSetLineRasterizationModeEXT + VkCommandBuffer commandBuffer + VkLineRasterizationModeEXT lineRasterizationMode + + + void vkCmdSetLineStippleEnableEXT + VkCommandBuffer commandBuffer + VkBool32 stippledLineEnable + + + void vkCmdSetDepthClipNegativeOneToOneEXT + VkCommandBuffer commandBuffer + VkBool32 negativeOneToOne + + + void vkCmdSetViewportWScalingEnableNV + VkCommandBuffer commandBuffer + VkBool32 viewportWScalingEnable + + + void vkCmdSetViewportSwizzleNV + VkCommandBuffer commandBuffer + uint32_t firstViewport + uint32_t viewportCount + const VkViewportSwizzleNV* pViewportSwizzles + + + void vkCmdSetCoverageToColorEnableNV + VkCommandBuffer commandBuffer + VkBool32 coverageToColorEnable + + + void vkCmdSetCoverageToColorLocationNV + VkCommandBuffer commandBuffer + uint32_t coverageToColorLocation + + + void vkCmdSetCoverageModulationModeNV + VkCommandBuffer commandBuffer + VkCoverageModulationModeNV coverageModulationMode + + + void vkCmdSetCoverageModulationTableEnableNV + VkCommandBuffer commandBuffer + VkBool32 coverageModulationTableEnable + + + void vkCmdSetCoverageModulationTableNV + VkCommandBuffer commandBuffer + uint32_t coverageModulationTableCount + const float* pCoverageModulationTable + + + void vkCmdSetShadingRateImageEnableNV + VkCommandBuffer commandBuffer + VkBool32 shadingRateImageEnable + + + void vkCmdSetCoverageReductionModeNV + VkCommandBuffer commandBuffer + VkCoverageReductionModeNV coverageReductionMode + + + void vkCmdSetRepresentativeFragmentTestEnableNV + VkCommandBuffer commandBuffer + VkBool32 representativeFragmentTestEnable + + + VkResult vkCreatePrivateDataSlot + VkDevice device + const VkPrivateDataSlotCreateInfo* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkPrivateDataSlot* pPrivateDataSlot + + + + void vkDestroyPrivateDataSlot + VkDevice device + VkPrivateDataSlot privateDataSlot + const VkAllocationCallbacks* pAllocator + + + + VkResult vkSetPrivateData + VkDevice device + VkObjectType objectType + uint64_t objectHandle + VkPrivateDataSlot privateDataSlot + uint64_t data + + + + void vkGetPrivateData + VkDevice device + VkObjectType objectType + uint64_t objectHandle + VkPrivateDataSlot privateDataSlot + uint64_t* pData + + + + void vkCmdCopyBuffer2 + VkCommandBuffer commandBuffer + const VkCopyBufferInfo2* pCopyBufferInfo + + + + void vkCmdCopyImage2 + VkCommandBuffer commandBuffer + const VkCopyImageInfo2* pCopyImageInfo + + + + void vkCmdBlitImage2 + VkCommandBuffer commandBuffer + const VkBlitImageInfo2* pBlitImageInfo + + + + void vkCmdCopyBufferToImage2 + VkCommandBuffer commandBuffer + const VkCopyBufferToImageInfo2* pCopyBufferToImageInfo + + + + void vkCmdCopyImageToBuffer2 + VkCommandBuffer commandBuffer + const VkCopyImageToBufferInfo2* pCopyImageToBufferInfo + + + + void vkCmdResolveImage2 + VkCommandBuffer commandBuffer + const VkResolveImageInfo2* pResolveImageInfo + + + + void vkCmdRefreshObjectsKHR + VkCommandBuffer commandBuffer + const VkRefreshObjectListKHR* pRefreshObjects + + + VkResult vkGetPhysicalDeviceRefreshableObjectTypesKHR + VkPhysicalDevice physicalDevice + uint32_t* pRefreshableObjectTypeCount + VkObjectType* pRefreshableObjectTypes + + + void vkCmdSetFragmentShadingRateKHR + VkCommandBuffer 
commandBuffer + const VkExtent2D* pFragmentSize + const VkFragmentShadingRateCombinerOpKHR combinerOps[2] + + + VkResult vkGetPhysicalDeviceFragmentShadingRatesKHR + VkPhysicalDevice physicalDevice + uint32_t* pFragmentShadingRateCount + VkPhysicalDeviceFragmentShadingRateKHR* pFragmentShadingRates + + + void vkCmdSetFragmentShadingRateEnumNV + VkCommandBuffer commandBuffer + VkFragmentShadingRateNV shadingRate + const VkFragmentShadingRateCombinerOpKHR combinerOps[2] + + + void vkGetAccelerationStructureBuildSizesKHR + VkDevice device + VkAccelerationStructureBuildTypeKHR buildType + const VkAccelerationStructureBuildGeometryInfoKHR* pBuildInfo + const uint32_t* pMaxPrimitiveCounts + VkAccelerationStructureBuildSizesInfoKHR* pSizeInfo + + + void vkCmdSetVertexInputEXT + VkCommandBuffer commandBuffer + uint32_t vertexBindingDescriptionCount + const VkVertexInputBindingDescription2EXT* pVertexBindingDescriptions + uint32_t vertexAttributeDescriptionCount + const VkVertexInputAttributeDescription2EXT* pVertexAttributeDescriptions + + + void vkCmdSetColorWriteEnableEXT + VkCommandBuffer commandBuffer + uint32_t attachmentCount + const VkBool32* pColorWriteEnables + + + void vkCmdSetEvent2 + VkCommandBuffer commandBuffer + VkEvent event + const VkDependencyInfo* pDependencyInfo + + + + void vkCmdResetEvent2 + VkCommandBuffer commandBuffer + VkEvent event + VkPipelineStageFlags2 stageMask + + + + void vkCmdWaitEvents2 + VkCommandBuffer commandBuffer + uint32_t eventCount + const VkEvent* pEvents + const VkDependencyInfo* pDependencyInfos + + + + void vkCmdPipelineBarrier2 + VkCommandBuffer commandBuffer + const VkDependencyInfo* pDependencyInfo + + + + VkResult vkQueueSubmit2 + VkQueue queue + uint32_t submitCount + const VkSubmitInfo2* pSubmits + VkFence fence + + + + void vkCmdWriteTimestamp2 + VkCommandBuffer commandBuffer + VkPipelineStageFlags2 stage + VkQueryPool queryPool + uint32_t query + + + + void vkCmdWriteBufferMarker2AMD + VkCommandBuffer commandBuffer + VkPipelineStageFlags2 stage + VkBuffer dstBuffer + VkDeviceSize dstOffset + uint32_t marker + + + void vkGetQueueCheckpointData2NV + VkQueue queue + uint32_t* pCheckpointDataCount + VkCheckpointData2NV* pCheckpointData + + + VkResult vkCopyMemoryToImageEXT + VkDevice device + const VkCopyMemoryToImageInfoEXT* pCopyMemoryToImageInfo + + + VkResult vkCopyImageToMemoryEXT + VkDevice device + const VkCopyImageToMemoryInfoEXT* pCopyImageToMemoryInfo + + + VkResult vkCopyImageToImageEXT + VkDevice device + const VkCopyImageToImageInfoEXT* pCopyImageToImageInfo + + + VkResult vkTransitionImageLayoutEXT + VkDevice device + uint32_t transitionCount + const VkHostImageLayoutTransitionInfoEXT* pTransitions + + + void vkGetCommandPoolMemoryConsumption + VkDevice device + VkCommandPool commandPool + VkCommandBuffer commandBuffer + VkCommandPoolMemoryConsumption* pConsumption + + + VkResult vkGetPhysicalDeviceVideoCapabilitiesKHR + VkPhysicalDevice physicalDevice + const VkVideoProfileInfoKHR* pVideoProfile + VkVideoCapabilitiesKHR* pCapabilities + + + VkResult vkGetPhysicalDeviceVideoFormatPropertiesKHR + VkPhysicalDevice physicalDevice + const VkPhysicalDeviceVideoFormatInfoKHR* pVideoFormatInfo + uint32_t* pVideoFormatPropertyCount + VkVideoFormatPropertiesKHR* pVideoFormatProperties + + + VkResult vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR + VkPhysicalDevice physicalDevice + const VkPhysicalDeviceVideoEncodeQualityLevelInfoKHR* pQualityLevelInfo + VkVideoEncodeQualityLevelPropertiesKHR* pQualityLevelProperties + + + VkResult 
vkCreateVideoSessionKHR + VkDevice device + const VkVideoSessionCreateInfoKHR* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkVideoSessionKHR* pVideoSession + + + void vkDestroyVideoSessionKHR + VkDevice device + VkVideoSessionKHR videoSession + const VkAllocationCallbacks* pAllocator + + + VkResult vkCreateVideoSessionParametersKHR + VkDevice device + const VkVideoSessionParametersCreateInfoKHR* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkVideoSessionParametersKHR* pVideoSessionParameters + + + VkResult vkUpdateVideoSessionParametersKHR + VkDevice device + VkVideoSessionParametersKHR videoSessionParameters + const VkVideoSessionParametersUpdateInfoKHR* pUpdateInfo + + + VkResult vkGetEncodedVideoSessionParametersKHR + VkDevice device + const VkVideoEncodeSessionParametersGetInfoKHR* pVideoSessionParametersInfo + VkVideoEncodeSessionParametersFeedbackInfoKHR* pFeedbackInfo + size_t* pDataSize + void* pData + + + void vkDestroyVideoSessionParametersKHR + VkDevice device + VkVideoSessionParametersKHR videoSessionParameters + const VkAllocationCallbacks* pAllocator + + + VkResult vkGetVideoSessionMemoryRequirementsKHR + VkDevice device + VkVideoSessionKHR videoSession + uint32_t* pMemoryRequirementsCount + VkVideoSessionMemoryRequirementsKHR* pMemoryRequirements + + + VkResult vkBindVideoSessionMemoryKHR + VkDevice device + VkVideoSessionKHR videoSession + uint32_t bindSessionMemoryInfoCount + const VkBindVideoSessionMemoryInfoKHR* pBindSessionMemoryInfos + + + void vkCmdDecodeVideoKHR + VkCommandBuffer commandBuffer + const VkVideoDecodeInfoKHR* pDecodeInfo + + + void vkCmdBeginVideoCodingKHR + VkCommandBuffer commandBuffer + const VkVideoBeginCodingInfoKHR* pBeginInfo + + + void vkCmdControlVideoCodingKHR + VkCommandBuffer commandBuffer + const VkVideoCodingControlInfoKHR* pCodingControlInfo + + + void vkCmdEndVideoCodingKHR + VkCommandBuffer commandBuffer + const VkVideoEndCodingInfoKHR* pEndCodingInfo + + + void vkCmdEncodeVideoKHR + VkCommandBuffer commandBuffer + const VkVideoEncodeInfoKHR* pEncodeInfo + + + void vkCmdDecompressMemoryNV + VkCommandBuffer commandBuffer + uint32_t decompressRegionCount + const VkDecompressMemoryRegionNV* pDecompressMemoryRegions + + + void vkCmdDecompressMemoryIndirectCountNV + VkCommandBuffer commandBuffer + VkDeviceAddress indirectCommandsAddress + VkDeviceAddress indirectCommandsCountAddress + uint32_t stride + + + VkResult vkCreateCuModuleNVX + VkDevice device + const VkCuModuleCreateInfoNVX* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkCuModuleNVX* pModule + + + VkResult vkCreateCuFunctionNVX + VkDevice device + const VkCuFunctionCreateInfoNVX* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkCuFunctionNVX* pFunction + + + void vkDestroyCuModuleNVX + VkDevice device + VkCuModuleNVX module + const VkAllocationCallbacks* pAllocator + + + void vkDestroyCuFunctionNVX + VkDevice device + VkCuFunctionNVX function + const VkAllocationCallbacks* pAllocator + + + void vkCmdCuLaunchKernelNVX + VkCommandBuffer commandBuffer + const VkCuLaunchInfoNVX* pLaunchInfo + + + void vkGetDescriptorSetLayoutSizeEXT + VkDevice device + VkDescriptorSetLayout layout + VkDeviceSize* pLayoutSizeInBytes + + + void vkGetDescriptorSetLayoutBindingOffsetEXT + VkDevice device + VkDescriptorSetLayout layout + uint32_t binding + VkDeviceSize* pOffset + + + void vkGetDescriptorEXT + VkDevice device + const VkDescriptorGetInfoEXT* pDescriptorInfo + size_t dataSize + void* pDescriptor + + + void vkCmdBindDescriptorBuffersEXT + 
VkCommandBuffer commandBuffer + uint32_t bufferCount + const VkDescriptorBufferBindingInfoEXT* pBindingInfos + + + void vkCmdSetDescriptorBufferOffsetsEXT + VkCommandBuffer commandBuffer + VkPipelineBindPoint pipelineBindPoint + VkPipelineLayout layout + uint32_t firstSet + uint32_t setCount + const uint32_t* pBufferIndices + const VkDeviceSize* pOffsets + + + void vkCmdBindDescriptorBufferEmbeddedSamplersEXT + VkCommandBuffer commandBuffer + VkPipelineBindPoint pipelineBindPoint + VkPipelineLayout layout + uint32_t set + + + VkResult vkGetBufferOpaqueCaptureDescriptorDataEXT + VkDevice device + const VkBufferCaptureDescriptorDataInfoEXT* pInfo + void* pData + + + VkResult vkGetImageOpaqueCaptureDescriptorDataEXT + VkDevice device + const VkImageCaptureDescriptorDataInfoEXT* pInfo + void* pData + + + VkResult vkGetImageViewOpaqueCaptureDescriptorDataEXT + VkDevice device + const VkImageViewCaptureDescriptorDataInfoEXT* pInfo + void* pData + + + VkResult vkGetSamplerOpaqueCaptureDescriptorDataEXT + VkDevice device + const VkSamplerCaptureDescriptorDataInfoEXT* pInfo + void* pData + + + VkResult vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT + VkDevice device + const VkAccelerationStructureCaptureDescriptorDataInfoEXT* pInfo + void* pData + + + void vkSetDeviceMemoryPriorityEXT + VkDevice device + VkDeviceMemory memory + float priority + + + VkResult vkAcquireDrmDisplayEXT + VkPhysicalDevice physicalDevice + int32_t drmFd + VkDisplayKHR display + + + VkResult vkGetDrmDisplayEXT + VkPhysicalDevice physicalDevice + int32_t drmFd + uint32_t connectorId + VkDisplayKHR* display + + + VkResult vkWaitForPresentKHR + VkDevice device + VkSwapchainKHR swapchain + uint64_t presentId + uint64_t timeout + + + VkResult vkCreateBufferCollectionFUCHSIA + VkDevice device + const VkBufferCollectionCreateInfoFUCHSIA* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkBufferCollectionFUCHSIA* pCollection + + + VkResult vkSetBufferCollectionBufferConstraintsFUCHSIA + VkDevice device + VkBufferCollectionFUCHSIA collection + const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo + + + VkResult vkSetBufferCollectionImageConstraintsFUCHSIA + VkDevice device + VkBufferCollectionFUCHSIA collection + const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo + + + void vkDestroyBufferCollectionFUCHSIA + VkDevice device + VkBufferCollectionFUCHSIA collection + const VkAllocationCallbacks* pAllocator + + + VkResult vkGetBufferCollectionPropertiesFUCHSIA + VkDevice device + VkBufferCollectionFUCHSIA collection + VkBufferCollectionPropertiesFUCHSIA* pProperties + + + VkResult vkCreateCudaModuleNV + VkDevice device + const VkCudaModuleCreateInfoNV* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkCudaModuleNV* pModule + + + VkResult vkGetCudaModuleCacheNV + VkDevice device + VkCudaModuleNV module + size_t* pCacheSize + void* pCacheData + + + VkResult vkCreateCudaFunctionNV + VkDevice device + const VkCudaFunctionCreateInfoNV* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkCudaFunctionNV* pFunction + + + void vkDestroyCudaModuleNV + VkDevice device + VkCudaModuleNV module + const VkAllocationCallbacks* pAllocator + + + void vkDestroyCudaFunctionNV + VkDevice device + VkCudaFunctionNV function + const VkAllocationCallbacks* pAllocator + + + void vkCmdCudaLaunchKernelNV + VkCommandBuffer commandBuffer + const VkCudaLaunchInfoNV* pLaunchInfo + + + void vkCmdBeginRendering + VkCommandBuffer commandBuffer + const VkRenderingInfo* pRenderingInfo + + + + void vkCmdEndRendering + 
VkCommandBuffer commandBuffer + + + + + void vkGetDescriptorSetLayoutHostMappingInfoVALVE + VkDevice device + const VkDescriptorSetBindingReferenceVALVE* pBindingReference + VkDescriptorSetLayoutHostMappingInfoVALVE* pHostMapping + + + void vkGetDescriptorSetHostMappingVALVE + VkDevice device + VkDescriptorSet descriptorSet + void** ppData + + + VkResult vkCreateMicromapEXT + VkDevice device + const VkMicromapCreateInfoEXT* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkMicromapEXT* pMicromap + + + void vkCmdBuildMicromapsEXT + VkCommandBuffer commandBuffer + uint32_t infoCount + const VkMicromapBuildInfoEXT* pInfos + + + VkResult vkBuildMicromapsEXT + VkDevice device + VkDeferredOperationKHR deferredOperation + uint32_t infoCount + const VkMicromapBuildInfoEXT* pInfos + + + void vkDestroyMicromapEXT + VkDevice device + VkMicromapEXT micromap + const VkAllocationCallbacks* pAllocator + + + void vkCmdCopyMicromapEXT + VkCommandBuffer commandBuffer + const VkCopyMicromapInfoEXT* pInfo + + + VkResult vkCopyMicromapEXT + VkDevice device + VkDeferredOperationKHR deferredOperation + const VkCopyMicromapInfoEXT* pInfo + + + void vkCmdCopyMicromapToMemoryEXT + VkCommandBuffer commandBuffer + const VkCopyMicromapToMemoryInfoEXT* pInfo + + + VkResult vkCopyMicromapToMemoryEXT + VkDevice device + VkDeferredOperationKHR deferredOperation + const VkCopyMicromapToMemoryInfoEXT* pInfo + + + void vkCmdCopyMemoryToMicromapEXT + VkCommandBuffer commandBuffer + const VkCopyMemoryToMicromapInfoEXT* pInfo + + + VkResult vkCopyMemoryToMicromapEXT + VkDevice device + VkDeferredOperationKHR deferredOperation + const VkCopyMemoryToMicromapInfoEXT* pInfo + + + void vkCmdWriteMicromapsPropertiesEXT + VkCommandBuffer commandBuffer + uint32_t micromapCount + const VkMicromapEXT* pMicromaps + VkQueryType queryType + VkQueryPool queryPool + uint32_t firstQuery + + + VkResult vkWriteMicromapsPropertiesEXT + VkDevice device + uint32_t micromapCount + const VkMicromapEXT* pMicromaps + VkQueryType queryType + size_t dataSize + void* pData + size_t stride + + + void vkGetDeviceMicromapCompatibilityEXT + VkDevice device + const VkMicromapVersionInfoEXT* pVersionInfo + VkAccelerationStructureCompatibilityKHR* pCompatibility + + + void vkGetMicromapBuildSizesEXT + VkDevice device + VkAccelerationStructureBuildTypeKHR buildType + const VkMicromapBuildInfoEXT* pBuildInfo + VkMicromapBuildSizesInfoEXT* pSizeInfo + + + void vkGetShaderModuleIdentifierEXT + VkDevice device + VkShaderModule shaderModule + VkShaderModuleIdentifierEXT* pIdentifier + + + void vkGetShaderModuleCreateInfoIdentifierEXT + VkDevice device + const VkShaderModuleCreateInfo* pCreateInfo + VkShaderModuleIdentifierEXT* pIdentifier + + + void vkGetImageSubresourceLayout2KHR + VkDevice device + VkImage image + const VkImageSubresource2KHR* pSubresource + VkSubresourceLayout2KHR* pLayout + + + + VkResult vkGetPipelinePropertiesEXT + VkDevice device + const VkPipelineInfoEXT* pPipelineInfo + VkBaseOutStructure* pPipelineProperties + + + void vkExportMetalObjectsEXT + VkDevice device + VkExportMetalObjectsInfoEXT* pMetalObjectsInfo + + + VkResult vkGetFramebufferTilePropertiesQCOM + VkDevice device + VkFramebuffer framebuffer + uint32_t* pPropertiesCount + VkTilePropertiesQCOM* pProperties + + + VkResult vkGetDynamicRenderingTilePropertiesQCOM + VkDevice device + const VkRenderingInfo* pRenderingInfo + VkTilePropertiesQCOM* pProperties + + + VkResult vkGetPhysicalDeviceOpticalFlowImageFormatsNV + VkPhysicalDevice physicalDevice + const 
VkOpticalFlowImageFormatInfoNV* pOpticalFlowImageFormatInfo + uint32_t* pFormatCount + VkOpticalFlowImageFormatPropertiesNV* pImageFormatProperties + + + VkResult vkCreateOpticalFlowSessionNV + VkDevice device + const VkOpticalFlowSessionCreateInfoNV* pCreateInfo + const VkAllocationCallbacks* pAllocator + VkOpticalFlowSessionNV* pSession + + + void vkDestroyOpticalFlowSessionNV + VkDevice device + VkOpticalFlowSessionNV session + const VkAllocationCallbacks* pAllocator + + + VkResult vkBindOpticalFlowSessionImageNV + VkDevice device + VkOpticalFlowSessionNV session + VkOpticalFlowSessionBindingPointNV bindingPoint + VkImageView view + VkImageLayout layout + + + void vkCmdOpticalFlowExecuteNV + VkCommandBuffer commandBuffer + VkOpticalFlowSessionNV session + const VkOpticalFlowExecuteInfoNV* pExecuteInfo + + + VkResult vkGetDeviceFaultInfoEXT + VkDevice device + VkDeviceFaultCountsEXT* pFaultCounts + VkDeviceFaultInfoEXT* pFaultInfo + + + void vkCmdSetDepthBias2EXT + VkCommandBuffer commandBuffer + const VkDepthBiasInfoEXT* pDepthBiasInfo + + + VkResult vkReleaseSwapchainImagesEXT + VkDevice device + const VkReleaseSwapchainImagesInfoEXT* pReleaseInfo + + + void vkGetDeviceImageSubresourceLayoutKHR + VkDevice device + const VkDeviceImageSubresourceInfoKHR* pInfo + VkSubresourceLayout2KHR* pLayout + + + VkResult vkMapMemory2KHR + VkDevice device + const VkMemoryMapInfoKHR* pMemoryMapInfo + void** ppData + + + VkResult vkUnmapMemory2KHR + VkDevice device + const VkMemoryUnmapInfoKHR* pMemoryUnmapInfo + + + VkResult vkCreateShadersEXT + VkDevice device + uint32_t createInfoCount + const VkShaderCreateInfoEXT* pCreateInfos + const VkAllocationCallbacks* pAllocator + VkShaderEXT* pShaders + + + void vkDestroyShaderEXT + VkDevice device + VkShaderEXT shader + const VkAllocationCallbacks* pAllocator + + + VkResult vkGetShaderBinaryDataEXT + VkDevice device + VkShaderEXT shader + size_t* pDataSize + void* pData + + + void vkCmdBindShadersEXT + VkCommandBuffer commandBuffer + uint32_t stageCount + const VkShaderStageFlagBits* pStages + const VkShaderEXT* pShaders + + + VkResult vkGetScreenBufferPropertiesQNX + VkDevice device + const struct _screen_buffer* buffer + VkScreenBufferPropertiesQNX* pProperties + + + VkResult vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR + VkPhysicalDevice physicalDevice + uint32_t* pPropertyCount + VkCooperativeMatrixPropertiesKHR* pProperties + + + VkResult vkGetExecutionGraphPipelineScratchSizeAMDX + VkDevice device + VkPipeline executionGraph + VkExecutionGraphPipelineScratchSizeAMDX* pSizeInfo + + + VkResult vkGetExecutionGraphPipelineNodeIndexAMDX + VkDevice device + VkPipeline executionGraph + const VkPipelineShaderStageNodeCreateInfoAMDX* pNodeInfo + uint32_t* pNodeIndex + + + VkResult vkCreateExecutionGraphPipelinesAMDX + VkDevice device + VkPipelineCache pipelineCache + uint32_t createInfoCount + const VkExecutionGraphPipelineCreateInfoAMDX* pCreateInfos + const VkAllocationCallbacks* pAllocator + VkPipeline* pPipelines + + + void vkCmdInitializeGraphScratchMemoryAMDX + VkCommandBuffer commandBuffer + VkDeviceAddress scratch + + + void vkCmdDispatchGraphAMDX + VkCommandBuffer commandBuffer + VkDeviceAddress scratch + const VkDispatchGraphCountInfoAMDX* pCountInfo + + + void vkCmdDispatchGraphIndirectAMDX + VkCommandBuffer commandBuffer + VkDeviceAddress scratch + const VkDispatchGraphCountInfoAMDX* pCountInfo + + + void vkCmdDispatchGraphIndirectCountAMDX + VkCommandBuffer commandBuffer + VkDeviceAddress scratch + VkDeviceAddress countInfo + + + void 
vkCmdBindDescriptorSets2KHR + VkCommandBuffer commandBuffer + const VkBindDescriptorSetsInfoKHR* pBindDescriptorSetsInfo + + + void vkCmdPushConstants2KHR + VkCommandBuffer commandBuffer + const VkPushConstantsInfoKHR* pPushConstantsInfo + + + void vkCmdPushDescriptorSet2KHR + VkCommandBuffer commandBuffer + const VkPushDescriptorSetInfoKHR* pPushDescriptorSetInfo + + + void vkCmdPushDescriptorSetWithTemplate2KHR + VkCommandBuffer commandBuffer + const VkPushDescriptorSetWithTemplateInfoKHR* pPushDescriptorSetWithTemplateInfo + + + void vkCmdSetDescriptorBufferOffsets2EXT + VkCommandBuffer commandBuffer + const VkSetDescriptorBufferOffsetsInfoEXT* pSetDescriptorBufferOffsetsInfo + + + void vkCmdBindDescriptorBufferEmbeddedSamplers2EXT + VkCommandBuffer commandBuffer + const VkBindDescriptorBufferEmbeddedSamplersInfoEXT* pBindDescriptorBufferEmbeddedSamplersInfo + + + VkResult vkSetLatencySleepModeNV + VkDevice device + VkSwapchainKHR swapchain + const VkLatencySleepModeInfoNV* pSleepModeInfo + + + VkResult vkLatencySleepNV + VkDevice device + VkSwapchainKHR swapchain + const VkLatencySleepInfoNV* pSleepInfo + + + void vkSetLatencyMarkerNV + VkDevice device + VkSwapchainKHR swapchain + const VkSetLatencyMarkerInfoNV* pLatencyMarkerInfo + + + void vkGetLatencyTimingsNV + VkDevice device + VkSwapchainKHR swapchain + VkGetLatencyMarkerInfoNV* pLatencyMarkerInfo + + + void vkQueueNotifyOutOfBandNV + VkQueue queue + const VkOutOfBandQueueTypeInfoNV* pQueueTypeInfo + + + void vkCmdSetRenderingAttachmentLocationsKHR + VkCommandBuffer commandBuffer + const VkRenderingAttachmentLocationInfoKHR* pLocationInfo + + + void vkCmdSetRenderingInputAttachmentIndicesKHR + VkCommandBuffer commandBuffer + const VkRenderingInputAttachmentIndexInfoKHR* pLocationInfo + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + offset 1 reserved for the old VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHX enum + offset 2 reserved for the old VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHX enum + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Additional dependent types / tokens extending enumerants, not explicitly mentioned + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Additional dependent 
types / tokens extending enumerants, not explicitly mentioned

This duplicates definitions in VK_KHR_device_group below

VK_ANDROID_native_buffer is used between the Android Vulkan loader and drivers to implement the WSI extensions. It is not exposed to applications and uses types that are not part of Android's stable public API, so it is left disabled to keep it out of the standard Vulkan headers.
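Most of the query commands listed above (vkGetPhysicalDeviceDisplayProperties2KHR, vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR, vkGetPhysicalDeviceToolProperties, and the other pPropertyCount/pProperties pairs) follow the same two-call enumeration pattern. The sketch below illustrates it with vkGetPhysicalDeviceToolProperties from the command list above; the helper name query_tool_properties, the allocation strategy, and the VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TOOL_PROPERTIES constant are illustrative assumptions taken from the standard Vulkan headers rather than from this excerpt.

    /* Two-call enumeration sketch: the first call writes the count, the
     * second fills the caller-allocated array. VK_INCOMPLETE is treated as
     * failure here purely for brevity. Illustrative only. */
    #include <stdlib.h>
    #include <vulkan/vulkan.h>

    static VkPhysicalDeviceToolProperties *query_tool_properties(VkPhysicalDevice gpu,
                                                                 uint32_t *count)
    {
        if (vkGetPhysicalDeviceToolProperties(gpu, count, NULL) != VK_SUCCESS || *count == 0)
            return NULL;

        VkPhysicalDeviceToolProperties *tools = calloc(*count, sizeof(*tools));
        if (tools == NULL)
            return NULL;
        for (uint32_t i = 0; i < *count; i++)
            tools[i].sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TOOL_PROPERTIES;

        if (vkGetPhysicalDeviceToolProperties(gpu, count, tools) != VK_SUCCESS) {
            free(tools);
            return NULL;
        }
        return tools; /* caller frees */
    }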
This duplicates definitions in other extensions, below

enum offset=0 was mistakenly used for the 1.1 core enum VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES (value=1000094000). Fortunately, no conflict resulted.
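For context on that offset bookkeeping: the offset-to-value rule used below comes from the Vulkan registry documentation, not from this excerpt, so treat it as an assumption. Under that rule, offset 0 in an extension numbered 95 maps to exactly the 1000094000 quoted above, which is why the accidental reuse of offset=0 produced the same number as the core enum and no conflict resulted.

    /* Sketch of the registry's value-from-offset rule for extension enums
     * (per the registry schema documentation; extension numbers are 1-based).
     * vk_extension_enum_value(95, 0) == 1000094000. */
    #include <stdint.h>

    static inline int64_t vk_extension_enum_value(int32_t ext_number, int32_t offset)
    {
        return 1000000000LL + (int64_t)(ext_number - 1) * 1000LL + offset;
    }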
This extension requires buffer_device_address functionality. VK_EXT_buffer_device_address is also acceptable, but since it is deprecated the KHR version is preferred.

These enums are present only to inform downstream consumers like KTX2. There is no actual Vulkan extension corresponding to the enums.
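The buffer_device_address requirement mentioned above surfaces through vkGetBufferDeviceAddress, which appears in the command list earlier in this file. A minimal sketch of that call follows; VkBufferDeviceAddressInfo and its sType constant are standard-header names assumed here, and the buffer is assumed to have been created with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT on a device that enabled the bufferDeviceAddress feature.

    /* Sketch: fetch the GPU-visible address of a buffer. Assumes the
     * bufferDeviceAddress feature was enabled at device creation and the
     * buffer was created with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT. */
    #include <vulkan/vulkan.h>

    static VkDeviceAddress buffer_device_address(VkDevice device, VkBuffer buffer)
    {
        const VkBufferDeviceAddressInfo info = {
            .sType = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO,
            .buffer = buffer,
        };
        return vkGetBufferDeviceAddress(device, &info);
    }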
VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT and VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_2_PLANE_444_FORMATS_FEATURES_EXT were not promoted to Vulkan 1.3.

VkPhysicalDevice4444FormatsFeaturesEXT and VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT were not promoted to Vulkan 1.3.
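Because these feature structures stayed extension-only, they are still queried through the VkPhysicalDeviceFeatures2 pNext chain rather than through a core feature struct. A sketch for the 4444-formats case follows; vkGetPhysicalDeviceFeatures2, VkPhysicalDeviceFeatures2, and the formatA4R4G4B4 member are standard-header names assumed here, and the query presumes VK_EXT_4444_formats is advertised by the physical device.

    /* Sketch: query the extension-only 4444-formats feature through the
     * pNext chain of vkGetPhysicalDeviceFeatures2 (Vulkan 1.1+ entry point). */
    #include <vulkan/vulkan.h>

    static VkBool32 has_a4r4g4b4_format(VkPhysicalDevice gpu)
    {
        VkPhysicalDevice4444FormatsFeaturesEXT formats4444 = {
            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT,
        };
        VkPhysicalDeviceFeatures2 features2 = {
            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
            .pNext = &formats4444,
        };
        vkGetPhysicalDeviceFeatures2(gpu, &features2);
        return formats4444.formatA4R4G4B4;
    }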
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + NV internal use only + + + + + + + + + + + + + + + + + + + + + + + + + + NV internal use only + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Fragment shader stage is added by the VK_EXT_shader_tile_image extension + + + + + + + Fragment shader stage is added by the VK_EXT_shader_tile_image extension + + + + + + + + + + + + + + + + + + + TODO/Suggestion. Introduce 'synclist' (could be a different name) element + that specifies the list of stages, accesses, etc. This list can be used by + 'syncaccess' or 'syncstage' elements. For example, 'syncsupport' in addition to the + 'stage' attribute can support 'list' attribute to reference 'synclist'. + We can have the lists defined for ALL stages and it can be shared between MEMORY_READ + and MEMORY_WRITE accesses. Similarly, ALL shader stages list is often used. This proposal + is a way to fix duplication problem. 
When new stage is added multiple places needs to be + updated. It is potential source of bugs. The expectation such setup will produce more + robust system and also more simple structure to review and validate. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + VK_PIPELINE_STAGE_2_DRAW_INDIRECT_BIT + VK_PIPELINE_STAGE_2_INDEX_INPUT_BIT + VK_PIPELINE_STAGE_2_VERTEX_ATTRIBUTE_INPUT_BIT + VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT + VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT + VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT + VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT + VK_PIPELINE_STAGE_2_TRANSFORM_FEEDBACK_BIT_EXT + VK_PIPELINE_STAGE_2_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR + VK_PIPELINE_STAGE_2_FRAGMENT_DENSITY_PROCESS_BIT_EXT + VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT + VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT + VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT + VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT + VK_PIPELINE_STAGE_2_CONDITIONAL_RENDERING_BIT_EXT + + + VK_PIPELINE_STAGE_2_DRAW_INDIRECT_BIT + VK_PIPELINE_STAGE_2_TASK_SHADER_BIT_EXT + VK_PIPELINE_STAGE_2_MESH_SHADER_BIT_EXT + VK_PIPELINE_STAGE_2_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR + VK_PIPELINE_STAGE_2_FRAGMENT_DENSITY_PROCESS_BIT_EXT + VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT + VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT + VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT + VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT + VK_PIPELINE_STAGE_2_CONDITIONAL_RENDERING_BIT_EXT + + + VK_PIPELINE_STAGE_2_DRAW_INDIRECT_BIT + VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT + VK_PIPELINE_STAGE_2_CONDITIONAL_RENDERING_BIT_EXT + + + VK_PIPELINE_STAGE_2_TRANSFER_BIT + + + VK_PIPELINE_STAGE_2_HOST_BIT + + + VK_PIPELINE_STAGE_2_SUBPASS_SHADER_BIT_HUAWEI + + + VK_PIPELINE_STAGE_2_COMMAND_PREPROCESS_BIT_NV + + + VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_KHR + + + VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_COPY_BIT_KHR + + + VK_PIPELINE_STAGE_2_MICROMAP_BUILD_BIT_EXT + + + VK_PIPELINE_STAGE_2_DRAW_INDIRECT_BIT + VK_PIPELINE_STAGE_2_RAY_TRACING_SHADER_BIT_KHR + + + VK_PIPELINE_STAGE_2_VIDEO_DECODE_BIT_KHR + + + VK_PIPELINE_STAGE_2_VIDEO_ENCODE_BIT_KHR + + + VK_PIPELINE_STAGE_2_OPTICAL_FLOW_BIT_NV + + + diff --git a/src/c.zig b/src/c.zig new file mode 100644 index 0000000..a3d9dfb --- /dev/null +++ b/src/c.zig @@ -0,0 +1,25 @@ +pub usingnamespace @cImport({ + @cDefine("GLFW_INCLUDE_NONE", {}); + @cInclude("GLFW/glfw3.h"); +}); + +const vk = @import("vk"); +const c = @This(); + +pub extern fn glfwGetInstanceProcAddress( + instance: vk.Instance, + procname: [*:0]const u8, +) vk.PfnVoidFunction; + +pub extern fn glfwGetPhysicalDevicePresentationSupport( + instance: vk.Instance, + pdev: vk.PhysicalDevice, + queuefamily: u32, +) c_int; + +pub extern fn glfwCreateWindowSurface( + instance: vk.Instance, + window: *c.GLFWwindow, + allocation_callbacks: ?*const vk.AllocationCallbacks, + surface: *vk.SurfaceKHR, +) vk.Result; diff --git a/src/main.zig b/src/main.zig new file mode 100644 index 0000000..1fd54c1 --- /dev/null +++ b/src/main.zig @@ -0,0 +1,75 @@ +const std = @import("std"); + +const c = @import("c.zig"); + +const vk = @import("vk"); + +const BaseDispatch = vk.BaseWrapper(.{ + .createInstance = true, + .getInstanceProcAddr = true, +}); + +const InstanceDispatch = vk.InstanceWrapper(.{ + .destroyInstance = true, +}); + +const Context = struct { + vkb: BaseDispatch, + vki: InstanceDispatch, + + instance: vk.Instance, + + pub fn 
init(allocator: std.mem.Allocator, app_name: [*:0]const u8, window: *c.GLFWwindow) !Context { + _ = allocator; + _ = window; + + var self: Context = undefined; + self.vkb = try BaseDispatch.load(c.glfwGetInstanceProcAddress); + + var glfw_exts_count: u32 = 0; + const glfw_exts = c.glfwGetRequiredInstanceExtensions(&glfw_exts_count); + + const app_info = vk.ApplicationInfo{ + .p_application_name = app_name, + .application_version = vk.makeApiVersion(0, 0, 0, 0), + .p_engine_name = app_name, + .engine_version = vk.makeApiVersion(0, 0, 0, 0), + .api_version = vk.API_VERSION_1_2, + }; + + self.instance = try self.vkb.createInstance(&.{ + .p_application_info = &app_info, + .enabled_extension_count = glfw_exts_count, + .pp_enabled_extension_names = @as([*]const [*:0]const u8, @ptrCast(glfw_exts)), + }, null); + + self.vki = try InstanceDispatch.load(self.instance, self.vkb.dispatch.vkGetInstanceProcAddr); + errdefer self.vki.destroyInstance(self.instance, null); + + return self; + } + + pub fn deinit(self: Context) void { + self.vki.destroyInstance(self.instance, null); + } +}; + +pub fn main() !void { + var gpa = std.heap.GeneralPurposeAllocator(.{}){}; + const allocator = gpa.allocator(); + + if (c.glfwInit() != c.GLFW_TRUE) { + return error.GlfwInitFailed; + } + defer c.glfwTerminate(); + + const window: *c.GLFWwindow = c.glfwCreateWindow(1280, 720, "Hello World!", null, null) orelse return error.GlfwWindowFailed; + defer c.glfwDestroyWindow(window); + + const ctx = try Context.init(allocator, "content", window); + defer ctx.deinit(); + + while (c.glfwWindowShouldClose(window) == 0) : (c.glfwPollEvents()) { + c.glfwSwapBuffers(window); + } +} From 872323bf2aa2b16b99284d287dad2b3b6cecc48b Mon Sep 17 00:00:00 2001 From: David Allemang Date: Wed, 20 Mar 2024 18:49:46 -0400 Subject: [PATCH 002/113] vulkan-tutorial 01 --- src/Window.zig | 60 ++++++++++++++++++++++++ src/main.zig | 124 ++++++++++++++++++++++++++----------------------- 2 files changed, 125 insertions(+), 59 deletions(-) create mode 100644 src/Window.zig diff --git a/src/Window.zig b/src/Window.zig new file mode 100644 index 0000000..fa39cce --- /dev/null +++ b/src/Window.zig @@ -0,0 +1,60 @@ +const Self = @This(); + +const std = @import("std"); +const c = @import("c.zig"); +const vk = @import("vk"); + +const BaseDispatch = vk.BaseWrapper(.{}); +const InstanceDispatch = vk.InstanceWrapper(.{}); +const DeviceDispatch = vk.DeviceWrapper(.{}); + +vkb: BaseDispatch, +vki: InstanceDispatch, +vkd: DeviceDispatch, + +window: *c.GLFWwindow, + +pub fn mainLoop(self: Self) void { + while (c.glfwWindowShouldClose(self.window) == 0) : (c.glfwPollEvents()) { + c.glfwSwapBuffers(self.window); + } +} + +fn initWindow(self: *Self) !void { + c.glfwWindowHint(c.GLFW_CLIENT_API, c.GLFW_NO_API); + self.window = c.glfwCreateWindow( + 1280, + 720, + "Hello World", + null, + null, + ) orelse return error.glfwCreateWindowFailed; + errdefer self.deinitWindow(); +} + +fn deinitWindow(self: Self) void { + c.glfwDestroyWindow(self.window); +} + +fn initVulkan(self: *Self) !void { + _ = self; +} + +fn deinitVulkan(self: Self) void { + _ = self; +} + +pub fn init() !Self { + var self: Self = undefined; + try self.initWindow(); + errdefer self.deinitWindow(); + try self.initVulkan(); + errdefer self.deinitVulkan(); + + return self; +} + +pub fn deinit(self: Self) void { + self.deinitWindow(); + self.deinitVulkan(); +} diff --git a/src/main.zig b/src/main.zig index 1fd54c1..a2a712f 100644 --- a/src/main.zig +++ b/src/main.zig @@ -1,75 +1,81 @@ const std = 
@import("std"); - const c = @import("c.zig"); - const vk = @import("vk"); -const BaseDispatch = vk.BaseWrapper(.{ - .createInstance = true, - .getInstanceProcAddr = true, -}); +const Window = @import("Window.zig"); -const InstanceDispatch = vk.InstanceWrapper(.{ - .destroyInstance = true, -}); - -const Context = struct { - vkb: BaseDispatch, - vki: InstanceDispatch, - - instance: vk.Instance, - - pub fn init(allocator: std.mem.Allocator, app_name: [*:0]const u8, window: *c.GLFWwindow) !Context { - _ = allocator; - _ = window; - - var self: Context = undefined; - self.vkb = try BaseDispatch.load(c.glfwGetInstanceProcAddress); - - var glfw_exts_count: u32 = 0; - const glfw_exts = c.glfwGetRequiredInstanceExtensions(&glfw_exts_count); - - const app_info = vk.ApplicationInfo{ - .p_application_name = app_name, - .application_version = vk.makeApiVersion(0, 0, 0, 0), - .p_engine_name = app_name, - .engine_version = vk.makeApiVersion(0, 0, 0, 0), - .api_version = vk.API_VERSION_1_2, - }; - - self.instance = try self.vkb.createInstance(&.{ - .p_application_info = &app_info, - .enabled_extension_count = glfw_exts_count, - .pp_enabled_extension_names = @as([*]const [*:0]const u8, @ptrCast(glfw_exts)), - }, null); - - self.vki = try InstanceDispatch.load(self.instance, self.vkb.dispatch.vkGetInstanceProcAddr); - errdefer self.vki.destroyInstance(self.instance, null); - - return self; - } - - pub fn deinit(self: Context) void { - self.vki.destroyInstance(self.instance, null); - } -}; +// const BaseDispatch = vk.BaseWrapper(.{ +// .createInstance = true, +// .getInstanceProcAddr = true, +// }); +// +// const InstanceDispatch = vk.InstanceWrapper(.{ +// .destroyInstance = true, +// }); +// +// const Context = struct { +// vkb: BaseDispatch, +// vki: InstanceDispatch, +// +// instance: vk.Instance, +// +// pub fn init(allocator: std.mem.Allocator, app_name: [*:0]const u8, window: *c.GLFWwindow) !Context { +// _ = allocator; +// _ = window; +// +// var self: Context = undefined; +// self.vkb = try BaseDispatch.load(c.glfwGetInstanceProcAddress); +// +// var glfw_exts_count: u32 = 0; +// const glfw_exts = c.glfwGetRequiredInstanceExtensions(&glfw_exts_count); +// +// const app_info = vk.ApplicationInfo{ +// .p_application_name = app_name, +// .application_version = vk.makeApiVersion(0, 0, 0, 0), +// .p_engine_name = app_name, +// .engine_version = vk.makeApiVersion(0, 0, 0, 0), +// .api_version = vk.API_VERSION_1_2, +// }; +// +// self.instance = try self.vkb.createInstance(&.{ +// .p_application_info = &app_info, +// .enabled_extension_count = glfw_exts_count, +// .pp_enabled_extension_names = @as([*]const [*:0]const u8, @ptrCast(glfw_exts)), +// }, null); +// +// self.vki = try InstanceDispatch.load(self.instance, self.vkb.dispatch.vkGetInstanceProcAddr); +// errdefer self.vki.destroyInstance(self.instance, null); +// +// return self; +// } +// +// pub fn deinit(self: Context) void { +// self.vki.destroyInstance(self.instance, null); +// } +// }; pub fn main() !void { - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; - const allocator = gpa.allocator(); + // var gpa = std.heap.GeneralPurposeAllocator(.{}){}; + // const allocator = gpa.allocator(); if (c.glfwInit() != c.GLFW_TRUE) { return error.GlfwInitFailed; } defer c.glfwTerminate(); - const window: *c.GLFWwindow = c.glfwCreateWindow(1280, 720, "Hello World!", null, null) orelse return error.GlfwWindowFailed; - defer c.glfwDestroyWindow(window); + const window = try Window.init(); + defer window.deinit(); - const ctx = try Context.init(allocator, 
"content", window); - defer ctx.deinit(); + window.mainLoop(); - while (c.glfwWindowShouldClose(window) == 0) : (c.glfwPollEvents()) { - c.glfwSwapBuffers(window); - } + // c.glfwWindowHint(c.GLFW_CLIENT_API, c.GLFW_NO_API); + // const window: *c.GLFWwindow = c.glfwCreateWindow(1280, 720, "Hello World!", null, null) orelse return error.GlfwWindowFailed; + // defer c.glfwDestroyWindow(window); + + // const ctx = try Context.init(allocator, "content", window); + // defer ctx.deinit(); + + // while (c.glfwWindowShouldClose(window) == 0) : (c.glfwPollEvents()) { + // c.glfwSwapBuffers(window); + // } } From 1b89ff4b252d7a73a3d7e76485847a2ff59a568b Mon Sep 17 00:00:00 2001 From: David Allemang Date: Wed, 20 Mar 2024 19:08:59 -0400 Subject: [PATCH 003/113] vulkan-tutorial 02 --- src/Window.zig | 57 ++++++++++++++++++++++++++++++++++++++++++++------ src/main.zig | 9 ++++---- 2 files changed, 56 insertions(+), 10 deletions(-) diff --git a/src/Window.zig b/src/Window.zig index fa39cce..690d9f9 100644 --- a/src/Window.zig +++ b/src/Window.zig @@ -4,16 +4,26 @@ const std = @import("std"); const c = @import("c.zig"); const vk = @import("vk"); -const BaseDispatch = vk.BaseWrapper(.{}); -const InstanceDispatch = vk.InstanceWrapper(.{}); +const BaseDispatch = vk.BaseWrapper(.{ + .createInstance = true, + .getInstanceProcAddr = true, + .enumerateInstanceExtensionProperties = true, +}); +const InstanceDispatch = vk.InstanceWrapper(.{ + .destroyInstance = true, +}); const DeviceDispatch = vk.DeviceWrapper(.{}); +allocator: std.mem.Allocator, + vkb: BaseDispatch, vki: InstanceDispatch, vkd: DeviceDispatch, window: *c.GLFWwindow, +instance: vk.Instance, + pub fn mainLoop(self: Self) void { while (c.glfwWindowShouldClose(self.window) == 0) : (c.glfwPollEvents()) { c.glfwSwapBuffers(self.window); @@ -29,7 +39,7 @@ fn initWindow(self: *Self) !void { null, null, ) orelse return error.glfwCreateWindowFailed; - errdefer self.deinitWindow(); + errdefer c.glfwDestroyWindow(self.window); } fn deinitWindow(self: Self) void { @@ -37,15 +47,50 @@ fn deinitWindow(self: Self) void { } fn initVulkan(self: *Self) !void { - _ = self; + self.vkb = try BaseDispatch.load(&c.glfwGetInstanceProcAddress); + + const app_info = vk.ApplicationInfo{ + .p_application_name = "Hello World", + .application_version = vk.makeApiVersion(0, 0, 0, 0), + .p_engine_name = "No Engine", + .engine_version = vk.makeApiVersion(0, 0, 0, 0), + .api_version = vk.API_VERSION_1_3, + }; + + var glfw_ext_count: u32 = 0; + const glfw_exts = c.glfwGetRequiredInstanceExtensions(&glfw_ext_count); + + const create_info = vk.InstanceCreateInfo{ + .p_application_info = &app_info, + .enabled_extension_count = glfw_ext_count, + .pp_enabled_extension_names = @as([*]const [*:0]const u8, @ptrCast(glfw_exts)), + .enabled_layer_count = 0, + }; + + self.instance = try self.vkb.createInstance(&create_info, null); + self.vki = try InstanceDispatch.load(self.instance, self.vkb.dispatch.vkGetInstanceProcAddr); + errdefer self.vki.destroyInstance(self.instance, null); + + var ext_count: u32 = 0; + _ = try self.vkb.enumerateInstanceExtensionProperties(null, &ext_count, null); + const extensions = try self.allocator.alloc(vk.ExtensionProperties, ext_count); + defer self.allocator.free(extensions); + _ = try self.vkb.enumerateInstanceExtensionProperties(null, &ext_count, extensions.ptr); + + std.debug.print("available extensions:\n", .{}); + for (extensions) |ext| { + std.debug.print("- {s}\n", .{ext.extension_name}); + } } fn deinitVulkan(self: Self) void { - _ = self; + 
self.vki.destroyInstance(self.instance, null); } -pub fn init() !Self { +pub fn init(allocator: std.mem.Allocator) !Self { var self: Self = undefined; + self.allocator = allocator; + try self.initWindow(); errdefer self.deinitWindow(); try self.initVulkan(); diff --git a/src/main.zig b/src/main.zig index a2a712f..9234539 100644 --- a/src/main.zig +++ b/src/main.zig @@ -55,18 +55,19 @@ const Window = @import("Window.zig"); // }; pub fn main() !void { - // var gpa = std.heap.GeneralPurposeAllocator(.{}){}; - // const allocator = gpa.allocator(); + var gpa = std.heap.GeneralPurposeAllocator(.{}){}; + const allocator = gpa.allocator(); + defer _ = gpa.detectLeaks(); if (c.glfwInit() != c.GLFW_TRUE) { return error.GlfwInitFailed; } defer c.glfwTerminate(); - const window = try Window.init(); + const window = try Window.init(allocator); defer window.deinit(); - window.mainLoop(); + // window.mainLoop(); // c.glfwWindowHint(c.GLFW_CLIENT_API, c.GLFW_NO_API); // const window: *c.GLFWwindow = c.glfwCreateWindow(1280, 720, "Hello World!", null, null) orelse return error.GlfwWindowFailed; From 85af42ffc14887021fbe8d951f3e311e9d7ac812 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Thu, 21 Mar 2024 09:18:57 -0400 Subject: [PATCH 004/113] vulkan-tutorial 03 --- src/Window.zig | 155 ++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 139 insertions(+), 16 deletions(-) diff --git a/src/Window.zig b/src/Window.zig index 690d9f9..58864a1 100644 --- a/src/Window.zig +++ b/src/Window.zig @@ -3,14 +3,19 @@ const Self = @This(); const std = @import("std"); const c = @import("c.zig"); const vk = @import("vk"); +const builtin = @import("builtin"); const BaseDispatch = vk.BaseWrapper(.{ .createInstance = true, .getInstanceProcAddr = true, .enumerateInstanceExtensionProperties = true, + .enumerateInstanceLayerProperties = true, }); const InstanceDispatch = vk.InstanceWrapper(.{ .destroyInstance = true, + .createDebugUtilsMessengerEXT = true, + .destroyDebugUtilsMessengerEXT = true, + .submitDebugUtilsMessageEXT = true, }); const DeviceDispatch = vk.DeviceWrapper(.{}); @@ -23,6 +28,7 @@ vkd: DeviceDispatch, window: *c.GLFWwindow, instance: vk.Instance, +messenger: vk.DebugUtilsMessengerEXT, pub fn mainLoop(self: Self) void { while (c.glfwWindowShouldClose(self.window) == 0) : (c.glfwPollEvents()) { @@ -46,9 +52,134 @@ fn deinitWindow(self: Self) void { c.glfwDestroyWindow(self.window); } +export fn debug_callback( + message_severity: vk.DebugUtilsMessageSeverityFlagsEXT, + message_type: vk.DebugUtilsMessageTypeFlagsEXT, + p_callback_data: ?*const vk.DebugUtilsMessengerCallbackDataEXT, + _: ?*anyopaque, +) callconv(.C) vk.Bool32 { + if (p_callback_data == null) return vk.FALSE; + if (p_callback_data.?.p_message == null) return vk.FALSE; + const msg = p_callback_data.?.p_message.?; + + const scopes = .{ + "validation", + "performance", + "device_address_binding", + "general", + }; + + const scope: []const u8 = inline for (scopes) |tag| { + if (@field(message_type, tag ++ "_bit_ext")) { + break tag; + } + } else { + return vk.FALSE; + }; + + const levels = .{ + "error", + "info", + "warning", + "verbose", + }; + + const level: []const u8 = inline for (levels) |tag| { + if (@field(message_severity, tag ++ "_bit_ext")) { + break tag; + } + } else { + return vk.FALSE; + }; + + // ripped from std.log, but with my own levels and scope. 
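Aside on the comment above (a sketch, not something this patch does): the same routing could be delegated to `std.log` with a scoped logger instead of the hand-rolled buffered writer that follows, reusing the `message_severity`, `scope`, and `msg` values already computed in this callback:

    const vk_log = std.log.scoped(.vulkan);
    if (message_severity.error_bit_ext) {
        vk_log.err("({s}): {s}", .{ scope, msg });
    } else if (message_severity.warning_bit_ext) {
        vk_log.warn("({s}): {s}", .{ scope, msg });
    } else {
        vk_log.debug("({s}): {s}", .{ scope, msg });
    }

The hand-rolled version below keeps explicit control over the `vk-{s}({s})` prefix and over flushing.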
+ const stderr = std.io.getStdErr().writer(); + var bw = std.io.bufferedWriter(stderr); + const writer = bw.writer(); + + std.debug.getStderrMutex().lock(); + defer std.debug.getStderrMutex().unlock(); + nosuspend { + writer.print("vk-{s}({s}): {s}\n", .{ level, scope, msg }) catch return vk.FALSE; + bw.flush() catch return vk.FALSE; + } + + return vk.FALSE; +} + fn initVulkan(self: *Self) !void { self.vkb = try BaseDispatch.load(&c.glfwGetInstanceProcAddress); + var exts = std.ArrayList([*:0]const u8).init(self.allocator); + defer exts.deinit(); + + var layers = std.ArrayList([*:0]const u8).init(self.allocator); + defer layers.deinit(); + + switch (builtin.mode) { + .ReleaseSafe, .Debug => { + try layers.append("VK_LAYER_KHRONOS_validation"); + try exts.append("VK_EXT_debug_utils"); + }, + .ReleaseSmall, .ReleaseFast => {}, + } + + var glfw_ext_count: u32 = 0; + const glfw_exts: [*][*:0]const u8 = @ptrCast(c.glfwGetRequiredInstanceExtensions(&glfw_ext_count)); + try exts.appendSlice(glfw_exts[0..glfw_ext_count]); + + std.log.debug("requesting extensions: {s}", .{exts.items}); + std.log.debug("requesting layers: {s}", .{layers.items}); + + var available_ext_count: u32 = 0; + _ = try self.vkb.enumerateInstanceExtensionProperties(null, &available_ext_count, null); + const available_exts = try self.allocator.alloc(vk.ExtensionProperties, available_ext_count); + defer self.allocator.free(available_exts); + _ = try self.vkb.enumerateInstanceExtensionProperties(null, &available_ext_count, available_exts.ptr); + + var available_layer_count: u32 = 0; + _ = try self.vkb.enumerateInstanceLayerProperties(&available_layer_count, null); + const available_layers = try self.allocator.alloc(vk.LayerProperties, available_layer_count); + defer self.allocator.free(available_layers); + _ = try self.vkb.enumerateInstanceLayerProperties(&available_layer_count, available_layers.ptr); + + for (exts.items) |name| { + const required_name = std.mem.sliceTo(name, 0); + for (available_exts) |prop| { + const available_name = std.mem.sliceTo(&prop.extension_name, 0); + if (std.mem.eql(u8, required_name, available_name)) break; + } else { + return error.ExtensionNotPresent; + } + } + + for (layers.items) |name| { + const required_name = std.mem.sliceTo(name, 0); + for (available_layers) |prop| { + const available_name = std.mem.sliceTo(&prop.layer_name, 0); + if (std.mem.eql(u8, required_name, available_name)) break; + } else { + return error.LayerNotPresent; + } + } + + const debug_create_info = vk.DebugUtilsMessengerCreateInfoEXT{ + .message_severity = vk.DebugUtilsMessageSeverityFlagsEXT{ + .verbose_bit_ext = false, + .warning_bit_ext = true, + .error_bit_ext = true, + .info_bit_ext = false, + }, + .message_type = vk.DebugUtilsMessageTypeFlagsEXT{ + .general_bit_ext = true, + .validation_bit_ext = true, + .performance_bit_ext = true, + .device_address_binding_bit_ext = false, + }, + .pfn_user_callback = &debug_callback, + .p_user_data = null, + }; + const app_info = vk.ApplicationInfo{ .p_application_name = "Hello World", .application_version = vk.makeApiVersion(0, 0, 0, 0), @@ -57,33 +188,25 @@ fn initVulkan(self: *Self) !void { .api_version = vk.API_VERSION_1_3, }; - var glfw_ext_count: u32 = 0; - const glfw_exts = c.glfwGetRequiredInstanceExtensions(&glfw_ext_count); - const create_info = vk.InstanceCreateInfo{ .p_application_info = &app_info, - .enabled_extension_count = glfw_ext_count, - .pp_enabled_extension_names = @as([*]const [*:0]const u8, @ptrCast(glfw_exts)), - .enabled_layer_count = 0, + 
.enabled_extension_count = @intCast(exts.items.len), + .pp_enabled_extension_names = exts.items.ptr, + .enabled_layer_count = @intCast(layers.items.len), + .pp_enabled_layer_names = layers.items.ptr, + .p_next = &debug_create_info, }; self.instance = try self.vkb.createInstance(&create_info, null); self.vki = try InstanceDispatch.load(self.instance, self.vkb.dispatch.vkGetInstanceProcAddr); errdefer self.vki.destroyInstance(self.instance, null); - var ext_count: u32 = 0; - _ = try self.vkb.enumerateInstanceExtensionProperties(null, &ext_count, null); - const extensions = try self.allocator.alloc(vk.ExtensionProperties, ext_count); - defer self.allocator.free(extensions); - _ = try self.vkb.enumerateInstanceExtensionProperties(null, &ext_count, extensions.ptr); - - std.debug.print("available extensions:\n", .{}); - for (extensions) |ext| { - std.debug.print("- {s}\n", .{ext.extension_name}); - } + self.messenger = try self.vki.createDebugUtilsMessengerEXT(self.instance, &debug_create_info, null); + errdefer self.vki.destroyDebugUtilsMessengerEXT(self.instance, self.messenger, null); } fn deinitVulkan(self: Self) void { + self.vki.destroyDebugUtilsMessengerEXT(self.instance, self.messenger, null); self.vki.destroyInstance(self.instance, null); } From 0d183d79a0946c22c422081771693666f01fb8eb Mon Sep 17 00:00:00 2001 From: David Allemang Date: Thu, 21 Mar 2024 11:49:21 -0400 Subject: [PATCH 005/113] comptime USE_DEBUG_LAYERS --- src/Window.zig | 42 ++++++++++++++++++++++++++++-------------- 1 file changed, 28 insertions(+), 14 deletions(-) diff --git a/src/Window.zig b/src/Window.zig index 58864a1..f4d6039 100644 --- a/src/Window.zig +++ b/src/Window.zig @@ -5,6 +5,11 @@ const c = @import("c.zig"); const vk = @import("vk"); const builtin = @import("builtin"); +const USE_DEBUG_LAYERS = switch (builtin.mode) { + .ReleaseSafe, .Debug => true, + .ReleaseSmall, .ReleaseFast => false, +}; + const BaseDispatch = vk.BaseWrapper(.{ .createInstance = true, .getInstanceProcAddr = true, @@ -13,9 +18,9 @@ const BaseDispatch = vk.BaseWrapper(.{ }); const InstanceDispatch = vk.InstanceWrapper(.{ .destroyInstance = true, - .createDebugUtilsMessengerEXT = true, - .destroyDebugUtilsMessengerEXT = true, - .submitDebugUtilsMessageEXT = true, + .createDebugUtilsMessengerEXT = USE_DEBUG_LAYERS, + .destroyDebugUtilsMessengerEXT = USE_DEBUG_LAYERS, + .submitDebugUtilsMessageEXT = USE_DEBUG_LAYERS, }); const DeviceDispatch = vk.DeviceWrapper(.{}); @@ -28,7 +33,7 @@ vkd: DeviceDispatch, window: *c.GLFWwindow, instance: vk.Instance, -messenger: vk.DebugUtilsMessengerEXT, +messenger: if (USE_DEBUG_LAYERS) vk.DebugUtilsMessengerEXT else void, pub fn mainLoop(self: Self) void { while (c.glfwWindowShouldClose(self.window) == 0) : (c.glfwPollEvents()) { @@ -116,12 +121,9 @@ fn initVulkan(self: *Self) !void { var layers = std.ArrayList([*:0]const u8).init(self.allocator); defer layers.deinit(); - switch (builtin.mode) { - .ReleaseSafe, .Debug => { - try layers.append("VK_LAYER_KHRONOS_validation"); - try exts.append("VK_EXT_debug_utils"); - }, - .ReleaseSmall, .ReleaseFast => {}, + if (USE_DEBUG_LAYERS) { + try layers.append("VK_LAYER_KHRONOS_validation"); + try exts.append("VK_EXT_debug_utils"); } var glfw_ext_count: u32 = 0; @@ -194,19 +196,31 @@ fn initVulkan(self: *Self) !void { .pp_enabled_extension_names = exts.items.ptr, .enabled_layer_count = @intCast(layers.items.len), .pp_enabled_layer_names = layers.items.ptr, - .p_next = &debug_create_info, + .p_next = if (USE_DEBUG_LAYERS) &debug_create_info else null, }; 
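Aside on the pattern just above (an illustration, not code from this repo): because `USE_DEBUG_LAYERS` is comptime-known, `if (USE_DEBUG_LAYERS) &debug_create_info else null` is resolved at compile time, and a field declared as `if (USE_DEBUG_LAYERS) vk.DebugUtilsMessengerEXT else void` shrinks to a zero-sized `void` field in release builds. A minimal standalone sketch of the same idea, with `u64` standing in for the Vulkan handle type:

    const builtin = @import("builtin");

    const enable_debug = builtin.mode == .Debug;

    const Ctx = struct {
        // `void` when debugging is compiled out, so the field costs nothing
        // and every `if (enable_debug)` guard is pruned at compile time.
        messenger: if (enable_debug) u64 else void,
    };

Chaining the messenger create info through `p_next` also lets the validation layers cover `createInstance`/`destroyInstance` themselves, which the messenger object created afterwards cannot observe.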
self.instance = try self.vkb.createInstance(&create_info, null); self.vki = try InstanceDispatch.load(self.instance, self.vkb.dispatch.vkGetInstanceProcAddr); errdefer self.vki.destroyInstance(self.instance, null); - self.messenger = try self.vki.createDebugUtilsMessengerEXT(self.instance, &debug_create_info, null); - errdefer self.vki.destroyDebugUtilsMessengerEXT(self.instance, self.messenger, null); + if (USE_DEBUG_LAYERS) self.messenger = try self.vki.createDebugUtilsMessengerEXT( + self.instance, + &debug_create_info, + null, + ); + errdefer if (USE_DEBUG_LAYERS) self.vki.destroyDebugUtilsMessengerEXT( + self.instance, + self.messenger, + null, + ); } fn deinitVulkan(self: Self) void { - self.vki.destroyDebugUtilsMessengerEXT(self.instance, self.messenger, null); + if (USE_DEBUG_LAYERS) self.vki.destroyDebugUtilsMessengerEXT( + self.instance, + self.messenger, + null, + ); self.vki.destroyInstance(self.instance, null); } From e5cc05e2b8e9f21475425f13f1ee6748d61a5e3a Mon Sep 17 00:00:00 2001 From: David Allemang Date: Thu, 21 Mar 2024 12:58:28 -0400 Subject: [PATCH 006/113] separate Window.zig and Context.zig --- src/Context.zig | 208 ++++++++++++++++++++++++++++++++++++++++++++ src/Window.zig | 226 +++--------------------------------------------- src/main.zig | 74 +++------------- 3 files changed, 232 insertions(+), 276 deletions(-) create mode 100644 src/Context.zig diff --git a/src/Context.zig b/src/Context.zig new file mode 100644 index 0000000..68fbc5e --- /dev/null +++ b/src/Context.zig @@ -0,0 +1,208 @@ +const Self = @This(); + +const std = @import("std"); +const c = @import("c.zig"); +const vk = @import("vk"); +const builtin = @import("builtin"); + +const USE_DEBUG_LAYERS = switch (builtin.mode) { + .ReleaseSafe, .Debug => true, + .ReleaseSmall, .ReleaseFast => false, +}; + +allocator: std.mem.Allocator, + +vkb: BaseDispatch, +vki: InstanceDispatch, +vkd: DeviceDispatch, + +instance: vk.Instance, +messenger: if (USE_DEBUG_LAYERS) vk.DebugUtilsMessengerEXT else void, + +pub fn init(allocator: std.mem.Allocator) !Self { + var self: Self = undefined; + self.allocator = allocator; + + self.vkb = try BaseDispatch.load(&c.glfwGetInstanceProcAddress); + + var exts = std.ArrayList([*:0]const u8).init(allocator); + defer exts.deinit(); + + var layers = std.ArrayList([*:0]const u8).init(allocator); + defer layers.deinit(); + + if (USE_DEBUG_LAYERS) { + try layers.append("VK_LAYER_KHRONOS_validation"); + try exts.append("VK_EXT_debug_utils"); + } + + var glfw_ext_count: u32 = 0; + const glfw_exts: [*][*:0]const u8 = @ptrCast(c.glfwGetRequiredInstanceExtensions(&glfw_ext_count)); + try exts.appendSlice(glfw_exts[0..glfw_ext_count]); + + std.log.debug("requesting extensions: {s}", .{exts.items}); + std.log.debug("requesting layers: {s}", .{layers.items}); + + var available_ext_count: u32 = 0; + _ = try self.vkb.enumerateInstanceExtensionProperties(null, &available_ext_count, null); + const available_exts = try allocator.alloc(vk.ExtensionProperties, available_ext_count); + defer allocator.free(available_exts); + _ = try self.vkb.enumerateInstanceExtensionProperties(null, &available_ext_count, available_exts.ptr); + + var available_layer_count: u32 = 0; + _ = try self.vkb.enumerateInstanceLayerProperties(&available_layer_count, null); + const available_layers = try allocator.alloc(vk.LayerProperties, available_layer_count); + defer allocator.free(available_layers); + _ = try self.vkb.enumerateInstanceLayerProperties(&available_layer_count, available_layers.ptr); + + for (exts.items) |name| { 
+ const required_name = std.mem.sliceTo(name, 0); + for (available_exts) |prop| { + const available_name = std.mem.sliceTo(&prop.extension_name, 0); + if (std.mem.eql(u8, required_name, available_name)) break; + } else { + return error.ExtensionNotPresent; + } + } + + for (layers.items) |name| { + const required_name = std.mem.sliceTo(name, 0); + for (available_layers) |prop| { + const available_name = std.mem.sliceTo(&prop.layer_name, 0); + if (std.mem.eql(u8, required_name, available_name)) break; + } else { + return error.LayerNotPresent; + } + } + + const debug_create_info = vk.DebugUtilsMessengerCreateInfoEXT{ + .message_severity = vk.DebugUtilsMessageSeverityFlagsEXT{ + .verbose_bit_ext = false, + .warning_bit_ext = true, + .error_bit_ext = true, + .info_bit_ext = false, + }, + .message_type = vk.DebugUtilsMessageTypeFlagsEXT{ + .general_bit_ext = true, + .validation_bit_ext = true, + .performance_bit_ext = true, + .device_address_binding_bit_ext = false, + }, + .pfn_user_callback = &debug_callback, + .p_user_data = null, + }; + + const app_info = vk.ApplicationInfo{ + .p_application_name = "Hello World", + .application_version = vk.makeApiVersion(0, 0, 0, 0), + .p_engine_name = "No Engine", + .engine_version = vk.makeApiVersion(0, 0, 0, 0), + .api_version = vk.API_VERSION_1_3, + }; + + const create_info = vk.InstanceCreateInfo{ + .p_application_info = &app_info, + .enabled_extension_count = @intCast(exts.items.len), + .pp_enabled_extension_names = exts.items.ptr, + .enabled_layer_count = @intCast(layers.items.len), + .pp_enabled_layer_names = layers.items.ptr, + .p_next = if (USE_DEBUG_LAYERS) &debug_create_info else null, + }; + + self.instance = try self.vkb.createInstance(&create_info, null); + self.vki = try InstanceDispatch.load(self.instance, self.vkb.dispatch.vkGetInstanceProcAddr); + errdefer self.vki.destroyInstance(self.instance, null); + + if (USE_DEBUG_LAYERS) self.messenger = try self.vki.createDebugUtilsMessengerEXT( + self.instance, + &debug_create_info, + null, + ); + errdefer if (USE_DEBUG_LAYERS) self.vki.destroyDebugUtilsMessengerEXT( + self.instance, + self.messenger, + null, + ); + + return self; +} + +pub fn deinit(self: Self) void { + if (USE_DEBUG_LAYERS) self.vki.destroyDebugUtilsMessengerEXT( + self.instance, + self.messenger, + null, + ); + self.vki.destroyInstance(self.instance, null); +} + +export fn debug_callback( + message_severity: vk.DebugUtilsMessageSeverityFlagsEXT, + message_type: vk.DebugUtilsMessageTypeFlagsEXT, + p_callback_data: ?*const vk.DebugUtilsMessengerCallbackDataEXT, + _: ?*anyopaque, +) callconv(.C) vk.Bool32 { + if (p_callback_data == null) return vk.FALSE; + if (p_callback_data.?.p_message == null) return vk.FALSE; + const msg = p_callback_data.?.p_message.?; + + const scopes = .{ + "validation", + "performance", + "device_address_binding", + "general", + }; + + const scope: []const u8 = inline for (scopes) |tag| { + if (@field(message_type, tag ++ "_bit_ext")) { + break tag; + } + } else { + return vk.FALSE; + }; + + const levels = .{ + "error", + "info", + "warning", + "verbose", + }; + + const level: []const u8 = inline for (levels) |tag| { + if (@field(message_severity, tag ++ "_bit_ext")) { + break tag; + } + } else { + return vk.FALSE; + }; + + // ripped from std.log, but with my own levels and scope. 
+ const stderr = std.io.getStdErr().writer(); + var bw = std.io.bufferedWriter(stderr); + const writer = bw.writer(); + + std.debug.getStderrMutex().lock(); + defer std.debug.getStderrMutex().unlock(); + nosuspend { + writer.print("vk-{s}({s}): {s}\n", .{ level, scope, msg }) catch return vk.FALSE; + bw.flush() catch return vk.FALSE; + } + + return vk.FALSE; +} + +const BaseDispatch = vk.BaseWrapper(.{ + .createInstance = true, + .getInstanceProcAddr = true, + .enumerateInstanceExtensionProperties = true, + .enumerateInstanceLayerProperties = true, +}); + +const InstanceDispatch = vk.InstanceWrapper(.{ + .destroyInstance = true, + .createDebugUtilsMessengerEXT = USE_DEBUG_LAYERS, + .destroyDebugUtilsMessengerEXT = USE_DEBUG_LAYERS, + .submitDebugUtilsMessageEXT = USE_DEBUG_LAYERS, +}); + +const DeviceDispatch = vk.DeviceWrapper(.{}); diff --git a/src/Window.zig b/src/Window.zig index f4d6039..d792468 100644 --- a/src/Window.zig +++ b/src/Window.zig @@ -5,238 +5,38 @@ const c = @import("c.zig"); const vk = @import("vk"); const builtin = @import("builtin"); -const USE_DEBUG_LAYERS = switch (builtin.mode) { - .ReleaseSafe, .Debug => true, - .ReleaseSmall, .ReleaseFast => false, -}; - -const BaseDispatch = vk.BaseWrapper(.{ - .createInstance = true, - .getInstanceProcAddr = true, - .enumerateInstanceExtensionProperties = true, - .enumerateInstanceLayerProperties = true, -}); -const InstanceDispatch = vk.InstanceWrapper(.{ - .destroyInstance = true, - .createDebugUtilsMessengerEXT = USE_DEBUG_LAYERS, - .destroyDebugUtilsMessengerEXT = USE_DEBUG_LAYERS, - .submitDebugUtilsMessageEXT = USE_DEBUG_LAYERS, -}); -const DeviceDispatch = vk.DeviceWrapper(.{}); - allocator: std.mem.Allocator, -vkb: BaseDispatch, -vki: InstanceDispatch, -vkd: DeviceDispatch, - window: *c.GLFWwindow, -instance: vk.Instance, -messenger: if (USE_DEBUG_LAYERS) vk.DebugUtilsMessengerEXT else void, - pub fn mainLoop(self: Self) void { while (c.glfwWindowShouldClose(self.window) == 0) : (c.glfwPollEvents()) { c.glfwSwapBuffers(self.window); } } -fn initWindow(self: *Self) !void { +pub fn init( + allocator: std.mem.Allocator, + width: u32, + height: u32, + title: []const u8, +) !Self { + var self: Self = undefined; + self.allocator = allocator; + c.glfwWindowHint(c.GLFW_CLIENT_API, c.GLFW_NO_API); self.window = c.glfwCreateWindow( - 1280, - 720, - "Hello World", + @intCast(width), + @intCast(height), + title.ptr, null, null, ) orelse return error.glfwCreateWindowFailed; errdefer c.glfwDestroyWindow(self.window); -} - -fn deinitWindow(self: Self) void { - c.glfwDestroyWindow(self.window); -} - -export fn debug_callback( - message_severity: vk.DebugUtilsMessageSeverityFlagsEXT, - message_type: vk.DebugUtilsMessageTypeFlagsEXT, - p_callback_data: ?*const vk.DebugUtilsMessengerCallbackDataEXT, - _: ?*anyopaque, -) callconv(.C) vk.Bool32 { - if (p_callback_data == null) return vk.FALSE; - if (p_callback_data.?.p_message == null) return vk.FALSE; - const msg = p_callback_data.?.p_message.?; - - const scopes = .{ - "validation", - "performance", - "device_address_binding", - "general", - }; - - const scope: []const u8 = inline for (scopes) |tag| { - if (@field(message_type, tag ++ "_bit_ext")) { - break tag; - } - } else { - return vk.FALSE; - }; - - const levels = .{ - "error", - "info", - "warning", - "verbose", - }; - - const level: []const u8 = inline for (levels) |tag| { - if (@field(message_severity, tag ++ "_bit_ext")) { - break tag; - } - } else { - return vk.FALSE; - }; - - // ripped from std.log, but with my own levels and 
scope. - const stderr = std.io.getStdErr().writer(); - var bw = std.io.bufferedWriter(stderr); - const writer = bw.writer(); - - std.debug.getStderrMutex().lock(); - defer std.debug.getStderrMutex().unlock(); - nosuspend { - writer.print("vk-{s}({s}): {s}\n", .{ level, scope, msg }) catch return vk.FALSE; - bw.flush() catch return vk.FALSE; - } - - return vk.FALSE; -} - -fn initVulkan(self: *Self) !void { - self.vkb = try BaseDispatch.load(&c.glfwGetInstanceProcAddress); - - var exts = std.ArrayList([*:0]const u8).init(self.allocator); - defer exts.deinit(); - - var layers = std.ArrayList([*:0]const u8).init(self.allocator); - defer layers.deinit(); - - if (USE_DEBUG_LAYERS) { - try layers.append("VK_LAYER_KHRONOS_validation"); - try exts.append("VK_EXT_debug_utils"); - } - - var glfw_ext_count: u32 = 0; - const glfw_exts: [*][*:0]const u8 = @ptrCast(c.glfwGetRequiredInstanceExtensions(&glfw_ext_count)); - try exts.appendSlice(glfw_exts[0..glfw_ext_count]); - - std.log.debug("requesting extensions: {s}", .{exts.items}); - std.log.debug("requesting layers: {s}", .{layers.items}); - - var available_ext_count: u32 = 0; - _ = try self.vkb.enumerateInstanceExtensionProperties(null, &available_ext_count, null); - const available_exts = try self.allocator.alloc(vk.ExtensionProperties, available_ext_count); - defer self.allocator.free(available_exts); - _ = try self.vkb.enumerateInstanceExtensionProperties(null, &available_ext_count, available_exts.ptr); - - var available_layer_count: u32 = 0; - _ = try self.vkb.enumerateInstanceLayerProperties(&available_layer_count, null); - const available_layers = try self.allocator.alloc(vk.LayerProperties, available_layer_count); - defer self.allocator.free(available_layers); - _ = try self.vkb.enumerateInstanceLayerProperties(&available_layer_count, available_layers.ptr); - - for (exts.items) |name| { - const required_name = std.mem.sliceTo(name, 0); - for (available_exts) |prop| { - const available_name = std.mem.sliceTo(&prop.extension_name, 0); - if (std.mem.eql(u8, required_name, available_name)) break; - } else { - return error.ExtensionNotPresent; - } - } - - for (layers.items) |name| { - const required_name = std.mem.sliceTo(name, 0); - for (available_layers) |prop| { - const available_name = std.mem.sliceTo(&prop.layer_name, 0); - if (std.mem.eql(u8, required_name, available_name)) break; - } else { - return error.LayerNotPresent; - } - } - - const debug_create_info = vk.DebugUtilsMessengerCreateInfoEXT{ - .message_severity = vk.DebugUtilsMessageSeverityFlagsEXT{ - .verbose_bit_ext = false, - .warning_bit_ext = true, - .error_bit_ext = true, - .info_bit_ext = false, - }, - .message_type = vk.DebugUtilsMessageTypeFlagsEXT{ - .general_bit_ext = true, - .validation_bit_ext = true, - .performance_bit_ext = true, - .device_address_binding_bit_ext = false, - }, - .pfn_user_callback = &debug_callback, - .p_user_data = null, - }; - - const app_info = vk.ApplicationInfo{ - .p_application_name = "Hello World", - .application_version = vk.makeApiVersion(0, 0, 0, 0), - .p_engine_name = "No Engine", - .engine_version = vk.makeApiVersion(0, 0, 0, 0), - .api_version = vk.API_VERSION_1_3, - }; - - const create_info = vk.InstanceCreateInfo{ - .p_application_info = &app_info, - .enabled_extension_count = @intCast(exts.items.len), - .pp_enabled_extension_names = exts.items.ptr, - .enabled_layer_count = @intCast(layers.items.len), - .pp_enabled_layer_names = layers.items.ptr, - .p_next = if (USE_DEBUG_LAYERS) &debug_create_info else null, - }; - - self.instance = try 
self.vkb.createInstance(&create_info, null); - self.vki = try InstanceDispatch.load(self.instance, self.vkb.dispatch.vkGetInstanceProcAddr); - errdefer self.vki.destroyInstance(self.instance, null); - - if (USE_DEBUG_LAYERS) self.messenger = try self.vki.createDebugUtilsMessengerEXT( - self.instance, - &debug_create_info, - null, - ); - errdefer if (USE_DEBUG_LAYERS) self.vki.destroyDebugUtilsMessengerEXT( - self.instance, - self.messenger, - null, - ); -} - -fn deinitVulkan(self: Self) void { - if (USE_DEBUG_LAYERS) self.vki.destroyDebugUtilsMessengerEXT( - self.instance, - self.messenger, - null, - ); - self.vki.destroyInstance(self.instance, null); -} - -pub fn init(allocator: std.mem.Allocator) !Self { - var self: Self = undefined; - self.allocator = allocator; - - try self.initWindow(); - errdefer self.deinitWindow(); - try self.initVulkan(); - errdefer self.deinitVulkan(); return self; } pub fn deinit(self: Self) void { - self.deinitWindow(); - self.deinitVulkan(); + c.glfwDestroyWindow(self.window); } diff --git a/src/main.zig b/src/main.zig index 9234539..fdf0ba4 100644 --- a/src/main.zig +++ b/src/main.zig @@ -3,56 +3,7 @@ const c = @import("c.zig"); const vk = @import("vk"); const Window = @import("Window.zig"); - -// const BaseDispatch = vk.BaseWrapper(.{ -// .createInstance = true, -// .getInstanceProcAddr = true, -// }); -// -// const InstanceDispatch = vk.InstanceWrapper(.{ -// .destroyInstance = true, -// }); -// -// const Context = struct { -// vkb: BaseDispatch, -// vki: InstanceDispatch, -// -// instance: vk.Instance, -// -// pub fn init(allocator: std.mem.Allocator, app_name: [*:0]const u8, window: *c.GLFWwindow) !Context { -// _ = allocator; -// _ = window; -// -// var self: Context = undefined; -// self.vkb = try BaseDispatch.load(c.glfwGetInstanceProcAddress); -// -// var glfw_exts_count: u32 = 0; -// const glfw_exts = c.glfwGetRequiredInstanceExtensions(&glfw_exts_count); -// -// const app_info = vk.ApplicationInfo{ -// .p_application_name = app_name, -// .application_version = vk.makeApiVersion(0, 0, 0, 0), -// .p_engine_name = app_name, -// .engine_version = vk.makeApiVersion(0, 0, 0, 0), -// .api_version = vk.API_VERSION_1_2, -// }; -// -// self.instance = try self.vkb.createInstance(&.{ -// .p_application_info = &app_info, -// .enabled_extension_count = glfw_exts_count, -// .pp_enabled_extension_names = @as([*]const [*:0]const u8, @ptrCast(glfw_exts)), -// }, null); -// -// self.vki = try InstanceDispatch.load(self.instance, self.vkb.dispatch.vkGetInstanceProcAddr); -// errdefer self.vki.destroyInstance(self.instance, null); -// -// return self; -// } -// -// pub fn deinit(self: Context) void { -// self.vki.destroyInstance(self.instance, null); -// } -// }; +const Context = @import("Context.zig"); pub fn main() !void { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; @@ -64,19 +15,16 @@ pub fn main() !void { } defer c.glfwTerminate(); - const window = try Window.init(allocator); + const ctx = try Context.init(allocator); + defer ctx.deinit(); + + const window = try Window.init( + allocator, + 1280, + 720, + "Hello World", + ); defer window.deinit(); - // window.mainLoop(); - - // c.glfwWindowHint(c.GLFW_CLIENT_API, c.GLFW_NO_API); - // const window: *c.GLFWwindow = c.glfwCreateWindow(1280, 720, "Hello World!", null, null) orelse return error.GlfwWindowFailed; - // defer c.glfwDestroyWindow(window); - - // const ctx = try Context.init(allocator, "content", window); - // defer ctx.deinit(); - - // while (c.glfwWindowShouldClose(window) == 0) : 
(c.glfwPollEvents()) { - // c.glfwSwapBuffers(window); - // } + window.mainLoop(); } From a7c3ce3a69e6a419a5504c1738b8ee1367cf559b Mon Sep 17 00:00:00 2001 From: David Allemang Date: Thu, 21 Mar 2024 13:45:36 -0400 Subject: [PATCH 007/113] vulkan-tutorial 04 --- src/Context.zig | 75 +++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 66 insertions(+), 9 deletions(-) diff --git a/src/Context.zig b/src/Context.zig index 68fbc5e..4b9b0a8 100644 --- a/src/Context.zig +++ b/src/Context.zig @@ -24,6 +24,7 @@ pub fn init(allocator: std.mem.Allocator) !Self { self.allocator = allocator; self.vkb = try BaseDispatch.load(&c.glfwGetInstanceProcAddress); + const vkb = self.vkb; var exts = std.ArrayList([*:0]const u8).init(allocator); defer exts.deinit(); @@ -44,16 +45,16 @@ pub fn init(allocator: std.mem.Allocator) !Self { std.log.debug("requesting layers: {s}", .{layers.items}); var available_ext_count: u32 = 0; - _ = try self.vkb.enumerateInstanceExtensionProperties(null, &available_ext_count, null); + _ = try vkb.enumerateInstanceExtensionProperties(null, &available_ext_count, null); const available_exts = try allocator.alloc(vk.ExtensionProperties, available_ext_count); defer allocator.free(available_exts); - _ = try self.vkb.enumerateInstanceExtensionProperties(null, &available_ext_count, available_exts.ptr); + _ = try vkb.enumerateInstanceExtensionProperties(null, &available_ext_count, available_exts.ptr); var available_layer_count: u32 = 0; - _ = try self.vkb.enumerateInstanceLayerProperties(&available_layer_count, null); + _ = try vkb.enumerateInstanceLayerProperties(&available_layer_count, null); const available_layers = try allocator.alloc(vk.LayerProperties, available_layer_count); defer allocator.free(available_layers); - _ = try self.vkb.enumerateInstanceLayerProperties(&available_layer_count, available_layers.ptr); + _ = try vkb.enumerateInstanceLayerProperties(&available_layer_count, available_layers.ptr); for (exts.items) |name| { const required_name = std.mem.sliceTo(name, 0); @@ -109,21 +110,73 @@ pub fn init(allocator: std.mem.Allocator) !Self { .p_next = if (USE_DEBUG_LAYERS) &debug_create_info else null, }; - self.instance = try self.vkb.createInstance(&create_info, null); - self.vki = try InstanceDispatch.load(self.instance, self.vkb.dispatch.vkGetInstanceProcAddr); - errdefer self.vki.destroyInstance(self.instance, null); + self.instance = try vkb.createInstance(&create_info, null); + self.vki = try InstanceDispatch.load(self.instance, vkb.dispatch.vkGetInstanceProcAddr); + const vki = self.vki; + errdefer vki.destroyInstance(self.instance, null); - if (USE_DEBUG_LAYERS) self.messenger = try self.vki.createDebugUtilsMessengerEXT( + if (USE_DEBUG_LAYERS) self.messenger = try vki.createDebugUtilsMessengerEXT( self.instance, &debug_create_info, null, ); - errdefer if (USE_DEBUG_LAYERS) self.vki.destroyDebugUtilsMessengerEXT( + errdefer if (USE_DEBUG_LAYERS) vki.destroyDebugUtilsMessengerEXT( self.instance, self.messenger, null, ); + var device_count: u32 = 0; + _ = try vki.enumeratePhysicalDevices(self.instance, &device_count, null); + const devices = try allocator.alloc(vk.PhysicalDevice, device_count); + defer allocator.free(devices); + _ = try vki.enumeratePhysicalDevices(self.instance, &device_count, devices.ptr); + + // todo some ranking strategy to find the most-suitable device + const Selection = struct { + device: vk.PhysicalDevice, + props: vk.PhysicalDeviceProperties, + feats: vk.PhysicalDeviceFeatures, + }; + const selected: Selection = for (devices) 
|device| { + const props = vki.getPhysicalDeviceProperties(device); + const feats = vki.getPhysicalDeviceFeatures(device); + + if (props.device_type != vk.PhysicalDeviceType.discrete_gpu) continue; + + // if (feats.geometry_shader == vk.FALSE) continue; + + break .{ + .device = device, + .props = props, + .feats = feats, + }; + } else { + return error.NoSuitablePhysicalDevice; + }; + + var queue_family_count: u32 = 0; + vki.getPhysicalDeviceQueueFamilyProperties(selected.device, &queue_family_count, null); + const queue_family_properties = try allocator.alloc(vk.QueueFamilyProperties, queue_family_count); + defer allocator.free(queue_family_properties); + vki.getPhysicalDeviceQueueFamilyProperties(selected.device, &queue_family_count, queue_family_properties.ptr); + + var indices = struct { + graphics: ?u32 = null, + }{}; + + for (queue_family_properties, 0..) |prop, idx| { + if (indices.graphics == null) { + if (prop.queue_flags.graphics_bit) { + indices.graphics = @intCast(idx); + } + } + + if (indices.graphics != null) { + break; + } + } + return self; } @@ -203,6 +256,10 @@ const InstanceDispatch = vk.InstanceWrapper(.{ .createDebugUtilsMessengerEXT = USE_DEBUG_LAYERS, .destroyDebugUtilsMessengerEXT = USE_DEBUG_LAYERS, .submitDebugUtilsMessageEXT = USE_DEBUG_LAYERS, + .enumeratePhysicalDevices = true, + .getPhysicalDeviceProperties = true, + .getPhysicalDeviceFeatures = true, + .getPhysicalDeviceQueueFamilyProperties = true, }); const DeviceDispatch = vk.DeviceWrapper(.{}); From 3bf7c4e61b27b4452e6e956bcf8524318c80a96f Mon Sep 17 00:00:00 2001 From: David Allemang Date: Thu, 21 Mar 2024 14:14:43 -0400 Subject: [PATCH 008/113] vulkan-tutorial 05 --- src/Context.zig | 72 ++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 60 insertions(+), 12 deletions(-) diff --git a/src/Context.zig b/src/Context.zig index 4b9b0a8..6f08bca 100644 --- a/src/Context.zig +++ b/src/Context.zig @@ -17,6 +17,12 @@ vki: InstanceDispatch, vkd: DeviceDispatch, instance: vk.Instance, +device: vk.Device, + +queues: struct { + graphics: vk.Queue, +}, + messenger: if (USE_DEBUG_LAYERS) vk.DebugUtilsMessengerEXT else void, pub fn init(allocator: std.mem.Allocator) !Self { @@ -161,26 +167,63 @@ pub fn init(allocator: std.mem.Allocator) !Self { defer allocator.free(queue_family_properties); vki.getPhysicalDeviceQueueFamilyProperties(selected.device, &queue_family_count, queue_family_properties.ptr); - var indices = struct { - graphics: ?u32 = null, - }{}; + // todo this should be incorporated with physical device selection/ranking. + const Indices = struct { + graphics: u32, + }; + const indices: Indices = find_index: { + var graphics: ?u32 = null; - for (queue_family_properties, 0..) |prop, idx| { - if (indices.graphics == null) { - if (prop.queue_flags.graphics_bit) { - indices.graphics = @intCast(idx); + for (queue_family_properties, 0..) |prop, idx| { + if (graphics == null and prop.queue_flags.graphics_bit) { + graphics = @intCast(idx); + } + + if (graphics != null) { + break :find_index .{ .graphics = graphics.? 
}; } } - if (indices.graphics != null) { - break; - } - } + return error.IncompatibleDeviceQueues; + }; + + const priorities = [_]f32{1.0}; + const queue_create_infos = [_]vk.DeviceQueueCreateInfo{ + .{ + .queue_family_index = indices.graphics, + .queue_count = priorities.len, + .p_queue_priorities = &priorities, + }, + }; + + const device_create_info = vk.DeviceCreateInfo{ + .queue_create_info_count = queue_create_infos.len, + .p_queue_create_infos = &queue_create_infos, + .p_enabled_features = &selected.feats, + // .enabled_extension_count = @intCast(exts.items.len), + // .pp_enabled_extension_names = exts.items.ptr, + .enabled_layer_count = @intCast(layers.items.len), + .pp_enabled_layer_names = layers.items.ptr, + }; + + self.device = try vki.createDevice( + selected.device, + &device_create_info, + null, + ); + self.vkd = try DeviceDispatch.load(self.device, vki.dispatch.vkGetDeviceProcAddr); + const vkd = self.vkd; + errdefer vkd.destroyDevice(self.device, null); + + self.queues = .{ + .graphics = vkd.getDeviceQueue(self.device, indices.graphics, 0), + }; return self; } pub fn deinit(self: Self) void { + self.vkd.destroyDevice(self.device, null); if (USE_DEBUG_LAYERS) self.vki.destroyDebugUtilsMessengerEXT( self.instance, self.messenger, @@ -260,6 +303,11 @@ const InstanceDispatch = vk.InstanceWrapper(.{ .getPhysicalDeviceProperties = true, .getPhysicalDeviceFeatures = true, .getPhysicalDeviceQueueFamilyProperties = true, + .createDevice = true, + .getDeviceProcAddr = true, }); -const DeviceDispatch = vk.DeviceWrapper(.{}); +const DeviceDispatch = vk.DeviceWrapper(.{ + .destroyDevice = true, + .getDeviceQueue = true, +}); From 02380af6abc21d251dc8be8eef24223b3e929d52 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Thu, 21 Mar 2024 15:12:49 -0400 Subject: [PATCH 009/113] vulkan-tutorial 06 --- src/Context.zig | 105 ++++++++++++++++++++++++++++++++++++++++-------- src/Window.zig | 42 ------------------- src/main.zig | 25 +++++++----- 3 files changed, 102 insertions(+), 70 deletions(-) delete mode 100644 src/Window.zig diff --git a/src/Context.zig b/src/Context.zig index 6f08bca..6ac0810 100644 --- a/src/Context.zig +++ b/src/Context.zig @@ -21,11 +21,14 @@ device: vk.Device, queues: struct { graphics: vk.Queue, + present: vk.Queue, }, +surface: vk.SurfaceKHR, + messenger: if (USE_DEBUG_LAYERS) vk.DebugUtilsMessengerEXT else void, -pub fn init(allocator: std.mem.Allocator) !Self { +pub fn init(allocator: std.mem.Allocator, window: *c.GLFWwindow) !Self { var self: Self = undefined; self.allocator = allocator; @@ -107,7 +110,7 @@ pub fn init(allocator: std.mem.Allocator) !Self { .api_version = vk.API_VERSION_1_3, }; - const create_info = vk.InstanceCreateInfo{ + const instance_create_info = vk.InstanceCreateInfo{ .p_application_info = &app_info, .enabled_extension_count = @intCast(exts.items.len), .pp_enabled_extension_names = exts.items.ptr, @@ -116,7 +119,7 @@ pub fn init(allocator: std.mem.Allocator) !Self { .p_next = if (USE_DEBUG_LAYERS) &debug_create_info else null, }; - self.instance = try vkb.createInstance(&create_info, null); + self.instance = try vkb.createInstance(&instance_create_info, null); self.vki = try InstanceDispatch.load(self.instance, vkb.dispatch.vkGetInstanceProcAddr); const vki = self.vki; errdefer vki.destroyInstance(self.instance, null); @@ -132,6 +135,20 @@ pub fn init(allocator: std.mem.Allocator) !Self { null, ); + switch (c.glfwCreateWindowSurface( + self.instance, + window, + null, + &self.surface, + )) { + .success => {}, + else => |e| { + 
std.log.err("{}", .{e}); + return error.Unknown; + }, + } + errdefer vki.destroySurfaceKHR(self.instance, self.surface, null); + var device_count: u32 = 0; _ = try vki.enumeratePhysicalDevices(self.instance, &device_count, null); const devices = try allocator.alloc(vk.PhysicalDevice, device_count); @@ -170,35 +187,76 @@ pub fn init(allocator: std.mem.Allocator) !Self { // todo this should be incorporated with physical device selection/ranking. const Indices = struct { graphics: u32, + present: u32, }; const indices: Indices = find_index: { var graphics: ?u32 = null; + var present: ?u32 = null; for (queue_family_properties, 0..) |prop, idx| { if (graphics == null and prop.queue_flags.graphics_bit) { graphics = @intCast(idx); + // continue; // forces distinct queue families } - if (graphics != null) { - break :find_index .{ .graphics = graphics.? }; + if (present == null) { + const present_support = try vki.getPhysicalDeviceSurfaceSupportKHR( + selected.device, + @intCast(idx), + self.surface, + ) == vk.TRUE; + if (present_support) { + present = @intCast(idx); + } + } + + if (graphics != null and present != null) { + break :find_index .{ + .graphics = graphics.?, + .present = present.?, + }; } } return error.IncompatibleDeviceQueues; }; - const priorities = [_]f32{1.0}; - const queue_create_infos = [_]vk.DeviceQueueCreateInfo{ - .{ + const gp_priorities = [_]f32{ 1.0, 1.0 }; + + var queue_create_infos = std.ArrayList(vk.DeviceQueueCreateInfo).init(allocator); + defer queue_create_infos.deinit(); + + // queue info family indices must be unique. so if the graphics and present queues are the same, create two queues + // in the same family. otherwise create queues in separate families. there should probably be some general way to + // group and unpack the queues, but I'm not bothering with that for now until I restructure this monolithic function + // in general. 
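One way to flesh out the generalization the comment above asks for, sketched against the names already in scope in this function (`indices`, `queue_create_infos`); the helper names (`priority`, `all_families`, `unique`) are illustrative only, and this sketch requests a single queue per distinct family rather than the two-queue layout the patch actually builds just below:

    const priority = [_]f32{1.0};
    const all_families = [_]u32{ indices.graphics, indices.present };

    // keep only the distinct family indices
    var unique: [all_families.len]u32 = undefined;
    var unique_len: usize = 0;
    outer: for (all_families) |family| {
        for (unique[0..unique_len]) |seen| {
            if (seen == family) continue :outer;
        }
        unique[unique_len] = family;
        unique_len += 1;
    }

    // one DeviceQueueCreateInfo per distinct family, one queue each
    for (unique[0..unique_len]) |family| {
        try queue_create_infos.append(.{
            .queue_family_index = family,
            .queue_count = 1,
            .p_queue_priorities = &priority,
        });
    }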
+ if (indices.graphics == indices.present) { + std.log.info("using one queue family", .{}); + const gp_slice = gp_priorities[0..2]; + try queue_create_infos.append(.{ .queue_family_index = indices.graphics, - .queue_count = priorities.len, - .p_queue_priorities = &priorities, - }, - }; + .queue_count = @intCast(gp_slice.len), + .p_queue_priorities = gp_slice.ptr, + }); + } else { + std.log.info("using two queue families", .{}); + const g_slice = gp_priorities[0..1]; + const p_slice = gp_priorities[1..2]; + try queue_create_infos.append(.{ + .queue_family_index = indices.graphics, + .queue_count = @intCast(g_slice.len), + .p_queue_priorities = g_slice.ptr, + }); + try queue_create_infos.append(.{ + .queue_family_index = indices.present, + .queue_count = @intCast(p_slice.len), + .p_queue_priorities = p_slice.ptr, + }); + } const device_create_info = vk.DeviceCreateInfo{ - .queue_create_info_count = queue_create_infos.len, - .p_queue_create_infos = &queue_create_infos, + .queue_create_info_count = @intCast(queue_create_infos.items.len), + .p_queue_create_infos = queue_create_infos.items.ptr, .p_enabled_features = &selected.feats, // .enabled_extension_count = @intCast(exts.items.len), // .pp_enabled_extension_names = exts.items.ptr, @@ -215,14 +273,25 @@ pub fn init(allocator: std.mem.Allocator) !Self { const vkd = self.vkd; errdefer vkd.destroyDevice(self.device, null); - self.queues = .{ - .graphics = vkd.getDeviceQueue(self.device, indices.graphics, 0), - }; + if (indices.graphics == indices.present) { + // two queues in the same family + self.queues = .{ + .graphics = vkd.getDeviceQueue(self.device, indices.graphics, 0), + .present = vkd.getDeviceQueue(self.device, indices.present, 1), + }; + } else { + // queues from different families + self.queues = .{ + .graphics = vkd.getDeviceQueue(self.device, indices.graphics, 0), + .present = vkd.getDeviceQueue(self.device, indices.present, 0), + }; + } return self; } pub fn deinit(self: Self) void { + self.vki.destroySurfaceKHR(self.instance, self.surface, null); self.vkd.destroyDevice(self.device, null); if (USE_DEBUG_LAYERS) self.vki.destroyDebugUtilsMessengerEXT( self.instance, @@ -305,6 +374,8 @@ const InstanceDispatch = vk.InstanceWrapper(.{ .getPhysicalDeviceQueueFamilyProperties = true, .createDevice = true, .getDeviceProcAddr = true, + .destroySurfaceKHR = true, + .getPhysicalDeviceSurfaceSupportKHR = true, }); const DeviceDispatch = vk.DeviceWrapper(.{ diff --git a/src/Window.zig b/src/Window.zig deleted file mode 100644 index d792468..0000000 --- a/src/Window.zig +++ /dev/null @@ -1,42 +0,0 @@ -const Self = @This(); - -const std = @import("std"); -const c = @import("c.zig"); -const vk = @import("vk"); -const builtin = @import("builtin"); - -allocator: std.mem.Allocator, - -window: *c.GLFWwindow, - -pub fn mainLoop(self: Self) void { - while (c.glfwWindowShouldClose(self.window) == 0) : (c.glfwPollEvents()) { - c.glfwSwapBuffers(self.window); - } -} - -pub fn init( - allocator: std.mem.Allocator, - width: u32, - height: u32, - title: []const u8, -) !Self { - var self: Self = undefined; - self.allocator = allocator; - - c.glfwWindowHint(c.GLFW_CLIENT_API, c.GLFW_NO_API); - self.window = c.glfwCreateWindow( - @intCast(width), - @intCast(height), - title.ptr, - null, - null, - ) orelse return error.glfwCreateWindowFailed; - errdefer c.glfwDestroyWindow(self.window); - - return self; -} - -pub fn deinit(self: Self) void { - c.glfwDestroyWindow(self.window); -} diff --git a/src/main.zig b/src/main.zig index fdf0ba4..3841f81 100644 --- 
a/src/main.zig +++ b/src/main.zig @@ -2,7 +2,6 @@ const std = @import("std"); const c = @import("c.zig"); const vk = @import("vk"); -const Window = @import("Window.zig"); const Context = @import("Context.zig"); pub fn main() !void { @@ -15,16 +14,20 @@ pub fn main() !void { } defer c.glfwTerminate(); - const ctx = try Context.init(allocator); + c.glfwWindowHint(c.GLFW_CLIENT_API, c.GLFW_NO_API); + const window = c.glfwCreateWindow( + 720, + 1280, + "Hello World!", + null, + null, + ) orelse return error.glfwCreateWindowFailed; + defer c.glfwDestroyWindow(window); + + const ctx = try Context.init(allocator, window); defer ctx.deinit(); - const window = try Window.init( - allocator, - 1280, - 720, - "Hello World", - ); - defer window.deinit(); - - window.mainLoop(); + while (c.glfwWindowShouldClose(window) == 0) : (c.glfwPollEvents()) { + c.glfwSwapBuffers(window); + } } From 9001ee42aca6f7256518b77f931b02268e19b85f Mon Sep 17 00:00:00 2001 From: David Allemang Date: Thu, 21 Mar 2024 15:31:34 -0400 Subject: [PATCH 010/113] WIP vulkan-tutorial 07 - device extensions --- src/Context.zig | 78 +++++++++++++++++++++++++++++++++++-------------- 1 file changed, 56 insertions(+), 22 deletions(-) diff --git a/src/Context.zig b/src/Context.zig index 6ac0810..6a138fd 100644 --- a/src/Context.zig +++ b/src/Context.zig @@ -35,23 +35,29 @@ pub fn init(allocator: std.mem.Allocator, window: *c.GLFWwindow) !Self { self.vkb = try BaseDispatch.load(&c.glfwGetInstanceProcAddress); const vkb = self.vkb; - var exts = std.ArrayList([*:0]const u8).init(allocator); - defer exts.deinit(); + var req_exts = std.ArrayList([*:0]const u8).init(allocator); + defer req_exts.deinit(); - var layers = std.ArrayList([*:0]const u8).init(allocator); - defer layers.deinit(); + var req_layers = std.ArrayList([*:0]const u8).init(allocator); + defer req_layers.deinit(); + + var req_device_exts = std.ArrayList([*:0]const u8).init(allocator); + defer req_device_exts.deinit(); if (USE_DEBUG_LAYERS) { - try layers.append("VK_LAYER_KHRONOS_validation"); - try exts.append("VK_EXT_debug_utils"); + try req_layers.append("VK_LAYER_KHRONOS_validation"); + try req_exts.append("VK_EXT_debug_utils"); } + try req_device_exts.append("VK_KHR_swapchain"); + var glfw_ext_count: u32 = 0; const glfw_exts: [*][*:0]const u8 = @ptrCast(c.glfwGetRequiredInstanceExtensions(&glfw_ext_count)); - try exts.appendSlice(glfw_exts[0..glfw_ext_count]); + try req_exts.appendSlice(glfw_exts[0..glfw_ext_count]); - std.log.debug("requesting extensions: {s}", .{exts.items}); - std.log.debug("requesting layers: {s}", .{layers.items}); + std.log.debug("requesting extensions: {s}", .{req_exts.items}); + std.log.debug("requesting layers: {s}", .{req_layers.items}); + std.log.debug("requesting device extensions: {s}", .{req_device_exts.items}); var available_ext_count: u32 = 0; _ = try vkb.enumerateInstanceExtensionProperties(null, &available_ext_count, null); @@ -65,7 +71,7 @@ pub fn init(allocator: std.mem.Allocator, window: *c.GLFWwindow) !Self { defer allocator.free(available_layers); _ = try vkb.enumerateInstanceLayerProperties(&available_layer_count, available_layers.ptr); - for (exts.items) |name| { + for (req_exts.items) |name| { const required_name = std.mem.sliceTo(name, 0); for (available_exts) |prop| { const available_name = std.mem.sliceTo(&prop.extension_name, 0); @@ -75,7 +81,7 @@ pub fn init(allocator: std.mem.Allocator, window: *c.GLFWwindow) !Self { } } - for (layers.items) |name| { + for (req_layers.items) |name| { const required_name = 
std.mem.sliceTo(name, 0); for (available_layers) |prop| { const available_name = std.mem.sliceTo(&prop.layer_name, 0); @@ -112,10 +118,10 @@ pub fn init(allocator: std.mem.Allocator, window: *c.GLFWwindow) !Self { const instance_create_info = vk.InstanceCreateInfo{ .p_application_info = &app_info, - .enabled_extension_count = @intCast(exts.items.len), - .pp_enabled_extension_names = exts.items.ptr, - .enabled_layer_count = @intCast(layers.items.len), - .pp_enabled_layer_names = layers.items.ptr, + .enabled_extension_count = @intCast(req_exts.items.len), + .pp_enabled_extension_names = req_exts.items.ptr, + .enabled_layer_count = @intCast(req_layers.items.len), + .pp_enabled_layer_names = req_layers.items.ptr, .p_next = if (USE_DEBUG_LAYERS) &debug_create_info else null, }; @@ -155,13 +161,17 @@ pub fn init(allocator: std.mem.Allocator, window: *c.GLFWwindow) !Self { defer allocator.free(devices); _ = try vki.enumeratePhysicalDevices(self.instance, &device_count, devices.ptr); + var available_device_exts_count: u32 = 0; + var available_device_exts = std.ArrayList(vk.ExtensionProperties).init(allocator); + defer available_device_exts.deinit(); + // todo some ranking strategy to find the most-suitable device const Selection = struct { device: vk.PhysicalDevice, props: vk.PhysicalDeviceProperties, feats: vk.PhysicalDeviceFeatures, }; - const selected: Selection = for (devices) |device| { + const selected: Selection = find_device: for (devices) |device| { const props = vki.getPhysicalDeviceProperties(device); const feats = vki.getPhysicalDeviceFeatures(device); @@ -169,6 +179,31 @@ pub fn init(allocator: std.mem.Allocator, window: *c.GLFWwindow) !Self { // if (feats.geometry_shader == vk.FALSE) continue; + _ = try vki.enumerateDeviceExtensionProperties( + device, + null, + &available_device_exts_count, + null, + ); + try available_device_exts.resize(available_device_exts_count); + _ = try vki.enumerateDeviceExtensionProperties( + device, + null, + &available_device_exts_count, + available_device_exts.items.ptr, + ); + + for (req_device_exts.items) |name| { + const required_name = std.mem.sliceTo(name, 0); + for (available_device_exts.items) |prop| { + const available_name = std.mem.sliceTo(&prop.extension_name, 0); + if (std.mem.eql(u8, required_name, available_name)) break; + } else { + std.log.warn("cannot find {s}\n", .{required_name}); + continue :find_device; + } + } + break .{ .device = device, .props = props, @@ -231,7 +266,6 @@ pub fn init(allocator: std.mem.Allocator, window: *c.GLFWwindow) !Self { // group and unpack the queues, but I'm not bothering with that for now until I restructure this monolithic function // in general. 
if (indices.graphics == indices.present) { - std.log.info("using one queue family", .{}); const gp_slice = gp_priorities[0..2]; try queue_create_infos.append(.{ .queue_family_index = indices.graphics, @@ -239,7 +273,6 @@ pub fn init(allocator: std.mem.Allocator, window: *c.GLFWwindow) !Self { .p_queue_priorities = gp_slice.ptr, }); } else { - std.log.info("using two queue families", .{}); const g_slice = gp_priorities[0..1]; const p_slice = gp_priorities[1..2]; try queue_create_infos.append(.{ @@ -258,10 +291,10 @@ pub fn init(allocator: std.mem.Allocator, window: *c.GLFWwindow) !Self { .queue_create_info_count = @intCast(queue_create_infos.items.len), .p_queue_create_infos = queue_create_infos.items.ptr, .p_enabled_features = &selected.feats, - // .enabled_extension_count = @intCast(exts.items.len), - // .pp_enabled_extension_names = exts.items.ptr, - .enabled_layer_count = @intCast(layers.items.len), - .pp_enabled_layer_names = layers.items.ptr, + .enabled_extension_count = @intCast(req_device_exts.items.len), + .pp_enabled_extension_names = req_device_exts.items.ptr, + .enabled_layer_count = @intCast(req_layers.items.len), + .pp_enabled_layer_names = req_layers.items.ptr, }; self.device = try vki.createDevice( @@ -376,6 +409,7 @@ const InstanceDispatch = vk.InstanceWrapper(.{ .getDeviceProcAddr = true, .destroySurfaceKHR = true, .getPhysicalDeviceSurfaceSupportKHR = true, + .enumerateDeviceExtensionProperties = true, }); const DeviceDispatch = vk.DeviceWrapper(.{ From 2403befc0e1bca51066675d429082189704aaea5 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Fri, 22 Mar 2024 15:59:20 -0400 Subject: [PATCH 011/113] Introduce "Enumeration" type for vk.enumerate* results --- build.zig | 24 ++++++- src/Context.zig | 163 +++++++++++++++++++++++++++--------------------- src/main.zig | 6 +- 3 files changed, 114 insertions(+), 79 deletions(-) diff --git a/build.zig b/build.zig index 574ad5b..1826f14 100644 --- a/build.zig +++ b/build.zig @@ -25,12 +25,24 @@ pub fn build(b: *std.Build) void { .use_pkg_config = .force, }); exe.linkLibC(); - - b.installArtifact(exe); exe.root_module.addImport("vk", vkmod); - const run_cmd = b.addRunArtifact(exe); + b.installArtifact(exe); + var docs_dir: std.Build.GeneratedFile = .{ + .path = "docs", + .step = &exe.step, + }; + exe.generated_docs = &docs_dir; + const docs = b.addInstallDirectory(.{ + .source_dir = .{ .generated = &docs_dir }, + .install_dir = .{ .custom = "docs" }, + .install_subdir = "", + }); + const docs_step = b.step("docs", "Build the docs"); + docs_step.dependOn(&docs.step); + + const run_cmd = b.addRunArtifact(exe); run_cmd.step.dependOn(b.getInstallStep()); if (b.args) |args| { @@ -45,6 +57,12 @@ pub fn build(b: *std.Build) void { .target = target, .optimize = optimize, }); + exe_unit_tests.linkSystemLibrary2("glfw3", .{ + .needed = true, + .preferred_link_mode = .static, + .use_pkg_config = .force, + }); + exe_unit_tests.linkLibC(); const run_exe_unit_tests = b.addRunArtifact(exe_unit_tests); diff --git a/src/Context.zig b/src/Context.zig index 6a138fd..1652be0 100644 --- a/src/Context.zig +++ b/src/Context.zig @@ -10,7 +10,10 @@ const USE_DEBUG_LAYERS = switch (builtin.mode) { .ReleaseSmall, .ReleaseFast => false, }; -allocator: std.mem.Allocator, +const MAX_DEVICES = 16; +const MAX_DEVICE_EXTENSIONS = 512; +const MAX_INSTANCE_EXTENSIONS = 64; +const MAX_LAYERS = 512; vkb: BaseDispatch, vki: InstanceDispatch, @@ -28,52 +31,79 @@ surface: vk.SurfaceKHR, messenger: if (USE_DEBUG_LAYERS) vk.DebugUtilsMessengerEXT else void, -pub fn 
init(allocator: std.mem.Allocator, window: *c.GLFWwindow) !Self { +fn Enumeration(comptime T: type, comptime cap: u32) type { + return struct { + buf: [cap]T = undefined, + len: u32 = 0, + + const FULL: @This() = .{ .len = cap }; + const EMPTY: @This() = .{ .len = 0 }; + + pub fn slice(self: anytype) switch (@TypeOf(&self.buf)) { + *[cap]T => []T, + *const [cap]T => []const T, + else => unreachable, + } { + return self.buf[0..self.len]; + } + + pub fn appendSlice(self: *@This(), source: []const T) !void { + if (self.len + source.len > cap) return error.Overflow; + @memcpy(self.buf[self.len..][0..source.len], source); + self.len += @intCast(source.len); + } + + pub fn append(self: *@This(), val: T) !void { + if (self.len + 1 > cap) return error.Overflow; + self.buf[self.len] = val; + self.len += 1; + } + }; +} + +pub fn init(window: *c.GLFWwindow) !Self { var self: Self = undefined; - self.allocator = allocator; self.vkb = try BaseDispatch.load(&c.glfwGetInstanceProcAddress); const vkb = self.vkb; - var req_exts = std.ArrayList([*:0]const u8).init(allocator); - defer req_exts.deinit(); + var req_exts = Enumeration([*:0]const u8, MAX_INSTANCE_EXTENSIONS).EMPTY; + var req_layers = Enumeration([*:0]const u8, MAX_LAYERS).EMPTY; + var req_dev_exts = Enumeration([*:0]const u8, MAX_DEVICE_EXTENSIONS).EMPTY; - var req_layers = std.ArrayList([*:0]const u8).init(allocator); - defer req_layers.deinit(); - - var req_device_exts = std.ArrayList([*:0]const u8).init(allocator); - defer req_device_exts.deinit(); + try req_dev_exts.append("VK_KHR_swapchain"); if (USE_DEBUG_LAYERS) { try req_layers.append("VK_LAYER_KHRONOS_validation"); try req_exts.append("VK_EXT_debug_utils"); } - try req_device_exts.append("VK_KHR_swapchain"); + { + var glfw_ext_count: u32 = 0; + const glfw_exts: [*][*:0]const u8 = @ptrCast(c.glfwGetRequiredInstanceExtensions(&glfw_ext_count)); + try req_exts.appendSlice(glfw_exts[0..glfw_ext_count]); + } - var glfw_ext_count: u32 = 0; - const glfw_exts: [*][*:0]const u8 = @ptrCast(c.glfwGetRequiredInstanceExtensions(&glfw_ext_count)); - try req_exts.appendSlice(glfw_exts[0..glfw_ext_count]); + std.log.debug("requesting extensions: {s}", .{req_exts.slice()}); + std.log.debug("requesting layers: {s}", .{req_layers.slice()}); + std.log.debug("requesting device extensions: {s}", .{req_dev_exts.slice()}); - std.log.debug("requesting extensions: {s}", .{req_exts.items}); - std.log.debug("requesting layers: {s}", .{req_layers.items}); - std.log.debug("requesting device extensions: {s}", .{req_device_exts.items}); + var available_exts = Enumeration(vk.ExtensionProperties, MAX_INSTANCE_EXTENSIONS).FULL; + _ = try vkb.enumerateInstanceExtensionProperties( + null, + &available_exts.len, + &available_exts.buf, + ); - var available_ext_count: u32 = 0; - _ = try vkb.enumerateInstanceExtensionProperties(null, &available_ext_count, null); - const available_exts = try allocator.alloc(vk.ExtensionProperties, available_ext_count); - defer allocator.free(available_exts); - _ = try vkb.enumerateInstanceExtensionProperties(null, &available_ext_count, available_exts.ptr); + var available_layers = Enumeration(vk.LayerProperties, MAX_LAYERS).FULL; + _ = try vkb.enumerateInstanceLayerProperties( + &available_layers.len, + &available_layers.buf, + ); - var available_layer_count: u32 = 0; - _ = try vkb.enumerateInstanceLayerProperties(&available_layer_count, null); - const available_layers = try allocator.alloc(vk.LayerProperties, available_layer_count); - defer allocator.free(available_layers); - _ = try 
vkb.enumerateInstanceLayerProperties(&available_layer_count, available_layers.ptr); - - for (req_exts.items) |name| { + for (req_exts.slice()) |name| { const required_name = std.mem.sliceTo(name, 0); - for (available_exts) |prop| { + for (available_exts.slice()) |prop| { const available_name = std.mem.sliceTo(&prop.extension_name, 0); if (std.mem.eql(u8, required_name, available_name)) break; } else { @@ -81,9 +111,9 @@ pub fn init(allocator: std.mem.Allocator, window: *c.GLFWwindow) !Self { } } - for (req_layers.items) |name| { + for (req_layers.slice()) |name| { const required_name = std.mem.sliceTo(name, 0); - for (available_layers) |prop| { + for (available_layers.slice()) |prop| { const available_name = std.mem.sliceTo(&prop.layer_name, 0); if (std.mem.eql(u8, required_name, available_name)) break; } else { @@ -118,10 +148,10 @@ pub fn init(allocator: std.mem.Allocator, window: *c.GLFWwindow) !Self { const instance_create_info = vk.InstanceCreateInfo{ .p_application_info = &app_info, - .enabled_extension_count = @intCast(req_exts.items.len), - .pp_enabled_extension_names = req_exts.items.ptr, - .enabled_layer_count = @intCast(req_layers.items.len), - .pp_enabled_layer_names = req_layers.items.ptr, + .enabled_extension_count = req_exts.len, + .pp_enabled_extension_names = &req_exts.buf, + .enabled_layer_count = req_layers.len, + .pp_enabled_layer_names = &req_layers.buf, .p_next = if (USE_DEBUG_LAYERS) &debug_create_info else null, }; @@ -155,15 +185,12 @@ pub fn init(allocator: std.mem.Allocator, window: *c.GLFWwindow) !Self { } errdefer vki.destroySurfaceKHR(self.instance, self.surface, null); - var device_count: u32 = 0; - _ = try vki.enumeratePhysicalDevices(self.instance, &device_count, null); - const devices = try allocator.alloc(vk.PhysicalDevice, device_count); - defer allocator.free(devices); - _ = try vki.enumeratePhysicalDevices(self.instance, &device_count, devices.ptr); - - var available_device_exts_count: u32 = 0; - var available_device_exts = std.ArrayList(vk.ExtensionProperties).init(allocator); - defer available_device_exts.deinit(); + var devices = Enumeration(vk.PhysicalDevice, MAX_DEVICES).FULL; + _ = try vki.enumeratePhysicalDevices( + self.instance, + &devices.len, + &devices.buf, + ); // todo some ranking strategy to find the most-suitable device const Selection = struct { @@ -171,7 +198,7 @@ pub fn init(allocator: std.mem.Allocator, window: *c.GLFWwindow) !Self { props: vk.PhysicalDeviceProperties, feats: vk.PhysicalDeviceFeatures, }; - const selected: Selection = find_device: for (devices) |device| { + const selected: Selection = find_device: for (devices.slice()) |device| { const props = vki.getPhysicalDeviceProperties(device); const feats = vki.getPhysicalDeviceFeatures(device); @@ -179,23 +206,17 @@ pub fn init(allocator: std.mem.Allocator, window: *c.GLFWwindow) !Self { // if (feats.geometry_shader == vk.FALSE) continue; + var available_dev_exts = Enumeration(vk.ExtensionProperties, MAX_DEVICE_EXTENSIONS).FULL; _ = try vki.enumerateDeviceExtensionProperties( device, null, - &available_device_exts_count, - null, - ); - try available_device_exts.resize(available_device_exts_count); - _ = try vki.enumerateDeviceExtensionProperties( - device, - null, - &available_device_exts_count, - available_device_exts.items.ptr, + &available_dev_exts.len, + &available_dev_exts.buf, ); - for (req_device_exts.items) |name| { + for (req_dev_exts.slice()) |name| { const required_name = std.mem.sliceTo(name, 0); - for (available_device_exts.items) |prop| { + for 
(available_dev_exts.slice()) |prop| { const available_name = std.mem.sliceTo(&prop.extension_name, 0); if (std.mem.eql(u8, required_name, available_name)) break; } else { @@ -213,11 +234,12 @@ pub fn init(allocator: std.mem.Allocator, window: *c.GLFWwindow) !Self { return error.NoSuitablePhysicalDevice; }; - var queue_family_count: u32 = 0; - vki.getPhysicalDeviceQueueFamilyProperties(selected.device, &queue_family_count, null); - const queue_family_properties = try allocator.alloc(vk.QueueFamilyProperties, queue_family_count); - defer allocator.free(queue_family_properties); - vki.getPhysicalDeviceQueueFamilyProperties(selected.device, &queue_family_count, queue_family_properties.ptr); + var queue_families = Enumeration(vk.QueueFamilyProperties, 64).FULL; + vki.getPhysicalDeviceQueueFamilyProperties( + selected.device, + &queue_families.len, + &queue_families.buf, + ); // todo this should be incorporated with physical device selection/ranking. const Indices = struct { @@ -228,7 +250,7 @@ pub fn init(allocator: std.mem.Allocator, window: *c.GLFWwindow) !Self { var graphics: ?u32 = null; var present: ?u32 = null; - for (queue_family_properties, 0..) |prop, idx| { + for (queue_families.slice(), 0..) |prop, idx| { if (graphics == null and prop.queue_flags.graphics_bit) { graphics = @intCast(idx); // continue; // forces distinct queue families @@ -258,8 +280,7 @@ pub fn init(allocator: std.mem.Allocator, window: *c.GLFWwindow) !Self { const gp_priorities = [_]f32{ 1.0, 1.0 }; - var queue_create_infos = std.ArrayList(vk.DeviceQueueCreateInfo).init(allocator); - defer queue_create_infos.deinit(); + var queue_create_infos = Enumeration(vk.DeviceQueueCreateInfo, 2).EMPTY; // queue info family indices must be unique. so if the graphics and present queues are the same, create two queues // in the same family. otherwise create queues in separate families. 
there should probably be some general way to @@ -288,13 +309,13 @@ pub fn init(allocator: std.mem.Allocator, window: *c.GLFWwindow) !Self { } const device_create_info = vk.DeviceCreateInfo{ - .queue_create_info_count = @intCast(queue_create_infos.items.len), - .p_queue_create_infos = queue_create_infos.items.ptr, + .queue_create_info_count = queue_create_infos.len, + .p_queue_create_infos = &queue_create_infos.buf, .p_enabled_features = &selected.feats, - .enabled_extension_count = @intCast(req_device_exts.items.len), - .pp_enabled_extension_names = req_device_exts.items.ptr, - .enabled_layer_count = @intCast(req_layers.items.len), - .pp_enabled_layer_names = req_layers.items.ptr, + .enabled_extension_count = req_dev_exts.len, + .pp_enabled_extension_names = &req_dev_exts.buf, + .enabled_layer_count = req_layers.len, + .pp_enabled_layer_names = &req_layers.buf, }; self.device = try vki.createDevice( diff --git a/src/main.zig b/src/main.zig index 3841f81..1aa6e33 100644 --- a/src/main.zig +++ b/src/main.zig @@ -5,10 +5,6 @@ const vk = @import("vk"); const Context = @import("Context.zig"); pub fn main() !void { - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; - const allocator = gpa.allocator(); - defer _ = gpa.detectLeaks(); - if (c.glfwInit() != c.GLFW_TRUE) { return error.GlfwInitFailed; } @@ -24,7 +20,7 @@ pub fn main() !void { ) orelse return error.glfwCreateWindowFailed; defer c.glfwDestroyWindow(window); - const ctx = try Context.init(allocator, window); + const ctx = try Context.init(window); defer ctx.deinit(); while (c.glfwWindowShouldClose(window) == 0) : (c.glfwPollEvents()) { From bc9aa90068ea3fd3c3e486625548c096971def46 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Fri, 22 Mar 2024 16:23:38 -0400 Subject: [PATCH 012/113] use vk.extension_info names --- src/Context.zig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Context.zig b/src/Context.zig index 1652be0..0ef90e8 100644 --- a/src/Context.zig +++ b/src/Context.zig @@ -71,11 +71,11 @@ pub fn init(window: *c.GLFWwindow) !Self { var req_layers = Enumeration([*:0]const u8, MAX_LAYERS).EMPTY; var req_dev_exts = Enumeration([*:0]const u8, MAX_DEVICE_EXTENSIONS).EMPTY; - try req_dev_exts.append("VK_KHR_swapchain"); + try req_dev_exts.append(vk.extension_info.khr_swapchain.name); if (USE_DEBUG_LAYERS) { try req_layers.append("VK_LAYER_KHRONOS_validation"); - try req_exts.append("VK_EXT_debug_utils"); + try req_exts.append(vk.extension_info.ext_debug_utils.name); } { From 282c38ddfc2e3a6d5cc4edd0a1b2f813c17e9a17 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Fri, 22 Mar 2024 16:34:26 -0400 Subject: [PATCH 013/113] direct copy vulkan-zig example --- build.zig | 10 + src/graphics_context.zig | 344 +++++++++++++++++++++++++++ src/main.zig | 480 ++++++++++++++++++++++++++++++++++++-- src/shaders/triangle.frag | 9 + src/shaders/triangle.vert | 11 + src/swapchain.zig | 322 +++++++++++++++++++++++++ 6 files changed, 1163 insertions(+), 13 deletions(-) create mode 100644 src/graphics_context.zig create mode 100644 src/shaders/triangle.frag create mode 100644 src/shaders/triangle.vert create mode 100644 src/swapchain.zig diff --git a/build.zig b/build.zig index 1826f14..bded306 100644 --- a/build.zig +++ b/build.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const vkgen = @import("vulkan-zig"); pub fn build(b: *std.Build) void { const target = b.standardTargetOptions(.{}); @@ -17,6 +18,15 @@ pub fn build(b: *std.Build) void { .optimize = optimize, }); + const shaders = vkgen.ShaderCompileStep.create( 
+ b, + &[_][]const u8{ "glslc", "--target-env=vulkan1.3" }, + "-o", + ); + shaders.add("triangle_vert", "src/shaders/triangle.vert", .{}); + shaders.add("triangle_frag", "src/shaders/triangle.frag", .{}); + exe.root_module.addImport("shaders", shaders.getModule()); + // this requires PKG_CONFIG_PATH to be set. something like: // ~/.local/lib/pkgconfig/ exe.linkSystemLibrary2("glfw3", .{ diff --git a/src/graphics_context.zig b/src/graphics_context.zig new file mode 100644 index 0000000..18ee6d9 --- /dev/null +++ b/src/graphics_context.zig @@ -0,0 +1,344 @@ +const std = @import("std"); +const vk = @import("vk"); +const c = @import("c.zig"); +const Allocator = std.mem.Allocator; + +const required_device_extensions = [_][*:0]const u8{vk.extension_info.khr_swapchain.name}; + +const BaseDispatch = vk.BaseWrapper(.{ + .createInstance = true, + .getInstanceProcAddr = true, +}); + +const InstanceDispatch = vk.InstanceWrapper(.{ + .destroyInstance = true, + .createDevice = true, + .destroySurfaceKHR = true, + .enumeratePhysicalDevices = true, + .getPhysicalDeviceProperties = true, + .enumerateDeviceExtensionProperties = true, + .getPhysicalDeviceSurfaceFormatsKHR = true, + .getPhysicalDeviceSurfacePresentModesKHR = true, + .getPhysicalDeviceSurfaceCapabilitiesKHR = true, + .getPhysicalDeviceQueueFamilyProperties = true, + .getPhysicalDeviceSurfaceSupportKHR = true, + .getPhysicalDeviceMemoryProperties = true, + .getDeviceProcAddr = true, +}); + +const DeviceDispatch = vk.DeviceWrapper(.{ + .destroyDevice = true, + .getDeviceQueue = true, + .createSemaphore = true, + .createFence = true, + .createImageView = true, + .destroyImageView = true, + .destroySemaphore = true, + .destroyFence = true, + .getSwapchainImagesKHR = true, + .createSwapchainKHR = true, + .destroySwapchainKHR = true, + .acquireNextImageKHR = true, + .deviceWaitIdle = true, + .waitForFences = true, + .resetFences = true, + .queueSubmit = true, + .queuePresentKHR = true, + .createCommandPool = true, + .destroyCommandPool = true, + .allocateCommandBuffers = true, + .freeCommandBuffers = true, + .queueWaitIdle = true, + .createShaderModule = true, + .destroyShaderModule = true, + .createPipelineLayout = true, + .destroyPipelineLayout = true, + .createRenderPass = true, + .destroyRenderPass = true, + .createGraphicsPipelines = true, + .destroyPipeline = true, + .createFramebuffer = true, + .destroyFramebuffer = true, + .beginCommandBuffer = true, + .endCommandBuffer = true, + .allocateMemory = true, + .freeMemory = true, + .createBuffer = true, + .destroyBuffer = true, + .getBufferMemoryRequirements = true, + .mapMemory = true, + .unmapMemory = true, + .bindBufferMemory = true, + .cmdBeginRenderPass = true, + .cmdEndRenderPass = true, + .cmdBindPipeline = true, + .cmdDraw = true, + .cmdSetViewport = true, + .cmdSetScissor = true, + .cmdBindVertexBuffers = true, + .cmdCopyBuffer = true, +}); + +pub const GraphicsContext = struct { + vkb: BaseDispatch, + vki: InstanceDispatch, + vkd: DeviceDispatch, + + instance: vk.Instance, + surface: vk.SurfaceKHR, + pdev: vk.PhysicalDevice, + props: vk.PhysicalDeviceProperties, + mem_props: vk.PhysicalDeviceMemoryProperties, + + dev: vk.Device, + graphics_queue: Queue, + present_queue: Queue, + + pub fn init(allocator: Allocator, app_name: [*:0]const u8, window: *c.GLFWwindow) !GraphicsContext { + var self: GraphicsContext = undefined; + self.vkb = try BaseDispatch.load(c.glfwGetInstanceProcAddress); + + var glfw_exts_count: u32 = 0; + const glfw_exts = 
c.glfwGetRequiredInstanceExtensions(&glfw_exts_count); + + const app_info = vk.ApplicationInfo{ + .p_application_name = app_name, + .application_version = vk.makeApiVersion(0, 0, 0, 0), + .p_engine_name = app_name, + .engine_version = vk.makeApiVersion(0, 0, 0, 0), + .api_version = vk.API_VERSION_1_2, + }; + + self.instance = try self.vkb.createInstance(&.{ + .p_application_info = &app_info, + .enabled_extension_count = glfw_exts_count, + .pp_enabled_extension_names = @as([*]const [*:0]const u8, @ptrCast(glfw_exts)), + }, null); + + self.vki = try InstanceDispatch.load(self.instance, self.vkb.dispatch.vkGetInstanceProcAddr); + errdefer self.vki.destroyInstance(self.instance, null); + + self.surface = try createSurface(self.instance, window); + errdefer self.vki.destroySurfaceKHR(self.instance, self.surface, null); + + const candidate = try pickPhysicalDevice(self.vki, self.instance, allocator, self.surface); + self.pdev = candidate.pdev; + self.props = candidate.props; + self.dev = try initializeCandidate(self.vki, candidate); + self.vkd = try DeviceDispatch.load(self.dev, self.vki.dispatch.vkGetDeviceProcAddr); + errdefer self.vkd.destroyDevice(self.dev, null); + + self.graphics_queue = Queue.init(self.vkd, self.dev, candidate.queues.graphics_family); + self.present_queue = Queue.init(self.vkd, self.dev, candidate.queues.present_family); + + self.mem_props = self.vki.getPhysicalDeviceMemoryProperties(self.pdev); + + return self; + } + + pub fn deinit(self: GraphicsContext) void { + self.vkd.destroyDevice(self.dev, null); + self.vki.destroySurfaceKHR(self.instance, self.surface, null); + self.vki.destroyInstance(self.instance, null); + } + + pub fn deviceName(self: *const GraphicsContext) []const u8 { + return std.mem.sliceTo(&self.props.device_name, 0); + } + + pub fn findMemoryTypeIndex(self: GraphicsContext, memory_type_bits: u32, flags: vk.MemoryPropertyFlags) !u32 { + for (self.mem_props.memory_types[0..self.mem_props.memory_type_count], 0..) 
|mem_type, i| { + if (memory_type_bits & (@as(u32, 1) << @truncate(i)) != 0 and mem_type.property_flags.contains(flags)) { + return @truncate(i); + } + } + + return error.NoSuitableMemoryType; + } + + pub fn allocate(self: GraphicsContext, requirements: vk.MemoryRequirements, flags: vk.MemoryPropertyFlags) !vk.DeviceMemory { + return try self.vkd.allocateMemory(self.dev, &.{ + .allocation_size = requirements.size, + .memory_type_index = try self.findMemoryTypeIndex(requirements.memory_type_bits, flags), + }, null); + } +}; + +pub const Queue = struct { + handle: vk.Queue, + family: u32, + + fn init(vkd: DeviceDispatch, dev: vk.Device, family: u32) Queue { + return .{ + .handle = vkd.getDeviceQueue(dev, family, 0), + .family = family, + }; + } +}; + +fn createSurface(instance: vk.Instance, window: *c.GLFWwindow) !vk.SurfaceKHR { + var surface: vk.SurfaceKHR = undefined; + if (c.glfwCreateWindowSurface(instance, window, null, &surface) != .success) { + return error.SurfaceInitFailed; + } + + return surface; +} + +fn initializeCandidate(vki: InstanceDispatch, candidate: DeviceCandidate) !vk.Device { + const priority = [_]f32{1}; + const qci = [_]vk.DeviceQueueCreateInfo{ + .{ + .queue_family_index = candidate.queues.graphics_family, + .queue_count = 1, + .p_queue_priorities = &priority, + }, + .{ + .queue_family_index = candidate.queues.present_family, + .queue_count = 1, + .p_queue_priorities = &priority, + }, + }; + + const queue_count: u32 = if (candidate.queues.graphics_family == candidate.queues.present_family) + 1 + else + 2; + + return try vki.createDevice(candidate.pdev, &.{ + .queue_create_info_count = queue_count, + .p_queue_create_infos = &qci, + .enabled_extension_count = required_device_extensions.len, + .pp_enabled_extension_names = @as([*]const [*:0]const u8, @ptrCast(&required_device_extensions)), + }, null); +} + +const DeviceCandidate = struct { + pdev: vk.PhysicalDevice, + props: vk.PhysicalDeviceProperties, + queues: QueueAllocation, +}; + +const QueueAllocation = struct { + graphics_family: u32, + present_family: u32, +}; + +fn pickPhysicalDevice( + vki: InstanceDispatch, + instance: vk.Instance, + allocator: Allocator, + surface: vk.SurfaceKHR, +) !DeviceCandidate { + var device_count: u32 = undefined; + _ = try vki.enumeratePhysicalDevices(instance, &device_count, null); + + const pdevs = try allocator.alloc(vk.PhysicalDevice, device_count); + defer allocator.free(pdevs); + + _ = try vki.enumeratePhysicalDevices(instance, &device_count, pdevs.ptr); + + for (pdevs) |pdev| { + if (try checkSuitable(vki, pdev, allocator, surface)) |candidate| { + return candidate; + } + } + + return error.NoSuitableDevice; +} + +fn checkSuitable( + vki: InstanceDispatch, + pdev: vk.PhysicalDevice, + allocator: Allocator, + surface: vk.SurfaceKHR, +) !?DeviceCandidate { + const props = vki.getPhysicalDeviceProperties(pdev); + + if (!try checkExtensionSupport(vki, pdev, allocator)) { + return null; + } + + if (!try checkSurfaceSupport(vki, pdev, surface)) { + return null; + } + + if (try allocateQueues(vki, pdev, allocator, surface)) |allocation| { + return DeviceCandidate{ + .pdev = pdev, + .props = props, + .queues = allocation, + }; + } + + return null; +} + +fn allocateQueues(vki: InstanceDispatch, pdev: vk.PhysicalDevice, allocator: Allocator, surface: vk.SurfaceKHR) !?QueueAllocation { + var family_count: u32 = undefined; + vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, null); + + const families = try allocator.alloc(vk.QueueFamilyProperties, family_count); + defer 
allocator.free(families); + vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, families.ptr); + + var graphics_family: ?u32 = null; + var present_family: ?u32 = null; + + for (families, 0..) |properties, i| { + const family: u32 = @intCast(i); + + if (graphics_family == null and properties.queue_flags.graphics_bit) { + graphics_family = family; + } + + if (present_family == null and (try vki.getPhysicalDeviceSurfaceSupportKHR(pdev, family, surface)) == vk.TRUE) { + present_family = family; + } + } + + if (graphics_family != null and present_family != null) { + return QueueAllocation{ + .graphics_family = graphics_family.?, + .present_family = present_family.?, + }; + } + + return null; +} + +fn checkSurfaceSupport(vki: InstanceDispatch, pdev: vk.PhysicalDevice, surface: vk.SurfaceKHR) !bool { + var format_count: u32 = undefined; + _ = try vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &format_count, null); + + var present_mode_count: u32 = undefined; + _ = try vki.getPhysicalDeviceSurfacePresentModesKHR(pdev, surface, &present_mode_count, null); + + return format_count > 0 and present_mode_count > 0; +} + +fn checkExtensionSupport( + vki: InstanceDispatch, + pdev: vk.PhysicalDevice, + allocator: Allocator, +) !bool { + var count: u32 = undefined; + _ = try vki.enumerateDeviceExtensionProperties(pdev, null, &count, null); + + const propsv = try allocator.alloc(vk.ExtensionProperties, count); + defer allocator.free(propsv); + + _ = try vki.enumerateDeviceExtensionProperties(pdev, null, &count, propsv.ptr); + + for (required_device_extensions) |ext| { + for (propsv) |props| { + if (std.mem.eql(u8, std.mem.span(ext), std.mem.sliceTo(&props.extension_name, 0))) { + break; + } + } else { + return false; + } + } + + return true; +} diff --git a/src/main.zig b/src/main.zig index 1aa6e33..f17842b 100644 --- a/src/main.zig +++ b/src/main.zig @@ -1,29 +1,483 @@ const std = @import("std"); -const c = @import("c.zig"); const vk = @import("vk"); +const c = @import("c.zig"); +const shaders = @import("shaders"); +const GraphicsContext = @import("graphics_context.zig").GraphicsContext; +const Swapchain = @import("swapchain.zig").Swapchain; +const Allocator = std.mem.Allocator; -const Context = @import("Context.zig"); +const app_name = "vulkan-zig triangle example"; + +const Vertex = struct { + const binding_description = vk.VertexInputBindingDescription{ + .binding = 0, + .stride = @sizeOf(Vertex), + .input_rate = .vertex, + }; + + const attribute_description = [_]vk.VertexInputAttributeDescription{ + .{ + .binding = 0, + .location = 0, + .format = .r32g32_sfloat, + .offset = @offsetOf(Vertex, "pos"), + }, + .{ + .binding = 0, + .location = 1, + .format = .r32g32b32_sfloat, + .offset = @offsetOf(Vertex, "color"), + }, + }; + + pos: [2]f32, + color: [3]f32, +}; + +const vertices = [_]Vertex{ + .{ .pos = .{ 0, -0.5 }, .color = .{ 1, 0, 0 } }, + .{ .pos = .{ 0.5, 0.5 }, .color = .{ 0, 1, 0 } }, + .{ .pos = .{ -0.5, 0.5 }, .color = .{ 0, 0, 1 } }, +}; pub fn main() !void { - if (c.glfwInit() != c.GLFW_TRUE) { - return error.GlfwInitFailed; - } + if (c.glfwInit() != c.GLFW_TRUE) return error.GlfwInitFailed; defer c.glfwTerminate(); + if (c.glfwVulkanSupported() != c.GLFW_TRUE) { + std.log.err("GLFW could not find libvulkan", .{}); + return error.NoVulkan; + } + + var extent = vk.Extent2D{ .width = 800, .height = 600 }; + c.glfwWindowHint(c.GLFW_CLIENT_API, c.GLFW_NO_API); const window = c.glfwCreateWindow( - 720, - 1280, - "Hello World!", + @intCast(extent.width), + @intCast(extent.height), 
+ app_name, null, null, - ) orelse return error.glfwCreateWindowFailed; + ) orelse return error.WindowInitFailed; defer c.glfwDestroyWindow(window); - const ctx = try Context.init(window); - defer ctx.deinit(); + var gpa = std.heap.GeneralPurposeAllocator(.{}){}; + defer _ = gpa.deinit(); + const allocator = gpa.allocator(); - while (c.glfwWindowShouldClose(window) == 0) : (c.glfwPollEvents()) { - c.glfwSwapBuffers(window); + const gc = try GraphicsContext.init(allocator, app_name, window); + defer gc.deinit(); + + std.log.debug("Using device: {s}", .{gc.deviceName()}); + + var swapchain = try Swapchain.init(&gc, allocator, extent); + defer swapchain.deinit(); + + const pipeline_layout = try gc.vkd.createPipelineLayout(gc.dev, &.{ + .flags = .{}, + .set_layout_count = 0, + .p_set_layouts = undefined, + .push_constant_range_count = 0, + .p_push_constant_ranges = undefined, + }, null); + defer gc.vkd.destroyPipelineLayout(gc.dev, pipeline_layout, null); + + const render_pass = try createRenderPass(&gc, swapchain); + defer gc.vkd.destroyRenderPass(gc.dev, render_pass, null); + + const pipeline = try createPipeline(&gc, pipeline_layout, render_pass); + defer gc.vkd.destroyPipeline(gc.dev, pipeline, null); + + var framebuffers = try createFramebuffers(&gc, allocator, render_pass, swapchain); + defer destroyFramebuffers(&gc, allocator, framebuffers); + + const pool = try gc.vkd.createCommandPool(gc.dev, &.{ + .queue_family_index = gc.graphics_queue.family, + }, null); + defer gc.vkd.destroyCommandPool(gc.dev, pool, null); + + const buffer = try gc.vkd.createBuffer(gc.dev, &.{ + .size = @sizeOf(@TypeOf(vertices)), + .usage = .{ .transfer_dst_bit = true, .vertex_buffer_bit = true }, + .sharing_mode = .exclusive, + }, null); + defer gc.vkd.destroyBuffer(gc.dev, buffer, null); + const mem_reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, buffer); + const memory = try gc.allocate(mem_reqs, .{ .device_local_bit = true }); + defer gc.vkd.freeMemory(gc.dev, memory, null); + try gc.vkd.bindBufferMemory(gc.dev, buffer, memory, 0); + + try uploadVertices(&gc, pool, buffer); + + var cmdbufs = try createCommandBuffers( + &gc, + pool, + allocator, + buffer, + swapchain.extent, + render_pass, + pipeline, + framebuffers, + ); + defer destroyCommandBuffers(&gc, pool, allocator, cmdbufs); + + while (c.glfwWindowShouldClose(window) == c.GLFW_FALSE) { + var w: c_int = undefined; + var h: c_int = undefined; + c.glfwGetFramebufferSize(window, &w, &h); + + // Don't present or resize swapchain while the window is minimized + if (w == 0 or h == 0) { + c.glfwPollEvents(); + continue; + } + + const cmdbuf = cmdbufs[swapchain.image_index]; + + const state = swapchain.present(cmdbuf) catch |err| switch (err) { + error.OutOfDateKHR => Swapchain.PresentState.suboptimal, + else => |narrow| return narrow, + }; + + if (state == .suboptimal or extent.width != @as(u32, @intCast(w)) or extent.height != @as(u32, @intCast(h))) { + extent.width = @intCast(w); + extent.height = @intCast(h); + try swapchain.recreate(extent); + + destroyFramebuffers(&gc, allocator, framebuffers); + framebuffers = try createFramebuffers(&gc, allocator, render_pass, swapchain); + + destroyCommandBuffers(&gc, pool, allocator, cmdbufs); + cmdbufs = try createCommandBuffers( + &gc, + pool, + allocator, + buffer, + swapchain.extent, + render_pass, + pipeline, + framebuffers, + ); + } + + c.glfwPollEvents(); } + + try swapchain.waitForAllFences(); + try gc.vkd.deviceWaitIdle(gc.dev); +} + +fn uploadVertices(gc: *const GraphicsContext, pool: vk.CommandPool, 
buffer: vk.Buffer) !void { + const staging_buffer = try gc.vkd.createBuffer(gc.dev, &.{ + .size = @sizeOf(@TypeOf(vertices)), + .usage = .{ .transfer_src_bit = true }, + .sharing_mode = .exclusive, + }, null); + defer gc.vkd.destroyBuffer(gc.dev, staging_buffer, null); + const mem_reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, staging_buffer); + const staging_memory = try gc.allocate(mem_reqs, .{ .host_visible_bit = true, .host_coherent_bit = true }); + defer gc.vkd.freeMemory(gc.dev, staging_memory, null); + try gc.vkd.bindBufferMemory(gc.dev, staging_buffer, staging_memory, 0); + + { + const data = try gc.vkd.mapMemory(gc.dev, staging_memory, 0, vk.WHOLE_SIZE, .{}); + defer gc.vkd.unmapMemory(gc.dev, staging_memory); + + const gpu_vertices: [*]Vertex = @ptrCast(@alignCast(data)); + @memcpy(gpu_vertices, vertices[0..]); + } + + try copyBuffer(gc, pool, buffer, staging_buffer, @sizeOf(@TypeOf(vertices))); +} + +fn copyBuffer(gc: *const GraphicsContext, pool: vk.CommandPool, dst: vk.Buffer, src: vk.Buffer, size: vk.DeviceSize) !void { + var cmdbuf: vk.CommandBuffer = undefined; + try gc.vkd.allocateCommandBuffers(gc.dev, &.{ + .command_pool = pool, + .level = .primary, + .command_buffer_count = 1, + }, @ptrCast(&cmdbuf)); + defer gc.vkd.freeCommandBuffers(gc.dev, pool, 1, @ptrCast(&cmdbuf)); + + try gc.vkd.beginCommandBuffer(cmdbuf, &.{ + .flags = .{ .one_time_submit_bit = true }, + }); + + const region = vk.BufferCopy{ + .src_offset = 0, + .dst_offset = 0, + .size = size, + }; + gc.vkd.cmdCopyBuffer(cmdbuf, src, dst, 1, @ptrCast(®ion)); + + try gc.vkd.endCommandBuffer(cmdbuf); + + const si = vk.SubmitInfo{ + .command_buffer_count = 1, + .p_command_buffers = @ptrCast(&cmdbuf), + .p_wait_dst_stage_mask = undefined, + }; + try gc.vkd.queueSubmit(gc.graphics_queue.handle, 1, @ptrCast(&si), .null_handle); + try gc.vkd.queueWaitIdle(gc.graphics_queue.handle); +} + +fn createCommandBuffers( + gc: *const GraphicsContext, + pool: vk.CommandPool, + allocator: Allocator, + buffer: vk.Buffer, + extent: vk.Extent2D, + render_pass: vk.RenderPass, + pipeline: vk.Pipeline, + framebuffers: []vk.Framebuffer, +) ![]vk.CommandBuffer { + const cmdbufs = try allocator.alloc(vk.CommandBuffer, framebuffers.len); + errdefer allocator.free(cmdbufs); + + try gc.vkd.allocateCommandBuffers(gc.dev, &.{ + .command_pool = pool, + .level = .primary, + .command_buffer_count = @as(u32, @truncate(cmdbufs.len)), + }, cmdbufs.ptr); + errdefer gc.vkd.freeCommandBuffers(gc.dev, pool, @truncate(cmdbufs.len), cmdbufs.ptr); + + const clear = vk.ClearValue{ + .color = .{ .float_32 = .{ 0, 0, 0, 1 } }, + }; + + const viewport = vk.Viewport{ + .x = 0, + .y = 0, + .width = @as(f32, @floatFromInt(extent.width)), + .height = @as(f32, @floatFromInt(extent.height)), + .min_depth = 0, + .max_depth = 1, + }; + + const scissor = vk.Rect2D{ + .offset = .{ .x = 0, .y = 0 }, + .extent = extent, + }; + + for (cmdbufs, framebuffers) |cmdbuf, framebuffer| { + try gc.vkd.beginCommandBuffer(cmdbuf, &.{}); + + gc.vkd.cmdSetViewport(cmdbuf, 0, 1, @ptrCast(&viewport)); + gc.vkd.cmdSetScissor(cmdbuf, 0, 1, @ptrCast(&scissor)); + + // This needs to be a separate definition - see https://github.com/ziglang/zig/issues/7627. 
+ const render_area = vk.Rect2D{ + .offset = .{ .x = 0, .y = 0 }, + .extent = extent, + }; + + gc.vkd.cmdBeginRenderPass(cmdbuf, &.{ + .render_pass = render_pass, + .framebuffer = framebuffer, + .render_area = render_area, + .clear_value_count = 1, + .p_clear_values = @as([*]const vk.ClearValue, @ptrCast(&clear)), + }, .@"inline"); + + gc.vkd.cmdBindPipeline(cmdbuf, .graphics, pipeline); + const offset = [_]vk.DeviceSize{0}; + gc.vkd.cmdBindVertexBuffers(cmdbuf, 0, 1, @ptrCast(&buffer), &offset); + gc.vkd.cmdDraw(cmdbuf, vertices.len, 1, 0, 0); + + gc.vkd.cmdEndRenderPass(cmdbuf); + try gc.vkd.endCommandBuffer(cmdbuf); + } + + return cmdbufs; +} + +fn destroyCommandBuffers(gc: *const GraphicsContext, pool: vk.CommandPool, allocator: Allocator, cmdbufs: []vk.CommandBuffer) void { + gc.vkd.freeCommandBuffers(gc.dev, pool, @truncate(cmdbufs.len), cmdbufs.ptr); + allocator.free(cmdbufs); +} + +fn createFramebuffers(gc: *const GraphicsContext, allocator: Allocator, render_pass: vk.RenderPass, swapchain: Swapchain) ![]vk.Framebuffer { + const framebuffers = try allocator.alloc(vk.Framebuffer, swapchain.swap_images.len); + errdefer allocator.free(framebuffers); + + var i: usize = 0; + errdefer for (framebuffers[0..i]) |fb| gc.vkd.destroyFramebuffer(gc.dev, fb, null); + + for (framebuffers) |*fb| { + fb.* = try gc.vkd.createFramebuffer(gc.dev, &.{ + .render_pass = render_pass, + .attachment_count = 1, + .p_attachments = @as([*]const vk.ImageView, @ptrCast(&swapchain.swap_images[i].view)), + .width = swapchain.extent.width, + .height = swapchain.extent.height, + .layers = 1, + }, null); + i += 1; + } + + return framebuffers; +} + +fn destroyFramebuffers(gc: *const GraphicsContext, allocator: Allocator, framebuffers: []const vk.Framebuffer) void { + for (framebuffers) |fb| gc.vkd.destroyFramebuffer(gc.dev, fb, null); + allocator.free(framebuffers); +} + +fn createRenderPass(gc: *const GraphicsContext, swapchain: Swapchain) !vk.RenderPass { + const color_attachment = vk.AttachmentDescription{ + .format = swapchain.surface_format.format, + .samples = .{ .@"1_bit" = true }, + .load_op = .clear, + .store_op = .store, + .stencil_load_op = .dont_care, + .stencil_store_op = .dont_care, + .initial_layout = .undefined, + .final_layout = .present_src_khr, + }; + + const color_attachment_ref = vk.AttachmentReference{ + .attachment = 0, + .layout = .color_attachment_optimal, + }; + + const subpass = vk.SubpassDescription{ + .pipeline_bind_point = .graphics, + .color_attachment_count = 1, + .p_color_attachments = @ptrCast(&color_attachment_ref), + }; + + return try gc.vkd.createRenderPass(gc.dev, &.{ + .attachment_count = 1, + .p_attachments = @as([*]const vk.AttachmentDescription, @ptrCast(&color_attachment)), + .subpass_count = 1, + .p_subpasses = @as([*]const vk.SubpassDescription, @ptrCast(&subpass)), + }, null); +} + +fn createPipeline( + gc: *const GraphicsContext, + layout: vk.PipelineLayout, + render_pass: vk.RenderPass, +) !vk.Pipeline { + const vert = try gc.vkd.createShaderModule(gc.dev, &.{ + .code_size = shaders.triangle_vert.len, + .p_code = @as([*]const u32, @ptrCast(&shaders.triangle_vert)), + }, null); + defer gc.vkd.destroyShaderModule(gc.dev, vert, null); + + const frag = try gc.vkd.createShaderModule(gc.dev, &.{ + .code_size = shaders.triangle_frag.len, + .p_code = @as([*]const u32, @ptrCast(&shaders.triangle_frag)), + }, null); + defer gc.vkd.destroyShaderModule(gc.dev, frag, null); + + const pssci = [_]vk.PipelineShaderStageCreateInfo{ + .{ + .stage = .{ .vertex_bit = true }, + .module = 
vert, + .p_name = "main", + }, + .{ + .stage = .{ .fragment_bit = true }, + .module = frag, + .p_name = "main", + }, + }; + + const pvisci = vk.PipelineVertexInputStateCreateInfo{ + .vertex_binding_description_count = 1, + .p_vertex_binding_descriptions = @ptrCast(&Vertex.binding_description), + .vertex_attribute_description_count = Vertex.attribute_description.len, + .p_vertex_attribute_descriptions = &Vertex.attribute_description, + }; + + const piasci = vk.PipelineInputAssemblyStateCreateInfo{ + .topology = .triangle_list, + .primitive_restart_enable = vk.FALSE, + }; + + const pvsci = vk.PipelineViewportStateCreateInfo{ + .viewport_count = 1, + .p_viewports = undefined, // set in createCommandBuffers with cmdSetViewport + .scissor_count = 1, + .p_scissors = undefined, // set in createCommandBuffers with cmdSetScissor + }; + + const prsci = vk.PipelineRasterizationStateCreateInfo{ + .depth_clamp_enable = vk.FALSE, + .rasterizer_discard_enable = vk.FALSE, + .polygon_mode = .fill, + .cull_mode = .{ .back_bit = true }, + .front_face = .clockwise, + .depth_bias_enable = vk.FALSE, + .depth_bias_constant_factor = 0, + .depth_bias_clamp = 0, + .depth_bias_slope_factor = 0, + .line_width = 1, + }; + + const pmsci = vk.PipelineMultisampleStateCreateInfo{ + .rasterization_samples = .{ .@"1_bit" = true }, + .sample_shading_enable = vk.FALSE, + .min_sample_shading = 1, + .alpha_to_coverage_enable = vk.FALSE, + .alpha_to_one_enable = vk.FALSE, + }; + + const pcbas = vk.PipelineColorBlendAttachmentState{ + .blend_enable = vk.FALSE, + .src_color_blend_factor = .one, + .dst_color_blend_factor = .zero, + .color_blend_op = .add, + .src_alpha_blend_factor = .one, + .dst_alpha_blend_factor = .zero, + .alpha_blend_op = .add, + .color_write_mask = .{ .r_bit = true, .g_bit = true, .b_bit = true, .a_bit = true }, + }; + + const pcbsci = vk.PipelineColorBlendStateCreateInfo{ + .logic_op_enable = vk.FALSE, + .logic_op = .copy, + .attachment_count = 1, + .p_attachments = @ptrCast(&pcbas), + .blend_constants = [_]f32{ 0, 0, 0, 0 }, + }; + + const dynstate = [_]vk.DynamicState{ .viewport, .scissor }; + const pdsci = vk.PipelineDynamicStateCreateInfo{ + .flags = .{}, + .dynamic_state_count = dynstate.len, + .p_dynamic_states = &dynstate, + }; + + const gpci = vk.GraphicsPipelineCreateInfo{ + .flags = .{}, + .stage_count = 2, + .p_stages = &pssci, + .p_vertex_input_state = &pvisci, + .p_input_assembly_state = &piasci, + .p_tessellation_state = null, + .p_viewport_state = &pvsci, + .p_rasterization_state = &prsci, + .p_multisample_state = &pmsci, + .p_depth_stencil_state = null, + .p_color_blend_state = &pcbsci, + .p_dynamic_state = &pdsci, + .layout = layout, + .render_pass = render_pass, + .subpass = 0, + .base_pipeline_handle = .null_handle, + .base_pipeline_index = -1, + }; + + var pipeline: vk.Pipeline = undefined; + _ = try gc.vkd.createGraphicsPipelines( + gc.dev, + .null_handle, + 1, + @ptrCast(&gpci), + null, + @ptrCast(&pipeline), + ); + return pipeline; } diff --git a/src/shaders/triangle.frag b/src/shaders/triangle.frag new file mode 100644 index 0000000..8c952fe --- /dev/null +++ b/src/shaders/triangle.frag @@ -0,0 +1,9 @@ +#version 450 + +layout(location = 0) in vec3 v_color; + +layout(location = 0) out vec4 f_color; + +void main() { + f_color = vec4(v_color, 1.0); +} diff --git a/src/shaders/triangle.vert b/src/shaders/triangle.vert new file mode 100644 index 0000000..2b8dfa5 --- /dev/null +++ b/src/shaders/triangle.vert @@ -0,0 +1,11 @@ +#version 450 + +layout(location = 0) in vec2 a_pos; 
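// note: these input locations are wired to the VertexInputAttributeDescription
// entries in src/main.zig: location 0 carries the r32g32_sfloat position,
// location 1 the r32g32b32_sfloat color.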
+layout(location = 1) in vec3 a_color; + +layout(location = 0) out vec3 v_color; + +void main() { + gl_Position = vec4(a_pos, 0.0, 1.0); + v_color = a_color; +} diff --git a/src/swapchain.zig b/src/swapchain.zig new file mode 100644 index 0000000..09519ff --- /dev/null +++ b/src/swapchain.zig @@ -0,0 +1,322 @@ +const std = @import("std"); +const vk = @import("vk"); +const GraphicsContext = @import("graphics_context.zig").GraphicsContext; +const Allocator = std.mem.Allocator; + +pub const Swapchain = struct { + pub const PresentState = enum { + optimal, + suboptimal, + }; + + gc: *const GraphicsContext, + allocator: Allocator, + + surface_format: vk.SurfaceFormatKHR, + present_mode: vk.PresentModeKHR, + extent: vk.Extent2D, + handle: vk.SwapchainKHR, + + swap_images: []SwapImage, + image_index: u32, + next_image_acquired: vk.Semaphore, + + pub fn init(gc: *const GraphicsContext, allocator: Allocator, extent: vk.Extent2D) !Swapchain { + return try initRecycle(gc, allocator, extent, .null_handle); + } + + pub fn initRecycle(gc: *const GraphicsContext, allocator: Allocator, extent: vk.Extent2D, old_handle: vk.SwapchainKHR) !Swapchain { + const caps = try gc.vki.getPhysicalDeviceSurfaceCapabilitiesKHR(gc.pdev, gc.surface); + const actual_extent = findActualExtent(caps, extent); + if (actual_extent.width == 0 or actual_extent.height == 0) { + return error.InvalidSurfaceDimensions; + } + + const surface_format = try findSurfaceFormat(gc, allocator); + const present_mode = try findPresentMode(gc, allocator); + + var image_count = caps.min_image_count + 1; + if (caps.max_image_count > 0) { + image_count = @min(image_count, caps.max_image_count); + } + + const qfi = [_]u32{ gc.graphics_queue.family, gc.present_queue.family }; + const sharing_mode: vk.SharingMode = if (gc.graphics_queue.family != gc.present_queue.family) + .concurrent + else + .exclusive; + + const handle = try gc.vkd.createSwapchainKHR(gc.dev, &.{ + .surface = gc.surface, + .min_image_count = image_count, + .image_format = surface_format.format, + .image_color_space = surface_format.color_space, + .image_extent = actual_extent, + .image_array_layers = 1, + .image_usage = .{ .color_attachment_bit = true, .transfer_dst_bit = true }, + .image_sharing_mode = sharing_mode, + .queue_family_index_count = qfi.len, + .p_queue_family_indices = &qfi, + .pre_transform = caps.current_transform, + .composite_alpha = .{ .opaque_bit_khr = true }, + .present_mode = present_mode, + .clipped = vk.TRUE, + .old_swapchain = old_handle, + }, null); + errdefer gc.vkd.destroySwapchainKHR(gc.dev, handle, null); + + if (old_handle != .null_handle) { + // Apparently, the old swapchain handle still needs to be destroyed after recreating. 
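A standalone sketch of the image-count handling used in initRecycle above (hypothetical helper, plain integers instead of the vk capability struct, not part of the patches): one image more than the reported minimum is requested, and the count is clamped only when max_image_count is non-zero, since Vulkan reports 0 to mean "no upper limit".

    const std = @import("std");

    // Hypothetical helper mirroring the image-count logic in initRecycle above.
    fn chooseImageCount(min_image_count: u32, max_image_count: u32) u32 {
        var count = min_image_count + 1;
        if (max_image_count > 0) {
            count = @min(count, max_image_count);
        }
        return count;
    }

    test "image count is clamped only when the surface reports a maximum" {
        try std.testing.expectEqual(@as(u32, 3), chooseImageCount(2, 0)); // no limit reported
        try std.testing.expectEqual(@as(u32, 2), chooseImageCount(2, 2)); // clamped to the maximum
    }
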
+ gc.vkd.destroySwapchainKHR(gc.dev, old_handle, null); + } + + const swap_images = try initSwapchainImages(gc, handle, surface_format.format, allocator); + errdefer { + for (swap_images) |si| si.deinit(gc); + allocator.free(swap_images); + } + + var next_image_acquired = try gc.vkd.createSemaphore(gc.dev, &.{}, null); + errdefer gc.vkd.destroySemaphore(gc.dev, next_image_acquired, null); + + const result = try gc.vkd.acquireNextImageKHR(gc.dev, handle, std.math.maxInt(u64), next_image_acquired, .null_handle); + if (result.result != .success) { + return error.ImageAcquireFailed; + } + + std.mem.swap(vk.Semaphore, &swap_images[result.image_index].image_acquired, &next_image_acquired); + return Swapchain{ + .gc = gc, + .allocator = allocator, + .surface_format = surface_format, + .present_mode = present_mode, + .extent = actual_extent, + .handle = handle, + .swap_images = swap_images, + .image_index = result.image_index, + .next_image_acquired = next_image_acquired, + }; + } + + fn deinitExceptSwapchain(self: Swapchain) void { + for (self.swap_images) |si| si.deinit(self.gc); + self.allocator.free(self.swap_images); + self.gc.vkd.destroySemaphore(self.gc.dev, self.next_image_acquired, null); + } + + pub fn waitForAllFences(self: Swapchain) !void { + for (self.swap_images) |si| si.waitForFence(self.gc) catch {}; + } + + pub fn deinit(self: Swapchain) void { + self.deinitExceptSwapchain(); + self.gc.vkd.destroySwapchainKHR(self.gc.dev, self.handle, null); + } + + pub fn recreate(self: *Swapchain, new_extent: vk.Extent2D) !void { + const gc = self.gc; + const allocator = self.allocator; + const old_handle = self.handle; + self.deinitExceptSwapchain(); + self.* = try initRecycle(gc, allocator, new_extent, old_handle); + } + + pub fn currentImage(self: Swapchain) vk.Image { + return self.swap_images[self.image_index].image; + } + + pub fn currentSwapImage(self: Swapchain) *const SwapImage { + return &self.swap_images[self.image_index]; + } + + pub fn present(self: *Swapchain, cmdbuf: vk.CommandBuffer) !PresentState { + // Simple method: + // 1) Acquire next image + // 2) Wait for and reset fence of the acquired image + // 3) Submit command buffer with fence of acquired image, + // dependendent on the semaphore signalled by the first step. + // 4) Present current frame, dependent on semaphore signalled by previous step + // Problem: This way we can't reference the current image while rendering. + // Better method: Shuffle the steps around such that acquire next image is the last step, + // leaving the swapchain in a state with the current image. + // 1) Wait for and reset fence of current image + // 2) Submit command buffer, signalling fence of current image and dependent on + // the semaphore signalled by step 4. 
+ // 3) Present current frame, dependent on semaphore signalled by the submit + // 4) Acquire next image, signalling its semaphore + // One problem that arises is that we can't know beforehand which semaphore to signal, + // so we keep an extra auxilery semaphore that is swapped around + + // Step 1: Make sure the current frame has finished rendering + const current = self.currentSwapImage(); + try current.waitForFence(self.gc); + try self.gc.vkd.resetFences(self.gc.dev, 1, @ptrCast(¤t.frame_fence)); + + // Step 2: Submit the command buffer + const wait_stage = [_]vk.PipelineStageFlags{.{ .top_of_pipe_bit = true }}; + try self.gc.vkd.queueSubmit(self.gc.graphics_queue.handle, 1, &[_]vk.SubmitInfo{.{ + .wait_semaphore_count = 1, + .p_wait_semaphores = @ptrCast(¤t.image_acquired), + .p_wait_dst_stage_mask = &wait_stage, + .command_buffer_count = 1, + .p_command_buffers = @ptrCast(&cmdbuf), + .signal_semaphore_count = 1, + .p_signal_semaphores = @ptrCast(¤t.render_finished), + }}, current.frame_fence); + + // Step 3: Present the current frame + _ = try self.gc.vkd.queuePresentKHR(self.gc.present_queue.handle, &.{ + .wait_semaphore_count = 1, + .p_wait_semaphores = @as([*]const vk.Semaphore, @ptrCast(¤t.render_finished)), + .swapchain_count = 1, + .p_swapchains = @as([*]const vk.SwapchainKHR, @ptrCast(&self.handle)), + .p_image_indices = @as([*]const u32, @ptrCast(&self.image_index)), + }); + + // Step 4: Acquire next frame + const result = try self.gc.vkd.acquireNextImageKHR( + self.gc.dev, + self.handle, + std.math.maxInt(u64), + self.next_image_acquired, + .null_handle, + ); + + std.mem.swap(vk.Semaphore, &self.swap_images[result.image_index].image_acquired, &self.next_image_acquired); + self.image_index = result.image_index; + + return switch (result.result) { + .success => .optimal, + .suboptimal_khr => .suboptimal, + else => unreachable, + }; + } +}; + +const SwapImage = struct { + image: vk.Image, + view: vk.ImageView, + image_acquired: vk.Semaphore, + render_finished: vk.Semaphore, + frame_fence: vk.Fence, + + fn init(gc: *const GraphicsContext, image: vk.Image, format: vk.Format) !SwapImage { + const view = try gc.vkd.createImageView(gc.dev, &.{ + .image = image, + .view_type = .@"2d", + .format = format, + .components = .{ .r = .identity, .g = .identity, .b = .identity, .a = .identity }, + .subresource_range = .{ + .aspect_mask = .{ .color_bit = true }, + .base_mip_level = 0, + .level_count = 1, + .base_array_layer = 0, + .layer_count = 1, + }, + }, null); + errdefer gc.vkd.destroyImageView(gc.dev, view, null); + + const image_acquired = try gc.vkd.createSemaphore(gc.dev, &.{}, null); + errdefer gc.vkd.destroySemaphore(gc.dev, image_acquired, null); + + const render_finished = try gc.vkd.createSemaphore(gc.dev, &.{}, null); + errdefer gc.vkd.destroySemaphore(gc.dev, render_finished, null); + + const frame_fence = try gc.vkd.createFence(gc.dev, &.{ .flags = .{ .signaled_bit = true } }, null); + errdefer gc.vkd.destroyFence(gc.dev, frame_fence, null); + + return SwapImage{ + .image = image, + .view = view, + .image_acquired = image_acquired, + .render_finished = render_finished, + .frame_fence = frame_fence, + }; + } + + fn deinit(self: SwapImage, gc: *const GraphicsContext) void { + self.waitForFence(gc) catch return; + gc.vkd.destroyImageView(gc.dev, self.view, null); + gc.vkd.destroySemaphore(gc.dev, self.image_acquired, null); + gc.vkd.destroySemaphore(gc.dev, self.render_finished, null); + gc.vkd.destroyFence(gc.dev, self.frame_fence, null); + } + + fn waitForFence(self: 
SwapImage, gc: *const GraphicsContext) !void { + _ = try gc.vkd.waitForFences(gc.dev, 1, @ptrCast(&self.frame_fence), vk.TRUE, std.math.maxInt(u64)); + } +}; + +fn initSwapchainImages(gc: *const GraphicsContext, swapchain: vk.SwapchainKHR, format: vk.Format, allocator: Allocator) ![]SwapImage { + var count: u32 = undefined; + _ = try gc.vkd.getSwapchainImagesKHR(gc.dev, swapchain, &count, null); + const images = try allocator.alloc(vk.Image, count); + defer allocator.free(images); + _ = try gc.vkd.getSwapchainImagesKHR(gc.dev, swapchain, &count, images.ptr); + + const swap_images = try allocator.alloc(SwapImage, count); + errdefer allocator.free(swap_images); + + var i: usize = 0; + errdefer for (swap_images[0..i]) |si| si.deinit(gc); + + for (images) |image| { + swap_images[i] = try SwapImage.init(gc, image, format); + i += 1; + } + + return swap_images; +} + +fn findSurfaceFormat(gc: *const GraphicsContext, allocator: Allocator) !vk.SurfaceFormatKHR { + const preferred = vk.SurfaceFormatKHR{ + .format = .b8g8r8a8_srgb, + .color_space = .srgb_nonlinear_khr, + }; + + var count: u32 = undefined; + _ = try gc.vki.getPhysicalDeviceSurfaceFormatsKHR(gc.pdev, gc.surface, &count, null); + const surface_formats = try allocator.alloc(vk.SurfaceFormatKHR, count); + defer allocator.free(surface_formats); + _ = try gc.vki.getPhysicalDeviceSurfaceFormatsKHR(gc.pdev, gc.surface, &count, surface_formats.ptr); + + for (surface_formats) |sfmt| { + if (std.meta.eql(sfmt, preferred)) { + return preferred; + } + } + + return surface_formats[0]; // There must always be at least one supported surface format +} + +fn findPresentMode(gc: *const GraphicsContext, allocator: Allocator) !vk.PresentModeKHR { + var count: u32 = undefined; + _ = try gc.vki.getPhysicalDeviceSurfacePresentModesKHR(gc.pdev, gc.surface, &count, null); + const present_modes = try allocator.alloc(vk.PresentModeKHR, count); + defer allocator.free(present_modes); + _ = try gc.vki.getPhysicalDeviceSurfacePresentModesKHR(gc.pdev, gc.surface, &count, present_modes.ptr); + + const preferred = [_]vk.PresentModeKHR{ + .mailbox_khr, + .immediate_khr, + }; + + for (preferred) |mode| { + if (std.mem.indexOfScalar(vk.PresentModeKHR, present_modes, mode) != null) { + return mode; + } + } + + return .fifo_khr; +} + +fn findActualExtent(caps: vk.SurfaceCapabilitiesKHR, extent: vk.Extent2D) vk.Extent2D { + if (caps.current_extent.width != 0xFFFF_FFFF) { + return caps.current_extent; + } else { + return .{ + .width = std.math.clamp(extent.width, caps.min_image_extent.width, caps.max_image_extent.width), + .height = std.math.clamp(extent.height, caps.min_image_extent.height, caps.max_image_extent.height), + }; + } +} From 6763d9a73a36a4c5818c330213a6b507931abd23 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Wed, 27 Mar 2024 10:56:18 -0400 Subject: [PATCH 014/113] move vk.xml to src --- build.zig | 2 +- {reg => src}/vk.xml | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename {reg => src}/vk.xml (100%) diff --git a/build.zig b/build.zig index bded306..1e6582a 100644 --- a/build.zig +++ b/build.zig @@ -7,7 +7,7 @@ pub fn build(b: *std.Build) void { const optimize = b.standardOptimizeOption(.{}); const vk = b.dependency("vulkan-zig", .{ - .registry = @as([]const u8, b.pathFromRoot("reg/vk.xml")), + .registry = @as([]const u8, b.pathFromRoot("src/vk.xml")), }); const vkmod = vk.module("vulkan-zig"); diff --git a/reg/vk.xml b/src/vk.xml similarity index 100% rename from reg/vk.xml rename to src/vk.xml From 4413e242fd2e79f16a24a10e874ad455d8a6b6ce Mon Sep 
17 00:00:00 2001 From: David Allemang Date: Wed, 27 Mar 2024 10:57:34 -0400 Subject: [PATCH 015/113] fixup! direct copy vulkan-zig example --- src/Context.zig | 439 ------------------------------------------------ 1 file changed, 439 deletions(-) delete mode 100644 src/Context.zig diff --git a/src/Context.zig b/src/Context.zig deleted file mode 100644 index 0ef90e8..0000000 --- a/src/Context.zig +++ /dev/null @@ -1,439 +0,0 @@ -const Self = @This(); - -const std = @import("std"); -const c = @import("c.zig"); -const vk = @import("vk"); -const builtin = @import("builtin"); - -const USE_DEBUG_LAYERS = switch (builtin.mode) { - .ReleaseSafe, .Debug => true, - .ReleaseSmall, .ReleaseFast => false, -}; - -const MAX_DEVICES = 16; -const MAX_DEVICE_EXTENSIONS = 512; -const MAX_INSTANCE_EXTENSIONS = 64; -const MAX_LAYERS = 512; - -vkb: BaseDispatch, -vki: InstanceDispatch, -vkd: DeviceDispatch, - -instance: vk.Instance, -device: vk.Device, - -queues: struct { - graphics: vk.Queue, - present: vk.Queue, -}, - -surface: vk.SurfaceKHR, - -messenger: if (USE_DEBUG_LAYERS) vk.DebugUtilsMessengerEXT else void, - -fn Enumeration(comptime T: type, comptime cap: u32) type { - return struct { - buf: [cap]T = undefined, - len: u32 = 0, - - const FULL: @This() = .{ .len = cap }; - const EMPTY: @This() = .{ .len = 0 }; - - pub fn slice(self: anytype) switch (@TypeOf(&self.buf)) { - *[cap]T => []T, - *const [cap]T => []const T, - else => unreachable, - } { - return self.buf[0..self.len]; - } - - pub fn appendSlice(self: *@This(), source: []const T) !void { - if (self.len + source.len > cap) return error.Overflow; - @memcpy(self.buf[self.len..][0..source.len], source); - self.len += @intCast(source.len); - } - - pub fn append(self: *@This(), val: T) !void { - if (self.len + 1 > cap) return error.Overflow; - self.buf[self.len] = val; - self.len += 1; - } - }; -} - -pub fn init(window: *c.GLFWwindow) !Self { - var self: Self = undefined; - - self.vkb = try BaseDispatch.load(&c.glfwGetInstanceProcAddress); - const vkb = self.vkb; - - var req_exts = Enumeration([*:0]const u8, MAX_INSTANCE_EXTENSIONS).EMPTY; - var req_layers = Enumeration([*:0]const u8, MAX_LAYERS).EMPTY; - var req_dev_exts = Enumeration([*:0]const u8, MAX_DEVICE_EXTENSIONS).EMPTY; - - try req_dev_exts.append(vk.extension_info.khr_swapchain.name); - - if (USE_DEBUG_LAYERS) { - try req_layers.append("VK_LAYER_KHRONOS_validation"); - try req_exts.append(vk.extension_info.ext_debug_utils.name); - } - - { - var glfw_ext_count: u32 = 0; - const glfw_exts: [*][*:0]const u8 = @ptrCast(c.glfwGetRequiredInstanceExtensions(&glfw_ext_count)); - try req_exts.appendSlice(glfw_exts[0..glfw_ext_count]); - } - - std.log.debug("requesting extensions: {s}", .{req_exts.slice()}); - std.log.debug("requesting layers: {s}", .{req_layers.slice()}); - std.log.debug("requesting device extensions: {s}", .{req_dev_exts.slice()}); - - var available_exts = Enumeration(vk.ExtensionProperties, MAX_INSTANCE_EXTENSIONS).FULL; - _ = try vkb.enumerateInstanceExtensionProperties( - null, - &available_exts.len, - &available_exts.buf, - ); - - var available_layers = Enumeration(vk.LayerProperties, MAX_LAYERS).FULL; - _ = try vkb.enumerateInstanceLayerProperties( - &available_layers.len, - &available_layers.buf, - ); - - for (req_exts.slice()) |name| { - const required_name = std.mem.sliceTo(name, 0); - for (available_exts.slice()) |prop| { - const available_name = std.mem.sliceTo(&prop.extension_name, 0); - if (std.mem.eql(u8, required_name, available_name)) break; - } else { - return 
error.ExtensionNotPresent; - } - } - - for (req_layers.slice()) |name| { - const required_name = std.mem.sliceTo(name, 0); - for (available_layers.slice()) |prop| { - const available_name = std.mem.sliceTo(&prop.layer_name, 0); - if (std.mem.eql(u8, required_name, available_name)) break; - } else { - return error.LayerNotPresent; - } - } - - const debug_create_info = vk.DebugUtilsMessengerCreateInfoEXT{ - .message_severity = vk.DebugUtilsMessageSeverityFlagsEXT{ - .verbose_bit_ext = false, - .warning_bit_ext = true, - .error_bit_ext = true, - .info_bit_ext = false, - }, - .message_type = vk.DebugUtilsMessageTypeFlagsEXT{ - .general_bit_ext = true, - .validation_bit_ext = true, - .performance_bit_ext = true, - .device_address_binding_bit_ext = false, - }, - .pfn_user_callback = &debug_callback, - .p_user_data = null, - }; - - const app_info = vk.ApplicationInfo{ - .p_application_name = "Hello World", - .application_version = vk.makeApiVersion(0, 0, 0, 0), - .p_engine_name = "No Engine", - .engine_version = vk.makeApiVersion(0, 0, 0, 0), - .api_version = vk.API_VERSION_1_3, - }; - - const instance_create_info = vk.InstanceCreateInfo{ - .p_application_info = &app_info, - .enabled_extension_count = req_exts.len, - .pp_enabled_extension_names = &req_exts.buf, - .enabled_layer_count = req_layers.len, - .pp_enabled_layer_names = &req_layers.buf, - .p_next = if (USE_DEBUG_LAYERS) &debug_create_info else null, - }; - - self.instance = try vkb.createInstance(&instance_create_info, null); - self.vki = try InstanceDispatch.load(self.instance, vkb.dispatch.vkGetInstanceProcAddr); - const vki = self.vki; - errdefer vki.destroyInstance(self.instance, null); - - if (USE_DEBUG_LAYERS) self.messenger = try vki.createDebugUtilsMessengerEXT( - self.instance, - &debug_create_info, - null, - ); - errdefer if (USE_DEBUG_LAYERS) vki.destroyDebugUtilsMessengerEXT( - self.instance, - self.messenger, - null, - ); - - switch (c.glfwCreateWindowSurface( - self.instance, - window, - null, - &self.surface, - )) { - .success => {}, - else => |e| { - std.log.err("{}", .{e}); - return error.Unknown; - }, - } - errdefer vki.destroySurfaceKHR(self.instance, self.surface, null); - - var devices = Enumeration(vk.PhysicalDevice, MAX_DEVICES).FULL; - _ = try vki.enumeratePhysicalDevices( - self.instance, - &devices.len, - &devices.buf, - ); - - // todo some ranking strategy to find the most-suitable device - const Selection = struct { - device: vk.PhysicalDevice, - props: vk.PhysicalDeviceProperties, - feats: vk.PhysicalDeviceFeatures, - }; - const selected: Selection = find_device: for (devices.slice()) |device| { - const props = vki.getPhysicalDeviceProperties(device); - const feats = vki.getPhysicalDeviceFeatures(device); - - if (props.device_type != vk.PhysicalDeviceType.discrete_gpu) continue; - - // if (feats.geometry_shader == vk.FALSE) continue; - - var available_dev_exts = Enumeration(vk.ExtensionProperties, MAX_DEVICE_EXTENSIONS).FULL; - _ = try vki.enumerateDeviceExtensionProperties( - device, - null, - &available_dev_exts.len, - &available_dev_exts.buf, - ); - - for (req_dev_exts.slice()) |name| { - const required_name = std.mem.sliceTo(name, 0); - for (available_dev_exts.slice()) |prop| { - const available_name = std.mem.sliceTo(&prop.extension_name, 0); - if (std.mem.eql(u8, required_name, available_name)) break; - } else { - std.log.warn("cannot find {s}\n", .{required_name}); - continue :find_device; - } - } - - break .{ - .device = device, - .props = props, - .feats = feats, - }; - } else { - return 
error.NoSuitablePhysicalDevice; - }; - - var queue_families = Enumeration(vk.QueueFamilyProperties, 64).FULL; - vki.getPhysicalDeviceQueueFamilyProperties( - selected.device, - &queue_families.len, - &queue_families.buf, - ); - - // todo this should be incorporated with physical device selection/ranking. - const Indices = struct { - graphics: u32, - present: u32, - }; - const indices: Indices = find_index: { - var graphics: ?u32 = null; - var present: ?u32 = null; - - for (queue_families.slice(), 0..) |prop, idx| { - if (graphics == null and prop.queue_flags.graphics_bit) { - graphics = @intCast(idx); - // continue; // forces distinct queue families - } - - if (present == null) { - const present_support = try vki.getPhysicalDeviceSurfaceSupportKHR( - selected.device, - @intCast(idx), - self.surface, - ) == vk.TRUE; - if (present_support) { - present = @intCast(idx); - } - } - - if (graphics != null and present != null) { - break :find_index .{ - .graphics = graphics.?, - .present = present.?, - }; - } - } - - return error.IncompatibleDeviceQueues; - }; - - const gp_priorities = [_]f32{ 1.0, 1.0 }; - - var queue_create_infos = Enumeration(vk.DeviceQueueCreateInfo, 2).EMPTY; - - // queue info family indices must be unique. so if the graphics and present queues are the same, create two queues - // in the same family. otherwise create queues in separate families. there should probably be some general way to - // group and unpack the queues, but I'm not bothering with that for now until I restructure this monolithic function - // in general. - if (indices.graphics == indices.present) { - const gp_slice = gp_priorities[0..2]; - try queue_create_infos.append(.{ - .queue_family_index = indices.graphics, - .queue_count = @intCast(gp_slice.len), - .p_queue_priorities = gp_slice.ptr, - }); - } else { - const g_slice = gp_priorities[0..1]; - const p_slice = gp_priorities[1..2]; - try queue_create_infos.append(.{ - .queue_family_index = indices.graphics, - .queue_count = @intCast(g_slice.len), - .p_queue_priorities = g_slice.ptr, - }); - try queue_create_infos.append(.{ - .queue_family_index = indices.present, - .queue_count = @intCast(p_slice.len), - .p_queue_priorities = p_slice.ptr, - }); - } - - const device_create_info = vk.DeviceCreateInfo{ - .queue_create_info_count = queue_create_infos.len, - .p_queue_create_infos = &queue_create_infos.buf, - .p_enabled_features = &selected.feats, - .enabled_extension_count = req_dev_exts.len, - .pp_enabled_extension_names = &req_dev_exts.buf, - .enabled_layer_count = req_layers.len, - .pp_enabled_layer_names = &req_layers.buf, - }; - - self.device = try vki.createDevice( - selected.device, - &device_create_info, - null, - ); - self.vkd = try DeviceDispatch.load(self.device, vki.dispatch.vkGetDeviceProcAddr); - const vkd = self.vkd; - errdefer vkd.destroyDevice(self.device, null); - - if (indices.graphics == indices.present) { - // two queues in the same family - self.queues = .{ - .graphics = vkd.getDeviceQueue(self.device, indices.graphics, 0), - .present = vkd.getDeviceQueue(self.device, indices.present, 1), - }; - } else { - // queues from different families - self.queues = .{ - .graphics = vkd.getDeviceQueue(self.device, indices.graphics, 0), - .present = vkd.getDeviceQueue(self.device, indices.present, 0), - }; - } - - return self; -} - -pub fn deinit(self: Self) void { - self.vki.destroySurfaceKHR(self.instance, self.surface, null); - self.vkd.destroyDevice(self.device, null); - if (USE_DEBUG_LAYERS) self.vki.destroyDebugUtilsMessengerEXT( - 
self.instance, - self.messenger, - null, - ); - self.vki.destroyInstance(self.instance, null); -} - -export fn debug_callback( - message_severity: vk.DebugUtilsMessageSeverityFlagsEXT, - message_type: vk.DebugUtilsMessageTypeFlagsEXT, - p_callback_data: ?*const vk.DebugUtilsMessengerCallbackDataEXT, - _: ?*anyopaque, -) callconv(.C) vk.Bool32 { - if (p_callback_data == null) return vk.FALSE; - if (p_callback_data.?.p_message == null) return vk.FALSE; - const msg = p_callback_data.?.p_message.?; - - const scopes = .{ - "validation", - "performance", - "device_address_binding", - "general", - }; - - const scope: []const u8 = inline for (scopes) |tag| { - if (@field(message_type, tag ++ "_bit_ext")) { - break tag; - } - } else { - return vk.FALSE; - }; - - const levels = .{ - "error", - "info", - "warning", - "verbose", - }; - - const level: []const u8 = inline for (levels) |tag| { - if (@field(message_severity, tag ++ "_bit_ext")) { - break tag; - } - } else { - return vk.FALSE; - }; - - // ripped from std.log, but with my own levels and scope. - const stderr = std.io.getStdErr().writer(); - var bw = std.io.bufferedWriter(stderr); - const writer = bw.writer(); - - std.debug.getStderrMutex().lock(); - defer std.debug.getStderrMutex().unlock(); - nosuspend { - writer.print("vk-{s}({s}): {s}\n", .{ level, scope, msg }) catch return vk.FALSE; - bw.flush() catch return vk.FALSE; - } - - return vk.FALSE; -} - -const BaseDispatch = vk.BaseWrapper(.{ - .createInstance = true, - .getInstanceProcAddr = true, - .enumerateInstanceExtensionProperties = true, - .enumerateInstanceLayerProperties = true, -}); - -const InstanceDispatch = vk.InstanceWrapper(.{ - .destroyInstance = true, - .createDebugUtilsMessengerEXT = USE_DEBUG_LAYERS, - .destroyDebugUtilsMessengerEXT = USE_DEBUG_LAYERS, - .submitDebugUtilsMessageEXT = USE_DEBUG_LAYERS, - .enumeratePhysicalDevices = true, - .getPhysicalDeviceProperties = true, - .getPhysicalDeviceFeatures = true, - .getPhysicalDeviceQueueFamilyProperties = true, - .createDevice = true, - .getDeviceProcAddr = true, - .destroySurfaceKHR = true, - .getPhysicalDeviceSurfaceSupportKHR = true, - .enumerateDeviceExtensionProperties = true, -}); - -const DeviceDispatch = vk.DeviceWrapper(.{ - .destroyDevice = true, - .getDeviceQueue = true, -}); From f65a22116a50b0857a63a72cd9ba77f68e6ffba5 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Wed, 27 Mar 2024 11:56:27 -0400 Subject: [PATCH 016/113] indexed rendering --- src/graphics_context.zig | 2 + src/main.zig | 83 +++++++++++++++++++++++++++++----------- 2 files changed, 63 insertions(+), 22 deletions(-) diff --git a/src/graphics_context.zig b/src/graphics_context.zig index 18ee6d9..2c2cd30 100644 --- a/src/graphics_context.zig +++ b/src/graphics_context.zig @@ -73,9 +73,11 @@ const DeviceDispatch = vk.DeviceWrapper(.{ .cmdEndRenderPass = true, .cmdBindPipeline = true, .cmdDraw = true, + .cmdDrawIndexed = true, .cmdSetViewport = true, .cmdSetScissor = true, .cmdBindVertexBuffers = true, + .cmdBindIndexBuffer = true, .cmdCopyBuffer = true, }); diff --git a/src/main.zig b/src/main.zig index f17842b..cce1c18 100644 --- a/src/main.zig +++ b/src/main.zig @@ -8,7 +8,7 @@ const Allocator = std.mem.Allocator; const app_name = "vulkan-zig triangle example"; -const Vertex = struct { +const Vertex = extern struct { const binding_description = vk.VertexInputBindingDescription{ .binding = 0, .stride = @sizeOf(Vertex), @@ -35,11 +35,20 @@ const Vertex = struct { }; const vertices = [_]Vertex{ - .{ .pos = .{ 0, -0.5 }, .color = .{ 1, 0, 0 } }, 
- .{ .pos = .{ 0.5, 0.5 }, .color = .{ 0, 1, 0 } }, - .{ .pos = .{ -0.5, 0.5 }, .color = .{ 0, 0, 1 } }, + // .{ .pos = .{ -0.5, -0.5 }, .color = .{ 1, 0, 0 } }, + // .{ .pos = .{ 0.5, 0.5 }, .color = .{ 0, 1, 0 } }, + // .{ .pos = .{ -0.5, 0.5 }, .color = .{ 0, 0, 1 } }, + + .{ .pos = .{ -0.5, -0.5 }, .color = .{ 1, 0, 0 } }, + .{ .pos = .{ -0.5, 0.5 }, .color = .{ 0, 1, 0 } }, + .{ .pos = .{ 0.5, -0.5 }, .color = .{ 0, 0, 1 } }, + .{ .pos = .{ 0.5, 0.5 }, .color = .{ 1, 1, 0 } }, }; +const Index = u16; + +const indices = [_]Index{ 0, 2, 1, 1, 2, 3 }; + pub fn main() !void { if (c.glfwInit() != c.GLFW_TRUE) return error.GlfwInitFailed; defer c.glfwTerminate(); @@ -96,24 +105,38 @@ pub fn main() !void { }, null); defer gc.vkd.destroyCommandPool(gc.dev, pool, null); - const buffer = try gc.vkd.createBuffer(gc.dev, &.{ + const vertex_buffer = try gc.vkd.createBuffer(gc.dev, &.{ .size = @sizeOf(@TypeOf(vertices)), .usage = .{ .transfer_dst_bit = true, .vertex_buffer_bit = true }, .sharing_mode = .exclusive, }, null); - defer gc.vkd.destroyBuffer(gc.dev, buffer, null); - const mem_reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, buffer); - const memory = try gc.allocate(mem_reqs, .{ .device_local_bit = true }); - defer gc.vkd.freeMemory(gc.dev, memory, null); - try gc.vkd.bindBufferMemory(gc.dev, buffer, memory, 0); + defer gc.vkd.destroyBuffer(gc.dev, vertex_buffer, null); + const vertex_mem_reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, vertex_buffer); + const vertex_memory = try gc.allocate(vertex_mem_reqs, .{ .device_local_bit = true }); + defer gc.vkd.freeMemory(gc.dev, vertex_memory, null); + try gc.vkd.bindBufferMemory(gc.dev, vertex_buffer, vertex_memory, 0); - try uploadVertices(&gc, pool, buffer); + try uploadData(Vertex, &gc, pool, vertex_buffer, &vertices); + + const index_buffer = try gc.vkd.createBuffer(gc.dev, &.{ + .size = @sizeOf(@TypeOf(indices)), + .usage = .{ .transfer_dst_bit = true, .index_buffer_bit = true }, + .sharing_mode = .exclusive, + }, null); + defer gc.vkd.destroyBuffer(gc.dev, index_buffer, null); + const index_mem_reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, index_buffer); + const index_memory = try gc.allocate(index_mem_reqs, .{ .device_local_bit = true }); + defer gc.vkd.freeMemory(gc.dev, index_memory, null); + try gc.vkd.bindBufferMemory(gc.dev, index_buffer, index_memory, 0); + + try uploadData(Index, &gc, pool, index_buffer, &indices); var cmdbufs = try createCommandBuffers( &gc, pool, allocator, - buffer, + vertex_buffer, + index_buffer, swapchain.extent, render_pass, pipeline, @@ -152,7 +175,8 @@ pub fn main() !void { &gc, pool, allocator, - buffer, + vertex_buffer, + index_buffer, swapchain.extent, render_pass, pipeline, @@ -167,27 +191,36 @@ pub fn main() !void { try gc.vkd.deviceWaitIdle(gc.dev); } -fn uploadVertices(gc: *const GraphicsContext, pool: vk.CommandPool, buffer: vk.Buffer) !void { +fn uploadData(comptime T: type, gc: *const GraphicsContext, pool: vk.CommandPool, buffer: vk.Buffer, source: []const T) !void { + // if (@typeInfo(T) == .Struct and @typeInfo(T).Struct.layout == .auto) @compileError("Requires defined T layout"); + + const size = @sizeOf(T) * source.len; + const staging_buffer = try gc.vkd.createBuffer(gc.dev, &.{ - .size = @sizeOf(@TypeOf(vertices)), + .size = size, .usage = .{ .transfer_src_bit = true }, .sharing_mode = .exclusive, }, null); defer gc.vkd.destroyBuffer(gc.dev, staging_buffer, null); + const mem_reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, staging_buffer); - const staging_memory = try 
gc.allocate(mem_reqs, .{ .host_visible_bit = true, .host_coherent_bit = true }); + const staging_memory = try gc.allocate(mem_reqs, .{ + .host_visible_bit = true, + .host_coherent_bit = true, + }); defer gc.vkd.freeMemory(gc.dev, staging_memory, null); + try gc.vkd.bindBufferMemory(gc.dev, staging_buffer, staging_memory, 0); { const data = try gc.vkd.mapMemory(gc.dev, staging_memory, 0, vk.WHOLE_SIZE, .{}); defer gc.vkd.unmapMemory(gc.dev, staging_memory); - const gpu_vertices: [*]Vertex = @ptrCast(@alignCast(data)); - @memcpy(gpu_vertices, vertices[0..]); + const dest: [*]T = @ptrCast(@alignCast(data)); + @memcpy(dest, source); } - try copyBuffer(gc, pool, buffer, staging_buffer, @sizeOf(@TypeOf(vertices))); + try copyBuffer(gc, pool, buffer, staging_buffer, size); } fn copyBuffer(gc: *const GraphicsContext, pool: vk.CommandPool, dst: vk.Buffer, src: vk.Buffer, size: vk.DeviceSize) !void { @@ -217,6 +250,9 @@ fn copyBuffer(gc: *const GraphicsContext, pool: vk.CommandPool, dst: vk.Buffer, .p_command_buffers = @ptrCast(&cmdbuf), .p_wait_dst_stage_mask = undefined, }; + // creating and submitting a queue for every copy operation seems a bad idea for "streamed" data + // gonna want a way to send a copy operation WITH SYNCHRONIZATION PRIMITIVES on a particular queue + // see https://stackoverflow.com/a/62183243 try gc.vkd.queueSubmit(gc.graphics_queue.handle, 1, @ptrCast(&si), .null_handle); try gc.vkd.queueWaitIdle(gc.graphics_queue.handle); } @@ -225,7 +261,8 @@ fn createCommandBuffers( gc: *const GraphicsContext, pool: vk.CommandPool, allocator: Allocator, - buffer: vk.Buffer, + vertex_buffer: vk.Buffer, + index_buffer: vk.Buffer, extent: vk.Extent2D, render_pass: vk.RenderPass, pipeline: vk.Pipeline, @@ -281,8 +318,10 @@ fn createCommandBuffers( gc.vkd.cmdBindPipeline(cmdbuf, .graphics, pipeline); const offset = [_]vk.DeviceSize{0}; - gc.vkd.cmdBindVertexBuffers(cmdbuf, 0, 1, @ptrCast(&buffer), &offset); - gc.vkd.cmdDraw(cmdbuf, vertices.len, 1, 0, 0); + gc.vkd.cmdBindVertexBuffers(cmdbuf, 0, 1, @ptrCast(&vertex_buffer), &offset); + gc.vkd.cmdBindIndexBuffer(cmdbuf, index_buffer, 0, .uint16); + gc.vkd.cmdDrawIndexed(cmdbuf, indices.len, 1, 0, 0, 0); + // gc.vkd.cmdDraw(cmdbuf, vertices.len, 1, 0, 0); gc.vkd.cmdEndRenderPass(cmdbuf); try gc.vkd.endCommandBuffer(cmdbuf); From 7bcc460d9bba4c8b643f143ed3cb37b72dab8e7f Mon Sep 17 00:00:00 2001 From: David Allemang Date: Wed, 27 Mar 2024 12:53:06 -0400 Subject: [PATCH 017/113] Revert "move vk.xml to src" This reverts commit 6763d9a73a36a4c5818c330213a6b507931abd23. 
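The uploadData helper added in the indexed-rendering patch memcpys `source` straight into mapped memory and leaves its layout check commented out. A standalone sketch of what such a comptime guard could look like (hypothetical helper name, using the same `.Struct`/`.auto` spelling as the commented-out line; not part of the patches):

    const std = @import("std");

    // Hypothetical guard: only types with a defined in-memory layout are safe to
    // memcpy into a GPU-visible buffer; a plain `struct` has unspecified field
    // order and padding, while an `extern struct` (like the now-extern Vertex)
    // does not.
    fn assertDefinedLayout(comptime T: type) void {
        switch (@typeInfo(T)) {
            .Struct => |info| {
                if (info.layout == .auto) {
                    @compileError("GPU upload needs an extern or packed struct layout");
                }
            },
            else => {},
        }
    }

    test "extern structs and plain integers pass the guard" {
        const Vertex = extern struct { pos: [2]f32, color: [3]f32 };
        comptime assertDefinedLayout(Vertex);
        comptime assertDefinedLayout(u16); // the Index type
    }

uploadData already takes comptime T, so a check like this could run entirely at compile time.
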
--- build.zig | 2 +- {src => reg}/vk.xml | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename {src => reg}/vk.xml (100%) diff --git a/build.zig b/build.zig index 1e6582a..bded306 100644 --- a/build.zig +++ b/build.zig @@ -7,7 +7,7 @@ pub fn build(b: *std.Build) void { const optimize = b.standardOptimizeOption(.{}); const vk = b.dependency("vulkan-zig", .{ - .registry = @as([]const u8, b.pathFromRoot("src/vk.xml")), + .registry = @as([]const u8, b.pathFromRoot("reg/vk.xml")), }); const vkmod = vk.module("vulkan-zig"); diff --git a/src/vk.xml b/reg/vk.xml similarity index 100% rename from src/vk.xml rename to reg/vk.xml From 2f70c43c00abddaa4cf64bc7e7903b71f308ef98 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Wed, 27 Mar 2024 14:05:44 -0400 Subject: [PATCH 018/113] switch to dynamic rendering --- src/graphics_context.zig | 11 +-- src/main.zig | 151 ++++++++++++--------------------------- 2 files changed, 51 insertions(+), 111 deletions(-) diff --git a/src/graphics_context.zig b/src/graphics_context.zig index 2c2cd30..a77e00f 100644 --- a/src/graphics_context.zig +++ b/src/graphics_context.zig @@ -3,7 +3,10 @@ const vk = @import("vk"); const c = @import("c.zig"); const Allocator = std.mem.Allocator; -const required_device_extensions = [_][*:0]const u8{vk.extension_info.khr_swapchain.name}; +const required_device_extensions = [_][*:0]const u8{ + vk.extension_info.khr_swapchain.name, + vk.extension_info.khr_dynamic_rendering.name, +}; const BaseDispatch = vk.BaseWrapper(.{ .createInstance = true, @@ -53,12 +56,8 @@ const DeviceDispatch = vk.DeviceWrapper(.{ .destroyShaderModule = true, .createPipelineLayout = true, .destroyPipelineLayout = true, - .createRenderPass = true, - .destroyRenderPass = true, .createGraphicsPipelines = true, .destroyPipeline = true, - .createFramebuffer = true, - .destroyFramebuffer = true, .beginCommandBuffer = true, .endCommandBuffer = true, .allocateMemory = true, @@ -79,6 +78,8 @@ const DeviceDispatch = vk.DeviceWrapper(.{ .cmdBindVertexBuffers = true, .cmdBindIndexBuffer = true, .cmdCopyBuffer = true, + .cmdBeginRenderingKHR = true, + .cmdEndRenderingKHR = true, }); pub const GraphicsContext = struct { diff --git a/src/main.zig b/src/main.zig index cce1c18..ac48b66 100644 --- a/src/main.zig +++ b/src/main.zig @@ -35,14 +35,10 @@ const Vertex = extern struct { }; const vertices = [_]Vertex{ - // .{ .pos = .{ -0.5, -0.5 }, .color = .{ 1, 0, 0 } }, - // .{ .pos = .{ 0.5, 0.5 }, .color = .{ 0, 1, 0 } }, - // .{ .pos = .{ -0.5, 0.5 }, .color = .{ 0, 0, 1 } }, - - .{ .pos = .{ -0.5, -0.5 }, .color = .{ 1, 0, 0 } }, - .{ .pos = .{ -0.5, 0.5 }, .color = .{ 0, 1, 0 } }, - .{ .pos = .{ 0.5, -0.5 }, .color = .{ 0, 0, 1 } }, - .{ .pos = .{ 0.5, 0.5 }, .color = .{ 1, 1, 0 } }, + .{ .pos = .{ -0.5, -0.5 }, .color = .{ 1, 0, 0 } }, + .{ .pos = .{ -0.5, 0.5 }, .color = .{ 0, 1, 0 } }, + .{ .pos = .{ 0.5, -0.5 }, .color = .{ 0, 0, 1 } }, + .{ .pos = .{ 0.5, 0.5 }, .color = .{ 1, 1, 0 } }, }; const Index = u16; @@ -91,15 +87,9 @@ pub fn main() !void { }, null); defer gc.vkd.destroyPipelineLayout(gc.dev, pipeline_layout, null); - const render_pass = try createRenderPass(&gc, swapchain); - defer gc.vkd.destroyRenderPass(gc.dev, render_pass, null); - - const pipeline = try createPipeline(&gc, pipeline_layout, render_pass); + const pipeline = try createPipeline(&gc, pipeline_layout, swapchain); defer gc.vkd.destroyPipeline(gc.dev, pipeline, null); - var framebuffers = try createFramebuffers(&gc, allocator, render_pass, swapchain); - defer destroyFramebuffers(&gc, allocator, 
framebuffers); - const pool = try gc.vkd.createCommandPool(gc.dev, &.{ .queue_family_index = gc.graphics_queue.family, }, null); @@ -137,10 +127,8 @@ pub fn main() !void { allocator, vertex_buffer, index_buffer, - swapchain.extent, - render_pass, pipeline, - framebuffers, + swapchain, ); defer destroyCommandBuffers(&gc, pool, allocator, cmdbufs); @@ -167,9 +155,6 @@ pub fn main() !void { extent.height = @intCast(h); try swapchain.recreate(extent); - destroyFramebuffers(&gc, allocator, framebuffers); - framebuffers = try createFramebuffers(&gc, allocator, render_pass, swapchain); - destroyCommandBuffers(&gc, pool, allocator, cmdbufs); cmdbufs = try createCommandBuffers( &gc, @@ -177,10 +162,8 @@ pub fn main() !void { allocator, vertex_buffer, index_buffer, - swapchain.extent, - render_pass, pipeline, - framebuffers, + swapchain, ); } @@ -263,12 +246,12 @@ fn createCommandBuffers( allocator: Allocator, vertex_buffer: vk.Buffer, index_buffer: vk.Buffer, - extent: vk.Extent2D, - render_pass: vk.RenderPass, pipeline: vk.Pipeline, - framebuffers: []vk.Framebuffer, + swapchain: Swapchain, ) ![]vk.CommandBuffer { - const cmdbufs = try allocator.alloc(vk.CommandBuffer, framebuffers.len); + const extent = swapchain.extent; + + const cmdbufs = try allocator.alloc(vk.CommandBuffer, swapchain.swap_images.len); errdefer allocator.free(cmdbufs); try gc.vkd.allocateCommandBuffers(gc.dev, &.{ @@ -296,34 +279,43 @@ fn createCommandBuffers( .extent = extent, }; - for (cmdbufs, framebuffers) |cmdbuf, framebuffer| { + for (cmdbufs, swapchain.swap_images) |cmdbuf, image| { try gc.vkd.beginCommandBuffer(cmdbuf, &.{}); gc.vkd.cmdSetViewport(cmdbuf, 0, 1, @ptrCast(&viewport)); gc.vkd.cmdSetScissor(cmdbuf, 0, 1, @ptrCast(&scissor)); - // This needs to be a separate definition - see https://github.com/ziglang/zig/issues/7627. 
- const render_area = vk.Rect2D{ - .offset = .{ .x = 0, .y = 0 }, - .extent = extent, + const color_attachments = [_]vk.RenderingAttachmentInfoKHR{ + .{ + .image_view = image.view, + .image_layout = .present_src_khr, + .resolve_mode = .{}, + .resolve_image_view = .null_handle, + .resolve_image_layout = .undefined, + .load_op = .clear, + .store_op = .store, + .clear_value = clear, + }, }; - gc.vkd.cmdBeginRenderPass(cmdbuf, &.{ - .render_pass = render_pass, - .framebuffer = framebuffer, - .render_area = render_area, - .clear_value_count = 1, - .p_clear_values = @as([*]const vk.ClearValue, @ptrCast(&clear)), - }, .@"inline"); + const render_info = vk.RenderingInfoKHR{ + .render_area = scissor, // since we always do full-frame changes + .layer_count = 1, + .view_mask = 0, + .color_attachment_count = color_attachments.len, + .p_color_attachments = &color_attachments, + }; + + gc.vkd.cmdBeginRenderingKHR(cmdbuf, &render_info); gc.vkd.cmdBindPipeline(cmdbuf, .graphics, pipeline); const offset = [_]vk.DeviceSize{0}; gc.vkd.cmdBindVertexBuffers(cmdbuf, 0, 1, @ptrCast(&vertex_buffer), &offset); gc.vkd.cmdBindIndexBuffer(cmdbuf, index_buffer, 0, .uint16); gc.vkd.cmdDrawIndexed(cmdbuf, indices.len, 1, 0, 0, 0); - // gc.vkd.cmdDraw(cmdbuf, vertices.len, 1, 0, 0); - gc.vkd.cmdEndRenderPass(cmdbuf); + gc.vkd.cmdEndRenderingKHR(cmdbuf); + try gc.vkd.endCommandBuffer(cmdbuf); } @@ -335,69 +327,7 @@ fn destroyCommandBuffers(gc: *const GraphicsContext, pool: vk.CommandPool, alloc allocator.free(cmdbufs); } -fn createFramebuffers(gc: *const GraphicsContext, allocator: Allocator, render_pass: vk.RenderPass, swapchain: Swapchain) ![]vk.Framebuffer { - const framebuffers = try allocator.alloc(vk.Framebuffer, swapchain.swap_images.len); - errdefer allocator.free(framebuffers); - - var i: usize = 0; - errdefer for (framebuffers[0..i]) |fb| gc.vkd.destroyFramebuffer(gc.dev, fb, null); - - for (framebuffers) |*fb| { - fb.* = try gc.vkd.createFramebuffer(gc.dev, &.{ - .render_pass = render_pass, - .attachment_count = 1, - .p_attachments = @as([*]const vk.ImageView, @ptrCast(&swapchain.swap_images[i].view)), - .width = swapchain.extent.width, - .height = swapchain.extent.height, - .layers = 1, - }, null); - i += 1; - } - - return framebuffers; -} - -fn destroyFramebuffers(gc: *const GraphicsContext, allocator: Allocator, framebuffers: []const vk.Framebuffer) void { - for (framebuffers) |fb| gc.vkd.destroyFramebuffer(gc.dev, fb, null); - allocator.free(framebuffers); -} - -fn createRenderPass(gc: *const GraphicsContext, swapchain: Swapchain) !vk.RenderPass { - const color_attachment = vk.AttachmentDescription{ - .format = swapchain.surface_format.format, - .samples = .{ .@"1_bit" = true }, - .load_op = .clear, - .store_op = .store, - .stencil_load_op = .dont_care, - .stencil_store_op = .dont_care, - .initial_layout = .undefined, - .final_layout = .present_src_khr, - }; - - const color_attachment_ref = vk.AttachmentReference{ - .attachment = 0, - .layout = .color_attachment_optimal, - }; - - const subpass = vk.SubpassDescription{ - .pipeline_bind_point = .graphics, - .color_attachment_count = 1, - .p_color_attachments = @ptrCast(&color_attachment_ref), - }; - - return try gc.vkd.createRenderPass(gc.dev, &.{ - .attachment_count = 1, - .p_attachments = @as([*]const vk.AttachmentDescription, @ptrCast(&color_attachment)), - .subpass_count = 1, - .p_subpasses = @as([*]const vk.SubpassDescription, @ptrCast(&subpass)), - }, null); -} - -fn createPipeline( - gc: *const GraphicsContext, - layout: vk.PipelineLayout, - 
render_pass: vk.RenderPass, -) !vk.Pipeline { +fn createPipeline(gc: *const GraphicsContext, layout: vk.PipelineLayout, swapchain: Swapchain) !vk.Pipeline { const vert = try gc.vkd.createShaderModule(gc.dev, &.{ .code_size = shaders.triangle_vert.len, .p_code = @as([*]const u32, @ptrCast(&shaders.triangle_vert)), @@ -489,6 +419,14 @@ fn createPipeline( .p_dynamic_states = &dynstate, }; + const prci = vk.PipelineRenderingCreateInfoKHR{ + .color_attachment_count = 1, + .p_color_attachment_formats = @ptrCast(&swapchain.surface_format.format), + .depth_attachment_format = .undefined, + .stencil_attachment_format = .undefined, + .view_mask = 0, + }; + const gpci = vk.GraphicsPipelineCreateInfo{ .flags = .{}, .stage_count = 2, @@ -503,10 +441,11 @@ fn createPipeline( .p_color_blend_state = &pcbsci, .p_dynamic_state = &pdsci, .layout = layout, - .render_pass = render_pass, + .render_pass = .null_handle, .subpass = 0, .base_pipeline_handle = .null_handle, .base_pipeline_index = -1, + .p_next = &prci, }; var pipeline: vk.Pipeline = undefined; From d4196644df3122a38752d928e7030c6492ac73fe Mon Sep 17 00:00:00 2001 From: David Allemang Date: Wed, 27 Mar 2024 17:40:39 -0400 Subject: [PATCH 019/113] vector math experiments --- box.zig | 29 +++++++++++++++++ vecs.zig | 97 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 126 insertions(+) create mode 100644 box.zig create mode 100644 vecs.zig diff --git a/box.zig b/box.zig new file mode 100644 index 0000000..aa17978 --- /dev/null +++ b/box.zig @@ -0,0 +1,29 @@ +const std = @import("std"); + +pub fn Box(comptime T: type) type { + return struct { + val: T, + }; +} + +pub fn AddBoxType(comptime LHS: type, comptime RHS: type) type { + const x = std.mem.zeroes(std.meta.FieldType(LHS, .val)); + const y = std.mem.zeroes(std.meta.FieldType(RHS, .val)); + return Box(@TypeOf(x + y)); +} + +pub fn addbox(lhs: anytype, rhs: anytype) AddBoxType(@TypeOf(lhs), @TypeOf(rhs)) { + return .{ .val = lhs.val + rhs.val }; +} + +test { + std.testing.refAllDecls(@This()); +} + +test "widen" { + const foo: Box(u8) = .{ .val = 99 }; + const bar: Box(u16) = .{ .val = 599 }; + const actual = addbox(foo, bar); + const expected: Box(u16) = .{ .val = 698 }; + try std.testing.expectEqual(expected, actual); +} diff --git a/vecs.zig b/vecs.zig new file mode 100644 index 0000000..c1ce0b1 --- /dev/null +++ b/vecs.zig @@ -0,0 +1,97 @@ +const std = @import("std"); + +const mat4f = mat(4, 4, f32); +const vec4f = mat(4, 1, f32); + +const mat4i = mat(4, 4, i32); +const vec4i = mat(4, 1, i32); + +const mat4u = mat(4, 4, u32); +const vec4u = mat(4, 1, u32); + +pub fn mat(comptime R_: usize, comptime C_: usize, comptime T_: type) type { + return struct { + pub const Rows = R_; + pub const Cols = C_; + pub const T = T_; + + data: [Cols][Rows]T, + + pub fn mul(l: @This(), r: anytype) MatMulReturnType(@This(), @TypeOf(r)) { + return matmul(l, r); + } + }; +} + +fn MatMulReturnType(comptime L: type, comptime R: type) type { + if (L.Cols != R.Rows) @compileError("invalid dimensions"); + + const x: L.T = std.mem.zeroes(L.T); + const y: R.T = std.mem.zeroes(R.T); + const T = @TypeOf(x + y); + + return mat(L.Rows, R.Cols, T); +} + +pub fn matmul(lhs: anytype, rhs: anytype) MatMulReturnType(@TypeOf(lhs), @TypeOf(rhs)) { + @setFloatMode(.optimized); + + const L = @TypeOf(lhs); + const R = @TypeOf(rhs); + const Ret = MatMulReturnType(L, R); + + var res = std.mem.zeroes(Ret); + + if (L.Cols != R.Rows) @compileError("invalid dimensions"); + + inline for (0..R.Cols) |col| { + inline for 
(0..L.Rows) |row| { + inline for (0..L.Cols) |k| { + res.data[col][row] += lhs.data[k][row] * rhs.data[col][k]; + } + } + } + + return res; +} + +export fn c_matmul_f_4x4_1x4(lhs: *const anyopaque, rhs: *const anyopaque, out: *anyopaque) void { + const l: *const mat4f = @alignCast(@ptrCast(lhs)); + const r: *const vec4f = @alignCast(@ptrCast(rhs)); + const o: *vec4f = @alignCast(@ptrCast(out)); + + o.* = matmul(l.*, r.*); +} + +export fn c_matmul_i_4x4_1x4(lhs: *const anyopaque, rhs: *const anyopaque, out: *anyopaque) void { + const l: *const mat4i = @alignCast(@ptrCast(lhs)); + const r: *const vec4i = @alignCast(@ptrCast(rhs)); + const o: *vec4i = @alignCast(@ptrCast(out)); + + o.* = matmul(l.*, r.*); +} + +export fn c_matmul_u_4x4_1x4(lhs: *const anyopaque, rhs: *const anyopaque, out: *anyopaque) void { + const l: *const mat4u = @alignCast(@ptrCast(lhs)); + const r: *const vec4u = @alignCast(@ptrCast(rhs)); + const o: *vec4u = @alignCast(@ptrCast(out)); + + o.* = matmul(l.*, r.*); +} + +test "matmul" { + // note, column major; it's transposed. + const m: mat4u = .{ .data = .{ + .{ 85, 84, 87, 37 }, + .{ 33, 54, 49, 83 }, + .{ 96, 97, 3, 13 }, + .{ 69, 12, 45, 77 }, + } }; + const u: vec4u = .{ .data = .{.{ 37, 69, 94, 87 }} }; + + const actual: vec4u = matmul(m, u); + + const expect: vec4u = .{ .data = .{.{ 20449, 16996, 10797, 15017 }} }; + + try std.testing.expectEqualDeep(expect, actual); +} From 0f840a642282d1dd3515396a8a2279a4ef63a214 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Thu, 28 Mar 2024 14:30:02 -0400 Subject: [PATCH 020/113] vec4 and indices --- src/graphics_context.zig | 2 +- src/main.zig | 29 ++++++++++++++++++----------- src/shaders/triangle.vert | 4 ++-- 3 files changed, 21 insertions(+), 14 deletions(-) diff --git a/src/graphics_context.zig b/src/graphics_context.zig index a77e00f..45e3b75 100644 --- a/src/graphics_context.zig +++ b/src/graphics_context.zig @@ -109,7 +109,7 @@ pub const GraphicsContext = struct { .application_version = vk.makeApiVersion(0, 0, 0, 0), .p_engine_name = app_name, .engine_version = vk.makeApiVersion(0, 0, 0, 0), - .api_version = vk.API_VERSION_1_2, + .api_version = vk.API_VERSION_1_3, }; self.instance = try self.vkb.createInstance(&.{ diff --git a/src/main.zig b/src/main.zig index ac48b66..cef4a9d 100644 --- a/src/main.zig +++ b/src/main.zig @@ -19,7 +19,7 @@ const Vertex = extern struct { .{ .binding = 0, .location = 0, - .format = .r32g32_sfloat, + .format = .r32g32b32a32_sfloat, .offset = @offsetOf(Vertex, "pos"), }, .{ @@ -30,20 +30,25 @@ const Vertex = extern struct { }, }; - pos: [2]f32, + pos: [4]f32, color: [3]f32, }; -const vertices = [_]Vertex{ - .{ .pos = .{ -0.5, -0.5 }, .color = .{ 1, 0, 0 } }, - .{ .pos = .{ -0.5, 0.5 }, .color = .{ 0, 1, 0 } }, - .{ .pos = .{ 0.5, -0.5 }, .color = .{ 0, 0, 1 } }, - .{ .pos = .{ 0.5, 0.5 }, .color = .{ 1, 1, 0 } }, -}; - const Index = u16; -const indices = [_]Index{ 0, 2, 1, 1, 2, 3 }; +const vertices = [_]Vertex{ + // Vulkan depth range is 0, 1 instead of OpenGL -1, 1 + .{ .pos = .{ -0.5, -0.5, -0.5, 1.0 }, .color = .{ 1, 0, 0 } }, + .{ .pos = .{ -0.5, 0.5, -0.5, 1.0 }, .color = .{ 0, 1, 0 } }, + .{ .pos = .{ 0.5, -0.5, -0.5, 1.0 }, .color = .{ 0, 0, 1 } }, + .{ .pos = .{ 0.5, 0.5, -0.5, 1.0 }, .color = .{ 1, 1, 0 } }, + .{ .pos = .{ -0.5, -0.5, 0.5, 1.0 }, .color = .{ 1, 0, 0 } }, + .{ .pos = .{ -0.5, 0.5, 0.5, 1.0 }, .color = .{ 0, 1, 0 } }, + .{ .pos = .{ 0.5, -0.5, 0.5, 1.0 }, .color = .{ 0, 0, 1 } }, + .{ .pos = .{ 0.5, 0.5, 0.5, 1.0 }, .color = .{ 1, 1, 0 } }, +}; + +const 
indices = [_]Index{ 4, 5, 6, 6, 5, 7 }; pub fn main() !void { if (c.glfwInit() != c.GLFW_TRUE) return error.GlfwInitFailed; @@ -56,6 +61,8 @@ pub fn main() !void { var extent = vk.Extent2D{ .width = 800, .height = 600 }; + c.glfwWindowHintString(c.GLFW_X11_CLASS_NAME, "floating_window"); + c.glfwWindowHintString(c.GLFW_X11_INSTANCE_NAME, "floating_window"); c.glfwWindowHint(c.GLFW_CLIENT_API, c.GLFW_NO_API); const window = c.glfwCreateWindow( @intCast(extent.width), @@ -377,7 +384,7 @@ fn createPipeline(gc: *const GraphicsContext, layout: vk.PipelineLayout, swapcha .rasterizer_discard_enable = vk.FALSE, .polygon_mode = .fill, .cull_mode = .{ .back_bit = true }, - .front_face = .clockwise, + .front_face = .counter_clockwise, .depth_bias_enable = vk.FALSE, .depth_bias_constant_factor = 0, .depth_bias_clamp = 0, diff --git a/src/shaders/triangle.vert b/src/shaders/triangle.vert index 2b8dfa5..f820750 100644 --- a/src/shaders/triangle.vert +++ b/src/shaders/triangle.vert @@ -1,11 +1,11 @@ #version 450 -layout(location = 0) in vec2 a_pos; +layout(location = 0) in vec4 a_pos; layout(location = 1) in vec3 a_color; layout(location = 0) out vec3 v_color; void main() { - gl_Position = vec4(a_pos, 0.0, 1.0); + gl_Position = a_pos; v_color = a_color; } From 282e85db24d438292fa2717ad8604b26290b4487 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Thu, 28 Mar 2024 16:06:50 -0400 Subject: [PATCH 021/113] graphics_context incremental teardown --- box.zig | 29 ---------- src/gfx.zig | 75 ++++++++++++++++++++++++ src/gfx/Context.zig | 6 ++ src/graphics_context.zig | 120 +++++---------------------------------- src/main.zig | 75 +++++++++++++++++++----- vecs.zig | 97 ------------------------------- 6 files changed, 155 insertions(+), 247 deletions(-) delete mode 100644 box.zig create mode 100644 src/gfx.zig create mode 100644 src/gfx/Context.zig delete mode 100644 vecs.zig diff --git a/box.zig b/box.zig deleted file mode 100644 index aa17978..0000000 --- a/box.zig +++ /dev/null @@ -1,29 +0,0 @@ -const std = @import("std"); - -pub fn Box(comptime T: type) type { - return struct { - val: T, - }; -} - -pub fn AddBoxType(comptime LHS: type, comptime RHS: type) type { - const x = std.mem.zeroes(std.meta.FieldType(LHS, .val)); - const y = std.mem.zeroes(std.meta.FieldType(RHS, .val)); - return Box(@TypeOf(x + y)); -} - -pub fn addbox(lhs: anytype, rhs: anytype) AddBoxType(@TypeOf(lhs), @TypeOf(rhs)) { - return .{ .val = lhs.val + rhs.val }; -} - -test { - std.testing.refAllDecls(@This()); -} - -test "widen" { - const foo: Box(u8) = .{ .val = 99 }; - const bar: Box(u16) = .{ .val = 599 }; - const actual = addbox(foo, bar); - const expected: Box(u16) = .{ .val = 698 }; - try std.testing.expectEqual(expected, actual); -} diff --git a/src/gfx.zig b/src/gfx.zig new file mode 100644 index 0000000..260368c --- /dev/null +++ b/src/gfx.zig @@ -0,0 +1,75 @@ +const vk = @import("vk"); + +pub const BaseDispatch = vk.BaseWrapper(.{ + .createInstance = true, + .getInstanceProcAddr = true, +}); + +pub const InstanceDispatch = vk.InstanceWrapper(.{ + .destroyInstance = true, + .createDevice = true, + .destroySurfaceKHR = true, + .enumeratePhysicalDevices = true, + .getPhysicalDeviceProperties = true, + .enumerateDeviceExtensionProperties = true, + .getPhysicalDeviceSurfaceFormatsKHR = true, + .getPhysicalDeviceSurfacePresentModesKHR = true, + .getPhysicalDeviceSurfaceCapabilitiesKHR = true, + .getPhysicalDeviceQueueFamilyProperties = true, + .getPhysicalDeviceSurfaceSupportKHR = true, + .getPhysicalDeviceMemoryProperties = 
true, + .getDeviceProcAddr = true, +}); + +pub const DeviceDispatch = vk.DeviceWrapper(.{ + .destroyDevice = true, + .getDeviceQueue = true, + .createSemaphore = true, + .createFence = true, + .createImageView = true, + .destroyImageView = true, + .destroySemaphore = true, + .destroyFence = true, + .getSwapchainImagesKHR = true, + .createSwapchainKHR = true, + .destroySwapchainKHR = true, + .acquireNextImageKHR = true, + .deviceWaitIdle = true, + .waitForFences = true, + .resetFences = true, + .queueSubmit = true, + .queuePresentKHR = true, + .createCommandPool = true, + .destroyCommandPool = true, + .allocateCommandBuffers = true, + .freeCommandBuffers = true, + .queueWaitIdle = true, + .createShaderModule = true, + .destroyShaderModule = true, + .createPipelineLayout = true, + .destroyPipelineLayout = true, + .createGraphicsPipelines = true, + .destroyPipeline = true, + .beginCommandBuffer = true, + .endCommandBuffer = true, + .allocateMemory = true, + .freeMemory = true, + .createBuffer = true, + .destroyBuffer = true, + .getBufferMemoryRequirements = true, + .mapMemory = true, + .unmapMemory = true, + .bindBufferMemory = true, + .cmdBeginRenderPass = true, + .cmdEndRenderPass = true, + .cmdBindPipeline = true, + .cmdDraw = true, + .cmdDrawIndexed = true, + .cmdSetViewport = true, + .cmdSetScissor = true, + .cmdBindVertexBuffers = true, + .cmdBindIndexBuffer = true, + .cmdCopyBuffer = true, + .cmdBeginRenderingKHR = true, + .cmdEndRenderingKHR = true, +}); diff --git a/src/gfx/Context.zig b/src/gfx/Context.zig new file mode 100644 index 0000000..7c53be3 --- /dev/null +++ b/src/gfx/Context.zig @@ -0,0 +1,6 @@ +const std = @import("std"); +const vk = @import("vk"); + +const d = @import("dispatch.zig"); + +const Self = @This(); diff --git a/src/graphics_context.zig b/src/graphics_context.zig index 45e3b75..959da98 100644 --- a/src/graphics_context.zig +++ b/src/graphics_context.zig @@ -3,91 +3,21 @@ const vk = @import("vk"); const c = @import("c.zig"); const Allocator = std.mem.Allocator; +const gfx = @import("gfx.zig"); + +const BaseDispatch = gfx.BaseDispatch; +const InstanceDispatch = gfx.InstanceDispatch; +const DeviceDispatch = gfx.DeviceDispatch; + const required_device_extensions = [_][*:0]const u8{ vk.extension_info.khr_swapchain.name, vk.extension_info.khr_dynamic_rendering.name, }; -const BaseDispatch = vk.BaseWrapper(.{ - .createInstance = true, - .getInstanceProcAddr = true, -}); - -const InstanceDispatch = vk.InstanceWrapper(.{ - .destroyInstance = true, - .createDevice = true, - .destroySurfaceKHR = true, - .enumeratePhysicalDevices = true, - .getPhysicalDeviceProperties = true, - .enumerateDeviceExtensionProperties = true, - .getPhysicalDeviceSurfaceFormatsKHR = true, - .getPhysicalDeviceSurfacePresentModesKHR = true, - .getPhysicalDeviceSurfaceCapabilitiesKHR = true, - .getPhysicalDeviceQueueFamilyProperties = true, - .getPhysicalDeviceSurfaceSupportKHR = true, - .getPhysicalDeviceMemoryProperties = true, - .getDeviceProcAddr = true, -}); - -const DeviceDispatch = vk.DeviceWrapper(.{ - .destroyDevice = true, - .getDeviceQueue = true, - .createSemaphore = true, - .createFence = true, - .createImageView = true, - .destroyImageView = true, - .destroySemaphore = true, - .destroyFence = true, - .getSwapchainImagesKHR = true, - .createSwapchainKHR = true, - .destroySwapchainKHR = true, - .acquireNextImageKHR = true, - .deviceWaitIdle = true, - .waitForFences = true, - .resetFences = true, - .queueSubmit = true, - .queuePresentKHR = true, - .createCommandPool = true, - 
.destroyCommandPool = true, - .allocateCommandBuffers = true, - .freeCommandBuffers = true, - .queueWaitIdle = true, - .createShaderModule = true, - .destroyShaderModule = true, - .createPipelineLayout = true, - .destroyPipelineLayout = true, - .createGraphicsPipelines = true, - .destroyPipeline = true, - .beginCommandBuffer = true, - .endCommandBuffer = true, - .allocateMemory = true, - .freeMemory = true, - .createBuffer = true, - .destroyBuffer = true, - .getBufferMemoryRequirements = true, - .mapMemory = true, - .unmapMemory = true, - .bindBufferMemory = true, - .cmdBeginRenderPass = true, - .cmdEndRenderPass = true, - .cmdBindPipeline = true, - .cmdDraw = true, - .cmdDrawIndexed = true, - .cmdSetViewport = true, - .cmdSetScissor = true, - .cmdBindVertexBuffers = true, - .cmdBindIndexBuffer = true, - .cmdCopyBuffer = true, - .cmdBeginRenderingKHR = true, - .cmdEndRenderingKHR = true, -}); - pub const GraphicsContext = struct { - vkb: BaseDispatch, vki: InstanceDispatch, vkd: DeviceDispatch, - instance: vk.Instance, surface: vk.SurfaceKHR, pdev: vk.PhysicalDevice, props: vk.PhysicalDeviceProperties, @@ -97,52 +27,28 @@ pub const GraphicsContext = struct { graphics_queue: Queue, present_queue: Queue, - pub fn init(allocator: Allocator, app_name: [*:0]const u8, window: *c.GLFWwindow) !GraphicsContext { + pub fn init(allocator: Allocator, instance: vk.Instance, surface: vk.SurfaceKHR, vki: InstanceDispatch) !GraphicsContext { var self: GraphicsContext = undefined; - self.vkb = try BaseDispatch.load(c.glfwGetInstanceProcAddress); + self.vki = vki; + self.surface = surface; - var glfw_exts_count: u32 = 0; - const glfw_exts = c.glfwGetRequiredInstanceExtensions(&glfw_exts_count); - - const app_info = vk.ApplicationInfo{ - .p_application_name = app_name, - .application_version = vk.makeApiVersion(0, 0, 0, 0), - .p_engine_name = app_name, - .engine_version = vk.makeApiVersion(0, 0, 0, 0), - .api_version = vk.API_VERSION_1_3, - }; - - self.instance = try self.vkb.createInstance(&.{ - .p_application_info = &app_info, - .enabled_extension_count = glfw_exts_count, - .pp_enabled_extension_names = @as([*]const [*:0]const u8, @ptrCast(glfw_exts)), - }, null); - - self.vki = try InstanceDispatch.load(self.instance, self.vkb.dispatch.vkGetInstanceProcAddr); - errdefer self.vki.destroyInstance(self.instance, null); - - self.surface = try createSurface(self.instance, window); - errdefer self.vki.destroySurfaceKHR(self.instance, self.surface, null); - - const candidate = try pickPhysicalDevice(self.vki, self.instance, allocator, self.surface); + const candidate = try pickPhysicalDevice(vki, instance, allocator, surface); self.pdev = candidate.pdev; self.props = candidate.props; - self.dev = try initializeCandidate(self.vki, candidate); - self.vkd = try DeviceDispatch.load(self.dev, self.vki.dispatch.vkGetDeviceProcAddr); + self.dev = try initializeCandidate(vki, candidate); + self.vkd = try DeviceDispatch.load(self.dev, vki.dispatch.vkGetDeviceProcAddr); errdefer self.vkd.destroyDevice(self.dev, null); self.graphics_queue = Queue.init(self.vkd, self.dev, candidate.queues.graphics_family); self.present_queue = Queue.init(self.vkd, self.dev, candidate.queues.present_family); - self.mem_props = self.vki.getPhysicalDeviceMemoryProperties(self.pdev); + self.mem_props = vki.getPhysicalDeviceMemoryProperties(self.pdev); return self; } pub fn deinit(self: GraphicsContext) void { self.vkd.destroyDevice(self.dev, null); - self.vki.destroySurfaceKHR(self.instance, self.surface, null); - 
self.vki.destroyInstance(self.instance, null); } pub fn deviceName(self: *const GraphicsContext) []const u8 { diff --git a/src/main.zig b/src/main.zig index cef4a9d..30abb74 100644 --- a/src/main.zig +++ b/src/main.zig @@ -6,6 +6,8 @@ const GraphicsContext = @import("graphics_context.zig").GraphicsContext; const Swapchain = @import("swapchain.zig").Swapchain; const Allocator = std.mem.Allocator; +const gfx = @import("gfx.zig"); + const app_name = "vulkan-zig triangle example"; const Vertex = extern struct { @@ -50,7 +52,57 @@ const vertices = [_]Vertex{ const indices = [_]Index{ 4, 5, 6, 6, 5, 7 }; +/// note: destroy with vki.destroyInstance(instance, null) +fn create_instance(vkb: gfx.BaseDispatch) !std.meta.Tuple(&.{ vk.Instance, gfx.InstanceDispatch }) { + var glfw_exts_count: u32 = 0; + const glfw_exts = c.glfwGetRequiredInstanceExtensions(&glfw_exts_count); + + const instance = try vkb.createInstance(&vk.InstanceCreateInfo{ + .p_application_info = &vk.ApplicationInfo{ + .p_application_name = app_name, + .application_version = vk.makeApiVersion(0, 0, 0, 0), + .p_engine_name = app_name, + .engine_version = vk.makeApiVersion(0, 0, 0, 0), + .api_version = vk.API_VERSION_1_3, + }, + .enabled_extension_count = glfw_exts_count, + .pp_enabled_extension_names = @ptrCast(glfw_exts), + }, null); + + const vki = try gfx.InstanceDispatch.load(instance, vkb.dispatch.vkGetInstanceProcAddr); + + return .{ instance, vki }; +} + +/// note: destroy with vki.destroySurfaceKHR(instance, surface, null) +fn create_surface(instance: vk.Instance, window: *c.GLFWwindow) !vk.SurfaceKHR { + var surface: vk.SurfaceKHR = undefined; + if (c.glfwCreateWindowSurface(instance, window, null, &surface) != .success) { + return error.SurfaceInitFailed; + } + return surface; +} + +/// note: destroy with c.glfwDestroyWindow(window) +fn create_window(extent: vk.Extent2D, title: [*:0]const u8) !*c.GLFWwindow { + c.glfwWindowHintString(c.GLFW_X11_CLASS_NAME, "floating_window"); + c.glfwWindowHintString(c.GLFW_X11_INSTANCE_NAME, "floating_window"); + c.glfwWindowHint(c.GLFW_CLIENT_API, c.GLFW_NO_API); + + return c.glfwCreateWindow( + @intCast(extent.width), + @intCast(extent.height), + title, + null, + null, + ) orelse error.WindowInitFailed; +} + pub fn main() !void { + var gpa = std.heap.GeneralPurposeAllocator(.{}){}; + defer _ = gpa.deinit(); + const allocator = gpa.allocator(); + if (c.glfwInit() != c.GLFW_TRUE) return error.GlfwInitFailed; defer c.glfwTerminate(); @@ -61,23 +113,18 @@ pub fn main() !void { var extent = vk.Extent2D{ .width = 800, .height = 600 }; - c.glfwWindowHintString(c.GLFW_X11_CLASS_NAME, "floating_window"); - c.glfwWindowHintString(c.GLFW_X11_INSTANCE_NAME, "floating_window"); - c.glfwWindowHint(c.GLFW_CLIENT_API, c.GLFW_NO_API); - const window = c.glfwCreateWindow( - @intCast(extent.width), - @intCast(extent.height), - app_name, - null, - null, - ) orelse return error.WindowInitFailed; + const window = try create_window(extent, app_name); defer c.glfwDestroyWindow(window); - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; - defer _ = gpa.deinit(); - const allocator = gpa.allocator(); + const vkb = try gfx.BaseDispatch.load(c.glfwGetInstanceProcAddress); - const gc = try GraphicsContext.init(allocator, app_name, window); + const instance, const vki = try create_instance(vkb); + defer vki.destroyInstance(instance, null); + + const surface = try create_surface(instance, window); + defer vki.destroySurfaceKHR(instance, surface, null); + + const gc = try GraphicsContext.init(allocator, instance, 
surface, vki); defer gc.deinit(); std.log.debug("Using device: {s}", .{gc.deviceName()}); diff --git a/vecs.zig b/vecs.zig deleted file mode 100644 index c1ce0b1..0000000 --- a/vecs.zig +++ /dev/null @@ -1,97 +0,0 @@ -const std = @import("std"); - -const mat4f = mat(4, 4, f32); -const vec4f = mat(4, 1, f32); - -const mat4i = mat(4, 4, i32); -const vec4i = mat(4, 1, i32); - -const mat4u = mat(4, 4, u32); -const vec4u = mat(4, 1, u32); - -pub fn mat(comptime R_: usize, comptime C_: usize, comptime T_: type) type { - return struct { - pub const Rows = R_; - pub const Cols = C_; - pub const T = T_; - - data: [Cols][Rows]T, - - pub fn mul(l: @This(), r: anytype) MatMulReturnType(@This(), @TypeOf(r)) { - return matmul(l, r); - } - }; -} - -fn MatMulReturnType(comptime L: type, comptime R: type) type { - if (L.Cols != R.Rows) @compileError("invalid dimensions"); - - const x: L.T = std.mem.zeroes(L.T); - const y: R.T = std.mem.zeroes(R.T); - const T = @TypeOf(x + y); - - return mat(L.Rows, R.Cols, T); -} - -pub fn matmul(lhs: anytype, rhs: anytype) MatMulReturnType(@TypeOf(lhs), @TypeOf(rhs)) { - @setFloatMode(.optimized); - - const L = @TypeOf(lhs); - const R = @TypeOf(rhs); - const Ret = MatMulReturnType(L, R); - - var res = std.mem.zeroes(Ret); - - if (L.Cols != R.Rows) @compileError("invalid dimensions"); - - inline for (0..R.Cols) |col| { - inline for (0..L.Rows) |row| { - inline for (0..L.Cols) |k| { - res.data[col][row] += lhs.data[k][row] * rhs.data[col][k]; - } - } - } - - return res; -} - -export fn c_matmul_f_4x4_1x4(lhs: *const anyopaque, rhs: *const anyopaque, out: *anyopaque) void { - const l: *const mat4f = @alignCast(@ptrCast(lhs)); - const r: *const vec4f = @alignCast(@ptrCast(rhs)); - const o: *vec4f = @alignCast(@ptrCast(out)); - - o.* = matmul(l.*, r.*); -} - -export fn c_matmul_i_4x4_1x4(lhs: *const anyopaque, rhs: *const anyopaque, out: *anyopaque) void { - const l: *const mat4i = @alignCast(@ptrCast(lhs)); - const r: *const vec4i = @alignCast(@ptrCast(rhs)); - const o: *vec4i = @alignCast(@ptrCast(out)); - - o.* = matmul(l.*, r.*); -} - -export fn c_matmul_u_4x4_1x4(lhs: *const anyopaque, rhs: *const anyopaque, out: *anyopaque) void { - const l: *const mat4u = @alignCast(@ptrCast(lhs)); - const r: *const vec4u = @alignCast(@ptrCast(rhs)); - const o: *vec4u = @alignCast(@ptrCast(out)); - - o.* = matmul(l.*, r.*); -} - -test "matmul" { - // note, column major; it's transposed. 
- const m: mat4u = .{ .data = .{ - .{ 85, 84, 87, 37 }, - .{ 33, 54, 49, 83 }, - .{ 96, 97, 3, 13 }, - .{ 69, 12, 45, 77 }, - } }; - const u: vec4u = .{ .data = .{.{ 37, 69, 94, 87 }} }; - - const actual: vec4u = matmul(m, u); - - const expect: vec4u = .{ .data = .{.{ 20449, 16996, 10797, 15017 }} }; - - try std.testing.expectEqualDeep(expect, actual); -} From 84167b7bd373146c1199ce34be1491530f630253 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Thu, 28 Mar 2024 17:31:45 -0400 Subject: [PATCH 022/113] wip: extract create_device() but do not use yet --- src/main.zig | 115 ++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 114 insertions(+), 1 deletion(-) diff --git a/src/main.zig b/src/main.zig index 30abb74..4e2eda3 100644 --- a/src/main.zig +++ b/src/main.zig @@ -52,8 +52,10 @@ const vertices = [_]Vertex{ const indices = [_]Index{ 4, 5, 6, 6, 5, 7 }; +const InstancePair = std.meta.Tuple(&.{ vk.Instance, gfx.InstanceDispatch }); + /// note: destroy with vki.destroyInstance(instance, null) -fn create_instance(vkb: gfx.BaseDispatch) !std.meta.Tuple(&.{ vk.Instance, gfx.InstanceDispatch }) { +fn create_instance(vkb: gfx.BaseDispatch) !InstancePair { var glfw_exts_count: u32 = 0; const glfw_exts = c.glfwGetRequiredInstanceExtensions(&glfw_exts_count); @@ -98,6 +100,117 @@ fn create_window(extent: vk.Extent2D, title: [*:0]const u8) !*c.GLFWwindow { ) orelse error.WindowInitFailed; } +const DevicePair = struct { + pdev: vk.PhysicalDevice, + dev: vk.Device, + graphics: vk.Queue, + present: vk.Queue, + present_sharing_mode: vk.SharingMode, +}; + +fn create_device(ally: std.mem.Allocator, instance: vk.Instance, vki: gfx.InstanceDispatch, surface: vk.SurfaceKHR) !DevicePair { + const required_device_extensions = [_][*:0]const u8{ + vk.extension_info.khr_swapchain.name, + vk.extension_info.khr_dynamic_rendering.name, + }; + + var pdev_count: u32 = undefined; + + _ = try vki.enumeratePhysicalDevices(instance, &pdev_count, null); + const pdevs = try ally.alloc(vk.PhysicalDevice, pdev_count); + defer ally.free(pdevs); + _ = try vki.enumeratePhysicalDevices(instance, &pdev_count, pdevs); + + pdev_search: for (pdevs[0..pdev_count]) |pdev| { + // const props = vki.getPhysicalDeviceProperties(pdev); + // const feats = vki.getPhysicalDeviceFeatures(pdev); + + var ext_prop_count: u32 = undefined; + _ = try vki.enumerateDeviceExtensionProperties(pdev, null, &ext_prop_count, null); + const ext_props = try ally.alloc(vk.ExtensionProperties, ext_prop_count); + defer ally.free(ext_props); + _ = try vki.enumerateDeviceExtensionProperties(pdev, null, &ext_prop_count, ext_props); + + for (required_device_extensions) |required| { + for (ext_props) |prop| { + if (std.mem.eql(u8, std.mem.span(required), std.mem.sliceTo(&prop.extension_name, 0))) { + break; + } + } else { + continue :pdev_search; + } + } + + var format_count: u32 = undefined; + _ = try vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &format_count, null); + if (format_count == 0) continue :pdev_search; + + var present_mode_count: u32 = undefined; + _ = try vki.getPhysicalDeviceSurfacePresentModesKHR(pdev, surface, &present_mode_count, null); + if (present_mode_count == 0) continue :pdev_search; + + var fam_prop_count: u32 = undefined; + vki.getPhysicalDeviceQueueFamilyProperties(pdev, &fam_prop_count, null); + const fam_props = try ally.alloc(vk.QueueFamilyProperties, fam_prop_count); + defer ally.free(fam_props); + vki.getPhysicalDeviceQueueFamilyProperties(pdev, &fam_prop_count, fam_props); + + var graphics_families = 
std.ArrayList(u32).init(ally); + var present_families = std.ArrayList(u32).init(ally); + + for (fam_props, 0..) |fam, idx| { + if (fam.queue_flags.graphics_bit) { + graphics_families.append(idx); + } + if (try vki.getPhysicalDeviceSurfaceSupportKHR(pdev, idx, surface)) { + present_families.append(idx); + } + } + + // only choose the same family if we really have to. + + // at this point, we know this pdev can support _a_ swap chain and has required extensions. try to make a + // logical device and queues out of it. + + } + + return error.NoSuitableDevice; +} + +pub fn includes(comptime T: type, us: []const T, vs: []const T) bool { + var vidx: usize = 0; + var uidx: usize = 0; + while (uidx < us.len) { + while (vidx < vs.len) : (vidx += 1) { + if (us[uidx] == vs[vidx]) break; + vidx += 1; + } else { + return false; + } + uidx += 1; + vidx += 1; + } + return true; +} + +test "includes" { + const u = &.{ 0, 1, 7 }; + const v = &.{11}; + const w = &.{}; + const x = &.{ 0, 1, 3, 5, 7, 9, 11 }; + const y = &.{2}; + const z = &.{ 0, 1, 3, 5, 7, 9, 11, 12 }; + + const full = &.{ 0, 1, 3, 5, 7, 9, 11 }; + + try std.testing.expectEqual(true, includes(usize, u, full)); + try std.testing.expectEqual(true, includes(usize, v, full)); + try std.testing.expectEqual(true, includes(usize, w, full)); + try std.testing.expectEqual(true, includes(usize, x, full)); + try std.testing.expectEqual(false, includes(usize, y, full)); + try std.testing.expectEqual(false, includes(usize, z, full)); +} + pub fn main() !void { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; defer _ = gpa.deinit(); From a029196cdd62c12973df257cc903a7520917f79b Mon Sep 17 00:00:00 2001 From: David Allemang Date: Fri, 29 Mar 2024 16:30:53 -0400 Subject: [PATCH 023/113] inspecting without glfw --- build.zig | 27 ++++++++++++++++++++- src/dsa.zig | 59 +++++++++++++++++++++++++++++++++++++++++++++ src/inspect.zig | 63 +++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 148 insertions(+), 1 deletion(-) create mode 100644 src/dsa.zig create mode 100644 src/inspect.zig diff --git a/build.zig b/build.zig index bded306..0317b85 100644 --- a/build.zig +++ b/build.zig @@ -73,9 +73,34 @@ pub fn build(b: *std.Build) void { .use_pkg_config = .force, }); exe_unit_tests.linkLibC(); - const run_exe_unit_tests = b.addRunArtifact(exe_unit_tests); + const dsa_unit_tests = b.addTest(.{ + .name = "dsa.zig tests", + .root_source_file = .{ .path = "src/dsa.zig" }, + .target = target, + .optimize = optimize, + }); + const run_dsa_unit_tests = b.addRunArtifact(dsa_unit_tests); + const test_step = b.step("test", "Run unit tests"); test_step.dependOn(&run_exe_unit_tests.step); + test_step.dependOn(&run_dsa_unit_tests.step); + + const inspect = b.addExecutable(.{ + .name = "vkinspect", + .root_source_file = .{ .path = "src/inspect.zig" }, + .target = target, + .optimize = optimize, + }); + inspect.linkSystemLibrary2("vulkan", .{ + .needed = true, + .preferred_link_mode = .dynamic, + }); + exe_unit_tests.linkLibC(); + inspect.root_module.addImport("vk", vkmod); + inspect.linkLibC(); + const run_inspect = b.addRunArtifact(inspect); + const inspect_step = b.step("vki", "Vulkan Inspect"); + inspect_step.dependOn(&run_inspect.step); } diff --git a/src/dsa.zig b/src/dsa.zig new file mode 100644 index 0000000..83153af --- /dev/null +++ b/src/dsa.zig @@ -0,0 +1,59 @@ +const std = @import("std"); + +/// Slices must be sorted. Checks if `a` includes all elements of `b`. 
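+/// "Sorted" here means ascending, and duplicates count: `a` must contain at
+/// least as many copies of each value as `b`. Illustrative calls (these
+/// mirror the test block below):
+///     includes(usize, &.{ 1, 2, 2, 4 }, &.{ 2, 2 })    // -> true
+///     includes(usize, &.{ 1, 2, 2, 4 }, &.{ 2, 2, 2 }) // -> false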
+pub fn includes(comptime T: type, a: []const T, b: []const T) bool { + var ia: usize = 0; + var ib: usize = 0; + + while (ib != b.len) { + if (ia == a.len) return false; + if (b[ib] < a[ia]) return false; + if (!(a[ia] < b[ib])) ib += 1; + ia += 1; + } + return true; +} + +test includes { + try std.testing.expect(includes( + usize, + &.{}, + &.{}, + )); + try std.testing.expect(includes( + usize, + &.{ 1, 2, 3, 4, 5 }, + &.{}, + )); + try std.testing.expect(includes( + usize, + &.{ 1, 2, 3, 4, 5 }, + &.{ 1, 2, 3, 4, 5 }, + )); + try std.testing.expect(!includes( + usize, + &.{}, + &.{ 1, 2, 3, 4, 5 }, + )); + + try std.testing.expect(includes( + usize, + &.{ 1, 2, 2, 4 }, + &.{ 2, 2 }, + )); + try std.testing.expect(includes( + usize, + &.{ 1, 2, 2, 4 }, + &.{ 1, 2, 2, 4 }, + )); + try std.testing.expect(!includes( + usize, + &.{ 1, 2, 2, 4 }, + &.{ 2, 2, 2 }, + )); + try std.testing.expect(!includes( + usize, + &.{ 1, 2, 2, 4 }, + &.{ 2, 2, 3 }, + )); +} diff --git a/src/inspect.zig b/src/inspect.zig new file mode 100644 index 0000000..7f936d2 --- /dev/null +++ b/src/inspect.zig @@ -0,0 +1,63 @@ +const std = @import("std"); +const vk = @import("vk"); + +const BaseWrapper = vk.BaseWrapper(.{ + .getInstanceProcAddr = true, + .createInstance = true, +}); + +const InstanceWrapper = vk.InstanceWrapper(.{ + .destroyInstance = true, + .enumeratePhysicalDevices = true, + .getPhysicalDeviceProperties = true, + .getPhysicalDeviceQueueFamilyProperties = true, +}); + +extern fn vkGetInstanceProcAddr(instance: vk.Instance, procname: [*:0]const u8) vk.PfnVoidFunction; +extern fn vkGetDeviceProcAddr(device: vk.Device, procname: [*:0]const u8) vk.PfnVoidFunction; + +pub fn main() !void { + var gpa = std.heap.GeneralPurposeAllocator(.{}){}; + const ally = gpa.allocator(); + + const vkb = try BaseWrapper.load(vkGetInstanceProcAddr); + + const instance = try vkb.createInstance(&.{ + .p_application_info = &.{ + .p_application_name = "vkinspect", + .application_version = 0, + .p_engine_name = "vkinspect", + .engine_version = 0, + .api_version = vk.API_VERSION_1_3, + }, + .enabled_extension_count = 0, + .enabled_layer_count = 0, + }, null); + const vki = try InstanceWrapper.load(instance, vkGetInstanceProcAddr); + defer vki.destroyInstance(instance, null); + + var pdev_count: u32 = undefined; + _ = try vki.enumeratePhysicalDevices(instance, &pdev_count, null); + const pdevs = try ally.alloc(vk.PhysicalDevice, pdev_count); + defer ally.free(pdevs); + _ = try vki.enumeratePhysicalDevices(instance, &pdev_count, pdevs.ptr); + + std.debug.print("{d} physical devices:\n", .{pdev_count}); + for (pdevs) |pdev| { + const props = vki.getPhysicalDeviceProperties(pdev); + const name = std.mem.sliceTo(&props.device_name, 0); + std.debug.print("- {s}\n", .{name}); + + var family_count: u32 = undefined; + vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, null); + const families = try ally.alloc(vk.QueueFamilyProperties, family_count); + defer ally.free(families); + vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, families.ptr); + + std.debug.print(" {d} queue families:\n", .{family_count}); + for (families) |family| { + std.debug.print(" - {any}\n", .{family.queue_flags}); + std.debug.print(" (max {d})\n", .{family.queue_count}); + } + } +} From c69ba42f0222c0c734a1266c57c5621a74c76fe3 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Fri, 29 Mar 2024 23:31:06 -0400 Subject: [PATCH 024/113] wip: extract create_device and use it; disable swapchain/presentation/etc. 
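
Roughly, the plan is for main() to own the whole setup chain directly instead
of going through GraphicsContext. A sketch of the intended wiring, using only
the helpers and dispatch wrappers introduced in the previous patches (the
exact return shape of create_device is still in flux, so treat this as an
outline rather than the final code):

    const vkb = try gfx.BaseDispatch.load(c.glfwGetInstanceProcAddress);

    const instance, const vki = try create_instance(vkb);
    defer vki.destroyInstance(instance, null);

    const surface = try create_surface(instance, window);
    defer vki.destroySurfaceKHR(instance, surface, null);

    // pick one queue family that supports both graphics and present, so the
    // swapchain can later use exclusive sharing
    const pdev, const dev, const vkd, const family = try create_device(ally, instance, surface, vki);
    defer vkd.destroyDevice(dev, null);

    const queue = vkd.getDeviceQueue(dev, family, 0);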
--- src/gfx/Context.zig | 6 ------ 1 file changed, 6 deletions(-) delete mode 100644 src/gfx/Context.zig diff --git a/src/gfx/Context.zig b/src/gfx/Context.zig deleted file mode 100644 index 7c53be3..0000000 --- a/src/gfx/Context.zig +++ /dev/null @@ -1,6 +0,0 @@ -const std = @import("std"); -const vk = @import("vk"); - -const d = @import("dispatch.zig"); - -const Self = @This(); From 5a48cdd936014303a01ff41cec671dbcf9af5123 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Fri, 29 Mar 2024 23:43:15 -0400 Subject: [PATCH 025/113] debugging swapchain create with hardcoded values seems to work? --- build.zig | 5 + src/graphics_context.zig | 11 +- src/inspect.zig | 89 +++++++++- src/main.zig | 375 +++++++++++++++++++++------------------ 4 files changed, 291 insertions(+), 189 deletions(-) diff --git a/build.zig b/build.zig index 0317b85..523bea7 100644 --- a/build.zig +++ b/build.zig @@ -97,6 +97,11 @@ pub fn build(b: *std.Build) void { .needed = true, .preferred_link_mode = .dynamic, }); + inspect.linkSystemLibrary2("glfw3", .{ + .needed = true, + .preferred_link_mode = .static, + .use_pkg_config = .force, + }); exe_unit_tests.linkLibC(); inspect.root_module.addImport("vk", vkmod); inspect.linkLibC(); diff --git a/src/graphics_context.zig b/src/graphics_context.zig index 959da98..16f7acc 100644 --- a/src/graphics_context.zig +++ b/src/graphics_context.zig @@ -85,15 +85,6 @@ pub const Queue = struct { } }; -fn createSurface(instance: vk.Instance, window: *c.GLFWwindow) !vk.SurfaceKHR { - var surface: vk.SurfaceKHR = undefined; - if (c.glfwCreateWindowSurface(instance, window, null, &surface) != .success) { - return error.SurfaceInitFailed; - } - - return surface; -} - fn initializeCandidate(vki: InstanceDispatch, candidate: DeviceCandidate) !vk.Device { const priority = [_]f32{1}; const qci = [_]vk.DeviceQueueCreateInfo{ @@ -164,6 +155,8 @@ fn checkSuitable( ) !?DeviceCandidate { const props = vki.getPhysicalDeviceProperties(pdev); + if (props.device_type != .discrete_gpu) return null; + if (!try checkExtensionSupport(vki, pdev, allocator)) { return null; } diff --git a/src/inspect.zig b/src/inspect.zig index 7f936d2..9aead49 100644 --- a/src/inspect.zig +++ b/src/inspect.zig @@ -1,6 +1,11 @@ const std = @import("std"); const vk = @import("vk"); +const c = @cImport({ + @cDefine("GLFW_INCLUDE_NONE", {}); + @cInclude("GLFW/glfw3.h"); +}); + const BaseWrapper = vk.BaseWrapper(.{ .getInstanceProcAddr = true, .createInstance = true, @@ -11,17 +16,44 @@ const InstanceWrapper = vk.InstanceWrapper(.{ .enumeratePhysicalDevices = true, .getPhysicalDeviceProperties = true, .getPhysicalDeviceQueueFamilyProperties = true, + .getPhysicalDeviceSurfaceFormatsKHR = true, + .getPhysicalDeviceSurfacePresentModesKHR = true, + .getPhysicalDeviceSurfaceSupportKHR = true, + .getPhysicalDeviceSurfaceCapabilitiesKHR = true, + .destroySurfaceKHR = true, }); +extern fn glfwGetRequiredInstanceExtensions(count: *u32) [*]const [*:0]const u8; + +extern fn glfwCreateWindowSurface( + instance: vk.Instance, + window: *c.GLFWwindow, + allocation_callbacks: ?*const vk.AllocationCallbacks, + surface: *vk.SurfaceKHR, +) vk.Result; + extern fn vkGetInstanceProcAddr(instance: vk.Instance, procname: [*:0]const u8) vk.PfnVoidFunction; extern fn vkGetDeviceProcAddr(device: vk.Device, procname: [*:0]const u8) vk.PfnVoidFunction; pub fn main() !void { + if (c.glfwInit() == c.GLFW_FALSE) return error.glfwInitFailed; + defer c.glfwTerminate(); + + c.glfwWindowHint(c.GLFW_CLIENT_API, c.GLFW_NO_API); + 
c.glfwWindowHintString(c.GLFW_X11_CLASS_NAME, "floating_window"); + c.glfwWindowHintString(c.GLFW_X11_INSTANCE_NAME, "floating_window"); + const window = c.glfwCreateWindow(400, 300, "vkinspect", null, null) orelse + return error.glfwWindowCreateFailed; + defer c.glfwDestroyWindow(window); + var gpa = std.heap.GeneralPurposeAllocator(.{}){}; const ally = gpa.allocator(); const vkb = try BaseWrapper.load(vkGetInstanceProcAddr); + var ext_count: u32 = undefined; + const exts = glfwGetRequiredInstanceExtensions(&ext_count); + const instance = try vkb.createInstance(&.{ .p_application_info = &.{ .p_application_name = "vkinspect", @@ -30,12 +62,20 @@ pub fn main() !void { .engine_version = 0, .api_version = vk.API_VERSION_1_3, }, - .enabled_extension_count = 0, + .enabled_extension_count = ext_count, + .pp_enabled_extension_names = exts, .enabled_layer_count = 0, }, null); const vki = try InstanceWrapper.load(instance, vkGetInstanceProcAddr); defer vki.destroyInstance(instance, null); + var surface: vk.SurfaceKHR = undefined; + switch (glfwCreateWindowSurface(instance, window, null, &surface)) { + .success => {}, + else => return error.Unknown, + } + defer vki.destroySurfaceKHR(instance, surface, null); + var pdev_count: u32 = undefined; _ = try vki.enumeratePhysicalDevices(instance, &pdev_count, null); const pdevs = try ally.alloc(vk.PhysicalDevice, pdev_count); @@ -46,7 +86,12 @@ pub fn main() !void { for (pdevs) |pdev| { const props = vki.getPhysicalDeviceProperties(pdev); const name = std.mem.sliceTo(&props.device_name, 0); - std.debug.print("- {s}\n", .{name}); + std.debug.print("=" ** 30 ++ "\n", .{}); + std.debug.print("= {s}\n", .{name}); + std.debug.print("=" ** 30 ++ "\n", .{}); + + std.debug.print("type: {any}\n", .{props.device_type}); + // props.device_type var family_count: u32 = undefined; vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, null); @@ -55,9 +100,45 @@ pub fn main() !void { vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, families.ptr); std.debug.print(" {d} queue families:\n", .{family_count}); - for (families) |family| { + for (families, 0..) 
|family, idx| { + const support = try vki.getPhysicalDeviceSurfaceSupportKHR(pdev, @intCast(idx), surface); std.debug.print(" - {any}\n", .{family.queue_flags}); - std.debug.print(" (max {d})\n", .{family.queue_count}); + std.debug.print(" (max {d}, surface {any})\n", .{ + family.queue_count, + support != 0, + }); } + + var format_count: u32 = undefined; + _ = try vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &format_count, null); + const formats = try ally.alloc(vk.SurfaceFormatKHR, format_count); + defer ally.free(formats); + _ = try vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &format_count, formats.ptr); + + var mode_count: u32 = undefined; + _ = try vki.getPhysicalDeviceSurfacePresentModesKHR(pdev, surface, &mode_count, null); + const modes = try ally.alloc(vk.PresentModeKHR, mode_count); + defer ally.free(modes); + _ = try vki.getPhysicalDeviceSurfacePresentModesKHR(pdev, surface, &mode_count, modes.ptr); + + std.debug.print(" {d} formats\n", .{format_count}); + for (formats) |format| { + std.debug.print(" - {any}\n", .{format}); + } + std.debug.print(" {d} present modes\n", .{mode_count}); + for (modes) |mode| { + std.debug.print(" - {any}\n", .{mode}); + } + + const caps = try vki.getPhysicalDeviceSurfaceCapabilitiesKHR(pdev, surface); + std.debug.print(" surface capabilities:\n", .{}); + std.debug.print(" {any}\n", .{caps.current_extent}); + std.debug.print(" current: {any}\n", .{caps.current_transform}); + std.debug.print(" supported: {any}\n", .{caps.supported_transforms}); + std.debug.print(" {any}\n", .{caps.supported_usage_flags}); + std.debug.print(" {} - {} images\n", .{ caps.min_image_count, caps.max_image_count }); + std.debug.print(" {} - {} extent\n", .{ caps.min_image_extent, caps.max_image_extent }); + std.debug.print(" 1 - {} arrays\n", .{caps.max_image_array_layers}); + std.debug.print(" {}\n", .{caps.supported_composite_alpha}); } } diff --git a/src/main.zig b/src/main.zig index 4e2eda3..7d97f11 100644 --- a/src/main.zig +++ b/src/main.zig @@ -100,40 +100,51 @@ fn create_window(extent: vk.Extent2D, title: [*:0]const u8) !*c.GLFWwindow { ) orelse error.WindowInitFailed; } -const DevicePair = struct { - pdev: vk.PhysicalDevice, - dev: vk.Device, - graphics: vk.Queue, - present: vk.Queue, - present_sharing_mode: vk.SharingMode, -}; +const DevicePair = std.meta.Tuple(&.{ vk.PhysicalDevice, vk.Device, gfx.DeviceDispatch, vk.Queue }); -fn create_device(ally: std.mem.Allocator, instance: vk.Instance, vki: gfx.InstanceDispatch, surface: vk.SurfaceKHR) !DevicePair { - const required_device_extensions = [_][*:0]const u8{ +/// note: destroy with vkd.destroyDevice(dev, null) +fn create_device( + ally: std.mem.Allocator, + instance: vk.Instance, + surface: vk.SurfaceKHR, + vki: gfx.InstanceDispatch, +) !DevicePair { + const required_device_extensions: []const [*:0]const u8 = &.{ vk.extension_info.khr_swapchain.name, vk.extension_info.khr_dynamic_rendering.name, }; var pdev_count: u32 = undefined; - _ = try vki.enumeratePhysicalDevices(instance, &pdev_count, null); const pdevs = try ally.alloc(vk.PhysicalDevice, pdev_count); defer ally.free(pdevs); - _ = try vki.enumeratePhysicalDevices(instance, &pdev_count, pdevs); + _ = try vki.enumeratePhysicalDevices(instance, &pdev_count, pdevs.ptr); - pdev_search: for (pdevs[0..pdev_count]) |pdev| { - // const props = vki.getPhysicalDeviceProperties(pdev); - // const feats = vki.getPhysicalDeviceFeatures(pdev); + pdev_search: for (pdevs) |pdev| { + const props = vki.getPhysicalDeviceProperties(pdev); + if 
(props.device_type != .discrete_gpu) continue :pdev_search; - var ext_prop_count: u32 = undefined; - _ = try vki.enumerateDeviceExtensionProperties(pdev, null, &ext_prop_count, null); - const ext_props = try ally.alloc(vk.ExtensionProperties, ext_prop_count); - defer ally.free(ext_props); - _ = try vki.enumerateDeviceExtensionProperties(pdev, null, &ext_prop_count, ext_props); + var format_count: u32 = undefined; + _ = try vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &format_count, null); + if (format_count == 0) continue :pdev_search; - for (required_device_extensions) |required| { - for (ext_props) |prop| { - if (std.mem.eql(u8, std.mem.span(required), std.mem.sliceTo(&prop.extension_name, 0))) { + var mode_count: u32 = undefined; + _ = try vki.getPhysicalDeviceSurfacePresentModesKHR(pdev, surface, &mode_count, null); + if (mode_count == 0) continue :pdev_search; + + var ext_count: u32 = undefined; + _ = try vki.enumerateDeviceExtensionProperties(pdev, null, &ext_count, null); + const exts = try ally.alloc(vk.ExtensionProperties, ext_count); + defer ally.free(exts); + _ = try vki.enumerateDeviceExtensionProperties(pdev, null, &ext_count, exts.ptr); + + for (required_device_extensions) |name| { + for (exts) |ext| { + if (std.mem.eql( + u8, + std.mem.span(name), + std.mem.sliceTo(&ext.extension_name, 0), + )) { break; } } else { @@ -141,80 +152,61 @@ fn create_device(ally: std.mem.Allocator, instance: vk.Instance, vki: gfx.Instan } } - var format_count: u32 = undefined; - _ = try vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &format_count, null); - if (format_count == 0) continue :pdev_search; + var family_count: u32 = undefined; + vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, null); + const families = try ally.alloc(vk.QueueFamilyProperties, family_count); + defer ally.free(families); + vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, families.ptr); - var present_mode_count: u32 = undefined; - _ = try vki.getPhysicalDeviceSurfacePresentModesKHR(pdev, surface, &present_mode_count, null); - if (present_mode_count == 0) continue :pdev_search; + // just find one family that does graphics and present, so we can use exclusive sharing + // on the swapchain. apparently most hardware supports this. logic for queue allocation + // and swapchain creation is so much simpler this way. swapchain creation needs to know + // the list of queue family indices which will have access to the images, and there's a + // performance penalty to allow concurrent access to multiple queue families. + // + // multiple _queues_ may have exclusive access, but only if they're in the smae family. - var fam_prop_count: u32 = undefined; - vki.getPhysicalDeviceQueueFamilyProperties(pdev, &fam_prop_count, null); - const fam_props = try ally.alloc(vk.QueueFamilyProperties, fam_prop_count); - defer ally.free(fam_props); - vki.getPhysicalDeviceQueueFamilyProperties(pdev, &fam_prop_count, fam_props); - - var graphics_families = std.ArrayList(u32).init(ally); - var present_families = std.ArrayList(u32).init(ally); - - for (fam_props, 0..) |fam, idx| { - if (fam.queue_flags.graphics_bit) { - graphics_families.append(idx); + const graphics_family: u32 = for (families, 0..) 
|family, idx| { + const graphics = family.queue_flags.graphics_bit; + const present = try vki.getPhysicalDeviceSurfaceSupportKHR(pdev, @intCast(idx), surface) == vk.TRUE; + if (graphics and present) { + break @intCast(idx); } - if (try vki.getPhysicalDeviceSurfaceSupportKHR(pdev, idx, surface)) { - present_families.append(idx); - } - } + } else { + continue :pdev_search; + }; - // only choose the same family if we really have to. + std.log.debug("selecting device {s}", .{std.mem.sliceTo(&props.device_name, 0)}); - // at this point, we know this pdev can support _a_ swap chain and has required extensions. try to make a - // logical device and queues out of it. + const qci: []const vk.DeviceQueueCreateInfo = &.{ + vk.DeviceQueueCreateInfo{ + .queue_family_index = graphics_family, + .queue_count = 1, + .p_queue_priorities = &[_]f32{1.0}, + }, + }; + const dev = try vki.createDevice(pdev, &.{ + .queue_create_info_count = @intCast(qci.len), + .p_queue_create_infos = qci.ptr, + .enabled_extension_count = @intCast(required_device_extensions.len), + .pp_enabled_extension_names = required_device_extensions.ptr, + }, null); + const vkd = try gfx.DeviceDispatch.load(dev, vki.dispatch.vkGetDeviceProcAddr); + errdefer vkd.destroyDevice(dev, null); + + const queue = vkd.getDeviceQueue(dev, graphics_family, 0); + + return .{ pdev, dev, vkd, queue }; } return error.NoSuitableDevice; } -pub fn includes(comptime T: type, us: []const T, vs: []const T) bool { - var vidx: usize = 0; - var uidx: usize = 0; - while (uidx < us.len) { - while (vidx < vs.len) : (vidx += 1) { - if (us[uidx] == vs[vidx]) break; - vidx += 1; - } else { - return false; - } - uidx += 1; - vidx += 1; - } - return true; -} - -test "includes" { - const u = &.{ 0, 1, 7 }; - const v = &.{11}; - const w = &.{}; - const x = &.{ 0, 1, 3, 5, 7, 9, 11 }; - const y = &.{2}; - const z = &.{ 0, 1, 3, 5, 7, 9, 11, 12 }; - - const full = &.{ 0, 1, 3, 5, 7, 9, 11 }; - - try std.testing.expectEqual(true, includes(usize, u, full)); - try std.testing.expectEqual(true, includes(usize, v, full)); - try std.testing.expectEqual(true, includes(usize, w, full)); - try std.testing.expectEqual(true, includes(usize, x, full)); - try std.testing.expectEqual(false, includes(usize, y, full)); - try std.testing.expectEqual(false, includes(usize, z, full)); -} - pub fn main() !void { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; defer _ = gpa.deinit(); - const allocator = gpa.allocator(); + const ally = gpa.allocator(); if (c.glfwInit() != c.GLFW_TRUE) return error.GlfwInitFailed; defer c.glfwTerminate(); @@ -237,108 +229,139 @@ pub fn main() !void { const surface = try create_surface(instance, window); defer vki.destroySurfaceKHR(instance, surface, null); - const gc = try GraphicsContext.init(allocator, instance, surface, vki); - defer gc.deinit(); + const pdev: vk.PhysicalDevice, const dev: vk.Device, const vkd: gfx.DeviceDispatch, const queue: vk.Queue = + try create_device(ally, instance, surface, vki); + defer vkd.destroyDevice(dev, null); - std.log.debug("Using device: {s}", .{gc.deviceName()}); + var swapchain: vk.SwapchainKHR = .null_handle; + defer vkd.destroySwapchainKHR(dev, swapchain, null); - var swapchain = try Swapchain.init(&gc, allocator, extent); - defer swapchain.deinit(); - - const pipeline_layout = try gc.vkd.createPipelineLayout(gc.dev, &.{ - .flags = .{}, - .set_layout_count = 0, - .p_set_layouts = undefined, - .push_constant_range_count = 0, - .p_push_constant_ranges = undefined, + swapchain = try vkd.createSwapchainKHR(dev, &.{ + .surface 
= surface, + .min_image_count = 3, // todo compute + .image_format = .r8g8b8a8_sint, // todo compute + // .image_format = .r8g8b8a8_sint, // todo compute + .image_color_space = .srgb_nonlinear_khr, // todo compute + .image_extent = extent, // todo compute + .image_array_layers = 1, + .image_usage = .{ .color_attachment_bit = true, .transfer_dst_bit = true }, + .image_sharing_mode = .exclusive, // since we only choose one queue family + .pre_transform = .{ .identity_bit_khr = true }, // todo compute + .composite_alpha = .{ .opaque_bit_khr = true }, + .present_mode = .mailbox_khr, // todo compute + .clipped = vk.TRUE, + .old_swapchain = swapchain, }, null); - defer gc.vkd.destroyPipelineLayout(gc.dev, pipeline_layout, null); - const pipeline = try createPipeline(&gc, pipeline_layout, swapchain); - defer gc.vkd.destroyPipeline(gc.dev, pipeline, null); + _ = try vkd.queuePresentKHR(queue, &.{ + .wait_semaphore_count = 0, + .swapchain_count = 1, + .p_swapchains = &[_]vk.SwapchainKHR{swapchain}, + .p_image_indices = &[_]u32{0}, + }); - const pool = try gc.vkd.createCommandPool(gc.dev, &.{ - .queue_family_index = gc.graphics_queue.family, - }, null); - defer gc.vkd.destroyCommandPool(gc.dev, pool, null); + try vkd.deviceWaitIdle(dev); - const vertex_buffer = try gc.vkd.createBuffer(gc.dev, &.{ - .size = @sizeOf(@TypeOf(vertices)), - .usage = .{ .transfer_dst_bit = true, .vertex_buffer_bit = true }, - .sharing_mode = .exclusive, - }, null); - defer gc.vkd.destroyBuffer(gc.dev, vertex_buffer, null); - const vertex_mem_reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, vertex_buffer); - const vertex_memory = try gc.allocate(vertex_mem_reqs, .{ .device_local_bit = true }); - defer gc.vkd.freeMemory(gc.dev, vertex_memory, null); - try gc.vkd.bindBufferMemory(gc.dev, vertex_buffer, vertex_memory, 0); + _ = pdev; + extent = undefined; - try uploadData(Vertex, &gc, pool, vertex_buffer, &vertices); + // var swapchain = try Swapchain.init(&gc, ally, extent); + // defer swapchain.deinit(); - const index_buffer = try gc.vkd.createBuffer(gc.dev, &.{ - .size = @sizeOf(@TypeOf(indices)), - .usage = .{ .transfer_dst_bit = true, .index_buffer_bit = true }, - .sharing_mode = .exclusive, - }, null); - defer gc.vkd.destroyBuffer(gc.dev, index_buffer, null); - const index_mem_reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, index_buffer); - const index_memory = try gc.allocate(index_mem_reqs, .{ .device_local_bit = true }); - defer gc.vkd.freeMemory(gc.dev, index_memory, null); - try gc.vkd.bindBufferMemory(gc.dev, index_buffer, index_memory, 0); - - try uploadData(Index, &gc, pool, index_buffer, &indices); - - var cmdbufs = try createCommandBuffers( - &gc, - pool, - allocator, - vertex_buffer, - index_buffer, - pipeline, - swapchain, - ); - defer destroyCommandBuffers(&gc, pool, allocator, cmdbufs); - - while (c.glfwWindowShouldClose(window) == c.GLFW_FALSE) { - var w: c_int = undefined; - var h: c_int = undefined; - c.glfwGetFramebufferSize(window, &w, &h); - - // Don't present or resize swapchain while the window is minimized - if (w == 0 or h == 0) { - c.glfwPollEvents(); - continue; - } - - const cmdbuf = cmdbufs[swapchain.image_index]; - - const state = swapchain.present(cmdbuf) catch |err| switch (err) { - error.OutOfDateKHR => Swapchain.PresentState.suboptimal, - else => |narrow| return narrow, - }; - - if (state == .suboptimal or extent.width != @as(u32, @intCast(w)) or extent.height != @as(u32, @intCast(h))) { - extent.width = @intCast(w); - extent.height = @intCast(h); - try swapchain.recreate(extent); - - 
destroyCommandBuffers(&gc, pool, allocator, cmdbufs); - cmdbufs = try createCommandBuffers( - &gc, - pool, - allocator, - vertex_buffer, - index_buffer, - pipeline, - swapchain, - ); - } - - c.glfwPollEvents(); - } - - try swapchain.waitForAllFences(); - try gc.vkd.deviceWaitIdle(gc.dev); + // const pipeline_layout = try gc.vkd.createPipelineLayout(gc.dev, &.{ + // .flags = .{}, + // .set_layout_count = 0, + // .p_set_layouts = undefined, + // .push_constant_range_count = 0, + // .p_push_constant_ranges = undefined, + // }, null); + // defer gc.vkd.destroyPipelineLayout(gc.dev, pipeline_layout, null); + // + // const pipeline = try createPipeline(&gc, pipeline_layout, swapchain); + // defer gc.vkd.destroyPipeline(gc.dev, pipeline, null); + // + // const pool = try gc.vkd.createCommandPool(gc.dev, &.{ + // .queue_family_index = gc.graphics_queue.family, + // }, null); + // defer gc.vkd.destroyCommandPool(gc.dev, pool, null); + // + // const vertex_buffer = try gc.vkd.createBuffer(gc.dev, &.{ + // .size = @sizeOf(@TypeOf(vertices)), + // .usage = .{ .transfer_dst_bit = true, .vertex_buffer_bit = true }, + // .sharing_mode = .exclusive, + // }, null); + // defer gc.vkd.destroyBuffer(gc.dev, vertex_buffer, null); + // const vertex_mem_reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, vertex_buffer); + // const vertex_memory = try gc.allocate(vertex_mem_reqs, .{ .device_local_bit = true }); + // defer gc.vkd.freeMemory(gc.dev, vertex_memory, null); + // try gc.vkd.bindBufferMemory(gc.dev, vertex_buffer, vertex_memory, 0); + // + // try uploadData(Vertex, &gc, pool, vertex_buffer, &vertices); + // + // const index_buffer = try gc.vkd.createBuffer(gc.dev, &.{ + // .size = @sizeOf(@TypeOf(indices)), + // .usage = .{ .transfer_dst_bit = true, .index_buffer_bit = true }, + // .sharing_mode = .exclusive, + // }, null); + // defer gc.vkd.destroyBuffer(gc.dev, index_buffer, null); + // const index_mem_reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, index_buffer); + // const index_memory = try gc.allocate(index_mem_reqs, .{ .device_local_bit = true }); + // defer gc.vkd.freeMemory(gc.dev, index_memory, null); + // try gc.vkd.bindBufferMemory(gc.dev, index_buffer, index_memory, 0); + // + // try uploadData(Index, &gc, pool, index_buffer, &indices); + // + // var cmdbufs = try createCommandBuffers( + // &gc, + // pool, + // ally, + // vertex_buffer, + // index_buffer, + // pipeline, + // swapchain, + // ); + // defer destroyCommandBuffers(&gc, pool, ally, cmdbufs); + // + // while (c.glfwWindowShouldClose(window) == c.GLFW_FALSE) { + // var w: c_int = undefined; + // var h: c_int = undefined; + // c.glfwGetFramebufferSize(window, &w, &h); + // + // // Don't present or resize swapchain while the window is minimized + // if (w == 0 or h == 0) { + // c.glfwPollEvents(); + // continue; + // } + // + // const cmdbuf = cmdbufs[swapchain.image_index]; + // + // const state = swapchain.present(cmdbuf) catch |err| switch (err) { + // error.OutOfDateKHR => Swapchain.PresentState.suboptimal, + // else => |narrow| return narrow, + // }; + // + // if (state == .suboptimal or extent.width != @as(u32, @intCast(w)) or extent.height != @as(u32, @intCast(h))) { + // extent.width = @intCast(w); + // extent.height = @intCast(h); + // try swapchain.recreate(extent); + // + // destroyCommandBuffers(&gc, pool, ally, cmdbufs); + // cmdbufs = try createCommandBuffers( + // &gc, + // pool, + // ally, + // vertex_buffer, + // index_buffer, + // pipeline, + // swapchain, + // ); + // } + // + // c.glfwPollEvents(); + // } + // + 
// try swapchain.waitForAllFences(); + // try gc.vkd.deviceWaitIdle(gc.dev); } fn uploadData(comptime T: type, gc: *const GraphicsContext, pool: vk.CommandPool, buffer: vk.Buffer, source: []const T) !void { From 2402f29742a0be71fabe7c8d26c3eb8d7bf59b01 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Sat, 30 Mar 2024 00:41:48 -0400 Subject: [PATCH 026/113] incremental remove GraphicsContext --- src/main.zig | 279 ++++++++++++++++++++++++---------------------- src/swapchain.zig | 74 ++++++++---- 2 files changed, 198 insertions(+), 155 deletions(-) diff --git a/src/main.zig b/src/main.zig index 7d97f11..43eea52 100644 --- a/src/main.zig +++ b/src/main.zig @@ -2,8 +2,9 @@ const std = @import("std"); const vk = @import("vk"); const c = @import("c.zig"); const shaders = @import("shaders"); -const GraphicsContext = @import("graphics_context.zig").GraphicsContext; +// const GraphicsContext = @import("graphics_context.zig").GraphicsContext; const Swapchain = @import("swapchain.zig").Swapchain; +const Context = @import("swapchain.zig").Context; const Allocator = std.mem.Allocator; const gfx = @import("gfx.zig"); @@ -100,7 +101,7 @@ fn create_window(extent: vk.Extent2D, title: [*:0]const u8) !*c.GLFWwindow { ) orelse error.WindowInitFailed; } -const DevicePair = std.meta.Tuple(&.{ vk.PhysicalDevice, vk.Device, gfx.DeviceDispatch, vk.Queue }); +const DevicePair = std.meta.Tuple(&.{ vk.PhysicalDevice, vk.Device, gfx.DeviceDispatch, u32 }); /// note: destroy with vkd.destroyDevice(dev, null) fn create_device( @@ -195,9 +196,7 @@ fn create_device( const vkd = try gfx.DeviceDispatch.load(dev, vki.dispatch.vkGetDeviceProcAddr); errdefer vkd.destroyDevice(dev, null); - const queue = vkd.getDeviceQueue(dev, graphics_family, 0); - - return .{ pdev, dev, vkd, queue }; + return .{ pdev, dev, vkd, graphics_family }; } return error.NoSuitableDevice; @@ -229,142 +228,154 @@ pub fn main() !void { const surface = try create_surface(instance, window); defer vki.destroySurfaceKHR(instance, surface, null); - const pdev: vk.PhysicalDevice, const dev: vk.Device, const vkd: gfx.DeviceDispatch, const queue: vk.Queue = + const pdev: vk.PhysicalDevice, const dev: vk.Device, const vkd: gfx.DeviceDispatch, const family: u32 = try create_device(ally, instance, surface, vki); defer vkd.destroyDevice(dev, null); - var swapchain: vk.SwapchainKHR = .null_handle; - defer vkd.destroySwapchainKHR(dev, swapchain, null); + const queue = vkd.getDeviceQueue(dev, family, 0); - swapchain = try vkd.createSwapchainKHR(dev, &.{ + // var swapchain: vk.SwapchainKHR = .null_handle; + // defer vkd.destroySwapchainKHR(dev, swapchain, null); + // + // swapchain = try vkd.createSwapchainKHR(dev, &.{ + // .surface = surface, + // .min_image_count = 3, // todo compute + // .image_format = .r8g8b8a8_sint, // todo compute + // // .image_format = .r8g8b8a8_sint, // todo compute + // .image_color_space = .srgb_nonlinear_khr, // todo compute + // .image_extent = extent, // todo compute + // .image_array_layers = 1, + // .image_usage = .{ .color_attachment_bit = true, .transfer_dst_bit = true }, + // .image_sharing_mode = .exclusive, // since we only choose one queue family + // .pre_transform = .{ .identity_bit_khr = true }, // todo compute + // .composite_alpha = .{ .opaque_bit_khr = true }, + // .present_mode = .mailbox_khr, // todo compute + // .clipped = vk.TRUE, + // .old_swapchain = swapchain, + // }, null); + // + // _ = try vkd.queuePresentKHR(queue, &.{ + // .wait_semaphore_count = 0, + // .swapchain_count = 1, + // .p_swapchains = 
&[_]vk.SwapchainKHR{swapchain}, + // .p_image_indices = &[_]u32{0}, + // }); + // + // try vkd.deviceWaitIdle(dev); + // + // _ = pdev; + // extent = undefined; + + const gc: Context = .{ + .vki = vki, + .vkd = vkd, + .pdev = pdev, + .dev = dev, .surface = surface, - .min_image_count = 3, // todo compute - .image_format = .r8g8b8a8_sint, // todo compute - // .image_format = .r8g8b8a8_sint, // todo compute - .image_color_space = .srgb_nonlinear_khr, // todo compute - .image_extent = extent, // todo compute - .image_array_layers = 1, - .image_usage = .{ .color_attachment_bit = true, .transfer_dst_bit = true }, - .image_sharing_mode = .exclusive, // since we only choose one queue family - .pre_transform = .{ .identity_bit_khr = true }, // todo compute - .composite_alpha = .{ .opaque_bit_khr = true }, - .present_mode = .mailbox_khr, // todo compute - .clipped = vk.TRUE, - .old_swapchain = swapchain, + .queue = queue, + .family = family, + }; + + var swapchain = try Swapchain.init(&gc, ally, extent); + defer swapchain.deinit(); + + const pipeline_layout = try gc.vkd.createPipelineLayout(gc.dev, &.{ + .flags = .{}, + .set_layout_count = 0, + .p_set_layouts = undefined, + .push_constant_range_count = 0, + .p_push_constant_ranges = undefined, }, null); + defer gc.vkd.destroyPipelineLayout(gc.dev, pipeline_layout, null); - _ = try vkd.queuePresentKHR(queue, &.{ - .wait_semaphore_count = 0, - .swapchain_count = 1, - .p_swapchains = &[_]vk.SwapchainKHR{swapchain}, - .p_image_indices = &[_]u32{0}, - }); + const pipeline = try createPipeline(&gc, pipeline_layout, swapchain); + defer gc.vkd.destroyPipeline(gc.dev, pipeline, null); - try vkd.deviceWaitIdle(dev); + const pool = try gc.vkd.createCommandPool(gc.dev, &.{ + .queue_family_index = family, + }, null); + defer gc.vkd.destroyCommandPool(gc.dev, pool, null); - _ = pdev; - extent = undefined; + const vertex_buffer = try gc.vkd.createBuffer(gc.dev, &.{ + .size = @sizeOf(@TypeOf(vertices)), + .usage = .{ .transfer_dst_bit = true, .vertex_buffer_bit = true }, + .sharing_mode = .exclusive, + }, null); + defer gc.vkd.destroyBuffer(gc.dev, vertex_buffer, null); + const vertex_mem_reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, vertex_buffer); + const vertex_memory = try gc.allocate(vertex_mem_reqs, .{ .device_local_bit = true }); + defer gc.vkd.freeMemory(gc.dev, vertex_memory, null); + try gc.vkd.bindBufferMemory(gc.dev, vertex_buffer, vertex_memory, 0); - // var swapchain = try Swapchain.init(&gc, ally, extent); - // defer swapchain.deinit(); + try uploadData(Vertex, &gc, pool, vertex_buffer, &vertices); - // const pipeline_layout = try gc.vkd.createPipelineLayout(gc.dev, &.{ - // .flags = .{}, - // .set_layout_count = 0, - // .p_set_layouts = undefined, - // .push_constant_range_count = 0, - // .p_push_constant_ranges = undefined, - // }, null); - // defer gc.vkd.destroyPipelineLayout(gc.dev, pipeline_layout, null); - // - // const pipeline = try createPipeline(&gc, pipeline_layout, swapchain); - // defer gc.vkd.destroyPipeline(gc.dev, pipeline, null); - // - // const pool = try gc.vkd.createCommandPool(gc.dev, &.{ - // .queue_family_index = gc.graphics_queue.family, - // }, null); - // defer gc.vkd.destroyCommandPool(gc.dev, pool, null); - // - // const vertex_buffer = try gc.vkd.createBuffer(gc.dev, &.{ - // .size = @sizeOf(@TypeOf(vertices)), - // .usage = .{ .transfer_dst_bit = true, .vertex_buffer_bit = true }, - // .sharing_mode = .exclusive, - // }, null); - // defer gc.vkd.destroyBuffer(gc.dev, vertex_buffer, null); - // const vertex_mem_reqs 
= gc.vkd.getBufferMemoryRequirements(gc.dev, vertex_buffer); - // const vertex_memory = try gc.allocate(vertex_mem_reqs, .{ .device_local_bit = true }); - // defer gc.vkd.freeMemory(gc.dev, vertex_memory, null); - // try gc.vkd.bindBufferMemory(gc.dev, vertex_buffer, vertex_memory, 0); - // - // try uploadData(Vertex, &gc, pool, vertex_buffer, &vertices); - // - // const index_buffer = try gc.vkd.createBuffer(gc.dev, &.{ - // .size = @sizeOf(@TypeOf(indices)), - // .usage = .{ .transfer_dst_bit = true, .index_buffer_bit = true }, - // .sharing_mode = .exclusive, - // }, null); - // defer gc.vkd.destroyBuffer(gc.dev, index_buffer, null); - // const index_mem_reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, index_buffer); - // const index_memory = try gc.allocate(index_mem_reqs, .{ .device_local_bit = true }); - // defer gc.vkd.freeMemory(gc.dev, index_memory, null); - // try gc.vkd.bindBufferMemory(gc.dev, index_buffer, index_memory, 0); - // - // try uploadData(Index, &gc, pool, index_buffer, &indices); - // - // var cmdbufs = try createCommandBuffers( - // &gc, - // pool, - // ally, - // vertex_buffer, - // index_buffer, - // pipeline, - // swapchain, - // ); - // defer destroyCommandBuffers(&gc, pool, ally, cmdbufs); - // - // while (c.glfwWindowShouldClose(window) == c.GLFW_FALSE) { - // var w: c_int = undefined; - // var h: c_int = undefined; - // c.glfwGetFramebufferSize(window, &w, &h); - // - // // Don't present or resize swapchain while the window is minimized - // if (w == 0 or h == 0) { - // c.glfwPollEvents(); - // continue; - // } - // - // const cmdbuf = cmdbufs[swapchain.image_index]; - // - // const state = swapchain.present(cmdbuf) catch |err| switch (err) { - // error.OutOfDateKHR => Swapchain.PresentState.suboptimal, - // else => |narrow| return narrow, - // }; - // - // if (state == .suboptimal or extent.width != @as(u32, @intCast(w)) or extent.height != @as(u32, @intCast(h))) { - // extent.width = @intCast(w); - // extent.height = @intCast(h); - // try swapchain.recreate(extent); - // - // destroyCommandBuffers(&gc, pool, ally, cmdbufs); - // cmdbufs = try createCommandBuffers( - // &gc, - // pool, - // ally, - // vertex_buffer, - // index_buffer, - // pipeline, - // swapchain, - // ); - // } - // - // c.glfwPollEvents(); - // } - // - // try swapchain.waitForAllFences(); - // try gc.vkd.deviceWaitIdle(gc.dev); + const index_buffer = try gc.vkd.createBuffer(gc.dev, &.{ + .size = @sizeOf(@TypeOf(indices)), + .usage = .{ .transfer_dst_bit = true, .index_buffer_bit = true }, + .sharing_mode = .exclusive, + }, null); + defer gc.vkd.destroyBuffer(gc.dev, index_buffer, null); + const index_mem_reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, index_buffer); + const index_memory = try gc.allocate(index_mem_reqs, .{ .device_local_bit = true }); + defer gc.vkd.freeMemory(gc.dev, index_memory, null); + try gc.vkd.bindBufferMemory(gc.dev, index_buffer, index_memory, 0); + + try uploadData(Index, &gc, pool, index_buffer, &indices); + + var cmdbufs = try createCommandBuffers( + &gc, + pool, + ally, + vertex_buffer, + index_buffer, + pipeline, + swapchain, + ); + defer destroyCommandBuffers(&gc, pool, ally, cmdbufs); + + while (c.glfwWindowShouldClose(window) == c.GLFW_FALSE) { + var w: c_int = undefined; + var h: c_int = undefined; + c.glfwGetFramebufferSize(window, &w, &h); + + // Don't present or resize swapchain while the window is minimized + if (w == 0 or h == 0) { + c.glfwPollEvents(); + continue; + } + + const cmdbuf = cmdbufs[swapchain.image_index]; + + const state = 
swapchain.present(cmdbuf) catch |err| switch (err) { + error.OutOfDateKHR => Swapchain.PresentState.suboptimal, + else => |narrow| return narrow, + }; + + if (state == .suboptimal or extent.width != @as(u32, @intCast(w)) or extent.height != @as(u32, @intCast(h))) { + extent.width = @intCast(w); + extent.height = @intCast(h); + try swapchain.recreate(extent); + + destroyCommandBuffers(&gc, pool, ally, cmdbufs); + cmdbufs = try createCommandBuffers( + &gc, + pool, + ally, + vertex_buffer, + index_buffer, + pipeline, + swapchain, + ); + } + + c.glfwPollEvents(); + } + + try swapchain.waitForAllFences(); + try gc.vkd.deviceWaitIdle(gc.dev); } -fn uploadData(comptime T: type, gc: *const GraphicsContext, pool: vk.CommandPool, buffer: vk.Buffer, source: []const T) !void { +fn uploadData(comptime T: type, gc: *const Context, pool: vk.CommandPool, buffer: vk.Buffer, source: []const T) !void { // if (@typeInfo(T) == .Struct and @typeInfo(T).Struct.layout == .auto) @compileError("Requires defined T layout"); const size = @sizeOf(T) * source.len; @@ -396,7 +407,7 @@ fn uploadData(comptime T: type, gc: *const GraphicsContext, pool: vk.CommandPool try copyBuffer(gc, pool, buffer, staging_buffer, size); } -fn copyBuffer(gc: *const GraphicsContext, pool: vk.CommandPool, dst: vk.Buffer, src: vk.Buffer, size: vk.DeviceSize) !void { +fn copyBuffer(gc: *const Context, pool: vk.CommandPool, dst: vk.Buffer, src: vk.Buffer, size: vk.DeviceSize) !void { var cmdbuf: vk.CommandBuffer = undefined; try gc.vkd.allocateCommandBuffers(gc.dev, &.{ .command_pool = pool, @@ -426,12 +437,12 @@ fn copyBuffer(gc: *const GraphicsContext, pool: vk.CommandPool, dst: vk.Buffer, // creating and submitting a queue for every copy operation seems a bad idea for "streamed" data // gonna want a way to send a copy operation WITH SYNCHRONIZATION PRIMITIVES on a particular queue // see https://stackoverflow.com/a/62183243 - try gc.vkd.queueSubmit(gc.graphics_queue.handle, 1, @ptrCast(&si), .null_handle); - try gc.vkd.queueWaitIdle(gc.graphics_queue.handle); + try gc.vkd.queueSubmit(gc.queue, 1, @ptrCast(&si), .null_handle); + try gc.vkd.queueWaitIdle(gc.queue); } fn createCommandBuffers( - gc: *const GraphicsContext, + gc: *const Context, pool: vk.CommandPool, allocator: Allocator, vertex_buffer: vk.Buffer, @@ -512,12 +523,12 @@ fn createCommandBuffers( return cmdbufs; } -fn destroyCommandBuffers(gc: *const GraphicsContext, pool: vk.CommandPool, allocator: Allocator, cmdbufs: []vk.CommandBuffer) void { +fn destroyCommandBuffers(gc: *const Context, pool: vk.CommandPool, allocator: Allocator, cmdbufs: []vk.CommandBuffer) void { gc.vkd.freeCommandBuffers(gc.dev, pool, @truncate(cmdbufs.len), cmdbufs.ptr); allocator.free(cmdbufs); } -fn createPipeline(gc: *const GraphicsContext, layout: vk.PipelineLayout, swapchain: Swapchain) !vk.Pipeline { +fn createPipeline(gc: *const Context, layout: vk.PipelineLayout, swapchain: Swapchain) !vk.Pipeline { const vert = try gc.vkd.createShaderModule(gc.dev, &.{ .code_size = shaders.triangle_vert.len, .p_code = @as([*]const u32, @ptrCast(&shaders.triangle_vert)), diff --git a/src/swapchain.zig b/src/swapchain.zig index 09519ff..13d2ce0 100644 --- a/src/swapchain.zig +++ b/src/swapchain.zig @@ -1,15 +1,55 @@ const std = @import("std"); const vk = @import("vk"); -const GraphicsContext = @import("graphics_context.zig").GraphicsContext; +const gfx = @import("gfx.zig"); const Allocator = std.mem.Allocator; +pub const Context = struct { + vki: gfx.InstanceDispatch, + vkd: gfx.DeviceDispatch, + + pdev: 
vk.PhysicalDevice, + dev: vk.Device, + + surface: vk.SurfaceKHR, + + queue: vk.Queue, + family: u32, + + pub fn findMemoryTypeIndex( + self: @This(), + memory_type_bits: u32, + flags: vk.MemoryPropertyFlags, + ) !u32 { + const mem_props = self.vki.getPhysicalDeviceMemoryProperties(self.pdev); + + for (mem_props.memory_types[0..mem_props.memory_type_count], 0..) |mem_type, i| { + if (memory_type_bits & (@as(u32, 1) << @truncate(i)) != 0 and mem_type.property_flags.contains(flags)) { + return @truncate(i); + } + } + + return error.NoSuitableMemoryType; + } + + pub fn allocate( + self: @This(), + requirements: vk.MemoryRequirements, + flags: vk.MemoryPropertyFlags, + ) !vk.DeviceMemory { + return try self.vkd.allocateMemory(self.dev, &.{ + .allocation_size = requirements.size, + .memory_type_index = try self.findMemoryTypeIndex(requirements.memory_type_bits, flags), + }, null); + } +}; + pub const Swapchain = struct { pub const PresentState = enum { optimal, suboptimal, }; - gc: *const GraphicsContext, + gc: *const Context, allocator: Allocator, surface_format: vk.SurfaceFormatKHR, @@ -21,11 +61,11 @@ pub const Swapchain = struct { image_index: u32, next_image_acquired: vk.Semaphore, - pub fn init(gc: *const GraphicsContext, allocator: Allocator, extent: vk.Extent2D) !Swapchain { + pub fn init(gc: *const Context, allocator: Allocator, extent: vk.Extent2D) !Swapchain { return try initRecycle(gc, allocator, extent, .null_handle); } - pub fn initRecycle(gc: *const GraphicsContext, allocator: Allocator, extent: vk.Extent2D, old_handle: vk.SwapchainKHR) !Swapchain { + pub fn initRecycle(gc: *const Context, allocator: Allocator, extent: vk.Extent2D, old_handle: vk.SwapchainKHR) !Swapchain { const caps = try gc.vki.getPhysicalDeviceSurfaceCapabilitiesKHR(gc.pdev, gc.surface); const actual_extent = findActualExtent(caps, extent); if (actual_extent.width == 0 or actual_extent.height == 0) { @@ -40,12 +80,6 @@ pub const Swapchain = struct { image_count = @min(image_count, caps.max_image_count); } - const qfi = [_]u32{ gc.graphics_queue.family, gc.present_queue.family }; - const sharing_mode: vk.SharingMode = if (gc.graphics_queue.family != gc.present_queue.family) - .concurrent - else - .exclusive; - const handle = try gc.vkd.createSwapchainKHR(gc.dev, &.{ .surface = gc.surface, .min_image_count = image_count, @@ -54,9 +88,7 @@ pub const Swapchain = struct { .image_extent = actual_extent, .image_array_layers = 1, .image_usage = .{ .color_attachment_bit = true, .transfer_dst_bit = true }, - .image_sharing_mode = sharing_mode, - .queue_family_index_count = qfi.len, - .p_queue_family_indices = &qfi, + .image_sharing_mode = .exclusive, .pre_transform = caps.current_transform, .composite_alpha = .{ .opaque_bit_khr = true }, .present_mode = present_mode, @@ -154,7 +186,7 @@ pub const Swapchain = struct { // Step 2: Submit the command buffer const wait_stage = [_]vk.PipelineStageFlags{.{ .top_of_pipe_bit = true }}; - try self.gc.vkd.queueSubmit(self.gc.graphics_queue.handle, 1, &[_]vk.SubmitInfo{.{ + try self.gc.vkd.queueSubmit(self.gc.queue, 1, &[_]vk.SubmitInfo{.{ .wait_semaphore_count = 1, .p_wait_semaphores = @ptrCast(¤t.image_acquired), .p_wait_dst_stage_mask = &wait_stage, @@ -165,7 +197,7 @@ pub const Swapchain = struct { }}, current.frame_fence); // Step 3: Present the current frame - _ = try self.gc.vkd.queuePresentKHR(self.gc.present_queue.handle, &.{ + _ = try self.gc.vkd.queuePresentKHR(self.gc.queue, &.{ .wait_semaphore_count = 1, .p_wait_semaphores = @as([*]const vk.Semaphore, 
@ptrCast(¤t.render_finished)), .swapchain_count = 1, @@ -200,7 +232,7 @@ const SwapImage = struct { render_finished: vk.Semaphore, frame_fence: vk.Fence, - fn init(gc: *const GraphicsContext, image: vk.Image, format: vk.Format) !SwapImage { + fn init(gc: *const Context, image: vk.Image, format: vk.Format) !SwapImage { const view = try gc.vkd.createImageView(gc.dev, &.{ .image = image, .view_type = .@"2d", @@ -234,7 +266,7 @@ const SwapImage = struct { }; } - fn deinit(self: SwapImage, gc: *const GraphicsContext) void { + fn deinit(self: SwapImage, gc: *const Context) void { self.waitForFence(gc) catch return; gc.vkd.destroyImageView(gc.dev, self.view, null); gc.vkd.destroySemaphore(gc.dev, self.image_acquired, null); @@ -242,12 +274,12 @@ const SwapImage = struct { gc.vkd.destroyFence(gc.dev, self.frame_fence, null); } - fn waitForFence(self: SwapImage, gc: *const GraphicsContext) !void { + fn waitForFence(self: SwapImage, gc: *const Context) !void { _ = try gc.vkd.waitForFences(gc.dev, 1, @ptrCast(&self.frame_fence), vk.TRUE, std.math.maxInt(u64)); } }; -fn initSwapchainImages(gc: *const GraphicsContext, swapchain: vk.SwapchainKHR, format: vk.Format, allocator: Allocator) ![]SwapImage { +fn initSwapchainImages(gc: *const Context, swapchain: vk.SwapchainKHR, format: vk.Format, allocator: Allocator) ![]SwapImage { var count: u32 = undefined; _ = try gc.vkd.getSwapchainImagesKHR(gc.dev, swapchain, &count, null); const images = try allocator.alloc(vk.Image, count); @@ -268,7 +300,7 @@ fn initSwapchainImages(gc: *const GraphicsContext, swapchain: vk.SwapchainKHR, f return swap_images; } -fn findSurfaceFormat(gc: *const GraphicsContext, allocator: Allocator) !vk.SurfaceFormatKHR { +fn findSurfaceFormat(gc: *const Context, allocator: Allocator) !vk.SurfaceFormatKHR { const preferred = vk.SurfaceFormatKHR{ .format = .b8g8r8a8_srgb, .color_space = .srgb_nonlinear_khr, @@ -289,7 +321,7 @@ fn findSurfaceFormat(gc: *const GraphicsContext, allocator: Allocator) !vk.Surfa return surface_formats[0]; // There must always be at least one supported surface format } -fn findPresentMode(gc: *const GraphicsContext, allocator: Allocator) !vk.PresentModeKHR { +fn findPresentMode(gc: *const Context, allocator: Allocator) !vk.PresentModeKHR { var count: u32 = undefined; _ = try gc.vki.getPhysicalDeviceSurfacePresentModesKHR(gc.pdev, gc.surface, &count, null); const present_modes = try allocator.alloc(vk.PresentModeKHR, count); From fb42f4c47fa3f2dfbb599672bcd9e3f1bb5f086a Mon Sep 17 00:00:00 2001 From: David Allemang Date: Sat, 30 Mar 2024 00:51:42 -0400 Subject: [PATCH 027/113] remove graphics_context.zig --- src/graphics_context.zig | 246 --------------------------------------- src/main.zig | 16 +-- 2 files changed, 9 insertions(+), 253 deletions(-) delete mode 100644 src/graphics_context.zig diff --git a/src/graphics_context.zig b/src/graphics_context.zig deleted file mode 100644 index 16f7acc..0000000 --- a/src/graphics_context.zig +++ /dev/null @@ -1,246 +0,0 @@ -const std = @import("std"); -const vk = @import("vk"); -const c = @import("c.zig"); -const Allocator = std.mem.Allocator; - -const gfx = @import("gfx.zig"); - -const BaseDispatch = gfx.BaseDispatch; -const InstanceDispatch = gfx.InstanceDispatch; -const DeviceDispatch = gfx.DeviceDispatch; - -const required_device_extensions = [_][*:0]const u8{ - vk.extension_info.khr_swapchain.name, - vk.extension_info.khr_dynamic_rendering.name, -}; - -pub const GraphicsContext = struct { - vki: InstanceDispatch, - vkd: DeviceDispatch, - - surface: vk.SurfaceKHR, 
- pdev: vk.PhysicalDevice, - props: vk.PhysicalDeviceProperties, - mem_props: vk.PhysicalDeviceMemoryProperties, - - dev: vk.Device, - graphics_queue: Queue, - present_queue: Queue, - - pub fn init(allocator: Allocator, instance: vk.Instance, surface: vk.SurfaceKHR, vki: InstanceDispatch) !GraphicsContext { - var self: GraphicsContext = undefined; - self.vki = vki; - self.surface = surface; - - const candidate = try pickPhysicalDevice(vki, instance, allocator, surface); - self.pdev = candidate.pdev; - self.props = candidate.props; - self.dev = try initializeCandidate(vki, candidate); - self.vkd = try DeviceDispatch.load(self.dev, vki.dispatch.vkGetDeviceProcAddr); - errdefer self.vkd.destroyDevice(self.dev, null); - - self.graphics_queue = Queue.init(self.vkd, self.dev, candidate.queues.graphics_family); - self.present_queue = Queue.init(self.vkd, self.dev, candidate.queues.present_family); - - self.mem_props = vki.getPhysicalDeviceMemoryProperties(self.pdev); - - return self; - } - - pub fn deinit(self: GraphicsContext) void { - self.vkd.destroyDevice(self.dev, null); - } - - pub fn deviceName(self: *const GraphicsContext) []const u8 { - return std.mem.sliceTo(&self.props.device_name, 0); - } - - pub fn findMemoryTypeIndex(self: GraphicsContext, memory_type_bits: u32, flags: vk.MemoryPropertyFlags) !u32 { - for (self.mem_props.memory_types[0..self.mem_props.memory_type_count], 0..) |mem_type, i| { - if (memory_type_bits & (@as(u32, 1) << @truncate(i)) != 0 and mem_type.property_flags.contains(flags)) { - return @truncate(i); - } - } - - return error.NoSuitableMemoryType; - } - - pub fn allocate(self: GraphicsContext, requirements: vk.MemoryRequirements, flags: vk.MemoryPropertyFlags) !vk.DeviceMemory { - return try self.vkd.allocateMemory(self.dev, &.{ - .allocation_size = requirements.size, - .memory_type_index = try self.findMemoryTypeIndex(requirements.memory_type_bits, flags), - }, null); - } -}; - -pub const Queue = struct { - handle: vk.Queue, - family: u32, - - fn init(vkd: DeviceDispatch, dev: vk.Device, family: u32) Queue { - return .{ - .handle = vkd.getDeviceQueue(dev, family, 0), - .family = family, - }; - } -}; - -fn initializeCandidate(vki: InstanceDispatch, candidate: DeviceCandidate) !vk.Device { - const priority = [_]f32{1}; - const qci = [_]vk.DeviceQueueCreateInfo{ - .{ - .queue_family_index = candidate.queues.graphics_family, - .queue_count = 1, - .p_queue_priorities = &priority, - }, - .{ - .queue_family_index = candidate.queues.present_family, - .queue_count = 1, - .p_queue_priorities = &priority, - }, - }; - - const queue_count: u32 = if (candidate.queues.graphics_family == candidate.queues.present_family) - 1 - else - 2; - - return try vki.createDevice(candidate.pdev, &.{ - .queue_create_info_count = queue_count, - .p_queue_create_infos = &qci, - .enabled_extension_count = required_device_extensions.len, - .pp_enabled_extension_names = @as([*]const [*:0]const u8, @ptrCast(&required_device_extensions)), - }, null); -} - -const DeviceCandidate = struct { - pdev: vk.PhysicalDevice, - props: vk.PhysicalDeviceProperties, - queues: QueueAllocation, -}; - -const QueueAllocation = struct { - graphics_family: u32, - present_family: u32, -}; - -fn pickPhysicalDevice( - vki: InstanceDispatch, - instance: vk.Instance, - allocator: Allocator, - surface: vk.SurfaceKHR, -) !DeviceCandidate { - var device_count: u32 = undefined; - _ = try vki.enumeratePhysicalDevices(instance, &device_count, null); - - const pdevs = try allocator.alloc(vk.PhysicalDevice, device_count); - defer 
allocator.free(pdevs); - - _ = try vki.enumeratePhysicalDevices(instance, &device_count, pdevs.ptr); - - for (pdevs) |pdev| { - if (try checkSuitable(vki, pdev, allocator, surface)) |candidate| { - return candidate; - } - } - - return error.NoSuitableDevice; -} - -fn checkSuitable( - vki: InstanceDispatch, - pdev: vk.PhysicalDevice, - allocator: Allocator, - surface: vk.SurfaceKHR, -) !?DeviceCandidate { - const props = vki.getPhysicalDeviceProperties(pdev); - - if (props.device_type != .discrete_gpu) return null; - - if (!try checkExtensionSupport(vki, pdev, allocator)) { - return null; - } - - if (!try checkSurfaceSupport(vki, pdev, surface)) { - return null; - } - - if (try allocateQueues(vki, pdev, allocator, surface)) |allocation| { - return DeviceCandidate{ - .pdev = pdev, - .props = props, - .queues = allocation, - }; - } - - return null; -} - -fn allocateQueues(vki: InstanceDispatch, pdev: vk.PhysicalDevice, allocator: Allocator, surface: vk.SurfaceKHR) !?QueueAllocation { - var family_count: u32 = undefined; - vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, null); - - const families = try allocator.alloc(vk.QueueFamilyProperties, family_count); - defer allocator.free(families); - vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, families.ptr); - - var graphics_family: ?u32 = null; - var present_family: ?u32 = null; - - for (families, 0..) |properties, i| { - const family: u32 = @intCast(i); - - if (graphics_family == null and properties.queue_flags.graphics_bit) { - graphics_family = family; - } - - if (present_family == null and (try vki.getPhysicalDeviceSurfaceSupportKHR(pdev, family, surface)) == vk.TRUE) { - present_family = family; - } - } - - if (graphics_family != null and present_family != null) { - return QueueAllocation{ - .graphics_family = graphics_family.?, - .present_family = present_family.?, - }; - } - - return null; -} - -fn checkSurfaceSupport(vki: InstanceDispatch, pdev: vk.PhysicalDevice, surface: vk.SurfaceKHR) !bool { - var format_count: u32 = undefined; - _ = try vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &format_count, null); - - var present_mode_count: u32 = undefined; - _ = try vki.getPhysicalDeviceSurfacePresentModesKHR(pdev, surface, &present_mode_count, null); - - return format_count > 0 and present_mode_count > 0; -} - -fn checkExtensionSupport( - vki: InstanceDispatch, - pdev: vk.PhysicalDevice, - allocator: Allocator, -) !bool { - var count: u32 = undefined; - _ = try vki.enumerateDeviceExtensionProperties(pdev, null, &count, null); - - const propsv = try allocator.alloc(vk.ExtensionProperties, count); - defer allocator.free(propsv); - - _ = try vki.enumerateDeviceExtensionProperties(pdev, null, &count, propsv.ptr); - - for (required_device_extensions) |ext| { - for (propsv) |props| { - if (std.mem.eql(u8, std.mem.span(ext), std.mem.sliceTo(&props.extension_name, 0))) { - break; - } - } else { - return false; - } - } - - return true; -} diff --git a/src/main.zig b/src/main.zig index 43eea52..2e20a4b 100644 --- a/src/main.zig +++ b/src/main.zig @@ -239,17 +239,17 @@ pub fn main() !void { // // swapchain = try vkd.createSwapchainKHR(dev, &.{ // .surface = surface, - // .min_image_count = 3, // todo compute - // .image_format = .r8g8b8a8_sint, // todo compute - // // .image_format = .r8g8b8a8_sint, // todo compute - // .image_color_space = .srgb_nonlinear_khr, // todo compute - // .image_extent = extent, // todo compute + // .min_image_count = 3, // should compute + // .image_format = .r8g8b8a8_sint, // should 
compute + // // .image_format = .r8g8b8a8_sint, // should compute + // .image_color_space = .srgb_nonlinear_khr, // should compute + // .image_extent = extent, // should compute // .image_array_layers = 1, // .image_usage = .{ .color_attachment_bit = true, .transfer_dst_bit = true }, // .image_sharing_mode = .exclusive, // since we only choose one queue family - // .pre_transform = .{ .identity_bit_khr = true }, // todo compute + // .pre_transform = .{ .identity_bit_khr = true }, // should compute // .composite_alpha = .{ .opaque_bit_khr = true }, - // .present_mode = .mailbox_khr, // todo compute + // .present_mode = .mailbox_khr, // should compute // .clipped = vk.TRUE, // .old_swapchain = swapchain, // }, null); @@ -437,6 +437,8 @@ fn copyBuffer(gc: *const Context, pool: vk.CommandPool, dst: vk.Buffer, src: vk. // creating and submitting a queue for every copy operation seems a bad idea for "streamed" data // gonna want a way to send a copy operation WITH SYNCHRONIZATION PRIMITIVES on a particular queue // see https://stackoverflow.com/a/62183243 + // + // this may be a misunderstanding on how submission works... try gc.vkd.queueSubmit(gc.queue, 1, @ptrCast(&si), .null_handle); try gc.vkd.queueWaitIdle(gc.queue); } From 538c23421377339b7a8312e4b0085d1613c0f92a Mon Sep 17 00:00:00 2001 From: David Allemang Date: Sat, 30 Mar 2024 22:46:57 -0400 Subject: [PATCH 028/113] debug messenger and validation layers. fix dynamic rendering layout errors. still broken on quadro --- src/gfx.zig | 57 ++++++++++++++++++ src/main.zig | 160 +++++++++++++++++++++++++++++++++++++-------------- 2 files changed, 175 insertions(+), 42 deletions(-) diff --git a/src/gfx.zig b/src/gfx.zig index 260368c..e0d132d 100644 --- a/src/gfx.zig +++ b/src/gfx.zig @@ -1,5 +1,12 @@ +const std = @import("std"); +const builtin = @import("builtin"); const vk = @import("vk"); +pub const use_debug_messenger = switch (builtin.mode) { + .Debug, .ReleaseSafe => true, + .ReleaseSmall, .ReleaseFast => false, +}; + pub const BaseDispatch = vk.BaseWrapper(.{ .createInstance = true, .getInstanceProcAddr = true, @@ -19,6 +26,8 @@ pub const InstanceDispatch = vk.InstanceWrapper(.{ .getPhysicalDeviceSurfaceSupportKHR = true, .getPhysicalDeviceMemoryProperties = true, .getDeviceProcAddr = true, + .createDebugUtilsMessengerEXT = use_debug_messenger, + .destroyDebugUtilsMessengerEXT = use_debug_messenger, }); pub const DeviceDispatch = vk.DeviceWrapper(.{ @@ -72,4 +81,52 @@ pub const DeviceDispatch = vk.DeviceWrapper(.{ .cmdCopyBuffer = true, .cmdBeginRenderingKHR = true, .cmdEndRenderingKHR = true, + .cmdPipelineBarrier = true, }); + +pub fn debug_callback( + msg_severity: vk.DebugUtilsMessageSeverityFlagsEXT, + msg_type: vk.DebugUtilsMessageTypeFlagsEXT, + p_data: ?*const vk.DebugUtilsMessengerCallbackDataEXT, + _: ?*anyopaque, +) callconv(vk.vulkan_call_conv) vk.Bool32 { + // ripped from std.log.defaultLog + + const data = p_data orelse return vk.FALSE; + const message = data.p_message orelse return vk.FALSE; + + const severity_prefix = if (msg_severity.verbose_bit_ext) + "verbose:" + else if (msg_severity.info_bit_ext) + "info:" + else if (msg_severity.warning_bit_ext) + "warning:" + else if (msg_severity.error_bit_ext) + "error:" + else + "?:"; + + const type_prefix = if (msg_type.general_bit_ext) + "" + else if (msg_type.validation_bit_ext) + "validation:" + else if (msg_type.performance_bit_ext) + "performance:" + else if (msg_type.device_address_binding_bit_ext) + "device_address_binding:" + else + "?:"; + + const stderr = 
std.io.getStdErr().writer(); + var bw = std.io.bufferedWriter(stderr); + const writer = bw.writer(); + + std.debug.getStderrMutex().lock(); + defer std.debug.getStderrMutex().unlock(); + nosuspend { + writer.print("vk-{s}{s} {s}\n", .{ severity_prefix, type_prefix, message }) catch return vk.FALSE; + bw.flush() catch return vk.FALSE; + } + + return vk.FALSE; +} diff --git a/src/main.zig b/src/main.zig index 2e20a4b..303939e 100644 --- a/src/main.zig +++ b/src/main.zig @@ -2,7 +2,6 @@ const std = @import("std"); const vk = @import("vk"); const c = @import("c.zig"); const shaders = @import("shaders"); -// const GraphicsContext = @import("graphics_context.zig").GraphicsContext; const Swapchain = @import("swapchain.zig").Swapchain; const Context = @import("swapchain.zig").Context; const Allocator = std.mem.Allocator; @@ -53,12 +52,44 @@ const vertices = [_]Vertex{ const indices = [_]Index{ 4, 5, 6, 6, 5, 7 }; -const InstancePair = std.meta.Tuple(&.{ vk.Instance, gfx.InstanceDispatch }); +const InstancePair = std.meta.Tuple(&.{ vk.Instance, gfx.InstanceDispatch, vk.DebugUtilsMessengerEXT }); /// note: destroy with vki.destroyInstance(instance, null) fn create_instance(vkb: gfx.BaseDispatch) !InstancePair { + var exts = std.BoundedArray([*:0]const u8, 32){}; + var layers = std.BoundedArray([*:0]const u8, 32){}; + + if (gfx.use_debug_messenger) { + try exts.appendSlice(&.{ + vk.extension_info.ext_debug_utils.name, + }); + + try layers.appendSlice(&.{ + "VK_LAYER_KHRONOS_validation", + }); + } + var glfw_exts_count: u32 = 0; - const glfw_exts = c.glfwGetRequiredInstanceExtensions(&glfw_exts_count); + const glfw_exts: [*]const [*:0]const u8 = + @ptrCast(c.glfwGetRequiredInstanceExtensions(&glfw_exts_count)); + try exts.appendSlice(glfw_exts[0..glfw_exts_count]); + + const dumci: vk.DebugUtilsMessengerCreateInfoEXT = .{ + .message_severity = .{ + .error_bit_ext = true, + .info_bit_ext = true, + .verbose_bit_ext = true, + .warning_bit_ext = true, + }, + .message_type = .{ + .device_address_binding_bit_ext = true, + .general_bit_ext = false, + .performance_bit_ext = true, + .validation_bit_ext = true, + }, + .pfn_user_callback = &gfx.debug_callback, + .p_user_data = null, + }; const instance = try vkb.createInstance(&vk.InstanceCreateInfo{ .p_application_info = &vk.ApplicationInfo{ @@ -68,13 +99,23 @@ fn create_instance(vkb: gfx.BaseDispatch) !InstancePair { .engine_version = vk.makeApiVersion(0, 0, 0, 0), .api_version = vk.API_VERSION_1_3, }, - .enabled_extension_count = glfw_exts_count, - .pp_enabled_extension_names = @ptrCast(glfw_exts), + .enabled_extension_count = @intCast(exts.len), + .pp_enabled_extension_names = &exts.buffer, + .enabled_layer_count = @intCast(layers.len), + .pp_enabled_layer_names = &layers.buffer, + .p_next = if (gfx.use_debug_messenger) &dumci else null, }, null); - const vki = try gfx.InstanceDispatch.load(instance, vkb.dispatch.vkGetInstanceProcAddr); + errdefer vki.destroyInstance(instance, null); - return .{ instance, vki }; + const messenger: vk.DebugUtilsMessengerEXT = if (gfx.use_debug_messenger) + try vki.createDebugUtilsMessengerEXT(instance, &dumci, null) + else + .null_handle; + errdefer if (gfx.use_debug_messenger) + vki.destroyDebugUtilsMessengerEXT(instance, messenger, null); + + return .{ instance, vki, messenger }; } /// note: destroy with vki.destroySurfaceKHR(instance, surface, null) @@ -123,7 +164,7 @@ fn create_device( pdev_search: for (pdevs) |pdev| { const props = vki.getPhysicalDeviceProperties(pdev); - if (props.device_type != .discrete_gpu) continue 
:pdev_search; + // if (props.device_type != .discrete_gpu) continue :pdev_search; var format_count: u32 = undefined; _ = try vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &format_count, null); @@ -192,6 +233,9 @@ fn create_device( .p_queue_create_infos = qci.ptr, .enabled_extension_count = @intCast(required_device_extensions.len), .pp_enabled_extension_names = required_device_extensions.ptr, + .p_next = &vk.PhysicalDeviceDynamicRenderingFeaturesKHR{ + .dynamic_rendering = vk.TRUE, + }, }, null); const vkd = try gfx.DeviceDispatch.load(dev, vki.dispatch.vkGetDeviceProcAddr); errdefer vkd.destroyDevice(dev, null); @@ -222,8 +266,10 @@ pub fn main() !void { const vkb = try gfx.BaseDispatch.load(c.glfwGetInstanceProcAddress); - const instance, const vki = try create_instance(vkb); + const instance, const vki, const messenger = try create_instance(vkb); defer vki.destroyInstance(instance, null); + defer if (gfx.use_debug_messenger) + vki.destroyDebugUtilsMessengerEXT(instance, messenger, null); const surface = try create_surface(instance, window); defer vki.destroySurfaceKHR(instance, surface, null); @@ -234,38 +280,6 @@ pub fn main() !void { const queue = vkd.getDeviceQueue(dev, family, 0); - // var swapchain: vk.SwapchainKHR = .null_handle; - // defer vkd.destroySwapchainKHR(dev, swapchain, null); - // - // swapchain = try vkd.createSwapchainKHR(dev, &.{ - // .surface = surface, - // .min_image_count = 3, // should compute - // .image_format = .r8g8b8a8_sint, // should compute - // // .image_format = .r8g8b8a8_sint, // should compute - // .image_color_space = .srgb_nonlinear_khr, // should compute - // .image_extent = extent, // should compute - // .image_array_layers = 1, - // .image_usage = .{ .color_attachment_bit = true, .transfer_dst_bit = true }, - // .image_sharing_mode = .exclusive, // since we only choose one queue family - // .pre_transform = .{ .identity_bit_khr = true }, // should compute - // .composite_alpha = .{ .opaque_bit_khr = true }, - // .present_mode = .mailbox_khr, // should compute - // .clipped = vk.TRUE, - // .old_swapchain = swapchain, - // }, null); - // - // _ = try vkd.queuePresentKHR(queue, &.{ - // .wait_semaphore_count = 0, - // .swapchain_count = 1, - // .p_swapchains = &[_]vk.SwapchainKHR{swapchain}, - // .p_image_indices = &[_]u32{0}, - // }); - // - // try vkd.deviceWaitIdle(dev); - // - // _ = pdev; - // extent = undefined; - const gc: Context = .{ .vki = vki, .vkd = vkd, @@ -485,13 +499,44 @@ fn createCommandBuffers( for (cmdbufs, swapchain.swap_images) |cmdbuf, image| { try gc.vkd.beginCommandBuffer(cmdbuf, &.{}); + const pre_render_barriers: []const vk.ImageMemoryBarrier = &.{ + vk.ImageMemoryBarrier{ + .src_access_mask = .{}, + .dst_access_mask = .{ .color_attachment_write_bit = true }, + .old_layout = .undefined, + .new_layout = .color_attachment_optimal, + .src_queue_family_index = 0, + .dst_queue_family_index = 0, + .image = image.image, + .subresource_range = .{ + .aspect_mask = .{ .color_bit = true }, + .base_mip_level = 0, + .level_count = 1, + .base_array_layer = 0, + .layer_count = 1, + }, + }, + }; + gc.vkd.cmdPipelineBarrier( + cmdbuf, + .{ .top_of_pipe_bit = true }, + .{ .color_attachment_output_bit = true }, + .{}, + 0, + null, + 0, + null, + @intCast(pre_render_barriers.len), + pre_render_barriers.ptr, + ); + gc.vkd.cmdSetViewport(cmdbuf, 0, 1, @ptrCast(&viewport)); gc.vkd.cmdSetScissor(cmdbuf, 0, 1, @ptrCast(&scissor)); const color_attachments = [_]vk.RenderingAttachmentInfoKHR{ .{ .image_view = image.view, - .image_layout = 
.present_src_khr, + .image_layout = .color_attachment_optimal, .resolve_mode = .{}, .resolve_image_view = .null_handle, .resolve_image_layout = .undefined, @@ -519,6 +564,37 @@ fn createCommandBuffers( gc.vkd.cmdEndRenderingKHR(cmdbuf); + const post_render_barriers: []const vk.ImageMemoryBarrier = &.{ + vk.ImageMemoryBarrier{ + .src_access_mask = .{ .color_attachment_write_bit = true }, + .dst_access_mask = .{}, + .old_layout = .color_attachment_optimal, + .new_layout = .present_src_khr, + .src_queue_family_index = 0, + .dst_queue_family_index = 0, + .image = image.image, + .subresource_range = .{ + .aspect_mask = .{ .color_bit = true }, + .base_mip_level = 0, + .level_count = 1, + .base_array_layer = 0, + .layer_count = 1, + }, + }, + }; + gc.vkd.cmdPipelineBarrier( + cmdbuf, + .{ .color_attachment_output_bit = true }, + .{ .bottom_of_pipe_bit = true }, + .{}, + 0, + null, + 0, + null, + @intCast(post_render_barriers.len), + post_render_barriers.ptr, + ); + try gc.vkd.endCommandBuffer(cmdbuf); } From 10dcd3125a1c6b4e954609e5ea6223e8be9fa9de Mon Sep 17 00:00:00 2001 From: David Allemang Date: Sat, 30 Mar 2024 23:59:16 -0400 Subject: [PATCH 029/113] debugging the swapchain recreate bugs --- src/inspect.zig | 2 ++ src/main.zig | 63 ++++++++++++++++++++++++----------------------- src/swapchain.zig | 24 +++++++++++++++--- 3 files changed, 55 insertions(+), 34 deletions(-) diff --git a/src/inspect.zig b/src/inspect.zig index 9aead49..66cc397 100644 --- a/src/inspect.zig +++ b/src/inspect.zig @@ -93,6 +93,8 @@ pub fn main() !void { std.debug.print("type: {any}\n", .{props.device_type}); // props.device_type + std.debug.print("max_push_constants_size: {d}\n", .{props.limits.max_push_constants_size}); + var family_count: u32 = undefined; vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, null); const families = try ally.alloc(vk.QueueFamilyProperties, family_count); diff --git a/src/main.zig b/src/main.zig index 303939e..3addfe1 100644 --- a/src/main.zig +++ b/src/main.zig @@ -164,7 +164,7 @@ fn create_device( pdev_search: for (pdevs) |pdev| { const props = vki.getPhysicalDeviceProperties(pdev); - // if (props.device_type != .discrete_gpu) continue :pdev_search; + if (props.device_type != .discrete_gpu) continue :pdev_search; var format_count: u32 = undefined; _ = try vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &format_count, null); @@ -348,6 +348,7 @@ pub fn main() !void { defer destroyCommandBuffers(&gc, pool, ally, cmdbufs); while (c.glfwWindowShouldClose(window) == c.GLFW_FALSE) { + std.log.debug("new frame", .{ }); var w: c_int = undefined; var h: c_int = undefined; c.glfwGetFramebufferSize(window, &w, &h); @@ -364,13 +365,19 @@ pub fn main() !void { error.OutOfDateKHR => Swapchain.PresentState.suboptimal, else => |narrow| return narrow, }; + + std.log.debug("state: {}", .{state}); if (state == .suboptimal or extent.width != @as(u32, @intCast(w)) or extent.height != @as(u32, @intCast(h))) { extent.width = @intCast(w); extent.height = @intCast(h); + std.log.debug("about to recreate", .{ }); try swapchain.recreate(extent); + std.log.debug("about to destroy command buffers", .{ }); destroyCommandBuffers(&gc, pool, ally, cmdbufs); + + std.log.debug("about to create command buffers", .{ }); cmdbufs = try createCommandBuffers( &gc, pool, @@ -499,8 +506,17 @@ fn createCommandBuffers( for (cmdbufs, swapchain.swap_images) |cmdbuf, image| { try gc.vkd.beginCommandBuffer(cmdbuf, &.{}); - const pre_render_barriers: []const vk.ImageMemoryBarrier = &.{ - vk.ImageMemoryBarrier{ + 
gc.vkd.cmdPipelineBarrier( + cmdbuf, + .{ .top_of_pipe_bit = true }, + .{ .color_attachment_output_bit = true }, + .{}, + 0, + null, + 0, + null, + 1, + @ptrCast(&vk.ImageMemoryBarrier{ .src_access_mask = .{}, .dst_access_mask = .{ .color_attachment_write_bit = true }, .old_layout = .undefined, @@ -515,19 +531,7 @@ fn createCommandBuffers( .base_array_layer = 0, .layer_count = 1, }, - }, - }; - gc.vkd.cmdPipelineBarrier( - cmdbuf, - .{ .top_of_pipe_bit = true }, - .{ .color_attachment_output_bit = true }, - .{}, - 0, - null, - 0, - null, - @intCast(pre_render_barriers.len), - pre_render_barriers.ptr, + }), ); gc.vkd.cmdSetViewport(cmdbuf, 0, 1, @ptrCast(&viewport)); @@ -564,8 +568,17 @@ fn createCommandBuffers( gc.vkd.cmdEndRenderingKHR(cmdbuf); - const post_render_barriers: []const vk.ImageMemoryBarrier = &.{ - vk.ImageMemoryBarrier{ + gc.vkd.cmdPipelineBarrier( + cmdbuf, + .{ .color_attachment_output_bit = true }, + .{ .bottom_of_pipe_bit = true }, + .{}, + 0, + null, + 0, + null, + 1, + @ptrCast(&vk.ImageMemoryBarrier{ .src_access_mask = .{ .color_attachment_write_bit = true }, .dst_access_mask = .{}, .old_layout = .color_attachment_optimal, @@ -580,19 +593,7 @@ fn createCommandBuffers( .base_array_layer = 0, .layer_count = 1, }, - }, - }; - gc.vkd.cmdPipelineBarrier( - cmdbuf, - .{ .color_attachment_output_bit = true }, - .{ .bottom_of_pipe_bit = true }, - .{}, - 0, - null, - 0, - null, - @intCast(post_render_barriers.len), - post_render_barriers.ptr, + }), ); try gc.vkd.endCommandBuffer(cmdbuf); diff --git a/src/swapchain.zig b/src/swapchain.zig index 13d2ce0..e250fde 100644 --- a/src/swapchain.zig +++ b/src/swapchain.zig @@ -111,9 +111,18 @@ pub const Swapchain = struct { var next_image_acquired = try gc.vkd.createSemaphore(gc.dev, &.{}, null); errdefer gc.vkd.destroySemaphore(gc.dev, next_image_acquired, null); - const result = try gc.vkd.acquireNextImageKHR(gc.dev, handle, std.math.maxInt(u64), next_image_acquired, .null_handle); - if (result.result != .success) { - return error.ImageAcquireFailed; + const result = try gc.vkd.acquireNextImageKHR( + gc.dev, + handle, + std.math.maxInt(u64), + next_image_acquired, + .null_handle, + ); + switch (result.result) { + vk.Result.success, vk.Result.suboptimal_khr => {}, + vk.Result.timeout => return error.Timeout, + vk.Result.not_ready => return error.NotReady, + else => unreachable, } std.mem.swap(vk.Semaphore, &swap_images[result.image_index].image_acquired, &next_image_acquired); @@ -267,6 +276,15 @@ const SwapImage = struct { } fn deinit(self: SwapImage, gc: *const Context) void { + // todo critical: this waitForFence deadlocks when recreating swapchain on nvidia. + // Something about the main "present" loop is fucked. Can't just ignore the fence; validation layers show errors + // that you can't destroy a fence while a queue depends on it (details may be wrong... I don't fully understand) + // I suspect this is more an issue with the vulkan-zig example, so I probably need to revisit vulkan-tutorial or + // try to rebuild the swapchain infrastructure myself. + // + // I do think it's clunky how the swapchain is created and recreated; duplicate logic in creation and recreation + // that could maybe be avoided with .null_handle? Maybe there's some more straightforward way to handle it. 
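        // A minimal sketch of one heavier-handed alternative (roughly the vulkan-tutorial
        // approach): drain the whole device before tearing down the old swapchain resources,
        // rather than waiting on each image's fence here in deinit. The helper below is
        // hypothetical and not part of this patch series; it only assumes the Swapchain and
        // Context declarations earlier in this file.
        fn recreateAfterIdle(swapchain: *Swapchain, new_extent: vk.Extent2D) !void {
            const gc = swapchain.gc;
            // Once deviceWaitIdle returns, no submitted work can still reference the
            // per-image fences or semaphores, so destroying them cannot race a queue.
            try gc.vkd.deviceWaitIdle(gc.dev);
            try swapchain.recreate(new_extent);
        }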
+ self.waitForFence(gc) catch return; gc.vkd.destroyImageView(gc.dev, self.view, null); gc.vkd.destroySemaphore(gc.dev, self.image_acquired, null); From 1b3302cc9956f6d1d49c063c1fbd9c6a8d4d4d07 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Mon, 1 Apr 2024 20:47:29 -0400 Subject: [PATCH 030/113] incremental - dropping swapchain --- src/main.zig | 44 ++++++++++++++++++++++++++----------- src/swapchain.zig | 55 ++++++++++++++++++++--------------------------- 2 files changed, 55 insertions(+), 44 deletions(-) diff --git a/src/main.zig b/src/main.zig index 3addfe1..6b0f869 100644 --- a/src/main.zig +++ b/src/main.zig @@ -246,6 +246,26 @@ fn create_device( return error.NoSuitableDevice; } +fn find_surface_format( + pdev: vk.PhysicalDevice, + surface: vk.SurfaceKHR, + preferred: vk.SurfaceFormatKHR, + vki: gfx.InstanceDispatch, +) !vk.SurfaceFormatKHR { + var formats_buf: [64]vk.SurfaceFormatKHR = undefined; + var formats_count: u32 = 64; + _ = try vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &formats_count, &formats_buf); + const formats = formats_buf[0..formats_count]; + + for (formats) |format| { + if (std.meta.eql(format, preferred)) { + return format; + } + } + + return formats[0]; +} + pub fn main() !void { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; defer _ = gpa.deinit(); @@ -278,6 +298,12 @@ pub fn main() !void { try create_device(ally, instance, surface, vki); defer vkd.destroyDevice(dev, null); + const preferred_format: vk.SurfaceFormatKHR = .{ + .format = .b8g8r8a8_srgb, + .color_space = .srgb_nonlinear_khr, + }; + const format = try find_surface_format(pdev, surface, preferred_format, vki); + const queue = vkd.getDeviceQueue(dev, family, 0); const gc: Context = .{ @@ -290,7 +316,7 @@ pub fn main() !void { .family = family, }; - var swapchain = try Swapchain.init(&gc, ally, extent); + var swapchain = try Swapchain.init(&gc, ally, extent, format); defer swapchain.deinit(); const pipeline_layout = try gc.vkd.createPipelineLayout(gc.dev, &.{ @@ -302,7 +328,7 @@ pub fn main() !void { }, null); defer gc.vkd.destroyPipelineLayout(gc.dev, pipeline_layout, null); - const pipeline = try createPipeline(&gc, pipeline_layout, swapchain); + const pipeline = try createPipeline(&gc, pipeline_layout, format); defer gc.vkd.destroyPipeline(gc.dev, pipeline, null); const pool = try gc.vkd.createCommandPool(gc.dev, &.{ @@ -348,7 +374,6 @@ pub fn main() !void { defer destroyCommandBuffers(&gc, pool, ally, cmdbufs); while (c.glfwWindowShouldClose(window) == c.GLFW_FALSE) { - std.log.debug("new frame", .{ }); var w: c_int = undefined; var h: c_int = undefined; c.glfwGetFramebufferSize(window, &w, &h); @@ -365,19 +390,14 @@ pub fn main() !void { error.OutOfDateKHR => Swapchain.PresentState.suboptimal, else => |narrow| return narrow, }; - - std.log.debug("state: {}", .{state}); if (state == .suboptimal or extent.width != @as(u32, @intCast(w)) or extent.height != @as(u32, @intCast(h))) { extent.width = @intCast(w); extent.height = @intCast(h); - std.log.debug("about to recreate", .{ }); - try swapchain.recreate(extent); + try swapchain.recreate(extent, format); - std.log.debug("about to destroy command buffers", .{ }); destroyCommandBuffers(&gc, pool, ally, cmdbufs); - - std.log.debug("about to create command buffers", .{ }); + cmdbufs = try createCommandBuffers( &gc, pool, @@ -607,7 +627,7 @@ fn destroyCommandBuffers(gc: *const Context, pool: vk.CommandPool, allocator: Al allocator.free(cmdbufs); } -fn createPipeline(gc: *const Context, layout: vk.PipelineLayout, swapchain: Swapchain) 
!vk.Pipeline { +fn createPipeline(gc: *const Context, layout: vk.PipelineLayout, format: vk.SurfaceFormatKHR) !vk.Pipeline { const vert = try gc.vkd.createShaderModule(gc.dev, &.{ .code_size = shaders.triangle_vert.len, .p_code = @as([*]const u32, @ptrCast(&shaders.triangle_vert)), @@ -701,7 +721,7 @@ fn createPipeline(gc: *const Context, layout: vk.PipelineLayout, swapchain: Swap const prci = vk.PipelineRenderingCreateInfoKHR{ .color_attachment_count = 1, - .p_color_attachment_formats = @ptrCast(&swapchain.surface_format.format), + .p_color_attachment_formats = @ptrCast(&format), .depth_attachment_format = .undefined, .stencil_attachment_format = .undefined, .view_mask = 0, diff --git a/src/swapchain.zig b/src/swapchain.zig index e250fde..b01d7ff 100644 --- a/src/swapchain.zig +++ b/src/swapchain.zig @@ -52,7 +52,6 @@ pub const Swapchain = struct { gc: *const Context, allocator: Allocator, - surface_format: vk.SurfaceFormatKHR, present_mode: vk.PresentModeKHR, extent: vk.Extent2D, handle: vk.SwapchainKHR, @@ -61,18 +60,28 @@ pub const Swapchain = struct { image_index: u32, next_image_acquired: vk.Semaphore, - pub fn init(gc: *const Context, allocator: Allocator, extent: vk.Extent2D) !Swapchain { - return try initRecycle(gc, allocator, extent, .null_handle); + pub fn init( + gc: *const Context, + allocator: Allocator, + extent: vk.Extent2D, + format: vk.SurfaceFormatKHR, + ) !Swapchain { + return try initRecycle(gc, allocator, extent, format, .null_handle); } - pub fn initRecycle(gc: *const Context, allocator: Allocator, extent: vk.Extent2D, old_handle: vk.SwapchainKHR) !Swapchain { + pub fn initRecycle( + gc: *const Context, + allocator: Allocator, + extent: vk.Extent2D, + format: vk.SurfaceFormatKHR, + old_handle: vk.SwapchainKHR, + ) !Swapchain { const caps = try gc.vki.getPhysicalDeviceSurfaceCapabilitiesKHR(gc.pdev, gc.surface); const actual_extent = findActualExtent(caps, extent); if (actual_extent.width == 0 or actual_extent.height == 0) { return error.InvalidSurfaceDimensions; } - const surface_format = try findSurfaceFormat(gc, allocator); const present_mode = try findPresentMode(gc, allocator); var image_count = caps.min_image_count + 1; @@ -83,8 +92,8 @@ pub const Swapchain = struct { const handle = try gc.vkd.createSwapchainKHR(gc.dev, &.{ .surface = gc.surface, .min_image_count = image_count, - .image_format = surface_format.format, - .image_color_space = surface_format.color_space, + .image_format = format.format, + .image_color_space = format.color_space, .image_extent = actual_extent, .image_array_layers = 1, .image_usage = .{ .color_attachment_bit = true, .transfer_dst_bit = true }, @@ -102,7 +111,7 @@ pub const Swapchain = struct { gc.vkd.destroySwapchainKHR(gc.dev, old_handle, null); } - const swap_images = try initSwapchainImages(gc, handle, surface_format.format, allocator); + const swap_images = try initSwapchainImages(gc, handle, format.format, allocator); errdefer { for (swap_images) |si| si.deinit(gc); allocator.free(swap_images); @@ -129,7 +138,6 @@ pub const Swapchain = struct { return Swapchain{ .gc = gc, .allocator = allocator, - .surface_format = surface_format, .present_mode = present_mode, .extent = actual_extent, .handle = handle, @@ -154,12 +162,16 @@ pub const Swapchain = struct { self.gc.vkd.destroySwapchainKHR(self.gc.dev, self.handle, null); } - pub fn recreate(self: *Swapchain, new_extent: vk.Extent2D) !void { + pub fn recreate( + self: *Swapchain, + new_extent: vk.Extent2D, + format: vk.SurfaceFormatKHR, + ) !void { const gc = self.gc; const 
allocator = self.allocator; const old_handle = self.handle; self.deinitExceptSwapchain(); - self.* = try initRecycle(gc, allocator, new_extent, old_handle); + self.* = try initRecycle(gc, allocator, new_extent, format, old_handle); } pub fn currentImage(self: Swapchain) vk.Image { @@ -318,27 +330,6 @@ fn initSwapchainImages(gc: *const Context, swapchain: vk.SwapchainKHR, format: v return swap_images; } -fn findSurfaceFormat(gc: *const Context, allocator: Allocator) !vk.SurfaceFormatKHR { - const preferred = vk.SurfaceFormatKHR{ - .format = .b8g8r8a8_srgb, - .color_space = .srgb_nonlinear_khr, - }; - - var count: u32 = undefined; - _ = try gc.vki.getPhysicalDeviceSurfaceFormatsKHR(gc.pdev, gc.surface, &count, null); - const surface_formats = try allocator.alloc(vk.SurfaceFormatKHR, count); - defer allocator.free(surface_formats); - _ = try gc.vki.getPhysicalDeviceSurfaceFormatsKHR(gc.pdev, gc.surface, &count, surface_formats.ptr); - - for (surface_formats) |sfmt| { - if (std.meta.eql(sfmt, preferred)) { - return preferred; - } - } - - return surface_formats[0]; // There must always be at least one supported surface format -} - fn findPresentMode(gc: *const Context, allocator: Allocator) !vk.PresentModeKHR { var count: u32 = undefined; _ = try gc.vki.getPhysicalDeviceSurfacePresentModesKHR(gc.pdev, gc.surface, &count, null); From d6baf2ef097ab5939946b9ae49d2218b5997cdac Mon Sep 17 00:00:00 2001 From: David Allemang Date: Mon, 1 Apr 2024 21:12:34 -0400 Subject: [PATCH 031/113] comment command buffers + swapchain stuff. going to follow vulkan-tutorial instead --- src/dsa.zig | 59 ----- src/main.zig | 646 ++++++++++++++++++++++++---------------------- src/swapchain.zig | 27 -- 3 files changed, 335 insertions(+), 397 deletions(-) delete mode 100644 src/dsa.zig diff --git a/src/dsa.zig b/src/dsa.zig deleted file mode 100644 index 83153af..0000000 --- a/src/dsa.zig +++ /dev/null @@ -1,59 +0,0 @@ -const std = @import("std"); - -/// Slices must be sorted. Checks if `a` includes all elements of `b`. 
-pub fn includes(comptime T: type, a: []const T, b: []const T) bool { - var ia: usize = 0; - var ib: usize = 0; - - while (ib != b.len) { - if (ia == a.len) return false; - if (b[ib] < a[ia]) return false; - if (!(a[ia] < b[ib])) ib += 1; - ia += 1; - } - return true; -} - -test includes { - try std.testing.expect(includes( - usize, - &.{}, - &.{}, - )); - try std.testing.expect(includes( - usize, - &.{ 1, 2, 3, 4, 5 }, - &.{}, - )); - try std.testing.expect(includes( - usize, - &.{ 1, 2, 3, 4, 5 }, - &.{ 1, 2, 3, 4, 5 }, - )); - try std.testing.expect(!includes( - usize, - &.{}, - &.{ 1, 2, 3, 4, 5 }, - )); - - try std.testing.expect(includes( - usize, - &.{ 1, 2, 2, 4 }, - &.{ 2, 2 }, - )); - try std.testing.expect(includes( - usize, - &.{ 1, 2, 2, 4 }, - &.{ 1, 2, 2, 4 }, - )); - try std.testing.expect(!includes( - usize, - &.{ 1, 2, 2, 4 }, - &.{ 2, 2, 2 }, - )); - try std.testing.expect(!includes( - usize, - &.{ 1, 2, 2, 4 }, - &.{ 2, 2, 3 }, - )); -} diff --git a/src/main.zig b/src/main.zig index 6b0f869..0ecee37 100644 --- a/src/main.zig +++ b/src/main.zig @@ -2,8 +2,6 @@ const std = @import("std"); const vk = @import("vk"); const c = @import("c.zig"); const shaders = @import("shaders"); -const Swapchain = @import("swapchain.zig").Swapchain; -const Context = @import("swapchain.zig").Context; const Allocator = std.mem.Allocator; const gfx = @import("gfx.zig"); @@ -298,80 +296,82 @@ pub fn main() !void { try create_device(ally, instance, surface, vki); defer vkd.destroyDevice(dev, null); + const queue = vkd.getDeviceQueue(dev, family, 0); + const preferred_format: vk.SurfaceFormatKHR = .{ .format = .b8g8r8a8_srgb, .color_space = .srgb_nonlinear_khr, }; const format = try find_surface_format(pdev, surface, preferred_format, vki); - const queue = vkd.getDeviceQueue(dev, family, 0); + _ = &extent; - const gc: Context = .{ - .vki = vki, - .vkd = vkd, - .pdev = pdev, - .dev = dev, - .surface = surface, - .queue = queue, - .family = family, - }; + // const gc: Context = .{ + // .vki = vki, + // .vkd = vkd, + // .pdev = pdev, + // .dev = dev, + // .surface = surface, + // .queue = queue, + // .family = family, + // }; + // + // var swapchain = try Swapchain.init(&gc, ally, extent, format); + // defer swapchain.deinit(); - var swapchain = try Swapchain.init(&gc, ally, extent, format); - defer swapchain.deinit(); - - const pipeline_layout = try gc.vkd.createPipelineLayout(gc.dev, &.{ + const pipeline_layout = try vkd.createPipelineLayout(dev, &.{ .flags = .{}, .set_layout_count = 0, .p_set_layouts = undefined, .push_constant_range_count = 0, .p_push_constant_ranges = undefined, }, null); - defer gc.vkd.destroyPipelineLayout(gc.dev, pipeline_layout, null); + defer vkd.destroyPipelineLayout(dev, pipeline_layout, null); - const pipeline = try createPipeline(&gc, pipeline_layout, format); - defer gc.vkd.destroyPipeline(gc.dev, pipeline, null); + const pipeline = try createPipeline(dev, pipeline_layout, format, vkd); + defer vkd.destroyPipeline(dev, pipeline, null); - const pool = try gc.vkd.createCommandPool(gc.dev, &.{ + const pool = try vkd.createCommandPool(dev, &.{ .queue_family_index = family, }, null); - defer gc.vkd.destroyCommandPool(gc.dev, pool, null); + defer vkd.destroyCommandPool(dev, pool, null); - const vertex_buffer = try gc.vkd.createBuffer(gc.dev, &.{ + const vertex_buffer = try vkd.createBuffer(dev, &.{ .size = @sizeOf(@TypeOf(vertices)), .usage = .{ .transfer_dst_bit = true, .vertex_buffer_bit = true }, .sharing_mode = .exclusive, }, null); - defer gc.vkd.destroyBuffer(gc.dev, 
vertex_buffer, null); - const vertex_mem_reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, vertex_buffer); - const vertex_memory = try gc.allocate(vertex_mem_reqs, .{ .device_local_bit = true }); - defer gc.vkd.freeMemory(gc.dev, vertex_memory, null); - try gc.vkd.bindBufferMemory(gc.dev, vertex_buffer, vertex_memory, 0); + defer vkd.destroyBuffer(dev, vertex_buffer, null); + const vertex_mem_reqs = vkd.getBufferMemoryRequirements(dev, vertex_buffer); + const vertex_memory = try allocate(pdev, vki, dev, vkd, vertex_mem_reqs, .{ .device_local_bit = true }); + defer vkd.freeMemory(dev, vertex_memory, null); + try vkd.bindBufferMemory(dev, vertex_buffer, vertex_memory, 0); - try uploadData(Vertex, &gc, pool, vertex_buffer, &vertices); + try uploadData(Vertex, pdev, vki, dev, vkd, queue, pool, vertex_buffer, &vertices); - const index_buffer = try gc.vkd.createBuffer(gc.dev, &.{ + const index_buffer = try vkd.createBuffer(dev, &.{ .size = @sizeOf(@TypeOf(indices)), .usage = .{ .transfer_dst_bit = true, .index_buffer_bit = true }, .sharing_mode = .exclusive, }, null); - defer gc.vkd.destroyBuffer(gc.dev, index_buffer, null); - const index_mem_reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, index_buffer); - const index_memory = try gc.allocate(index_mem_reqs, .{ .device_local_bit = true }); - defer gc.vkd.freeMemory(gc.dev, index_memory, null); - try gc.vkd.bindBufferMemory(gc.dev, index_buffer, index_memory, 0); + defer vkd.destroyBuffer(dev, index_buffer, null); + const index_mem_reqs = vkd.getBufferMemoryRequirements(dev, index_buffer); + const index_memory = try allocate(pdev, vki, dev, vkd, index_mem_reqs, .{ .device_local_bit = true }); + defer vkd.freeMemory(dev, index_memory, null); + try vkd.bindBufferMemory(dev, index_buffer, index_memory, 0); - try uploadData(Index, &gc, pool, index_buffer, &indices); + try uploadData(Index, pdev, vki, dev, vkd, queue, pool, index_buffer, &indices); - var cmdbufs = try createCommandBuffers( - &gc, - pool, - ally, - vertex_buffer, - index_buffer, - pipeline, - swapchain, - ); - defer destroyCommandBuffers(&gc, pool, ally, cmdbufs); + // var cmdbufs = try createCommandBuffers( + // &gc, + // pool, + // ally, + // vertex_buffer, + // index_buffer, + // pipeline, + // swapchain, + // ); + // defer destroyCommandBuffers(&gc, pool, ally, cmdbufs); while (c.glfwWindowShouldClose(window) == c.GLFW_FALSE) { var w: c_int = undefined; @@ -384,80 +384,98 @@ pub fn main() !void { continue; } - const cmdbuf = cmdbufs[swapchain.image_index]; + // const cmdbuf = cmdbufs[swapchain.image_index]; - const state = swapchain.present(cmdbuf) catch |err| switch (err) { - error.OutOfDateKHR => Swapchain.PresentState.suboptimal, - else => |narrow| return narrow, - }; + // const state = swapchain.present(cmdbuf) catch |err| switch (err) { + // error.OutOfDateKHR => Swapchain.PresentState.suboptimal, + // else => |narrow| return narrow, + // }; - if (state == .suboptimal or extent.width != @as(u32, @intCast(w)) or extent.height != @as(u32, @intCast(h))) { - extent.width = @intCast(w); - extent.height = @intCast(h); - try swapchain.recreate(extent, format); - - destroyCommandBuffers(&gc, pool, ally, cmdbufs); - - cmdbufs = try createCommandBuffers( - &gc, - pool, - ally, - vertex_buffer, - index_buffer, - pipeline, - swapchain, - ); - } + // if (state == .suboptimal or extent.width != @as(u32, @intCast(w)) or extent.height != @as(u32, @intCast(h))) { + // extent.width = @intCast(w); + // extent.height = @intCast(h); + // try swapchain.recreate(extent, format); + // + // 
destroyCommandBuffers(&gc, pool, ally, cmdbufs); + // + // cmdbufs = try createCommandBuffers( + // &gc, + // pool, + // ally, + // vertex_buffer, + // index_buffer, + // pipeline, + // swapchain, + // ); + // } c.glfwPollEvents(); } - try swapchain.waitForAllFences(); - try gc.vkd.deviceWaitIdle(gc.dev); + // try swapchain.waitForAllFences(); + try vkd.deviceWaitIdle(dev); } -fn uploadData(comptime T: type, gc: *const Context, pool: vk.CommandPool, buffer: vk.Buffer, source: []const T) !void { +fn uploadData( + comptime T: type, + pdev: vk.PhysicalDevice, + vki: gfx.InstanceDispatch, + dev: vk.Device, + vkd: gfx.DeviceDispatch, + queue: vk.Queue, + pool: vk.CommandPool, + buffer: vk.Buffer, + source: []const T, +) !void { // if (@typeInfo(T) == .Struct and @typeInfo(T).Struct.layout == .auto) @compileError("Requires defined T layout"); const size = @sizeOf(T) * source.len; - const staging_buffer = try gc.vkd.createBuffer(gc.dev, &.{ + const staging_buffer = try vkd.createBuffer(dev, &.{ .size = size, .usage = .{ .transfer_src_bit = true }, .sharing_mode = .exclusive, }, null); - defer gc.vkd.destroyBuffer(gc.dev, staging_buffer, null); + defer vkd.destroyBuffer(dev, staging_buffer, null); - const mem_reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, staging_buffer); - const staging_memory = try gc.allocate(mem_reqs, .{ + const mem_reqs = vkd.getBufferMemoryRequirements(dev, staging_buffer); + const staging_memory = try allocate(pdev, vki, dev, vkd, mem_reqs, .{ .host_visible_bit = true, .host_coherent_bit = true, }); - defer gc.vkd.freeMemory(gc.dev, staging_memory, null); + defer vkd.freeMemory(dev, staging_memory, null); - try gc.vkd.bindBufferMemory(gc.dev, staging_buffer, staging_memory, 0); + try vkd.bindBufferMemory(dev, staging_buffer, staging_memory, 0); { - const data = try gc.vkd.mapMemory(gc.dev, staging_memory, 0, vk.WHOLE_SIZE, .{}); - defer gc.vkd.unmapMemory(gc.dev, staging_memory); + const data = try vkd.mapMemory(dev, staging_memory, 0, vk.WHOLE_SIZE, .{}); + defer vkd.unmapMemory(dev, staging_memory); const dest: [*]T = @ptrCast(@alignCast(data)); @memcpy(dest, source); } - try copyBuffer(gc, pool, buffer, staging_buffer, size); + try copyBuffer(dev, queue, pool, buffer, staging_buffer, size, vkd); } -fn copyBuffer(gc: *const Context, pool: vk.CommandPool, dst: vk.Buffer, src: vk.Buffer, size: vk.DeviceSize) !void { +fn copyBuffer( + dev: vk.Device, + queue: vk.Queue, + pool: vk.CommandPool, + dst: vk.Buffer, + src: vk.Buffer, + size: vk.DeviceSize, + vkd: gfx.DeviceDispatch, +) !void { var cmdbuf: vk.CommandBuffer = undefined; - try gc.vkd.allocateCommandBuffers(gc.dev, &.{ + try vkd.allocateCommandBuffers(dev, &.{ .command_pool = pool, .level = .primary, .command_buffer_count = 1, }, @ptrCast(&cmdbuf)); - defer gc.vkd.freeCommandBuffers(gc.dev, pool, 1, @ptrCast(&cmdbuf)); + defer vkd.freeCommandBuffers(dev, pool, 1, @ptrCast(&cmdbuf)); - try gc.vkd.beginCommandBuffer(cmdbuf, &.{ + try vkd.beginCommandBuffer(cmdbuf, &.{ .flags = .{ .one_time_submit_bit = true }, }); @@ -466,9 +484,9 @@ fn copyBuffer(gc: *const Context, pool: vk.CommandPool, dst: vk.Buffer, src: vk. .dst_offset = 0, .size = size, }; - gc.vkd.cmdCopyBuffer(cmdbuf, src, dst, 1, @ptrCast(®ion)); + vkd.cmdCopyBuffer(cmdbuf, src, dst, 1, @ptrCast(®ion)); - try gc.vkd.endCommandBuffer(cmdbuf); + try vkd.endCommandBuffer(cmdbuf); const si = vk.SubmitInfo{ .command_buffer_count = 1, @@ -480,165 +498,162 @@ fn copyBuffer(gc: *const Context, pool: vk.CommandPool, dst: vk.Buffer, src: vk. 
// see https://stackoverflow.com/a/62183243 // // this may be a misunderstanding on how submission works... - try gc.vkd.queueSubmit(gc.queue, 1, @ptrCast(&si), .null_handle); - try gc.vkd.queueWaitIdle(gc.queue); + try vkd.queueSubmit(queue, 1, @ptrCast(&si), .null_handle); + try vkd.queueWaitIdle(queue); } -fn createCommandBuffers( - gc: *const Context, - pool: vk.CommandPool, - allocator: Allocator, - vertex_buffer: vk.Buffer, - index_buffer: vk.Buffer, - pipeline: vk.Pipeline, - swapchain: Swapchain, -) ![]vk.CommandBuffer { - const extent = swapchain.extent; +// fn createCommandBuffers( +// pool: vk.CommandPool, +// allocator: Allocator, +// vertex_buffer: vk.Buffer, +// index_buffer: vk.Buffer, +// pipeline: vk.Pipeline, +// extent: vk.Extent2D, +// ) ![]vk.CommandBuffer { +// const cmdbufs = try allocator.alloc(vk.CommandBuffer, swapchain.swap_images.len); +// errdefer allocator.free(cmdbufs); +// +// try vkd.allocateCommandBuffers(dev, &.{ +// .command_pool = pool, +// .level = .primary, +// .command_buffer_count = @as(u32, @truncate(cmdbufs.len)), +// }, cmdbufs.ptr); +// errdefer vkd.freeCommandBuffers(dev, pool, @truncate(cmdbufs.len), cmdbufs.ptr); +// +// const clear = vk.ClearValue{ +// .color = .{ .float_32 = .{ 0, 0, 0, 1 } }, +// }; +// +// const viewport = vk.Viewport{ +// .x = 0, +// .y = 0, +// .width = @as(f32, @floatFromInt(extent.width)), +// .height = @as(f32, @floatFromInt(extent.height)), +// .min_depth = 0, +// .max_depth = 1, +// }; +// +// const scissor = vk.Rect2D{ +// .offset = .{ .x = 0, .y = 0 }, +// .extent = extent, +// }; +// +// for (cmdbufs, swapchain.swap_images) |cmdbuf, image| { +// try vkd.beginCommandBuffer(cmdbuf, &.{}); +// +// vkd.cmdPipelineBarrier( +// cmdbuf, +// .{ .top_of_pipe_bit = true }, +// .{ .color_attachment_output_bit = true }, +// .{}, +// 0, +// null, +// 0, +// null, +// 1, +// @ptrCast(&vk.ImageMemoryBarrier{ +// .src_access_mask = .{}, +// .dst_access_mask = .{ .color_attachment_write_bit = true }, +// .old_layout = .undefined, +// .new_layout = .color_attachment_optimal, +// .src_queue_family_index = 0, +// .dst_queue_family_index = 0, +// .image = image.image, +// .subresource_range = .{ +// .aspect_mask = .{ .color_bit = true }, +// .base_mip_level = 0, +// .level_count = 1, +// .base_array_layer = 0, +// .layer_count = 1, +// }, +// }), +// ); +// +// vkd.cmdSetViewport(cmdbuf, 0, 1, @ptrCast(&viewport)); +// vkd.cmdSetScissor(cmdbuf, 0, 1, @ptrCast(&scissor)); +// +// const color_attachments = [_]vk.RenderingAttachmentInfoKHR{ +// .{ +// .image_view = image.view, +// .image_layout = .color_attachment_optimal, +// .resolve_mode = .{}, +// .resolve_image_view = .null_handle, +// .resolve_image_layout = .undefined, +// .load_op = .clear, +// .store_op = .store, +// .clear_value = clear, +// }, +// }; +// +// const render_info = vk.RenderingInfoKHR{ +// .render_area = scissor, // since we always do full-frame changes +// .layer_count = 1, +// .view_mask = 0, +// .color_attachment_count = color_attachments.len, +// .p_color_attachments = &color_attachments, +// }; +// +// vkd.cmdBeginRenderingKHR(cmdbuf, &render_info); +// +// vkd.cmdBindPipeline(cmdbuf, .graphics, pipeline); +// const offset = [_]vk.DeviceSize{0}; +// vkd.cmdBindVertexBuffers(cmdbuf, 0, 1, @ptrCast(&vertex_buffer), &offset); +// vkd.cmdBindIndexBuffer(cmdbuf, index_buffer, 0, .uint16); +// vkd.cmdDrawIndexed(cmdbuf, indices.len, 1, 0, 0, 0); +// +// vkd.cmdEndRenderingKHR(cmdbuf); +// +// vkd.cmdPipelineBarrier( +// cmdbuf, +// .{ .color_attachment_output_bit 
= true }, +// .{ .bottom_of_pipe_bit = true }, +// .{}, +// 0, +// null, +// 0, +// null, +// 1, +// @ptrCast(&vk.ImageMemoryBarrier{ +// .src_access_mask = .{ .color_attachment_write_bit = true }, +// .dst_access_mask = .{}, +// .old_layout = .color_attachment_optimal, +// .new_layout = .present_src_khr, +// .src_queue_family_index = 0, +// .dst_queue_family_index = 0, +// .image = image.image, +// .subresource_range = .{ +// .aspect_mask = .{ .color_bit = true }, +// .base_mip_level = 0, +// .level_count = 1, +// .base_array_layer = 0, +// .layer_count = 1, +// }, +// }), +// ); +// +// try vkd.endCommandBuffer(cmdbuf); +// } +// +// return cmdbufs; +// } - const cmdbufs = try allocator.alloc(vk.CommandBuffer, swapchain.swap_images.len); - errdefer allocator.free(cmdbufs); +// fn destroyCommandBuffers(gc: *const Context, pool: vk.CommandPool, allocator: Allocator, cmdbufs: []vk.CommandBuffer) void { +// vkd.freeCommandBuffers(dev, pool, @truncate(cmdbufs.len), cmdbufs.ptr); +// allocator.free(cmdbufs); +// } - try gc.vkd.allocateCommandBuffers(gc.dev, &.{ - .command_pool = pool, - .level = .primary, - .command_buffer_count = @as(u32, @truncate(cmdbufs.len)), - }, cmdbufs.ptr); - errdefer gc.vkd.freeCommandBuffers(gc.dev, pool, @truncate(cmdbufs.len), cmdbufs.ptr); - - const clear = vk.ClearValue{ - .color = .{ .float_32 = .{ 0, 0, 0, 1 } }, - }; - - const viewport = vk.Viewport{ - .x = 0, - .y = 0, - .width = @as(f32, @floatFromInt(extent.width)), - .height = @as(f32, @floatFromInt(extent.height)), - .min_depth = 0, - .max_depth = 1, - }; - - const scissor = vk.Rect2D{ - .offset = .{ .x = 0, .y = 0 }, - .extent = extent, - }; - - for (cmdbufs, swapchain.swap_images) |cmdbuf, image| { - try gc.vkd.beginCommandBuffer(cmdbuf, &.{}); - - gc.vkd.cmdPipelineBarrier( - cmdbuf, - .{ .top_of_pipe_bit = true }, - .{ .color_attachment_output_bit = true }, - .{}, - 0, - null, - 0, - null, - 1, - @ptrCast(&vk.ImageMemoryBarrier{ - .src_access_mask = .{}, - .dst_access_mask = .{ .color_attachment_write_bit = true }, - .old_layout = .undefined, - .new_layout = .color_attachment_optimal, - .src_queue_family_index = 0, - .dst_queue_family_index = 0, - .image = image.image, - .subresource_range = .{ - .aspect_mask = .{ .color_bit = true }, - .base_mip_level = 0, - .level_count = 1, - .base_array_layer = 0, - .layer_count = 1, - }, - }), - ); - - gc.vkd.cmdSetViewport(cmdbuf, 0, 1, @ptrCast(&viewport)); - gc.vkd.cmdSetScissor(cmdbuf, 0, 1, @ptrCast(&scissor)); - - const color_attachments = [_]vk.RenderingAttachmentInfoKHR{ - .{ - .image_view = image.view, - .image_layout = .color_attachment_optimal, - .resolve_mode = .{}, - .resolve_image_view = .null_handle, - .resolve_image_layout = .undefined, - .load_op = .clear, - .store_op = .store, - .clear_value = clear, - }, - }; - - const render_info = vk.RenderingInfoKHR{ - .render_area = scissor, // since we always do full-frame changes - .layer_count = 1, - .view_mask = 0, - .color_attachment_count = color_attachments.len, - .p_color_attachments = &color_attachments, - }; - - gc.vkd.cmdBeginRenderingKHR(cmdbuf, &render_info); - - gc.vkd.cmdBindPipeline(cmdbuf, .graphics, pipeline); - const offset = [_]vk.DeviceSize{0}; - gc.vkd.cmdBindVertexBuffers(cmdbuf, 0, 1, @ptrCast(&vertex_buffer), &offset); - gc.vkd.cmdBindIndexBuffer(cmdbuf, index_buffer, 0, .uint16); - gc.vkd.cmdDrawIndexed(cmdbuf, indices.len, 1, 0, 0, 0); - - gc.vkd.cmdEndRenderingKHR(cmdbuf); - - gc.vkd.cmdPipelineBarrier( - cmdbuf, - .{ .color_attachment_output_bit = true }, - .{ 
.bottom_of_pipe_bit = true }, - .{}, - 0, - null, - 0, - null, - 1, - @ptrCast(&vk.ImageMemoryBarrier{ - .src_access_mask = .{ .color_attachment_write_bit = true }, - .dst_access_mask = .{}, - .old_layout = .color_attachment_optimal, - .new_layout = .present_src_khr, - .src_queue_family_index = 0, - .dst_queue_family_index = 0, - .image = image.image, - .subresource_range = .{ - .aspect_mask = .{ .color_bit = true }, - .base_mip_level = 0, - .level_count = 1, - .base_array_layer = 0, - .layer_count = 1, - }, - }), - ); - - try gc.vkd.endCommandBuffer(cmdbuf); - } - - return cmdbufs; -} - -fn destroyCommandBuffers(gc: *const Context, pool: vk.CommandPool, allocator: Allocator, cmdbufs: []vk.CommandBuffer) void { - gc.vkd.freeCommandBuffers(gc.dev, pool, @truncate(cmdbufs.len), cmdbufs.ptr); - allocator.free(cmdbufs); -} - -fn createPipeline(gc: *const Context, layout: vk.PipelineLayout, format: vk.SurfaceFormatKHR) !vk.Pipeline { - const vert = try gc.vkd.createShaderModule(gc.dev, &.{ +fn createPipeline(dev: vk.Device, layout: vk.PipelineLayout, format: vk.SurfaceFormatKHR, vkd: gfx.DeviceDispatch) !vk.Pipeline { + const vert = try vkd.createShaderModule(dev, &.{ .code_size = shaders.triangle_vert.len, .p_code = @as([*]const u32, @ptrCast(&shaders.triangle_vert)), }, null); - defer gc.vkd.destroyShaderModule(gc.dev, vert, null); + defer vkd.destroyShaderModule(dev, vert, null); - const frag = try gc.vkd.createShaderModule(gc.dev, &.{ + const frag = try vkd.createShaderModule(dev, &.{ .code_size = shaders.triangle_frag.len, .p_code = @as([*]const u32, @ptrCast(&shaders.triangle_frag)), }, null); - defer gc.vkd.destroyShaderModule(gc.dev, frag, null); + defer vkd.destroyShaderModule(dev, frag, null); const pssci = [_]vk.PipelineShaderStageCreateInfo{ .{ @@ -653,46 +668,6 @@ fn createPipeline(gc: *const Context, layout: vk.PipelineLayout, format: vk.Surf }, }; - const pvisci = vk.PipelineVertexInputStateCreateInfo{ - .vertex_binding_description_count = 1, - .p_vertex_binding_descriptions = @ptrCast(&Vertex.binding_description), - .vertex_attribute_description_count = Vertex.attribute_description.len, - .p_vertex_attribute_descriptions = &Vertex.attribute_description, - }; - - const piasci = vk.PipelineInputAssemblyStateCreateInfo{ - .topology = .triangle_list, - .primitive_restart_enable = vk.FALSE, - }; - - const pvsci = vk.PipelineViewportStateCreateInfo{ - .viewport_count = 1, - .p_viewports = undefined, // set in createCommandBuffers with cmdSetViewport - .scissor_count = 1, - .p_scissors = undefined, // set in createCommandBuffers with cmdSetScissor - }; - - const prsci = vk.PipelineRasterizationStateCreateInfo{ - .depth_clamp_enable = vk.FALSE, - .rasterizer_discard_enable = vk.FALSE, - .polygon_mode = .fill, - .cull_mode = .{ .back_bit = true }, - .front_face = .counter_clockwise, - .depth_bias_enable = vk.FALSE, - .depth_bias_constant_factor = 0, - .depth_bias_clamp = 0, - .depth_bias_slope_factor = 0, - .line_width = 1, - }; - - const pmsci = vk.PipelineMultisampleStateCreateInfo{ - .rasterization_samples = .{ .@"1_bit" = true }, - .sample_shading_enable = vk.FALSE, - .min_sample_shading = 1, - .alpha_to_coverage_enable = vk.FALSE, - .alpha_to_one_enable = vk.FALSE, - }; - const pcbas = vk.PipelineColorBlendAttachmentState{ .blend_enable = vk.FALSE, .src_color_blend_factor = .one, @@ -704,58 +679,107 @@ fn createPipeline(gc: *const Context, layout: vk.PipelineLayout, format: vk.Surf .color_write_mask = .{ .r_bit = true, .g_bit = true, .b_bit = true, .a_bit = true }, }; - const pcbsci 
= vk.PipelineColorBlendStateCreateInfo{ - .logic_op_enable = vk.FALSE, - .logic_op = .copy, - .attachment_count = 1, - .p_attachments = @ptrCast(&pcbas), - .blend_constants = [_]f32{ 0, 0, 0, 0 }, - }; - const dynstate = [_]vk.DynamicState{ .viewport, .scissor }; - const pdsci = vk.PipelineDynamicStateCreateInfo{ - .flags = .{}, - .dynamic_state_count = dynstate.len, - .p_dynamic_states = &dynstate, - }; - - const prci = vk.PipelineRenderingCreateInfoKHR{ - .color_attachment_count = 1, - .p_color_attachment_formats = @ptrCast(&format), - .depth_attachment_format = .undefined, - .stencil_attachment_format = .undefined, - .view_mask = 0, - }; const gpci = vk.GraphicsPipelineCreateInfo{ .flags = .{}, - .stage_count = 2, + .stage_count = @intCast(pssci.len), .p_stages = &pssci, - .p_vertex_input_state = &pvisci, - .p_input_assembly_state = &piasci, + .p_vertex_input_state = &vk.PipelineVertexInputStateCreateInfo{ + .vertex_binding_description_count = 1, + .p_vertex_binding_descriptions = @ptrCast(&Vertex.binding_description), + .vertex_attribute_description_count = Vertex.attribute_description.len, + .p_vertex_attribute_descriptions = &Vertex.attribute_description, + }, + .p_input_assembly_state = &vk.PipelineInputAssemblyStateCreateInfo{ + .topology = .triangle_list, + .primitive_restart_enable = vk.FALSE, + }, .p_tessellation_state = null, - .p_viewport_state = &pvsci, - .p_rasterization_state = &prsci, - .p_multisample_state = &pmsci, + .p_viewport_state = &vk.PipelineViewportStateCreateInfo{ + .viewport_count = 1, + .p_viewports = undefined, // set in createCommandBuffers with cmdSetViewport + .scissor_count = 1, + .p_scissors = undefined, // set in createCommandBuffers with cmdSetScissor + }, + .p_rasterization_state = &vk.PipelineRasterizationStateCreateInfo{ + .depth_clamp_enable = vk.FALSE, + .rasterizer_discard_enable = vk.FALSE, + .polygon_mode = .fill, + .cull_mode = .{ .back_bit = true }, + .front_face = .counter_clockwise, + .depth_bias_enable = vk.FALSE, + .depth_bias_constant_factor = 0, + .depth_bias_clamp = 0, + .depth_bias_slope_factor = 0, + .line_width = 1, + }, + .p_multisample_state = &vk.PipelineMultisampleStateCreateInfo{ + .rasterization_samples = .{ .@"1_bit" = true }, + .sample_shading_enable = vk.FALSE, + .min_sample_shading = 1, + .alpha_to_coverage_enable = vk.FALSE, + .alpha_to_one_enable = vk.FALSE, + }, .p_depth_stencil_state = null, - .p_color_blend_state = &pcbsci, - .p_dynamic_state = &pdsci, + .p_color_blend_state = &vk.PipelineColorBlendStateCreateInfo{ + .logic_op_enable = vk.FALSE, + .logic_op = .copy, + .attachment_count = 1, + .p_attachments = @ptrCast(&pcbas), + .blend_constants = [_]f32{ 0, 0, 0, 0 }, + }, + .p_dynamic_state = &vk.PipelineDynamicStateCreateInfo{ + .flags = .{}, + .dynamic_state_count = dynstate.len, + .p_dynamic_states = &dynstate, + }, .layout = layout, .render_pass = .null_handle, .subpass = 0, .base_pipeline_handle = .null_handle, .base_pipeline_index = -1, - .p_next = &prci, + .p_next = &vk.PipelineRenderingCreateInfoKHR{ + .color_attachment_count = 1, + .p_color_attachment_formats = @ptrCast(&format), + .depth_attachment_format = .undefined, + .stencil_attachment_format = .undefined, + .view_mask = 0, + }, }; var pipeline: vk.Pipeline = undefined; - _ = try gc.vkd.createGraphicsPipelines( - gc.dev, - .null_handle, - 1, - @ptrCast(&gpci), - null, - @ptrCast(&pipeline), - ); + _ = try vkd.createGraphicsPipelines(dev, .null_handle, 1, @ptrCast(&gpci), null, @ptrCast(&pipeline)); return pipeline; } + +pub fn findMemoryTypeIndex( + 
pdev: vk.PhysicalDevice, + memory_type_bits: u32, + flags: vk.MemoryPropertyFlags, + vki: gfx.InstanceDispatch, +) !u32 { + const mem_props = vki.getPhysicalDeviceMemoryProperties(pdev); + + for (mem_props.memory_types[0..mem_props.memory_type_count], 0..) |mem_type, i| { + if (memory_type_bits & (@as(u32, 1) << @truncate(i)) != 0 and mem_type.property_flags.contains(flags)) { + return @truncate(i); + } + } + + return error.NoSuitableMemoryType; +} + +pub fn allocate( + pdev: vk.PhysicalDevice, + vki: gfx.InstanceDispatch, + dev: vk.Device, + vkd: gfx.DeviceDispatch, + requirements: vk.MemoryRequirements, + flags: vk.MemoryPropertyFlags, +) !vk.DeviceMemory { + return try vkd.allocateMemory(dev, &.{ + .allocation_size = requirements.size, + .memory_type_index = try findMemoryTypeIndex(pdev, requirements.memory_type_bits, flags, vki), + }, null); +} diff --git a/src/swapchain.zig b/src/swapchain.zig index b01d7ff..ed85604 100644 --- a/src/swapchain.zig +++ b/src/swapchain.zig @@ -14,33 +14,6 @@ pub const Context = struct { queue: vk.Queue, family: u32, - - pub fn findMemoryTypeIndex( - self: @This(), - memory_type_bits: u32, - flags: vk.MemoryPropertyFlags, - ) !u32 { - const mem_props = self.vki.getPhysicalDeviceMemoryProperties(self.pdev); - - for (mem_props.memory_types[0..mem_props.memory_type_count], 0..) |mem_type, i| { - if (memory_type_bits & (@as(u32, 1) << @truncate(i)) != 0 and mem_type.property_flags.contains(flags)) { - return @truncate(i); - } - } - - return error.NoSuitableMemoryType; - } - - pub fn allocate( - self: @This(), - requirements: vk.MemoryRequirements, - flags: vk.MemoryPropertyFlags, - ) !vk.DeviceMemory { - return try self.vkd.allocateMemory(self.dev, &.{ - .allocation_size = requirements.size, - .memory_type_index = try self.findMemoryTypeIndex(requirements.memory_type_bits, flags), - }, null); - } }; pub const Swapchain = struct { From 6f7378cf2138ccea8500f552a7327193535d08ec Mon Sep 17 00:00:00 2001 From: David Allemang Date: Mon, 1 Apr 2024 21:28:49 -0400 Subject: [PATCH 032/113] un-comment command buffer sections --- src/main.zig | 286 ++++++++++++++++++++++++++------------------------- 1 file changed, 148 insertions(+), 138 deletions(-) diff --git a/src/main.zig b/src/main.zig index 0ecee37..fd12717 100644 --- a/src/main.zig +++ b/src/main.zig @@ -502,145 +502,155 @@ fn copyBuffer( try vkd.queueWaitIdle(queue); } -// fn createCommandBuffers( -// pool: vk.CommandPool, -// allocator: Allocator, -// vertex_buffer: vk.Buffer, -// index_buffer: vk.Buffer, -// pipeline: vk.Pipeline, -// extent: vk.Extent2D, -// ) ![]vk.CommandBuffer { -// const cmdbufs = try allocator.alloc(vk.CommandBuffer, swapchain.swap_images.len); -// errdefer allocator.free(cmdbufs); -// -// try vkd.allocateCommandBuffers(dev, &.{ -// .command_pool = pool, -// .level = .primary, -// .command_buffer_count = @as(u32, @truncate(cmdbufs.len)), -// }, cmdbufs.ptr); -// errdefer vkd.freeCommandBuffers(dev, pool, @truncate(cmdbufs.len), cmdbufs.ptr); -// -// const clear = vk.ClearValue{ -// .color = .{ .float_32 = .{ 0, 0, 0, 1 } }, -// }; -// -// const viewport = vk.Viewport{ -// .x = 0, -// .y = 0, -// .width = @as(f32, @floatFromInt(extent.width)), -// .height = @as(f32, @floatFromInt(extent.height)), -// .min_depth = 0, -// .max_depth = 1, -// }; -// -// const scissor = vk.Rect2D{ -// .offset = .{ .x = 0, .y = 0 }, -// .extent = extent, -// }; -// -// for (cmdbufs, swapchain.swap_images) |cmdbuf, image| { -// try vkd.beginCommandBuffer(cmdbuf, &.{}); -// -// vkd.cmdPipelineBarrier( -// 
cmdbuf, -// .{ .top_of_pipe_bit = true }, -// .{ .color_attachment_output_bit = true }, -// .{}, -// 0, -// null, -// 0, -// null, -// 1, -// @ptrCast(&vk.ImageMemoryBarrier{ -// .src_access_mask = .{}, -// .dst_access_mask = .{ .color_attachment_write_bit = true }, -// .old_layout = .undefined, -// .new_layout = .color_attachment_optimal, -// .src_queue_family_index = 0, -// .dst_queue_family_index = 0, -// .image = image.image, -// .subresource_range = .{ -// .aspect_mask = .{ .color_bit = true }, -// .base_mip_level = 0, -// .level_count = 1, -// .base_array_layer = 0, -// .layer_count = 1, -// }, -// }), -// ); -// -// vkd.cmdSetViewport(cmdbuf, 0, 1, @ptrCast(&viewport)); -// vkd.cmdSetScissor(cmdbuf, 0, 1, @ptrCast(&scissor)); -// -// const color_attachments = [_]vk.RenderingAttachmentInfoKHR{ -// .{ -// .image_view = image.view, -// .image_layout = .color_attachment_optimal, -// .resolve_mode = .{}, -// .resolve_image_view = .null_handle, -// .resolve_image_layout = .undefined, -// .load_op = .clear, -// .store_op = .store, -// .clear_value = clear, -// }, -// }; -// -// const render_info = vk.RenderingInfoKHR{ -// .render_area = scissor, // since we always do full-frame changes -// .layer_count = 1, -// .view_mask = 0, -// .color_attachment_count = color_attachments.len, -// .p_color_attachments = &color_attachments, -// }; -// -// vkd.cmdBeginRenderingKHR(cmdbuf, &render_info); -// -// vkd.cmdBindPipeline(cmdbuf, .graphics, pipeline); -// const offset = [_]vk.DeviceSize{0}; -// vkd.cmdBindVertexBuffers(cmdbuf, 0, 1, @ptrCast(&vertex_buffer), &offset); -// vkd.cmdBindIndexBuffer(cmdbuf, index_buffer, 0, .uint16); -// vkd.cmdDrawIndexed(cmdbuf, indices.len, 1, 0, 0, 0); -// -// vkd.cmdEndRenderingKHR(cmdbuf); -// -// vkd.cmdPipelineBarrier( -// cmdbuf, -// .{ .color_attachment_output_bit = true }, -// .{ .bottom_of_pipe_bit = true }, -// .{}, -// 0, -// null, -// 0, -// null, -// 1, -// @ptrCast(&vk.ImageMemoryBarrier{ -// .src_access_mask = .{ .color_attachment_write_bit = true }, -// .dst_access_mask = .{}, -// .old_layout = .color_attachment_optimal, -// .new_layout = .present_src_khr, -// .src_queue_family_index = 0, -// .dst_queue_family_index = 0, -// .image = image.image, -// .subresource_range = .{ -// .aspect_mask = .{ .color_bit = true }, -// .base_mip_level = 0, -// .level_count = 1, -// .base_array_layer = 0, -// .layer_count = 1, -// }, -// }), -// ); -// -// try vkd.endCommandBuffer(cmdbuf); -// } -// -// return cmdbufs; -// } +fn createCommandBuffers( + views: []const vk.Image, + images: []const vk.ImageView, + dev: vk.Device, + vkd: gfx.DeviceDispatch, + pool: vk.CommandPool, + allocator: Allocator, + vertex_buffer: vk.Buffer, + index_buffer: vk.Buffer, + pipeline: vk.Pipeline, + extent: vk.Extent2D, +) ![]vk.CommandBuffer { + const cmdbufs = try allocator.alloc(vk.CommandBuffer, images.len); + errdefer allocator.free(cmdbufs); -// fn destroyCommandBuffers(gc: *const Context, pool: vk.CommandPool, allocator: Allocator, cmdbufs: []vk.CommandBuffer) void { -// vkd.freeCommandBuffers(dev, pool, @truncate(cmdbufs.len), cmdbufs.ptr); -// allocator.free(cmdbufs); -// } + try vkd.allocateCommandBuffers(dev, &.{ + .command_pool = pool, + .level = .primary, + .command_buffer_count = @as(u32, @truncate(cmdbufs.len)), + }, cmdbufs.ptr); + errdefer vkd.freeCommandBuffers(dev, pool, @truncate(cmdbufs.len), cmdbufs.ptr); + + const clear = vk.ClearValue{ + .color = .{ .float_32 = .{ 0, 0, 0, 1 } }, + }; + + const viewport = vk.Viewport{ + .x = 0, + .y = 0, + .width = @as(f32, 
@floatFromInt(extent.width)), + .height = @as(f32, @floatFromInt(extent.height)), + .min_depth = 0, + .max_depth = 1, + }; + + const scissor = vk.Rect2D{ + .offset = .{ .x = 0, .y = 0 }, + .extent = extent, + }; + + for (cmdbufs, images, views) |cmdbuf, image, view| { + try vkd.beginCommandBuffer(cmdbuf, &.{}); + + vkd.cmdPipelineBarrier( + cmdbuf, + .{ .top_of_pipe_bit = true }, + .{ .color_attachment_output_bit = true }, + .{}, + 0, + null, + 0, + null, + 1, + @ptrCast(&vk.ImageMemoryBarrier{ + .src_access_mask = .{}, + .dst_access_mask = .{ .color_attachment_write_bit = true }, + .old_layout = .undefined, + .new_layout = .color_attachment_optimal, + .src_queue_family_index = 0, + .dst_queue_family_index = 0, + .image = image, + .subresource_range = .{ + .aspect_mask = .{ .color_bit = true }, + .base_mip_level = 0, + .level_count = 1, + .base_array_layer = 0, + .layer_count = 1, + }, + }), + ); + + vkd.cmdSetViewport(cmdbuf, 0, 1, @ptrCast(&viewport)); + vkd.cmdSetScissor(cmdbuf, 0, 1, @ptrCast(&scissor)); + + const color_attachments = [_]vk.RenderingAttachmentInfoKHR{ + .{ + .image_view = view, + .image_layout = .color_attachment_optimal, + .resolve_mode = .{}, + .resolve_image_view = .null_handle, + .resolve_image_layout = .undefined, + .load_op = .clear, + .store_op = .store, + .clear_value = clear, + }, + }; + + const render_info = vk.RenderingInfoKHR{ + .render_area = scissor, // since we always do full-frame changes + .layer_count = 1, + .view_mask = 0, + .color_attachment_count = color_attachments.len, + .p_color_attachments = &color_attachments, + }; + + vkd.cmdBeginRenderingKHR(cmdbuf, &render_info); + + vkd.cmdBindPipeline(cmdbuf, .graphics, pipeline); + const offset = [_]vk.DeviceSize{0}; + vkd.cmdBindVertexBuffers(cmdbuf, 0, 1, @ptrCast(&vertex_buffer), &offset); + vkd.cmdBindIndexBuffer(cmdbuf, index_buffer, 0, .uint16); + vkd.cmdDrawIndexed(cmdbuf, indices.len, 1, 0, 0, 0); + + vkd.cmdEndRenderingKHR(cmdbuf); + + vkd.cmdPipelineBarrier( + cmdbuf, + .{ .color_attachment_output_bit = true }, + .{ .bottom_of_pipe_bit = true }, + .{}, + 0, + null, + 0, + null, + 1, + @ptrCast(&vk.ImageMemoryBarrier{ + .src_access_mask = .{ .color_attachment_write_bit = true }, + .dst_access_mask = .{}, + .old_layout = .color_attachment_optimal, + .new_layout = .present_src_khr, + .src_queue_family_index = 0, + .dst_queue_family_index = 0, + .image = image, + .subresource_range = .{ + .aspect_mask = .{ .color_bit = true }, + .base_mip_level = 0, + .level_count = 1, + .base_array_layer = 0, + .layer_count = 1, + }, + }), + ); + + try vkd.endCommandBuffer(cmdbuf); + } + + return cmdbufs; +} + +fn destroyCommandBuffers( + dev: vk.Device, + vkd: gfx.DeviceDispatch, + pool: vk.CommandPool, + allocator: Allocator, + cmdbufs: []vk.CommandBuffer, +) void { + vkd.freeCommandBuffers(dev, pool, @truncate(cmdbufs.len), cmdbufs.ptr); + allocator.free(cmdbufs); +} fn createPipeline(dev: vk.Device, layout: vk.PipelineLayout, format: vk.SurfaceFormatKHR, vkd: gfx.DeviceDispatch) !vk.Pipeline { const vert = try vkd.createShaderModule(dev, &.{ From 836daad0ae3690fdc01e435d16acf1e7142b40f3 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Mon, 1 Apr 2024 22:12:22 -0400 Subject: [PATCH 033/113] create swapchain, fetch images, image views --- src/main.zig | 121 ++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 105 insertions(+), 16 deletions(-) diff --git a/src/main.zig b/src/main.zig index fd12717..9427a78 100644 --- a/src/main.zig +++ b/src/main.zig @@ -246,12 +246,12 @@ fn create_device( fn 
find_surface_format( pdev: vk.PhysicalDevice, + vki: gfx.InstanceDispatch, surface: vk.SurfaceKHR, preferred: vk.SurfaceFormatKHR, - vki: gfx.InstanceDispatch, ) !vk.SurfaceFormatKHR { var formats_buf: [64]vk.SurfaceFormatKHR = undefined; - var formats_count: u32 = 64; + var formats_count: u32 = @intCast(formats_buf.len); _ = try vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &formats_count, &formats_buf); const formats = formats_buf[0..formats_count]; @@ -264,6 +264,57 @@ fn find_surface_format( return formats[0]; } +fn find_present_mode( + pdev: vk.PhysicalDevice, + vki: gfx.InstanceDispatch, + surface: vk.SurfaceKHR, + preferred: vk.PresentModeKHR, +) !vk.PresentModeKHR { + var modes_buf: [8]vk.PresentModeKHR = undefined; + var modes_count: u32 = @intCast(modes_buf.len); + _ = try vki.getPhysicalDeviceSurfacePresentModesKHR(pdev, surface, &modes_count, &modes_buf); + const modes = modes_buf[0..modes_count]; + + for (modes) |mode| { + if (std.meta.eql(mode, preferred)) { + return mode; + } + } + + return .mailbox_khr; +} + +fn find_swap_extent( + pdev: vk.PhysicalDevice, + vki: gfx.InstanceDispatch, + surface: vk.SurfaceKHR, + window: *c.GLFWwindow, +) !vk.Extent2D { + const caps = try vki.getPhysicalDeviceSurfaceCapabilitiesKHR(pdev, surface); + var extent = caps.current_extent; + + if (extent.width == std.math.maxInt(u32)) { + c.glfwGetFramebufferSize(window, @ptrCast(&extent.width), @ptrCast(&extent.height)); + extent.width = std.math.clamp(extent.width, caps.min_image_extent.width, caps.max_image_extent.width); + extent.height = std.math.clamp(extent.height, caps.min_image_extent.height, caps.max_image_extent.height); + } + + return extent; +} + +fn find_swap_image_count( + pdev: vk.PhysicalDevice, + vki: gfx.InstanceDispatch, + surface: vk.SurfaceKHR, +) !u32 { + const caps = try vki.getPhysicalDeviceSurfaceCapabilitiesKHR(pdev, surface); + var count = caps.min_image_count + 1; + if (caps.max_image_count > 0) { + count = @min(count, caps.max_image_count); + } + return count; +} + pub fn main() !void { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; defer _ = gpa.deinit(); @@ -302,22 +353,60 @@ pub fn main() !void { .format = .b8g8r8a8_srgb, .color_space = .srgb_nonlinear_khr, }; - const format = try find_surface_format(pdev, surface, preferred_format, vki); + const format = try find_surface_format(pdev, vki, surface, preferred_format); + extent = try find_swap_extent(pdev, vki, surface, window); - _ = &extent; + const present_mode = try find_present_mode(pdev, vki, surface, .mailbox_khr); - // const gc: Context = .{ - // .vki = vki, - // .vkd = vkd, - // .pdev = pdev, - // .dev = dev, - // .surface = surface, - // .queue = queue, - // .family = family, - // }; - // - // var swapchain = try Swapchain.init(&gc, ally, extent, format); - // defer swapchain.deinit(); + const swap_image_count = try find_swap_image_count(pdev, vki, surface); + + var swapchain: vk.SwapchainKHR = .null_handle; + defer vkd.destroySwapchainKHR(dev, swapchain, null); + + var image_buf: [8]vk.Image = undefined; + @memset(&image_buf, .null_handle); + var images: []vk.Image = &.{}; + var image_views_buf: [8]vk.ImageView = undefined; + @memset(&image_views_buf, .null_handle); + var image_views: []vk.ImageView = &.{}; + + swapchain = try vkd.createSwapchainKHR(dev, &.{ + .surface = surface, + .min_image_count = swap_image_count, + .image_format = format.format, + .image_color_space = format.color_space, + .image_extent = extent, + .image_array_layers = 1, + .image_usage = .{ .color_attachment_bit = true }, 
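+        // exclusive sharing is safe here: create_device picks a single queue family that does both graphics and present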
+ .image_sharing_mode = .exclusive, + .pre_transform = .{ .identity_bit_khr = true }, + .composite_alpha = .{ .opaque_bit_khr = true }, + .present_mode = present_mode, + .clipped = vk.TRUE, + .old_swapchain = swapchain, + }, null); + + var image_count: u32 = @intCast(image_buf.len); + _ = try vkd.getSwapchainImagesKHR(dev, swapchain, &image_count, &image_buf); + images = image_buf[0..image_count]; + image_views = image_views_buf[0..image_count]; + defer for (image_views) |view| vkd.destroyImageView(dev, view, null); + + for (images, image_views) |image, *view| { + view.* = try vkd.createImageView(dev, &.{ + .image = image, + .view_type = .@"2d", + .format = format.format, + .components = .{ .r = .identity, .g = .identity, .b = .identity, .a = .identity }, + .subresource_range = .{ + .aspect_mask = .{ .color_bit = true }, + .base_mip_level = 0, + .level_count = 1, + .base_array_layer = 0, + .layer_count = 1, + }, + }, null); + } const pipeline_layout = try vkd.createPipelineLayout(dev, &.{ .flags = .{}, From 8e90619d6adca3651d8b86aeb73da4d2014c8f8e Mon Sep 17 00:00:00 2001 From: David Allemang Date: Mon, 1 Apr 2024 22:37:31 -0400 Subject: [PATCH 034/113] move noise to gfx.zig --- src/gfx.zig | 384 ++++++++++++++++++++++++++++++++++ src/main.zig | 566 +++++++++------------------------------------------ 2 files changed, 480 insertions(+), 470 deletions(-) diff --git a/src/gfx.zig b/src/gfx.zig index e0d132d..1975ef6 100644 --- a/src/gfx.zig +++ b/src/gfx.zig @@ -1,12 +1,396 @@ const std = @import("std"); const builtin = @import("builtin"); + const vk = @import("vk"); +const c = @import("c.zig"); pub const use_debug_messenger = switch (builtin.mode) { .Debug, .ReleaseSafe => true, .ReleaseSmall, .ReleaseFast => false, }; +const InstancePair = std.meta.Tuple(&.{ vk.Instance, InstanceDispatch, vk.DebugUtilsMessengerEXT }); + +/// note: destroy with vki.destroyInstance(instance, null) +pub fn create_instance(vkb: BaseDispatch, app_name: [*:0]const u8) !InstancePair { + var exts = std.BoundedArray([*:0]const u8, 32){}; + var layers = std.BoundedArray([*:0]const u8, 32){}; + + if (use_debug_messenger) { + try exts.appendSlice(&.{ + vk.extension_info.ext_debug_utils.name, + }); + + try layers.appendSlice(&.{ + "VK_LAYER_KHRONOS_validation", + }); + } + + var glfw_exts_count: u32 = 0; + const glfw_exts: [*]const [*:0]const u8 = + @ptrCast(c.glfwGetRequiredInstanceExtensions(&glfw_exts_count)); + try exts.appendSlice(glfw_exts[0..glfw_exts_count]); + + const dumci: vk.DebugUtilsMessengerCreateInfoEXT = .{ + .message_severity = .{ + .error_bit_ext = true, + .info_bit_ext = true, + .verbose_bit_ext = true, + .warning_bit_ext = true, + }, + .message_type = .{ + .device_address_binding_bit_ext = true, + .general_bit_ext = false, + .performance_bit_ext = true, + .validation_bit_ext = true, + }, + .pfn_user_callback = &debug_callback, + .p_user_data = null, + }; + + const instance = try vkb.createInstance(&vk.InstanceCreateInfo{ + .p_application_info = &vk.ApplicationInfo{ + .p_application_name = app_name, + .application_version = vk.makeApiVersion(0, 0, 0, 0), + .p_engine_name = app_name, + .engine_version = vk.makeApiVersion(0, 0, 0, 0), + .api_version = vk.API_VERSION_1_3, + }, + .enabled_extension_count = @intCast(exts.len), + .pp_enabled_extension_names = &exts.buffer, + .enabled_layer_count = @intCast(layers.len), + .pp_enabled_layer_names = &layers.buffer, + .p_next = if (use_debug_messenger) &dumci else null, + }, null); + const vki = try InstanceDispatch.load(instance, 
vkb.dispatch.vkGetInstanceProcAddr); + errdefer vki.destroyInstance(instance, null); + + const messenger: vk.DebugUtilsMessengerEXT = if (use_debug_messenger) + try vki.createDebugUtilsMessengerEXT(instance, &dumci, null) + else + .null_handle; + errdefer if (use_debug_messenger) + vki.destroyDebugUtilsMessengerEXT(instance, messenger, null); + + return .{ instance, vki, messenger }; +} + +/// note: destroy with vki.destroySurfaceKHR(instance, surface, null) +pub fn create_surface(instance: vk.Instance, window: *c.GLFWwindow) !vk.SurfaceKHR { + var surface: vk.SurfaceKHR = undefined; + if (c.glfwCreateWindowSurface(instance, window, null, &surface) != .success) { + return error.SurfaceInitFailed; + } + return surface; +} + +/// note: destroy with c.glfwDestroyWindow(window) +pub fn create_window(extent: vk.Extent2D, title: [*:0]const u8) !*c.GLFWwindow { + c.glfwWindowHintString(c.GLFW_X11_CLASS_NAME, "floating_window"); + c.glfwWindowHintString(c.GLFW_X11_INSTANCE_NAME, "floating_window"); + c.glfwWindowHint(c.GLFW_CLIENT_API, c.GLFW_NO_API); + + return c.glfwCreateWindow( + @intCast(extent.width), + @intCast(extent.height), + title, + null, + null, + ) orelse error.WindowInitFailed; +} + +const DevicePair = std.meta.Tuple(&.{ vk.PhysicalDevice, vk.Device, DeviceDispatch, u32 }); + +/// note: destroy with vkd.destroyDevice(dev, null) +pub fn create_device( + ally: std.mem.Allocator, + instance: vk.Instance, + surface: vk.SurfaceKHR, + vki: InstanceDispatch, +) !DevicePair { + const required_device_extensions: []const [*:0]const u8 = &.{ + vk.extension_info.khr_swapchain.name, + vk.extension_info.khr_dynamic_rendering.name, + }; + + var pdev_count: u32 = undefined; + _ = try vki.enumeratePhysicalDevices(instance, &pdev_count, null); + const pdevs = try ally.alloc(vk.PhysicalDevice, pdev_count); + defer ally.free(pdevs); + _ = try vki.enumeratePhysicalDevices(instance, &pdev_count, pdevs.ptr); + + pdev_search: for (pdevs) |pdev| { + const props = vki.getPhysicalDeviceProperties(pdev); + if (props.device_type != .discrete_gpu) continue :pdev_search; + + var format_count: u32 = undefined; + _ = try vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &format_count, null); + if (format_count == 0) continue :pdev_search; + + var mode_count: u32 = undefined; + _ = try vki.getPhysicalDeviceSurfacePresentModesKHR(pdev, surface, &mode_count, null); + if (mode_count == 0) continue :pdev_search; + + var ext_count: u32 = undefined; + _ = try vki.enumerateDeviceExtensionProperties(pdev, null, &ext_count, null); + const exts = try ally.alloc(vk.ExtensionProperties, ext_count); + defer ally.free(exts); + _ = try vki.enumerateDeviceExtensionProperties(pdev, null, &ext_count, exts.ptr); + + for (required_device_extensions) |name| { + for (exts) |ext| { + if (std.mem.eql( + u8, + std.mem.span(name), + std.mem.sliceTo(&ext.extension_name, 0), + )) { + break; + } + } else { + continue :pdev_search; + } + } + + var family_count: u32 = undefined; + vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, null); + const families = try ally.alloc(vk.QueueFamilyProperties, family_count); + defer ally.free(families); + vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, families.ptr); + + // just find one family that does graphics and present, so we can use exclusive sharing + // on the swapchain. apparently most hardware supports this. logic for queue allocation + // and swapchain creation is so much simpler this way. 
swapchain creation needs to know + // the list of queue family indices which will have access to the images, and there's a + // performance penalty to allow concurrent access to multiple queue families. + // + // multiple _queues_ may have exclusive access, but only if they're in the smae family. + + const graphics_family: u32 = for (families, 0..) |family, idx| { + const graphics = family.queue_flags.graphics_bit; + const present = try vki.getPhysicalDeviceSurfaceSupportKHR(pdev, @intCast(idx), surface) == vk.TRUE; + if (graphics and present) { + break @intCast(idx); + } + } else { + continue :pdev_search; + }; + + std.log.debug("selecting device {s}", .{std.mem.sliceTo(&props.device_name, 0)}); + + const qci: []const vk.DeviceQueueCreateInfo = &.{ + vk.DeviceQueueCreateInfo{ + .queue_family_index = graphics_family, + .queue_count = 1, + .p_queue_priorities = &[_]f32{1.0}, + }, + }; + + const dev = try vki.createDevice(pdev, &.{ + .queue_create_info_count = @intCast(qci.len), + .p_queue_create_infos = qci.ptr, + .enabled_extension_count = @intCast(required_device_extensions.len), + .pp_enabled_extension_names = required_device_extensions.ptr, + .p_next = &vk.PhysicalDeviceDynamicRenderingFeaturesKHR{ + .dynamic_rendering = vk.TRUE, + }, + }, null); + const vkd = try DeviceDispatch.load(dev, vki.dispatch.vkGetDeviceProcAddr); + errdefer vkd.destroyDevice(dev, null); + + return .{ pdev, dev, vkd, graphics_family }; + } + + return error.NoSuitableDevice; +} + +pub fn find_surface_format( + pdev: vk.PhysicalDevice, + vki: InstanceDispatch, + surface: vk.SurfaceKHR, + preferred: vk.SurfaceFormatKHR, +) !vk.SurfaceFormatKHR { + var formats_buf: [64]vk.SurfaceFormatKHR = undefined; + var formats_count: u32 = @intCast(formats_buf.len); + _ = try vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &formats_count, &formats_buf); + const formats = formats_buf[0..formats_count]; + + for (formats) |format| { + if (std.meta.eql(format, preferred)) { + return format; + } + } + + return formats[0]; +} + +pub fn find_present_mode( + pdev: vk.PhysicalDevice, + vki: InstanceDispatch, + surface: vk.SurfaceKHR, + preferred: vk.PresentModeKHR, +) !vk.PresentModeKHR { + var modes_buf: [8]vk.PresentModeKHR = undefined; + var modes_count: u32 = @intCast(modes_buf.len); + _ = try vki.getPhysicalDeviceSurfacePresentModesKHR(pdev, surface, &modes_count, &modes_buf); + const modes = modes_buf[0..modes_count]; + + for (modes) |mode| { + if (std.meta.eql(mode, preferred)) { + return mode; + } + } + + return .mailbox_khr; +} + +pub fn find_swap_extent( + pdev: vk.PhysicalDevice, + vki: InstanceDispatch, + surface: vk.SurfaceKHR, + window: *c.GLFWwindow, +) !vk.Extent2D { + const caps = try vki.getPhysicalDeviceSurfaceCapabilitiesKHR(pdev, surface); + var extent = caps.current_extent; + + if (extent.width == std.math.maxInt(u32)) { + c.glfwGetFramebufferSize(window, @ptrCast(&extent.width), @ptrCast(&extent.height)); + extent.width = std.math.clamp(extent.width, caps.min_image_extent.width, caps.max_image_extent.width); + extent.height = std.math.clamp(extent.height, caps.min_image_extent.height, caps.max_image_extent.height); + } + + return extent; +} + +pub fn find_swap_image_count( + pdev: vk.PhysicalDevice, + vki: InstanceDispatch, + surface: vk.SurfaceKHR, +) !u32 { + const caps = try vki.getPhysicalDeviceSurfaceCapabilitiesKHR(pdev, surface); + var count = caps.min_image_count + 1; + if (caps.max_image_count > 0) { + count = @min(count, caps.max_image_count); + } + return count; +} + +pub fn uploadData( + 
comptime T: type, + pdev: vk.PhysicalDevice, + vki: InstanceDispatch, + dev: vk.Device, + vkd: DeviceDispatch, + queue: vk.Queue, + pool: vk.CommandPool, + buffer: vk.Buffer, + source: []const T, +) !void { + // if (@typeInfo(T) == .Struct and @typeInfo(T).Struct.layout == .auto) @compileError("Requires defined T layout"); + + const size = @sizeOf(T) * source.len; + + const staging_buffer = try vkd.createBuffer(dev, &.{ + .size = size, + .usage = .{ .transfer_src_bit = true }, + .sharing_mode = .exclusive, + }, null); + defer vkd.destroyBuffer(dev, staging_buffer, null); + + const mem_reqs = vkd.getBufferMemoryRequirements(dev, staging_buffer); + const staging_memory = try allocate(pdev, vki, dev, vkd, mem_reqs, .{ + .host_visible_bit = true, + .host_coherent_bit = true, + }); + defer vkd.freeMemory(dev, staging_memory, null); + + try vkd.bindBufferMemory(dev, staging_buffer, staging_memory, 0); + + { + const data = try vkd.mapMemory(dev, staging_memory, 0, vk.WHOLE_SIZE, .{}); + defer vkd.unmapMemory(dev, staging_memory); + + const dest: [*]T = @ptrCast(@alignCast(data)); + @memcpy(dest, source); + } + + try copyBuffer(dev, queue, pool, buffer, staging_buffer, size, vkd); +} + +pub fn copyBuffer( + dev: vk.Device, + queue: vk.Queue, + pool: vk.CommandPool, + dst: vk.Buffer, + src: vk.Buffer, + size: vk.DeviceSize, + vkd: DeviceDispatch, +) !void { + var cmdbuf: vk.CommandBuffer = undefined; + try vkd.allocateCommandBuffers(dev, &.{ + .command_pool = pool, + .level = .primary, + .command_buffer_count = 1, + }, @ptrCast(&cmdbuf)); + defer vkd.freeCommandBuffers(dev, pool, 1, @ptrCast(&cmdbuf)); + + try vkd.beginCommandBuffer(cmdbuf, &.{ + .flags = .{ .one_time_submit_bit = true }, + }); + + const region = vk.BufferCopy{ + .src_offset = 0, + .dst_offset = 0, + .size = size, + }; + vkd.cmdCopyBuffer(cmdbuf, src, dst, 1, @ptrCast(®ion)); + + try vkd.endCommandBuffer(cmdbuf); + + const si = vk.SubmitInfo{ + .command_buffer_count = 1, + .p_command_buffers = @ptrCast(&cmdbuf), + .p_wait_dst_stage_mask = undefined, + }; + // creating and submitting a queue for every copy operation seems a bad idea for "streamed" data + // gonna want a way to send a copy operation WITH SYNCHRONIZATION PRIMITIVES on a particular queue + // see https://stackoverflow.com/a/62183243 + // + // this may be a misunderstanding on how submission works... + try vkd.queueSubmit(queue, 1, @ptrCast(&si), .null_handle); + try vkd.queueWaitIdle(queue); +} + +pub fn findMemoryTypeIndex( + pdev: vk.PhysicalDevice, + memory_type_bits: u32, + flags: vk.MemoryPropertyFlags, + vki: InstanceDispatch, +) !u32 { + const mem_props = vki.getPhysicalDeviceMemoryProperties(pdev); + + for (mem_props.memory_types[0..mem_props.memory_type_count], 0..) 
|mem_type, i| { + if (memory_type_bits & (@as(u32, 1) << @truncate(i)) != 0 and mem_type.property_flags.contains(flags)) { + return @truncate(i); + } + } + + return error.NoSuitableMemoryType; +} + +pub fn allocate( + pdev: vk.PhysicalDevice, + vki: InstanceDispatch, + dev: vk.Device, + vkd: DeviceDispatch, + requirements: vk.MemoryRequirements, + flags: vk.MemoryPropertyFlags, +) !vk.DeviceMemory { + return try vkd.allocateMemory(dev, &.{ + .allocation_size = requirements.size, + .memory_type_index = try findMemoryTypeIndex(pdev, requirements.memory_type_bits, flags, vki), + }, null); +} + pub const BaseDispatch = vk.BaseWrapper(.{ .createInstance = true, .getInstanceProcAddr = true, diff --git a/src/main.zig b/src/main.zig index 9427a78..e0a328c 100644 --- a/src/main.zig +++ b/src/main.zig @@ -50,271 +50,6 @@ const vertices = [_]Vertex{ const indices = [_]Index{ 4, 5, 6, 6, 5, 7 }; -const InstancePair = std.meta.Tuple(&.{ vk.Instance, gfx.InstanceDispatch, vk.DebugUtilsMessengerEXT }); - -/// note: destroy with vki.destroyInstance(instance, null) -fn create_instance(vkb: gfx.BaseDispatch) !InstancePair { - var exts = std.BoundedArray([*:0]const u8, 32){}; - var layers = std.BoundedArray([*:0]const u8, 32){}; - - if (gfx.use_debug_messenger) { - try exts.appendSlice(&.{ - vk.extension_info.ext_debug_utils.name, - }); - - try layers.appendSlice(&.{ - "VK_LAYER_KHRONOS_validation", - }); - } - - var glfw_exts_count: u32 = 0; - const glfw_exts: [*]const [*:0]const u8 = - @ptrCast(c.glfwGetRequiredInstanceExtensions(&glfw_exts_count)); - try exts.appendSlice(glfw_exts[0..glfw_exts_count]); - - const dumci: vk.DebugUtilsMessengerCreateInfoEXT = .{ - .message_severity = .{ - .error_bit_ext = true, - .info_bit_ext = true, - .verbose_bit_ext = true, - .warning_bit_ext = true, - }, - .message_type = .{ - .device_address_binding_bit_ext = true, - .general_bit_ext = false, - .performance_bit_ext = true, - .validation_bit_ext = true, - }, - .pfn_user_callback = &gfx.debug_callback, - .p_user_data = null, - }; - - const instance = try vkb.createInstance(&vk.InstanceCreateInfo{ - .p_application_info = &vk.ApplicationInfo{ - .p_application_name = app_name, - .application_version = vk.makeApiVersion(0, 0, 0, 0), - .p_engine_name = app_name, - .engine_version = vk.makeApiVersion(0, 0, 0, 0), - .api_version = vk.API_VERSION_1_3, - }, - .enabled_extension_count = @intCast(exts.len), - .pp_enabled_extension_names = &exts.buffer, - .enabled_layer_count = @intCast(layers.len), - .pp_enabled_layer_names = &layers.buffer, - .p_next = if (gfx.use_debug_messenger) &dumci else null, - }, null); - const vki = try gfx.InstanceDispatch.load(instance, vkb.dispatch.vkGetInstanceProcAddr); - errdefer vki.destroyInstance(instance, null); - - const messenger: vk.DebugUtilsMessengerEXT = if (gfx.use_debug_messenger) - try vki.createDebugUtilsMessengerEXT(instance, &dumci, null) - else - .null_handle; - errdefer if (gfx.use_debug_messenger) - vki.destroyDebugUtilsMessengerEXT(instance, messenger, null); - - return .{ instance, vki, messenger }; -} - -/// note: destroy with vki.destroySurfaceKHR(instance, surface, null) -fn create_surface(instance: vk.Instance, window: *c.GLFWwindow) !vk.SurfaceKHR { - var surface: vk.SurfaceKHR = undefined; - if (c.glfwCreateWindowSurface(instance, window, null, &surface) != .success) { - return error.SurfaceInitFailed; - } - return surface; -} - -/// note: destroy with c.glfwDestroyWindow(window) -fn create_window(extent: vk.Extent2D, title: [*:0]const u8) !*c.GLFWwindow { - 
c.glfwWindowHintString(c.GLFW_X11_CLASS_NAME, "floating_window"); - c.glfwWindowHintString(c.GLFW_X11_INSTANCE_NAME, "floating_window"); - c.glfwWindowHint(c.GLFW_CLIENT_API, c.GLFW_NO_API); - - return c.glfwCreateWindow( - @intCast(extent.width), - @intCast(extent.height), - title, - null, - null, - ) orelse error.WindowInitFailed; -} - -const DevicePair = std.meta.Tuple(&.{ vk.PhysicalDevice, vk.Device, gfx.DeviceDispatch, u32 }); - -/// note: destroy with vkd.destroyDevice(dev, null) -fn create_device( - ally: std.mem.Allocator, - instance: vk.Instance, - surface: vk.SurfaceKHR, - vki: gfx.InstanceDispatch, -) !DevicePair { - const required_device_extensions: []const [*:0]const u8 = &.{ - vk.extension_info.khr_swapchain.name, - vk.extension_info.khr_dynamic_rendering.name, - }; - - var pdev_count: u32 = undefined; - _ = try vki.enumeratePhysicalDevices(instance, &pdev_count, null); - const pdevs = try ally.alloc(vk.PhysicalDevice, pdev_count); - defer ally.free(pdevs); - _ = try vki.enumeratePhysicalDevices(instance, &pdev_count, pdevs.ptr); - - pdev_search: for (pdevs) |pdev| { - const props = vki.getPhysicalDeviceProperties(pdev); - if (props.device_type != .discrete_gpu) continue :pdev_search; - - var format_count: u32 = undefined; - _ = try vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &format_count, null); - if (format_count == 0) continue :pdev_search; - - var mode_count: u32 = undefined; - _ = try vki.getPhysicalDeviceSurfacePresentModesKHR(pdev, surface, &mode_count, null); - if (mode_count == 0) continue :pdev_search; - - var ext_count: u32 = undefined; - _ = try vki.enumerateDeviceExtensionProperties(pdev, null, &ext_count, null); - const exts = try ally.alloc(vk.ExtensionProperties, ext_count); - defer ally.free(exts); - _ = try vki.enumerateDeviceExtensionProperties(pdev, null, &ext_count, exts.ptr); - - for (required_device_extensions) |name| { - for (exts) |ext| { - if (std.mem.eql( - u8, - std.mem.span(name), - std.mem.sliceTo(&ext.extension_name, 0), - )) { - break; - } - } else { - continue :pdev_search; - } - } - - var family_count: u32 = undefined; - vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, null); - const families = try ally.alloc(vk.QueueFamilyProperties, family_count); - defer ally.free(families); - vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, families.ptr); - - // just find one family that does graphics and present, so we can use exclusive sharing - // on the swapchain. apparently most hardware supports this. logic for queue allocation - // and swapchain creation is so much simpler this way. swapchain creation needs to know - // the list of queue family indices which will have access to the images, and there's a - // performance penalty to allow concurrent access to multiple queue families. - // - // multiple _queues_ may have exclusive access, but only if they're in the smae family. - - const graphics_family: u32 = for (families, 0..) 
|family, idx| { - const graphics = family.queue_flags.graphics_bit; - const present = try vki.getPhysicalDeviceSurfaceSupportKHR(pdev, @intCast(idx), surface) == vk.TRUE; - if (graphics and present) { - break @intCast(idx); - } - } else { - continue :pdev_search; - }; - - std.log.debug("selecting device {s}", .{std.mem.sliceTo(&props.device_name, 0)}); - - const qci: []const vk.DeviceQueueCreateInfo = &.{ - vk.DeviceQueueCreateInfo{ - .queue_family_index = graphics_family, - .queue_count = 1, - .p_queue_priorities = &[_]f32{1.0}, - }, - }; - - const dev = try vki.createDevice(pdev, &.{ - .queue_create_info_count = @intCast(qci.len), - .p_queue_create_infos = qci.ptr, - .enabled_extension_count = @intCast(required_device_extensions.len), - .pp_enabled_extension_names = required_device_extensions.ptr, - .p_next = &vk.PhysicalDeviceDynamicRenderingFeaturesKHR{ - .dynamic_rendering = vk.TRUE, - }, - }, null); - const vkd = try gfx.DeviceDispatch.load(dev, vki.dispatch.vkGetDeviceProcAddr); - errdefer vkd.destroyDevice(dev, null); - - return .{ pdev, dev, vkd, graphics_family }; - } - - return error.NoSuitableDevice; -} - -fn find_surface_format( - pdev: vk.PhysicalDevice, - vki: gfx.InstanceDispatch, - surface: vk.SurfaceKHR, - preferred: vk.SurfaceFormatKHR, -) !vk.SurfaceFormatKHR { - var formats_buf: [64]vk.SurfaceFormatKHR = undefined; - var formats_count: u32 = @intCast(formats_buf.len); - _ = try vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &formats_count, &formats_buf); - const formats = formats_buf[0..formats_count]; - - for (formats) |format| { - if (std.meta.eql(format, preferred)) { - return format; - } - } - - return formats[0]; -} - -fn find_present_mode( - pdev: vk.PhysicalDevice, - vki: gfx.InstanceDispatch, - surface: vk.SurfaceKHR, - preferred: vk.PresentModeKHR, -) !vk.PresentModeKHR { - var modes_buf: [8]vk.PresentModeKHR = undefined; - var modes_count: u32 = @intCast(modes_buf.len); - _ = try vki.getPhysicalDeviceSurfacePresentModesKHR(pdev, surface, &modes_count, &modes_buf); - const modes = modes_buf[0..modes_count]; - - for (modes) |mode| { - if (std.meta.eql(mode, preferred)) { - return mode; - } - } - - return .mailbox_khr; -} - -fn find_swap_extent( - pdev: vk.PhysicalDevice, - vki: gfx.InstanceDispatch, - surface: vk.SurfaceKHR, - window: *c.GLFWwindow, -) !vk.Extent2D { - const caps = try vki.getPhysicalDeviceSurfaceCapabilitiesKHR(pdev, surface); - var extent = caps.current_extent; - - if (extent.width == std.math.maxInt(u32)) { - c.glfwGetFramebufferSize(window, @ptrCast(&extent.width), @ptrCast(&extent.height)); - extent.width = std.math.clamp(extent.width, caps.min_image_extent.width, caps.max_image_extent.width); - extent.height = std.math.clamp(extent.height, caps.min_image_extent.height, caps.max_image_extent.height); - } - - return extent; -} - -fn find_swap_image_count( - pdev: vk.PhysicalDevice, - vki: gfx.InstanceDispatch, - surface: vk.SurfaceKHR, -) !u32 { - const caps = try vki.getPhysicalDeviceSurfaceCapabilitiesKHR(pdev, surface); - var count = caps.min_image_count + 1; - if (caps.max_image_count > 0) { - count = @min(count, caps.max_image_count); - } - return count; -} - pub fn main() !void { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; defer _ = gpa.deinit(); @@ -330,21 +65,21 @@ pub fn main() !void { var extent = vk.Extent2D{ .width = 800, .height = 600 }; - const window = try create_window(extent, app_name); + const window = try gfx.create_window(extent, app_name); defer c.glfwDestroyWindow(window); const vkb = try 
gfx.BaseDispatch.load(c.glfwGetInstanceProcAddress); - const instance, const vki, const messenger = try create_instance(vkb); + const instance, const vki, const messenger = try gfx.create_instance(vkb, app_name); defer vki.destroyInstance(instance, null); defer if (gfx.use_debug_messenger) vki.destroyDebugUtilsMessengerEXT(instance, messenger, null); - const surface = try create_surface(instance, window); + const surface = try gfx.create_surface(instance, window); defer vki.destroySurfaceKHR(instance, surface, null); const pdev: vk.PhysicalDevice, const dev: vk.Device, const vkd: gfx.DeviceDispatch, const family: u32 = - try create_device(ally, instance, surface, vki); + try gfx.create_device(ally, instance, surface, vki); defer vkd.destroyDevice(dev, null); const queue = vkd.getDeviceQueue(dev, family, 0); @@ -353,12 +88,12 @@ pub fn main() !void { .format = .b8g8r8a8_srgb, .color_space = .srgb_nonlinear_khr, }; - const format = try find_surface_format(pdev, vki, surface, preferred_format); - extent = try find_swap_extent(pdev, vki, surface, window); + const format = try gfx.find_surface_format(pdev, vki, surface, preferred_format); + extent = try gfx.find_swap_extent(pdev, vki, surface, window); - const present_mode = try find_present_mode(pdev, vki, surface, .mailbox_khr); + const present_mode = try gfx.find_present_mode(pdev, vki, surface, .mailbox_khr); - const swap_image_count = try find_swap_image_count(pdev, vki, surface); + const swap_image_count = try gfx.find_swap_image_count(pdev, vki, surface); var swapchain: vk.SwapchainKHR = .null_handle; defer vkd.destroySwapchainKHR(dev, swapchain, null); @@ -432,11 +167,11 @@ pub fn main() !void { }, null); defer vkd.destroyBuffer(dev, vertex_buffer, null); const vertex_mem_reqs = vkd.getBufferMemoryRequirements(dev, vertex_buffer); - const vertex_memory = try allocate(pdev, vki, dev, vkd, vertex_mem_reqs, .{ .device_local_bit = true }); + const vertex_memory = try gfx.allocate(pdev, vki, dev, vkd, vertex_mem_reqs, .{ .device_local_bit = true }); defer vkd.freeMemory(dev, vertex_memory, null); try vkd.bindBufferMemory(dev, vertex_buffer, vertex_memory, 0); - try uploadData(Vertex, pdev, vki, dev, vkd, queue, pool, vertex_buffer, &vertices); + try gfx.uploadData(Vertex, pdev, vki, dev, vkd, queue, pool, vertex_buffer, &vertices); const index_buffer = try vkd.createBuffer(dev, &.{ .size = @sizeOf(@TypeOf(indices)), @@ -445,11 +180,11 @@ pub fn main() !void { }, null); defer vkd.destroyBuffer(dev, index_buffer, null); const index_mem_reqs = vkd.getBufferMemoryRequirements(dev, index_buffer); - const index_memory = try allocate(pdev, vki, dev, vkd, index_mem_reqs, .{ .device_local_bit = true }); + const index_memory = try gfx.allocate(pdev, vki, dev, vkd, index_mem_reqs, .{ .device_local_bit = true }); defer vkd.freeMemory(dev, index_memory, null); try vkd.bindBufferMemory(dev, index_buffer, index_memory, 0); - try uploadData(Index, pdev, vki, dev, vkd, queue, pool, index_buffer, &indices); + try gfx.uploadData(Index, pdev, vki, dev, vkd, queue, pool, index_buffer, &indices); // var cmdbufs = try createCommandBuffers( // &gc, @@ -505,92 +240,6 @@ pub fn main() !void { try vkd.deviceWaitIdle(dev); } -fn uploadData( - comptime T: type, - pdev: vk.PhysicalDevice, - vki: gfx.InstanceDispatch, - dev: vk.Device, - vkd: gfx.DeviceDispatch, - queue: vk.Queue, - pool: vk.CommandPool, - buffer: vk.Buffer, - source: []const T, -) !void { - // if (@typeInfo(T) == .Struct and @typeInfo(T).Struct.layout == .auto) @compileError("Requires defined T layout"); 
- - const size = @sizeOf(T) * source.len; - - const staging_buffer = try vkd.createBuffer(dev, &.{ - .size = size, - .usage = .{ .transfer_src_bit = true }, - .sharing_mode = .exclusive, - }, null); - defer vkd.destroyBuffer(dev, staging_buffer, null); - - const mem_reqs = vkd.getBufferMemoryRequirements(dev, staging_buffer); - const staging_memory = try allocate(pdev, vki, dev, vkd, mem_reqs, .{ - .host_visible_bit = true, - .host_coherent_bit = true, - }); - defer vkd.freeMemory(dev, staging_memory, null); - - try vkd.bindBufferMemory(dev, staging_buffer, staging_memory, 0); - - { - const data = try vkd.mapMemory(dev, staging_memory, 0, vk.WHOLE_SIZE, .{}); - defer vkd.unmapMemory(dev, staging_memory); - - const dest: [*]T = @ptrCast(@alignCast(data)); - @memcpy(dest, source); - } - - try copyBuffer(dev, queue, pool, buffer, staging_buffer, size, vkd); -} - -fn copyBuffer( - dev: vk.Device, - queue: vk.Queue, - pool: vk.CommandPool, - dst: vk.Buffer, - src: vk.Buffer, - size: vk.DeviceSize, - vkd: gfx.DeviceDispatch, -) !void { - var cmdbuf: vk.CommandBuffer = undefined; - try vkd.allocateCommandBuffers(dev, &.{ - .command_pool = pool, - .level = .primary, - .command_buffer_count = 1, - }, @ptrCast(&cmdbuf)); - defer vkd.freeCommandBuffers(dev, pool, 1, @ptrCast(&cmdbuf)); - - try vkd.beginCommandBuffer(cmdbuf, &.{ - .flags = .{ .one_time_submit_bit = true }, - }); - - const region = vk.BufferCopy{ - .src_offset = 0, - .dst_offset = 0, - .size = size, - }; - vkd.cmdCopyBuffer(cmdbuf, src, dst, 1, @ptrCast(®ion)); - - try vkd.endCommandBuffer(cmdbuf); - - const si = vk.SubmitInfo{ - .command_buffer_count = 1, - .p_command_buffers = @ptrCast(&cmdbuf), - .p_wait_dst_stage_mask = undefined, - }; - // creating and submitting a queue for every copy operation seems a bad idea for "streamed" data - // gonna want a way to send a copy operation WITH SYNCHRONIZATION PRIMITIVES on a particular queue - // see https://stackoverflow.com/a/62183243 - // - // this may be a misunderstanding on how submission works... 
- try vkd.queueSubmit(queue, 1, @ptrCast(&si), .null_handle); - try vkd.queueWaitIdle(queue); -} - fn createCommandBuffers( views: []const vk.Image, images: []const vk.ImageView, @@ -767,118 +416,95 @@ fn createPipeline(dev: vk.Device, layout: vk.PipelineLayout, format: vk.SurfaceF }, }; - const pcbas = vk.PipelineColorBlendAttachmentState{ - .blend_enable = vk.FALSE, - .src_color_blend_factor = .one, - .dst_color_blend_factor = .zero, - .color_blend_op = .add, - .src_alpha_blend_factor = .one, - .dst_alpha_blend_factor = .zero, - .alpha_blend_op = .add, - .color_write_mask = .{ .r_bit = true, .g_bit = true, .b_bit = true, .a_bit = true }, + const color_blend_attachment_states = [_]vk.PipelineColorBlendAttachmentState{ + vk.PipelineColorBlendAttachmentState{ + .blend_enable = vk.FALSE, + .src_color_blend_factor = .one, + .dst_color_blend_factor = .zero, + .color_blend_op = .add, + .src_alpha_blend_factor = .one, + .dst_alpha_blend_factor = .zero, + .alpha_blend_op = .add, + .color_write_mask = .{ .r_bit = true, .g_bit = true, .b_bit = true, .a_bit = true }, + }, }; - const dynstate = [_]vk.DynamicState{ .viewport, .scissor }; + const dynamic_states = [_]vk.DynamicState{ + .viewport, + .scissor, + }; - const gpci = vk.GraphicsPipelineCreateInfo{ - .flags = .{}, - .stage_count = @intCast(pssci.len), - .p_stages = &pssci, - .p_vertex_input_state = &vk.PipelineVertexInputStateCreateInfo{ - .vertex_binding_description_count = 1, - .p_vertex_binding_descriptions = @ptrCast(&Vertex.binding_description), - .vertex_attribute_description_count = Vertex.attribute_description.len, - .p_vertex_attribute_descriptions = &Vertex.attribute_description, - }, - .p_input_assembly_state = &vk.PipelineInputAssemblyStateCreateInfo{ - .topology = .triangle_list, - .primitive_restart_enable = vk.FALSE, - }, - .p_tessellation_state = null, - .p_viewport_state = &vk.PipelineViewportStateCreateInfo{ - .viewport_count = 1, - .p_viewports = undefined, // set in createCommandBuffers with cmdSetViewport - .scissor_count = 1, - .p_scissors = undefined, // set in createCommandBuffers with cmdSetScissor - }, - .p_rasterization_state = &vk.PipelineRasterizationStateCreateInfo{ - .depth_clamp_enable = vk.FALSE, - .rasterizer_discard_enable = vk.FALSE, - .polygon_mode = .fill, - .cull_mode = .{ .back_bit = true }, - .front_face = .counter_clockwise, - .depth_bias_enable = vk.FALSE, - .depth_bias_constant_factor = 0, - .depth_bias_clamp = 0, - .depth_bias_slope_factor = 0, - .line_width = 1, - }, - .p_multisample_state = &vk.PipelineMultisampleStateCreateInfo{ - .rasterization_samples = .{ .@"1_bit" = true }, - .sample_shading_enable = vk.FALSE, - .min_sample_shading = 1, - .alpha_to_coverage_enable = vk.FALSE, - .alpha_to_one_enable = vk.FALSE, - }, - .p_depth_stencil_state = null, - .p_color_blend_state = &vk.PipelineColorBlendStateCreateInfo{ - .logic_op_enable = vk.FALSE, - .logic_op = .copy, - .attachment_count = 1, - .p_attachments = @ptrCast(&pcbas), - .blend_constants = [_]f32{ 0, 0, 0, 0 }, - }, - .p_dynamic_state = &vk.PipelineDynamicStateCreateInfo{ + const create_infos = [_]vk.GraphicsPipelineCreateInfo{ + .{ .flags = .{}, - .dynamic_state_count = dynstate.len, - .p_dynamic_states = &dynstate, - }, - .layout = layout, - .render_pass = .null_handle, - .subpass = 0, - .base_pipeline_handle = .null_handle, - .base_pipeline_index = -1, - .p_next = &vk.PipelineRenderingCreateInfoKHR{ - .color_attachment_count = 1, - .p_color_attachment_formats = @ptrCast(&format), - .depth_attachment_format = .undefined, - 
.stencil_attachment_format = .undefined, - .view_mask = 0, + .stage_count = @intCast(pssci.len), + .p_stages = &pssci, + .p_vertex_input_state = &vk.PipelineVertexInputStateCreateInfo{ + .vertex_binding_description_count = 1, + .p_vertex_binding_descriptions = @ptrCast(&Vertex.binding_description), + .vertex_attribute_description_count = Vertex.attribute_description.len, + .p_vertex_attribute_descriptions = &Vertex.attribute_description, + }, + .p_input_assembly_state = &vk.PipelineInputAssemblyStateCreateInfo{ + .topology = .triangle_list, + .primitive_restart_enable = vk.FALSE, + }, + .p_tessellation_state = null, + .p_viewport_state = &vk.PipelineViewportStateCreateInfo{ + .viewport_count = 1, + .p_viewports = undefined, // set in createCommandBuffers with cmdSetViewport + .scissor_count = 1, + .p_scissors = undefined, // set in createCommandBuffers with cmdSetScissor + }, + .p_rasterization_state = &vk.PipelineRasterizationStateCreateInfo{ + .depth_clamp_enable = vk.FALSE, + .rasterizer_discard_enable = vk.FALSE, + .polygon_mode = .fill, + .cull_mode = .{ .back_bit = true }, + .front_face = .counter_clockwise, + .depth_bias_enable = vk.FALSE, + .depth_bias_constant_factor = 0, + .depth_bias_clamp = 0, + .depth_bias_slope_factor = 0, + .line_width = 1, + }, + .p_multisample_state = &vk.PipelineMultisampleStateCreateInfo{ + .rasterization_samples = .{ .@"1_bit" = true }, + .sample_shading_enable = vk.FALSE, + .min_sample_shading = 1, + .alpha_to_coverage_enable = vk.FALSE, + .alpha_to_one_enable = vk.FALSE, + }, + .p_depth_stencil_state = null, + .p_color_blend_state = &vk.PipelineColorBlendStateCreateInfo{ + .logic_op_enable = vk.FALSE, + .logic_op = .copy, + .attachment_count = @intCast(color_blend_attachment_states.len), + .p_attachments = &color_blend_attachment_states, + .blend_constants = [_]f32{ 0, 0, 0, 0 }, + }, + .p_dynamic_state = &vk.PipelineDynamicStateCreateInfo{ + .flags = .{}, + .dynamic_state_count = @intCast(dynamic_states.len), + .p_dynamic_states = &dynamic_states, + }, + .layout = layout, + .render_pass = .null_handle, + .subpass = 0, + .base_pipeline_handle = .null_handle, + .base_pipeline_index = -1, + .p_next = &vk.PipelineRenderingCreateInfoKHR{ + .color_attachment_count = 1, + .p_color_attachment_formats = @ptrCast(&format), + .depth_attachment_format = .undefined, + .stencil_attachment_format = .undefined, + .view_mask = 0, + }, }, }; - var pipeline: vk.Pipeline = undefined; - _ = try vkd.createGraphicsPipelines(dev, .null_handle, 1, @ptrCast(&gpci), null, @ptrCast(&pipeline)); - return pipeline; -} - -pub fn findMemoryTypeIndex( - pdev: vk.PhysicalDevice, - memory_type_bits: u32, - flags: vk.MemoryPropertyFlags, - vki: gfx.InstanceDispatch, -) !u32 { - const mem_props = vki.getPhysicalDeviceMemoryProperties(pdev); - - for (mem_props.memory_types[0..mem_props.memory_type_count], 0..) 
|mem_type, i| { - if (memory_type_bits & (@as(u32, 1) << @truncate(i)) != 0 and mem_type.property_flags.contains(flags)) { - return @truncate(i); - } - } - - return error.NoSuitableMemoryType; -} - -pub fn allocate( - pdev: vk.PhysicalDevice, - vki: gfx.InstanceDispatch, - dev: vk.Device, - vkd: gfx.DeviceDispatch, - requirements: vk.MemoryRequirements, - flags: vk.MemoryPropertyFlags, -) !vk.DeviceMemory { - return try vkd.allocateMemory(dev, &.{ - .allocation_size = requirements.size, - .memory_type_index = try findMemoryTypeIndex(pdev, requirements.memory_type_bits, flags, vki), - }, null); + var pipelines: [create_infos.len]vk.Pipeline = undefined; + _ = try vkd.createGraphicsPipelines(dev, .null_handle, @intCast(create_infos.len), &create_infos, null, &pipelines); + std.debug.assert(pipelines.len == 1); + return pipelines[0]; } From 790c7955c75da5a7d55dfa6d7c49100fb8a393d9 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Mon, 1 Apr 2024 23:44:36 -0400 Subject: [PATCH 035/113] swapchain with vulkan-tutorial --- src/gfx.zig | 2 +- src/main.zig | 325 ++++++++++++++++++++++++++++----------------------- 2 files changed, 180 insertions(+), 147 deletions(-) diff --git a/src/gfx.zig b/src/gfx.zig index 1975ef6..8ec9b96 100644 --- a/src/gfx.zig +++ b/src/gfx.zig @@ -267,7 +267,7 @@ pub fn find_swap_image_count( surface: vk.SurfaceKHR, ) !u32 { const caps = try vki.getPhysicalDeviceSurfaceCapabilitiesKHR(pdev, surface); - var count = caps.min_image_count + 1; + var count = @max(3, caps.min_image_count + 1); if (caps.max_image_count > 0) { count = @min(count, caps.max_image_count); } diff --git a/src/main.zig b/src/main.zig index e0a328c..8fceabd 100644 --- a/src/main.zig +++ b/src/main.zig @@ -84,6 +84,11 @@ pub fn main() !void { const queue = vkd.getDeviceQueue(dev, family, 0); + const pool = try vkd.createCommandPool(dev, &.{ + .queue_family_index = family, + }, null); + defer vkd.destroyCommandPool(dev, pool, null); + const preferred_format: vk.SurfaceFormatKHR = .{ .format = .b8g8r8a8_srgb, .color_space = .srgb_nonlinear_khr, @@ -98,12 +103,29 @@ pub fn main() !void { var swapchain: vk.SwapchainKHR = .null_handle; defer vkd.destroySwapchainKHR(dev, swapchain, null); - var image_buf: [8]vk.Image = undefined; - @memset(&image_buf, .null_handle); - var images: []vk.Image = &.{}; - var image_views_buf: [8]vk.ImageView = undefined; - @memset(&image_views_buf, .null_handle); - var image_views: []vk.ImageView = &.{}; + const ChainImage = struct { + image: vk.Image = .null_handle, + view: vk.ImageView = .null_handle, + cmdbuf: vk.CommandBuffer = .null_handle, + // fence: vk.Fence = .null_handle, + // image_available: vk.Semaphore = .null_handle, + // render_finished: vk.Semaphore = .null_handle, + }; + + var chain = std.MultiArrayList(ChainImage){}; + defer chain.deinit(ally); + defer vkd.freeCommandBuffers(dev, pool, @intCast(chain.len), chain.items(.cmdbuf).ptr); + defer for (chain.items(.view)) |view| vkd.destroyImageView(dev, view, null); + // defer for (chain.items(.fence)) |fence| vkd.destroyFence(dev, fence, null); + // defer for (chain.items(.image_available)) |sem| vkd.destroySemaphore(dev, sem, null); + // defer for (chain.items(.render_finished)) |sem| vkd.destroySemaphore(dev, sem, null); + + const frame_fence = try vkd.createFence(dev, &.{ .flags = .{ .signaled_bit = true } }, null); + defer vkd.destroyFence(dev, frame_fence, null); + const image_available = try vkd.createSemaphore(dev, &.{}, null); + defer vkd.destroySemaphore(dev, image_available, null); + const render_finished = 
try vkd.createSemaphore(dev, &.{}, null); + defer vkd.destroySemaphore(dev, render_finished, null); swapchain = try vkd.createSwapchainKHR(dev, &.{ .surface = surface, @@ -121,13 +143,12 @@ pub fn main() !void { .old_swapchain = swapchain, }, null); - var image_count: u32 = @intCast(image_buf.len); - _ = try vkd.getSwapchainImagesKHR(dev, swapchain, &image_count, &image_buf); - images = image_buf[0..image_count]; - image_views = image_views_buf[0..image_count]; - defer for (image_views) |view| vkd.destroyImageView(dev, view, null); + var image_count: u32 = undefined; + _ = try vkd.getSwapchainImagesKHR(dev, swapchain, &image_count, null); + try chain.resize(ally, image_count); + _ = try vkd.getSwapchainImagesKHR(dev, swapchain, &image_count, chain.items(.image).ptr); - for (images, image_views) |image, *view| { + for (chain.items(.image), chain.items(.view)) |image, *view| { view.* = try vkd.createImageView(dev, &.{ .image = image, .view_type = .@"2d", @@ -143,6 +164,24 @@ pub fn main() !void { }, null); } + // for (chain.items(.fence)) |*fence| { + // fence.* = try vkd.createFence(dev, &.{ .flags = .{ .signaled_bit = true } }, null); + // } + // + // for (chain.items(.image_available)) |*sem| { + // sem.* = try vkd.createSemaphore(dev, &.{}, null); + // } + // + // for (chain.items(.render_finished)) |*sem| { + // sem.* = try vkd.createSemaphore(dev, &.{}, null); + // } + + try vkd.allocateCommandBuffers(dev, &.{ + .command_buffer_count = @intCast(chain.len), + .command_pool = pool, + .level = .primary, + }, chain.items(.cmdbuf).ptr); + const pipeline_layout = try vkd.createPipelineLayout(dev, &.{ .flags = .{}, .set_layout_count = 0, @@ -155,11 +194,6 @@ pub fn main() !void { const pipeline = try createPipeline(dev, pipeline_layout, format, vkd); defer vkd.destroyPipeline(dev, pipeline, null); - const pool = try vkd.createCommandPool(dev, &.{ - .queue_family_index = family, - }, null); - defer vkd.destroyCommandPool(dev, pool, null); - const vertex_buffer = try vkd.createBuffer(dev, &.{ .size = @sizeOf(@TypeOf(vertices)), .usage = .{ .transfer_dst_bit = true, .vertex_buffer_bit = true }, @@ -186,16 +220,11 @@ pub fn main() !void { try gfx.uploadData(Index, pdev, vki, dev, vkd, queue, pool, index_buffer, &indices); - // var cmdbufs = try createCommandBuffers( - // &gc, - // pool, - // ally, - // vertex_buffer, - // index_buffer, - // pipeline, - // swapchain, - // ); - // defer destroyCommandBuffers(&gc, pool, ally, cmdbufs); + for (chain.items(.image), chain.items(.view), chain.items(.cmdbuf)) |image, view, cmdbuf| { + try record_cmdbuf(cmdbuf, vkd, image, view, extent, pipeline, vertex_buffer, index_buffer); + } + + // var index: u32 = 0; while (c.glfwWindowShouldClose(window) == c.GLFW_FALSE) { var w: c_int = undefined; @@ -208,6 +237,37 @@ pub fn main() !void { continue; } + _ = try vkd.waitForFences(dev, 1, @ptrCast(&frame_fence), vk.TRUE, std.math.maxInt(u64)); + try vkd.resetFences(dev, 1, @ptrCast(&frame_fence)); + + // const frame: ChainImage = chain.get(); + + // var index: u32 = undefined; + // try vkd.acquireNextImageKHR(dev, swapchain, std.math.maxInt(u64), frame., fence); + const result = try vkd.acquireNextImageKHR(dev, swapchain, std.math.maxInt(u64), image_available, .null_handle); + + // std.log.debug("frame {d}", .{result.image_index}); + const frame = chain.get(result.image_index); + + try vkd.queueSubmit(queue, 1, @ptrCast(&vk.SubmitInfo{ + .wait_semaphore_count = 1, + .p_wait_semaphores = @ptrCast(&image_available), + .p_wait_dst_stage_mask = 
@ptrCast(&vk.PipelineStageFlags{ .color_attachment_output_bit = true }), + .command_buffer_count = 1, + .p_command_buffers = @ptrCast(&frame.cmdbuf), + .signal_semaphore_count = 1, + .p_signal_semaphores = @ptrCast(&render_finished), + }), frame_fence); + + _ = try vkd.queuePresentKHR(queue, &.{ + .wait_semaphore_count = 1, + .p_wait_semaphores = @ptrCast(&render_finished), + .swapchain_count = 1, + .p_swapchains = @ptrCast(&swapchain), + .p_image_indices = @ptrCast(&result.image_index), + .p_results = null, + }); + // const cmdbuf = cmdbufs[swapchain.image_index]; // const state = swapchain.present(cmdbuf) catch |err| switch (err) { @@ -240,28 +300,16 @@ pub fn main() !void { try vkd.deviceWaitIdle(dev); } -fn createCommandBuffers( - views: []const vk.Image, - images: []const vk.ImageView, - dev: vk.Device, +fn record_cmdbuf( + cmdbuf: vk.CommandBuffer, vkd: gfx.DeviceDispatch, - pool: vk.CommandPool, - allocator: Allocator, + image: vk.Image, + view: vk.ImageView, + extent: vk.Extent2D, + pipeline: vk.Pipeline, vertex_buffer: vk.Buffer, index_buffer: vk.Buffer, - pipeline: vk.Pipeline, - extent: vk.Extent2D, -) ![]vk.CommandBuffer { - const cmdbufs = try allocator.alloc(vk.CommandBuffer, images.len); - errdefer allocator.free(cmdbufs); - - try vkd.allocateCommandBuffers(dev, &.{ - .command_pool = pool, - .level = .primary, - .command_buffer_count = @as(u32, @truncate(cmdbufs.len)), - }, cmdbufs.ptr); - errdefer vkd.freeCommandBuffers(dev, pool, @truncate(cmdbufs.len), cmdbufs.ptr); - +) !void { const clear = vk.ClearValue{ .color = .{ .float_32 = .{ 0, 0, 0, 1 } }, }; @@ -269,8 +317,8 @@ fn createCommandBuffers( const viewport = vk.Viewport{ .x = 0, .y = 0, - .width = @as(f32, @floatFromInt(extent.width)), - .height = @as(f32, @floatFromInt(extent.height)), + .width = @floatFromInt(extent.width), + .height = @floatFromInt(extent.height), .min_depth = 0, .max_depth = 1, }; @@ -280,114 +328,99 @@ fn createCommandBuffers( .extent = extent, }; - for (cmdbufs, images, views) |cmdbuf, image, view| { - try vkd.beginCommandBuffer(cmdbuf, &.{}); + try vkd.beginCommandBuffer(cmdbuf, &.{}); - vkd.cmdPipelineBarrier( - cmdbuf, - .{ .top_of_pipe_bit = true }, - .{ .color_attachment_output_bit = true }, - .{}, - 0, - null, - 0, - null, - 1, - @ptrCast(&vk.ImageMemoryBarrier{ - .src_access_mask = .{}, - .dst_access_mask = .{ .color_attachment_write_bit = true }, - .old_layout = .undefined, - .new_layout = .color_attachment_optimal, - .src_queue_family_index = 0, - .dst_queue_family_index = 0, - .image = image, - .subresource_range = .{ - .aspect_mask = .{ .color_bit = true }, - .base_mip_level = 0, - .level_count = 1, - .base_array_layer = 0, - .layer_count = 1, - }, - }), - ); - - vkd.cmdSetViewport(cmdbuf, 0, 1, @ptrCast(&viewport)); - vkd.cmdSetScissor(cmdbuf, 0, 1, @ptrCast(&scissor)); - - const color_attachments = [_]vk.RenderingAttachmentInfoKHR{ - .{ - .image_view = view, - .image_layout = .color_attachment_optimal, - .resolve_mode = .{}, - .resolve_image_view = .null_handle, - .resolve_image_layout = .undefined, - .load_op = .clear, - .store_op = .store, - .clear_value = clear, + vkd.cmdPipelineBarrier( + cmdbuf, + .{ .top_of_pipe_bit = true }, + .{ .color_attachment_output_bit = true }, + .{}, + 0, + null, + 0, + null, + 1, + @ptrCast(&vk.ImageMemoryBarrier{ + .src_access_mask = .{}, + .dst_access_mask = .{ .color_attachment_write_bit = true }, + .old_layout = .undefined, + .new_layout = .color_attachment_optimal, + .src_queue_family_index = 0, + .dst_queue_family_index = 0, + .image = image, 
+ .subresource_range = .{ + .aspect_mask = .{ .color_bit = true }, + .base_mip_level = 0, + .level_count = 1, + .base_array_layer = 0, + .layer_count = 1, }, - }; + }), + ); - const render_info = vk.RenderingInfoKHR{ - .render_area = scissor, // since we always do full-frame changes - .layer_count = 1, - .view_mask = 0, - .color_attachment_count = color_attachments.len, - .p_color_attachments = &color_attachments, - }; + vkd.cmdSetViewport(cmdbuf, 0, 1, @ptrCast(&viewport)); + vkd.cmdSetScissor(cmdbuf, 0, 1, @ptrCast(&scissor)); - vkd.cmdBeginRenderingKHR(cmdbuf, &render_info); + const color_attachments = [_]vk.RenderingAttachmentInfoKHR{ + .{ + .image_view = view, + .image_layout = .color_attachment_optimal, + .resolve_mode = .{}, + .resolve_image_view = .null_handle, + .resolve_image_layout = .undefined, + .load_op = .clear, + .store_op = .store, + .clear_value = clear, + }, + }; - vkd.cmdBindPipeline(cmdbuf, .graphics, pipeline); - const offset = [_]vk.DeviceSize{0}; - vkd.cmdBindVertexBuffers(cmdbuf, 0, 1, @ptrCast(&vertex_buffer), &offset); - vkd.cmdBindIndexBuffer(cmdbuf, index_buffer, 0, .uint16); - vkd.cmdDrawIndexed(cmdbuf, indices.len, 1, 0, 0, 0); + const render_info = vk.RenderingInfoKHR{ + .render_area = scissor, // since we always do full-frame changes + .layer_count = 1, + .view_mask = 0, + .color_attachment_count = color_attachments.len, + .p_color_attachments = &color_attachments, + }; - vkd.cmdEndRenderingKHR(cmdbuf); + vkd.cmdBeginRenderingKHR(cmdbuf, &render_info); - vkd.cmdPipelineBarrier( - cmdbuf, - .{ .color_attachment_output_bit = true }, - .{ .bottom_of_pipe_bit = true }, - .{}, - 0, - null, - 0, - null, - 1, - @ptrCast(&vk.ImageMemoryBarrier{ - .src_access_mask = .{ .color_attachment_write_bit = true }, - .dst_access_mask = .{}, - .old_layout = .color_attachment_optimal, - .new_layout = .present_src_khr, - .src_queue_family_index = 0, - .dst_queue_family_index = 0, - .image = image, - .subresource_range = .{ - .aspect_mask = .{ .color_bit = true }, - .base_mip_level = 0, - .level_count = 1, - .base_array_layer = 0, - .layer_count = 1, - }, - }), - ); + vkd.cmdBindPipeline(cmdbuf, .graphics, pipeline); + const offset = [_]vk.DeviceSize{0}; + vkd.cmdBindVertexBuffers(cmdbuf, 0, 1, @ptrCast(&vertex_buffer), &offset); + vkd.cmdBindIndexBuffer(cmdbuf, index_buffer, 0, .uint16); + vkd.cmdDrawIndexed(cmdbuf, indices.len, 1, 0, 0, 0); - try vkd.endCommandBuffer(cmdbuf); - } + vkd.cmdEndRenderingKHR(cmdbuf); - return cmdbufs; -} + vkd.cmdPipelineBarrier( + cmdbuf, + .{ .color_attachment_output_bit = true }, + .{ .bottom_of_pipe_bit = true }, + .{}, + 0, + null, + 0, + null, + 1, + @ptrCast(&vk.ImageMemoryBarrier{ + .src_access_mask = .{ .color_attachment_write_bit = true }, + .dst_access_mask = .{}, + .old_layout = .color_attachment_optimal, + .new_layout = .present_src_khr, + .src_queue_family_index = 0, + .dst_queue_family_index = 0, + .image = image, + .subresource_range = .{ + .aspect_mask = .{ .color_bit = true }, + .base_mip_level = 0, + .level_count = 1, + .base_array_layer = 0, + .layer_count = 1, + }, + }), + ); -fn destroyCommandBuffers( - dev: vk.Device, - vkd: gfx.DeviceDispatch, - pool: vk.CommandPool, - allocator: Allocator, - cmdbufs: []vk.CommandBuffer, -) void { - vkd.freeCommandBuffers(dev, pool, @truncate(cmdbufs.len), cmdbufs.ptr); - allocator.free(cmdbufs); + try vkd.endCommandBuffer(cmdbuf); } fn createPipeline(dev: vk.Device, layout: vk.PipelineLayout, format: vk.SurfaceFormatKHR, vkd: gfx.DeviceDispatch) !vk.Pipeline { From 
0bcc87adc33970de359257fc428e4acc0b24bb9f Mon Sep 17 00:00:00 2001 From: David Allemang Date: Mon, 1 Apr 2024 23:49:26 -0400 Subject: [PATCH 036/113] frames in flight --- src/main.zig | 66 ++++++++++++++++++++++++---------------------------- 1 file changed, 31 insertions(+), 35 deletions(-) diff --git a/src/main.zig b/src/main.zig index 8fceabd..5a5accf 100644 --- a/src/main.zig +++ b/src/main.zig @@ -107,25 +107,18 @@ pub fn main() !void { image: vk.Image = .null_handle, view: vk.ImageView = .null_handle, cmdbuf: vk.CommandBuffer = .null_handle, - // fence: vk.Fence = .null_handle, - // image_available: vk.Semaphore = .null_handle, - // render_finished: vk.Semaphore = .null_handle, + fence: vk.Fence = .null_handle, + image_available: vk.Semaphore = .null_handle, + render_finished: vk.Semaphore = .null_handle, }; var chain = std.MultiArrayList(ChainImage){}; defer chain.deinit(ally); defer vkd.freeCommandBuffers(dev, pool, @intCast(chain.len), chain.items(.cmdbuf).ptr); defer for (chain.items(.view)) |view| vkd.destroyImageView(dev, view, null); - // defer for (chain.items(.fence)) |fence| vkd.destroyFence(dev, fence, null); - // defer for (chain.items(.image_available)) |sem| vkd.destroySemaphore(dev, sem, null); - // defer for (chain.items(.render_finished)) |sem| vkd.destroySemaphore(dev, sem, null); - - const frame_fence = try vkd.createFence(dev, &.{ .flags = .{ .signaled_bit = true } }, null); - defer vkd.destroyFence(dev, frame_fence, null); - const image_available = try vkd.createSemaphore(dev, &.{}, null); - defer vkd.destroySemaphore(dev, image_available, null); - const render_finished = try vkd.createSemaphore(dev, &.{}, null); - defer vkd.destroySemaphore(dev, render_finished, null); + defer for (chain.items(.fence)) |fence| vkd.destroyFence(dev, fence, null); + defer for (chain.items(.image_available)) |sem| vkd.destroySemaphore(dev, sem, null); + defer for (chain.items(.render_finished)) |sem| vkd.destroySemaphore(dev, sem, null); swapchain = try vkd.createSwapchainKHR(dev, &.{ .surface = surface, @@ -164,17 +157,17 @@ pub fn main() !void { }, null); } - // for (chain.items(.fence)) |*fence| { - // fence.* = try vkd.createFence(dev, &.{ .flags = .{ .signaled_bit = true } }, null); - // } - // - // for (chain.items(.image_available)) |*sem| { - // sem.* = try vkd.createSemaphore(dev, &.{}, null); - // } - // - // for (chain.items(.render_finished)) |*sem| { - // sem.* = try vkd.createSemaphore(dev, &.{}, null); - // } + for (chain.items(.fence)) |*fence| { + fence.* = try vkd.createFence(dev, &.{ .flags = .{ .signaled_bit = true } }, null); + } + + for (chain.items(.image_available)) |*sem| { + sem.* = try vkd.createSemaphore(dev, &.{}, null); + } + + for (chain.items(.render_finished)) |*sem| { + sem.* = try vkd.createSemaphore(dev, &.{}, null); + } try vkd.allocateCommandBuffers(dev, &.{ .command_buffer_count = @intCast(chain.len), @@ -224,7 +217,7 @@ pub fn main() !void { try record_cmdbuf(cmdbuf, vkd, image, view, extent, pipeline, vertex_buffer, index_buffer); } - // var index: u32 = 0; + var index: u32 = 0; while (c.glfwWindowShouldClose(window) == c.GLFW_FALSE) { var w: c_int = undefined; @@ -237,31 +230,32 @@ pub fn main() !void { continue; } - _ = try vkd.waitForFences(dev, 1, @ptrCast(&frame_fence), vk.TRUE, std.math.maxInt(u64)); - try vkd.resetFences(dev, 1, @ptrCast(&frame_fence)); + const frame: ChainImage = chain.get(index); + // const next_frame: ChainImage = chain.get((index + 1) % chain.len); - // const frame: ChainImage = chain.get(); + _ = try 
vkd.waitForFences(dev, 1, @ptrCast(&frame.fence), vk.TRUE, std.math.maxInt(u64)); + try vkd.resetFences(dev, 1, @ptrCast(&frame.fence)); // var index: u32 = undefined; // try vkd.acquireNextImageKHR(dev, swapchain, std.math.maxInt(u64), frame., fence); - const result = try vkd.acquireNextImageKHR(dev, swapchain, std.math.maxInt(u64), image_available, .null_handle); - + const result = try vkd.acquireNextImageKHR(dev, swapchain, std.math.maxInt(u64), frame.image_available, .null_handle); + // std.log.debug("frame {d}", .{result.image_index}); - const frame = chain.get(result.image_index); + // const frame = chain.get(result.image_index); try vkd.queueSubmit(queue, 1, @ptrCast(&vk.SubmitInfo{ .wait_semaphore_count = 1, - .p_wait_semaphores = @ptrCast(&image_available), + .p_wait_semaphores = @ptrCast(&frame.image_available), .p_wait_dst_stage_mask = @ptrCast(&vk.PipelineStageFlags{ .color_attachment_output_bit = true }), .command_buffer_count = 1, .p_command_buffers = @ptrCast(&frame.cmdbuf), .signal_semaphore_count = 1, - .p_signal_semaphores = @ptrCast(&render_finished), - }), frame_fence); + .p_signal_semaphores = @ptrCast(&frame.render_finished), + }), frame.fence); _ = try vkd.queuePresentKHR(queue, &.{ .wait_semaphore_count = 1, - .p_wait_semaphores = @ptrCast(&render_finished), + .p_wait_semaphores = @ptrCast(&frame.render_finished), .swapchain_count = 1, .p_swapchains = @ptrCast(&swapchain), .p_image_indices = @ptrCast(&result.image_index), @@ -294,6 +288,8 @@ pub fn main() !void { // } c.glfwPollEvents(); + + index = @intCast((index + 1) % chain.len); } // try swapchain.waitForAllFences(); From 2300e6e3017864c5dd15153334544fa517918b58 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Tue, 2 Apr 2024 00:37:52 -0400 Subject: [PATCH 037/113] swapchain recreation works, but buggy --- src/main.zig | 287 ++++++++++++++++++++++++++++++++++----------------- 1 file changed, 192 insertions(+), 95 deletions(-) diff --git a/src/main.zig b/src/main.zig index 5a5accf..8f473f5 100644 --- a/src/main.zig +++ b/src/main.zig @@ -50,6 +50,143 @@ const vertices = [_]Vertex{ const indices = [_]Index{ 4, 5, 6, 6, 5, 7 }; +const ChainImage = struct { + image: vk.Image = .null_handle, + view: vk.ImageView = .null_handle, + cmdbuf: vk.CommandBuffer = .null_handle, + fence: vk.Fence = .null_handle, + image_available: vk.Semaphore = .null_handle, + render_finished: vk.Semaphore = .null_handle, +}; + +pub fn create_swapchain( + chain: *std.MultiArrayList(ChainImage), + swapchain: *vk.SwapchainKHR, + ally: std.mem.Allocator, + pdev: vk.PhysicalDevice, + vki: gfx.InstanceDispatch, + window: *c.GLFWwindow, + dev: vk.Device, + vkd: gfx.DeviceDispatch, + pool: vk.CommandPool, + surface: vk.SurfaceKHR, + swap_image_count: u32, + format: vk.SurfaceFormatKHR, + present_mode: vk.PresentModeKHR, +) !vk.Extent2D { + const extent = try gfx.find_swap_extent(pdev, vki, surface, window); + + swapchain.* = try vkd.createSwapchainKHR(dev, &.{ + .surface = surface, + .min_image_count = swap_image_count, + .image_format = format.format, + .image_color_space = format.color_space, + .image_extent = extent, + .image_array_layers = 1, + .image_usage = .{ .color_attachment_bit = true }, + .image_sharing_mode = .exclusive, + .pre_transform = .{ .identity_bit_khr = true }, + .composite_alpha = .{ .opaque_bit_khr = true }, + .present_mode = present_mode, + .clipped = vk.TRUE, + .old_swapchain = swapchain.*, + }, null); + + var image_count: u32 = undefined; + _ = try vkd.getSwapchainImagesKHR(dev, swapchain.*, &image_count, null); + try 
chain.resize(ally, image_count); + _ = try vkd.getSwapchainImagesKHR(dev, swapchain.*, &image_count, chain.items(.image).ptr); + + // memset so that deinit_chain will succeed with .null_handle if error part-way through a loop. + @memset(chain.items(.view), .null_handle); + @memset(chain.items(.cmdbuf), .null_handle); + @memset(chain.items(.fence), .null_handle); + @memset(chain.items(.image_available), .null_handle); + errdefer deinit_chain(chain.*, dev, vkd, pool); + + for (chain.items(.image), chain.items(.view)) |image, *view| { + view.* = try vkd.createImageView(dev, &.{ + .image = image, + .view_type = .@"2d", + .format = format.format, + .components = .{ .r = .identity, .g = .identity, .b = .identity, .a = .identity }, + .subresource_range = .{ + .aspect_mask = .{ .color_bit = true }, + .base_mip_level = 0, + .level_count = 1, + .base_array_layer = 0, + .layer_count = 1, + }, + }, null); + } + + for (chain.items(.fence)) |*fence| { + fence.* = try vkd.createFence(dev, &.{ .flags = .{ .signaled_bit = true } }, null); + } + + for (chain.items(.image_available)) |*sem| { + sem.* = try vkd.createSemaphore(dev, &.{}, null); + } + + for (chain.items(.render_finished)) |*sem| { + sem.* = try vkd.createSemaphore(dev, &.{}, null); + } + + try vkd.allocateCommandBuffers(dev, &.{ + .command_buffer_count = @intCast(chain.len), + .command_pool = pool, + .level = .primary, + }, chain.items(.cmdbuf).ptr); + + return extent; +} + +pub fn deinit_chain( + chain: std.MultiArrayList(ChainImage), + dev: vk.Device, + vkd: gfx.DeviceDispatch, + pool: vk.CommandPool, +) void { + vkd.freeCommandBuffers(dev, pool, @intCast(chain.len), chain.items(.cmdbuf).ptr); + for (chain.items(.view)) |view| vkd.destroyImageView(dev, view, null); + for (chain.items(.fence)) |fence| vkd.destroyFence(dev, fence, null); + for (chain.items(.image_available)) |sem| vkd.destroySemaphore(dev, sem, null); + for (chain.items(.render_finished)) |sem| vkd.destroySemaphore(dev, sem, null); +} + +fn render(dev: vk.Device, vkd: gfx.DeviceDispatch, swapchain: vk.SwapchainKHR, frame: ChainImage, queue: vk.Queue) !void { + _ = try vkd.waitForFences(dev, 1, @ptrCast(&frame.fence), vk.TRUE, std.math.maxInt(u64)); + + const result = try vkd.acquireNextImageKHR( + dev, + swapchain, + std.math.maxInt(u64), + frame.image_available, + .null_handle, + ); + + try vkd.resetFences(dev, 1, @ptrCast(&frame.fence)); + + try vkd.queueSubmit(queue, 1, @ptrCast(&vk.SubmitInfo{ + .wait_semaphore_count = 1, + .p_wait_semaphores = @ptrCast(&frame.image_available), + .p_wait_dst_stage_mask = @ptrCast(&vk.PipelineStageFlags{ .color_attachment_output_bit = true }), + .command_buffer_count = 1, + .p_command_buffers = @ptrCast(&frame.cmdbuf), + .signal_semaphore_count = 1, + .p_signal_semaphores = @ptrCast(&frame.render_finished), + }), frame.fence); + + _ = try vkd.queuePresentKHR(queue, &.{ + .wait_semaphore_count = 1, + .p_wait_semaphores = @ptrCast(&frame.render_finished), + .swapchain_count = 1, + .p_swapchains = @ptrCast(&swapchain), + .p_image_indices = @ptrCast(&result.image_index), + .p_results = null, + }); +} + pub fn main() !void { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; defer _ = gpa.deinit(); @@ -94,7 +231,6 @@ pub fn main() !void { .color_space = .srgb_nonlinear_khr, }; const format = try gfx.find_surface_format(pdev, vki, surface, preferred_format); - extent = try gfx.find_swap_extent(pdev, vki, surface, window); const present_mode = try gfx.find_present_mode(pdev, vki, surface, .mailbox_khr); @@ -103,77 +239,9 @@ pub fn main() !void { 
var swapchain: vk.SwapchainKHR = .null_handle; defer vkd.destroySwapchainKHR(dev, swapchain, null); - const ChainImage = struct { - image: vk.Image = .null_handle, - view: vk.ImageView = .null_handle, - cmdbuf: vk.CommandBuffer = .null_handle, - fence: vk.Fence = .null_handle, - image_available: vk.Semaphore = .null_handle, - render_finished: vk.Semaphore = .null_handle, - }; - var chain = std.MultiArrayList(ChainImage){}; defer chain.deinit(ally); - defer vkd.freeCommandBuffers(dev, pool, @intCast(chain.len), chain.items(.cmdbuf).ptr); - defer for (chain.items(.view)) |view| vkd.destroyImageView(dev, view, null); - defer for (chain.items(.fence)) |fence| vkd.destroyFence(dev, fence, null); - defer for (chain.items(.image_available)) |sem| vkd.destroySemaphore(dev, sem, null); - defer for (chain.items(.render_finished)) |sem| vkd.destroySemaphore(dev, sem, null); - - swapchain = try vkd.createSwapchainKHR(dev, &.{ - .surface = surface, - .min_image_count = swap_image_count, - .image_format = format.format, - .image_color_space = format.color_space, - .image_extent = extent, - .image_array_layers = 1, - .image_usage = .{ .color_attachment_bit = true }, - .image_sharing_mode = .exclusive, - .pre_transform = .{ .identity_bit_khr = true }, - .composite_alpha = .{ .opaque_bit_khr = true }, - .present_mode = present_mode, - .clipped = vk.TRUE, - .old_swapchain = swapchain, - }, null); - - var image_count: u32 = undefined; - _ = try vkd.getSwapchainImagesKHR(dev, swapchain, &image_count, null); - try chain.resize(ally, image_count); - _ = try vkd.getSwapchainImagesKHR(dev, swapchain, &image_count, chain.items(.image).ptr); - - for (chain.items(.image), chain.items(.view)) |image, *view| { - view.* = try vkd.createImageView(dev, &.{ - .image = image, - .view_type = .@"2d", - .format = format.format, - .components = .{ .r = .identity, .g = .identity, .b = .identity, .a = .identity }, - .subresource_range = .{ - .aspect_mask = .{ .color_bit = true }, - .base_mip_level = 0, - .level_count = 1, - .base_array_layer = 0, - .layer_count = 1, - }, - }, null); - } - - for (chain.items(.fence)) |*fence| { - fence.* = try vkd.createFence(dev, &.{ .flags = .{ .signaled_bit = true } }, null); - } - - for (chain.items(.image_available)) |*sem| { - sem.* = try vkd.createSemaphore(dev, &.{}, null); - } - - for (chain.items(.render_finished)) |*sem| { - sem.* = try vkd.createSemaphore(dev, &.{}, null); - } - - try vkd.allocateCommandBuffers(dev, &.{ - .command_buffer_count = @intCast(chain.len), - .command_pool = pool, - .level = .primary, - }, chain.items(.cmdbuf).ptr); + defer deinit_chain(chain, dev, vkd, pool); const pipeline_layout = try vkd.createPipelineLayout(dev, &.{ .flags = .{}, @@ -213,6 +281,22 @@ pub fn main() !void { try gfx.uploadData(Index, pdev, vki, dev, vkd, queue, pool, index_buffer, &indices); + extent = try create_swapchain( + &chain, + &swapchain, + ally, + pdev, + vki, + window, + dev, + vkd, + pool, + surface, + swap_image_count, + format, + present_mode, + ); + for (chain.items(.image), chain.items(.view), chain.items(.cmdbuf)) |image, view, cmdbuf| { try record_cmdbuf(cmdbuf, vkd, image, view, extent, pipeline, vertex_buffer, index_buffer); } @@ -233,34 +317,47 @@ pub fn main() !void { const frame: ChainImage = chain.get(index); // const next_frame: ChainImage = chain.get((index + 1) % chain.len); - _ = try vkd.waitForFences(dev, 1, @ptrCast(&frame.fence), vk.TRUE, std.math.maxInt(u64)); - try vkd.resetFences(dev, 1, @ptrCast(&frame.fence)); + render(dev, vkd, swapchain, frame, queue) 
catch |err| switch (err) { + error.OutOfDateKHR => { + // try vkd.deviceWaitIdle(dev); + // _ = try vkd.waitForFences( + // dev, + // @intCast(chain.len), + // chain.items(.fence).ptr, + // vk.TRUE, + // std.math.maxInt(u64), + // ); + try vkd.deviceWaitIdle(dev); + + deinit_chain(chain, dev, vkd, pool); + + extent = try create_swapchain( + &chain, + &swapchain, + ally, + pdev, + vki, + window, + dev, + vkd, + pool, + surface, + swap_image_count, + format, + present_mode, + ); + + for (chain.items(.image), chain.items(.view), chain.items(.cmdbuf)) |image, view, cmdbuf| { + try record_cmdbuf(cmdbuf, vkd, image, view, extent, pipeline, vertex_buffer, index_buffer); + } + + continue; + }, + else => |errx| return errx, + }; // var index: u32 = undefined; // try vkd.acquireNextImageKHR(dev, swapchain, std.math.maxInt(u64), frame., fence); - const result = try vkd.acquireNextImageKHR(dev, swapchain, std.math.maxInt(u64), frame.image_available, .null_handle); - - // std.log.debug("frame {d}", .{result.image_index}); - // const frame = chain.get(result.image_index); - - try vkd.queueSubmit(queue, 1, @ptrCast(&vk.SubmitInfo{ - .wait_semaphore_count = 1, - .p_wait_semaphores = @ptrCast(&frame.image_available), - .p_wait_dst_stage_mask = @ptrCast(&vk.PipelineStageFlags{ .color_attachment_output_bit = true }), - .command_buffer_count = 1, - .p_command_buffers = @ptrCast(&frame.cmdbuf), - .signal_semaphore_count = 1, - .p_signal_semaphores = @ptrCast(&frame.render_finished), - }), frame.fence); - - _ = try vkd.queuePresentKHR(queue, &.{ - .wait_semaphore_count = 1, - .p_wait_semaphores = @ptrCast(&frame.render_finished), - .swapchain_count = 1, - .p_swapchains = @ptrCast(&swapchain), - .p_image_indices = @ptrCast(&result.image_index), - .p_results = null, - }); // const cmdbuf = cmdbufs[swapchain.image_index]; From 002761d7f84d04a2712b95afea7bca9c42dc8243 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Tue, 2 Apr 2024 11:24:29 -0400 Subject: [PATCH 038/113] fix validation bugs. 
only remaining is screen tearing during resize --- src/main.zig | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/main.zig b/src/main.zig index 8f473f5..d0a5422 100644 --- a/src/main.zig +++ b/src/main.zig @@ -76,6 +76,7 @@ pub fn create_swapchain( ) !vk.Extent2D { const extent = try gfx.find_swap_extent(pdev, vki, surface, window); + const prev_swapchain = swapchain.*; swapchain.* = try vkd.createSwapchainKHR(dev, &.{ .surface = surface, .min_image_count = swap_image_count, @@ -89,8 +90,9 @@ pub fn create_swapchain( .composite_alpha = .{ .opaque_bit_khr = true }, .present_mode = present_mode, .clipped = vk.TRUE, - .old_swapchain = swapchain.*, + .old_swapchain = prev_swapchain, }, null); + vkd.destroySwapchainKHR(dev, prev_swapchain, null); var image_count: u32 = undefined; _ = try vkd.getSwapchainImagesKHR(dev, swapchain.*, &image_count, null); @@ -351,6 +353,8 @@ pub fn main() !void { try record_cmdbuf(cmdbuf, vkd, image, view, extent, pipeline, vertex_buffer, index_buffer); } + index = 0; + continue; }, else => |errx| return errx, From 5809b64f1cb540eb4f24c920edfc96057da100d3 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Wed, 3 Apr 2024 16:12:59 -0400 Subject: [PATCH 039/113] remove unused swapchain.zig --- src/swapchain.zig | 336 ---------------------------------------------- 1 file changed, 336 deletions(-) delete mode 100644 src/swapchain.zig diff --git a/src/swapchain.zig b/src/swapchain.zig deleted file mode 100644 index ed85604..0000000 --- a/src/swapchain.zig +++ /dev/null @@ -1,336 +0,0 @@ -const std = @import("std"); -const vk = @import("vk"); -const gfx = @import("gfx.zig"); -const Allocator = std.mem.Allocator; - -pub const Context = struct { - vki: gfx.InstanceDispatch, - vkd: gfx.DeviceDispatch, - - pdev: vk.PhysicalDevice, - dev: vk.Device, - - surface: vk.SurfaceKHR, - - queue: vk.Queue, - family: u32, -}; - -pub const Swapchain = struct { - pub const PresentState = enum { - optimal, - suboptimal, - }; - - gc: *const Context, - allocator: Allocator, - - present_mode: vk.PresentModeKHR, - extent: vk.Extent2D, - handle: vk.SwapchainKHR, - - swap_images: []SwapImage, - image_index: u32, - next_image_acquired: vk.Semaphore, - - pub fn init( - gc: *const Context, - allocator: Allocator, - extent: vk.Extent2D, - format: vk.SurfaceFormatKHR, - ) !Swapchain { - return try initRecycle(gc, allocator, extent, format, .null_handle); - } - - pub fn initRecycle( - gc: *const Context, - allocator: Allocator, - extent: vk.Extent2D, - format: vk.SurfaceFormatKHR, - old_handle: vk.SwapchainKHR, - ) !Swapchain { - const caps = try gc.vki.getPhysicalDeviceSurfaceCapabilitiesKHR(gc.pdev, gc.surface); - const actual_extent = findActualExtent(caps, extent); - if (actual_extent.width == 0 or actual_extent.height == 0) { - return error.InvalidSurfaceDimensions; - } - - const present_mode = try findPresentMode(gc, allocator); - - var image_count = caps.min_image_count + 1; - if (caps.max_image_count > 0) { - image_count = @min(image_count, caps.max_image_count); - } - - const handle = try gc.vkd.createSwapchainKHR(gc.dev, &.{ - .surface = gc.surface, - .min_image_count = image_count, - .image_format = format.format, - .image_color_space = format.color_space, - .image_extent = actual_extent, - .image_array_layers = 1, - .image_usage = .{ .color_attachment_bit = true, .transfer_dst_bit = true }, - .image_sharing_mode = .exclusive, - .pre_transform = caps.current_transform, - .composite_alpha = .{ .opaque_bit_khr = true }, - .present_mode = present_mode, - .clipped = 
vk.TRUE, - .old_swapchain = old_handle, - }, null); - errdefer gc.vkd.destroySwapchainKHR(gc.dev, handle, null); - - if (old_handle != .null_handle) { - // Apparently, the old swapchain handle still needs to be destroyed after recreating. - gc.vkd.destroySwapchainKHR(gc.dev, old_handle, null); - } - - const swap_images = try initSwapchainImages(gc, handle, format.format, allocator); - errdefer { - for (swap_images) |si| si.deinit(gc); - allocator.free(swap_images); - } - - var next_image_acquired = try gc.vkd.createSemaphore(gc.dev, &.{}, null); - errdefer gc.vkd.destroySemaphore(gc.dev, next_image_acquired, null); - - const result = try gc.vkd.acquireNextImageKHR( - gc.dev, - handle, - std.math.maxInt(u64), - next_image_acquired, - .null_handle, - ); - switch (result.result) { - vk.Result.success, vk.Result.suboptimal_khr => {}, - vk.Result.timeout => return error.Timeout, - vk.Result.not_ready => return error.NotReady, - else => unreachable, - } - - std.mem.swap(vk.Semaphore, &swap_images[result.image_index].image_acquired, &next_image_acquired); - return Swapchain{ - .gc = gc, - .allocator = allocator, - .present_mode = present_mode, - .extent = actual_extent, - .handle = handle, - .swap_images = swap_images, - .image_index = result.image_index, - .next_image_acquired = next_image_acquired, - }; - } - - fn deinitExceptSwapchain(self: Swapchain) void { - for (self.swap_images) |si| si.deinit(self.gc); - self.allocator.free(self.swap_images); - self.gc.vkd.destroySemaphore(self.gc.dev, self.next_image_acquired, null); - } - - pub fn waitForAllFences(self: Swapchain) !void { - for (self.swap_images) |si| si.waitForFence(self.gc) catch {}; - } - - pub fn deinit(self: Swapchain) void { - self.deinitExceptSwapchain(); - self.gc.vkd.destroySwapchainKHR(self.gc.dev, self.handle, null); - } - - pub fn recreate( - self: *Swapchain, - new_extent: vk.Extent2D, - format: vk.SurfaceFormatKHR, - ) !void { - const gc = self.gc; - const allocator = self.allocator; - const old_handle = self.handle; - self.deinitExceptSwapchain(); - self.* = try initRecycle(gc, allocator, new_extent, format, old_handle); - } - - pub fn currentImage(self: Swapchain) vk.Image { - return self.swap_images[self.image_index].image; - } - - pub fn currentSwapImage(self: Swapchain) *const SwapImage { - return &self.swap_images[self.image_index]; - } - - pub fn present(self: *Swapchain, cmdbuf: vk.CommandBuffer) !PresentState { - // Simple method: - // 1) Acquire next image - // 2) Wait for and reset fence of the acquired image - // 3) Submit command buffer with fence of acquired image, - // dependendent on the semaphore signalled by the first step. - // 4) Present current frame, dependent on semaphore signalled by previous step - // Problem: This way we can't reference the current image while rendering. - // Better method: Shuffle the steps around such that acquire next image is the last step, - // leaving the swapchain in a state with the current image. - // 1) Wait for and reset fence of current image - // 2) Submit command buffer, signalling fence of current image and dependent on - // the semaphore signalled by step 4. 
- // 3) Present current frame, dependent on semaphore signalled by the submit - // 4) Acquire next image, signalling its semaphore - // One problem that arises is that we can't know beforehand which semaphore to signal, - // so we keep an extra auxilery semaphore that is swapped around - - // Step 1: Make sure the current frame has finished rendering - const current = self.currentSwapImage(); - try current.waitForFence(self.gc); - try self.gc.vkd.resetFences(self.gc.dev, 1, @ptrCast(¤t.frame_fence)); - - // Step 2: Submit the command buffer - const wait_stage = [_]vk.PipelineStageFlags{.{ .top_of_pipe_bit = true }}; - try self.gc.vkd.queueSubmit(self.gc.queue, 1, &[_]vk.SubmitInfo{.{ - .wait_semaphore_count = 1, - .p_wait_semaphores = @ptrCast(¤t.image_acquired), - .p_wait_dst_stage_mask = &wait_stage, - .command_buffer_count = 1, - .p_command_buffers = @ptrCast(&cmdbuf), - .signal_semaphore_count = 1, - .p_signal_semaphores = @ptrCast(¤t.render_finished), - }}, current.frame_fence); - - // Step 3: Present the current frame - _ = try self.gc.vkd.queuePresentKHR(self.gc.queue, &.{ - .wait_semaphore_count = 1, - .p_wait_semaphores = @as([*]const vk.Semaphore, @ptrCast(¤t.render_finished)), - .swapchain_count = 1, - .p_swapchains = @as([*]const vk.SwapchainKHR, @ptrCast(&self.handle)), - .p_image_indices = @as([*]const u32, @ptrCast(&self.image_index)), - }); - - // Step 4: Acquire next frame - const result = try self.gc.vkd.acquireNextImageKHR( - self.gc.dev, - self.handle, - std.math.maxInt(u64), - self.next_image_acquired, - .null_handle, - ); - - std.mem.swap(vk.Semaphore, &self.swap_images[result.image_index].image_acquired, &self.next_image_acquired); - self.image_index = result.image_index; - - return switch (result.result) { - .success => .optimal, - .suboptimal_khr => .suboptimal, - else => unreachable, - }; - } -}; - -const SwapImage = struct { - image: vk.Image, - view: vk.ImageView, - image_acquired: vk.Semaphore, - render_finished: vk.Semaphore, - frame_fence: vk.Fence, - - fn init(gc: *const Context, image: vk.Image, format: vk.Format) !SwapImage { - const view = try gc.vkd.createImageView(gc.dev, &.{ - .image = image, - .view_type = .@"2d", - .format = format, - .components = .{ .r = .identity, .g = .identity, .b = .identity, .a = .identity }, - .subresource_range = .{ - .aspect_mask = .{ .color_bit = true }, - .base_mip_level = 0, - .level_count = 1, - .base_array_layer = 0, - .layer_count = 1, - }, - }, null); - errdefer gc.vkd.destroyImageView(gc.dev, view, null); - - const image_acquired = try gc.vkd.createSemaphore(gc.dev, &.{}, null); - errdefer gc.vkd.destroySemaphore(gc.dev, image_acquired, null); - - const render_finished = try gc.vkd.createSemaphore(gc.dev, &.{}, null); - errdefer gc.vkd.destroySemaphore(gc.dev, render_finished, null); - - const frame_fence = try gc.vkd.createFence(gc.dev, &.{ .flags = .{ .signaled_bit = true } }, null); - errdefer gc.vkd.destroyFence(gc.dev, frame_fence, null); - - return SwapImage{ - .image = image, - .view = view, - .image_acquired = image_acquired, - .render_finished = render_finished, - .frame_fence = frame_fence, - }; - } - - fn deinit(self: SwapImage, gc: *const Context) void { - // todo critical: this waitForFence deadlocks when recreating swapchain on nvidia. - // Something about the main "present" loop is fucked. Can't just ignore the fence; validation layers show errors - // that you can't destroy a fence while a queue depends on it (details may be wrong... 
I don't fully understand) - // I suspect this is more an issue with the vulkan-zig example, so I probably need to revisit vulkan-tutorial or - // try to rebuild the swapchain infrastructure myself. - // - // I do think it's clunky how the swapchain is created and recreated; duplicate logic in creation and recreation - // that could maybe be avoided with .null_handle? Maybe there's some more straightforward way to handle it. - - self.waitForFence(gc) catch return; - gc.vkd.destroyImageView(gc.dev, self.view, null); - gc.vkd.destroySemaphore(gc.dev, self.image_acquired, null); - gc.vkd.destroySemaphore(gc.dev, self.render_finished, null); - gc.vkd.destroyFence(gc.dev, self.frame_fence, null); - } - - fn waitForFence(self: SwapImage, gc: *const Context) !void { - _ = try gc.vkd.waitForFences(gc.dev, 1, @ptrCast(&self.frame_fence), vk.TRUE, std.math.maxInt(u64)); - } -}; - -fn initSwapchainImages(gc: *const Context, swapchain: vk.SwapchainKHR, format: vk.Format, allocator: Allocator) ![]SwapImage { - var count: u32 = undefined; - _ = try gc.vkd.getSwapchainImagesKHR(gc.dev, swapchain, &count, null); - const images = try allocator.alloc(vk.Image, count); - defer allocator.free(images); - _ = try gc.vkd.getSwapchainImagesKHR(gc.dev, swapchain, &count, images.ptr); - - const swap_images = try allocator.alloc(SwapImage, count); - errdefer allocator.free(swap_images); - - var i: usize = 0; - errdefer for (swap_images[0..i]) |si| si.deinit(gc); - - for (images) |image| { - swap_images[i] = try SwapImage.init(gc, image, format); - i += 1; - } - - return swap_images; -} - -fn findPresentMode(gc: *const Context, allocator: Allocator) !vk.PresentModeKHR { - var count: u32 = undefined; - _ = try gc.vki.getPhysicalDeviceSurfacePresentModesKHR(gc.pdev, gc.surface, &count, null); - const present_modes = try allocator.alloc(vk.PresentModeKHR, count); - defer allocator.free(present_modes); - _ = try gc.vki.getPhysicalDeviceSurfacePresentModesKHR(gc.pdev, gc.surface, &count, present_modes.ptr); - - const preferred = [_]vk.PresentModeKHR{ - .mailbox_khr, - .immediate_khr, - }; - - for (preferred) |mode| { - if (std.mem.indexOfScalar(vk.PresentModeKHR, present_modes, mode) != null) { - return mode; - } - } - - return .fifo_khr; -} - -fn findActualExtent(caps: vk.SurfaceCapabilitiesKHR, extent: vk.Extent2D) vk.Extent2D { - if (caps.current_extent.width != 0xFFFF_FFFF) { - return caps.current_extent; - } else { - return .{ - .width = std.math.clamp(extent.width, caps.min_image_extent.width, caps.max_image_extent.width), - .height = std.math.clamp(extent.height, caps.min_image_extent.height, caps.max_image_extent.height), - }; - } -} From 52865ab9e98c18f63f2990238047d7e37a87046b Mon Sep 17 00:00:00 2001 From: David Allemang Date: Wed, 3 Apr 2024 16:14:32 -0400 Subject: [PATCH 040/113] remove dead comments --- src/gfx.zig | 2 ++ src/main.zig | 38 -------------------------------------- 2 files changed, 2 insertions(+), 38 deletions(-) diff --git a/src/gfx.zig b/src/gfx.zig index 8ec9b96..b780178 100644 --- a/src/gfx.zig +++ b/src/gfx.zig @@ -351,11 +351,13 @@ pub fn copyBuffer( .p_command_buffers = @ptrCast(&cmdbuf), .p_wait_dst_stage_mask = undefined, }; + // creating and submitting a queue for every copy operation seems a bad idea for "streamed" data // gonna want a way to send a copy operation WITH SYNCHRONIZATION PRIMITIVES on a particular queue // see https://stackoverflow.com/a/62183243 // // this may be a misunderstanding on how submission works... 
+ try vkd.queueSubmit(queue, 1, @ptrCast(&si), .null_handle); try vkd.queueWaitIdle(queue); } diff --git a/src/main.zig b/src/main.zig index d0a5422..e07847b 100644 --- a/src/main.zig +++ b/src/main.zig @@ -317,18 +317,9 @@ pub fn main() !void { } const frame: ChainImage = chain.get(index); - // const next_frame: ChainImage = chain.get((index + 1) % chain.len); render(dev, vkd, swapchain, frame, queue) catch |err| switch (err) { error.OutOfDateKHR => { - // try vkd.deviceWaitIdle(dev); - // _ = try vkd.waitForFences( - // dev, - // @intCast(chain.len), - // chain.items(.fence).ptr, - // vk.TRUE, - // std.math.maxInt(u64), - // ); try vkd.deviceWaitIdle(dev); deinit_chain(chain, dev, vkd, pool); @@ -360,40 +351,11 @@ pub fn main() !void { else => |errx| return errx, }; - // var index: u32 = undefined; - // try vkd.acquireNextImageKHR(dev, swapchain, std.math.maxInt(u64), frame., fence); - - // const cmdbuf = cmdbufs[swapchain.image_index]; - - // const state = swapchain.present(cmdbuf) catch |err| switch (err) { - // error.OutOfDateKHR => Swapchain.PresentState.suboptimal, - // else => |narrow| return narrow, - // }; - - // if (state == .suboptimal or extent.width != @as(u32, @intCast(w)) or extent.height != @as(u32, @intCast(h))) { - // extent.width = @intCast(w); - // extent.height = @intCast(h); - // try swapchain.recreate(extent, format); - // - // destroyCommandBuffers(&gc, pool, ally, cmdbufs); - // - // cmdbufs = try createCommandBuffers( - // &gc, - // pool, - // ally, - // vertex_buffer, - // index_buffer, - // pipeline, - // swapchain, - // ); - // } - c.glfwPollEvents(); index = @intCast((index + 1) % chain.len); } - // try swapchain.waitForAllFences(); try vkd.deviceWaitIdle(dev); } From d24d49b3a5b9ba002451f1cee06c284d4f833414 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Wed, 3 Apr 2024 17:05:39 -0400 Subject: [PATCH 041/113] VkAllocator for vkAllocateMemory --- src/gfx.zig | 70 ++++++++++++++++++++++++++++++---------------------- src/main.zig | 8 ++++-- 2 files changed, 46 insertions(+), 32 deletions(-) diff --git a/src/gfx.zig b/src/gfx.zig index b780178..30085bb 100644 --- a/src/gfx.zig +++ b/src/gfx.zig @@ -296,11 +296,14 @@ pub fn uploadData( }, null); defer vkd.destroyBuffer(dev, staging_buffer, null); + const vally = VkAllocator.init(pdev, vki); + const mem_reqs = vkd.getBufferMemoryRequirements(dev, staging_buffer); - const staging_memory = try allocate(pdev, vki, dev, vkd, mem_reqs, .{ - .host_visible_bit = true, - .host_coherent_bit = true, - }); + const staging_memory = try vally.alloc(dev, vkd, mem_reqs, .{ .host_visible_bit = true, .host_coherent_bit = true }); + // const staging_memory = try allocate(pdev, vki, dev, vkd, mem_reqs, .{ + // .host_visible_bit = true, + // .host_coherent_bit = true, + // }); defer vkd.freeMemory(dev, staging_memory, null); try vkd.bindBufferMemory(dev, staging_buffer, staging_memory, 0); @@ -362,36 +365,43 @@ pub fn copyBuffer( try vkd.queueWaitIdle(queue); } -pub fn findMemoryTypeIndex( - pdev: vk.PhysicalDevice, - memory_type_bits: u32, - flags: vk.MemoryPropertyFlags, - vki: InstanceDispatch, -) !u32 { - const mem_props = vki.getPhysicalDeviceMemoryProperties(pdev); +pub const VkAllocator = struct { + memory_types: [vk.MAX_MEMORY_TYPES]vk.MemoryType, + memory_type_count: u32, - for (mem_props.memory_types[0..mem_props.memory_type_count], 0..) 
|mem_type, i| { - if (memory_type_bits & (@as(u32, 1) << @truncate(i)) != 0 and mem_type.property_flags.contains(flags)) { - return @truncate(i); - } + pub fn init( + pdev: vk.PhysicalDevice, + vki: InstanceDispatch, + ) VkAllocator { + const props = vki.getPhysicalDeviceMemoryProperties(pdev); + + return VkAllocator{ + .memory_types = props.memory_types, + .memory_type_count = props.memory_type_count, + }; } - return error.NoSuitableMemoryType; -} + pub fn alloc( + self: VkAllocator, + dev: vk.Device, + vkd: DeviceDispatch, + reqs: vk.MemoryRequirements, + flags: vk.MemoryPropertyFlags, + ) !vk.DeviceMemory { + const memory_type_bits = reqs.memory_type_bits; -pub fn allocate( - pdev: vk.PhysicalDevice, - vki: InstanceDispatch, - dev: vk.Device, - vkd: DeviceDispatch, - requirements: vk.MemoryRequirements, - flags: vk.MemoryPropertyFlags, -) !vk.DeviceMemory { - return try vkd.allocateMemory(dev, &.{ - .allocation_size = requirements.size, - .memory_type_index = try findMemoryTypeIndex(pdev, requirements.memory_type_bits, flags, vki), - }, null); -} + for (self.memory_types[0..self.memory_type_count], 0..) |mem_type, idx| { + if (memory_type_bits & (@as(u32, 1) << @truncate(idx)) != 0 and mem_type.property_flags.contains(flags)) { + return try vkd.allocateMemory(dev, &.{ + .allocation_size = reqs.size, + .memory_type_index = @intCast(idx), + }, null); + } + } + + return error.NoSuitableMemoryType; + } +}; pub const BaseDispatch = vk.BaseWrapper(.{ .createInstance = true, diff --git a/src/main.zig b/src/main.zig index e07847b..7f5d3a1 100644 --- a/src/main.zig +++ b/src/main.zig @@ -245,6 +245,8 @@ pub fn main() !void { defer chain.deinit(ally); defer deinit_chain(chain, dev, vkd, pool); + const device_local = gfx.VkAllocator.init(pdev, vki); + const pipeline_layout = try vkd.createPipelineLayout(dev, &.{ .flags = .{}, .set_layout_count = 0, @@ -264,7 +266,8 @@ pub fn main() !void { }, null); defer vkd.destroyBuffer(dev, vertex_buffer, null); const vertex_mem_reqs = vkd.getBufferMemoryRequirements(dev, vertex_buffer); - const vertex_memory = try gfx.allocate(pdev, vki, dev, vkd, vertex_mem_reqs, .{ .device_local_bit = true }); + // const vertex_memory = try gfx.allocate(pdev, vki, dev, vkd, vertex_mem_reqs, .{ .device_local_bit = true }); + const vertex_memory = try device_local.alloc(dev, vkd, vertex_mem_reqs, .{ .device_local_bit = true }); defer vkd.freeMemory(dev, vertex_memory, null); try vkd.bindBufferMemory(dev, vertex_buffer, vertex_memory, 0); @@ -277,7 +280,8 @@ pub fn main() !void { }, null); defer vkd.destroyBuffer(dev, index_buffer, null); const index_mem_reqs = vkd.getBufferMemoryRequirements(dev, index_buffer); - const index_memory = try gfx.allocate(pdev, vki, dev, vkd, index_mem_reqs, .{ .device_local_bit = true }); + // const index_memory = try gfx.allocate(pdev, vki, dev, vkd, index_mem_reqs, .{ .device_local_bit = true }); + const index_memory = try device_local.alloc(dev, vkd, index_mem_reqs, .{ .device_local_bit = true }); defer vkd.freeMemory(dev, index_memory, null); try vkd.bindBufferMemory(dev, index_buffer, index_memory, 0); From 7b01cfc330dcf674881e4ab7340bd6e13858f9ec Mon Sep 17 00:00:00 2001 From: David Allemang Date: Fri, 5 Apr 2024 23:47:30 -0400 Subject: [PATCH 042/113] MAJOR restructure --- src/gfx.zig | 686 +++++++++++++++++++++--------------------- src/gfx/Base.zig | 32 ++ src/gfx/Context.zig | 5 + src/gfx/Device.zig | 231 ++++++++++++++ src/gfx/Instance.zig | 165 ++++++++++ src/gfx/Swapchain.zig | 169 +++++++++++ src/gfx/Window.zig | 41 +++ src/main.zig 
| 370 +++++++++-------------- 8 files changed, 1136 insertions(+), 563 deletions(-) create mode 100644 src/gfx/Base.zig create mode 100644 src/gfx/Context.zig create mode 100644 src/gfx/Device.zig create mode 100644 src/gfx/Instance.zig create mode 100644 src/gfx/Swapchain.zig create mode 100644 src/gfx/Window.zig diff --git a/src/gfx.zig b/src/gfx.zig index 30085bb..4770709 100644 --- a/src/gfx.zig +++ b/src/gfx.zig @@ -4,282 +4,282 @@ const builtin = @import("builtin"); const vk = @import("vk"); const c = @import("c.zig"); -pub const use_debug_messenger = switch (builtin.mode) { - .Debug, .ReleaseSafe => true, - .ReleaseSmall, .ReleaseFast => false, -}; +const Base = @import("gfx/Base.zig"); +const Instance = @import("gfx/Instance.zig"); +const Device = @import("gfx/Device.zig"); -const InstancePair = std.meta.Tuple(&.{ vk.Instance, InstanceDispatch, vk.DebugUtilsMessengerEXT }); +// const InstancePair = std.meta.Tuple(&.{ vk.Instance, InstanceDispatch, vk.DebugUtilsMessengerEXT }); -/// note: destroy with vki.destroyInstance(instance, null) -pub fn create_instance(vkb: BaseDispatch, app_name: [*:0]const u8) !InstancePair { - var exts = std.BoundedArray([*:0]const u8, 32){}; - var layers = std.BoundedArray([*:0]const u8, 32){}; +// /// note: destroy with vki.destroyInstance(instance, null) +// pub fn create_instance(vkb: BaseDispatch, app_name: [*:0]const u8) !InstancePair { +// var exts = std.BoundedArray([*:0]const u8, 32){}; +// var layers = std.BoundedArray([*:0]const u8, 32){}; +// +// if (use_debug_messenger) { +// try exts.appendSlice(&.{ +// vk.extension_info.ext_debug_utils.name, +// }); +// +// try layers.appendSlice(&.{ +// "VK_LAYER_KHRONOS_validation", +// }); +// } +// +// var glfw_exts_count: u32 = 0; +// const glfw_exts: [*]const [*:0]const u8 = +// @ptrCast(c.glfwGetRequiredInstanceExtensions(&glfw_exts_count)); +// try exts.appendSlice(glfw_exts[0..glfw_exts_count]); +// +// const dumci: vk.DebugUtilsMessengerCreateInfoEXT = .{ +// .message_severity = .{ +// .error_bit_ext = true, +// .info_bit_ext = true, +// .verbose_bit_ext = true, +// .warning_bit_ext = true, +// }, +// .message_type = .{ +// .device_address_binding_bit_ext = true, +// .general_bit_ext = false, +// .performance_bit_ext = true, +// .validation_bit_ext = true, +// }, +// .pfn_user_callback = &debug_callback, +// .p_user_data = null, +// }; +// +// const instance = try vkb.createInstance(&vk.InstanceCreateInfo{ +// .p_application_info = &vk.ApplicationInfo{ +// .p_application_name = app_name, +// .application_version = vk.makeApiVersion(0, 0, 0, 0), +// .p_engine_name = app_name, +// .engine_version = vk.makeApiVersion(0, 0, 0, 0), +// .api_version = vk.API_VERSION_1_3, +// }, +// .enabled_extension_count = @intCast(exts.len), +// .pp_enabled_extension_names = &exts.buffer, +// .enabled_layer_count = @intCast(layers.len), +// .pp_enabled_layer_names = &layers.buffer, +// .p_next = if (use_debug_messenger) &dumci else null, +// }, null); +// +// const vki = try InstanceDispatch.load(instance, vkb.dispatch.vkGetInstanceProcAddr); +// errdefer vki.destroyInstance(instance, null); +// +// const messenger: vk.DebugUtilsMessengerEXT = if (use_debug_messenger) +// try vki.createDebugUtilsMessengerEXT(instance, &dumci, null) +// else +// .null_handle; +// errdefer if (use_debug_messenger) +// vki.destroyDebugUtilsMessengerEXT(instance, messenger, null); +// +// return .{ instance, vki, messenger }; +// } - if (use_debug_messenger) { - try exts.appendSlice(&.{ - vk.extension_info.ext_debug_utils.name, - }); +// 
/// note: destroy with vki.destroySurfaceKHR(instance, surface, null) +// pub fn create_surface(instance: vk.Instance, window: *c.GLFWwindow) !vk.SurfaceKHR { +// var surface: vk.SurfaceKHR = undefined; +// if (c.glfwCreateWindowSurface(instance, window, null, &surface) != .success) { +// return error.SurfaceInitFailed; +// } +// return surface; +// } - try layers.appendSlice(&.{ - "VK_LAYER_KHRONOS_validation", - }); - } +// /// note: destroy with c.glfwDestroyWindow(window) +// pub fn create_window(extent: vk.Extent2D, title: [*:0]const u8) !*c.GLFWwindow { + // c.glfwWindowHintString(c.GLFW_X11_CLASS_NAME, "floating_window"); + // c.glfwWindowHintString(c.GLFW_X11_INSTANCE_NAME, "floating_window"); + // c.glfwWindowHint(c.GLFW_CLIENT_API, c.GLFW_NO_API); +// +// return c.glfwCreateWindow( +// @intCast(extent.width), +// @intCast(extent.height), +// title, +// null, +// null, +// ) orelse error.WindowInitFailed; +// } - var glfw_exts_count: u32 = 0; - const glfw_exts: [*]const [*:0]const u8 = - @ptrCast(c.glfwGetRequiredInstanceExtensions(&glfw_exts_count)); - try exts.appendSlice(glfw_exts[0..glfw_exts_count]); - - const dumci: vk.DebugUtilsMessengerCreateInfoEXT = .{ - .message_severity = .{ - .error_bit_ext = true, - .info_bit_ext = true, - .verbose_bit_ext = true, - .warning_bit_ext = true, - }, - .message_type = .{ - .device_address_binding_bit_ext = true, - .general_bit_ext = false, - .performance_bit_ext = true, - .validation_bit_ext = true, - }, - .pfn_user_callback = &debug_callback, - .p_user_data = null, - }; - - const instance = try vkb.createInstance(&vk.InstanceCreateInfo{ - .p_application_info = &vk.ApplicationInfo{ - .p_application_name = app_name, - .application_version = vk.makeApiVersion(0, 0, 0, 0), - .p_engine_name = app_name, - .engine_version = vk.makeApiVersion(0, 0, 0, 0), - .api_version = vk.API_VERSION_1_3, - }, - .enabled_extension_count = @intCast(exts.len), - .pp_enabled_extension_names = &exts.buffer, - .enabled_layer_count = @intCast(layers.len), - .pp_enabled_layer_names = &layers.buffer, - .p_next = if (use_debug_messenger) &dumci else null, - }, null); - const vki = try InstanceDispatch.load(instance, vkb.dispatch.vkGetInstanceProcAddr); - errdefer vki.destroyInstance(instance, null); - - const messenger: vk.DebugUtilsMessengerEXT = if (use_debug_messenger) - try vki.createDebugUtilsMessengerEXT(instance, &dumci, null) - else - .null_handle; - errdefer if (use_debug_messenger) - vki.destroyDebugUtilsMessengerEXT(instance, messenger, null); - - return .{ instance, vki, messenger }; -} - -/// note: destroy with vki.destroySurfaceKHR(instance, surface, null) -pub fn create_surface(instance: vk.Instance, window: *c.GLFWwindow) !vk.SurfaceKHR { - var surface: vk.SurfaceKHR = undefined; - if (c.glfwCreateWindowSurface(instance, window, null, &surface) != .success) { - return error.SurfaceInitFailed; - } - return surface; -} - -/// note: destroy with c.glfwDestroyWindow(window) -pub fn create_window(extent: vk.Extent2D, title: [*:0]const u8) !*c.GLFWwindow { - c.glfwWindowHintString(c.GLFW_X11_CLASS_NAME, "floating_window"); - c.glfwWindowHintString(c.GLFW_X11_INSTANCE_NAME, "floating_window"); - c.glfwWindowHint(c.GLFW_CLIENT_API, c.GLFW_NO_API); - - return c.glfwCreateWindow( - @intCast(extent.width), - @intCast(extent.height), - title, - null, - null, - ) orelse error.WindowInitFailed; -} - -const DevicePair = std.meta.Tuple(&.{ vk.PhysicalDevice, vk.Device, DeviceDispatch, u32 }); - -/// note: destroy with vkd.destroyDevice(dev, null) -pub fn 
create_device( - ally: std.mem.Allocator, - instance: vk.Instance, - surface: vk.SurfaceKHR, - vki: InstanceDispatch, -) !DevicePair { - const required_device_extensions: []const [*:0]const u8 = &.{ - vk.extension_info.khr_swapchain.name, - vk.extension_info.khr_dynamic_rendering.name, - }; - - var pdev_count: u32 = undefined; - _ = try vki.enumeratePhysicalDevices(instance, &pdev_count, null); - const pdevs = try ally.alloc(vk.PhysicalDevice, pdev_count); - defer ally.free(pdevs); - _ = try vki.enumeratePhysicalDevices(instance, &pdev_count, pdevs.ptr); - - pdev_search: for (pdevs) |pdev| { - const props = vki.getPhysicalDeviceProperties(pdev); - if (props.device_type != .discrete_gpu) continue :pdev_search; - - var format_count: u32 = undefined; - _ = try vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &format_count, null); - if (format_count == 0) continue :pdev_search; - - var mode_count: u32 = undefined; - _ = try vki.getPhysicalDeviceSurfacePresentModesKHR(pdev, surface, &mode_count, null); - if (mode_count == 0) continue :pdev_search; - - var ext_count: u32 = undefined; - _ = try vki.enumerateDeviceExtensionProperties(pdev, null, &ext_count, null); - const exts = try ally.alloc(vk.ExtensionProperties, ext_count); - defer ally.free(exts); - _ = try vki.enumerateDeviceExtensionProperties(pdev, null, &ext_count, exts.ptr); - - for (required_device_extensions) |name| { - for (exts) |ext| { - if (std.mem.eql( - u8, - std.mem.span(name), - std.mem.sliceTo(&ext.extension_name, 0), - )) { - break; - } - } else { - continue :pdev_search; - } - } - - var family_count: u32 = undefined; - vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, null); - const families = try ally.alloc(vk.QueueFamilyProperties, family_count); - defer ally.free(families); - vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, families.ptr); - - // just find one family that does graphics and present, so we can use exclusive sharing - // on the swapchain. apparently most hardware supports this. logic for queue allocation - // and swapchain creation is so much simpler this way. swapchain creation needs to know - // the list of queue family indices which will have access to the images, and there's a - // performance penalty to allow concurrent access to multiple queue families. - // - // multiple _queues_ may have exclusive access, but only if they're in the smae family. - - const graphics_family: u32 = for (families, 0..) 
|family, idx| { - const graphics = family.queue_flags.graphics_bit; - const present = try vki.getPhysicalDeviceSurfaceSupportKHR(pdev, @intCast(idx), surface) == vk.TRUE; - if (graphics and present) { - break @intCast(idx); - } - } else { - continue :pdev_search; - }; - - std.log.debug("selecting device {s}", .{std.mem.sliceTo(&props.device_name, 0)}); - - const qci: []const vk.DeviceQueueCreateInfo = &.{ - vk.DeviceQueueCreateInfo{ - .queue_family_index = graphics_family, - .queue_count = 1, - .p_queue_priorities = &[_]f32{1.0}, - }, - }; - - const dev = try vki.createDevice(pdev, &.{ - .queue_create_info_count = @intCast(qci.len), - .p_queue_create_infos = qci.ptr, - .enabled_extension_count = @intCast(required_device_extensions.len), - .pp_enabled_extension_names = required_device_extensions.ptr, - .p_next = &vk.PhysicalDeviceDynamicRenderingFeaturesKHR{ - .dynamic_rendering = vk.TRUE, - }, - }, null); - const vkd = try DeviceDispatch.load(dev, vki.dispatch.vkGetDeviceProcAddr); - errdefer vkd.destroyDevice(dev, null); - - return .{ pdev, dev, vkd, graphics_family }; - } - - return error.NoSuitableDevice; -} - -pub fn find_surface_format( - pdev: vk.PhysicalDevice, - vki: InstanceDispatch, - surface: vk.SurfaceKHR, - preferred: vk.SurfaceFormatKHR, -) !vk.SurfaceFormatKHR { - var formats_buf: [64]vk.SurfaceFormatKHR = undefined; - var formats_count: u32 = @intCast(formats_buf.len); - _ = try vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &formats_count, &formats_buf); - const formats = formats_buf[0..formats_count]; - - for (formats) |format| { - if (std.meta.eql(format, preferred)) { - return format; - } - } - - return formats[0]; -} - -pub fn find_present_mode( - pdev: vk.PhysicalDevice, - vki: InstanceDispatch, - surface: vk.SurfaceKHR, - preferred: vk.PresentModeKHR, -) !vk.PresentModeKHR { - var modes_buf: [8]vk.PresentModeKHR = undefined; - var modes_count: u32 = @intCast(modes_buf.len); - _ = try vki.getPhysicalDeviceSurfacePresentModesKHR(pdev, surface, &modes_count, &modes_buf); - const modes = modes_buf[0..modes_count]; - - for (modes) |mode| { - if (std.meta.eql(mode, preferred)) { - return mode; - } - } - - return .mailbox_khr; -} - -pub fn find_swap_extent( - pdev: vk.PhysicalDevice, - vki: InstanceDispatch, - surface: vk.SurfaceKHR, - window: *c.GLFWwindow, -) !vk.Extent2D { - const caps = try vki.getPhysicalDeviceSurfaceCapabilitiesKHR(pdev, surface); - var extent = caps.current_extent; - - if (extent.width == std.math.maxInt(u32)) { - c.glfwGetFramebufferSize(window, @ptrCast(&extent.width), @ptrCast(&extent.height)); - extent.width = std.math.clamp(extent.width, caps.min_image_extent.width, caps.max_image_extent.width); - extent.height = std.math.clamp(extent.height, caps.min_image_extent.height, caps.max_image_extent.height); - } - - return extent; -} - -pub fn find_swap_image_count( - pdev: vk.PhysicalDevice, - vki: InstanceDispatch, - surface: vk.SurfaceKHR, -) !u32 { - const caps = try vki.getPhysicalDeviceSurfaceCapabilitiesKHR(pdev, surface); - var count = @max(3, caps.min_image_count + 1); - if (caps.max_image_count > 0) { - count = @min(count, caps.max_image_count); - } - return count; -} +// const DevicePair = std.meta.Tuple(&.{ vk.PhysicalDevice, vk.Device, Device.Wrapper, u32 }); +// +// /// note: destroy with vkd.destroyDevice(dev, null) +// pub fn cr eate_device( +// ally: std.mem.Allocator, +// instance: vk.Instance, +// surface: vk.SurfaceKHR, +// vki: InstanceDispatch, +// ) !DevicePair { +// const required_device_extensions: []const [*:0]const u8 
= &.{ +// vk.extension_info.khr_swapchain.name, +// vk.extension_info.khr_dynamic_rendering.name, +// }; +// +// var pdev_count: u32 = undefined; +// _ = try vki.enumeratePhysicalDevices(instance, &pdev_count, null); +// const pdevs = try ally.alloc(vk.PhysicalDevice, pdev_count); +// defer ally.free(pdevs); +// _ = try vki.enumeratePhysicalDevices(instance, &pdev_count, pdevs.ptr); +// +// pdev_search: for (pdevs) |pdev| { +// const props = vki.getPhysicalDeviceProperties(pdev); +// if (props.device_type != .discrete_gpu) continue :pdev_search; +// +// var format_count: u32 = undefined; +// _ = try vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &format_count, null); +// if (format_count == 0) continue :pdev_search; +// +// var mode_count: u32 = undefined; +// _ = try vki.getPhysicalDeviceSurfacePresentModesKHR(pdev, surface, &mode_count, null); +// if (mode_count == 0) continue :pdev_search; +// +// var ext_count: u32 = undefined; +// _ = try vki.enumerateDeviceExtensionProperties(pdev, null, &ext_count, null); +// const exts = try ally.alloc(vk.ExtensionProperties, ext_count); +// defer ally.free(exts); +// _ = try vki.enumerateDeviceExtensionProperties(pdev, null, &ext_count, exts.ptr); +// +// for (required_device_extensions) |name| { +// for (exts) |ext| { +// if (std.mem.eql( +// u8, +// std.mem.span(name), +// std.mem.sliceTo(&ext.extension_name, 0), +// )) { +// break; +// } +// } else { +// continue :pdev_search; +// } +// } +// +// var family_count: u32 = undefined; +// vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, null); +// const families = try ally.alloc(vk.QueueFamilyProperties, family_count); +// defer ally.free(families); +// vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, families.ptr); +// +// // just find one family that does graphics and present, so we can use exclusive sharing +// // on the swapchain. apparently most hardware supports this. logic for queue allocation +// // and swapchain creation is so much simpler this way. swapchain creation needs to know +// // the list of queue family indices which will have access to the images, and there's a +// // performance penalty to allow concurrent access to multiple queue families. +// // +// // multiple _queues_ may have exclusive access, but only if they're in the smae family. +// +// const graphics_family: u32 = for (families, 0..) 
|family, idx| { +// const graphics = family.queue_flags.graphics_bit; +// const present = try vki.getPhysicalDeviceSurfaceSupportKHR(pdev, @intCast(idx), surface) == vk.TRUE; +// if (graphics and present) { +// break @intCast(idx); +// } +// } else { +// continue :pdev_search; +// }; +// +// std.log.debug("selecting device {s}", .{std.mem.sliceTo(&props.device_name, 0)}); +// +// const qci: []const vk.DeviceQueueCreateInfo = &.{ +// vk.DeviceQueueCreateInfo{ +// .queue_family_index = graphics_family, +// .queue_count = 1, +// .p_queue_priorities = &[_]f32{1.0}, +// }, +// }; +// +// const dev = try vki.createDevice(pdev, &.{ +// .queue_create_info_count = @intCast(qci.len), +// .p_queue_create_infos = qci.ptr, +// .enabled_extension_count = @intCast(required_device_extensions.len), +// .pp_enabled_extension_names = required_device_extensions.ptr, +// .p_next = &vk.PhysicalDeviceDynamicRenderingFeaturesKHR{ +// .dynamic_rendering = vk.TRUE, +// }, +// }, null); +// const vkd = try Device.Wrapper.load(dev, vki.dispatch.vkGetDeviceProcAddr); +// errdefer vkd.destroyDevice(dev, null); +// +// return .{ pdev, dev, vkd, graphics_family }; +// } +// +// return error.NoSuitableDevice; +// } +// +// pub fn find_surface_format( +// pdev: vk.PhysicalDevice, +// vki: InstanceDispatch, +// surface: vk.SurfaceKHR, +// preferred: vk.SurfaceFormatKHR, +// ) !vk.SurfaceFormatKHR { +// var formats_buf: [64]vk.SurfaceFormatKHR = undefined; +// var formats_count: u32 = @intCast(formats_buf.len); +// _ = try vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &formats_count, &formats_buf); +// const formats = formats_buf[0..formats_count]; +// +// for (formats) |format| { +// if (std.meta.eql(format, preferred)) { +// return format; +// } +// } +// +// return formats[0]; +// } +// +// pub fn find_present_mode( +// pdev: vk.PhysicalDevice, +// vki: InstanceDispatch, +// surface: vk.SurfaceKHR, +// preferred: vk.PresentModeKHR, +// ) !vk.PresentModeKHR { +// var modes_buf: [8]vk.PresentModeKHR = undefined; +// var modes_count: u32 = @intCast(modes_buf.len); +// _ = try vki.getPhysicalDeviceSurfacePresentModesKHR(pdev, surface, &modes_count, &modes_buf); +// const modes = modes_buf[0..modes_count]; +// +// for (modes) |mode| { +// if (std.meta.eql(mode, preferred)) { +// return mode; +// } +// } +// +// return .mailbox_khr; +// } +// +// pub fn find_swap_extent( +// pdev: vk.PhysicalDevice, +// vki: InstanceDispatch, +// surface: vk.SurfaceKHR, +// window: *c.GLFWwindow, +// ) !vk.Extent2D { +// const caps = try vki.getPhysicalDeviceSurfaceCapabilitiesKHR(pdev, surface); +// var extent = caps.current_extent; +// +// if (extent.width == std.math.maxInt(u32)) { +// c.glfwGetFramebufferSize(window, @ptrCast(&extent.width), @ptrCast(&extent.height)); +// extent.width = std.math.clamp(extent.width, caps.min_image_extent.width, caps.max_image_extent.width); +// extent.height = std.math.clamp(extent.height, caps.min_image_extent.height, caps.max_image_extent.height); +// } +// +// return extent; +// } +// +// pub fn find_swap_image_count( +// pdev: vk.PhysicalDevice, +// vki: InstanceDispatch, +// surface: vk.SurfaceKHR, +// ) !u32 { +// const caps = try vki.getPhysicalDeviceSurfaceCapabilitiesKHR(pdev, surface); +// var count = @max(3, caps.min_image_count + 1); +// if (caps.max_image_count > 0) { +// count = @min(count, caps.max_image_count); +// } +// return count; +// } pub fn uploadData( comptime T: type, pdev: vk.PhysicalDevice, - vki: InstanceDispatch, + vki: Instance.Wrapper, dev: vk.Device, - vkd: 
DeviceDispatch, + vkd: Device.Wrapper, queue: vk.Queue, pool: vk.CommandPool, buffer: vk.Buffer, @@ -326,7 +326,7 @@ pub fn copyBuffer( dst: vk.Buffer, src: vk.Buffer, size: vk.DeviceSize, - vkd: DeviceDispatch, + vkd: Device.Wrapper, ) !void { var cmdbuf: vk.CommandBuffer = undefined; try vkd.allocateCommandBuffers(dev, &.{ @@ -371,7 +371,7 @@ pub const VkAllocator = struct { pub fn init( pdev: vk.PhysicalDevice, - vki: InstanceDispatch, + vki: Instance.Wrapper, ) VkAllocator { const props = vki.getPhysicalDeviceMemoryProperties(pdev); @@ -384,7 +384,7 @@ pub const VkAllocator = struct { pub fn alloc( self: VkAllocator, dev: vk.Device, - vkd: DeviceDispatch, + vkd: Device.Wrapper, reqs: vk.MemoryRequirements, flags: vk.MemoryPropertyFlags, ) !vk.DeviceMemory { @@ -403,82 +403,82 @@ pub const VkAllocator = struct { } }; -pub const BaseDispatch = vk.BaseWrapper(.{ - .createInstance = true, - .getInstanceProcAddr = true, -}); +// pub const BaseDispatch = vk.BaseWrapper(.{ +// .createInstance = true, +// .getInstanceProcAddr = true, +// }); -pub const InstanceDispatch = vk.InstanceWrapper(.{ - .destroyInstance = true, - .createDevice = true, - .destroySurfaceKHR = true, - .enumeratePhysicalDevices = true, - .getPhysicalDeviceProperties = true, - .enumerateDeviceExtensionProperties = true, - .getPhysicalDeviceSurfaceFormatsKHR = true, - .getPhysicalDeviceSurfacePresentModesKHR = true, - .getPhysicalDeviceSurfaceCapabilitiesKHR = true, - .getPhysicalDeviceQueueFamilyProperties = true, - .getPhysicalDeviceSurfaceSupportKHR = true, - .getPhysicalDeviceMemoryProperties = true, - .getDeviceProcAddr = true, - .createDebugUtilsMessengerEXT = use_debug_messenger, - .destroyDebugUtilsMessengerEXT = use_debug_messenger, -}); +// pub const Instance.Wrapper = vk.InstanceWrapper(.{ +// .destroyInstance = true, +// .createDevice = true, +// .destroySurfaceKHR = true, +// .enumeratePhysicalDevices = true, +// .getPhysicalDeviceProperties = true, +// .enumerateDeviceExtensionProperties = true, +// .getPhysicalDeviceSurfaceFormatsKHR = true, +// .getPhysicalDeviceSurfacePresentModesKHR = true, +// .getPhysicalDeviceSurfaceCapabilitiesKHR = true, +// .getPhysicalDeviceQueueFamilyProperties = true, +// .getPhysicalDeviceSurfaceSupportKHR = true, +// .getPhysicalDeviceMemoryProperties = true, +// .getDeviceProcAddr = true, +// .createDebugUtilsMessengerEXT = use_debug_messenger, +// .destroyDebugUtilsMessengerEXT = use_debug_messenger, +// }); -pub const DeviceDispatch = vk.DeviceWrapper(.{ - .destroyDevice = true, - .getDeviceQueue = true, - .createSemaphore = true, - .createFence = true, - .createImageView = true, - .destroyImageView = true, - .destroySemaphore = true, - .destroyFence = true, - .getSwapchainImagesKHR = true, - .createSwapchainKHR = true, - .destroySwapchainKHR = true, - .acquireNextImageKHR = true, - .deviceWaitIdle = true, - .waitForFences = true, - .resetFences = true, - .queueSubmit = true, - .queuePresentKHR = true, - .createCommandPool = true, - .destroyCommandPool = true, - .allocateCommandBuffers = true, - .freeCommandBuffers = true, - .queueWaitIdle = true, - .createShaderModule = true, - .destroyShaderModule = true, - .createPipelineLayout = true, - .destroyPipelineLayout = true, - .createGraphicsPipelines = true, - .destroyPipeline = true, - .beginCommandBuffer = true, - .endCommandBuffer = true, - .allocateMemory = true, - .freeMemory = true, - .createBuffer = true, - .destroyBuffer = true, - .getBufferMemoryRequirements = true, - .mapMemory = true, - .unmapMemory = true, - 
.bindBufferMemory = true, - .cmdBeginRenderPass = true, - .cmdEndRenderPass = true, - .cmdBindPipeline = true, - .cmdDraw = true, - .cmdDrawIndexed = true, - .cmdSetViewport = true, - .cmdSetScissor = true, - .cmdBindVertexBuffers = true, - .cmdBindIndexBuffer = true, - .cmdCopyBuffer = true, - .cmdBeginRenderingKHR = true, - .cmdEndRenderingKHR = true, - .cmdPipelineBarrier = true, -}); +// pub const Device.Wrapper = vk.DeviceWrapper(.{ +// .destroyDevice = true, +// .getDeviceQueue = true, +// .createSemaphore = true, +// .createFence = true, +// .createImageView = true, +// .destroyImageView = true, +// .destroySemaphore = true, +// .destroyFence = true, +// .getSwapchainImagesKHR = true, +// .createSwapchainKHR = true, +// .destroySwapchainKHR = true, +// .acquireNextImageKHR = true, +// .deviceWaitIdle = true, +// .waitForFences = true, +// .resetFences = true, +// .queueSubmit = true, +// .queuePresentKHR = true, +// .createCommandPool = true, +// .destroyCommandPool = true, +// .allocateCommandBuffers = true, +// .freeCommandBuffers = true, +// .queueWaitIdle = true, +// .createShaderModule = true, +// .destroyShaderModule = true, +// .createPipelineLayout = true, +// .destroyPipelineLayout = true, +// .createGraphicsPipelines = true, +// .destroyPipeline = true, +// .beginCommandBuffer = true, +// .endCommandBuffer = true, +// .allocateMemory = true, +// .freeMemory = true, +// .createBuffer = true, +// .destroyBuffer = true, +// .getBufferMemoryRequirements = true, +// .mapMemory = true, +// .unmapMemory = true, +// .bindBufferMemory = true, +// .cmdBeginRenderPass = true, +// .cmdEndRenderPass = true, +// .cmdBindPipeline = true, +// .cmdDraw = true, +// .cmdDrawIndexed = true, +// .cmdSetViewport = true, +// .cmdSetScissor = true, +// .cmdBindVertexBuffers = true, +// .cmdBindIndexBuffer = true, +// .cmdCopyBuffer = true, +// .cmdBeginRenderingKHR = true, +// .cmdEndRenderingKHR = true, +// .cmdPipelineBarrier = true, +// }); pub fn debug_callback( msg_severity: vk.DebugUtilsMessageSeverityFlagsEXT, diff --git a/src/gfx/Base.zig b/src/gfx/Base.zig new file mode 100644 index 0000000..a348067 --- /dev/null +++ b/src/gfx/Base.zig @@ -0,0 +1,32 @@ +const std = @import("std"); +const builtin = @import("builtin"); + +const vk = @import("vk"); +const c = @import("../c.zig"); + +const Self = @This(); + +vkb: Wrapper, + +pub fn init() !Self { + if (c.glfwInit() != c.GLFW_TRUE) + return error.GLFWInitFailed; + errdefer c.glfwTerminate(); + + if (c.glfwVulkanSupported() != c.GLFW_TRUE) { + return error.GLFWNoVulkan; + } + + return .{ + .vkb = try Wrapper.load(c.glfwGetInstanceProcAddress), + }; +} + +pub fn deinit(_: Self) void { + c.glfwTerminate(); +} + +pub const Wrapper = vk.BaseWrapper(.{ + .createInstance = true, + .getInstanceProcAddr = true, +}); diff --git a/src/gfx/Context.zig b/src/gfx/Context.zig new file mode 100644 index 0000000..646d492 --- /dev/null +++ b/src/gfx/Context.zig @@ -0,0 +1,5 @@ +const std = @import("std"); +const builtin = @import("builtin"); +const vk = @import("vk"); +const c = @import("../c.zig"); +const gfx = @import("../c.zig"); diff --git a/src/gfx/Device.zig b/src/gfx/Device.zig new file mode 100644 index 0000000..56d79f9 --- /dev/null +++ b/src/gfx/Device.zig @@ -0,0 +1,231 @@ +//! The point here is to select _a_ physical device and create a logical device around it. 
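+//!
+//! Selection criteria: a discrete GPU that exposes the required extensions (swapchain and
+//! dynamic rendering) and at least one queue family supporting both graphics and present,
+//! so the swapchain can use exclusive sharing mode.
+//!
+//! A minimal usage sketch (assuming an `Instance` and `Window` are already initialized, as
+//! in main.zig):
+//!
+//!     const dev = try Device.init(ally, &inst, &win);
+//!     defer dev.deinit();
+//!
+//! `init` walks the enumerated physical devices and returns the first one that `wrap`
+//! accepts; `wrap` rejects an unsuitable device by returning an error, so the search simply
+//! moves on to the next candidate.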
+ +const std = @import("std"); +const builtin = @import("builtin"); + +const vk = @import("vk"); +const c = @import("../c.zig"); + +const Instance = @import("Instance.zig"); +const Window = @import("Window.zig"); + +const Self = @This(); + +const required_extensions: []const [*:0]const u8 = &.{ + vk.extension_info.khr_swapchain.name, + vk.extension_info.khr_dynamic_rendering.name, +}; + +const preferred_surface_format: vk.SurfaceFormatKHR = .{ + .color_space = .srgb_nonlinear_khr, + .format = .r8g8b8a8_sint, +}; + +const preferred_present_mode: vk.PresentModeKHR = .mailbox_khr; + +dev: vk.Device, +pdev: vk.PhysicalDevice, +vkd: Wrapper, + +inst: *const Instance, +win: *const Window, + +format: vk.SurfaceFormatKHR, +mode: vk.PresentModeKHR, +family: u32, +queue: vk.Queue, + +pool: vk.CommandPool, + +pub fn init( + ally: std.mem.Allocator, + inst: *const Instance, + win: *const Window, +) !Self { + var pdev_count: u32 = undefined; + _ = try inst.vki.enumeratePhysicalDevices(inst.ref, &pdev_count, null); + const pdevs = try ally.alloc(vk.PhysicalDevice, pdev_count); + defer ally.free(pdevs); + _ = try inst.vki.enumeratePhysicalDevices(inst.ref, &pdev_count, pdevs.ptr); + + for (pdevs) |pdev| { + return wrap(ally, inst, win, pdev) catch continue; + } + + return error.NoSuitableDevice; +} + +pub fn wrap( + ally: std.mem.Allocator, + inst: *const Instance, + win: *const Window, + pdev: vk.PhysicalDevice, +) !Self { + const props = inst.vki.getPhysicalDeviceProperties(pdev); + if (props.device_type != .discrete_gpu) return error.NotDiscrete; + + var format_count: u32 = undefined; + _ = try inst.vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, win.surface, &format_count, null); + if (format_count == 0) return error.NoSurfaceFormat; + const formats = try ally.alloc(vk.SurfaceFormatKHR, format_count); + defer ally.free(formats); + _ = try inst.vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, win.surface, &format_count, formats.ptr); + + const format = for (formats) |f| { + if (std.meta.eql(f, preferred_surface_format)) + break f; + } else formats[0]; + + var mode_count: u32 = undefined; + _ = try inst.vki.getPhysicalDeviceSurfacePresentModesKHR(pdev, win.surface, &mode_count, null); + if (mode_count == 0) return error.NoSurfaceMode; + const modes = try ally.alloc(vk.PresentModeKHR, mode_count); + defer ally.free(modes); + _ = try inst.vki.getPhysicalDeviceSurfacePresentModesKHR(pdev, win.surface, &mode_count, modes.ptr); + + const mode = for (modes) |m| { + if (std.meta.eql(m, preferred_present_mode)) + break m; + } else modes[0]; + + var ext_count: u32 = undefined; + _ = try inst.vki.enumerateDeviceExtensionProperties(pdev, null, &ext_count, null); + const exts = try ally.alloc(vk.ExtensionProperties, ext_count); + defer ally.free(exts); + _ = try inst.vki.enumerateDeviceExtensionProperties(pdev, null, &ext_count, exts.ptr); + + for (required_extensions) |name| { + for (exts) |ext| { + if (std.mem.eql( + u8, + std.mem.span(name), + std.mem.sliceTo(&ext.extension_name, 0), + )) { + break; + } + } else { + return error.MissingRequiredExtension; + } + } + + var family_count: u32 = undefined; + inst.vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, null); + const families = try ally.alloc(vk.QueueFamilyProperties, family_count); + defer ally.free(families); + inst.vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, families.ptr); + + // just find one family that does graphics and present, so we can use exclusive sharing + // on the swapchain. apparently most hardware supports this. 
logic for queue allocation + // and swapchain creation is so much simpler this way. swapchain creation needs to know + // the list of queue family indices which will have access to the images, and there's a + // performance penalty to allow concurrent access to multiple queue families. + // + // multiple _queues_ may have exclusive access, but only if they're in the smae family. + + const family: u32 = for (families, 0..) |family, idx| { + const graphics = family.queue_flags.graphics_bit; + const present = try inst.vki.getPhysicalDeviceSurfaceSupportKHR(pdev, @intCast(idx), win.surface) == vk.TRUE; + if (graphics and present) break @intCast(idx); + } else { + return error.NoSuitableQueueFamily; + }; + + const qci: []const vk.DeviceQueueCreateInfo = &.{ + vk.DeviceQueueCreateInfo{ + .queue_family_index = family, + .queue_count = 1, + .p_queue_priorities = &[_]f32{1.0}, + }, + }; + + const dev = try inst.vki.createDevice(pdev, &.{ + .queue_create_info_count = @intCast(qci.len), + .p_queue_create_infos = qci.ptr, + .enabled_extension_count = @intCast(required_extensions.len), + .pp_enabled_extension_names = required_extensions.ptr, + .p_next = &vk.PhysicalDeviceDynamicRenderingFeaturesKHR{ + .dynamic_rendering = vk.TRUE, + }, + }, null); + const vkd = try Wrapper.load(dev, inst.vki.dispatch.vkGetDeviceProcAddr); + errdefer vkd.destroyDevice(dev, null); + + const pool = try vkd.createCommandPool(dev, &.{ + .queue_family_index = family, + }, null); + errdefer vkd.destroyCommandPool(dev, pool, null); + + const queue = vkd.getDeviceQueue(dev, family, 0); + + return .{ + .dev = dev, + .pdev = pdev, + .vkd = vkd, + .inst = inst, + .win = win, + .format = format, + .mode = mode, + .pool = pool, + .family = family, + .queue = queue, + }; +} + +pub fn deinit(self: Self) void { + self.vkd.destroyCommandPool(self.dev, self.pool, null); + self.vkd.destroyDevice(self.dev, null); +} + +pub const Wrapper = vk.DeviceWrapper(.{ + .destroyDevice = true, + .getDeviceQueue = true, + .createSemaphore = true, + .createFence = true, + .createImageView = true, + .destroyImageView = true, + .destroySemaphore = true, + .destroyFence = true, + .getSwapchainImagesKHR = true, + .createSwapchainKHR = true, + .destroySwapchainKHR = true, + .acquireNextImageKHR = true, + .deviceWaitIdle = true, + .waitForFences = true, + .resetFences = true, + .queueSubmit = true, + .queuePresentKHR = true, + .createCommandPool = true, + .destroyCommandPool = true, + .allocateCommandBuffers = true, + .freeCommandBuffers = true, + .queueWaitIdle = true, + .createShaderModule = true, + .destroyShaderModule = true, + .createPipelineLayout = true, + .destroyPipelineLayout = true, + .createGraphicsPipelines = true, + .destroyPipeline = true, + .beginCommandBuffer = true, + .endCommandBuffer = true, + .allocateMemory = true, + .freeMemory = true, + .createBuffer = true, + .destroyBuffer = true, + .getBufferMemoryRequirements = true, + .mapMemory = true, + .unmapMemory = true, + .bindBufferMemory = true, + .cmdBeginRenderPass = true, + .cmdEndRenderPass = true, + .cmdBindPipeline = true, + .cmdDraw = true, + .cmdDrawIndexed = true, + .cmdSetViewport = true, + .cmdSetScissor = true, + .cmdBindVertexBuffers = true, + .cmdBindIndexBuffer = true, + .cmdCopyBuffer = true, + .cmdBeginRenderingKHR = true, + .cmdEndRenderingKHR = true, + .cmdPipelineBarrier = true, +}); diff --git a/src/gfx/Instance.zig b/src/gfx/Instance.zig new file mode 100644 index 0000000..c3f553b --- /dev/null +++ b/src/gfx/Instance.zig @@ -0,0 +1,165 @@ +const std = @import("std"); 
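+// Instance owns the vk.Instance handle, its dispatch table (`vki`), and, in Debug/ReleaseSafe
+// builds, a validation-layer debug messenger (see `use_debug_messenger` below). The enabled
+// instance extensions come from GLFW, plus VK_EXT_debug_utils when the messenger is enabled.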
+const builtin = @import("builtin"); + +const vk = @import("vk"); +const c = @import("../c.zig"); + +const Base = @import("Base.zig"); + +const Self = @This(); + +const app_info: vk.ApplicationInfo = .{ + .p_application_name = "zig-glfw-vulkan", + .application_version = vk.makeApiVersion(0, 0, 0, 0), + .p_engine_name = "zig-glfw-vulkan", + .engine_version = vk.makeApiVersion(0, 0, 0, 0), + .api_version = vk.API_VERSION_1_3, +}; + +pub const use_debug_messenger = switch (builtin.mode) { + .Debug, .ReleaseSafe => true, + .ReleaseSmall, .ReleaseFast => false, +}; + +ref: vk.Instance, +vki: Wrapper, +base: *const Base, + +messenger: if (use_debug_messenger) vk.DebugUtilsMessengerEXT else void, + +pub fn init( + base: *const Base, +) !Self { + var exts: std.BoundedArray([*:0]const u8, 32) = .{}; + var layers: std.BoundedArray([*:0]const u8, 32) = .{}; + + if (use_debug_messenger) { + try exts.appendSlice(&.{ + vk.extension_info.ext_debug_utils.name, + }); + + try layers.appendSlice(&.{ + "VK_LAYER_KHRONOS_validation", + }); + } + + var glfw_exts_count: u32 = 0; + const glfw_exts: [*]const [*:0]const u8 = + @ptrCast(c.glfwGetRequiredInstanceExtensions(&glfw_exts_count)); + try exts.appendSlice(glfw_exts[0..glfw_exts_count]); + + const mci: vk.DebugUtilsMessengerCreateInfoEXT = .{ + .message_severity = .{ + .error_bit_ext = true, + .info_bit_ext = true, + .verbose_bit_ext = true, + .warning_bit_ext = true, + }, + .message_type = .{ + .device_address_binding_bit_ext = true, + .general_bit_ext = false, + .performance_bit_ext = true, + .validation_bit_ext = true, + }, + .pfn_user_callback = &debug_callback, + .p_user_data = null, + }; + + const ref = try base.vkb.createInstance(&.{ + .p_application_info = &app_info, + .enabled_extension_count = @intCast(exts.len), + .pp_enabled_extension_names = &exts.buffer, + .enabled_layer_count = @intCast(layers.len), + .pp_enabled_layer_names = &layers.buffer, + .p_next = if (use_debug_messenger) &mci else null, + }, null); + + const vki = try Wrapper.load(ref, base.vkb.dispatch.vkGetInstanceProcAddr); + errdefer vki.destroyInstance(ref, null); + + const messenger = if (use_debug_messenger) + try vki.createDebugUtilsMessengerEXT(ref, &mci, null) + else + void{}; + + errdefer if (use_debug_messenger) + vki.destroyDebugUtilsMessengerEXT(ref, messenger, null); + + return .{ + .ref = ref, + .vki = vki, + .base = base, + .messenger = messenger, + }; +} + +pub fn deinit(self: Self) void { + if (use_debug_messenger) + self.vki.destroyDebugUtilsMessengerEXT(self.ref, self.messenger, null); + self.vki.destroyInstance(self.ref, null); +} + +pub const Wrapper = vk.InstanceWrapper(.{ + .destroyInstance = true, + .createDevice = true, + .destroySurfaceKHR = true, + .enumeratePhysicalDevices = true, + .getPhysicalDeviceProperties = true, + .enumerateDeviceExtensionProperties = true, + .getPhysicalDeviceSurfaceFormatsKHR = true, + .getPhysicalDeviceSurfacePresentModesKHR = true, + .getPhysicalDeviceSurfaceCapabilitiesKHR = true, + .getPhysicalDeviceQueueFamilyProperties = true, + .getPhysicalDeviceSurfaceSupportKHR = true, + .getPhysicalDeviceMemoryProperties = true, + .getDeviceProcAddr = true, + .createDebugUtilsMessengerEXT = use_debug_messenger, + .destroyDebugUtilsMessengerEXT = use_debug_messenger, +}); + +pub fn debug_callback( + msg_severity: vk.DebugUtilsMessageSeverityFlagsEXT, + msg_type: vk.DebugUtilsMessageTypeFlagsEXT, + p_data: ?*const vk.DebugUtilsMessengerCallbackDataEXT, + _: ?*anyopaque, +) callconv(vk.vulkan_call_conv) vk.Bool32 { + // ripped from 
std.log.defaultLog + + const data = p_data orelse return vk.FALSE; + const message = data.p_message orelse return vk.FALSE; + + const severity_prefix = if (msg_severity.verbose_bit_ext) + "verbose:" + else if (msg_severity.info_bit_ext) + "info:" + else if (msg_severity.warning_bit_ext) + "warning:" + else if (msg_severity.error_bit_ext) + "error:" + else + "?:"; + + const type_prefix = if (msg_type.general_bit_ext) + "" + else if (msg_type.validation_bit_ext) + "validation:" + else if (msg_type.performance_bit_ext) + "performance:" + else if (msg_type.device_address_binding_bit_ext) + "device_address_binding:" + else + "?:"; + + const stderr = std.io.getStdErr().writer(); + var bw = std.io.bufferedWriter(stderr); + const writer = bw.writer(); + + std.debug.getStderrMutex().lock(); + defer std.debug.getStderrMutex().unlock(); + nosuspend { + writer.print("vk-{s}{s} {s}\n", .{ severity_prefix, type_prefix, message }) catch return vk.FALSE; + bw.flush() catch return vk.FALSE; + } + + return vk.FALSE; +} diff --git a/src/gfx/Swapchain.zig b/src/gfx/Swapchain.zig new file mode 100644 index 0000000..64ea09f --- /dev/null +++ b/src/gfx/Swapchain.zig @@ -0,0 +1,169 @@ +const std = @import("std"); +const builtin = @import("builtin"); + +const vk = @import("vk"); +const c = @import("../c.zig"); + +const Instance = @import("Instance.zig"); +const Window = @import("Window.zig"); +const Device = @import("Device.zig"); + +const Self = @This(); + +pub const ChainImage = struct { + image: vk.Image = .null_handle, + view: vk.ImageView = .null_handle, + cmdbuf: vk.CommandBuffer = .null_handle, + fence: vk.Fence = .null_handle, + image_available: vk.Semaphore = .null_handle, + render_finished: vk.Semaphore = .null_handle, +}; + +ally: std.mem.Allocator, + +ref: vk.SwapchainKHR, +dev: *const Device, + +extent: vk.Extent2D, +min_image_count: u32, +chain: std.MultiArrayList(ChainImage), + +pub fn create(ally: std.mem.Allocator, dev: *const Device) !Self { + const caps = try dev.inst.vki.getPhysicalDeviceSurfaceCapabilitiesKHR(dev.pdev, dev.win.surface); + var min_image_count = @max(3, caps.min_image_count + 1); + if (caps.max_image_count > 0) { + min_image_count = @min(min_image_count, caps.max_image_count); + } + + return .{ + .ally = ally, + .ref = .null_handle, + .dev = dev, + .extent = undefined, + .min_image_count = min_image_count, + .chain = .{}, + }; +} + +pub fn init(self: *Self) !void { + const caps = try self.dev.inst.vki.getPhysicalDeviceSurfaceCapabilitiesKHR(self.dev.pdev, self.dev.win.surface); + + self.extent = caps.current_extent; + if (caps.current_extent.width == std.math.maxInt(u32)) { + c.glfwGetFramebufferSize(self.dev.win.ref, @ptrCast(&self.extent.width), @ptrCast(&self.extent.height)); + } + self.extent.width = std.math.clamp(self.extent.width, caps.min_image_extent.width, caps.max_image_extent.width); + self.extent.height = std.math.clamp(self.extent.height, caps.min_image_extent.height, caps.max_image_extent.height); + + const prev = self.ref; + self.ref = try self.dev.vkd.createSwapchainKHR(self.dev.dev, &.{ + .surface = self.dev.win.surface, + .min_image_count = self.min_image_count, + .image_format = self.dev.format.format, + .image_color_space = self.dev.format.color_space, + .image_extent = self.extent, + .image_array_layers = 1, + .image_usage = .{ .color_attachment_bit = true }, + .image_sharing_mode = .exclusive, + .pre_transform = .{ .identity_bit_khr = true }, + .composite_alpha = .{ .opaque_bit_khr = true }, + .present_mode = self.dev.mode, + .clipped = vk.TRUE, + 
.old_swapchain = prev, + }, null); + self.dev.vkd.destroySwapchainKHR(self.dev.dev, prev, null); + + var image_count: u32 = undefined; + _ = try self.dev.vkd.getSwapchainImagesKHR(self.dev.dev, self.ref, &image_count, null); + + // todo try to reuse contents if possible. + // not even sure at this point which parts can be safely reused. the trick to fix the tearing while resizing + // on laptop is probably in doing this correctly, to present any remaining images that can be presented. + + self.deinit_chain(); + + try self.chain.resize(self.ally, image_count); + _ = try self.dev.vkd.getSwapchainImagesKHR( + self.dev.dev, + self.ref, + &image_count, + self.chain.items(.image).ptr, + ); + + try self.init_chain(); +} + +// requires self.chain.len and self.chain.items(.image) be populated by getSwapchainImagesKHR +fn init_chain(self: *Self) !void { + @memset(self.chain.items(.view), .null_handle); + @memset(self.chain.items(.cmdbuf), .null_handle); + @memset(self.chain.items(.fence), .null_handle); + @memset(self.chain.items(.image_available), .null_handle); + @memset(self.chain.items(.render_finished), .null_handle); + errdefer self.deinit_chain(); + + for (self.chain.items(.image), self.chain.items(.view)) |image, *view| { + view.* = try self.dev.vkd.createImageView(self.dev.dev, &.{ + .image = image, + .view_type = .@"2d", + .format = self.dev.format.format, + .components = .{ .r = .identity, .g = .identity, .b = .identity, .a = .identity }, + .subresource_range = .{ + .aspect_mask = .{ .color_bit = true }, + .base_mip_level = 0, + .level_count = 1, + .base_array_layer = 0, + .layer_count = 1, + }, + }, null); + } + + for (self.chain.items(.fence)) |*fence| { + fence.* = try self.dev.vkd.createFence(self.dev.dev, &.{ + .flags = .{ .signaled_bit = true }, + }, null); + } + + for (self.chain.items(.image_available)) |*sem| { + sem.* = try self.dev.vkd.createSemaphore(self.dev.dev, &.{}, null); + } + + for (self.chain.items(.render_finished)) |*sem| { + sem.* = try self.dev.vkd.createSemaphore(self.dev.dev, &.{}, null); + } + + try self.dev.vkd.allocateCommandBuffers(self.dev.dev, &.{ + .command_pool = self.dev.pool, + .command_buffer_count = @intCast(self.chain.len), + .level = .primary, + }, self.chain.items(.cmdbuf).ptr); +} + +fn deinit_chain(self: Self) void { + for (self.chain.items(.view)) |view| { + self.dev.vkd.destroyImageView(self.dev.dev, view, null); + } + for (self.chain.items(.fence)) |fence| { + self.dev.vkd.destroyFence(self.dev.dev, fence, null); + } + for (self.chain.items(.image_available)) |sem| { + self.dev.vkd.destroySemaphore(self.dev.dev, sem, null); + } + for (self.chain.items(.render_finished)) |sem| { + self.dev.vkd.destroySemaphore(self.dev.dev, sem, null); + } + if (self.chain.len > 0) { + self.dev.vkd.freeCommandBuffers( + self.dev.dev, + self.dev.pool, + @intCast(self.chain.len), + self.chain.items(.cmdbuf).ptr, + ); + } +} + +pub fn deinit(self: *Self) void { + self.deinit_chain(); + self.chain.deinit(self.ally); + self.dev.vkd.destroySwapchainKHR(self.dev.dev, self.ref, null); +} diff --git a/src/gfx/Window.zig b/src/gfx/Window.zig new file mode 100644 index 0000000..d2c2c2b --- /dev/null +++ b/src/gfx/Window.zig @@ -0,0 +1,41 @@ +const std = @import("std"); +const builtin = @import("builtin"); + +const vk = @import("vk"); +const c = @import("../c.zig"); + +const Base = @import("Base.zig"); +const Instance = @import("Instance.zig"); + +const Self = @This(); + +ref: *c.GLFWwindow, +surface: vk.SurfaceKHR, + +inst: *const Instance, + +pub fn init(inst: *const 
Instance, title: [*:0]const u8, extent: vk.Extent2D) !Self { + c.glfwWindowHintString(c.GLFW_X11_CLASS_NAME, "floating_window"); + c.glfwWindowHintString(c.GLFW_X11_INSTANCE_NAME, "floating_window"); + c.glfwWindowHint(c.GLFW_CLIENT_API, c.GLFW_NO_API); + + const ref = c.glfwCreateWindow(@intCast(extent.width), @intCast(extent.height), title, null, null) orelse + return error.GLFWWindowCreateFailed; + errdefer c.glfwDestroyWindow(ref); + + var surface: vk.SurfaceKHR = undefined; + if (c.glfwCreateWindowSurface(inst.ref, ref, null, &surface) != .success) + return error.GLFWWindowSurfaceFailed; + errdefer inst.vki.destroySurfaceKHR(inst.ref, surface, null); + + return .{ + .ref = ref, + .surface = surface, + .inst = inst, + }; +} + +pub fn deinit(self: Self) void { + self.inst.vki.destroySurfaceKHR(self.inst.ref, self.surface, null); + c.glfwDestroyWindow(self.ref); +} diff --git a/src/main.zig b/src/main.zig index 7f5d3a1..806ca08 100644 --- a/src/main.zig +++ b/src/main.zig @@ -50,113 +50,104 @@ const vertices = [_]Vertex{ const indices = [_]Index{ 4, 5, 6, 6, 5, 7 }; -const ChainImage = struct { - image: vk.Image = .null_handle, - view: vk.ImageView = .null_handle, - cmdbuf: vk.CommandBuffer = .null_handle, - fence: vk.Fence = .null_handle, - image_available: vk.Semaphore = .null_handle, - render_finished: vk.Semaphore = .null_handle, -}; +// pub fn create_swapchain( +// chain: *std.MultiArrayList(ChainImage), +// swapchain: *vk.SwapchainKHR, +// ally: std.mem.Allocator, +// pdev: vk.PhysicalDevice, +// vki: gfx.InstanceDispatch, +// window: *c.GLFWwindow, +// dev: vk.Device, +// vkd: Device.Wrapper, +// pool: vk.CommandPool, +// surface: vk.SurfaceKHR, +// swap_image_count: u32, +// format: vk.SurfaceFormatKHR, +// present_mode: vk.PresentModeKHR, +// ) !vk.Extent2D { +// const extent = try gfx.find_swap_extent(pdev, vki, surface, window); +// +// const prev_swapchain = swapchain.*; +// swapchain.* = try vkd.createSwapchainKHR(dev, &.{ +// .surface = surface, +// .min_image_count = swap_image_count, +// .image_format = format.format, +// .image_color_space = format.color_space, +// .image_extent = extent, +// .image_array_layers = 1, +// .image_usage = .{ .color_attachment_bit = true }, +// .image_sharing_mode = .exclusive, +// .pre_transform = .{ .identity_bit_khr = true }, +// .composite_alpha = .{ .opaque_bit_khr = true }, +// .present_mode = present_mode, +// .clipped = vk.TRUE, +// .old_swapchain = prev_swapchain, +// }, null); +// vkd.destroySwapchainKHR(dev, prev_swapchain, null); +// +// var image_count: u32 = undefined; +// _ = try vkd.getSwapchainImagesKHR(dev, swapchain.*, &image_count, null); +// try chain.resize(ally, image_count); +// _ = try vkd.getSwapchainImagesKHR(dev, swapchain.*, &image_count, chain.items(.image).ptr); +// +// // memset so that deinit_chain will succeed with .null_handle if error part-way through a loop. 
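+//     // (destroying or freeing a .null_handle is a defined no-op in Vulkan for image views,
+//     // fences, semaphores, and command buffers, so the errdefer below can clean up safely
+//     // even if only some of the handles were created before an error.)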
+// @memset(chain.items(.view), .null_handle); +// @memset(chain.items(.cmdbuf), .null_handle); +// @memset(chain.items(.fence), .null_handle); +// @memset(chain.items(.image_available), .null_handle); +// errdefer deinit_chain(chain.*, dev, vkd, pool); +// +// for (chain.items(.image), chain.items(.view)) |image, *view| { +// view.* = try vkd.createImageView(dev, &.{ +// .image = image, +// .view_type = .@"2d", +// .format = format.format, +// .components = .{ .r = .identity, .g = .identity, .b = .identity, .a = .identity }, +// .subresource_range = .{ +// .aspect_mask = .{ .color_bit = true }, +// .base_mip_level = 0, +// .level_count = 1, +// .base_array_layer = 0, +// .layer_count = 1, +// }, +// }, null); +// } +// +// for (chain.items(.fence)) |*fence| { +// fence.* = try vkd.createFence(dev, &.{ .flags = .{ .signaled_bit = true } }, null); +// } +// +// for (chain.items(.image_available)) |*sem| { +// sem.* = try vkd.createSemaphore(dev, &.{}, null); +// } +// +// for (chain.items(.render_finished)) |*sem| { +// sem.* = try vkd.createSemaphore(dev, &.{}, null); +// } +// +// try vkd.allocateCommandBuffers(dev, &.{ +// .command_buffer_count = @intCast(chain.len), +// .command_pool = pool, +// .level = .primary, +// }, chain.items(.cmdbuf).ptr); +// +// return extent; +// } +// +// pub fn deinit_chain( +// chain: std.MultiArrayList(ChainImage), +// dev: vk.Device, +// vkd: Device.Wrapper, +// pool: vk.CommandPool, +// ) void { +// vkd.freeCommandBuffers(dev, pool, @intCast(chain.len), chain.items(.cmdbuf).ptr); +// for (chain.items(.view)) |view| vkd.destroyImageView(dev, view, null); +// for (chain.items(.fence)) |fence| vkd.destroyFence(dev, fence, null); +// for (chain.items(.image_available)) |sem| vkd.destroySemaphore(dev, sem, null); +// for (chain.items(.render_finished)) |sem| vkd.destroySemaphore(dev, sem, null); +// } -pub fn create_swapchain( - chain: *std.MultiArrayList(ChainImage), - swapchain: *vk.SwapchainKHR, - ally: std.mem.Allocator, - pdev: vk.PhysicalDevice, - vki: gfx.InstanceDispatch, - window: *c.GLFWwindow, - dev: vk.Device, - vkd: gfx.DeviceDispatch, - pool: vk.CommandPool, - surface: vk.SurfaceKHR, - swap_image_count: u32, - format: vk.SurfaceFormatKHR, - present_mode: vk.PresentModeKHR, -) !vk.Extent2D { - const extent = try gfx.find_swap_extent(pdev, vki, surface, window); - - const prev_swapchain = swapchain.*; - swapchain.* = try vkd.createSwapchainKHR(dev, &.{ - .surface = surface, - .min_image_count = swap_image_count, - .image_format = format.format, - .image_color_space = format.color_space, - .image_extent = extent, - .image_array_layers = 1, - .image_usage = .{ .color_attachment_bit = true }, - .image_sharing_mode = .exclusive, - .pre_transform = .{ .identity_bit_khr = true }, - .composite_alpha = .{ .opaque_bit_khr = true }, - .present_mode = present_mode, - .clipped = vk.TRUE, - .old_swapchain = prev_swapchain, - }, null); - vkd.destroySwapchainKHR(dev, prev_swapchain, null); - - var image_count: u32 = undefined; - _ = try vkd.getSwapchainImagesKHR(dev, swapchain.*, &image_count, null); - try chain.resize(ally, image_count); - _ = try vkd.getSwapchainImagesKHR(dev, swapchain.*, &image_count, chain.items(.image).ptr); - - // memset so that deinit_chain will succeed with .null_handle if error part-way through a loop. 
- @memset(chain.items(.view), .null_handle); - @memset(chain.items(.cmdbuf), .null_handle); - @memset(chain.items(.fence), .null_handle); - @memset(chain.items(.image_available), .null_handle); - errdefer deinit_chain(chain.*, dev, vkd, pool); - - for (chain.items(.image), chain.items(.view)) |image, *view| { - view.* = try vkd.createImageView(dev, &.{ - .image = image, - .view_type = .@"2d", - .format = format.format, - .components = .{ .r = .identity, .g = .identity, .b = .identity, .a = .identity }, - .subresource_range = .{ - .aspect_mask = .{ .color_bit = true }, - .base_mip_level = 0, - .level_count = 1, - .base_array_layer = 0, - .layer_count = 1, - }, - }, null); - } - - for (chain.items(.fence)) |*fence| { - fence.* = try vkd.createFence(dev, &.{ .flags = .{ .signaled_bit = true } }, null); - } - - for (chain.items(.image_available)) |*sem| { - sem.* = try vkd.createSemaphore(dev, &.{}, null); - } - - for (chain.items(.render_finished)) |*sem| { - sem.* = try vkd.createSemaphore(dev, &.{}, null); - } - - try vkd.allocateCommandBuffers(dev, &.{ - .command_buffer_count = @intCast(chain.len), - .command_pool = pool, - .level = .primary, - }, chain.items(.cmdbuf).ptr); - - return extent; -} - -pub fn deinit_chain( - chain: std.MultiArrayList(ChainImage), - dev: vk.Device, - vkd: gfx.DeviceDispatch, - pool: vk.CommandPool, -) void { - vkd.freeCommandBuffers(dev, pool, @intCast(chain.len), chain.items(.cmdbuf).ptr); - for (chain.items(.view)) |view| vkd.destroyImageView(dev, view, null); - for (chain.items(.fence)) |fence| vkd.destroyFence(dev, fence, null); - for (chain.items(.image_available)) |sem| vkd.destroySemaphore(dev, sem, null); - for (chain.items(.render_finished)) |sem| vkd.destroySemaphore(dev, sem, null); -} - -fn render(dev: vk.Device, vkd: gfx.DeviceDispatch, swapchain: vk.SwapchainKHR, frame: ChainImage, queue: vk.Queue) !void { +fn render(dev: vk.Device, vkd: Device.Wrapper, swapchain: vk.SwapchainKHR, frame: Swapchain.ChainImage, queue: vk.Queue) !void { _ = try vkd.waitForFences(dev, 1, @ptrCast(&frame.fence), vk.TRUE, std.math.maxInt(u64)); const result = try vkd.acquireNextImageKHR( @@ -189,130 +180,86 @@ fn render(dev: vk.Device, vkd: gfx.DeviceDispatch, swapchain: vk.SwapchainKHR, f }); } +const Base = @import("gfx/Base.zig"); +const Instance = @import("gfx/Instance.zig"); +const Context = @import("gfx/Context.zig"); +const Window = @import("gfx/Window.zig"); +const Device = @import("gfx/Device.zig"); +const Swapchain = @import("gfx/Swapchain.zig"); + pub fn main() !void { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; defer _ = gpa.deinit(); const ally = gpa.allocator(); - if (c.glfwInit() != c.GLFW_TRUE) return error.GlfwInitFailed; - defer c.glfwTerminate(); + const base = try Base.init(); + defer base.deinit(); - if (c.glfwVulkanSupported() != c.GLFW_TRUE) { - std.log.err("GLFW could not find libvulkan", .{}); - return error.NoVulkan; - } + const inst = try Instance.init(&base); + defer inst.deinit(); - var extent = vk.Extent2D{ .width = 800, .height = 600 }; + const win = try Window.init(&inst, "zig-glfw-vulkan", .{ .width = 800, .height = 600 }); + defer win.deinit(); - const window = try gfx.create_window(extent, app_name); - defer c.glfwDestroyWindow(window); + const dev = try Device.init(ally, &inst, &win); + defer dev.deinit(); - const vkb = try gfx.BaseDispatch.load(c.glfwGetInstanceProcAddress); + var sc = try Swapchain.create(ally, &dev); + defer sc.deinit(); - const instance, const vki, const messenger = try gfx.create_instance(vkb, app_name); - 
defer vki.destroyInstance(instance, null); - defer if (gfx.use_debug_messenger) - vki.destroyDebugUtilsMessengerEXT(instance, messenger, null); + const device_local = gfx.VkAllocator.init(dev.pdev, inst.vki); - const surface = try gfx.create_surface(instance, window); - defer vki.destroySurfaceKHR(instance, surface, null); - - const pdev: vk.PhysicalDevice, const dev: vk.Device, const vkd: gfx.DeviceDispatch, const family: u32 = - try gfx.create_device(ally, instance, surface, vki); - defer vkd.destroyDevice(dev, null); - - const queue = vkd.getDeviceQueue(dev, family, 0); - - const pool = try vkd.createCommandPool(dev, &.{ - .queue_family_index = family, - }, null); - defer vkd.destroyCommandPool(dev, pool, null); - - const preferred_format: vk.SurfaceFormatKHR = .{ - .format = .b8g8r8a8_srgb, - .color_space = .srgb_nonlinear_khr, - }; - const format = try gfx.find_surface_format(pdev, vki, surface, preferred_format); - - const present_mode = try gfx.find_present_mode(pdev, vki, surface, .mailbox_khr); - - const swap_image_count = try gfx.find_swap_image_count(pdev, vki, surface); - - var swapchain: vk.SwapchainKHR = .null_handle; - defer vkd.destroySwapchainKHR(dev, swapchain, null); - - var chain = std.MultiArrayList(ChainImage){}; - defer chain.deinit(ally); - defer deinit_chain(chain, dev, vkd, pool); - - const device_local = gfx.VkAllocator.init(pdev, vki); - - const pipeline_layout = try vkd.createPipelineLayout(dev, &.{ + const pipeline_layout = try dev.vkd.createPipelineLayout(dev.dev, &.{ .flags = .{}, .set_layout_count = 0, .p_set_layouts = undefined, .push_constant_range_count = 0, .p_push_constant_ranges = undefined, }, null); - defer vkd.destroyPipelineLayout(dev, pipeline_layout, null); + defer dev.vkd.destroyPipelineLayout(dev.dev, pipeline_layout, null); - const pipeline = try createPipeline(dev, pipeline_layout, format, vkd); - defer vkd.destroyPipeline(dev, pipeline, null); + const pipeline = try createPipeline(dev.dev, pipeline_layout, dev.format, dev.vkd); + defer dev.vkd.destroyPipeline(dev.dev, pipeline, null); - const vertex_buffer = try vkd.createBuffer(dev, &.{ + const vertex_buffer = try dev.vkd.createBuffer(dev.dev, &.{ .size = @sizeOf(@TypeOf(vertices)), .usage = .{ .transfer_dst_bit = true, .vertex_buffer_bit = true }, .sharing_mode = .exclusive, }, null); - defer vkd.destroyBuffer(dev, vertex_buffer, null); - const vertex_mem_reqs = vkd.getBufferMemoryRequirements(dev, vertex_buffer); - // const vertex_memory = try gfx.allocate(pdev, vki, dev, vkd, vertex_mem_reqs, .{ .device_local_bit = true }); - const vertex_memory = try device_local.alloc(dev, vkd, vertex_mem_reqs, .{ .device_local_bit = true }); - defer vkd.freeMemory(dev, vertex_memory, null); - try vkd.bindBufferMemory(dev, vertex_buffer, vertex_memory, 0); + defer dev.vkd.destroyBuffer(dev.dev, vertex_buffer, null); + const vertex_mem_reqs = dev.vkd.getBufferMemoryRequirements(dev.dev, vertex_buffer); + // const vertex_memory = try gfx.allocate(pdev, vki, dev, dev.vkd, vertex_mem_reqs, .{ .device_local_bit = true }); + const vertex_memory = try device_local.alloc(dev.dev, dev.vkd, vertex_mem_reqs, .{ .device_local_bit = true }); + defer dev.vkd.freeMemory(dev.dev, vertex_memory, null); + try dev.vkd.bindBufferMemory(dev.dev, vertex_buffer, vertex_memory, 0); - try gfx.uploadData(Vertex, pdev, vki, dev, vkd, queue, pool, vertex_buffer, &vertices); + try gfx.uploadData(Vertex, dev.pdev, inst.vki, dev.dev, dev.vkd, dev.queue, dev.pool, vertex_buffer, &vertices); - const index_buffer = try 
vkd.createBuffer(dev, &.{ + const index_buffer = try dev.vkd.createBuffer(dev.dev, &.{ .size = @sizeOf(@TypeOf(indices)), .usage = .{ .transfer_dst_bit = true, .index_buffer_bit = true }, .sharing_mode = .exclusive, }, null); - defer vkd.destroyBuffer(dev, index_buffer, null); - const index_mem_reqs = vkd.getBufferMemoryRequirements(dev, index_buffer); - // const index_memory = try gfx.allocate(pdev, vki, dev, vkd, index_mem_reqs, .{ .device_local_bit = true }); - const index_memory = try device_local.alloc(dev, vkd, index_mem_reqs, .{ .device_local_bit = true }); - defer vkd.freeMemory(dev, index_memory, null); - try vkd.bindBufferMemory(dev, index_buffer, index_memory, 0); + defer dev.vkd.destroyBuffer(dev.dev, index_buffer, null); + const index_mem_reqs = dev.vkd.getBufferMemoryRequirements(dev.dev, index_buffer); + // const index_memory = try gfx.allocate(pdev, vki, dev, dev.vkd, index_mem_reqs, .{ .device_local_bit = true }); + const index_memory = try device_local.alloc(dev.dev, dev.vkd, index_mem_reqs, .{ .device_local_bit = true }); + defer dev.vkd.freeMemory(dev.dev, index_memory, null); + try dev.vkd.bindBufferMemory(dev.dev, index_buffer, index_memory, 0); - try gfx.uploadData(Index, pdev, vki, dev, vkd, queue, pool, index_buffer, &indices); + try gfx.uploadData(Index, dev.pdev, inst.vki, dev.dev, dev.vkd, dev.queue, dev.pool, index_buffer, &indices); - extent = try create_swapchain( - &chain, - &swapchain, - ally, - pdev, - vki, - window, - dev, - vkd, - pool, - surface, - swap_image_count, - format, - present_mode, - ); - - for (chain.items(.image), chain.items(.view), chain.items(.cmdbuf)) |image, view, cmdbuf| { - try record_cmdbuf(cmdbuf, vkd, image, view, extent, pipeline, vertex_buffer, index_buffer); + try sc.init(); + for (sc.chain.items(.image), sc.chain.items(.view), sc.chain.items(.cmdbuf)) |image, view, cmdbuf| { + try record_cmdbuf(cmdbuf, dev.vkd, image, view, sc.extent, pipeline, vertex_buffer, index_buffer); } var index: u32 = 0; - while (c.glfwWindowShouldClose(window) == c.GLFW_FALSE) { + while (c.glfwWindowShouldClose(win.ref) == c.GLFW_FALSE) { var w: c_int = undefined; var h: c_int = undefined; - c.glfwGetFramebufferSize(window, &w, &h); + c.glfwGetFramebufferSize(win.ref, &w, &h); // Don't present or resize swapchain while the window is minimized if (w == 0 or h == 0) { @@ -320,32 +267,15 @@ pub fn main() !void { continue; } - const frame: ChainImage = chain.get(index); + const frame = sc.chain.get(index); - render(dev, vkd, swapchain, frame, queue) catch |err| switch (err) { + render(dev.dev, dev.vkd, sc.ref, frame, dev.queue) catch |err| switch (err) { error.OutOfDateKHR => { - try vkd.deviceWaitIdle(dev); + try dev.vkd.deviceWaitIdle(dev.dev); - deinit_chain(chain, dev, vkd, pool); - - extent = try create_swapchain( - &chain, - &swapchain, - ally, - pdev, - vki, - window, - dev, - vkd, - pool, - surface, - swap_image_count, - format, - present_mode, - ); - - for (chain.items(.image), chain.items(.view), chain.items(.cmdbuf)) |image, view, cmdbuf| { - try record_cmdbuf(cmdbuf, vkd, image, view, extent, pipeline, vertex_buffer, index_buffer); + try sc.init(); + for (sc.chain.items(.image), sc.chain.items(.view), sc.chain.items(.cmdbuf)) |image, view, cmdbuf| { + try record_cmdbuf(cmdbuf, dev.vkd, image, view, sc.extent, pipeline, vertex_buffer, index_buffer); } index = 0; @@ -357,15 +287,15 @@ pub fn main() !void { c.glfwPollEvents(); - index = @intCast((index + 1) % chain.len); + index = @intCast((index + 1) % sc.chain.len); } - try 
vkd.deviceWaitIdle(dev); + try dev.vkd.deviceWaitIdle(dev.dev); } fn record_cmdbuf( cmdbuf: vk.CommandBuffer, - vkd: gfx.DeviceDispatch, + vkd: Device.Wrapper, image: vk.Image, view: vk.ImageView, extent: vk.Extent2D, @@ -486,7 +416,7 @@ fn record_cmdbuf( try vkd.endCommandBuffer(cmdbuf); } -fn createPipeline(dev: vk.Device, layout: vk.PipelineLayout, format: vk.SurfaceFormatKHR, vkd: gfx.DeviceDispatch) !vk.Pipeline { +fn createPipeline(dev: vk.Device, layout: vk.PipelineLayout, format: vk.SurfaceFormatKHR, vkd: Device.Wrapper) !vk.Pipeline { const vert = try vkd.createShaderModule(dev, &.{ .code_size = shaders.triangle_vert.len, .p_code = @as([*]const u32, @ptrCast(&shaders.triangle_vert)), From cfb76c2c46e67281993befbc31e251c7d09eefda Mon Sep 17 00:00:00 2001 From: David Allemang Date: Fri, 5 Apr 2024 23:50:14 -0400 Subject: [PATCH 043/113] remove vestigial code --- src/gfx.zig | 399 +-------------------------------------------------- src/main.zig | 133 +++-------------- 2 files changed, 25 insertions(+), 507 deletions(-) diff --git a/src/gfx.zig b/src/gfx.zig index 4770709..208312e 100644 --- a/src/gfx.zig +++ b/src/gfx.zig @@ -4,275 +4,12 @@ const builtin = @import("builtin"); const vk = @import("vk"); const c = @import("c.zig"); -const Base = @import("gfx/Base.zig"); -const Instance = @import("gfx/Instance.zig"); -const Device = @import("gfx/Device.zig"); - -// const InstancePair = std.meta.Tuple(&.{ vk.Instance, InstanceDispatch, vk.DebugUtilsMessengerEXT }); - -// /// note: destroy with vki.destroyInstance(instance, null) -// pub fn create_instance(vkb: BaseDispatch, app_name: [*:0]const u8) !InstancePair { -// var exts = std.BoundedArray([*:0]const u8, 32){}; -// var layers = std.BoundedArray([*:0]const u8, 32){}; -// -// if (use_debug_messenger) { -// try exts.appendSlice(&.{ -// vk.extension_info.ext_debug_utils.name, -// }); -// -// try layers.appendSlice(&.{ -// "VK_LAYER_KHRONOS_validation", -// }); -// } -// -// var glfw_exts_count: u32 = 0; -// const glfw_exts: [*]const [*:0]const u8 = -// @ptrCast(c.glfwGetRequiredInstanceExtensions(&glfw_exts_count)); -// try exts.appendSlice(glfw_exts[0..glfw_exts_count]); -// -// const dumci: vk.DebugUtilsMessengerCreateInfoEXT = .{ -// .message_severity = .{ -// .error_bit_ext = true, -// .info_bit_ext = true, -// .verbose_bit_ext = true, -// .warning_bit_ext = true, -// }, -// .message_type = .{ -// .device_address_binding_bit_ext = true, -// .general_bit_ext = false, -// .performance_bit_ext = true, -// .validation_bit_ext = true, -// }, -// .pfn_user_callback = &debug_callback, -// .p_user_data = null, -// }; -// -// const instance = try vkb.createInstance(&vk.InstanceCreateInfo{ -// .p_application_info = &vk.ApplicationInfo{ -// .p_application_name = app_name, -// .application_version = vk.makeApiVersion(0, 0, 0, 0), -// .p_engine_name = app_name, -// .engine_version = vk.makeApiVersion(0, 0, 0, 0), -// .api_version = vk.API_VERSION_1_3, -// }, -// .enabled_extension_count = @intCast(exts.len), -// .pp_enabled_extension_names = &exts.buffer, -// .enabled_layer_count = @intCast(layers.len), -// .pp_enabled_layer_names = &layers.buffer, -// .p_next = if (use_debug_messenger) &dumci else null, -// }, null); -// -// const vki = try InstanceDispatch.load(instance, vkb.dispatch.vkGetInstanceProcAddr); -// errdefer vki.destroyInstance(instance, null); -// -// const messenger: vk.DebugUtilsMessengerEXT = if (use_debug_messenger) -// try vki.createDebugUtilsMessengerEXT(instance, &dumci, null) -// else -// .null_handle; -// errdefer if 
(use_debug_messenger) -// vki.destroyDebugUtilsMessengerEXT(instance, messenger, null); -// -// return .{ instance, vki, messenger }; -// } - -// /// note: destroy with vki.destroySurfaceKHR(instance, surface, null) -// pub fn create_surface(instance: vk.Instance, window: *c.GLFWwindow) !vk.SurfaceKHR { -// var surface: vk.SurfaceKHR = undefined; -// if (c.glfwCreateWindowSurface(instance, window, null, &surface) != .success) { -// return error.SurfaceInitFailed; -// } -// return surface; -// } - -// /// note: destroy with c.glfwDestroyWindow(window) -// pub fn create_window(extent: vk.Extent2D, title: [*:0]const u8) !*c.GLFWwindow { - // c.glfwWindowHintString(c.GLFW_X11_CLASS_NAME, "floating_window"); - // c.glfwWindowHintString(c.GLFW_X11_INSTANCE_NAME, "floating_window"); - // c.glfwWindowHint(c.GLFW_CLIENT_API, c.GLFW_NO_API); -// -// return c.glfwCreateWindow( -// @intCast(extent.width), -// @intCast(extent.height), -// title, -// null, -// null, -// ) orelse error.WindowInitFailed; -// } - -// const DevicePair = std.meta.Tuple(&.{ vk.PhysicalDevice, vk.Device, Device.Wrapper, u32 }); -// -// /// note: destroy with vkd.destroyDevice(dev, null) -// pub fn cr eate_device( -// ally: std.mem.Allocator, -// instance: vk.Instance, -// surface: vk.SurfaceKHR, -// vki: InstanceDispatch, -// ) !DevicePair { -// const required_device_extensions: []const [*:0]const u8 = &.{ -// vk.extension_info.khr_swapchain.name, -// vk.extension_info.khr_dynamic_rendering.name, -// }; -// -// var pdev_count: u32 = undefined; -// _ = try vki.enumeratePhysicalDevices(instance, &pdev_count, null); -// const pdevs = try ally.alloc(vk.PhysicalDevice, pdev_count); -// defer ally.free(pdevs); -// _ = try vki.enumeratePhysicalDevices(instance, &pdev_count, pdevs.ptr); -// -// pdev_search: for (pdevs) |pdev| { -// const props = vki.getPhysicalDeviceProperties(pdev); -// if (props.device_type != .discrete_gpu) continue :pdev_search; -// -// var format_count: u32 = undefined; -// _ = try vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &format_count, null); -// if (format_count == 0) continue :pdev_search; -// -// var mode_count: u32 = undefined; -// _ = try vki.getPhysicalDeviceSurfacePresentModesKHR(pdev, surface, &mode_count, null); -// if (mode_count == 0) continue :pdev_search; -// -// var ext_count: u32 = undefined; -// _ = try vki.enumerateDeviceExtensionProperties(pdev, null, &ext_count, null); -// const exts = try ally.alloc(vk.ExtensionProperties, ext_count); -// defer ally.free(exts); -// _ = try vki.enumerateDeviceExtensionProperties(pdev, null, &ext_count, exts.ptr); -// -// for (required_device_extensions) |name| { -// for (exts) |ext| { -// if (std.mem.eql( -// u8, -// std.mem.span(name), -// std.mem.sliceTo(&ext.extension_name, 0), -// )) { -// break; -// } -// } else { -// continue :pdev_search; -// } -// } -// -// var family_count: u32 = undefined; -// vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, null); -// const families = try ally.alloc(vk.QueueFamilyProperties, family_count); -// defer ally.free(families); -// vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, families.ptr); -// -// // just find one family that does graphics and present, so we can use exclusive sharing -// // on the swapchain. apparently most hardware supports this. logic for queue allocation -// // and swapchain creation is so much simpler this way. 
swapchain creation needs to know -// // the list of queue family indices which will have access to the images, and there's a -// // performance penalty to allow concurrent access to multiple queue families. -// // -// // multiple _queues_ may have exclusive access, but only if they're in the smae family. -// -// const graphics_family: u32 = for (families, 0..) |family, idx| { -// const graphics = family.queue_flags.graphics_bit; -// const present = try vki.getPhysicalDeviceSurfaceSupportKHR(pdev, @intCast(idx), surface) == vk.TRUE; -// if (graphics and present) { -// break @intCast(idx); -// } -// } else { -// continue :pdev_search; -// }; -// -// std.log.debug("selecting device {s}", .{std.mem.sliceTo(&props.device_name, 0)}); -// -// const qci: []const vk.DeviceQueueCreateInfo = &.{ -// vk.DeviceQueueCreateInfo{ -// .queue_family_index = graphics_family, -// .queue_count = 1, -// .p_queue_priorities = &[_]f32{1.0}, -// }, -// }; -// -// const dev = try vki.createDevice(pdev, &.{ -// .queue_create_info_count = @intCast(qci.len), -// .p_queue_create_infos = qci.ptr, -// .enabled_extension_count = @intCast(required_device_extensions.len), -// .pp_enabled_extension_names = required_device_extensions.ptr, -// .p_next = &vk.PhysicalDeviceDynamicRenderingFeaturesKHR{ -// .dynamic_rendering = vk.TRUE, -// }, -// }, null); -// const vkd = try Device.Wrapper.load(dev, vki.dispatch.vkGetDeviceProcAddr); -// errdefer vkd.destroyDevice(dev, null); -// -// return .{ pdev, dev, vkd, graphics_family }; -// } -// -// return error.NoSuitableDevice; -// } -// -// pub fn find_surface_format( -// pdev: vk.PhysicalDevice, -// vki: InstanceDispatch, -// surface: vk.SurfaceKHR, -// preferred: vk.SurfaceFormatKHR, -// ) !vk.SurfaceFormatKHR { -// var formats_buf: [64]vk.SurfaceFormatKHR = undefined; -// var formats_count: u32 = @intCast(formats_buf.len); -// _ = try vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &formats_count, &formats_buf); -// const formats = formats_buf[0..formats_count]; -// -// for (formats) |format| { -// if (std.meta.eql(format, preferred)) { -// return format; -// } -// } -// -// return formats[0]; -// } -// -// pub fn find_present_mode( -// pdev: vk.PhysicalDevice, -// vki: InstanceDispatch, -// surface: vk.SurfaceKHR, -// preferred: vk.PresentModeKHR, -// ) !vk.PresentModeKHR { -// var modes_buf: [8]vk.PresentModeKHR = undefined; -// var modes_count: u32 = @intCast(modes_buf.len); -// _ = try vki.getPhysicalDeviceSurfacePresentModesKHR(pdev, surface, &modes_count, &modes_buf); -// const modes = modes_buf[0..modes_count]; -// -// for (modes) |mode| { -// if (std.meta.eql(mode, preferred)) { -// return mode; -// } -// } -// -// return .mailbox_khr; -// } -// -// pub fn find_swap_extent( -// pdev: vk.PhysicalDevice, -// vki: InstanceDispatch, -// surface: vk.SurfaceKHR, -// window: *c.GLFWwindow, -// ) !vk.Extent2D { -// const caps = try vki.getPhysicalDeviceSurfaceCapabilitiesKHR(pdev, surface); -// var extent = caps.current_extent; -// -// if (extent.width == std.math.maxInt(u32)) { -// c.glfwGetFramebufferSize(window, @ptrCast(&extent.width), @ptrCast(&extent.height)); -// extent.width = std.math.clamp(extent.width, caps.min_image_extent.width, caps.max_image_extent.width); -// extent.height = std.math.clamp(extent.height, caps.min_image_extent.height, caps.max_image_extent.height); -// } -// -// return extent; -// } -// -// pub fn find_swap_image_count( -// pdev: vk.PhysicalDevice, -// vki: InstanceDispatch, -// surface: vk.SurfaceKHR, -// ) !u32 { -// const caps = try 
vki.getPhysicalDeviceSurfaceCapabilitiesKHR(pdev, surface); -// var count = @max(3, caps.min_image_count + 1); -// if (caps.max_image_count > 0) { -// count = @min(count, caps.max_image_count); -// } -// return count; -// } +pub const Base = @import("gfx/Base.zig"); +pub const Context = @import("gfx/Context.zig"); +pub const Device = @import("gfx/Device.zig"); +pub const Instance = @import("gfx/Instance.zig"); +pub const Swapchain = @import("gfx/Swapchain.zig"); +pub const Window = @import("gfx/Window.zig"); pub fn uploadData( comptime T: type, @@ -402,127 +139,3 @@ pub const VkAllocator = struct { return error.NoSuitableMemoryType; } }; - -// pub const BaseDispatch = vk.BaseWrapper(.{ -// .createInstance = true, -// .getInstanceProcAddr = true, -// }); - -// pub const Instance.Wrapper = vk.InstanceWrapper(.{ -// .destroyInstance = true, -// .createDevice = true, -// .destroySurfaceKHR = true, -// .enumeratePhysicalDevices = true, -// .getPhysicalDeviceProperties = true, -// .enumerateDeviceExtensionProperties = true, -// .getPhysicalDeviceSurfaceFormatsKHR = true, -// .getPhysicalDeviceSurfacePresentModesKHR = true, -// .getPhysicalDeviceSurfaceCapabilitiesKHR = true, -// .getPhysicalDeviceQueueFamilyProperties = true, -// .getPhysicalDeviceSurfaceSupportKHR = true, -// .getPhysicalDeviceMemoryProperties = true, -// .getDeviceProcAddr = true, -// .createDebugUtilsMessengerEXT = use_debug_messenger, -// .destroyDebugUtilsMessengerEXT = use_debug_messenger, -// }); - -// pub const Device.Wrapper = vk.DeviceWrapper(.{ -// .destroyDevice = true, -// .getDeviceQueue = true, -// .createSemaphore = true, -// .createFence = true, -// .createImageView = true, -// .destroyImageView = true, -// .destroySemaphore = true, -// .destroyFence = true, -// .getSwapchainImagesKHR = true, -// .createSwapchainKHR = true, -// .destroySwapchainKHR = true, -// .acquireNextImageKHR = true, -// .deviceWaitIdle = true, -// .waitForFences = true, -// .resetFences = true, -// .queueSubmit = true, -// .queuePresentKHR = true, -// .createCommandPool = true, -// .destroyCommandPool = true, -// .allocateCommandBuffers = true, -// .freeCommandBuffers = true, -// .queueWaitIdle = true, -// .createShaderModule = true, -// .destroyShaderModule = true, -// .createPipelineLayout = true, -// .destroyPipelineLayout = true, -// .createGraphicsPipelines = true, -// .destroyPipeline = true, -// .beginCommandBuffer = true, -// .endCommandBuffer = true, -// .allocateMemory = true, -// .freeMemory = true, -// .createBuffer = true, -// .destroyBuffer = true, -// .getBufferMemoryRequirements = true, -// .mapMemory = true, -// .unmapMemory = true, -// .bindBufferMemory = true, -// .cmdBeginRenderPass = true, -// .cmdEndRenderPass = true, -// .cmdBindPipeline = true, -// .cmdDraw = true, -// .cmdDrawIndexed = true, -// .cmdSetViewport = true, -// .cmdSetScissor = true, -// .cmdBindVertexBuffers = true, -// .cmdBindIndexBuffer = true, -// .cmdCopyBuffer = true, -// .cmdBeginRenderingKHR = true, -// .cmdEndRenderingKHR = true, -// .cmdPipelineBarrier = true, -// }); - -pub fn debug_callback( - msg_severity: vk.DebugUtilsMessageSeverityFlagsEXT, - msg_type: vk.DebugUtilsMessageTypeFlagsEXT, - p_data: ?*const vk.DebugUtilsMessengerCallbackDataEXT, - _: ?*anyopaque, -) callconv(vk.vulkan_call_conv) vk.Bool32 { - // ripped from std.log.defaultLog - - const data = p_data orelse return vk.FALSE; - const message = data.p_message orelse return vk.FALSE; - - const severity_prefix = if (msg_severity.verbose_bit_ext) - "verbose:" - else if 
(msg_severity.info_bit_ext) - "info:" - else if (msg_severity.warning_bit_ext) - "warning:" - else if (msg_severity.error_bit_ext) - "error:" - else - "?:"; - - const type_prefix = if (msg_type.general_bit_ext) - "" - else if (msg_type.validation_bit_ext) - "validation:" - else if (msg_type.performance_bit_ext) - "performance:" - else if (msg_type.device_address_binding_bit_ext) - "device_address_binding:" - else - "?:"; - - const stderr = std.io.getStdErr().writer(); - var bw = std.io.bufferedWriter(stderr); - const writer = bw.writer(); - - std.debug.getStderrMutex().lock(); - defer std.debug.getStderrMutex().unlock(); - nosuspend { - writer.print("vk-{s}{s} {s}\n", .{ severity_prefix, type_prefix, message }) catch return vk.FALSE; - bw.flush() catch return vk.FALSE; - } - - return vk.FALSE; -} diff --git a/src/main.zig b/src/main.zig index 806ca08..25489c6 100644 --- a/src/main.zig +++ b/src/main.zig @@ -50,104 +50,13 @@ const vertices = [_]Vertex{ const indices = [_]Index{ 4, 5, 6, 6, 5, 7 }; -// pub fn create_swapchain( -// chain: *std.MultiArrayList(ChainImage), -// swapchain: *vk.SwapchainKHR, -// ally: std.mem.Allocator, -// pdev: vk.PhysicalDevice, -// vki: gfx.InstanceDispatch, -// window: *c.GLFWwindow, -// dev: vk.Device, -// vkd: Device.Wrapper, -// pool: vk.CommandPool, -// surface: vk.SurfaceKHR, -// swap_image_count: u32, -// format: vk.SurfaceFormatKHR, -// present_mode: vk.PresentModeKHR, -// ) !vk.Extent2D { -// const extent = try gfx.find_swap_extent(pdev, vki, surface, window); -// -// const prev_swapchain = swapchain.*; -// swapchain.* = try vkd.createSwapchainKHR(dev, &.{ -// .surface = surface, -// .min_image_count = swap_image_count, -// .image_format = format.format, -// .image_color_space = format.color_space, -// .image_extent = extent, -// .image_array_layers = 1, -// .image_usage = .{ .color_attachment_bit = true }, -// .image_sharing_mode = .exclusive, -// .pre_transform = .{ .identity_bit_khr = true }, -// .composite_alpha = .{ .opaque_bit_khr = true }, -// .present_mode = present_mode, -// .clipped = vk.TRUE, -// .old_swapchain = prev_swapchain, -// }, null); -// vkd.destroySwapchainKHR(dev, prev_swapchain, null); -// -// var image_count: u32 = undefined; -// _ = try vkd.getSwapchainImagesKHR(dev, swapchain.*, &image_count, null); -// try chain.resize(ally, image_count); -// _ = try vkd.getSwapchainImagesKHR(dev, swapchain.*, &image_count, chain.items(.image).ptr); -// -// // memset so that deinit_chain will succeed with .null_handle if error part-way through a loop. 
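
// [editor's sketch, not part of the original patch] The ".null_handle" pre-fill in the
// removed code here is a defensive-cleanup pattern: every handle slot gets a known
// value before any create call, so a single errdefer/deinit can tear the whole set
// down even when creation fails partway (destroying a VK_NULL_HANDLE is a no-op).
// Distilled, with `dev` and `vkd` named as in the surrounding code:
var example_fences: [3]vk.Fence = undefined;
@memset(&example_fences, .null_handle);
errdefer for (example_fences) |fence| vkd.destroyFence(dev, fence, null);
for (&example_fences) |*fence| {
    fence.* = try vkd.createFence(dev, &.{}, null);
}
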
-// @memset(chain.items(.view), .null_handle); -// @memset(chain.items(.cmdbuf), .null_handle); -// @memset(chain.items(.fence), .null_handle); -// @memset(chain.items(.image_available), .null_handle); -// errdefer deinit_chain(chain.*, dev, vkd, pool); -// -// for (chain.items(.image), chain.items(.view)) |image, *view| { -// view.* = try vkd.createImageView(dev, &.{ -// .image = image, -// .view_type = .@"2d", -// .format = format.format, -// .components = .{ .r = .identity, .g = .identity, .b = .identity, .a = .identity }, -// .subresource_range = .{ -// .aspect_mask = .{ .color_bit = true }, -// .base_mip_level = 0, -// .level_count = 1, -// .base_array_layer = 0, -// .layer_count = 1, -// }, -// }, null); -// } -// -// for (chain.items(.fence)) |*fence| { -// fence.* = try vkd.createFence(dev, &.{ .flags = .{ .signaled_bit = true } }, null); -// } -// -// for (chain.items(.image_available)) |*sem| { -// sem.* = try vkd.createSemaphore(dev, &.{}, null); -// } -// -// for (chain.items(.render_finished)) |*sem| { -// sem.* = try vkd.createSemaphore(dev, &.{}, null); -// } -// -// try vkd.allocateCommandBuffers(dev, &.{ -// .command_buffer_count = @intCast(chain.len), -// .command_pool = pool, -// .level = .primary, -// }, chain.items(.cmdbuf).ptr); -// -// return extent; -// } -// -// pub fn deinit_chain( -// chain: std.MultiArrayList(ChainImage), -// dev: vk.Device, -// vkd: Device.Wrapper, -// pool: vk.CommandPool, -// ) void { -// vkd.freeCommandBuffers(dev, pool, @intCast(chain.len), chain.items(.cmdbuf).ptr); -// for (chain.items(.view)) |view| vkd.destroyImageView(dev, view, null); -// for (chain.items(.fence)) |fence| vkd.destroyFence(dev, fence, null); -// for (chain.items(.image_available)) |sem| vkd.destroySemaphore(dev, sem, null); -// for (chain.items(.render_finished)) |sem| vkd.destroySemaphore(dev, sem, null); -// } - -fn render(dev: vk.Device, vkd: Device.Wrapper, swapchain: vk.SwapchainKHR, frame: Swapchain.ChainImage, queue: vk.Queue) !void { +fn render( + dev: vk.Device, + vkd: gfx.Device.Wrapper, + swapchain: vk.SwapchainKHR, + frame: gfx.Swapchain.ChainImage, + queue: vk.Queue, +) !void { _ = try vkd.waitForFences(dev, 1, @ptrCast(&frame.fence), vk.TRUE, std.math.maxInt(u64)); const result = try vkd.acquireNextImageKHR( @@ -180,31 +89,24 @@ fn render(dev: vk.Device, vkd: Device.Wrapper, swapchain: vk.SwapchainKHR, frame }); } -const Base = @import("gfx/Base.zig"); -const Instance = @import("gfx/Instance.zig"); -const Context = @import("gfx/Context.zig"); -const Window = @import("gfx/Window.zig"); -const Device = @import("gfx/Device.zig"); -const Swapchain = @import("gfx/Swapchain.zig"); - pub fn main() !void { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; defer _ = gpa.deinit(); const ally = gpa.allocator(); - const base = try Base.init(); + const base = try gfx.Base.init(); defer base.deinit(); - const inst = try Instance.init(&base); + const inst = try gfx.Instance.init(&base); defer inst.deinit(); - const win = try Window.init(&inst, "zig-glfw-vulkan", .{ .width = 800, .height = 600 }); + const win = try gfx.Window.init(&inst, "zig-glfw-vulkan", .{ .width = 800, .height = 600 }); defer win.deinit(); - const dev = try Device.init(ally, &inst, &win); + const dev = try gfx.Device.init(ally, &inst, &win); defer dev.deinit(); - var sc = try Swapchain.create(ally, &dev); + var sc = try gfx.Swapchain.create(ally, &dev); defer sc.deinit(); const device_local = gfx.VkAllocator.init(dev.pdev, inst.vki); @@ -228,7 +130,6 @@ pub fn main() !void { }, null); defer 
dev.vkd.destroyBuffer(dev.dev, vertex_buffer, null); const vertex_mem_reqs = dev.vkd.getBufferMemoryRequirements(dev.dev, vertex_buffer); - // const vertex_memory = try gfx.allocate(pdev, vki, dev, dev.vkd, vertex_mem_reqs, .{ .device_local_bit = true }); const vertex_memory = try device_local.alloc(dev.dev, dev.vkd, vertex_mem_reqs, .{ .device_local_bit = true }); defer dev.vkd.freeMemory(dev.dev, vertex_memory, null); try dev.vkd.bindBufferMemory(dev.dev, vertex_buffer, vertex_memory, 0); @@ -242,7 +143,6 @@ pub fn main() !void { }, null); defer dev.vkd.destroyBuffer(dev.dev, index_buffer, null); const index_mem_reqs = dev.vkd.getBufferMemoryRequirements(dev.dev, index_buffer); - // const index_memory = try gfx.allocate(pdev, vki, dev, dev.vkd, index_mem_reqs, .{ .device_local_bit = true }); const index_memory = try device_local.alloc(dev.dev, dev.vkd, index_mem_reqs, .{ .device_local_bit = true }); defer dev.vkd.freeMemory(dev.dev, index_memory, null); try dev.vkd.bindBufferMemory(dev.dev, index_buffer, index_memory, 0); @@ -295,7 +195,7 @@ pub fn main() !void { fn record_cmdbuf( cmdbuf: vk.CommandBuffer, - vkd: Device.Wrapper, + vkd: gfx.Device.Wrapper, image: vk.Image, view: vk.ImageView, extent: vk.Extent2D, @@ -416,7 +316,12 @@ fn record_cmdbuf( try vkd.endCommandBuffer(cmdbuf); } -fn createPipeline(dev: vk.Device, layout: vk.PipelineLayout, format: vk.SurfaceFormatKHR, vkd: Device.Wrapper) !vk.Pipeline { +fn createPipeline( + dev: vk.Device, + layout: vk.PipelineLayout, + format: vk.SurfaceFormatKHR, + vkd: gfx.Device.Wrapper, +) !vk.Pipeline { const vert = try vkd.createShaderModule(dev, &.{ .code_size = shaders.triangle_vert.len, .p_code = @as([*]const u32, @ptrCast(&shaders.triangle_vert)), From 3d4323b1fcfe137ef4fae8ddd59ca0d91b7c63a0 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Fri, 26 Apr 2024 15:19:15 -0400 Subject: [PATCH 044/113] upgrade vulkan-zig for current zig master --- build.zig.zon | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build.zig.zon b/build.zig.zon index bac4b3c..9936a80 100644 --- a/build.zig.zon +++ b/build.zig.zon @@ -4,8 +4,8 @@ .dependencies = .{ .@"vulkan-zig" = .{ - .url = "https://github.com/Snektron/vulkan-zig/archive/ac4103a733c479b599aae8d42c08cabd7d5cf48a.tar.gz", - .hash = "122085abbcfa0328f5f6e0e702d25ee0a61bb92d0ce9ba415a2fea1d33f43129cb66", + .url = "https://github.com/Snektron/vulkan-zig/archive/2047f7e7f22d9aca17d0abb4ea5fb03763fce39a.tar.gz", + .hash = "122067b39a4f454ece4e800ee95e0002a767b535d647c6042ac93bc195100683ba03", }, }, From f3c94fbdfa8f0f2474981ae6ba9321d1214d3191 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Fri, 26 Apr 2024 15:19:37 -0400 Subject: [PATCH 045/113] drop discrete gpu requirement for laptop --- src/gfx/Device.zig | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/gfx/Device.zig b/src/gfx/Device.zig index 56d79f9..f43e445 100644 --- a/src/gfx/Device.zig +++ b/src/gfx/Device.zig @@ -61,8 +61,10 @@ pub fn wrap( win: *const Window, pdev: vk.PhysicalDevice, ) !Self { - const props = inst.vki.getPhysicalDeviceProperties(pdev); - if (props.device_type != .discrete_gpu) return error.NotDiscrete; + // TODO: Need to rank devices and select the best one + // the new laptop doesn't have a discrete gpu. 
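
// [editor's sketch, not part of the original patch] One way to act on the TODO above
// is to score candidates instead of hard-rejecting non-discrete GPUs; a later commit
// in this series does exactly that with a score-based CandidateDeviceInfo. A minimal
// type-based ranking helper could look like this (standalone sketch, weights are
// arbitrary):
fn deviceTypeScore(props: vk.PhysicalDeviceProperties) i32 {
    return switch (props.device_type) {
        .discrete_gpu => 100,
        .integrated_gpu => 50,
        .virtual_gpu, .cpu => 10,
        else => 0,
    };
}
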
+ // const props = inst.vki.getPhysicalDeviceProperties(pdev); + // if (props.device_type != .discrete_gpu) return error.NotDiscrete; var format_count: u32 = undefined; _ = try inst.vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, win.surface, &format_count, null); From f58107ad7766f14761a0b95ab1ff1931fe8966a0 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Wed, 5 Jun 2024 16:04:50 -0400 Subject: [PATCH 046/113] compat: support latest zig, vulkan-zig --- .gitignore | 2 +- build.zig | 21 +++------------- build.zig.zon | 4 +-- src/gfx.zig | 16 ++++++++++++ src/gfx/Base.zig | 6 ++--- src/gfx/Device.zig | 59 +++----------------------------------------- src/gfx/Instance.zig | 48 ++++++++++------------------------- 7 files changed, 42 insertions(+), 114 deletions(-) diff --git a/.gitignore b/.gitignore index 7ac1866..7626714 100644 --- a/.gitignore +++ b/.gitignore @@ -37,7 +37,7 @@ modules.order Module.symvers Mkfile.old dkms.conf -zig-cache/ +[.]zig-cache/ zig-out/ build/ build-*/ diff --git a/build.zig b/build.zig index 523bea7..c213265 100644 --- a/build.zig +++ b/build.zig @@ -13,7 +13,7 @@ pub fn build(b: *std.Build) void { const exe = b.addExecutable(.{ .name = "scratchzig", - .root_source_file = .{ .path = "src/main.zig" }, + .root_source_file = b.path("src/main.zig"), .target = target, .optimize = optimize, }); @@ -39,19 +39,6 @@ pub fn build(b: *std.Build) void { b.installArtifact(exe); - var docs_dir: std.Build.GeneratedFile = .{ - .path = "docs", - .step = &exe.step, - }; - exe.generated_docs = &docs_dir; - const docs = b.addInstallDirectory(.{ - .source_dir = .{ .generated = &docs_dir }, - .install_dir = .{ .custom = "docs" }, - .install_subdir = "", - }); - const docs_step = b.step("docs", "Build the docs"); - docs_step.dependOn(&docs.step); - const run_cmd = b.addRunArtifact(exe); run_cmd.step.dependOn(b.getInstallStep()); @@ -63,7 +50,7 @@ pub fn build(b: *std.Build) void { run_step.dependOn(&run_cmd.step); const exe_unit_tests = b.addTest(.{ - .root_source_file = .{ .path = "src/main.zig" }, + .root_source_file = b.path("src/main.zig"), .target = target, .optimize = optimize, }); @@ -77,7 +64,7 @@ pub fn build(b: *std.Build) void { const dsa_unit_tests = b.addTest(.{ .name = "dsa.zig tests", - .root_source_file = .{ .path = "src/dsa.zig" }, + .root_source_file = b.path("src/dsa.zig"), .target = target, .optimize = optimize, }); @@ -89,7 +76,7 @@ pub fn build(b: *std.Build) void { const inspect = b.addExecutable(.{ .name = "vkinspect", - .root_source_file = .{ .path = "src/inspect.zig" }, + .root_source_file = b.path("src/inspect.zig"), .target = target, .optimize = optimize, }); diff --git a/build.zig.zon b/build.zig.zon index 9936a80..43a3b53 100644 --- a/build.zig.zon +++ b/build.zig.zon @@ -4,8 +4,8 @@ .dependencies = .{ .@"vulkan-zig" = .{ - .url = "https://github.com/Snektron/vulkan-zig/archive/2047f7e7f22d9aca17d0abb4ea5fb03763fce39a.tar.gz", - .hash = "122067b39a4f454ece4e800ee95e0002a767b535d647c6042ac93bc195100683ba03", + .url = "https://github.com/Snektron/vulkan-zig/archive/f2c2e0ff80374563357cc4fe72bf7d8a2c956824.tar.gz", + .hash = "1220cf0972c6fe05437c1a8689b955084385eb7ca1f8c14010d49ca5a89570a5d90d", }, }, diff --git a/src/gfx.zig b/src/gfx.zig index 208312e..e3b51c9 100644 --- a/src/gfx.zig +++ b/src/gfx.zig @@ -11,6 +11,22 @@ pub const Instance = @import("gfx/Instance.zig"); pub const Swapchain = @import("gfx/Swapchain.zig"); pub const Window = @import("gfx/Window.zig"); +pub const use_debug_messenger = switch (builtin.mode) { + .Debug, .ReleaseSafe => true, + 
.ReleaseSmall, .ReleaseFast => false, +}; + +pub const apis: []const vk.ApiInfo = &.{ + vk.features.version_1_0, + vk.features.version_1_1, + vk.features.version_1_2, + vk.features.version_1_3, + vk.extensions.khr_surface, + vk.extensions.khr_swapchain, + vk.extensions.khr_dynamic_rendering, + if (use_debug_messenger) vk.extensions.ext_debug_utils else .{}, +}; + pub fn uploadData( comptime T: type, pdev: vk.PhysicalDevice, diff --git a/src/gfx/Base.zig b/src/gfx/Base.zig index a348067..e0f3fd7 100644 --- a/src/gfx/Base.zig +++ b/src/gfx/Base.zig @@ -3,6 +3,7 @@ const builtin = @import("builtin"); const vk = @import("vk"); const c = @import("../c.zig"); +const gfx = @import("../gfx.zig"); const Self = @This(); @@ -26,7 +27,4 @@ pub fn deinit(_: Self) void { c.glfwTerminate(); } -pub const Wrapper = vk.BaseWrapper(.{ - .createInstance = true, - .getInstanceProcAddr = true, -}); +pub const Wrapper = vk.BaseWrapper(gfx.apis); diff --git a/src/gfx/Device.zig b/src/gfx/Device.zig index f43e445..d27f63c 100644 --- a/src/gfx/Device.zig +++ b/src/gfx/Device.zig @@ -5,6 +5,7 @@ const builtin = @import("builtin"); const vk = @import("vk"); const c = @import("../c.zig"); +const gfx = @import("../gfx.zig"); const Instance = @import("Instance.zig"); const Window = @import("Window.zig"); @@ -12,8 +13,8 @@ const Window = @import("Window.zig"); const Self = @This(); const required_extensions: []const [*:0]const u8 = &.{ - vk.extension_info.khr_swapchain.name, - vk.extension_info.khr_dynamic_rendering.name, + vk.extensions.khr_swapchain.name, + vk.extensions.khr_dynamic_rendering.name, }; const preferred_surface_format: vk.SurfaceFormatKHR = .{ @@ -178,56 +179,4 @@ pub fn deinit(self: Self) void { self.vkd.destroyDevice(self.dev, null); } -pub const Wrapper = vk.DeviceWrapper(.{ - .destroyDevice = true, - .getDeviceQueue = true, - .createSemaphore = true, - .createFence = true, - .createImageView = true, - .destroyImageView = true, - .destroySemaphore = true, - .destroyFence = true, - .getSwapchainImagesKHR = true, - .createSwapchainKHR = true, - .destroySwapchainKHR = true, - .acquireNextImageKHR = true, - .deviceWaitIdle = true, - .waitForFences = true, - .resetFences = true, - .queueSubmit = true, - .queuePresentKHR = true, - .createCommandPool = true, - .destroyCommandPool = true, - .allocateCommandBuffers = true, - .freeCommandBuffers = true, - .queueWaitIdle = true, - .createShaderModule = true, - .destroyShaderModule = true, - .createPipelineLayout = true, - .destroyPipelineLayout = true, - .createGraphicsPipelines = true, - .destroyPipeline = true, - .beginCommandBuffer = true, - .endCommandBuffer = true, - .allocateMemory = true, - .freeMemory = true, - .createBuffer = true, - .destroyBuffer = true, - .getBufferMemoryRequirements = true, - .mapMemory = true, - .unmapMemory = true, - .bindBufferMemory = true, - .cmdBeginRenderPass = true, - .cmdEndRenderPass = true, - .cmdBindPipeline = true, - .cmdDraw = true, - .cmdDrawIndexed = true, - .cmdSetViewport = true, - .cmdSetScissor = true, - .cmdBindVertexBuffers = true, - .cmdBindIndexBuffer = true, - .cmdCopyBuffer = true, - .cmdBeginRenderingKHR = true, - .cmdEndRenderingKHR = true, - .cmdPipelineBarrier = true, -}); +pub const Wrapper = vk.DeviceWrapper(gfx.apis); diff --git a/src/gfx/Instance.zig b/src/gfx/Instance.zig index c3f553b..c5c04d3 100644 --- a/src/gfx/Instance.zig +++ b/src/gfx/Instance.zig @@ -3,8 +3,7 @@ const builtin = @import("builtin"); const vk = @import("vk"); const c = @import("../c.zig"); - -const Base = @import("Base.zig"); 
+const gfx = @import("../gfx.zig"); const Self = @This(); @@ -16,26 +15,21 @@ const app_info: vk.ApplicationInfo = .{ .api_version = vk.API_VERSION_1_3, }; -pub const use_debug_messenger = switch (builtin.mode) { - .Debug, .ReleaseSafe => true, - .ReleaseSmall, .ReleaseFast => false, -}; - ref: vk.Instance, vki: Wrapper, -base: *const Base, +base: *const gfx.Base, -messenger: if (use_debug_messenger) vk.DebugUtilsMessengerEXT else void, +messenger: if (gfx.use_debug_messenger) vk.DebugUtilsMessengerEXT else void, pub fn init( - base: *const Base, + base: *const gfx.Base, ) !Self { var exts: std.BoundedArray([*:0]const u8, 32) = .{}; var layers: std.BoundedArray([*:0]const u8, 32) = .{}; - if (use_debug_messenger) { + if (gfx.use_debug_messenger) { try exts.appendSlice(&.{ - vk.extension_info.ext_debug_utils.name, + vk.extensions.ext_debug_utils.name, }); try layers.appendSlice(&.{ @@ -71,18 +65,18 @@ pub fn init( .pp_enabled_extension_names = &exts.buffer, .enabled_layer_count = @intCast(layers.len), .pp_enabled_layer_names = &layers.buffer, - .p_next = if (use_debug_messenger) &mci else null, + .p_next = if (gfx.use_debug_messenger) &mci else null, }, null); const vki = try Wrapper.load(ref, base.vkb.dispatch.vkGetInstanceProcAddr); errdefer vki.destroyInstance(ref, null); - const messenger = if (use_debug_messenger) + const messenger = if (gfx.use_debug_messenger) try vki.createDebugUtilsMessengerEXT(ref, &mci, null) else void{}; - errdefer if (use_debug_messenger) + errdefer if (gfx.use_debug_messenger) vki.destroyDebugUtilsMessengerEXT(ref, messenger, null); return .{ @@ -94,28 +88,12 @@ pub fn init( } pub fn deinit(self: Self) void { - if (use_debug_messenger) + if (gfx.use_debug_messenger) self.vki.destroyDebugUtilsMessengerEXT(self.ref, self.messenger, null); self.vki.destroyInstance(self.ref, null); } -pub const Wrapper = vk.InstanceWrapper(.{ - .destroyInstance = true, - .createDevice = true, - .destroySurfaceKHR = true, - .enumeratePhysicalDevices = true, - .getPhysicalDeviceProperties = true, - .enumerateDeviceExtensionProperties = true, - .getPhysicalDeviceSurfaceFormatsKHR = true, - .getPhysicalDeviceSurfacePresentModesKHR = true, - .getPhysicalDeviceSurfaceCapabilitiesKHR = true, - .getPhysicalDeviceQueueFamilyProperties = true, - .getPhysicalDeviceSurfaceSupportKHR = true, - .getPhysicalDeviceMemoryProperties = true, - .getDeviceProcAddr = true, - .createDebugUtilsMessengerEXT = use_debug_messenger, - .destroyDebugUtilsMessengerEXT = use_debug_messenger, -}); +pub const Wrapper = vk.InstanceWrapper(gfx.apis); pub fn debug_callback( msg_severity: vk.DebugUtilsMessageSeverityFlagsEXT, @@ -154,8 +132,8 @@ pub fn debug_callback( var bw = std.io.bufferedWriter(stderr); const writer = bw.writer(); - std.debug.getStderrMutex().lock(); - defer std.debug.getStderrMutex().unlock(); + std.debug.lockStdErr(); + defer std.debug.unlockStdErr(); nosuspend { writer.print("vk-{s}{s} {s}\n", .{ severity_prefix, type_prefix, message }) catch return vk.FALSE; bw.flush() catch return vk.FALSE; From 876a11457809d47e6ba5a989beb0e96952693b4c Mon Sep 17 00:00:00 2001 From: David Allemang Date: Wed, 5 Jun 2024 21:32:05 -0400 Subject: [PATCH 047/113] todo note --- src/main.zig | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/main.zig b/src/main.zig index 25489c6..2162eca 100644 --- a/src/main.zig +++ b/src/main.zig @@ -171,6 +171,9 @@ pub fn main() !void { render(dev.dev, dev.vkd, sc.ref, frame, dev.queue) catch |err| switch (err) { error.OutOfDateKHR => { + // TODO: this is a hack to safely 
destroy sync primitives + // don't do this. be smart about sync primitive reuse or + // move them to "garbage" to be destroyed later. try dev.vkd.deviceWaitIdle(dev.dev); try sc.init(); From 237dc9eb7e85166b6eccd6708e7c4c2152213848 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Tue, 25 Jun 2024 20:41:05 -0400 Subject: [PATCH 048/113] add .tool-versions --- .tool-versions | 1 + 1 file changed, 1 insertion(+) create mode 100644 .tool-versions diff --git a/.tool-versions b/.tool-versions new file mode 100644 index 0000000..02050d7 --- /dev/null +++ b/.tool-versions @@ -0,0 +1 @@ +zig 0.13.0 From 9d99902b429dc79b0bc27466235e750fbaf5e048 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Wed, 26 Jun 2024 12:14:36 -0400 Subject: [PATCH 049/113] move to au globals --- src/au.zig | 432 +++++++++++++++++++++++++++++++++++++++++++++++++++ src/main.zig | 194 ++++++++++++----------- 2 files changed, 531 insertions(+), 95 deletions(-) create mode 100644 src/au.zig diff --git a/src/au.zig b/src/au.zig new file mode 100644 index 0000000..8b6f2ae --- /dev/null +++ b/src/au.zig @@ -0,0 +1,432 @@ +const std = @import("std"); +const builtin = @import("builtin"); + +const vk = @import("vk"); +const c = @import("c.zig"); + +pub const use_debug_messenger = switch (builtin.mode) { + .Debug, .ReleaseSafe => true, + .ReleaseSmall, .ReleaseFast => false, +}; + +pub const apis: []const vk.ApiInfo = &.{ + vk.features.version_1_0, + vk.features.version_1_1, + vk.features.version_1_2, + vk.features.version_1_3, + vk.extensions.khr_surface, + vk.extensions.khr_swapchain, + vk.extensions.khr_dynamic_rendering, + if (use_debug_messenger) vk.extensions.ext_debug_utils else .{}, +}; + +pub const device_extensions: []const [*:0]const u8 = &.{ + // todo somehow sync this with APIs above? 
+ vk.extensions.khr_swapchain.name, + vk.extensions.khr_dynamic_rendering.name, +}; + +pub const app_info: vk.ApplicationInfo = .{ + .p_application_name = "zig-glfw-vulkan", + .application_version = vk.makeApiVersion(0, 0, 0, 0), + .p_engine_name = "zig-glfw-vulkan", + .engine_version = vk.makeApiVersion(0, 0, 0, 0), + .api_version = vk.API_VERSION_1_3, +}; + +pub const BaseWrapper = vk.BaseWrapper(apis); +pub const InstanceWrapper = vk.InstanceWrapper(apis); +pub const DeviceWrapper = vk.DeviceWrapper(apis); + +pub const InstanceProxy = vk.InstanceProxy(apis); +pub const DeviceProxy = vk.DeviceProxy(apis); +pub const QueueProxy = vk.QueueProxy(apis); +pub const CommandBufferProxy = vk.CommandBufferProxy(apis); + +pub const B: *const BaseWrapper = &_bw; +pub const I: *const InstanceProxy = &_ip; +pub const D: *const DeviceProxy = &_dp; +pub const W: *const Window = &_window; +pub const Q: *const QueueProxy = &_qp; + +pub const device_config: *const CandidateDeviceInfo = &_dconfig; + +var _bw: BaseWrapper = undefined; +var _iw: InstanceWrapper = undefined; +var _dw: DeviceWrapper = undefined; + +var _ip: InstanceProxy = undefined; +var _dp: DeviceProxy = undefined; +var _qp: QueueProxy = undefined; + +var _instance: vk.Instance = undefined; +var _window: Window = undefined; +var _device: vk.Device = undefined; +var _dconfig: CandidateDeviceInfo = undefined; +var _queue: vk.Queue = undefined; + +pub fn init(alloc: std.mem.Allocator) !void { + try init_glfw(); + errdefer deinit_glfw(); + + try init_base(); + errdefer deinit_base(); + + try init_instance(alloc); + errdefer deinit_instance(); + + try init_window(); + errdefer deinit_window(); + + try init_device(alloc); + errdefer deinit_device(); +} + +pub fn deinit() void { + deinit_device(); + deinit_window(); + deinit_instance(); + deinit_base(); + deinit_glfw(); +} + +fn init_glfw() !void { + if (c.glfwInit() != c.GLFW_TRUE) + return error.glfwInitFailed; + errdefer c.glfwTerminate(); + + if (c.glfwVulkanSupported() != c.GLFW_TRUE) + return error.glfwNoVulkan; +} + +fn deinit_glfw() void { + c.glfwTerminate(); +} + +fn init_base() !void { + if (use_debug_messenger) { + _bw = try BaseWrapper.load(c.glfwGetInstanceProcAddress); + } else { + _bw = BaseWrapper.loadNoFail(c.glfwGetInstanceProcAddress); + } +} + +fn deinit_base() void {} + +fn init_instance(alloc: std.mem.Allocator) !void { + var extensions = std.ArrayList([*:0]const u8).init(alloc); + defer extensions.deinit(); + + var layers = std.ArrayList([*:0]const u8).init(alloc); + defer layers.deinit(); + + if (use_debug_messenger) { + try extensions.appendSlice(&.{ + vk.extensions.ext_debug_utils.name, + }); + + try layers.appendSlice(&.{ + "VK_LAYER_KHRONOS_validation", + }); + } + + var glfw_exts_count: u32 = 0; + const glfw_exts: [*]const [*:0]const u8 = + @ptrCast(c.glfwGetRequiredInstanceExtensions(&glfw_exts_count)); + try extensions.appendSlice(glfw_exts[0..glfw_exts_count]); + + const mci: vk.DebugUtilsMessengerCreateInfoEXT = .{ + .message_severity = .{ + .error_bit_ext = true, + .info_bit_ext = true, + .verbose_bit_ext = true, + .warning_bit_ext = true, + }, + .message_type = .{ + .device_address_binding_bit_ext = true, + .general_bit_ext = false, + .performance_bit_ext = true, + .validation_bit_ext = true, + }, + .pfn_user_callback = &debug_callback, + .p_user_data = null, + }; + + _instance = try B.createInstance(&.{ + .p_application_info = &app_info, + .enabled_extension_count = @intCast(extensions.items.len), + .pp_enabled_extension_names = extensions.items.ptr, + 
.enabled_layer_count = @intCast(layers.items.len), + .pp_enabled_layer_names = layers.items.ptr, + .p_next = if (use_debug_messenger) &mci else null, + }, null); + + if (use_debug_messenger) { + _iw = try InstanceWrapper.load(_instance, _bw.dispatch.vkGetInstanceProcAddr); + } else { + _iw = InstanceWrapper.loadNoFail(_instance, _bw.dispatch.vkGetInstanceProcAddr); + } + + _ip = InstanceProxy.init(_instance, &_iw); +} + +fn deinit_instance() void { + _ip.destroyInstance(null); +} + +fn init_window() !void { + _window = try Window.init( + app_info.p_application_name orelse "Au Window", + .{ .height = 720, .width = 1280 }, + ); + errdefer _window.deinit(); +} + +fn deinit_window() void { + _window.deinit(); +} + +const CandidateDeviceInfo = struct { + pdev: vk.PhysicalDevice, + format: vk.SurfaceFormatKHR, + mode: vk.PresentModeKHR, + family: u32, // must support graphics and present for now + + fn init(alloc: std.mem.Allocator, pdev: vk.PhysicalDevice) !struct { i32, CandidateDeviceInfo } { + var score: i32 = 0; + var res: CandidateDeviceInfo = undefined; + + res.pdev = pdev; + + var format_count: u32 = undefined; + _ = try I.getPhysicalDeviceSurfaceFormatsKHR(pdev, W.surface, &format_count, null); + if (format_count == 0) return error.NoSurfaceFormats; + const formats = try alloc.alloc(vk.SurfaceFormatKHR, format_count); + defer alloc.free(formats); + _ = try I.getPhysicalDeviceSurfaceFormatsKHR(pdev, W.surface, &format_count, formats.ptr); + + for (formats) |fmt| { + if (fmt.color_space == .srgb_nonlinear_khr) { + res.format = fmt; + break; + } + } else { + res.format = formats[0]; + score -= 100; + } + + var mode_count: u32 = undefined; + _ = try I.getPhysicalDeviceSurfacePresentModesKHR(pdev, W.surface, &mode_count, null); + if (mode_count == 0) return error.NoSurfacePresentModes; + const modes = try alloc.alloc(vk.PresentModeKHR, mode_count); + defer alloc.free(modes); + _ = try I.getPhysicalDeviceSurfacePresentModesKHR(pdev, W.surface, &mode_count, modes.ptr); + + if (std.mem.indexOfAny(vk.PresentModeKHR, modes, &.{ + vk.PresentModeKHR.mailbox_khr, + })) |idx| { + res.mode = modes[idx]; + } else { + score -= 50; + res.mode = .fifo_khr; // this is guaranteed + } + + var ext_count: u32 = undefined; + _ = try I.enumerateDeviceExtensionProperties(pdev, null, &ext_count, null); + const exts = try alloc.alloc(vk.ExtensionProperties, ext_count); + defer alloc.free(exts); + _ = try I.enumerateDeviceExtensionProperties(pdev, null, &ext_count, exts.ptr); + + for (device_extensions) |needle| { + for (exts) |ext| { + if (std.mem.eql( + u8, + std.mem.span(needle), + std.mem.sliceTo(&ext.extension_name, 0), + )) + break; + } else { + return error.MissingDeviceExtension; + } + } + + var family_count: u32 = undefined; + I.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, null); + const families = try alloc.alloc(vk.QueueFamilyProperties, family_count); + defer alloc.free(families); + I.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, families.ptr); + + for (families, 0..) 
|prop, idx| { + const graphics_support = prop.queue_flags.graphics_bit; + const present_support = try I.getPhysicalDeviceSurfaceSupportKHR(pdev, @intCast(idx), W.surface) == vk.TRUE; + + if (graphics_support and present_support) { + res.family = @intCast(idx); + break; + } + } else { + return error.NoSuitableFamily; + } + + return .{ score, res }; + } +}; + +fn init_device(alloc: std.mem.Allocator) !void { + var pdev_count: u32 = undefined; + _ = try I.enumeratePhysicalDevices(&pdev_count, null); + if (pdev_count == 0) return error.NoDevice; + const pdevs = try alloc.alloc(vk.PhysicalDevice, pdev_count); + defer alloc.free(pdevs); + _ = try I.enumeratePhysicalDevices(&pdev_count, pdevs.ptr); + + // const scores = std.ArrayList(i32). + var scores: std.MultiArrayList(struct { score: i32, ci: CandidateDeviceInfo }) = .{}; + defer scores.deinit(alloc); + + for (pdevs) |pdev| { + const score, const ci = CandidateDeviceInfo.init(alloc, pdev) catch continue; + try scores.append(alloc, .{ .score = score, .ci = ci }); + } + + const idx = std.sort.argMax(i32, scores.items(.score), {}, std.sort.asc(i32)) orelse + return error.NoSuitableDevice; + _dconfig = scores.get(idx).ci; + + const qci: []const vk.DeviceQueueCreateInfo = &.{ + vk.DeviceQueueCreateInfo{ + .queue_family_index = _dconfig.family, + .queue_count = 1, + .p_queue_priorities = &[_]f32{1.0}, + }, + }; + + _device = try I.createDevice(_dconfig.pdev, &.{ + .queue_create_info_count = @intCast(qci.len), + .p_queue_create_infos = qci.ptr, + .enabled_extension_count = @intCast(device_extensions.len), + .pp_enabled_extension_names = device_extensions.ptr, + .p_next = &vk.PhysicalDeviceDynamicRenderingFeaturesKHR{ + .dynamic_rendering = vk.TRUE, + }, + }, null); + + if (use_debug_messenger) { + _dw = try DeviceWrapper.load(_device, _iw.dispatch.vkGetDeviceProcAddr); + } else { + _dw = DeviceWrapper.loadNoFail(_device, _iw.dispatch.vkGetDeviceProcAddr); + } + _dp = DeviceProxy.init(_device, &_dw); + errdefer D.destroyDevice(null); + + _queue = D.getDeviceQueue(_dconfig.family, 0); + + _qp = QueueProxy.init(_queue, &_dw); + + // todo i'm thinking this needs to be a more complex pointer structure... i'm making assumptions here about how the + // command pools are meant to work. probably I am cooking too much. 
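
// [editor's sketch, not part of the original patch] On the command-pool question
// above: a common starting point is a single pool on the graphics/present queue
// family, created once the device exists and destroyed in deinit_device(). `_pool`
// below is a hypothetical file-scope `var _pool: vk.CommandPool = undefined;` that
// this patch does not declare:
_pool = try D.createCommandPool(&.{
    .queue_family_index = _dconfig.family,
    .flags = .{ .reset_command_buffer_bit = true },
}, null);
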
+} + +fn deinit_device() void { + D.destroyDevice(null); +} + +pub fn debug_callback( + msg_severity: vk.DebugUtilsMessageSeverityFlagsEXT, + msg_type: vk.DebugUtilsMessageTypeFlagsEXT, + p_data: ?*const vk.DebugUtilsMessengerCallbackDataEXT, + _: ?*anyopaque, +) callconv(vk.vulkan_call_conv) vk.Bool32 { + // ripped from std.log.defaultLog + + const data = p_data orelse return vk.FALSE; + const message = data.p_message orelse return vk.FALSE; + + const severity_prefix = if (msg_severity.verbose_bit_ext) + "verbose:" + else if (msg_severity.info_bit_ext) + "info:" + else if (msg_severity.warning_bit_ext) + "warning:" + else if (msg_severity.error_bit_ext) + "error:" + else + "?:"; + + const type_prefix = if (msg_type.general_bit_ext) + "" + else if (msg_type.validation_bit_ext) + "validation:" + else if (msg_type.performance_bit_ext) + "performance:" + else if (msg_type.device_address_binding_bit_ext) + "device_address_binding:" + else + "?:"; + + const stderr = std.io.getStdErr().writer(); + var bw = std.io.bufferedWriter(stderr); + const writer = bw.writer(); + + std.debug.lockStdErr(); + defer std.debug.unlockStdErr(); + nosuspend { + writer.print("vk-{s}{s} {s}\n", .{ severity_prefix, type_prefix, message }) catch return vk.FALSE; + bw.flush() catch return vk.FALSE; + } + + return vk.FALSE; +} + +const Window = struct { + const Self = @This(); + + handle: *c.GLFWwindow, + surface: vk.SurfaceKHR, + + pub fn init(title: [*:0]const u8, extent: vk.Extent2D) !Self { + var self: Self = undefined; + + c.glfwWindowHintString(c.GLFW_X11_CLASS_NAME, "floating_window"); + c.glfwWindowHintString(c.GLFW_X11_INSTANCE_NAME, "floating_window"); + c.glfwWindowHint(c.GLFW_CLIENT_API, c.GLFW_NO_API); + + self.handle = c.glfwCreateWindow( + @intCast(extent.width), + @intCast(extent.height), + title, + null, + null, + ) orelse return error.glfwWindowFailed; + errdefer c.glfwDestroyWindow(self.handle); + + if (c.glfwCreateWindowSurface(_instance, self.handle, null, &self.surface) != .success) { + return error.glfwSurfaceFailed; + } + errdefer I.destroySurfaceKHR(self.surface, null); + + return self; + } + + pub fn deinit(self: Self) void { + I.destroySurfaceKHR(self.surface, null); + c.glfwDestroyWindow(self.handle); + } + + pub fn should_close(self: Self) bool { + return c.glfwWindowShouldClose(self.handle) == c.GLFW_TRUE; + } + + pub fn wait_events(_: Self) void { + c.glfwWaitEvents(); + // todo events as values? push into a buffer and return here? 
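
// [editor's note, not part of the original patch] The event-bus commits later in this
// series answer this todo by buffering GLFW callbacks into a list and returning a
// slice, roughly:
//     pub fn wait_events() []const EventBus.Event {
//         _events.clear();
//         c.glfwWaitEvents();
//         return _events.events.items;
//     }
// so a caller could write something like `for (au.wait_events()) |event| { ... }`.
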
+ } + + pub fn poll_events(_: Self) void { + c.glfwPollEvents(); + } + + pub fn wait_events_timeout(seconds: f64) void { + c.glfwWaitEventsTimeout(seconds); + } +}; diff --git a/src/main.zig b/src/main.zig index 2162eca..b513378 100644 --- a/src/main.zig +++ b/src/main.zig @@ -6,6 +6,8 @@ const Allocator = std.mem.Allocator; const gfx = @import("gfx.zig"); +const au = @import("au.zig"); + const app_name = "vulkan-zig triangle example"; const Vertex = extern struct { @@ -94,106 +96,108 @@ pub fn main() !void { defer _ = gpa.deinit(); const ally = gpa.allocator(); - const base = try gfx.Base.init(); - defer base.deinit(); + try au.init(ally); + defer au.deinit(); - const inst = try gfx.Instance.init(&base); - defer inst.deinit(); + // std.debug.print("Initialized!!\n", .{ }); - const win = try gfx.Window.init(&inst, "zig-glfw-vulkan", .{ .width = 800, .height = 600 }); - defer win.deinit(); - - const dev = try gfx.Device.init(ally, &inst, &win); - defer dev.deinit(); - - var sc = try gfx.Swapchain.create(ally, &dev); - defer sc.deinit(); - - const device_local = gfx.VkAllocator.init(dev.pdev, inst.vki); - - const pipeline_layout = try dev.vkd.createPipelineLayout(dev.dev, &.{ - .flags = .{}, - .set_layout_count = 0, - .p_set_layouts = undefined, - .push_constant_range_count = 0, - .p_push_constant_ranges = undefined, - }, null); - defer dev.vkd.destroyPipelineLayout(dev.dev, pipeline_layout, null); - - const pipeline = try createPipeline(dev.dev, pipeline_layout, dev.format, dev.vkd); - defer dev.vkd.destroyPipeline(dev.dev, pipeline, null); - - const vertex_buffer = try dev.vkd.createBuffer(dev.dev, &.{ - .size = @sizeOf(@TypeOf(vertices)), - .usage = .{ .transfer_dst_bit = true, .vertex_buffer_bit = true }, - .sharing_mode = .exclusive, - }, null); - defer dev.vkd.destroyBuffer(dev.dev, vertex_buffer, null); - const vertex_mem_reqs = dev.vkd.getBufferMemoryRequirements(dev.dev, vertex_buffer); - const vertex_memory = try device_local.alloc(dev.dev, dev.vkd, vertex_mem_reqs, .{ .device_local_bit = true }); - defer dev.vkd.freeMemory(dev.dev, vertex_memory, null); - try dev.vkd.bindBufferMemory(dev.dev, vertex_buffer, vertex_memory, 0); - - try gfx.uploadData(Vertex, dev.pdev, inst.vki, dev.dev, dev.vkd, dev.queue, dev.pool, vertex_buffer, &vertices); - - const index_buffer = try dev.vkd.createBuffer(dev.dev, &.{ - .size = @sizeOf(@TypeOf(indices)), - .usage = .{ .transfer_dst_bit = true, .index_buffer_bit = true }, - .sharing_mode = .exclusive, - }, null); - defer dev.vkd.destroyBuffer(dev.dev, index_buffer, null); - const index_mem_reqs = dev.vkd.getBufferMemoryRequirements(dev.dev, index_buffer); - const index_memory = try device_local.alloc(dev.dev, dev.vkd, index_mem_reqs, .{ .device_local_bit = true }); - defer dev.vkd.freeMemory(dev.dev, index_memory, null); - try dev.vkd.bindBufferMemory(dev.dev, index_buffer, index_memory, 0); - - try gfx.uploadData(Index, dev.pdev, inst.vki, dev.dev, dev.vkd, dev.queue, dev.pool, index_buffer, &indices); - - try sc.init(); - for (sc.chain.items(.image), sc.chain.items(.view), sc.chain.items(.cmdbuf)) |image, view, cmdbuf| { - try record_cmdbuf(cmdbuf, dev.vkd, image, view, sc.extent, pipeline, vertex_buffer, index_buffer); + while (!au.W.should_close()) { + au.W.wait_events(); + // std.debug.print("Event!!\n", .{ }); } - var index: u32 = 0; + try au.D.deviceWaitIdle(); - while (c.glfwWindowShouldClose(win.ref) == c.GLFW_FALSE) { - var w: c_int = undefined; - var h: c_int = undefined; - c.glfwGetFramebufferSize(win.ref, &w, &h); - - // Don't present 
or resize swapchain while the window is minimized - if (w == 0 or h == 0) { - c.glfwPollEvents(); - continue; - } - - const frame = sc.chain.get(index); - - render(dev.dev, dev.vkd, sc.ref, frame, dev.queue) catch |err| switch (err) { - error.OutOfDateKHR => { - // TODO: this is a hack to safely destroy sync primitives - // don't do this. be smart about sync primitive reuse or - // move them to "garbage" to be destroyed later. - try dev.vkd.deviceWaitIdle(dev.dev); - - try sc.init(); - for (sc.chain.items(.image), sc.chain.items(.view), sc.chain.items(.cmdbuf)) |image, view, cmdbuf| { - try record_cmdbuf(cmdbuf, dev.vkd, image, view, sc.extent, pipeline, vertex_buffer, index_buffer); - } - - index = 0; - - continue; - }, - else => |errx| return errx, - }; - - c.glfwPollEvents(); - - index = @intCast((index + 1) % sc.chain.len); - } - - try dev.vkd.deviceWaitIdle(dev.dev); + // // todo create command pool + // + // var sc = try gfx.Swapchain.create(ally, &dev); + // defer sc.deinit(); + // + // const device_local = gfx.VkAllocator.init(dev.pdev, inst.vki); + // + // const pipeline_layout = try dev.vkd.createPipelineLayout(dev.dev, &.{ + // .flags = .{}, + // .set_layout_count = 0, + // .p_set_layouts = undefined, + // .push_constant_range_count = 0, + // .p_push_constant_ranges = undefined, + // }, null); + // defer dev.vkd.destroyPipelineLayout(dev.dev, pipeline_layout, null); + // + // const pipeline = try createPipeline(dev.dev, pipeline_layout, dev.format, dev.vkd); + // defer dev.vkd.destroyPipeline(dev.dev, pipeline, null); + // + // const vertex_buffer = try dev.vkd.createBuffer(dev.dev, &.{ + // .size = @sizeOf(@TypeOf(vertices)), + // .usage = .{ .transfer_dst_bit = true, .vertex_buffer_bit = true }, + // .sharing_mode = .exclusive, + // }, null); + // defer dev.vkd.destroyBuffer(dev.dev, vertex_buffer, null); + // const vertex_mem_reqs = dev.vkd.getBufferMemoryRequirements(dev.dev, vertex_buffer); + // const vertex_memory = try device_local.alloc(dev.dev, dev.vkd, vertex_mem_reqs, .{ .device_local_bit = true }); + // defer dev.vkd.freeMemory(dev.dev, vertex_memory, null); + // try dev.vkd.bindBufferMemory(dev.dev, vertex_buffer, vertex_memory, 0); + // + // try gfx.uploadData(Vertex, dev.pdev, inst.vki, dev.dev, dev.vkd, dev.queue, dev.pool, vertex_buffer, &vertices); + // + // const index_buffer = try dev.vkd.createBuffer(dev.dev, &.{ + // .size = @sizeOf(@TypeOf(indices)), + // .usage = .{ .transfer_dst_bit = true, .index_buffer_bit = true }, + // .sharing_mode = .exclusive, + // }, null); + // defer dev.vkd.destroyBuffer(dev.dev, index_buffer, null); + // const index_mem_reqs = dev.vkd.getBufferMemoryRequirements(dev.dev, index_buffer); + // const index_memory = try device_local.alloc(dev.dev, dev.vkd, index_mem_reqs, .{ .device_local_bit = true }); + // defer dev.vkd.freeMemory(dev.dev, index_memory, null); + // try dev.vkd.bindBufferMemory(dev.dev, index_buffer, index_memory, 0); + // + // try gfx.uploadData(Index, dev.pdev, inst.vki, dev.dev, dev.vkd, dev.queue, dev.pool, index_buffer, &indices); + // + // try sc.init(); + // for (sc.chain.items(.image), sc.chain.items(.view), sc.chain.items(.cmdbuf)) |image, view, cmdbuf| { + // try record_cmdbuf(cmdbuf, dev.vkd, image, view, sc.extent, pipeline, vertex_buffer, index_buffer); + // } + // + // var index: u32 = 0; + // + // while (c.glfwWindowShouldClose(win.ref) == c.GLFW_FALSE) { + // var w: c_int = undefined; + // var h: c_int = undefined; + // c.glfwGetFramebufferSize(win.ref, &w, &h); + // + // // Don't present or resize 
swapchain while the window is minimized + // if (w == 0 or h == 0) { + // c.glfwPollEvents(); + // continue; + // } + // + // const frame = sc.chain.get(index); + // + // render(dev.dev, dev.vkd, sc.ref, frame, dev.queue) catch |err| switch (err) { + // error.OutOfDateKHR => { + // // TODO: this is a hack to safely destroy sync primitives + // // don't do this. be smart about sync primitive reuse or + // // move them to "garbage" to be destroyed later. + // try dev.vkd.deviceWaitIdle(dev.dev); + // + // try sc.init(); + // for (sc.chain.items(.image), sc.chain.items(.view), sc.chain.items(.cmdbuf)) |image, view, cmdbuf| { + // try record_cmdbuf(cmdbuf, dev.vkd, image, view, sc.extent, pipeline, vertex_buffer, index_buffer); + // } + // + // index = 0; + // + // continue; + // }, + // else => |errx| return errx, + // }; + // + // c.glfwPollEvents(); + // + // index = @intCast((index + 1) % sc.chain.len); + // } + // + // try dev.vkd.deviceWaitIdle(dev.dev); } fn record_cmdbuf( From 181f29970e0cc592f5a0da3bd61fd5c7318c0318 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Thu, 27 Jun 2024 09:26:53 -0400 Subject: [PATCH 050/113] WIP: event bus --- src/au.zig | 89 +++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 84 insertions(+), 5 deletions(-) diff --git a/src/au.zig b/src/au.zig index 8b6f2ae..046ae15 100644 --- a/src/au.zig +++ b/src/au.zig @@ -80,9 +80,13 @@ pub fn init(alloc: std.mem.Allocator) !void { try init_device(alloc); errdefer deinit_device(); + + try init_event_bus(alloc); + errdefer deinit_event_bus(); } pub fn deinit() void { + deinit_event_bus(); deinit_device(); deinit_window(); deinit_instance(); @@ -383,9 +387,11 @@ const Window = struct { handle: *c.GLFWwindow, surface: vk.SurfaceKHR, + events: std.ArrayList(Event), - pub fn init(title: [*:0]const u8, extent: vk.Extent2D) !Self { + pub fn init(alloc: std.mem.Allocator, title: [*:0]const u8, extent: vk.Extent2D) !Self { var self: Self = undefined; + self.events = std.ArrayList(Event).init(alloc); c.glfwWindowHintString(c.GLFW_X11_CLASS_NAME, "floating_window"); c.glfwWindowHintString(c.GLFW_X11_INSTANCE_NAME, "floating_window"); @@ -409,6 +415,7 @@ const Window = struct { } pub fn deinit(self: Self) void { + self.events.deinit(); I.destroySurfaceKHR(self.surface, null); c.glfwDestroyWindow(self.handle); } @@ -417,16 +424,88 @@ const Window = struct { return c.glfwWindowShouldClose(self.handle) == c.GLFW_TRUE; } - pub fn wait_events(_: Self) void { + pub fn wait_events(self: Self, events: std.ArrayList(Event)) void { c.glfwWaitEvents(); - // todo events as values? push into a buffer and return here? 
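
// [editor's note, not part of the original patch] As written in this WIP commit,
// `events` is taken by value and the `appendSlice` call below discards an error
// union, so these lines would not compile as-is; a pointer parameter (as poll_events
// takes) plus a `try` or `catch` on appendSlice would be needed. The following commit
// reworks wait_events/poll_events around the EventBus and drops this signature
// entirely.
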
+ events.clearRetainingCapacity(); + events.appendSlice(self.events.items); + self.events.clearRetainingCapacity(); } - pub fn poll_events(_: Self) void { + pub fn poll_events(self: Self, events: *std.ArrayList(Event)) void { c.glfwPollEvents(); + events.clearRetainingCapacity(); + events.appendSlice(self.events.items); + self.events.clearRetainingCapacity(); } - pub fn wait_events_timeout(seconds: f64) void { + pub fn wait_events_timeout(seconds: f64, events: *std.ArrayList(Event)) void { c.glfwWaitEventsTimeout(seconds); } }; + +pub fn onCharMods(window: ?*c.GLFWwindow, code: u32, mods: i32) callconv(.C) void { + const w: *Window = + @ptrCast(c.glfwGetWindowUserPointer(window)); + w.events.append(.{ + .charMods = .{ + .codepoint = @intCast(code), + .mods = mods, + }, + }); +} + +pub fn onMouseButton(window: ?*c.GLFWwindow, btn: c_int, action: c_int, mods: c_int) callconv(.C) void { + const w: *Window = + @ptrCast(c.glfwGetWindowUserPointer(window)); + w.events.append(.{ + .mouseButton = .{ + .btn = btn, + .action = action == c.GLFW_PRESS, + .mods = mods, + }, + }); +} + +const Event = union(enum) { + charMods: struct { + codepoint: u21, + mods: i32, + }, + mouseButton: struct { + btn: u32, // todo enum + action: bool, // todo enum + mods: u32, + }, +}; + +// pub const GLFWwindowposfun = ?*const fn (?*GLFWwindow, c_int, c_int) callconv(.C) void; +// pub const GLFWwindowsizefun = ?*const fn (?*GLFWwindow, c_int, c_int) callconv(.C) void; +// pub const GLFWwindowclosefun = ?*const fn (?*GLFWwindow) callconv(.C) void; +// pub const GLFWwindowrefreshfun = ?*const fn (?*GLFWwindow) callconv(.C) void; +// pub const GLFWwindowfocusfun = ?*const fn (?*GLFWwindow, c_int) callconv(.C) void; +// pub const GLFWwindowiconifyfun = ?*const fn (?*GLFWwindow, c_int) callconv(.C) void; +// pub const GLFWwindowmaximizefun = ?*const fn (?*GLFWwindow, c_int) callconv(.C) void; +// pub const GLFWframebuffersizefun = ?*const fn (?*GLFWwindow, c_int, c_int) callconv(.C) void; +// pub const GLFWwindowcontentscalefun = ?*const fn (?*GLFWwindow, f32, f32) callconv(.C) void; +// pub const GLFWmousebuttonfun = ?*const fn (?*GLFWwindow, c_int, c_int, c_int) callconv(.C) void; +// pub const GLFWcursorposfun = ?*const fn (?*GLFWwindow, f64, f64) callconv(.C) void; +// pub const GLFWcursorenterfun = ?*const fn (?*GLFWwindow, c_int) callconv(.C) void; +// pub const GLFWscrollfun = ?*const fn (?*GLFWwindow, f64, f64) callconv(.C) void; +// pub const GLFWkeyfun = ?*const fn (?*GLFWwindow, c_int, c_int, c_int, c_int) callconv(.C) void; +// pub const GLFWcharfun = ?*const fn (?*GLFWwindow, c_uint) callconv(.C) void; +// pub const GLFWcharmodsfun = ?*const fn (?*GLFWwindow, c_uint, c_int) callconv(.C) void; +// pub const GLFWdropfun = ?*const fn (?*GLFWwindow, c_int, [*c][*c]const u8) callconv(.C) void; +// pub const GLFWmonitorfun = ?*const fn (?*GLFWmonitor, c_int) callconv(.C) void; +// pub const GLFWjoystickfun = ?*const fn (c_int, c_int) callconv(.C) void; + +fn init_event_bus() !void { + c.glfwSetWindowUserPointer(W.handle, W); + _ = c.glfwSetCharModsCallback(W.handle, onCharMods); + _ = c.glfwSetMouseButtonCallback(W.handle, onMouseButton); +} + +fn deinit_event_bus() void { + _ = c.glfwSetCharModsCallback(W.handle, null); + _ = c.glfwSetMouseButtonCallback(W.handle, null); + c.glfwSetWindowUserPointer(W.handle, null); +} From 1eaf98973bf0186d13dec1b8e12cccd6f68fc6f7 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Thu, 27 Jun 2024 10:27:39 -0400 Subject: [PATCH 051/113] event bus seems to be working --- .gitignore | 
3 ++ src/au.zig | 134 +++++++++++++++++++++++++++++++-------------------- src/main.zig | 6 ++- 3 files changed, 90 insertions(+), 53 deletions(-) diff --git a/.gitignore b/.gitignore index 7626714..3516d89 100644 --- a/.gitignore +++ b/.gitignore @@ -37,8 +37,11 @@ modules.order Module.symvers Mkfile.old dkms.conf + [.]zig-cache/ zig-out/ build/ build-*/ docgen_tmp/ + +.idea/ diff --git a/src/au.zig b/src/au.zig index 046ae15..00a4c2d 100644 --- a/src/au.zig +++ b/src/au.zig @@ -47,6 +47,7 @@ pub const B: *const BaseWrapper = &_bw; pub const I: *const InstanceProxy = &_ip; pub const D: *const DeviceProxy = &_dp; pub const W: *const Window = &_window; +pub const E: *const EventBus = &_events; pub const Q: *const QueueProxy = &_qp; pub const device_config: *const CandidateDeviceInfo = &_dconfig; @@ -61,6 +62,7 @@ var _qp: QueueProxy = undefined; var _instance: vk.Instance = undefined; var _window: Window = undefined; +var _events: EventBus = undefined; var _device: vk.Device = undefined; var _dconfig: CandidateDeviceInfo = undefined; var _queue: vk.Queue = undefined; @@ -75,7 +77,7 @@ pub fn init(alloc: std.mem.Allocator) !void { try init_instance(alloc); errdefer deinit_instance(); - try init_window(); + try init_window(alloc); errdefer deinit_window(); try init_device(alloc); @@ -178,8 +180,9 @@ fn deinit_instance() void { _ip.destroyInstance(null); } -fn init_window() !void { +fn init_window(alloc: std.mem.Allocator) !void { _window = try Window.init( + alloc, app_info.p_application_name orelse "Au Window", .{ .height = 720, .width = 1280 }, ); @@ -385,13 +388,13 @@ pub fn debug_callback( const Window = struct { const Self = @This(); + alloc: std.mem.Allocator, handle: *c.GLFWwindow, surface: vk.SurfaceKHR, - events: std.ArrayList(Event), pub fn init(alloc: std.mem.Allocator, title: [*:0]const u8, extent: vk.Extent2D) !Self { var self: Self = undefined; - self.events = std.ArrayList(Event).init(alloc); + self.alloc = alloc; c.glfwWindowHintString(c.GLFW_X11_CLASS_NAME, "floating_window"); c.glfwWindowHintString(c.GLFW_X11_INSTANCE_NAME, "floating_window"); @@ -415,7 +418,6 @@ const Window = struct { } pub fn deinit(self: Self) void { - self.events.deinit(); I.destroySurfaceKHR(self.surface, null); c.glfwDestroyWindow(self.handle); } @@ -423,47 +425,21 @@ const Window = struct { pub fn should_close(self: Self) bool { return c.glfwWindowShouldClose(self.handle) == c.GLFW_TRUE; } - - pub fn wait_events(self: Self, events: std.ArrayList(Event)) void { - c.glfwWaitEvents(); - events.clearRetainingCapacity(); - events.appendSlice(self.events.items); - self.events.clearRetainingCapacity(); - } - - pub fn poll_events(self: Self, events: *std.ArrayList(Event)) void { - c.glfwPollEvents(); - events.clearRetainingCapacity(); - events.appendSlice(self.events.items); - self.events.clearRetainingCapacity(); - } - - pub fn wait_events_timeout(seconds: f64, events: *std.ArrayList(Event)) void { - c.glfwWaitEventsTimeout(seconds); - } }; -pub fn onCharMods(window: ?*c.GLFWwindow, code: u32, mods: i32) callconv(.C) void { - const w: *Window = - @ptrCast(c.glfwGetWindowUserPointer(window)); - w.events.append(.{ - .charMods = .{ - .codepoint = @intCast(code), - .mods = mods, - }, - }); +pub fn wait_events() void { + _events.clear(); + c.glfwWaitEvents(); } -pub fn onMouseButton(window: ?*c.GLFWwindow, btn: c_int, action: c_int, mods: c_int) callconv(.C) void { - const w: *Window = - @ptrCast(c.glfwGetWindowUserPointer(window)); - w.events.append(.{ - .mouseButton = .{ - .btn = btn, - .action = action == 
c.GLFW_PRESS, - .mods = mods, - }, - }); +pub fn poll_events() void { + _events.clear(); + c.glfwPollEvents(); +} + +pub fn wait_events_timeout(seconds: f64) void { + _events.clear(); + c.glfwWaitEventsTimeout(seconds); } const Event = union(enum) { @@ -494,18 +470,74 @@ const Event = union(enum) { // pub const GLFWkeyfun = ?*const fn (?*GLFWwindow, c_int, c_int, c_int, c_int) callconv(.C) void; // pub const GLFWcharfun = ?*const fn (?*GLFWwindow, c_uint) callconv(.C) void; // pub const GLFWcharmodsfun = ?*const fn (?*GLFWwindow, c_uint, c_int) callconv(.C) void; -// pub const GLFWdropfun = ?*const fn (?*GLFWwindow, c_int, [*c][*c]const u8) callconv(.C) void; +// pub const GLFWdropfun = ?*const fn (?*GLFWwindow, c_int, [*c][*c]const u8) callconv(.C) void; // todo lifetime issues // pub const GLFWmonitorfun = ?*const fn (?*GLFWmonitor, c_int) callconv(.C) void; -// pub const GLFWjoystickfun = ?*const fn (c_int, c_int) callconv(.C) void; +// pub const GLFWjoystickfun = ?*const fn (c_int, c_int) callconv(.C) void; // todo skip for now -fn init_event_bus() !void { - c.glfwSetWindowUserPointer(W.handle, W); - _ = c.glfwSetCharModsCallback(W.handle, onCharMods); - _ = c.glfwSetMouseButtonCallback(W.handle, onMouseButton); +pub fn onCharMods(handle: ?*c.GLFWwindow, code: u32, mods: i32) callconv(.C) void { + const bus: *EventBus = + @alignCast(@ptrCast(c.glfwGetWindowUserPointer(handle))); + bus.events.append(bus.alloc, .{ + .charMods = .{ + .codepoint = @intCast(code), + .mods = mods, + }, + }) catch unreachable; // todo circular queue; warn. +} + +pub fn onMouseButton(handle: ?*c.GLFWwindow, btn: c_int, action: c_int, mods: c_int) callconv(.C) void { + const bus: *EventBus = + @alignCast(@ptrCast(c.glfwGetWindowUserPointer(handle))); + bus.events.append(bus.alloc, .{ + .mouseButton = .{ + .btn = @intCast(btn), + .action = action == c.GLFW_PRESS, + .mods = @intCast(mods), + }, + }) catch unreachable; // todo circular queue; warn. +} + +const EventBus = struct { + const Self = @This(); + + alloc: std.mem.Allocator, + events: std.ArrayListUnmanaged(Event), // todo bounded array? + + pub fn init(alloc: std.mem.Allocator) Self { + return .{ + .alloc = alloc, + .events = .{}, + }; + } + + pub fn deinit(self: *EventBus) void { + self.events.deinit(self.alloc); + } + + pub fn connect(self: *EventBus, window: *Window) !void { + c.glfwSetWindowUserPointer(window.handle, self); + _ = c.glfwSetCharModsCallback(window.handle, onCharMods); + _ = c.glfwSetMouseButtonCallback(window.handle, onMouseButton); + } + + pub fn disconnect(_: *EventBus, window: *Window) !void { + // todo somehow prevent double-disconnect? 
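
// [editor's sketch, not part of the original patch] One way to address this todo is a
// hypothetical `connected: bool = false` field on the bus: connect() would set it
// (or return error.AlreadyConnected if it is already true), and disconnect() would
// begin with
//     if (!self.connected) return;
//     self.connected = false;
// before clearing the GLFW callbacks below.
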
+ _ = c.glfwSetCharModsCallback(window.handle, null); + _ = c.glfwSetMouseButtonCallback(window.handle, null); + } + + pub fn clear(self: *EventBus) void { + self.events.clearRetainingCapacity(); + } +}; + +fn init_event_bus(alloc: std.mem.Allocator) !void { + _events = EventBus.init(alloc); + errdefer _events.deinit(); + try _events.connect(&_window); } fn deinit_event_bus() void { - _ = c.glfwSetCharModsCallback(W.handle, null); - _ = c.glfwSetMouseButtonCallback(W.handle, null); - c.glfwSetWindowUserPointer(W.handle, null); + try _events.disconnect(&_window); + _events.deinit(); } diff --git a/src/main.zig b/src/main.zig index b513378..84c4a61 100644 --- a/src/main.zig +++ b/src/main.zig @@ -102,8 +102,10 @@ pub fn main() !void { // std.debug.print("Initialized!!\n", .{ }); while (!au.W.should_close()) { - au.W.wait_events(); - // std.debug.print("Event!!\n", .{ }); + au.wait_events(); + // if (au.E.events.items.len > 0) { + // std.debug.print("Events: {any}\n", .{au.E.events.items}); + // } } try au.D.deviceWaitIdle(); From 07c96af5d76c13bdf29d6c0dfc6282fdb231c732 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Thu, 27 Jun 2024 11:34:05 -0400 Subject: [PATCH 052/113] the rest of the events --- src/au.zig | 103 ++------------------ src/au/Bus.zig | 257 +++++++++++++++++++++++++++++++++++++++++++++++++ src/main.zig | 8 +- 3 files changed, 270 insertions(+), 98 deletions(-) create mode 100644 src/au/Bus.zig diff --git a/src/au.zig b/src/au.zig index 00a4c2d..a21adc6 100644 --- a/src/au.zig +++ b/src/au.zig @@ -4,6 +4,8 @@ const builtin = @import("builtin"); const vk = @import("vk"); const c = @import("c.zig"); +const EventBus = @import("au/EventBus.zig"); + pub const use_debug_messenger = switch (builtin.mode) { .Debug, .ReleaseSafe => true, .ReleaseSmall, .ReleaseFast => false, @@ -47,7 +49,6 @@ pub const B: *const BaseWrapper = &_bw; pub const I: *const InstanceProxy = &_ip; pub const D: *const DeviceProxy = &_dp; pub const W: *const Window = &_window; -pub const E: *const EventBus = &_events; pub const Q: *const QueueProxy = &_qp; pub const device_config: *const CandidateDeviceInfo = &_dconfig; @@ -385,7 +386,7 @@ pub fn debug_callback( return vk.FALSE; } -const Window = struct { +pub const Window = struct { const Self = @This(); alloc: std.mem.Allocator, @@ -427,110 +428,24 @@ const Window = struct { } }; -pub fn wait_events() void { +pub fn wait_events() []const EventBus.Event { _events.clear(); c.glfwWaitEvents(); + return _events.events.items; } -pub fn poll_events() void { +pub fn poll_events() []const EventBus.Event { _events.clear(); c.glfwPollEvents(); + return _events.events.items; } -pub fn wait_events_timeout(seconds: f64) void { +pub fn wait_events_timeout(seconds: f64) []const EventBus.Event { _events.clear(); c.glfwWaitEventsTimeout(seconds); + return _events.events.items; } -const Event = union(enum) { - charMods: struct { - codepoint: u21, - mods: i32, - }, - mouseButton: struct { - btn: u32, // todo enum - action: bool, // todo enum - mods: u32, - }, -}; - -// pub const GLFWwindowposfun = ?*const fn (?*GLFWwindow, c_int, c_int) callconv(.C) void; -// pub const GLFWwindowsizefun = ?*const fn (?*GLFWwindow, c_int, c_int) callconv(.C) void; -// pub const GLFWwindowclosefun = ?*const fn (?*GLFWwindow) callconv(.C) void; -// pub const GLFWwindowrefreshfun = ?*const fn (?*GLFWwindow) callconv(.C) void; -// pub const GLFWwindowfocusfun = ?*const fn (?*GLFWwindow, c_int) callconv(.C) void; -// pub const GLFWwindowiconifyfun = ?*const fn (?*GLFWwindow, c_int) callconv(.C) 
void; -// pub const GLFWwindowmaximizefun = ?*const fn (?*GLFWwindow, c_int) callconv(.C) void; -// pub const GLFWframebuffersizefun = ?*const fn (?*GLFWwindow, c_int, c_int) callconv(.C) void; -// pub const GLFWwindowcontentscalefun = ?*const fn (?*GLFWwindow, f32, f32) callconv(.C) void; -// pub const GLFWmousebuttonfun = ?*const fn (?*GLFWwindow, c_int, c_int, c_int) callconv(.C) void; -// pub const GLFWcursorposfun = ?*const fn (?*GLFWwindow, f64, f64) callconv(.C) void; -// pub const GLFWcursorenterfun = ?*const fn (?*GLFWwindow, c_int) callconv(.C) void; -// pub const GLFWscrollfun = ?*const fn (?*GLFWwindow, f64, f64) callconv(.C) void; -// pub const GLFWkeyfun = ?*const fn (?*GLFWwindow, c_int, c_int, c_int, c_int) callconv(.C) void; -// pub const GLFWcharfun = ?*const fn (?*GLFWwindow, c_uint) callconv(.C) void; -// pub const GLFWcharmodsfun = ?*const fn (?*GLFWwindow, c_uint, c_int) callconv(.C) void; -// pub const GLFWdropfun = ?*const fn (?*GLFWwindow, c_int, [*c][*c]const u8) callconv(.C) void; // todo lifetime issues -// pub const GLFWmonitorfun = ?*const fn (?*GLFWmonitor, c_int) callconv(.C) void; -// pub const GLFWjoystickfun = ?*const fn (c_int, c_int) callconv(.C) void; // todo skip for now - -pub fn onCharMods(handle: ?*c.GLFWwindow, code: u32, mods: i32) callconv(.C) void { - const bus: *EventBus = - @alignCast(@ptrCast(c.glfwGetWindowUserPointer(handle))); - bus.events.append(bus.alloc, .{ - .charMods = .{ - .codepoint = @intCast(code), - .mods = mods, - }, - }) catch unreachable; // todo circular queue; warn. -} - -pub fn onMouseButton(handle: ?*c.GLFWwindow, btn: c_int, action: c_int, mods: c_int) callconv(.C) void { - const bus: *EventBus = - @alignCast(@ptrCast(c.glfwGetWindowUserPointer(handle))); - bus.events.append(bus.alloc, .{ - .mouseButton = .{ - .btn = @intCast(btn), - .action = action == c.GLFW_PRESS, - .mods = @intCast(mods), - }, - }) catch unreachable; // todo circular queue; warn. -} - -const EventBus = struct { - const Self = @This(); - - alloc: std.mem.Allocator, - events: std.ArrayListUnmanaged(Event), // todo bounded array? - - pub fn init(alloc: std.mem.Allocator) Self { - return .{ - .alloc = alloc, - .events = .{}, - }; - } - - pub fn deinit(self: *EventBus) void { - self.events.deinit(self.alloc); - } - - pub fn connect(self: *EventBus, window: *Window) !void { - c.glfwSetWindowUserPointer(window.handle, self); - _ = c.glfwSetCharModsCallback(window.handle, onCharMods); - _ = c.glfwSetMouseButtonCallback(window.handle, onMouseButton); - } - - pub fn disconnect(_: *EventBus, window: *Window) !void { - // todo somehow prevent double-disconnect? - _ = c.glfwSetCharModsCallback(window.handle, null); - _ = c.glfwSetMouseButtonCallback(window.handle, null); - } - - pub fn clear(self: *EventBus) void { - self.events.clearRetainingCapacity(); - } -}; - fn init_event_bus(alloc: std.mem.Allocator) !void { _events = EventBus.init(alloc); errdefer _events.deinit(); diff --git a/src/au/Bus.zig b/src/au/Bus.zig new file mode 100644 index 0000000..95f0f90 --- /dev/null +++ b/src/au/Bus.zig @@ -0,0 +1,257 @@ +const std = @import("std"); +const c = @import("../c.zig"); +const Window = @import("../au.zig").Window; +const Self = @This(); + +alloc: std.mem.Allocator, +events: std.ArrayListUnmanaged(Event), // todo bounded array? 
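+// how the bus works (summary of the code below): connect() stores *Self in the
+// GLFW window user pointer and installs the callbacks; each callback recovers
+// the bus via getBus() and appends an Event; callers clear() before pumping
+// GLFW events so events.items holds only the latest batch.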
+ +pub fn init(alloc: std.mem.Allocator) Self { + return .{ + .alloc = alloc, + .events = .{}, + }; +} + +pub fn deinit(self: *Self) void { + self.events.deinit(self.alloc); +} + +pub fn connect(self: *Self, window: *Window) !void { + // todo somehow prevent double-connect? + c.glfwSetWindowUserPointer(window.handle, self); + _ = c.glfwSetWindowPosCallback(window.handle, onWindowPos); + _ = c.glfwSetWindowSizeCallback(window.handle, onWindowSize); + _ = c.glfwSetWindowCloseCallback(window.handle, onWindowClose); + _ = c.glfwSetWindowRefreshCallback(window.handle, onWindowRefresh); + _ = c.glfwSetWindowFocusCallback(window.handle, onWindowFocus); + _ = c.glfwSetWindowIconifyCallback(window.handle, onWindowIconify); + _ = c.glfwSetWindowMaximizeCallback(window.handle, onWindowMaximize); + _ = c.glfwSetFramebufferSizeCallback(window.handle, onFramebufferSize); + _ = c.glfwSetWindowContentScaleCallback(window.handle, onWindowContentScale); + _ = c.glfwSetMouseButtonCallback(window.handle, onMouseButton); + _ = c.glfwSetCursorPosCallback(window.handle, onCursorPos); + _ = c.glfwSetCursorEnterCallback(window.handle, onCursorEnter); + _ = c.glfwSetScrollCallback(window.handle, onScroll); + _ = c.glfwSetKeyCallback(window.handle, onKey); + _ = c.glfwSetCharModsCallback(window.handle, onCharMods); + // _ = c.glfwSetDropCallback(window.handle, onDrop); +} + +pub fn disconnect(_: *Self, window: *Window) !void { + // todo somehow prevent double-disconnect? + c.glfwSetWindowUserPointer(window.handle, null); + _ = c.glfwSetWindowPosCallback(window.handle, null); + _ = c.glfwSetWindowSizeCallback(window.handle, null); + _ = c.glfwSetWindowCloseCallback(window.handle, null); + _ = c.glfwSetWindowRefreshCallback(window.handle, null); + _ = c.glfwSetWindowFocusCallback(window.handle, null); + _ = c.glfwSetWindowIconifyCallback(window.handle, null); + _ = c.glfwSetWindowMaximizeCallback(window.handle, null); + _ = c.glfwSetFramebufferSizeCallback(window.handle, null); + _ = c.glfwSetWindowContentScaleCallback(window.handle, null); + _ = c.glfwSetMouseButtonCallback(window.handle, null); + _ = c.glfwSetCursorPosCallback(window.handle, null); + _ = c.glfwSetCursorEnterCallback(window.handle, null); + _ = c.glfwSetScrollCallback(window.handle, null); + _ = c.glfwSetKeyCallback(window.handle, null); + _ = c.glfwSetCharModsCallback(window.handle, null); + // _ = c.glfwSetDropCallback(window.handle, null); +} + +pub fn clear(self: *Self) void { + self.events.clearRetainingCapacity(); +} + +fn getBus(handle: ?*c.GLFWwindow) *Self { + return @alignCast(@ptrCast(c.glfwGetWindowUserPointer(handle))); +} + +pub const Event = union(enum) { + const WindowPos = struct { x: i32, y: i32 }; + const WindowSize = struct { x: i32, y: i32 }; + const WindowClose = struct {}; + const WindowRefresh = struct {}; + const WindowFocus = struct { focused: bool }; + const WindowIconify = struct { iconified: bool }; + const WindowMaximize = struct { maximized: bool }; + const FramebufferSize = struct { x: i32, y: i32 }; + const WindowContentScale = struct { x: f32, y: f32 }; + const MouseButton = struct { + button: c_int, // todo enum + action: c_int, // todo enum + mods: c_int, // todo bitmask + }; + const CursorPos = struct { x: f64, y: f64 }; + const CursorEnter = struct { entered: bool }; + const Scroll = struct { dx: f64, dy: f64 }; + const Key = struct { + key: c_int, // todo enum + scan: c_int, // todo ??? 
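+        // note: `scan` is the platform-specific scancode GLFW reports alongside `key`.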
+ action: c_int, // todo enum + mods: c_int, // todo bitmask + }; + const Char = struct { + code: u21, + }; + const CharMods = struct { + code: u21, + mods: c_int, // todo bitmask + }; + const Drop = struct { + paths: []const []const u8, // todo lifetime issues + }; + + windowPos: WindowPos, + windowSize: WindowSize, + windowClose: WindowClose, + windowRefresh: WindowRefresh, + windowFocus: WindowFocus, + windowIconify: WindowIconify, + windowMaximize: WindowMaximize, + framebufferSize: FramebufferSize, + windowContentScale: WindowContentScale, + mouseButton: MouseButton, + cursorPos: CursorPos, + cursorEnter: CursorEnter, + scroll: Scroll, + key: Key, + char: Char, + charMods: CharMods, + drop: Drop, +}; + +fn onWindowPos(handle: ?*c.GLFWwindow, x: c_int, y: c_int) callconv(.C) void { + const bus = getBus(handle); + bus.events.append(bus.alloc, .{ + .windowPos = .{ + .x = @intCast(x), + .y = @intCast(y), + }, + }) catch unreachable; // todo circular queue; warn +} +fn onWindowSize(handle: ?*c.GLFWwindow, x: c_int, y: c_int) callconv(.C) void { + const bus = getBus(handle); + bus.events.append(bus.alloc, .{ + .windowSize = .{ + .x = @intCast(x), + .y = @intCast(y), + }, + }) catch unreachable; // todo circular queue; warn +} +fn onWindowClose(handle: ?*c.GLFWwindow) callconv(.C) void { + const bus = getBus(handle); + bus.events.append(bus.alloc, .{ + .windowClose = .{}, + }) catch unreachable; // todo circular queue; warn +} +fn onWindowRefresh(handle: ?*c.GLFWwindow) callconv(.C) void { + const bus = getBus(handle); + bus.events.append(bus.alloc, .{ + .windowRefresh = .{}, + }) catch unreachable; // todo circular queue; warn +} +fn onWindowFocus(handle: ?*c.GLFWwindow, focused: c_int) callconv(.C) void { + const bus = getBus(handle); + bus.events.append(bus.alloc, .{ + .windowFocus = .{ + .focused = focused == c.GLFW_TRUE, + }, + }) catch unreachable; // todo circular queue; warn +} +fn onWindowIconify(handle: ?*c.GLFWwindow, iconified: c_int) callconv(.C) void { + const bus = getBus(handle); + bus.events.append(bus.alloc, .{ + .windowIconify = .{ + .iconified = iconified == c.GLFW_TRUE, + }, + }) catch unreachable; // todo circular queue; warn +} +fn onWindowMaximize(handle: ?*c.GLFWwindow, maximized: c_int) callconv(.C) void { + const bus = getBus(handle); + bus.events.append(bus.alloc, .{ + .windowMaximize = .{ + .maximized = maximized == c.GLFW_TRUE, + }, + }) catch unreachable; // todo circular queue; warn +} +fn onFramebufferSize(handle: ?*c.GLFWwindow, x: c_int, y: c_int) callconv(.C) void { + const bus = getBus(handle); + bus.events.append(bus.alloc, .{ + .framebufferSize = .{ + .x = @intCast(x), + .y = @intCast(y), + }, + }) catch unreachable; // todo circular queue; warn +} +fn onWindowContentScale(handle: ?*c.GLFWwindow, x: f32, y: f32) callconv(.C) void { + const bus = getBus(handle); + bus.events.append(bus.alloc, .{ + .windowContentScale = .{ + .x = x, + .y = y, + }, + }) catch unreachable; // todo circular queue; warn +} +fn onMouseButton(handle: ?*c.GLFWwindow, button: c_int, action: c_int, mods: c_int) callconv(.C) void { + const bus = getBus(handle); + bus.events.append(bus.alloc, .{ + .mouseButton = .{ + .button = button, + .action = action, + .mods = mods, + }, + }) catch unreachable; // todo circular queue; warn +} +fn onCursorPos(handle: ?*c.GLFWwindow, x: f64, y: f64) callconv(.C) void { + const bus = getBus(handle); + bus.events.append(bus.alloc, .{ + .cursorPos = .{ + .x = x, + .y = y, + }, + }) catch unreachable; // todo circular queue; warn +} +fn 
onCursorEnter(handle: ?*c.GLFWwindow, entered: c_int) callconv(.C) void { + const bus = getBus(handle); + bus.events.append(bus.alloc, .{ + .cursorEnter = .{ + .entered = entered == c.GLFW_TRUE, + }, + }) catch unreachable; // todo circular queue; warn +} +fn onScroll(handle: ?*c.GLFWwindow, dx: f64, dy: f64) callconv(.C) void { + const bus = getBus(handle); + bus.events.append(bus.alloc, .{ + .scroll = .{ + .dx = dx, + .dy = dy, + }, + }) catch unreachable; // todo circular queue; warn +} +fn onKey(handle: ?*c.GLFWwindow, key: c_int, scan: c_int, action: c_int, mods: c_int) callconv(.C) void { + const bus = getBus(handle); + bus.events.append(bus.alloc, .{ + .key = .{ + .key = key, + .scan = scan, + .action = action, + .mods = mods, + }, + }) catch unreachable; // todo circular queue; warn +} +fn onCharMods(handle: ?*c.GLFWwindow, code: c_uint, mods: c_int) callconv(.C) void { + const bus = getBus(handle); + bus.events.append(bus.alloc, .{ + .charMods = .{ + .code = @intCast(code), + .mods = mods, + }, + }) catch unreachable; // todo circular queue; warn +} +// fn onDrop(handle: ?*c.GLFWwindow, count: c_int, paths: [*c][*c]const u8) callconv(.C) void { +// const bus = getBus(handle); +// bus.events.append(bus.alloc, .{ +// .drop = .{}, +// }) catch unreachable; // todo circular queue; warn +// } diff --git a/src/main.zig b/src/main.zig index 84c4a61..1481741 100644 --- a/src/main.zig +++ b/src/main.zig @@ -102,10 +102,10 @@ pub fn main() !void { // std.debug.print("Initialized!!\n", .{ }); while (!au.W.should_close()) { - au.wait_events(); - // if (au.E.events.items.len > 0) { - // std.debug.print("Events: {any}\n", .{au.E.events.items}); - // } + for (au.wait_events()) |event| switch (event) { + .charMods => |e| std.debug.print("{any}\n", .{e}), + .mouseButton => |e| std.debug.print("{any}\n", .{e}), + }; } try au.D.deviceWaitIdle(); From fbfa8ee8d65a5a62829368e78ecf7ddede81efac Mon Sep 17 00:00:00 2001 From: David Allemang Date: Thu, 27 Jun 2024 11:58:18 -0400 Subject: [PATCH 053/113] support drop event --- src/au.zig | 32 ++++++++++++++++---------------- src/au/Bus.zig | 40 +++++++++++++++++++++++++++++----------- src/main.zig | 22 ++++++++++++++-------- 3 files changed, 59 insertions(+), 35 deletions(-) diff --git a/src/au.zig b/src/au.zig index a21adc6..168926f 100644 --- a/src/au.zig +++ b/src/au.zig @@ -4,7 +4,7 @@ const builtin = @import("builtin"); const vk = @import("vk"); const c = @import("c.zig"); -const EventBus = @import("au/EventBus.zig"); +const Bus = @import("au/Bus.zig"); pub const use_debug_messenger = switch (builtin.mode) { .Debug, .ReleaseSafe => true, @@ -63,7 +63,7 @@ var _qp: QueueProxy = undefined; var _instance: vk.Instance = undefined; var _window: Window = undefined; -var _events: EventBus = undefined; +var _bus: Bus = undefined; var _device: vk.Device = undefined; var _dconfig: CandidateDeviceInfo = undefined; var _queue: vk.Queue = undefined; @@ -428,31 +428,31 @@ pub const Window = struct { } }; -pub fn wait_events() []const EventBus.Event { - _events.clear(); +pub fn wait_events() []const Bus.Event { + _bus.clear(); c.glfwWaitEvents(); - return _events.events.items; + return _bus.events.items; } -pub fn poll_events() []const EventBus.Event { - _events.clear(); +pub fn poll_events() []const Bus.Event { + _bus.clear(); c.glfwPollEvents(); - return _events.events.items; + return _bus.events.items; } -pub fn wait_events_timeout(seconds: f64) []const EventBus.Event { - _events.clear(); +pub fn wait_events_timeout(seconds: f64) []const Bus.Event { + 
_bus.clear(); c.glfwWaitEventsTimeout(seconds); - return _events.events.items; + return _bus.events.items; } fn init_event_bus(alloc: std.mem.Allocator) !void { - _events = EventBus.init(alloc); - errdefer _events.deinit(); - try _events.connect(&_window); + _bus = Bus.init(alloc); + errdefer _bus.deinit(); + try _bus.connect(&_window); } fn deinit_event_bus() void { - try _events.disconnect(&_window); - _events.deinit(); + try _bus.disconnect(&_window); + _bus.deinit(); } diff --git a/src/au/Bus.zig b/src/au/Bus.zig index 95f0f90..5bd55c3 100644 --- a/src/au/Bus.zig +++ b/src/au/Bus.zig @@ -4,17 +4,20 @@ const Window = @import("../au.zig").Window; const Self = @This(); alloc: std.mem.Allocator, -events: std.ArrayListUnmanaged(Event), // todo bounded array? +events: std.ArrayListUnmanaged(Event) = .{}, // todo bounded array? +drops: std.ArrayListUnmanaged([][]const u8) = .{}, // todo bounded array? pub fn init(alloc: std.mem.Allocator) Self { return .{ .alloc = alloc, - .events = .{}, }; } pub fn deinit(self: *Self) void { + self.clear(); + self.events.deinit(self.alloc); + self.drops.deinit(self.alloc); } pub fn connect(self: *Self, window: *Window) !void { @@ -35,7 +38,7 @@ pub fn connect(self: *Self, window: *Window) !void { _ = c.glfwSetScrollCallback(window.handle, onScroll); _ = c.glfwSetKeyCallback(window.handle, onKey); _ = c.glfwSetCharModsCallback(window.handle, onCharMods); - // _ = c.glfwSetDropCallback(window.handle, onDrop); + _ = c.glfwSetDropCallback(window.handle, onDrop); } pub fn disconnect(_: *Self, window: *Window) !void { @@ -56,10 +59,18 @@ pub fn disconnect(_: *Self, window: *Window) !void { _ = c.glfwSetScrollCallback(window.handle, null); _ = c.glfwSetKeyCallback(window.handle, null); _ = c.glfwSetCharModsCallback(window.handle, null); - // _ = c.glfwSetDropCallback(window.handle, null); + _ = c.glfwSetDropCallback(window.handle, null); } pub fn clear(self: *Self) void { + for (self.drops.items) |drop| { + for (drop) |path| { + self.alloc.free(path); + } + self.alloc.free(drop); + } + self.drops.clearAndFree(self.alloc); + self.events.clearRetainingCapacity(); } @@ -99,7 +110,7 @@ pub const Event = union(enum) { mods: c_int, // todo bitmask }; const Drop = struct { - paths: []const []const u8, // todo lifetime issues + paths: []const []const u8, }; windowPos: WindowPos, @@ -249,9 +260,16 @@ fn onCharMods(handle: ?*c.GLFWwindow, code: c_uint, mods: c_int) callconv(.C) vo }, }) catch unreachable; // todo circular queue; warn } -// fn onDrop(handle: ?*c.GLFWwindow, count: c_int, paths: [*c][*c]const u8) callconv(.C) void { -// const bus = getBus(handle); -// bus.events.append(bus.alloc, .{ -// .drop = .{}, -// }) catch unreachable; // todo circular queue; warn -// } +fn onDrop(handle: ?*c.GLFWwindow, count: c_int, paths: [*c][*c]const u8) callconv(.C) void { + const bus = getBus(handle); + + const drops = bus.alloc.alloc([]const u8, @intCast(count)) catch unreachable; // todo warn + for (drops, paths) |*dst, src| { + dst.* = bus.alloc.dupe(u8, std.mem.sliceTo(src, 0)) catch unreachable; // todo warn + } + bus.drops.append(bus.alloc, drops) catch unreachable; // todo warn + + bus.events.append(bus.alloc, .{ + .drop = .{ .paths = drops }, + }) catch unreachable; // todo circular queue; warn +} diff --git a/src/main.zig b/src/main.zig index 1481741..40fb247 100644 --- a/src/main.zig +++ b/src/main.zig @@ -93,18 +93,24 @@ fn render( pub fn main() !void { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; - defer _ = gpa.deinit(); - const ally = gpa.allocator(); + defer _ 
= gpa.detectLeaks(); + const alloc = gpa.allocator(); - try au.init(ally); + try au.init(alloc); defer au.deinit(); - // std.debug.print("Initialized!!\n", .{ }); - while (!au.W.should_close()) { - for (au.wait_events()) |event| switch (event) { - .charMods => |e| std.debug.print("{any}\n", .{e}), - .mouseButton => |e| std.debug.print("{any}\n", .{e}), + // todo switch mode depending on if window is focused + const events = au.wait_events_timeout(0.10); + + for (events) |u| switch (u) { + .cursorPos, + .windowPos, + .windowSize, + .framebufferSize, + .windowRefresh, + => {}, + else => |e| std.debug.print("{any}\n", .{e}), }; } From 1c748022debd901a3b4a6891763632df1c8ac003 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Thu, 27 Jun 2024 11:58:59 -0400 Subject: [PATCH 054/113] style --- src/au/Bus.zig | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/src/au/Bus.zig b/src/au/Bus.zig index 5bd55c3..a996dc4 100644 --- a/src/au/Bus.zig +++ b/src/au/Bus.zig @@ -141,6 +141,7 @@ fn onWindowPos(handle: ?*c.GLFWwindow, x: c_int, y: c_int) callconv(.C) void { }, }) catch unreachable; // todo circular queue; warn } + fn onWindowSize(handle: ?*c.GLFWwindow, x: c_int, y: c_int) callconv(.C) void { const bus = getBus(handle); bus.events.append(bus.alloc, .{ @@ -150,18 +151,21 @@ fn onWindowSize(handle: ?*c.GLFWwindow, x: c_int, y: c_int) callconv(.C) void { }, }) catch unreachable; // todo circular queue; warn } + fn onWindowClose(handle: ?*c.GLFWwindow) callconv(.C) void { const bus = getBus(handle); bus.events.append(bus.alloc, .{ .windowClose = .{}, }) catch unreachable; // todo circular queue; warn } + fn onWindowRefresh(handle: ?*c.GLFWwindow) callconv(.C) void { const bus = getBus(handle); bus.events.append(bus.alloc, .{ .windowRefresh = .{}, }) catch unreachable; // todo circular queue; warn } + fn onWindowFocus(handle: ?*c.GLFWwindow, focused: c_int) callconv(.C) void { const bus = getBus(handle); bus.events.append(bus.alloc, .{ @@ -170,6 +174,7 @@ fn onWindowFocus(handle: ?*c.GLFWwindow, focused: c_int) callconv(.C) void { }, }) catch unreachable; // todo circular queue; warn } + fn onWindowIconify(handle: ?*c.GLFWwindow, iconified: c_int) callconv(.C) void { const bus = getBus(handle); bus.events.append(bus.alloc, .{ @@ -178,6 +183,7 @@ fn onWindowIconify(handle: ?*c.GLFWwindow, iconified: c_int) callconv(.C) void { }, }) catch unreachable; // todo circular queue; warn } + fn onWindowMaximize(handle: ?*c.GLFWwindow, maximized: c_int) callconv(.C) void { const bus = getBus(handle); bus.events.append(bus.alloc, .{ @@ -186,6 +192,7 @@ fn onWindowMaximize(handle: ?*c.GLFWwindow, maximized: c_int) callconv(.C) void }, }) catch unreachable; // todo circular queue; warn } + fn onFramebufferSize(handle: ?*c.GLFWwindow, x: c_int, y: c_int) callconv(.C) void { const bus = getBus(handle); bus.events.append(bus.alloc, .{ @@ -195,6 +202,7 @@ fn onFramebufferSize(handle: ?*c.GLFWwindow, x: c_int, y: c_int) callconv(.C) vo }, }) catch unreachable; // todo circular queue; warn } + fn onWindowContentScale(handle: ?*c.GLFWwindow, x: f32, y: f32) callconv(.C) void { const bus = getBus(handle); bus.events.append(bus.alloc, .{ @@ -204,6 +212,7 @@ fn onWindowContentScale(handle: ?*c.GLFWwindow, x: f32, y: f32) callconv(.C) voi }, }) catch unreachable; // todo circular queue; warn } + fn onMouseButton(handle: ?*c.GLFWwindow, button: c_int, action: c_int, mods: c_int) callconv(.C) void { const bus = getBus(handle); bus.events.append(bus.alloc, .{ @@ -214,6 +223,7 @@ fn onMouseButton(handle: 
?*c.GLFWwindow, button: c_int, action: c_int, mods: c_i }, }) catch unreachable; // todo circular queue; warn } + fn onCursorPos(handle: ?*c.GLFWwindow, x: f64, y: f64) callconv(.C) void { const bus = getBus(handle); bus.events.append(bus.alloc, .{ @@ -223,6 +233,7 @@ fn onCursorPos(handle: ?*c.GLFWwindow, x: f64, y: f64) callconv(.C) void { }, }) catch unreachable; // todo circular queue; warn } + fn onCursorEnter(handle: ?*c.GLFWwindow, entered: c_int) callconv(.C) void { const bus = getBus(handle); bus.events.append(bus.alloc, .{ @@ -231,6 +242,7 @@ fn onCursorEnter(handle: ?*c.GLFWwindow, entered: c_int) callconv(.C) void { }, }) catch unreachable; // todo circular queue; warn } + fn onScroll(handle: ?*c.GLFWwindow, dx: f64, dy: f64) callconv(.C) void { const bus = getBus(handle); bus.events.append(bus.alloc, .{ @@ -240,6 +252,7 @@ fn onScroll(handle: ?*c.GLFWwindow, dx: f64, dy: f64) callconv(.C) void { }, }) catch unreachable; // todo circular queue; warn } + fn onKey(handle: ?*c.GLFWwindow, key: c_int, scan: c_int, action: c_int, mods: c_int) callconv(.C) void { const bus = getBus(handle); bus.events.append(bus.alloc, .{ @@ -251,6 +264,7 @@ fn onKey(handle: ?*c.GLFWwindow, key: c_int, scan: c_int, action: c_int, mods: c }, }) catch unreachable; // todo circular queue; warn } + fn onCharMods(handle: ?*c.GLFWwindow, code: c_uint, mods: c_int) callconv(.C) void { const bus = getBus(handle); bus.events.append(bus.alloc, .{ @@ -260,6 +274,7 @@ fn onCharMods(handle: ?*c.GLFWwindow, code: c_uint, mods: c_int) callconv(.C) vo }, }) catch unreachable; // todo circular queue; warn } + fn onDrop(handle: ?*c.GLFWwindow, count: c_int, paths: [*c][*c]const u8) callconv(.C) void { const bus = getBus(handle); From 1f82923f59570fc569b56e6b350fdac690e1a36d Mon Sep 17 00:00:00 2001 From: David Allemang Date: Thu, 27 Jun 2024 14:17:06 -0400 Subject: [PATCH 055/113] starting to test swapchain --- src/au.zig | 3 +- src/au/Bus.zig | 11 +++--- src/au/SwapChain.zig | 68 ++++++++++++++++++++++++++++++++++++++ src/main.zig | 79 ++++++++++++++++++++++++++++++++++++++++---- 4 files changed, 150 insertions(+), 11 deletions(-) create mode 100644 src/au/SwapChain.zig diff --git a/src/au.zig b/src/au.zig index 168926f..2fb76af 100644 --- a/src/au.zig +++ b/src/au.zig @@ -4,7 +4,8 @@ const builtin = @import("builtin"); const vk = @import("vk"); const c = @import("c.zig"); -const Bus = @import("au/Bus.zig"); +pub const Bus = @import("au/Bus.zig"); +pub const SwapChain = @import("au/SwapChain.zig"); pub const use_debug_messenger = switch (builtin.mode) { .Debug, .ReleaseSafe => true, diff --git a/src/au/Bus.zig b/src/au/Bus.zig index a996dc4..117a3e4 100644 --- a/src/au/Bus.zig +++ b/src/au/Bus.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const vk = @import("vk"); const c = @import("../c.zig"); const Window = @import("../au.zig").Window; const Self = @This(); @@ -86,7 +87,7 @@ pub const Event = union(enum) { const WindowFocus = struct { focused: bool }; const WindowIconify = struct { iconified: bool }; const WindowMaximize = struct { maximized: bool }; - const FramebufferSize = struct { x: i32, y: i32 }; + const FramebufferSize = struct { extent: vk.Extent2D }; const WindowContentScale = struct { x: f32, y: f32 }; const MouseButton = struct { button: c_int, // todo enum @@ -193,12 +194,14 @@ fn onWindowMaximize(handle: ?*c.GLFWwindow, maximized: c_int) callconv(.C) void }) catch unreachable; // todo circular queue; warn } -fn onFramebufferSize(handle: ?*c.GLFWwindow, x: c_int, y: c_int) callconv(.C) void { +fn 
onFramebufferSize(handle: ?*c.GLFWwindow, width: c_int, height: c_int) callconv(.C) void { const bus = getBus(handle); bus.events.append(bus.alloc, .{ .framebufferSize = .{ - .x = @intCast(x), - .y = @intCast(y), + .extent = .{ + .width = @intCast(width), + .height = @intCast(height), + }, }, }) catch unreachable; // todo circular queue; warn } diff --git a/src/au/SwapChain.zig b/src/au/SwapChain.zig new file mode 100644 index 0000000..7773ac1 --- /dev/null +++ b/src/au/SwapChain.zig @@ -0,0 +1,68 @@ +const std = @import("std"); +const au = @import("../au.zig"); +const vk = @import("vk"); + +const Self = @This(); + +alloc: std.mem.Allocator, +cinfo: vk.SwapchainCreateInfoKHR, +handle: vk.SwapchainKHR, + +pub fn init(alloc: std.mem.Allocator) !Self { + const caps = try au.I.getPhysicalDeviceSurfaceCapabilitiesKHR(au.device_config.pdev, au.W.surface); + + var min_image_count = @max(3, caps.min_image_count + 1); // todo magic numbers + if (caps.max_image_count > 0) { + min_image_count = @min(min_image_count, caps.max_image_count); + } + + // determine format + const format = au.device_config.format; + + return .{ + .alloc = alloc, + .cinfo = .{ + .surface = au.W.surface, + .min_image_count = min_image_count, + .image_format = format.format, + .image_color_space = format.color_space, + .image_extent = undefined, // set in rebuild + .image_array_layers = 1, + .image_usage = .{ .color_attachment_bit = true }, + .image_sharing_mode = .exclusive, + .pre_transform = .{ .identity_bit_khr = true }, + .composite_alpha = .{ .opaque_bit_khr = true }, + .present_mode = au.device_config.mode, + .clipped = vk.TRUE, + .old_swapchain = .null_handle, + }, + .handle = .null_handle, + }; +} + +pub fn deinit(self: *Self) void { + au.D.destroySwapchainKHR(self.handle, null); +} + +/// mark that the swapchain _should_ be rebuilt with the given extent +/// this function is reentrant, so the swapchain can be marked multiple times +/// and only one rebuild occur +pub fn mark(self: *Self) void { + self.handle = .null_handle; +} + +/// rebuild the swapchain only if it is marked. return true if the swapchain was rebuilt. 
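+/// typical flow, as used in main.zig: mark() on a framebufferSize event, then
+/// call rebuild() once per frame before acquiring the next image; the previous
+/// handle is passed as old_swapchain and destroyed after the new one is created.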
+pub fn rebuild(self: *Self) !bool { + if (self.handle != .null_handle) return false; + + const caps = try au.I.getPhysicalDeviceSurfaceCapabilitiesKHR(au.device_config.pdev, self.cinfo.surface); + self.cinfo.image_extent = caps.current_extent; + + self.handle = try au.D.createSwapchainKHR(&self.cinfo, null); + au.D.destroySwapchainKHR(self.cinfo.old_swapchain, null); + self.cinfo.old_swapchain = self.handle; + + // todo repopulate images and synchronization + + return true; +} diff --git a/src/main.zig b/src/main.zig index 40fb247..7cdfae4 100644 --- a/src/main.zig +++ b/src/main.zig @@ -99,19 +99,86 @@ pub fn main() !void { try au.init(alloc); defer au.deinit(); + var sc = try au.SwapChain.init(alloc); + defer sc.deinit(); + + const pool = try au.D.createCommandPool(&.{ .queue_family_index = au.device_config.family }, null); + defer au.D.destroyCommandPool(pool, null); + + const fence: vk.Fence = try au.D.createFence(&.{ .flags = .{ .signaled_bit = true } }, null); + defer au.D.destroyFence(fence, null); + + const sem_ready: vk.Semaphore = try au.D.createSemaphore(&.{}, null); + defer au.D.destroySemaphore(sem_ready, null); + + const sem_done: vk.Semaphore = try au.D.createSemaphore(&.{}, null); + defer au.D.destroySemaphore(sem_done, null); + + const cmdbufs: [1]vk.CommandBuffer = undefined; + try au.D.allocateCommandBuffers( + &.{ .command_pool = pool, .command_buffer_count = @intCast(cmdbufs.len), .level = .primary }, + cmdbufs.ptr, + ); + defer au.D.freeCommandBuffers(pool, @intCast(cmdbufs.len), cmdbufs.ptr); + + { + const cmd = au.CommandBufferProxy.init(cmdbufs[0], au.D); + const clear = vk.ClearValue{ .color = .{ .float_32 = .{ 0, 0, 0, 1 } } }; + const viewport = vk.Viewport{ + .x = 0, + .y = 0, + .width = sc.cinfo.image_extent.width, + .height = sc.cinfo.image_extent.height, + .min_depth = 0, + .max_depth = 1, + }; + const scissor = vk.Rect2D{ .offset = .{ .x = 0, .y = 0 }, .extent = sc.cinfo.image_extent }; + + try cmd.beginCommandBuffer(&.{}); + cmd.pipelineBarrier( + .{ .top_of_pipe_bit = true }, + .{ .color_attachment_output_bit = true }, + .{}, + 0, + null, + 0, + null, + 1, + @ptrCast(&vk.ImageMemoryBarrier{ + .src_access_mask = .{}, + .dst_access_mask = .{ .color_attachment_write_bit = true }, + .old_layout = .undefined, + .new_layout = .color_attachment_optimal, + .src_queue_family_index = 0, + .dst_queue_family_index = 0, + .image = .null_handle, // todo this needs to point to the swapchain image, so I can't get away from recording a command buffer for each one. 
+ }), + ); + } + while (!au.W.should_close()) { // todo switch mode depending on if window is focused const events = au.wait_events_timeout(0.10); for (events) |u| switch (u) { - .cursorPos, - .windowPos, - .windowSize, - .framebufferSize, - .windowRefresh, - => {}, + .framebufferSize => sc.mark(), + .cursorPos, .windowPos, .windowSize, .windowRefresh => {}, else => |e| std.debug.print("{any}\n", .{e}), }; + + _ = try sc.rebuild(); + + const acq = try au.D.acquireNextImageKHR(sc.handle, std.math.maxInt(u64), sem_ready, .null_handle); + + const pre = try au.Q.presentKHR(&vk.PresentInfoKHR{ + .wait_semaphore_count = 1, + .p_wait_semaphores = &.{sem_done}, + .swapchain_count = 1, + .p_swapchains = &.{sc.handle}, + .p_image_indices = &.{acq.image_index}, + .p_results = null, + }); + std.debug.print("present result: {}\n", .{pre}); } try au.D.deviceWaitIdle(); From 7b80ef4dbf3c1fe4adc9593194d158c0c1e52a2a Mon Sep 17 00:00:00 2001 From: David Allemang Date: Thu, 27 Jun 2024 21:08:07 -0400 Subject: [PATCH 056/113] submit command --- src/au/SwapChain.zig | 14 ++++- src/main.zig | 118 ++++++++++++++++++++++++++++--------------- 2 files changed, 88 insertions(+), 44 deletions(-) diff --git a/src/au/SwapChain.zig b/src/au/SwapChain.zig index 7773ac1..22a02a9 100644 --- a/src/au/SwapChain.zig +++ b/src/au/SwapChain.zig @@ -6,7 +6,8 @@ const Self = @This(); alloc: std.mem.Allocator, cinfo: vk.SwapchainCreateInfoKHR, -handle: vk.SwapchainKHR, +handle: vk.SwapchainKHR = .null_handle, +images: std.ArrayListUnmanaged(vk.Image) = .{}, pub fn init(alloc: std.mem.Allocator) !Self { const caps = try au.I.getPhysicalDeviceSurfaceCapabilitiesKHR(au.device_config.pdev, au.W.surface); @@ -36,11 +37,11 @@ pub fn init(alloc: std.mem.Allocator) !Self { .clipped = vk.TRUE, .old_swapchain = .null_handle, }, - .handle = .null_handle, }; } pub fn deinit(self: *Self) void { + self.images.deinit(self.alloc); au.D.destroySwapchainKHR(self.handle, null); } @@ -62,7 +63,16 @@ pub fn rebuild(self: *Self) !bool { au.D.destroySwapchainKHR(self.cinfo.old_swapchain, null); self.cinfo.old_swapchain = self.handle; + var count: u32 = undefined; + _ = try au.D.getSwapchainImagesKHR(self.handle, &count, null); + try self.images.resize(self.alloc, count); + _ = try au.D.getSwapchainImagesKHR(self.handle, &count, self.images.items.ptr); + // todo repopulate images and synchronization return true; } + +pub fn get(self: Self, idx: u32) vk.Image { + return self.images.items[idx]; +} diff --git a/src/main.zig b/src/main.zig index 7cdfae4..314190d 100644 --- a/src/main.zig +++ b/src/main.zig @@ -105,39 +105,67 @@ pub fn main() !void { const pool = try au.D.createCommandPool(&.{ .queue_family_index = au.device_config.family }, null); defer au.D.destroyCommandPool(pool, null); - const fence: vk.Fence = try au.D.createFence(&.{ .flags = .{ .signaled_bit = true } }, null); - defer au.D.destroyFence(fence, null); - const sem_ready: vk.Semaphore = try au.D.createSemaphore(&.{}, null); defer au.D.destroySemaphore(sem_ready, null); const sem_done: vk.Semaphore = try au.D.createSemaphore(&.{}, null); defer au.D.destroySemaphore(sem_done, null); - const cmdbufs: [1]vk.CommandBuffer = undefined; - try au.D.allocateCommandBuffers( - &.{ .command_pool = pool, .command_buffer_count = @intCast(cmdbufs.len), .level = .primary }, - cmdbufs.ptr, - ); - defer au.D.freeCommandBuffers(pool, @intCast(cmdbufs.len), cmdbufs.ptr); + const fence: vk.Fence = try au.D.createFence(&.{ .flags = .{ .signaled_bit = true } }, null); + defer au.D.destroyFence(fence, null); - 
{ - const cmd = au.CommandBufferProxy.init(cmdbufs[0], au.D); - const clear = vk.ClearValue{ .color = .{ .float_32 = .{ 0, 0, 0, 1 } } }; - const viewport = vk.Viewport{ - .x = 0, - .y = 0, - .width = sc.cinfo.image_extent.width, - .height = sc.cinfo.image_extent.height, - .min_depth = 0, - .max_depth = 1, - }; - const scissor = vk.Rect2D{ .offset = .{ .x = 0, .y = 0 }, .extent = sc.cinfo.image_extent }; + while (!au.W.should_close()) { + // todo switch mode depending on if window is focused + const events = au.wait_events_timeout(0.10); + + for (events) |u| { + switch (u) { + .framebufferSize => sc.mark(), + .cursorPos, .windowPos, .windowSize, .windowRefresh => {}, + else => |e| std.debug.print("{any}\n", .{e}), + } + } + + _ = try sc.rebuild(); + + _ = try au.D.waitForFences(1, &.{fence}, vk.TRUE, std.math.maxInt(u64)); + try au.D.resetFences(1, &.{fence}); + try au.D.resetCommandPool(pool, .{}); + + const acq = try au.D.acquireNextImageKHR( + sc.handle, + std.math.maxInt(u64), + sem_ready, + .null_handle, + ); + const image = sc.get(acq.image_index); + + var cmd = au.CommandBufferProxy.init(.null_handle, au.D.wrapper); + try au.D.allocateCommandBuffers(&.{ + .command_pool = pool, + .level = .primary, + .command_buffer_count = 1, + }, @ptrCast(&cmd.handle)); + + try cmd.beginCommandBuffer(&.{ .flags = .{ .one_time_submit_bit = true } }); + + // const clear = vk.ClearValue{ .color = .{ .float_32 = .{ 0, 0, 0, 1 } } }; + // const viewport = vk.Viewport{ + // .x = 0, + // .y = 0, + // .width = sc.cinfo.image_extent, + // .height = sc.cinfo.image_extent.height, + // .min_depth = 0, + // .max_depth = 1, + // }; + // const scissor = vk.Rect2D{ + // .offset = .{ .x = 0, .y = 0 }, + // .extent = sc.cinfo.image_extent, + // }; - try cmd.beginCommandBuffer(&.{}); cmd.pipelineBarrier( .{ .top_of_pipe_bit = true }, - .{ .color_attachment_output_bit = true }, + .{ .bottom_of_pipe_bit = true }, .{}, 0, null, @@ -146,31 +174,38 @@ pub fn main() !void { 1, @ptrCast(&vk.ImageMemoryBarrier{ .src_access_mask = .{}, - .dst_access_mask = .{ .color_attachment_write_bit = true }, + .dst_access_mask = .{}, .old_layout = .undefined, - .new_layout = .color_attachment_optimal, + .new_layout = .present_src_khr, .src_queue_family_index = 0, .dst_queue_family_index = 0, - .image = .null_handle, // todo this needs to point to the swapchain image, so I can't get away from recording a command buffer for each one. 
+ .image = image, + .subresource_range = .{ + .aspect_mask = .{ .color_bit = true }, + .base_mip_level = 0, + .level_count = 1, + .base_array_layer = 0, + .layer_count = 1, + }, }), ); - } + try cmd.endCommandBuffer(); - while (!au.W.should_close()) { - // todo switch mode depending on if window is focused - const events = au.wait_events_timeout(0.10); + try au.Q.submit( + 1, + @ptrCast(&vk.SubmitInfo{ + .wait_semaphore_count = 1, + .p_wait_semaphores = @ptrCast(&sem_ready), + .p_wait_dst_stage_mask = @ptrCast(&vk.PipelineStageFlags{ .color_attachment_output_bit = true }), + .command_buffer_count = 1, + .p_command_buffers = @ptrCast(&cmd.handle), + .signal_semaphore_count = 1, + .p_signal_semaphores = @ptrCast(&sem_done), + }), + fence, + ); - for (events) |u| switch (u) { - .framebufferSize => sc.mark(), - .cursorPos, .windowPos, .windowSize, .windowRefresh => {}, - else => |e| std.debug.print("{any}\n", .{e}), - }; - - _ = try sc.rebuild(); - - const acq = try au.D.acquireNextImageKHR(sc.handle, std.math.maxInt(u64), sem_ready, .null_handle); - - const pre = try au.Q.presentKHR(&vk.PresentInfoKHR{ + _ = try au.Q.presentKHR(&vk.PresentInfoKHR{ .wait_semaphore_count = 1, .p_wait_semaphores = &.{sem_done}, .swapchain_count = 1, @@ -178,7 +213,6 @@ pub fn main() !void { .p_image_indices = &.{acq.image_index}, .p_results = null, }); - std.debug.print("present result: {}\n", .{pre}); } try au.D.deviceWaitIdle(); From aaa22c058dbc7b8d2a21afe599cad196f9a2293b Mon Sep 17 00:00:00 2001 From: David Allemang Date: Thu, 27 Jun 2024 21:31:50 -0400 Subject: [PATCH 057/113] clear to red with one frame in flight --- src/au/SwapChain.zig | 27 +++- src/main.zig | 311 +++++++++++-------------------------------- 2 files changed, 101 insertions(+), 237 deletions(-) diff --git a/src/au/SwapChain.zig b/src/au/SwapChain.zig index 22a02a9..9f451d2 100644 --- a/src/au/SwapChain.zig +++ b/src/au/SwapChain.zig @@ -8,6 +8,7 @@ alloc: std.mem.Allocator, cinfo: vk.SwapchainCreateInfoKHR, handle: vk.SwapchainKHR = .null_handle, images: std.ArrayListUnmanaged(vk.Image) = .{}, +views: std.ArrayListUnmanaged(vk.ImageView) = .{}, pub fn init(alloc: std.mem.Allocator) !Self { const caps = try au.I.getPhysicalDeviceSurfaceCapabilitiesKHR(au.device_config.pdev, au.W.surface); @@ -41,7 +42,13 @@ pub fn init(alloc: std.mem.Allocator) !Self { } pub fn deinit(self: *Self) void { + for (self.views.items) |view| { + au.D.destroyImageView(view, null); + } + self.views.deinit(self.alloc); + self.images.deinit(self.alloc); + au.D.destroySwapchainKHR(self.handle, null); } @@ -68,11 +75,29 @@ pub fn rebuild(self: *Self) !bool { try self.images.resize(self.alloc, count); _ = try au.D.getSwapchainImagesKHR(self.handle, &count, self.images.items.ptr); + for (self.views.items) |view| { + au.D.destroyImageView(view, null); + } + try self.views.resize(self.alloc, count); + for (self.images.items, self.views.items) |image, *view| { + view.* = try au.D.createImageView(&vk.ImageViewCreateInfo{ .image = image, .view_type = .@"2d", .format = self.cinfo.image_format, .components = .{ .r = .identity, .g = .identity, .b = .identity, .a = .identity }, .subresource_range = .{ + .aspect_mask = .{ .color_bit = true }, + .base_mip_level = 0, + .level_count = 1, + .base_array_layer = 0, + .layer_count = 1, + } }, null); + } + // todo repopulate images and synchronization return true; } -pub fn get(self: Self, idx: u32) vk.Image { +pub fn getImage(self: Self, idx: u32) vk.Image { return self.images.items[idx]; } + +pub fn getView(self: Self, idx: u32) 
vk.ImageView { + return self.views.items[idx]; +} diff --git a/src/main.zig b/src/main.zig index 314190d..e080826 100644 --- a/src/main.zig +++ b/src/main.zig @@ -52,45 +52,6 @@ const vertices = [_]Vertex{ const indices = [_]Index{ 4, 5, 6, 6, 5, 7 }; -fn render( - dev: vk.Device, - vkd: gfx.Device.Wrapper, - swapchain: vk.SwapchainKHR, - frame: gfx.Swapchain.ChainImage, - queue: vk.Queue, -) !void { - _ = try vkd.waitForFences(dev, 1, @ptrCast(&frame.fence), vk.TRUE, std.math.maxInt(u64)); - - const result = try vkd.acquireNextImageKHR( - dev, - swapchain, - std.math.maxInt(u64), - frame.image_available, - .null_handle, - ); - - try vkd.resetFences(dev, 1, @ptrCast(&frame.fence)); - - try vkd.queueSubmit(queue, 1, @ptrCast(&vk.SubmitInfo{ - .wait_semaphore_count = 1, - .p_wait_semaphores = @ptrCast(&frame.image_available), - .p_wait_dst_stage_mask = @ptrCast(&vk.PipelineStageFlags{ .color_attachment_output_bit = true }), - .command_buffer_count = 1, - .p_command_buffers = @ptrCast(&frame.cmdbuf), - .signal_semaphore_count = 1, - .p_signal_semaphores = @ptrCast(&frame.render_finished), - }), frame.fence); - - _ = try vkd.queuePresentKHR(queue, &.{ - .wait_semaphore_count = 1, - .p_wait_semaphores = @ptrCast(&frame.render_finished), - .swapchain_count = 1, - .p_swapchains = @ptrCast(&swapchain), - .p_image_indices = @ptrCast(&result.image_index), - .p_results = null, - }); -} - pub fn main() !void { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; defer _ = gpa.detectLeaks(); @@ -138,7 +99,8 @@ pub fn main() !void { sem_ready, .null_handle, ); - const image = sc.get(acq.image_index); + const image = sc.getImage(acq.image_index); + const view = sc.getView(acq.image_index); var cmd = au.CommandBufferProxy.init(.null_handle, au.D.wrapper); try au.D.allocateCommandBuffers(&.{ @@ -149,23 +111,9 @@ pub fn main() !void { try cmd.beginCommandBuffer(&.{ .flags = .{ .one_time_submit_bit = true } }); - // const clear = vk.ClearValue{ .color = .{ .float_32 = .{ 0, 0, 0, 1 } } }; - // const viewport = vk.Viewport{ - // .x = 0, - // .y = 0, - // .width = sc.cinfo.image_extent, - // .height = sc.cinfo.image_extent.height, - // .min_depth = 0, - // .max_depth = 1, - // }; - // const scissor = vk.Rect2D{ - // .offset = .{ .x = 0, .y = 0 }, - // .extent = sc.cinfo.image_extent, - // }; - cmd.pipelineBarrier( .{ .top_of_pipe_bit = true }, - .{ .bottom_of_pipe_bit = true }, + .{ .color_attachment_output_bit = true }, .{}, 0, null, @@ -174,8 +122,77 @@ pub fn main() !void { 1, @ptrCast(&vk.ImageMemoryBarrier{ .src_access_mask = .{}, - .dst_access_mask = .{}, + .dst_access_mask = .{ .color_attachment_write_bit = true }, .old_layout = .undefined, + .new_layout = .color_attachment_optimal, + .src_queue_family_index = 0, + .dst_queue_family_index = 0, + .image = image, + .subresource_range = .{ + .aspect_mask = .{ .color_bit = true }, + .base_mip_level = 0, + .level_count = 1, + .base_array_layer = 0, + .layer_count = 1, + }, + }), + ); + + const viewport = vk.Viewport{ + .x = 0, + .y = 0, + .width = @floatFromInt(sc.cinfo.image_extent.width), + .height = @floatFromInt(sc.cinfo.image_extent.height), + .min_depth = 0, + .max_depth = 1, + }; + const scissor = vk.Rect2D{ + .offset = .{ .x = 0, .y = 0 }, + .extent = sc.cinfo.image_extent, + }; + const info = vk.RenderingInfoKHR{ + .render_area = scissor, + .layer_count = 1, + .view_mask = 0, + .color_attachment_count = 1, + .p_color_attachments = &.{vk.RenderingAttachmentInfo{ + .image_view = view, + .image_layout = .color_attachment_optimal, + .resolve_mode = .{}, + 
.resolve_image_view = .null_handle, + .resolve_image_layout = .undefined, + .load_op = .clear, + .store_op = .store, + .clear_value = .{ .color = .{ .float_32 = .{ 1, 0, 0, 1 } } }, + }}, + }; + + cmd.setViewport(0, 1, &.{viewport}); + cmd.setScissor(0, 1, &.{scissor}); + cmd.beginRendering(&info); + + // todo + // vkd.cmdBindPipeline(cmdbuf, .graphics, pipeline); + // const offset = [_]vk.DeviceSize{0}; + // vkd.cmdBindVertexBuffers(cmdbuf, 0, 1, @ptrCast(&vertex_buffer), &offset); + // vkd.cmdBindIndexBuffer(cmdbuf, index_buffer, 0, .uint16); + // vkd.cmdDrawIndexed(cmdbuf, indices.len, 1, 0, 0, 0); + + cmd.endRendering(); + + cmd.pipelineBarrier( + .{ .color_attachment_output_bit = true }, + .{ .bottom_of_pipe_bit = true }, + .{}, + 0, + null, + 0, + null, + 1, + @ptrCast(&vk.ImageMemoryBarrier{ + .src_access_mask = .{ .color_attachment_write_bit = true }, + .dst_access_mask = .{}, + .old_layout = .color_attachment_optimal, .new_layout = .present_src_khr, .src_queue_family_index = 0, .dst_queue_family_index = 0, @@ -217,13 +234,6 @@ pub fn main() !void { try au.D.deviceWaitIdle(); - // // todo create command pool - // - // var sc = try gfx.Swapchain.create(ally, &dev); - // defer sc.deinit(); - // - // const device_local = gfx.VkAllocator.init(dev.pdev, inst.vki); - // // const pipeline_layout = try dev.vkd.createPipelineLayout(dev.dev, &.{ // .flags = .{}, // .set_layout_count = 0, @@ -235,7 +245,7 @@ pub fn main() !void { // // const pipeline = try createPipeline(dev.dev, pipeline_layout, dev.format, dev.vkd); // defer dev.vkd.destroyPipeline(dev.dev, pipeline, null); - // + // const vertex_buffer = try dev.vkd.createBuffer(dev.dev, &.{ // .size = @sizeOf(@TypeOf(vertices)), // .usage = .{ .transfer_dst_bit = true, .vertex_buffer_bit = true }, @@ -246,9 +256,8 @@ pub fn main() !void { // const vertex_memory = try device_local.alloc(dev.dev, dev.vkd, vertex_mem_reqs, .{ .device_local_bit = true }); // defer dev.vkd.freeMemory(dev.dev, vertex_memory, null); // try dev.vkd.bindBufferMemory(dev.dev, vertex_buffer, vertex_memory, 0); - // // try gfx.uploadData(Vertex, dev.pdev, inst.vki, dev.dev, dev.vkd, dev.queue, dev.pool, vertex_buffer, &vertices); - // + // const index_buffer = try dev.vkd.createBuffer(dev.dev, &.{ // .size = @sizeOf(@TypeOf(indices)), // .usage = .{ .transfer_dst_bit = true, .index_buffer_bit = true }, @@ -259,177 +268,7 @@ pub fn main() !void { // const index_memory = try device_local.alloc(dev.dev, dev.vkd, index_mem_reqs, .{ .device_local_bit = true }); // defer dev.vkd.freeMemory(dev.dev, index_memory, null); // try dev.vkd.bindBufferMemory(dev.dev, index_buffer, index_memory, 0); - // // try gfx.uploadData(Index, dev.pdev, inst.vki, dev.dev, dev.vkd, dev.queue, dev.pool, index_buffer, &indices); - // - // try sc.init(); - // for (sc.chain.items(.image), sc.chain.items(.view), sc.chain.items(.cmdbuf)) |image, view, cmdbuf| { - // try record_cmdbuf(cmdbuf, dev.vkd, image, view, sc.extent, pipeline, vertex_buffer, index_buffer); - // } - // - // var index: u32 = 0; - // - // while (c.glfwWindowShouldClose(win.ref) == c.GLFW_FALSE) { - // var w: c_int = undefined; - // var h: c_int = undefined; - // c.glfwGetFramebufferSize(win.ref, &w, &h); - // - // // Don't present or resize swapchain while the window is minimized - // if (w == 0 or h == 0) { - // c.glfwPollEvents(); - // continue; - // } - // - // const frame = sc.chain.get(index); - // - // render(dev.dev, dev.vkd, sc.ref, frame, dev.queue) catch |err| switch (err) { - // error.OutOfDateKHR => { - // // TODO: 
this is a hack to safely destroy sync primitives - // // don't do this. be smart about sync primitive reuse or - // // move them to "garbage" to be destroyed later. - // try dev.vkd.deviceWaitIdle(dev.dev); - // - // try sc.init(); - // for (sc.chain.items(.image), sc.chain.items(.view), sc.chain.items(.cmdbuf)) |image, view, cmdbuf| { - // try record_cmdbuf(cmdbuf, dev.vkd, image, view, sc.extent, pipeline, vertex_buffer, index_buffer); - // } - // - // index = 0; - // - // continue; - // }, - // else => |errx| return errx, - // }; - // - // c.glfwPollEvents(); - // - // index = @intCast((index + 1) % sc.chain.len); - // } - // - // try dev.vkd.deviceWaitIdle(dev.dev); -} - -fn record_cmdbuf( - cmdbuf: vk.CommandBuffer, - vkd: gfx.Device.Wrapper, - image: vk.Image, - view: vk.ImageView, - extent: vk.Extent2D, - pipeline: vk.Pipeline, - vertex_buffer: vk.Buffer, - index_buffer: vk.Buffer, -) !void { - const clear = vk.ClearValue{ - .color = .{ .float_32 = .{ 0, 0, 0, 1 } }, - }; - - const viewport = vk.Viewport{ - .x = 0, - .y = 0, - .width = @floatFromInt(extent.width), - .height = @floatFromInt(extent.height), - .min_depth = 0, - .max_depth = 1, - }; - - const scissor = vk.Rect2D{ - .offset = .{ .x = 0, .y = 0 }, - .extent = extent, - }; - - try vkd.beginCommandBuffer(cmdbuf, &.{}); - - vkd.cmdPipelineBarrier( - cmdbuf, - .{ .top_of_pipe_bit = true }, - .{ .color_attachment_output_bit = true }, - .{}, - 0, - null, - 0, - null, - 1, - @ptrCast(&vk.ImageMemoryBarrier{ - .src_access_mask = .{}, - .dst_access_mask = .{ .color_attachment_write_bit = true }, - .old_layout = .undefined, - .new_layout = .color_attachment_optimal, - .src_queue_family_index = 0, - .dst_queue_family_index = 0, - .image = image, - .subresource_range = .{ - .aspect_mask = .{ .color_bit = true }, - .base_mip_level = 0, - .level_count = 1, - .base_array_layer = 0, - .layer_count = 1, - }, - }), - ); - - vkd.cmdSetViewport(cmdbuf, 0, 1, @ptrCast(&viewport)); - vkd.cmdSetScissor(cmdbuf, 0, 1, @ptrCast(&scissor)); - - const color_attachments = [_]vk.RenderingAttachmentInfoKHR{ - .{ - .image_view = view, - .image_layout = .color_attachment_optimal, - .resolve_mode = .{}, - .resolve_image_view = .null_handle, - .resolve_image_layout = .undefined, - .load_op = .clear, - .store_op = .store, - .clear_value = clear, - }, - }; - - const render_info = vk.RenderingInfoKHR{ - .render_area = scissor, // since we always do full-frame changes - .layer_count = 1, - .view_mask = 0, - .color_attachment_count = color_attachments.len, - .p_color_attachments = &color_attachments, - }; - - vkd.cmdBeginRenderingKHR(cmdbuf, &render_info); - - vkd.cmdBindPipeline(cmdbuf, .graphics, pipeline); - const offset = [_]vk.DeviceSize{0}; - vkd.cmdBindVertexBuffers(cmdbuf, 0, 1, @ptrCast(&vertex_buffer), &offset); - vkd.cmdBindIndexBuffer(cmdbuf, index_buffer, 0, .uint16); - vkd.cmdDrawIndexed(cmdbuf, indices.len, 1, 0, 0, 0); - - vkd.cmdEndRenderingKHR(cmdbuf); - - vkd.cmdPipelineBarrier( - cmdbuf, - .{ .color_attachment_output_bit = true }, - .{ .bottom_of_pipe_bit = true }, - .{}, - 0, - null, - 0, - null, - 1, - @ptrCast(&vk.ImageMemoryBarrier{ - .src_access_mask = .{ .color_attachment_write_bit = true }, - .dst_access_mask = .{}, - .old_layout = .color_attachment_optimal, - .new_layout = .present_src_khr, - .src_queue_family_index = 0, - .dst_queue_family_index = 0, - .image = image, - .subresource_range = .{ - .aspect_mask = .{ .color_bit = true }, - .base_mip_level = 0, - .level_count = 1, - .base_array_layer = 0, - .layer_count = 1, - }, - }), - 
); - - try vkd.endCommandBuffer(cmdbuf); } fn createPipeline( From 350cbb34f0af18c3e8e8a0b0e2bfd1f592776442 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Thu, 27 Jun 2024 21:47:21 -0400 Subject: [PATCH 058/113] remove vestigial gfx components --- src/gfx/Base.zig | 30 ------- src/gfx/Context.zig | 5 -- src/gfx/Device.zig | 182 ------------------------------------------- src/gfx/Instance.zig | 143 ---------------------------------- src/gfx/Window.zig | 41 ---------- 5 files changed, 401 deletions(-) delete mode 100644 src/gfx/Base.zig delete mode 100644 src/gfx/Context.zig delete mode 100644 src/gfx/Device.zig delete mode 100644 src/gfx/Instance.zig delete mode 100644 src/gfx/Window.zig diff --git a/src/gfx/Base.zig b/src/gfx/Base.zig deleted file mode 100644 index e0f3fd7..0000000 --- a/src/gfx/Base.zig +++ /dev/null @@ -1,30 +0,0 @@ -const std = @import("std"); -const builtin = @import("builtin"); - -const vk = @import("vk"); -const c = @import("../c.zig"); -const gfx = @import("../gfx.zig"); - -const Self = @This(); - -vkb: Wrapper, - -pub fn init() !Self { - if (c.glfwInit() != c.GLFW_TRUE) - return error.GLFWInitFailed; - errdefer c.glfwTerminate(); - - if (c.glfwVulkanSupported() != c.GLFW_TRUE) { - return error.GLFWNoVulkan; - } - - return .{ - .vkb = try Wrapper.load(c.glfwGetInstanceProcAddress), - }; -} - -pub fn deinit(_: Self) void { - c.glfwTerminate(); -} - -pub const Wrapper = vk.BaseWrapper(gfx.apis); diff --git a/src/gfx/Context.zig b/src/gfx/Context.zig deleted file mode 100644 index 646d492..0000000 --- a/src/gfx/Context.zig +++ /dev/null @@ -1,5 +0,0 @@ -const std = @import("std"); -const builtin = @import("builtin"); -const vk = @import("vk"); -const c = @import("../c.zig"); -const gfx = @import("../c.zig"); diff --git a/src/gfx/Device.zig b/src/gfx/Device.zig deleted file mode 100644 index d27f63c..0000000 --- a/src/gfx/Device.zig +++ /dev/null @@ -1,182 +0,0 @@ -//! The point here is to select _a_ physical device and create a logical device around it. 
- -const std = @import("std"); -const builtin = @import("builtin"); - -const vk = @import("vk"); -const c = @import("../c.zig"); -const gfx = @import("../gfx.zig"); - -const Instance = @import("Instance.zig"); -const Window = @import("Window.zig"); - -const Self = @This(); - -const required_extensions: []const [*:0]const u8 = &.{ - vk.extensions.khr_swapchain.name, - vk.extensions.khr_dynamic_rendering.name, -}; - -const preferred_surface_format: vk.SurfaceFormatKHR = .{ - .color_space = .srgb_nonlinear_khr, - .format = .r8g8b8a8_sint, -}; - -const preferred_present_mode: vk.PresentModeKHR = .mailbox_khr; - -dev: vk.Device, -pdev: vk.PhysicalDevice, -vkd: Wrapper, - -inst: *const Instance, -win: *const Window, - -format: vk.SurfaceFormatKHR, -mode: vk.PresentModeKHR, -family: u32, -queue: vk.Queue, - -pool: vk.CommandPool, - -pub fn init( - ally: std.mem.Allocator, - inst: *const Instance, - win: *const Window, -) !Self { - var pdev_count: u32 = undefined; - _ = try inst.vki.enumeratePhysicalDevices(inst.ref, &pdev_count, null); - const pdevs = try ally.alloc(vk.PhysicalDevice, pdev_count); - defer ally.free(pdevs); - _ = try inst.vki.enumeratePhysicalDevices(inst.ref, &pdev_count, pdevs.ptr); - - for (pdevs) |pdev| { - return wrap(ally, inst, win, pdev) catch continue; - } - - return error.NoSuitableDevice; -} - -pub fn wrap( - ally: std.mem.Allocator, - inst: *const Instance, - win: *const Window, - pdev: vk.PhysicalDevice, -) !Self { - // TODO: Need to rank devices and select the best one - // the new laptop doesn't have a discrete gpu. - // const props = inst.vki.getPhysicalDeviceProperties(pdev); - // if (props.device_type != .discrete_gpu) return error.NotDiscrete; - - var format_count: u32 = undefined; - _ = try inst.vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, win.surface, &format_count, null); - if (format_count == 0) return error.NoSurfaceFormat; - const formats = try ally.alloc(vk.SurfaceFormatKHR, format_count); - defer ally.free(formats); - _ = try inst.vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, win.surface, &format_count, formats.ptr); - - const format = for (formats) |f| { - if (std.meta.eql(f, preferred_surface_format)) - break f; - } else formats[0]; - - var mode_count: u32 = undefined; - _ = try inst.vki.getPhysicalDeviceSurfacePresentModesKHR(pdev, win.surface, &mode_count, null); - if (mode_count == 0) return error.NoSurfaceMode; - const modes = try ally.alloc(vk.PresentModeKHR, mode_count); - defer ally.free(modes); - _ = try inst.vki.getPhysicalDeviceSurfacePresentModesKHR(pdev, win.surface, &mode_count, modes.ptr); - - const mode = for (modes) |m| { - if (std.meta.eql(m, preferred_present_mode)) - break m; - } else modes[0]; - - var ext_count: u32 = undefined; - _ = try inst.vki.enumerateDeviceExtensionProperties(pdev, null, &ext_count, null); - const exts = try ally.alloc(vk.ExtensionProperties, ext_count); - defer ally.free(exts); - _ = try inst.vki.enumerateDeviceExtensionProperties(pdev, null, &ext_count, exts.ptr); - - for (required_extensions) |name| { - for (exts) |ext| { - if (std.mem.eql( - u8, - std.mem.span(name), - std.mem.sliceTo(&ext.extension_name, 0), - )) { - break; - } - } else { - return error.MissingRequiredExtension; - } - } - - var family_count: u32 = undefined; - inst.vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, null); - const families = try ally.alloc(vk.QueueFamilyProperties, family_count); - defer ally.free(families); - inst.vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, families.ptr); - - // just find 
one family that does graphics and present, so we can use exclusive sharing - // on the swapchain. apparently most hardware supports this. logic for queue allocation - // and swapchain creation is so much simpler this way. swapchain creation needs to know - // the list of queue family indices which will have access to the images, and there's a - // performance penalty to allow concurrent access to multiple queue families. - // - // multiple _queues_ may have exclusive access, but only if they're in the smae family. - - const family: u32 = for (families, 0..) |family, idx| { - const graphics = family.queue_flags.graphics_bit; - const present = try inst.vki.getPhysicalDeviceSurfaceSupportKHR(pdev, @intCast(idx), win.surface) == vk.TRUE; - if (graphics and present) break @intCast(idx); - } else { - return error.NoSuitableQueueFamily; - }; - - const qci: []const vk.DeviceQueueCreateInfo = &.{ - vk.DeviceQueueCreateInfo{ - .queue_family_index = family, - .queue_count = 1, - .p_queue_priorities = &[_]f32{1.0}, - }, - }; - - const dev = try inst.vki.createDevice(pdev, &.{ - .queue_create_info_count = @intCast(qci.len), - .p_queue_create_infos = qci.ptr, - .enabled_extension_count = @intCast(required_extensions.len), - .pp_enabled_extension_names = required_extensions.ptr, - .p_next = &vk.PhysicalDeviceDynamicRenderingFeaturesKHR{ - .dynamic_rendering = vk.TRUE, - }, - }, null); - const vkd = try Wrapper.load(dev, inst.vki.dispatch.vkGetDeviceProcAddr); - errdefer vkd.destroyDevice(dev, null); - - const pool = try vkd.createCommandPool(dev, &.{ - .queue_family_index = family, - }, null); - errdefer vkd.destroyCommandPool(dev, pool, null); - - const queue = vkd.getDeviceQueue(dev, family, 0); - - return .{ - .dev = dev, - .pdev = pdev, - .vkd = vkd, - .inst = inst, - .win = win, - .format = format, - .mode = mode, - .pool = pool, - .family = family, - .queue = queue, - }; -} - -pub fn deinit(self: Self) void { - self.vkd.destroyCommandPool(self.dev, self.pool, null); - self.vkd.destroyDevice(self.dev, null); -} - -pub const Wrapper = vk.DeviceWrapper(gfx.apis); diff --git a/src/gfx/Instance.zig b/src/gfx/Instance.zig deleted file mode 100644 index c5c04d3..0000000 --- a/src/gfx/Instance.zig +++ /dev/null @@ -1,143 +0,0 @@ -const std = @import("std"); -const builtin = @import("builtin"); - -const vk = @import("vk"); -const c = @import("../c.zig"); -const gfx = @import("../gfx.zig"); - -const Self = @This(); - -const app_info: vk.ApplicationInfo = .{ - .p_application_name = "zig-glfw-vulkan", - .application_version = vk.makeApiVersion(0, 0, 0, 0), - .p_engine_name = "zig-glfw-vulkan", - .engine_version = vk.makeApiVersion(0, 0, 0, 0), - .api_version = vk.API_VERSION_1_3, -}; - -ref: vk.Instance, -vki: Wrapper, -base: *const gfx.Base, - -messenger: if (gfx.use_debug_messenger) vk.DebugUtilsMessengerEXT else void, - -pub fn init( - base: *const gfx.Base, -) !Self { - var exts: std.BoundedArray([*:0]const u8, 32) = .{}; - var layers: std.BoundedArray([*:0]const u8, 32) = .{}; - - if (gfx.use_debug_messenger) { - try exts.appendSlice(&.{ - vk.extensions.ext_debug_utils.name, - }); - - try layers.appendSlice(&.{ - "VK_LAYER_KHRONOS_validation", - }); - } - - var glfw_exts_count: u32 = 0; - const glfw_exts: [*]const [*:0]const u8 = - @ptrCast(c.glfwGetRequiredInstanceExtensions(&glfw_exts_count)); - try exts.appendSlice(glfw_exts[0..glfw_exts_count]); - - const mci: vk.DebugUtilsMessengerCreateInfoEXT = .{ - .message_severity = .{ - .error_bit_ext = true, - .info_bit_ext = true, - .verbose_bit_ext = true, - 
.warning_bit_ext = true, - }, - .message_type = .{ - .device_address_binding_bit_ext = true, - .general_bit_ext = false, - .performance_bit_ext = true, - .validation_bit_ext = true, - }, - .pfn_user_callback = &debug_callback, - .p_user_data = null, - }; - - const ref = try base.vkb.createInstance(&.{ - .p_application_info = &app_info, - .enabled_extension_count = @intCast(exts.len), - .pp_enabled_extension_names = &exts.buffer, - .enabled_layer_count = @intCast(layers.len), - .pp_enabled_layer_names = &layers.buffer, - .p_next = if (gfx.use_debug_messenger) &mci else null, - }, null); - - const vki = try Wrapper.load(ref, base.vkb.dispatch.vkGetInstanceProcAddr); - errdefer vki.destroyInstance(ref, null); - - const messenger = if (gfx.use_debug_messenger) - try vki.createDebugUtilsMessengerEXT(ref, &mci, null) - else - void{}; - - errdefer if (gfx.use_debug_messenger) - vki.destroyDebugUtilsMessengerEXT(ref, messenger, null); - - return .{ - .ref = ref, - .vki = vki, - .base = base, - .messenger = messenger, - }; -} - -pub fn deinit(self: Self) void { - if (gfx.use_debug_messenger) - self.vki.destroyDebugUtilsMessengerEXT(self.ref, self.messenger, null); - self.vki.destroyInstance(self.ref, null); -} - -pub const Wrapper = vk.InstanceWrapper(gfx.apis); - -pub fn debug_callback( - msg_severity: vk.DebugUtilsMessageSeverityFlagsEXT, - msg_type: vk.DebugUtilsMessageTypeFlagsEXT, - p_data: ?*const vk.DebugUtilsMessengerCallbackDataEXT, - _: ?*anyopaque, -) callconv(vk.vulkan_call_conv) vk.Bool32 { - // ripped from std.log.defaultLog - - const data = p_data orelse return vk.FALSE; - const message = data.p_message orelse return vk.FALSE; - - const severity_prefix = if (msg_severity.verbose_bit_ext) - "verbose:" - else if (msg_severity.info_bit_ext) - "info:" - else if (msg_severity.warning_bit_ext) - "warning:" - else if (msg_severity.error_bit_ext) - "error:" - else - "?:"; - - const type_prefix = if (msg_type.general_bit_ext) - "" - else if (msg_type.validation_bit_ext) - "validation:" - else if (msg_type.performance_bit_ext) - "performance:" - else if (msg_type.device_address_binding_bit_ext) - "device_address_binding:" - else - "?:"; - - const stderr = std.io.getStdErr().writer(); - var bw = std.io.bufferedWriter(stderr); - const writer = bw.writer(); - - std.debug.lockStdErr(); - defer std.debug.unlockStdErr(); - nosuspend { - writer.print("vk-{s}{s} {s}\n", .{ severity_prefix, type_prefix, message }) catch return vk.FALSE; - bw.flush() catch return vk.FALSE; - } - - return vk.FALSE; -} diff --git a/src/gfx/Window.zig b/src/gfx/Window.zig deleted file mode 100644 index d2c2c2b..0000000 --- a/src/gfx/Window.zig +++ /dev/null @@ -1,41 +0,0 @@ -const std = @import("std"); -const builtin = @import("builtin"); - -const vk = @import("vk"); -const c = @import("../c.zig"); - -const Base = @import("Base.zig"); -const Instance = @import("Instance.zig"); - -const Self = @This(); - -ref: *c.GLFWwindow, -surface: vk.SurfaceKHR, - -inst: *const Instance, - -pub fn init(inst: *const Instance, title: [*:0]const u8, extent: vk.Extent2D) !Self { - c.glfwWindowHintString(c.GLFW_X11_CLASS_NAME, "floating_window"); - c.glfwWindowHintString(c.GLFW_X11_INSTANCE_NAME, "floating_window"); - c.glfwWindowHint(c.GLFW_CLIENT_API, c.GLFW_NO_API); - - const ref = c.glfwCreateWindow(@intCast(extent.width), @intCast(extent.height), title, null, null) orelse - return error.GLFWWindowCreateFailed; - errdefer c.glfwDestroyWindow(ref); - - var surface: vk.SurfaceKHR = undefined; - if (c.glfwCreateWindowSurface(inst.ref, ref, 
null, &surface) != .success) - return error.GLFWWindowSurfaceFailed; - errdefer inst.vki.destroySurfaceKHR(inst.ref, surface, null); - - return .{ - .ref = ref, - .surface = surface, - .inst = inst, - }; -} - -pub fn deinit(self: Self) void { - self.inst.vki.destroySurfaceKHR(self.inst.ref, self.surface, null); - c.glfwDestroyWindow(self.ref); -} From 1801959c4e268bdd4f128f23f987579f0b7bfb0f Mon Sep 17 00:00:00 2001 From: David Allemang Date: Thu, 27 Jun 2024 21:47:49 -0400 Subject: [PATCH 059/113] multiple frames in flight --- src/main.zig | 315 +++++++++++++++++++++++++++------------------------ 1 file changed, 170 insertions(+), 145 deletions(-) diff --git a/src/main.zig b/src/main.zig index e080826..6800f6f 100644 --- a/src/main.zig +++ b/src/main.zig @@ -4,8 +4,6 @@ const c = @import("c.zig"); const shaders = @import("shaders"); const Allocator = std.mem.Allocator; -const gfx = @import("gfx.zig"); - const au = @import("au.zig"); const app_name = "vulkan-zig triangle example"; @@ -63,19 +61,46 @@ pub fn main() !void { var sc = try au.SwapChain.init(alloc); defer sc.deinit(); - const pool = try au.D.createCommandPool(&.{ .queue_family_index = au.device_config.family }, null); - defer au.D.destroyCommandPool(pool, null); + var flight = std.MultiArrayList(struct { + acquire: vk.Semaphore, + complete: vk.Semaphore, + fence: vk.Fence, + pool: vk.CommandPool, + }){}; + defer { + for (flight.items(.acquire)) |sem| { + au.D.destroySemaphore(sem, null); + } + for (flight.items(.complete)) |sem| { + au.D.destroySemaphore(sem, null); + } + for (flight.items(.fence)) |fnc| { + au.D.destroyFence(fnc, null); + } + for (flight.items(.pool)) |pool| { + au.D.destroyCommandPool(pool, null); + } + flight.deinit(alloc); + } - const sem_ready: vk.Semaphore = try au.D.createSemaphore(&.{}, null); - defer au.D.destroySemaphore(sem_ready, null); + try flight.resize(alloc, 3); // FRAMES IN FLIGHT + for (flight.items(.acquire)) |*sem| { + sem.* = try au.D.createSemaphore(&.{}, null); + } + for (flight.items(.complete)) |*sem| { + sem.* = try au.D.createSemaphore(&.{}, null); + } + for (flight.items(.fence)) |*fnc| { + fnc.* = try au.D.createFence(&.{ .flags = .{ .signaled_bit = true } }, null); + } + for (flight.items(.pool)) |*pool| { + pool.* = try au.D.createCommandPool(&.{ .queue_family_index = au.device_config.family }, null); + } - const sem_done: vk.Semaphore = try au.D.createSemaphore(&.{}, null); - defer au.D.destroySemaphore(sem_done, null); + var flight_idx: usize = 0; + while (!au.W.should_close()) : (flight_idx = (flight_idx + 1) % flight.len) { + const frame = flight.get(flight_idx); - const fence: vk.Fence = try au.D.createFence(&.{ .flags = .{ .signaled_bit = true } }, null); - defer au.D.destroyFence(fence, null); - - while (!au.W.should_close()) { // todo switch mode depending on if window is focused const events = au.wait_events_timeout(0.10); @@ -89,14 +114,14 @@ pub fn main() !void { _ = try sc.rebuild(); - _ = try au.D.waitForFences(1, &.{fence}, vk.TRUE, std.math.maxInt(u64)); - try au.D.resetFences(1, &.{fence}); - try au.D.resetCommandPool(pool, .{}); + _ = try au.D.waitForFences(1, &.{frame.fence}, vk.TRUE, std.math.maxInt(u64)); + try au.D.resetFences(1, &.{frame.fence}); + try au.D.resetCommandPool(frame.pool, .{}); const acq = try au.D.acquireNextImageKHR( sc.handle, std.math.maxInt(u64), - sem_ready, + frame.acquire, .null_handle, ); const image = sc.getImage(acq.image_index); @@ -104,7 +129,7 @@ pub fn main() !void { var cmd = au.CommandBufferProxy.init(.null_handle, 
au.D.wrapper); try au.D.allocateCommandBuffers(&.{ - .command_pool = pool, + .command_pool = frame.pool, .level = .primary, .command_buffer_count = 1, }, @ptrCast(&cmd.handle)); @@ -212,24 +237,24 @@ pub fn main() !void { 1, @ptrCast(&vk.SubmitInfo{ .wait_semaphore_count = 1, - .p_wait_semaphores = @ptrCast(&sem_ready), + .p_wait_semaphores = @ptrCast(&frame.acquire), .p_wait_dst_stage_mask = @ptrCast(&vk.PipelineStageFlags{ .color_attachment_output_bit = true }), .command_buffer_count = 1, .p_command_buffers = @ptrCast(&cmd.handle), .signal_semaphore_count = 1, - .p_signal_semaphores = @ptrCast(&sem_done), + .p_signal_semaphores = @ptrCast(&frame.complete), }), - fence, + frame.fence, ); _ = try au.Q.presentKHR(&vk.PresentInfoKHR{ .wait_semaphore_count = 1, - .p_wait_semaphores = &.{sem_done}, + .p_wait_semaphores = &.{frame.complete}, .swapchain_count = 1, .p_swapchains = &.{sc.handle}, .p_image_indices = &.{acq.image_index}, .p_results = null, - }); + }); // todo suboptimal? } try au.D.deviceWaitIdle(); @@ -271,126 +296,126 @@ pub fn main() !void { // try gfx.uploadData(Index, dev.pdev, inst.vki, dev.dev, dev.vkd, dev.queue, dev.pool, index_buffer, &indices); } -fn createPipeline( - dev: vk.Device, - layout: vk.PipelineLayout, - format: vk.SurfaceFormatKHR, - vkd: gfx.Device.Wrapper, -) !vk.Pipeline { - const vert = try vkd.createShaderModule(dev, &.{ - .code_size = shaders.triangle_vert.len, - .p_code = @as([*]const u32, @ptrCast(&shaders.triangle_vert)), - }, null); - defer vkd.destroyShaderModule(dev, vert, null); - - const frag = try vkd.createShaderModule(dev, &.{ - .code_size = shaders.triangle_frag.len, - .p_code = @as([*]const u32, @ptrCast(&shaders.triangle_frag)), - }, null); - defer vkd.destroyShaderModule(dev, frag, null); - - const pssci = [_]vk.PipelineShaderStageCreateInfo{ - .{ - .stage = .{ .vertex_bit = true }, - .module = vert, - .p_name = "main", - }, - .{ - .stage = .{ .fragment_bit = true }, - .module = frag, - .p_name = "main", - }, - }; - - const color_blend_attachment_states = [_]vk.PipelineColorBlendAttachmentState{ - vk.PipelineColorBlendAttachmentState{ - .blend_enable = vk.FALSE, - .src_color_blend_factor = .one, - .dst_color_blend_factor = .zero, - .color_blend_op = .add, - .src_alpha_blend_factor = .one, - .dst_alpha_blend_factor = .zero, - .alpha_blend_op = .add, - .color_write_mask = .{ .r_bit = true, .g_bit = true, .b_bit = true, .a_bit = true }, - }, - }; - - const dynamic_states = [_]vk.DynamicState{ - .viewport, - .scissor, - }; - - const create_infos = [_]vk.GraphicsPipelineCreateInfo{ - .{ - .flags = .{}, - .stage_count = @intCast(pssci.len), - .p_stages = &pssci, - .p_vertex_input_state = &vk.PipelineVertexInputStateCreateInfo{ - .vertex_binding_description_count = 1, - .p_vertex_binding_descriptions = @ptrCast(&Vertex.binding_description), - .vertex_attribute_description_count = Vertex.attribute_description.len, - .p_vertex_attribute_descriptions = &Vertex.attribute_description, - }, - .p_input_assembly_state = &vk.PipelineInputAssemblyStateCreateInfo{ - .topology = .triangle_list, - .primitive_restart_enable = vk.FALSE, - }, - .p_tessellation_state = null, - .p_viewport_state = &vk.PipelineViewportStateCreateInfo{ - .viewport_count = 1, - .p_viewports = undefined, // set in createCommandBuffers with cmdSetViewport - .scissor_count = 1, - .p_scissors = undefined, // set in createCommandBuffers with cmdSetScissor - }, - .p_rasterization_state = &vk.PipelineRasterizationStateCreateInfo{ - .depth_clamp_enable = vk.FALSE, - .rasterizer_discard_enable 
= vk.FALSE, - .polygon_mode = .fill, - .cull_mode = .{ .back_bit = true }, - .front_face = .counter_clockwise, - .depth_bias_enable = vk.FALSE, - .depth_bias_constant_factor = 0, - .depth_bias_clamp = 0, - .depth_bias_slope_factor = 0, - .line_width = 1, - }, - .p_multisample_state = &vk.PipelineMultisampleStateCreateInfo{ - .rasterization_samples = .{ .@"1_bit" = true }, - .sample_shading_enable = vk.FALSE, - .min_sample_shading = 1, - .alpha_to_coverage_enable = vk.FALSE, - .alpha_to_one_enable = vk.FALSE, - }, - .p_depth_stencil_state = null, - .p_color_blend_state = &vk.PipelineColorBlendStateCreateInfo{ - .logic_op_enable = vk.FALSE, - .logic_op = .copy, - .attachment_count = @intCast(color_blend_attachment_states.len), - .p_attachments = &color_blend_attachment_states, - .blend_constants = [_]f32{ 0, 0, 0, 0 }, - }, - .p_dynamic_state = &vk.PipelineDynamicStateCreateInfo{ - .flags = .{}, - .dynamic_state_count = @intCast(dynamic_states.len), - .p_dynamic_states = &dynamic_states, - }, - .layout = layout, - .render_pass = .null_handle, - .subpass = 0, - .base_pipeline_handle = .null_handle, - .base_pipeline_index = -1, - .p_next = &vk.PipelineRenderingCreateInfoKHR{ - .color_attachment_count = 1, - .p_color_attachment_formats = @ptrCast(&format), - .depth_attachment_format = .undefined, - .stencil_attachment_format = .undefined, - .view_mask = 0, - }, - }, - }; - - var pipelines: [create_infos.len]vk.Pipeline = undefined; - _ = try vkd.createGraphicsPipelines(dev, .null_handle, @intCast(create_infos.len), &create_infos, null, &pipelines); - std.debug.assert(pipelines.len == 1); - return pipelines[0]; -} +// fn createPipeline( +// dev: vk.Device, +// layout: vk.PipelineLayout, +// format: vk.SurfaceFormatKHR, +// vkd: gfx.Device.Wrapper, +// ) !vk.Pipeline { +// const vert = try vkd.createShaderModule(dev, &.{ +// .code_size = shaders.triangle_vert.len, +// .p_code = @as([*]const u32, @ptrCast(&shaders.triangle_vert)), +// }, null); +// defer vkd.destroyShaderModule(dev, vert, null); +// +// const frag = try vkd.createShaderModule(dev, &.{ +// .code_size = shaders.triangle_frag.len, +// .p_code = @as([*]const u32, @ptrCast(&shaders.triangle_frag)), +// }, null); +// defer vkd.destroyShaderModule(dev, frag, null); +// +// const pssci = [_]vk.PipelineShaderStageCreateInfo{ +// .{ +// .stage = .{ .vertex_bit = true }, +// .module = vert, +// .p_name = "main", +// }, +// .{ +// .stage = .{ .fragment_bit = true }, +// .module = frag, +// .p_name = "main", +// }, +// }; +// +// const color_blend_attachment_states = [_]vk.PipelineColorBlendAttachmentState{ +// vk.PipelineColorBlendAttachmentState{ +// .blend_enable = vk.FALSE, +// .src_color_blend_factor = .one, +// .dst_color_blend_factor = .zero, +// .color_blend_op = .add, +// .src_alpha_blend_factor = .one, +// .dst_alpha_blend_factor = .zero, +// .alpha_blend_op = .add, +// .color_write_mask = .{ .r_bit = true, .g_bit = true, .b_bit = true, .a_bit = true }, +// }, +// }; +// +// const dynamic_states = [_]vk.DynamicState{ +// .viewport, +// .scissor, +// }; +// +// const create_infos = [_]vk.GraphicsPipelineCreateInfo{ +// .{ +// .flags = .{}, +// .stage_count = @intCast(pssci.len), +// .p_stages = &pssci, +// .p_vertex_input_state = &vk.PipelineVertexInputStateCreateInfo{ +// .vertex_binding_description_count = 1, +// .p_vertex_binding_descriptions = @ptrCast(&Vertex.binding_description), +// .vertex_attribute_description_count = Vertex.attribute_description.len, +// .p_vertex_attribute_descriptions = &Vertex.attribute_description, +// 
}, +// .p_input_assembly_state = &vk.PipelineInputAssemblyStateCreateInfo{ +// .topology = .triangle_list, +// .primitive_restart_enable = vk.FALSE, +// }, +// .p_tessellation_state = null, +// .p_viewport_state = &vk.PipelineViewportStateCreateInfo{ +// .viewport_count = 1, +// .p_viewports = undefined, // set in createCommandBuffers with cmdSetViewport +// .scissor_count = 1, +// .p_scissors = undefined, // set in createCommandBuffers with cmdSetScissor +// }, +// .p_rasterization_state = &vk.PipelineRasterizationStateCreateInfo{ +// .depth_clamp_enable = vk.FALSE, +// .rasterizer_discard_enable = vk.FALSE, +// .polygon_mode = .fill, +// .cull_mode = .{ .back_bit = true }, +// .front_face = .counter_clockwise, +// .depth_bias_enable = vk.FALSE, +// .depth_bias_constant_factor = 0, +// .depth_bias_clamp = 0, +// .depth_bias_slope_factor = 0, +// .line_width = 1, +// }, +// .p_multisample_state = &vk.PipelineMultisampleStateCreateInfo{ +// .rasterization_samples = .{ .@"1_bit" = true }, +// .sample_shading_enable = vk.FALSE, +// .min_sample_shading = 1, +// .alpha_to_coverage_enable = vk.FALSE, +// .alpha_to_one_enable = vk.FALSE, +// }, +// .p_depth_stencil_state = null, +// .p_color_blend_state = &vk.PipelineColorBlendStateCreateInfo{ +// .logic_op_enable = vk.FALSE, +// .logic_op = .copy, +// .attachment_count = @intCast(color_blend_attachment_states.len), +// .p_attachments = &color_blend_attachment_states, +// .blend_constants = [_]f32{ 0, 0, 0, 0 }, +// }, +// .p_dynamic_state = &vk.PipelineDynamicStateCreateInfo{ +// .flags = .{}, +// .dynamic_state_count = @intCast(dynamic_states.len), +// .p_dynamic_states = &dynamic_states, +// }, +// .layout = layout, +// .render_pass = .null_handle, +// .subpass = 0, +// .base_pipeline_handle = .null_handle, +// .base_pipeline_index = -1, +// .p_next = &vk.PipelineRenderingCreateInfoKHR{ +// .color_attachment_count = 1, +// .p_color_attachment_formats = @ptrCast(&format), +// .depth_attachment_format = .undefined, +// .stencil_attachment_format = .undefined, +// .view_mask = 0, +// }, +// }, +// }; +// +// var pipelines: [create_infos.len]vk.Pipeline = undefined; +// _ = try vkd.createGraphicsPipelines(dev, .null_handle, @intCast(create_infos.len), &create_infos, null, &pipelines); +// std.debug.assert(pipelines.len == 1); +// return pipelines[0]; +// } From 74aa20b1c0bf5d8f9d260101c61a2fb65eff0119 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Thu, 27 Jun 2024 22:07:48 -0400 Subject: [PATCH 060/113] remove vestigial gfx components --- src/gfx.zig | 23 ------ src/gfx/Swapchain.zig | 169 ------------------------------------------ 2 files changed, 192 deletions(-) delete mode 100644 src/gfx/Swapchain.zig diff --git a/src/gfx.zig b/src/gfx.zig index e3b51c9..2574e64 100644 --- a/src/gfx.zig +++ b/src/gfx.zig @@ -4,29 +4,6 @@ const builtin = @import("builtin"); const vk = @import("vk"); const c = @import("c.zig"); -pub const Base = @import("gfx/Base.zig"); -pub const Context = @import("gfx/Context.zig"); -pub const Device = @import("gfx/Device.zig"); -pub const Instance = @import("gfx/Instance.zig"); -pub const Swapchain = @import("gfx/Swapchain.zig"); -pub const Window = @import("gfx/Window.zig"); - -pub const use_debug_messenger = switch (builtin.mode) { - .Debug, .ReleaseSafe => true, - .ReleaseSmall, .ReleaseFast => false, -}; - -pub const apis: []const vk.ApiInfo = &.{ - vk.features.version_1_0, - vk.features.version_1_1, - vk.features.version_1_2, - vk.features.version_1_3, - vk.extensions.khr_surface, - vk.extensions.khr_swapchain, - 
vk.extensions.khr_dynamic_rendering, - if (use_debug_messenger) vk.extensions.ext_debug_utils else .{}, -}; - pub fn uploadData( comptime T: type, pdev: vk.PhysicalDevice, diff --git a/src/gfx/Swapchain.zig b/src/gfx/Swapchain.zig deleted file mode 100644 index 64ea09f..0000000 --- a/src/gfx/Swapchain.zig +++ /dev/null @@ -1,169 +0,0 @@ -const std = @import("std"); -const builtin = @import("builtin"); - -const vk = @import("vk"); -const c = @import("../c.zig"); - -const Instance = @import("Instance.zig"); -const Window = @import("Window.zig"); -const Device = @import("Device.zig"); - -const Self = @This(); - -pub const ChainImage = struct { - image: vk.Image = .null_handle, - view: vk.ImageView = .null_handle, - cmdbuf: vk.CommandBuffer = .null_handle, - fence: vk.Fence = .null_handle, - image_available: vk.Semaphore = .null_handle, - render_finished: vk.Semaphore = .null_handle, -}; - -ally: std.mem.Allocator, - -ref: vk.SwapchainKHR, -dev: *const Device, - -extent: vk.Extent2D, -min_image_count: u32, -chain: std.MultiArrayList(ChainImage), - -pub fn create(ally: std.mem.Allocator, dev: *const Device) !Self { - const caps = try dev.inst.vki.getPhysicalDeviceSurfaceCapabilitiesKHR(dev.pdev, dev.win.surface); - var min_image_count = @max(3, caps.min_image_count + 1); - if (caps.max_image_count > 0) { - min_image_count = @min(min_image_count, caps.max_image_count); - } - - return .{ - .ally = ally, - .ref = .null_handle, - .dev = dev, - .extent = undefined, - .min_image_count = min_image_count, - .chain = .{}, - }; -} - -pub fn init(self: *Self) !void { - const caps = try self.dev.inst.vki.getPhysicalDeviceSurfaceCapabilitiesKHR(self.dev.pdev, self.dev.win.surface); - - self.extent = caps.current_extent; - if (caps.current_extent.width == std.math.maxInt(u32)) { - c.glfwGetFramebufferSize(self.dev.win.ref, @ptrCast(&self.extent.width), @ptrCast(&self.extent.height)); - } - self.extent.width = std.math.clamp(self.extent.width, caps.min_image_extent.width, caps.max_image_extent.width); - self.extent.height = std.math.clamp(self.extent.height, caps.min_image_extent.height, caps.max_image_extent.height); - - const prev = self.ref; - self.ref = try self.dev.vkd.createSwapchainKHR(self.dev.dev, &.{ - .surface = self.dev.win.surface, - .min_image_count = self.min_image_count, - .image_format = self.dev.format.format, - .image_color_space = self.dev.format.color_space, - .image_extent = self.extent, - .image_array_layers = 1, - .image_usage = .{ .color_attachment_bit = true }, - .image_sharing_mode = .exclusive, - .pre_transform = .{ .identity_bit_khr = true }, - .composite_alpha = .{ .opaque_bit_khr = true }, - .present_mode = self.dev.mode, - .clipped = vk.TRUE, - .old_swapchain = prev, - }, null); - self.dev.vkd.destroySwapchainKHR(self.dev.dev, prev, null); - - var image_count: u32 = undefined; - _ = try self.dev.vkd.getSwapchainImagesKHR(self.dev.dev, self.ref, &image_count, null); - - // todo try to reuse contents if possible. - // not even sure at this point which parts can be safely reused. the trick to fix the tearing while resizing - // on laptop is probably in doing this correctly, to present any remaining images that can be presented. 
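// Related to the rebuild/tearing question above and the "todo suboptimal?" note in
// main.zig: a sketch of how the main loop could react to the swapchain going stale,
// assuming the sc.mark()/sc.rebuild() helpers from the current main loop and vulkan-zig's
// error/result names; present_info stands for the vk.PresentInfoKHR built in main.zig.
// This is the shape of the loop body, not a drop-in patch:

const acq = au.D.acquireNextImageKHR(
    sc.handle,
    std.math.maxInt(u64),
    frame.acquire,
    .null_handle,
) catch |err| switch (err) {
    // The surface changed; this swapchain can no longer present. Rebuild and retry.
    error.OutOfDateKHR => {
        sc.mark();
        continue;
    },
    else => return err,
};
// Suboptimal is still a success code: the image is usable, but schedule a rebuild.
if (acq.result == .suboptimal_khr) sc.mark();

// ... record and submit as in main.zig ...

const present_result = try au.Q.presentKHR(&present_info);
if (present_result == .suboptimal_khr) sc.mark();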
- - self.deinit_chain(); - - try self.chain.resize(self.ally, image_count); - _ = try self.dev.vkd.getSwapchainImagesKHR( - self.dev.dev, - self.ref, - &image_count, - self.chain.items(.image).ptr, - ); - - try self.init_chain(); -} - -// requires self.chain.len and self.chain.items(.image) be populated by getSwapchainImagesKHR -fn init_chain(self: *Self) !void { - @memset(self.chain.items(.view), .null_handle); - @memset(self.chain.items(.cmdbuf), .null_handle); - @memset(self.chain.items(.fence), .null_handle); - @memset(self.chain.items(.image_available), .null_handle); - @memset(self.chain.items(.render_finished), .null_handle); - errdefer self.deinit_chain(); - - for (self.chain.items(.image), self.chain.items(.view)) |image, *view| { - view.* = try self.dev.vkd.createImageView(self.dev.dev, &.{ - .image = image, - .view_type = .@"2d", - .format = self.dev.format.format, - .components = .{ .r = .identity, .g = .identity, .b = .identity, .a = .identity }, - .subresource_range = .{ - .aspect_mask = .{ .color_bit = true }, - .base_mip_level = 0, - .level_count = 1, - .base_array_layer = 0, - .layer_count = 1, - }, - }, null); - } - - for (self.chain.items(.fence)) |*fence| { - fence.* = try self.dev.vkd.createFence(self.dev.dev, &.{ - .flags = .{ .signaled_bit = true }, - }, null); - } - - for (self.chain.items(.image_available)) |*sem| { - sem.* = try self.dev.vkd.createSemaphore(self.dev.dev, &.{}, null); - } - - for (self.chain.items(.render_finished)) |*sem| { - sem.* = try self.dev.vkd.createSemaphore(self.dev.dev, &.{}, null); - } - - try self.dev.vkd.allocateCommandBuffers(self.dev.dev, &.{ - .command_pool = self.dev.pool, - .command_buffer_count = @intCast(self.chain.len), - .level = .primary, - }, self.chain.items(.cmdbuf).ptr); -} - -fn deinit_chain(self: Self) void { - for (self.chain.items(.view)) |view| { - self.dev.vkd.destroyImageView(self.dev.dev, view, null); - } - for (self.chain.items(.fence)) |fence| { - self.dev.vkd.destroyFence(self.dev.dev, fence, null); - } - for (self.chain.items(.image_available)) |sem| { - self.dev.vkd.destroySemaphore(self.dev.dev, sem, null); - } - for (self.chain.items(.render_finished)) |sem| { - self.dev.vkd.destroySemaphore(self.dev.dev, sem, null); - } - if (self.chain.len > 0) { - self.dev.vkd.freeCommandBuffers( - self.dev.dev, - self.dev.pool, - @intCast(self.chain.len), - self.chain.items(.cmdbuf).ptr, - ); - } -} - -pub fn deinit(self: *Self) void { - self.deinit_chain(); - self.chain.deinit(self.ally); - self.dev.vkd.destroySwapchainKHR(self.dev.dev, self.ref, null); -} From 7f7269fb07c2d9d5cb7f5bf0e3b8024e56dc29b5 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Thu, 27 Jun 2024 22:08:15 -0400 Subject: [PATCH 061/113] simplify frames in flight struct --- src/main.zig | 56 +++++++++++++++++++++------------------------------- 1 file changed, 22 insertions(+), 34 deletions(-) diff --git a/src/main.zig b/src/main.zig index 6800f6f..86fdc23 100644 --- a/src/main.zig +++ b/src/main.zig @@ -61,45 +61,33 @@ pub fn main() !void { var sc = try au.SwapChain.init(alloc); defer sc.deinit(); - var flight = std.MultiArrayList(struct { - acquire: vk.Semaphore, - complete: vk.Semaphore, - fence: vk.Fence, - pool: vk.CommandPool, - }){}; - defer { - for (flight.items(.acquire)) |sem| { - au.D.destroySemaphore(sem, null); - } - for (flight.items(.complete)) |sem| { - au.D.destroySemaphore(sem, null); - } - for (flight.items(.fence)) |fnc| { - au.D.destroyFence(fnc, null); - } - for (flight.items(.pool)) |pool| { - au.D.destroyCommandPool(pool, 
null); - } - flight.deinit(alloc); - } + const flight = try alloc.alloc( + struct { + acquire: vk.Semaphore = .null_handle, + complete: vk.Semaphore = .null_handle, + fence: vk.Fence = .null_handle, + pool: vk.CommandPool = .null_handle, + }, + 3, // FRAMES IN FLIGHT + ); + defer alloc.free(flight); - try flight.resize(alloc, 3); // FRAMES IN FLIGHT - for (flight.items(.acquire)) |*sem| { - sem.* = try au.D.createSemaphore(&.{}, null); - } - for (flight.items(.complete)) |*sem| { - sem.* = try au.D.createSemaphore(&.{}, null); - } - for (flight.items(.fence)) |*fnc| { - fnc.* = try au.D.createFence(&.{ .flags = .{ .signaled_bit = true } }, null); - } - for (flight.items(.pool)) |*pool| { - pool.* = try au.D.createCommandPool(&.{ .queue_family_index = au.device_config.family }, null); + for (flight) |*frame| { + frame.acquire = try au.D.createSemaphore(&.{}, null); + frame.complete = try au.D.createSemaphore(&.{}, null); + frame.fence = try au.D.createFence(&.{ .flags = .{ .signaled_bit = true } }, null); + frame.pool = try au.D.createCommandPool(&.{ .queue_family_index = au.device_config.family }, null); } + defer for (flight) |frame| { + au.D.destroySemaphore(frame.acquire, null); + au.D.destroySemaphore(frame.complete, null); + au.D.destroyFence(frame.fence, null); + au.D.destroyCommandPool(frame.pool, null); + }; var flight_idx: usize = 0; while (!au.W.should_close()) : (flight_idx = (flight_idx + 1) % flight.len) { - const frame = flight.get(flight_idx); + const frame = flight[flight_idx]; // todo switch mode depending on if window is focused const events = au.wait_events_timeout(0.10); From 03d9c607c2cc90a9a90ab444a4a599f2adaee6ba Mon Sep 17 00:00:00 2001 From: David Allemang Date: Thu, 27 Jun 2024 22:08:36 -0400 Subject: [PATCH 062/113] lower framerate when not focused --- src/au.zig | 4 ++++ src/main.zig | 6 ++++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/src/au.zig b/src/au.zig index 2fb76af..4cf299f 100644 --- a/src/au.zig +++ b/src/au.zig @@ -427,6 +427,10 @@ pub const Window = struct { pub fn should_close(self: Self) bool { return c.glfwWindowShouldClose(self.handle) == c.GLFW_TRUE; } + + pub fn focused(self: Self) bool { + return c.glfwGetWindowAttrib(self.handle, c.GLFW_FOCUSED) == c.GLFW_TRUE; + } }; pub fn wait_events() []const Bus.Event { diff --git a/src/main.zig b/src/main.zig index 86fdc23..e4f899a 100644 --- a/src/main.zig +++ b/src/main.zig @@ -89,8 +89,10 @@ pub fn main() !void { while (!au.W.should_close()) : (flight_idx = (flight_idx + 1) % flight.len) { const frame = flight[flight_idx]; - // todo switch mode depending on if window is focused - const events = au.wait_events_timeout(0.10); + const events = if (au.W.focused()) + au.wait_events_timeout(0.1) + else + au.wait_events_timeout(0.5); for (events) |u| { switch (u) { From cd9d77c24a6e246e605666921f0f42ea448a45e8 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Fri, 28 Jun 2024 21:31:30 -0400 Subject: [PATCH 063/113] extract Frame.record_render --- src/main.zig | 223 +++++++++++++++++++++++++++++---------------------- 1 file changed, 127 insertions(+), 96 deletions(-) diff --git a/src/main.zig b/src/main.zig index e4f899a..a45bc89 100644 --- a/src/main.zig +++ b/src/main.zig @@ -50,81 +50,31 @@ const vertices = [_]Vertex{ const indices = [_]Index{ 4, 5, 6, 6, 5, 7 }; -pub fn main() !void { - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; - defer _ = gpa.detectLeaks(); - const alloc = gpa.allocator(); +const Flight = struct { + acquire: vk.Semaphore = .null_handle, + complete: 
vk.Semaphore = .null_handle, + fence: vk.Fence = .null_handle, + pool: vk.CommandPool = .null_handle, + frame: Frame, +}; - try au.init(alloc); - defer au.deinit(); - - var sc = try au.SwapChain.init(alloc); - defer sc.deinit(); - - const flight = try alloc.alloc( - struct { - acquire: vk.Semaphore = .null_handle, - complete: vk.Semaphore = .null_handle, - fence: vk.Fence = .null_handle, - pool: vk.CommandPool = .null_handle, - }, - 3, // FRAMES IN FLIGHT - ); - defer alloc.free(flight); - - for (flight) |*frame| { - frame.acquire = try au.D.createSemaphore(&.{}, null); - frame.complete = try au.D.createSemaphore(&.{}, null); - frame.fence = try au.D.createFence(&.{ .flags = .{ .signaled_bit = true } }, null); - frame.pool = try au.D.createCommandPool(&.{ .queue_family_index = au.device_config.family }, null); +const Frame = struct { + pub fn init() !Frame { + return .{}; } - defer for (flight) |frame| { - au.D.destroySemaphore(frame.acquire, null); - au.D.destroySemaphore(frame.complete, null); - au.D.destroyFence(frame.fence, null); - au.D.destroyCommandPool(frame.pool, null); - }; - var flight_idx: usize = 0; - while (!au.W.should_close()) : (flight_idx = (flight_idx + 1) % flight.len) { - const frame = flight[flight_idx]; + pub fn deinit(self: Frame) void { + _ = self; + } - const events = if (au.W.focused()) - au.wait_events_timeout(0.1) - else - au.wait_events_timeout(0.5); - - for (events) |u| { - switch (u) { - .framebufferSize => sc.mark(), - .cursorPos, .windowPos, .windowSize, .windowRefresh => {}, - else => |e| std.debug.print("{any}\n", .{e}), - } - } - - _ = try sc.rebuild(); - - _ = try au.D.waitForFences(1, &.{frame.fence}, vk.TRUE, std.math.maxInt(u64)); - try au.D.resetFences(1, &.{frame.fence}); - try au.D.resetCommandPool(frame.pool, .{}); - - const acq = try au.D.acquireNextImageKHR( - sc.handle, - std.math.maxInt(u64), - frame.acquire, - .null_handle, - ); - const image = sc.getImage(acq.image_index); - const view = sc.getView(acq.image_index); - - var cmd = au.CommandBufferProxy.init(.null_handle, au.D.wrapper); - try au.D.allocateCommandBuffers(&.{ - .command_pool = frame.pool, - .level = .primary, - .command_buffer_count = 1, - }, @ptrCast(&cmd.handle)); - - try cmd.beginCommandBuffer(&.{ .flags = .{ .one_time_submit_bit = true } }); + pub fn record_render( + self: Frame, + cmd: au.CommandBufferProxy, + image: vk.Image, + view: vk.ImageView, + scissor: vk.Rect2D, + ) !void { + _ = self; cmd.pipelineBarrier( .{ .top_of_pipe_bit = true }, @@ -153,18 +103,6 @@ pub fn main() !void { }), ); - const viewport = vk.Viewport{ - .x = 0, - .y = 0, - .width = @floatFromInt(sc.cinfo.image_extent.width), - .height = @floatFromInt(sc.cinfo.image_extent.height), - .min_depth = 0, - .max_depth = 1, - }; - const scissor = vk.Rect2D{ - .offset = .{ .x = 0, .y = 0 }, - .extent = sc.cinfo.image_extent, - }; const info = vk.RenderingInfoKHR{ .render_area = scissor, .layer_count = 1, @@ -182,8 +120,16 @@ pub fn main() !void { }}, }; - cmd.setViewport(0, 1, &.{viewport}); + cmd.setViewport(0, 1, &.{.{ + .x = @floatFromInt(scissor.offset.x), + .y = @floatFromInt(scissor.offset.y), + .width = @floatFromInt(scissor.extent.width), + .height = @floatFromInt(scissor.extent.height), + .min_depth = 0, + .max_depth = 1, + }}); cmd.setScissor(0, 1, &.{scissor}); + cmd.beginRendering(&info); // todo @@ -221,25 +167,110 @@ pub fn main() !void { }, }), ); - try cmd.endCommandBuffer(); + } +}; + +pub fn main() !void { + var gpa = std.heap.GeneralPurposeAllocator(.{}){}; + defer _ = gpa.detectLeaks(); + 
const alloc = gpa.allocator(); + + try au.init(alloc); + defer au.deinit(); + + var sc = try au.SwapChain.init(alloc); + defer sc.deinit(); + + const flights = try alloc.alloc(Flight, 3); // FRAMES IN FLIGHT + defer alloc.free(flights); + + for (flights) |*flight| { + flight.acquire = try au.D.createSemaphore(&.{}, null); + flight.complete = try au.D.createSemaphore(&.{}, null); + flight.fence = try au.D.createFence(&.{ .flags = .{ .signaled_bit = true } }, null); + flight.pool = try au.D.createCommandPool(&.{ .queue_family_index = au.device_config.family }, null); + flight.frame = try Frame.init(); + } + defer for (flights) |flight| { + au.D.destroySemaphore(flight.acquire, null); + au.D.destroySemaphore(flight.complete, null); + au.D.destroyFence(flight.fence, null); + au.D.destroyCommandPool(flight.pool, null); + flight.frame.deinit(); + }; + + var flight_idx: usize = 0; + while (!au.W.should_close()) : (flight_idx = (flight_idx + 1) % flights.len) { + const flight = flights[flight_idx]; + + const events = if (au.W.focused()) + au.wait_events_timeout(0.1) + else + au.wait_events_timeout(0.5); + + for (events) |u| { + switch (u) { + .framebufferSize => sc.mark(), + .cursorPos, .windowPos, .windowSize, .windowRefresh => {}, + else => |e| std.debug.print("{any}\n", .{e}), + } + } + + _ = try sc.rebuild(); + + _ = try au.D.waitForFences(1, &.{flight.fence}, vk.TRUE, std.math.maxInt(u64)); + try au.D.resetFences(1, &.{flight.fence}); + try au.D.resetCommandPool(flight.pool, .{}); + + const acq = try au.D.acquireNextImageKHR( + sc.handle, + std.math.maxInt(u64), + flight.acquire, + .null_handle, + ); + const image = sc.getImage(acq.image_index); + const view = sc.getView(acq.image_index); + + var render_cmd = au.CommandBufferProxy.init(.null_handle, au.D.wrapper); + try au.D.allocateCommandBuffers( + &.{ + .command_pool = flight.pool, + .level = .primary, + .command_buffer_count = 1, + }, + @ptrCast(&render_cmd.handle), + ); + + try render_cmd.beginCommandBuffer(&.{ .flags = .{ .one_time_submit_bit = true } }); + + try flight.frame.record_render( + render_cmd, + image, + view, + vk.Rect2D{ .offset = .{ .x = 0, .y = 0 }, .extent = sc.cinfo.image_extent }, + ); + + try render_cmd.endCommandBuffer(); try au.Q.submit( 1, - @ptrCast(&vk.SubmitInfo{ - .wait_semaphore_count = 1, - .p_wait_semaphores = @ptrCast(&frame.acquire), - .p_wait_dst_stage_mask = @ptrCast(&vk.PipelineStageFlags{ .color_attachment_output_bit = true }), - .command_buffer_count = 1, - .p_command_buffers = @ptrCast(&cmd.handle), - .signal_semaphore_count = 1, - .p_signal_semaphores = @ptrCast(&frame.complete), - }), - frame.fence, + &.{ + vk.SubmitInfo{ + .wait_semaphore_count = 1, + .p_wait_semaphores = @ptrCast(&flight.acquire), + .p_wait_dst_stage_mask = @ptrCast(&vk.PipelineStageFlags{ .color_attachment_output_bit = true }), + .command_buffer_count = 1, + .p_command_buffers = @ptrCast(&render_cmd.handle), + .signal_semaphore_count = 1, + .p_signal_semaphores = @ptrCast(&flight.complete), + }, + }, + flight.fence, ); _ = try au.Q.presentKHR(&vk.PresentInfoKHR{ .wait_semaphore_count = 1, - .p_wait_semaphores = &.{frame.complete}, + .p_wait_semaphores = &.{flight.complete}, .swapchain_count = 1, .p_swapchains = &.{sc.handle}, .p_image_indices = &.{acq.image_index}, From bc4421b7541b895e273634beeb3bd0c8196101f3 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Fri, 28 Jun 2024 21:43:30 -0400 Subject: [PATCH 064/113] extract generic au.Flights --- src/au.zig | 1 + src/au/flights.zig | 57 
++++++++++++++++++++++++++++++++++++++++++++++ src/main.zig | 33 ++++----------------------- 3 files changed, 63 insertions(+), 28 deletions(-) create mode 100644 src/au/flights.zig diff --git a/src/au.zig b/src/au.zig index 4cf299f..11a1815 100644 --- a/src/au.zig +++ b/src/au.zig @@ -6,6 +6,7 @@ const c = @import("c.zig"); pub const Bus = @import("au/Bus.zig"); pub const SwapChain = @import("au/SwapChain.zig"); +pub const Flights = @import("au/flights.zig").Flights; pub const use_debug_messenger = switch (builtin.mode) { .Debug, .ReleaseSafe => true, diff --git a/src/au/flights.zig b/src/au/flights.zig new file mode 100644 index 0000000..65350a9 --- /dev/null +++ b/src/au/flights.zig @@ -0,0 +1,57 @@ +const std = @import("std"); +const vk = @import("vk"); +const au = @import("../au.zig"); + +pub fn Flights(T: type) type { + return struct { + const Self = @This(); + + const Flight = struct { + acquire: vk.Semaphore = .null_handle, + complete: vk.Semaphore = .null_handle, + fence: vk.Fence = .null_handle, + pool: vk.CommandPool = .null_handle, + ctx: T, + }; + + alloc: std.mem.Allocator, + flights: []Flight, + idx: usize, + + pub fn init(alloc: std.mem.Allocator, n: usize) !Self { + var self: Self = .{ + .alloc = alloc, + .flights = try alloc.alloc(Flight, n), + .idx = 0, + }; + errdefer self.deinit(); + + for (self.flights) |*flight| { + flight.acquire = try au.D.createSemaphore(&.{}, null); + flight.complete = try au.D.createSemaphore(&.{}, null); + flight.fence = try au.D.createFence(&.{ .flags = .{ .signaled_bit = true } }, null); + flight.pool = try au.D.createCommandPool(&.{ .queue_family_index = au.device_config.family }, null); + flight.ctx = try T.init(); + } + + return self; + } + + pub fn deinit(self: Self) void { + for (self.flights) |flight| { + au.D.destroySemaphore(flight.acquire, null); + au.D.destroySemaphore(flight.complete, null); + au.D.destroyFence(flight.fence, null); + au.D.destroyCommandPool(flight.pool, null); + flight.ctx.deinit(); + } + self.alloc.free(self.flights); + } + + pub fn next(self: *Self) Flight { + const idx = self.idx; + self.idx = (self.idx + 1) % self.flights.len; + return self.flights[idx]; + } + }; +} diff --git a/src/main.zig b/src/main.zig index a45bc89..a1169d5 100644 --- a/src/main.zig +++ b/src/main.zig @@ -50,13 +50,6 @@ const vertices = [_]Vertex{ const indices = [_]Index{ 4, 5, 6, 6, 5, 7 }; -const Flight = struct { - acquire: vk.Semaphore = .null_handle, - complete: vk.Semaphore = .null_handle, - fence: vk.Fence = .null_handle, - pool: vk.CommandPool = .null_handle, - frame: Frame, -}; const Frame = struct { pub fn init() !Frame { @@ -181,27 +174,11 @@ pub fn main() !void { var sc = try au.SwapChain.init(alloc); defer sc.deinit(); - const flights = try alloc.alloc(Flight, 3); // FRAMES IN FLIGHT - defer alloc.free(flights); + var flights = try au.Flights(Frame).init(alloc, 3); // FRAMES IN FLIGHT + defer flights.deinit(); - for (flights) |*flight| { - flight.acquire = try au.D.createSemaphore(&.{}, null); - flight.complete = try au.D.createSemaphore(&.{}, null); - flight.fence = try au.D.createFence(&.{ .flags = .{ .signaled_bit = true } }, null); - flight.pool = try au.D.createCommandPool(&.{ .queue_family_index = au.device_config.family }, null); - flight.frame = try Frame.init(); - } - defer for (flights) |flight| { - au.D.destroySemaphore(flight.acquire, null); - au.D.destroySemaphore(flight.complete, null); - au.D.destroyFence(flight.fence, null); - au.D.destroyCommandPool(flight.pool, null); - flight.frame.deinit(); - }; - - var 
flight_idx: usize = 0; - while (!au.W.should_close()) : (flight_idx = (flight_idx + 1) % flights.len) { - const flight = flights[flight_idx]; + while (!au.W.should_close()) { + const flight = flights.next(); const events = if (au.W.focused()) au.wait_events_timeout(0.1) @@ -243,7 +220,7 @@ pub fn main() !void { try render_cmd.beginCommandBuffer(&.{ .flags = .{ .one_time_submit_bit = true } }); - try flight.frame.record_render( + try flight.ctx.record_render( render_cmd, image, view, From 154427d5bc2629313799b5488a24d29a5bc365d1 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Mon, 1 Jul 2024 17:24:04 -0400 Subject: [PATCH 065/113] ImGui demo window --- .gitignore | 2 + build.zig | 3 ++ build.zig.zon | 3 ++ cimgui/build.zig | 93 ++++++++++++++++++++++++++++++++++++++++++++ cimgui/build.zig.zon | 19 +++++++++ cimgui/src/root.zig | 10 +++++ src/au.zig | 2 +- src/main.zig | 68 ++++++++++++++++++++++++++++++-- 8 files changed, 196 insertions(+), 4 deletions(-) create mode 100644 cimgui/build.zig create mode 100644 cimgui/build.zig.zon create mode 100644 cimgui/src/root.zig diff --git a/.gitignore b/.gitignore index 3516d89..c33569d 100644 --- a/.gitignore +++ b/.gitignore @@ -45,3 +45,5 @@ build-*/ docgen_tmp/ .idea/ + +imgui.ini diff --git a/build.zig b/build.zig index c213265..c6d8fbb 100644 --- a/build.zig +++ b/build.zig @@ -11,12 +11,15 @@ pub fn build(b: *std.Build) void { }); const vkmod = vk.module("vulkan-zig"); + const cimgui = b.dependency("cimgui", .{}); + const exe = b.addExecutable(.{ .name = "scratchzig", .root_source_file = b.path("src/main.zig"), .target = target, .optimize = optimize, }); + exe.root_module.addImport("cimgui", cimgui.module("cimgui")); const shaders = vkgen.ShaderCompileStep.create( b, diff --git a/build.zig.zon b/build.zig.zon index 43a3b53..6942b2e 100644 --- a/build.zig.zon +++ b/build.zig.zon @@ -7,6 +7,9 @@ .url = "https://github.com/Snektron/vulkan-zig/archive/f2c2e0ff80374563357cc4fe72bf7d8a2c956824.tar.gz", .hash = "1220cf0972c6fe05437c1a8689b955084385eb7ca1f8c14010d49ca5a89570a5d90d", }, + .cimgui = .{ + .path="cimgui", + }, }, .paths = .{ diff --git a/cimgui/build.zig b/cimgui/build.zig new file mode 100644 index 0000000..11349b1 --- /dev/null +++ b/cimgui/build.zig @@ -0,0 +1,93 @@ +const std = @import("std"); +const vkgen = @import("vulkan-zig"); + +pub fn build(b: *std.Build) !void { + const target = b.standardTargetOptions(.{}); + const optimize = b.standardOptimizeOption(.{}); + + const imgui_dep = b.dependency("imgui", .{}); + const cimgui_dep = b.dependency("cimgui", .{}); + + const luajit = try b.findProgram(&.{"luajit"}, &.{}); + + const gen = b.addSystemCommand(&.{luajit}); + gen.setCwd(cimgui_dep.path("generator/")); + gen.addFileArg(cimgui_dep.path("generator/generator.lua")); + gen.addArgs(&.{ + "zig cc", + "comments internal noimstrv", + "glfw", + "vulkan", + }); + _ = gen.captureStdOut(); // to quiet output + + { + const relpath = try std.fs.path.relative( + b.allocator, + cimgui_dep.path("generator").getPath(b), + imgui_dep.path("").getPath(b), + ); + defer b.allocator.free(relpath); + gen.setEnvironmentVariable( + "IMGUI_PATH", + relpath, + ); + } + + const copy = b.addWriteFiles(); + copy.step.dependOn(&gen.step); + _ = copy.addCopyDirectory(imgui_dep.path(""), "imgui", .{ + .include_extensions = &.{ ".h", ".cpp" }, + }); + _ = copy.addCopyFile(cimgui_dep.path("cimgui.h"), "cimgui.h"); + _ = copy.addCopyFile(cimgui_dep.path("cimgui.cpp"), "cimgui.cpp"); + _ = copy.addCopyFile(cimgui_dep.path("generator/output/cimgui_impl.h"), 
"cimgui_impl.h"); + + const cimgui = b.addSharedLibrary(.{ + .name = "cimgui", + .target = target, + .optimize = optimize, + }); + cimgui.step.dependOn(©.step); + cimgui.linkLibC(); + cimgui.linkLibCpp(); + cimgui.addIncludePath(copy.getDirectory()); + cimgui.addIncludePath(copy.getDirectory().path(b, "imgui")); + cimgui.addCSourceFiles(.{ + .root = copy.getDirectory(), + .files = &.{ + "cimgui.cpp", + "imgui/imgui.cpp", + "imgui/imgui_tables.cpp", + "imgui/imgui_widgets.cpp", + "imgui/imgui_demo.cpp", + "imgui/imgui_draw.cpp", + "imgui/backends/imgui_impl_glfw.cpp", + "imgui/backends/imgui_impl_vulkan.cpp", + }, + .flags = &.{ + "-DIMGUI_IMPL_VULKAN_NO_PROTOTYPES", + "-DCIMGUI_USE_GLFW", + "-DCIMGUI_USE_VULKAN", + "-DIMGUI_IMPL_API=extern \"C\"", + }, + }); + cimgui.installHeader(copy.getDirectory().path(b, "cimgui.h"), "cimgui.h"); + cimgui.installHeader(copy.getDirectory().path(b, "cimgui_impl.h"), "cimgui_impl.h"); + + // todo separate impls into different shared libraries for easier linkage + cimgui.linkSystemLibrary2("glfw3", .{ + .needed = true, + .preferred_link_mode = .static, + .use_pkg_config = .force, + }); + + b.installArtifact(cimgui); + + const cimgui_mod = b.addModule("cimgui", .{ + .target = target, + .optimize = optimize, + .root_source_file = b.path("src/root.zig"), + }); + cimgui_mod.linkLibrary(cimgui); +} diff --git a/cimgui/build.zig.zon b/cimgui/build.zig.zon new file mode 100644 index 0000000..935c753 --- /dev/null +++ b/cimgui/build.zig.zon @@ -0,0 +1,19 @@ +.{ + .name = "cimgui", + .version = "0.0.0", + + .dependencies = .{ + .cimgui = .{ + .url = "https://github.com/cimgui/cimgui/archive/refs/tags/1.90.8dock.tar.gz", + .hash = "12207ee69164f88f4b41ee5d44edf3835ec4dab0c0cd885799da67d56668f4a3d46b", + }, + .imgui = .{ + .url = "https://github.com/ocornut/imgui/archive/refs/tags/v1.90.8-docking.tar.gz", + .hash = "122065151b97161e25abb71c9df2fd9fba42aaca8c33d689a480b883d82411c8fabe", + }, + }, + + .paths = .{ + "", + }, +} diff --git a/cimgui/src/root.zig b/cimgui/src/root.zig new file mode 100644 index 0000000..bd5d960 --- /dev/null +++ b/cimgui/src/root.zig @@ -0,0 +1,10 @@ +pub const c = @cImport({ + @cDefine("CIMGUI_DEFINE_ENUMS_AND_STRUCTS", {}); + @cInclude("cimgui.h"); + + @cInclude("vulkan/vulkan.h"); + + @cDefine("CIMGUI_USE_VULKAN", {}); + @cDefine("CIMGUI_USE_GLFW", {}); + @cInclude("cimgui_impl.h"); +}); diff --git a/src/au.zig b/src/au.zig index 11a1815..79e1a37 100644 --- a/src/au.zig +++ b/src/au.zig @@ -31,7 +31,7 @@ pub const device_extensions: []const [*:0]const u8 = &.{ }; pub const app_info: vk.ApplicationInfo = .{ - .p_application_name = "zig-glfw-vulkan", + .p_application_name = "hey tildes!", .application_version = vk.makeApiVersion(0, 0, 0, 0), .p_engine_name = "zig-glfw-vulkan", .engine_version = vk.makeApiVersion(0, 0, 0, 0), diff --git a/src/main.zig b/src/main.zig index a1169d5..2cc82d4 100644 --- a/src/main.zig +++ b/src/main.zig @@ -5,8 +5,7 @@ const shaders = @import("shaders"); const Allocator = std.mem.Allocator; const au = @import("au.zig"); - -const app_name = "vulkan-zig triangle example"; +const im = @import("cimgui"); const Vertex = extern struct { const binding_description = vk.VertexInputBindingDescription{ @@ -50,7 +49,6 @@ const vertices = [_]Vertex{ const indices = [_]Index{ 4, 5, 6, 6, 5, 7 }; - const Frame = struct { pub fn init() !Frame { return .{}; @@ -132,6 +130,8 @@ const Frame = struct { // vkd.cmdBindIndexBuffer(cmdbuf, index_buffer, 0, .uint16); // vkd.cmdDrawIndexed(cmdbuf, indices.len, 1, 0, 0, 0); + 
im.c.ImGui_ImplVulkan_RenderDrawData(im.c.igGetDrawData(), @ptrFromInt(@intFromEnum(cmd.handle)), null); + cmd.endRendering(); cmd.pipelineBarrier( @@ -163,6 +163,10 @@ const Frame = struct { } }; +pub fn loader_wrapper(procname: [*c]const u8, _: ?*anyopaque) callconv(.C) vk.PfnVoidFunction { + return c.glfwGetInstanceProcAddress(au.I.handle, procname); +} + pub fn main() !void { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; defer _ = gpa.detectLeaks(); @@ -177,7 +181,65 @@ pub fn main() !void { var flights = try au.Flights(Frame).init(alloc, 3); // FRAMES IN FLIGHT defer flights.deinit(); + const ctx = im.c.igCreateContext(null) orelse return error.igCreateContextFailed; + defer im.c.igDestroyContext(ctx); + + // _ = im.c.ImGui_ImplGlfw_InitForOther(@ptrCast(au.W.handle), true); + // defer im.c.ImGui_ImplGlfw_Shutdown(); + + const descriptorPool = try au.D.createDescriptorPool(&vk.DescriptorPoolCreateInfo{ + .flags = .{ .free_descriptor_set_bit = true }, + .pool_size_count = 1, + .p_pool_sizes = &.{ + vk.DescriptorPoolSize{ .descriptor_count = 32, .type = .combined_image_sampler }, + }, + .max_sets = 32, + }, null); + defer au.D.destroyDescriptorPool(descriptorPool, null); + + _ = im.c.ImGui_ImplVulkan_LoadFunctions(loader_wrapper, null); + _ = im.c.ImGui_ImplGlfw_InitForVulkan(@ptrCast(au.W.handle), true); + defer im.c.ImGui_ImplGlfw_Shutdown(); + + _ = try sc.rebuild(); + + const prci: vk.PipelineRenderingCreateInfo = .{ + .view_mask = 0, + .depth_attachment_format = .undefined, + .stencil_attachment_format = .undefined, + .color_attachment_count = 1, + .p_color_attachment_formats = &.{au.device_config.format.format}, + }; + + var info: im.c.ImGui_ImplVulkan_InitInfo = .{ + .Instance = @ptrFromInt(@intFromEnum(au.I.handle)), + .PhysicalDevice = @ptrFromInt(@intFromEnum(au.device_config.pdev)), + .Device = @ptrFromInt(@intFromEnum(au.D.handle)), + .QueueFamily = au.device_config.family, + .Queue = @ptrFromInt(@intFromEnum(au.Q.handle)), + .DescriptorPool = @ptrFromInt(@intFromEnum(descriptorPool)), + .RenderPass = null, + .MinImageCount = sc.cinfo.min_image_count, + .ImageCount = @intCast(sc.images.items.len), + .PipelineRenderingCreateInfo = @bitCast(prci), + .MSAASamples = 0, + .PipelineCache = null, + .Subpass = 0, + .UseDynamicRendering = true, + .Allocator = null, + }; + _ = im.c.ImGui_ImplVulkan_Init(&info); + _ = im.c.ImGui_ImplVulkan_CreateFontsTexture(); + defer im.c.ImGui_ImplVulkan_Shutdown(); + while (!au.W.should_close()) { + im.c.ImGui_ImplGlfw_NewFrame(); + im.c.ImGui_ImplVulkan_NewFrame(); + im.c.igNewFrame(); + im.c.igShowDemoWindow(null); + im.c.igEndFrame(); + im.c.igRender(); + const flight = flights.next(); const events = if (au.W.focused()) From 792ceeb1d9a5178ff59928285752a1a486dc7697 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Mon, 1 Jul 2024 23:37:34 -0400 Subject: [PATCH 066/113] fix imgui backend image count - should be the number of frames in flight --- src/main.zig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/main.zig b/src/main.zig index 2cc82d4..c7d90aa 100644 --- a/src/main.zig +++ b/src/main.zig @@ -219,8 +219,8 @@ pub fn main() !void { .Queue = @ptrFromInt(@intFromEnum(au.Q.handle)), .DescriptorPool = @ptrFromInt(@intFromEnum(descriptorPool)), .RenderPass = null, - .MinImageCount = sc.cinfo.min_image_count, - .ImageCount = @intCast(sc.images.items.len), + .MinImageCount = 2, + .ImageCount = @intCast(flights.flights.len), .PipelineRenderingCreateInfo = @bitCast(prci), .MSAASamples = 0, .PipelineCache = null, From 
437a60bd5cd5af11f1b57dd8495df34678c6a6a0 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Mon, 1 Jul 2024 23:37:51 -0400 Subject: [PATCH 067/113] poll events while focused more responsive --- src/main.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main.zig b/src/main.zig index c7d90aa..7582341 100644 --- a/src/main.zig +++ b/src/main.zig @@ -243,7 +243,7 @@ pub fn main() !void { const flight = flights.next(); const events = if (au.W.focused()) - au.wait_events_timeout(0.1) + au.poll_events() else au.wait_events_timeout(0.5); From 6b2715eebe848c5becec32076e57f9f78e15609a Mon Sep 17 00:00:00 2001 From: David Allemang Date: Tue, 2 Jul 2024 00:01:36 -0400 Subject: [PATCH 068/113] reuse command buffers --- src/au/flights.zig | 7 +++++++ src/main.zig | 27 ++++++++++++--------------- 2 files changed, 19 insertions(+), 15 deletions(-) diff --git a/src/au/flights.zig b/src/au/flights.zig index 65350a9..86825c3 100644 --- a/src/au/flights.zig +++ b/src/au/flights.zig @@ -11,6 +11,7 @@ pub fn Flights(T: type) type { complete: vk.Semaphore = .null_handle, fence: vk.Fence = .null_handle, pool: vk.CommandPool = .null_handle, + cmd: vk.CommandBuffer = .null_handle, ctx: T, }; @@ -31,6 +32,11 @@ pub fn Flights(T: type) type { flight.complete = try au.D.createSemaphore(&.{}, null); flight.fence = try au.D.createFence(&.{ .flags = .{ .signaled_bit = true } }, null); flight.pool = try au.D.createCommandPool(&.{ .queue_family_index = au.device_config.family }, null); + try au.D.allocateCommandBuffers(&vk.CommandBufferAllocateInfo{ + .command_buffer_count = 1, + .command_pool = flight.pool, + .level = .primary, + }, @ptrCast(&flight.cmd)); flight.ctx = try T.init(); } @@ -42,6 +48,7 @@ pub fn Flights(T: type) type { au.D.destroySemaphore(flight.acquire, null); au.D.destroySemaphore(flight.complete, null); au.D.destroyFence(flight.fence, null); + au.D.freeCommandBuffers(flight.pool, 1, &.{flight.cmd}); au.D.destroyCommandPool(flight.pool, null); flight.ctx.deinit(); } diff --git a/src/main.zig b/src/main.zig index 7582341..1f6ad39 100644 --- a/src/main.zig +++ b/src/main.zig @@ -270,28 +270,20 @@ pub fn main() !void { const image = sc.getImage(acq.image_index); const view = sc.getView(acq.image_index); - var render_cmd = au.CommandBufferProxy.init(.null_handle, au.D.wrapper); - try au.D.allocateCommandBuffers( - &.{ - .command_pool = flight.pool, - .level = .primary, - .command_buffer_count = 1, - }, - @ptrCast(&render_cmd.handle), - ); + var cmd = au.CommandBufferProxy.init(flight.cmd, au.D.wrapper); - try render_cmd.beginCommandBuffer(&.{ .flags = .{ .one_time_submit_bit = true } }); + try cmd.beginCommandBuffer(&.{ .flags = .{ .one_time_submit_bit = true } }); try flight.ctx.record_render( - render_cmd, + cmd, image, view, vk.Rect2D{ .offset = .{ .x = 0, .y = 0 }, .extent = sc.cinfo.image_extent }, ); - try render_cmd.endCommandBuffer(); + try cmd.endCommandBuffer(); - try au.Q.submit( + au.Q.submit( 1, &.{ vk.SubmitInfo{ @@ -299,13 +291,18 @@ pub fn main() !void { .p_wait_semaphores = @ptrCast(&flight.acquire), .p_wait_dst_stage_mask = @ptrCast(&vk.PipelineStageFlags{ .color_attachment_output_bit = true }), .command_buffer_count = 1, - .p_command_buffers = @ptrCast(&render_cmd.handle), + .p_command_buffers = @ptrCast(&cmd.handle), .signal_semaphore_count = 1, .p_signal_semaphores = @ptrCast(&flight.complete), }, }, flight.fence, - ); + ) catch { + std.debug.print("Failed to submit.\nWaiting for idle...", .{}); + au.D.deviceWaitIdle() catch + std.debug.print("deviceWaitIdle 
failed\n", .{}); + @panic("Submission failed"); + }; _ = try au.Q.presentKHR(&vk.PresentInfoKHR{ .wait_semaphore_count = 1, From 8a9bbee5363c0b944d4e5579515238d733c348ff Mon Sep 17 00:00:00 2001 From: David Allemang Date: Tue, 2 Jul 2024 10:03:22 -0400 Subject: [PATCH 069/113] remove vestigial modules --- src/gfx.zig | 134 -------------------------------------------- src/inspect.zig | 146 ------------------------------------------------ 2 files changed, 280 deletions(-) delete mode 100644 src/gfx.zig delete mode 100644 src/inspect.zig diff --git a/src/gfx.zig b/src/gfx.zig deleted file mode 100644 index 2574e64..0000000 --- a/src/gfx.zig +++ /dev/null @@ -1,134 +0,0 @@ -const std = @import("std"); -const builtin = @import("builtin"); - -const vk = @import("vk"); -const c = @import("c.zig"); - -pub fn uploadData( - comptime T: type, - pdev: vk.PhysicalDevice, - vki: Instance.Wrapper, - dev: vk.Device, - vkd: Device.Wrapper, - queue: vk.Queue, - pool: vk.CommandPool, - buffer: vk.Buffer, - source: []const T, -) !void { - // if (@typeInfo(T) == .Struct and @typeInfo(T).Struct.layout == .auto) @compileError("Requires defined T layout"); - - const size = @sizeOf(T) * source.len; - - const staging_buffer = try vkd.createBuffer(dev, &.{ - .size = size, - .usage = .{ .transfer_src_bit = true }, - .sharing_mode = .exclusive, - }, null); - defer vkd.destroyBuffer(dev, staging_buffer, null); - - const vally = VkAllocator.init(pdev, vki); - - const mem_reqs = vkd.getBufferMemoryRequirements(dev, staging_buffer); - const staging_memory = try vally.alloc(dev, vkd, mem_reqs, .{ .host_visible_bit = true, .host_coherent_bit = true }); - // const staging_memory = try allocate(pdev, vki, dev, vkd, mem_reqs, .{ - // .host_visible_bit = true, - // .host_coherent_bit = true, - // }); - defer vkd.freeMemory(dev, staging_memory, null); - - try vkd.bindBufferMemory(dev, staging_buffer, staging_memory, 0); - - { - const data = try vkd.mapMemory(dev, staging_memory, 0, vk.WHOLE_SIZE, .{}); - defer vkd.unmapMemory(dev, staging_memory); - - const dest: [*]T = @ptrCast(@alignCast(data)); - @memcpy(dest, source); - } - - try copyBuffer(dev, queue, pool, buffer, staging_buffer, size, vkd); -} - -pub fn copyBuffer( - dev: vk.Device, - queue: vk.Queue, - pool: vk.CommandPool, - dst: vk.Buffer, - src: vk.Buffer, - size: vk.DeviceSize, - vkd: Device.Wrapper, -) !void { - var cmdbuf: vk.CommandBuffer = undefined; - try vkd.allocateCommandBuffers(dev, &.{ - .command_pool = pool, - .level = .primary, - .command_buffer_count = 1, - }, @ptrCast(&cmdbuf)); - defer vkd.freeCommandBuffers(dev, pool, 1, @ptrCast(&cmdbuf)); - - try vkd.beginCommandBuffer(cmdbuf, &.{ - .flags = .{ .one_time_submit_bit = true }, - }); - - const region = vk.BufferCopy{ - .src_offset = 0, - .dst_offset = 0, - .size = size, - }; - vkd.cmdCopyBuffer(cmdbuf, src, dst, 1, @ptrCast(®ion)); - - try vkd.endCommandBuffer(cmdbuf); - - const si = vk.SubmitInfo{ - .command_buffer_count = 1, - .p_command_buffers = @ptrCast(&cmdbuf), - .p_wait_dst_stage_mask = undefined, - }; - - // creating and submitting a queue for every copy operation seems a bad idea for "streamed" data - // gonna want a way to send a copy operation WITH SYNCHRONIZATION PRIMITIVES on a particular queue - // see https://stackoverflow.com/a/62183243 - // - // this may be a misunderstanding on how submission works... 
- - try vkd.queueSubmit(queue, 1, @ptrCast(&si), .null_handle); - try vkd.queueWaitIdle(queue); -} - -pub const VkAllocator = struct { - memory_types: [vk.MAX_MEMORY_TYPES]vk.MemoryType, - memory_type_count: u32, - - pub fn init( - pdev: vk.PhysicalDevice, - vki: Instance.Wrapper, - ) VkAllocator { - const props = vki.getPhysicalDeviceMemoryProperties(pdev); - - return VkAllocator{ - .memory_types = props.memory_types, - .memory_type_count = props.memory_type_count, - }; - } - - pub fn alloc( - self: VkAllocator, - dev: vk.Device, - vkd: Device.Wrapper, - reqs: vk.MemoryRequirements, - flags: vk.MemoryPropertyFlags, - ) !vk.DeviceMemory { - const memory_type_bits = reqs.memory_type_bits; - - for (self.memory_types[0..self.memory_type_count], 0..) |mem_type, idx| { - if (memory_type_bits & (@as(u32, 1) << @truncate(idx)) != 0 and mem_type.property_flags.contains(flags)) { - return try vkd.allocateMemory(dev, &.{ - .allocation_size = reqs.size, - .memory_type_index = @intCast(idx), - }, null); - } - } - - return error.NoSuitableMemoryType; - } -}; diff --git a/src/inspect.zig b/src/inspect.zig deleted file mode 100644 index 66cc397..0000000 --- a/src/inspect.zig +++ /dev/null @@ -1,146 +0,0 @@ -const std = @import("std"); -const vk = @import("vk"); - -const c = @cImport({ - @cDefine("GLFW_INCLUDE_NONE", {}); - @cInclude("GLFW/glfw3.h"); -}); - -const BaseWrapper = vk.BaseWrapper(.{ - .getInstanceProcAddr = true, - .createInstance = true, -}); - -const InstanceWrapper = vk.InstanceWrapper(.{ - .destroyInstance = true, - .enumeratePhysicalDevices = true, - .getPhysicalDeviceProperties = true, - .getPhysicalDeviceQueueFamilyProperties = true, - .getPhysicalDeviceSurfaceFormatsKHR = true, - .getPhysicalDeviceSurfacePresentModesKHR = true, - .getPhysicalDeviceSurfaceSupportKHR = true, - .getPhysicalDeviceSurfaceCapabilitiesKHR = true, - .destroySurfaceKHR = true, -}); - -extern fn glfwGetRequiredInstanceExtensions(count: *u32) [*]const [*:0]const u8; - -extern fn glfwCreateWindowSurface( - instance: vk.Instance, - window: *c.GLFWwindow, - allocation_callbacks: ?*const vk.AllocationCallbacks, - surface: *vk.SurfaceKHR, -) vk.Result; - -extern fn vkGetInstanceProcAddr(instance: vk.Instance, procname: [*:0]const u8) vk.PfnVoidFunction; -extern fn vkGetDeviceProcAddr(device: vk.Device, procname: [*:0]const u8) vk.PfnVoidFunction; - -pub fn main() !void { - if (c.glfwInit() == c.GLFW_FALSE) return error.glfwInitFailed; - defer c.glfwTerminate(); - - c.glfwWindowHint(c.GLFW_CLIENT_API, c.GLFW_NO_API); - c.glfwWindowHintString(c.GLFW_X11_CLASS_NAME, "floating_window"); - c.glfwWindowHintString(c.GLFW_X11_INSTANCE_NAME, "floating_window"); - const window = c.glfwCreateWindow(400, 300, "vkinspect", null, null) orelse - return error.glfwWindowCreateFailed; - defer c.glfwDestroyWindow(window); - - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; - const ally = gpa.allocator(); - - const vkb = try BaseWrapper.load(vkGetInstanceProcAddr); - - var ext_count: u32 = undefined; - const exts = glfwGetRequiredInstanceExtensions(&ext_count); - - const instance = try vkb.createInstance(&.{ - .p_application_info = &.{ - .p_application_name = "vkinspect", - .application_version = 0, - .p_engine_name = "vkinspect", - .engine_version = 0, - .api_version = vk.API_VERSION_1_3, - }, - .enabled_extension_count = ext_count, - .pp_enabled_extension_names = exts, - .enabled_layer_count = 0, - }, null); - const vki = try InstanceWrapper.load(instance, vkGetInstanceProcAddr); - defer vki.destroyInstance(instance, null); - - var 
surface: vk.SurfaceKHR = undefined; - switch (glfwCreateWindowSurface(instance, window, null, &surface)) { - .success => {}, - else => return error.Unknown, - } - defer vki.destroySurfaceKHR(instance, surface, null); - - var pdev_count: u32 = undefined; - _ = try vki.enumeratePhysicalDevices(instance, &pdev_count, null); - const pdevs = try ally.alloc(vk.PhysicalDevice, pdev_count); - defer ally.free(pdevs); - _ = try vki.enumeratePhysicalDevices(instance, &pdev_count, pdevs.ptr); - - std.debug.print("{d} physical devices:\n", .{pdev_count}); - for (pdevs) |pdev| { - const props = vki.getPhysicalDeviceProperties(pdev); - const name = std.mem.sliceTo(&props.device_name, 0); - std.debug.print("=" ** 30 ++ "\n", .{}); - std.debug.print("= {s}\n", .{name}); - std.debug.print("=" ** 30 ++ "\n", .{}); - - std.debug.print("type: {any}\n", .{props.device_type}); - // props.device_type - - std.debug.print("max_push_constants_size: {d}\n", .{props.limits.max_push_constants_size}); - - var family_count: u32 = undefined; - vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, null); - const families = try ally.alloc(vk.QueueFamilyProperties, family_count); - defer ally.free(families); - vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, families.ptr); - - std.debug.print(" {d} queue families:\n", .{family_count}); - for (families, 0..) |family, idx| { - const support = try vki.getPhysicalDeviceSurfaceSupportKHR(pdev, @intCast(idx), surface); - std.debug.print(" - {any}\n", .{family.queue_flags}); - std.debug.print(" (max {d}, surface {any})\n", .{ - family.queue_count, - support != 0, - }); - } - - var format_count: u32 = undefined; - _ = try vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &format_count, null); - const formats = try ally.alloc(vk.SurfaceFormatKHR, format_count); - defer ally.free(formats); - _ = try vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &format_count, formats.ptr); - - var mode_count: u32 = undefined; - _ = try vki.getPhysicalDeviceSurfacePresentModesKHR(pdev, surface, &mode_count, null); - const modes = try ally.alloc(vk.PresentModeKHR, mode_count); - defer ally.free(modes); - _ = try vki.getPhysicalDeviceSurfacePresentModesKHR(pdev, surface, &mode_count, modes.ptr); - - std.debug.print(" {d} formats\n", .{format_count}); - for (formats) |format| { - std.debug.print(" - {any}\n", .{format}); - } - std.debug.print(" {d} present modes\n", .{mode_count}); - for (modes) |mode| { - std.debug.print(" - {any}\n", .{mode}); - } - - const caps = try vki.getPhysicalDeviceSurfaceCapabilitiesKHR(pdev, surface); - std.debug.print(" surface capabilities:\n", .{}); - std.debug.print(" {any}\n", .{caps.current_extent}); - std.debug.print(" current: {any}\n", .{caps.current_transform}); - std.debug.print(" supported: {any}\n", .{caps.supported_transforms}); - std.debug.print(" {any}\n", .{caps.supported_usage_flags}); - std.debug.print(" {} - {} images\n", .{ caps.min_image_count, caps.max_image_count }); - std.debug.print(" {} - {} extent\n", .{ caps.min_image_extent, caps.max_image_extent }); - std.debug.print(" 1 - {} arrays\n", .{caps.max_image_array_layers}); - std.debug.print(" {}\n", .{caps.supported_composite_alpha}); - } -} From f0e7be542b7b77d4f3b144ea649aa0741953bc86 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Wed, 3 Jul 2024 11:34:13 -0400 Subject: [PATCH 070/113] build glfw --- build.zig | 70 +++++-------------------------------- build.zig.zon | 7 ++-- cimgui/build.zig | 10 ++---- cimgui/build.zig.zon | 3 ++ glfw/build.zig | 82 
++++++++++++++++++++++++++++++++++++++++++++ glfw/build.zig.zon | 14 ++++++++ 6 files changed, 115 insertions(+), 71 deletions(-) create mode 100644 glfw/build.zig create mode 100644 glfw/build.zig.zon diff --git a/build.zig b/build.zig index c6d8fbb..b2f4fb4 100644 --- a/build.zig +++ b/build.zig @@ -1,25 +1,30 @@ const std = @import("std"); const vkgen = @import("vulkan-zig"); +const glfw_util = @import("glfw"); pub fn build(b: *std.Build) void { const target = b.standardTargetOptions(.{}); - const optimize = b.standardOptimizeOption(.{}); + const glfw_dep = b.dependency("glfw", .{}); + glfw_util.install(glfw_dep, b); + const vk = b.dependency("vulkan-zig", .{ .registry = @as([]const u8, b.pathFromRoot("reg/vk.xml")), }); - const vkmod = vk.module("vulkan-zig"); const cimgui = b.dependency("cimgui", .{}); const exe = b.addExecutable(.{ - .name = "scratchzig", + .name = "zig-glfw-vulkan", .root_source_file = b.path("src/main.zig"), .target = target, .optimize = optimize, }); + exe.root_module.addImport("vk", vk.module("vulkan-zig")); exe.root_module.addImport("cimgui", cimgui.module("cimgui")); + glfw_util.link(glfw_dep, exe); + b.installArtifact(exe); const shaders = vkgen.ShaderCompileStep.create( b, @@ -30,18 +35,6 @@ pub fn build(b: *std.Build) void { shaders.add("triangle_frag", "src/shaders/triangle.frag", .{}); exe.root_module.addImport("shaders", shaders.getModule()); - // this requires PKG_CONFIG_PATH to be set. something like: - // ~/.local/lib/pkgconfig/ - exe.linkSystemLibrary2("glfw3", .{ - .needed = true, - .preferred_link_mode = .static, - .use_pkg_config = .force, - }); - exe.linkLibC(); - exe.root_module.addImport("vk", vkmod); - - b.installArtifact(exe); - const run_cmd = b.addRunArtifact(exe); run_cmd.step.dependOn(b.getInstallStep()); @@ -51,51 +44,4 @@ pub fn build(b: *std.Build) void { const run_step = b.step("run", "Run the app"); run_step.dependOn(&run_cmd.step); - - const exe_unit_tests = b.addTest(.{ - .root_source_file = b.path("src/main.zig"), - .target = target, - .optimize = optimize, - }); - exe_unit_tests.linkSystemLibrary2("glfw3", .{ - .needed = true, - .preferred_link_mode = .static, - .use_pkg_config = .force, - }); - exe_unit_tests.linkLibC(); - const run_exe_unit_tests = b.addRunArtifact(exe_unit_tests); - - const dsa_unit_tests = b.addTest(.{ - .name = "dsa.zig tests", - .root_source_file = b.path("src/dsa.zig"), - .target = target, - .optimize = optimize, - }); - const run_dsa_unit_tests = b.addRunArtifact(dsa_unit_tests); - - const test_step = b.step("test", "Run unit tests"); - test_step.dependOn(&run_exe_unit_tests.step); - test_step.dependOn(&run_dsa_unit_tests.step); - - const inspect = b.addExecutable(.{ - .name = "vkinspect", - .root_source_file = b.path("src/inspect.zig"), - .target = target, - .optimize = optimize, - }); - inspect.linkSystemLibrary2("vulkan", .{ - .needed = true, - .preferred_link_mode = .dynamic, - }); - inspect.linkSystemLibrary2("glfw3", .{ - .needed = true, - .preferred_link_mode = .static, - .use_pkg_config = .force, - }); - exe_unit_tests.linkLibC(); - inspect.root_module.addImport("vk", vkmod); - inspect.linkLibC(); - const run_inspect = b.addRunArtifact(inspect); - const inspect_step = b.step("vki", "Vulkan Inspect"); - inspect_step.dependOn(&run_inspect.step); } diff --git a/build.zig.zon b/build.zig.zon index 6942b2e..eb9a305 100644 --- a/build.zig.zon +++ b/build.zig.zon @@ -1,5 +1,5 @@ .{ - .name = "scratchzig", + .name = "zig-glfw-vulkan", .version = "0.0.0", .dependencies = .{ @@ -7,8 +7,11 @@ .url = 
"https://github.com/Snektron/vulkan-zig/archive/f2c2e0ff80374563357cc4fe72bf7d8a2c956824.tar.gz", .hash = "1220cf0972c6fe05437c1a8689b955084385eb7ca1f8c14010d49ca5a89570a5d90d", }, + .glfw = .{ + .path = "glfw", + }, .cimgui = .{ - .path="cimgui", + .path = "cimgui", }, }, diff --git a/cimgui/build.zig b/cimgui/build.zig index 11349b1..19072a6 100644 --- a/cimgui/build.zig +++ b/cimgui/build.zig @@ -1,5 +1,6 @@ const std = @import("std"); const vkgen = @import("vulkan-zig"); +const glfw_util = @import("glfw"); pub fn build(b: *std.Build) !void { const target = b.standardTargetOptions(.{}); @@ -7,6 +8,7 @@ pub fn build(b: *std.Build) !void { const imgui_dep = b.dependency("imgui", .{}); const cimgui_dep = b.dependency("cimgui", .{}); + const glfw_dep = b.dependency("glfw", .{}); const luajit = try b.findProgram(&.{"luajit"}, &.{}); @@ -74,13 +76,7 @@ pub fn build(b: *std.Build) !void { }); cimgui.installHeader(copy.getDirectory().path(b, "cimgui.h"), "cimgui.h"); cimgui.installHeader(copy.getDirectory().path(b, "cimgui_impl.h"), "cimgui_impl.h"); - - // todo separate impls into different shared libraries for easier linkage - cimgui.linkSystemLibrary2("glfw3", .{ - .needed = true, - .preferred_link_mode = .static, - .use_pkg_config = .force, - }); + glfw_util.link(glfw_dep, cimgui); b.installArtifact(cimgui); diff --git a/cimgui/build.zig.zon b/cimgui/build.zig.zon index 935c753..e14b49c 100644 --- a/cimgui/build.zig.zon +++ b/cimgui/build.zig.zon @@ -11,6 +11,9 @@ .url = "https://github.com/ocornut/imgui/archive/refs/tags/v1.90.8-docking.tar.gz", .hash = "122065151b97161e25abb71c9df2fd9fba42aaca8c33d689a480b883d82411c8fabe", }, + .glfw = .{ + .path = "../glfw", + } }, .paths = .{ diff --git a/glfw/build.zig b/glfw/build.zig new file mode 100644 index 0000000..f399342 --- /dev/null +++ b/glfw/build.zig @@ -0,0 +1,82 @@ +const std = @import("std"); + +pub fn link(dep: *std.Build.Dependency, compile: *std.Build.Step.Compile) void { + compile.step.dependOn(dep.builder.getInstallStep()); + compile.linkLibC(); + compile.root_module.addLibraryPath(.{ .cwd_relative = dep.builder.getInstallPath(.lib, "") }); + compile.root_module.addIncludePath(.{ .cwd_relative = dep.builder.getInstallPath(.header, "") }); + compile.root_module.addRPathSpecial("$ORIGIN/../lib"); + + compile.root_module.linkSystemLibrary("glfw", .{}); + compile.root_module.linkSystemLibrary("rt", .{}); + compile.root_module.linkSystemLibrary("m", .{}); + compile.root_module.linkSystemLibrary("dl", .{}); +} + +pub fn install(dep: *std.Build.Dependency, owner: *std.Build) void { + const install_libs = owner.addInstallDirectory(.{ + .include_extensions = &.{ ".so", ".dll" }, + .install_subdir = "", + .install_dir = .lib, + .source_dir = .{ + .cwd_relative = dep.builder.getInstallPath(.lib, ""), + }, + }); + install_libs.step.dependOn(dep.builder.getInstallStep()); + owner.getInstallStep().dependOn(&install_libs.step); +} + +pub fn build(b: *std.Build) void { + // todo target into toolchain file + // const target = b.standardTargetOptions(.{}); + const optimize = b.standardOptimizeOption(.{}); + + const cmake = b.findProgram(&.{"cmake"}, &.{}) catch @panic("missing cmake"); + + const cmake_build_type = switch (optimize) { + .ReleaseFast => "Release", + .ReleaseSafe => "RelWithDebInfo", + .ReleaseSmall => "MinSizeRel", + .Debug => "Debug", + }; + + const config_step = b.addSystemCommand(&.{ + cmake, + "-DBUILD_SHARED_LIBS=ON", + b.fmt("-DCMAKE_BUILD_TYPE={s}", .{cmake_build_type}), + b.fmt("-DCMAKE_INSTALL_PREFIX={s}", 
.{b.getInstallPath(.prefix, "")}), + "-DGLFW_BUILD_DOCS=OFF", + "-DGLFW_BUILD_EXAMPLES=OFF", + "-DGLFW_BUILD_TESTS=OFF", + "-DGLFW_BUILD_X11=ON", // todo arg + "-DGLFW_BUILD_WAYLAND=OFF", // todo arg + "-DGLFW_INSTALL=ON", + }); + // config_step.setEnvironmentVariable("CC", b.fmt("{s} cc", .{b.graph.zig_exe})); + // config_step.setEnvironmentVariable("CXX", b.fmt("{s} c++", .{b.graph.zig_exe})); + config_step.addArg("-S"); + config_step.addDirectoryArg(b.dependency("glfw_real", .{}).path("")); + config_step.addArg("-B"); + const build_dir = config_step.addOutputDirectoryArg("glfw_build"); + + _ = config_step.captureStdOut(); + config_step.has_side_effects = true; + + const build_step = b.addSystemCommand(&.{cmake}); + build_step.step.dependOn(&config_step.step); + build_step.addArg("--build"); + build_step.addDirectoryArg(build_dir); + + _ = build_step.captureStdOut(); + build_step.has_side_effects = true; + + const install_step = b.addSystemCommand(&.{cmake}); + install_step.step.dependOn(&build_step.step); + install_step.addArg("--install"); + install_step.addDirectoryArg(build_dir); + + _ = install_step.captureStdOut(); + install_step.has_side_effects = true; + + b.getInstallStep().dependOn(&install_step.step); +} diff --git a/glfw/build.zig.zon b/glfw/build.zig.zon new file mode 100644 index 0000000..e5e7776 --- /dev/null +++ b/glfw/build.zig.zon @@ -0,0 +1,14 @@ +.{ + .name = "glfw", + .version = "3.4.0", + .dependencies = .{ + .glfw_real = .{ + .url = "https://github.com/glfw/glfw/releases/download/3.4/glfw-3.4.zip", + .hash = "1220625fa7ce79733c6889844cb02ea1f6e4b81b46a3fabacec181714879947f4abd", + }, + }, + .paths = .{ + "build.zig", + "build.zig.zon", + }, +} From d6fb4856010fe17d3ef30814d03aeec3a2494682 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Wed, 3 Jul 2024 11:34:16 -0400 Subject: [PATCH 071/113] Revert "build glfw" This reverts commit f0e7be542b7b77d4f3b144ea649aa0741953bc86. 
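
Reverting returns the build to linking a system-provided glfw3 through pkg-config
instead of building GLFW from source with CMake. As a minimal sketch of the restored
approach (assuming PKG_CONFIG_PATH points at a prefix containing glfw3.pc, e.g.
~/.local/lib/pkgconfig/ as noted in build.zig), the executable is linked like this:

    // link the system glfw3 via pkg-config (requires PKG_CONFIG_PATH to be set)
    exe.linkSystemLibrary2("glfw3", .{
        .needed = true,
        .preferred_link_mode = .static,
        .use_pkg_config = .force,
    });
    exe.linkLibC();
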
--- build.zig | 70 ++++++++++++++++++++++++++++++++----- build.zig.zon | 7 ++-- cimgui/build.zig | 10 ++++-- cimgui/build.zig.zon | 3 -- glfw/build.zig | 82 -------------------------------------------- glfw/build.zig.zon | 14 -------- 6 files changed, 71 insertions(+), 115 deletions(-) delete mode 100644 glfw/build.zig delete mode 100644 glfw/build.zig.zon diff --git a/build.zig b/build.zig index b2f4fb4..c6d8fbb 100644 --- a/build.zig +++ b/build.zig @@ -1,30 +1,25 @@ const std = @import("std"); const vkgen = @import("vulkan-zig"); -const glfw_util = @import("glfw"); pub fn build(b: *std.Build) void { const target = b.standardTargetOptions(.{}); - const optimize = b.standardOptimizeOption(.{}); - const glfw_dep = b.dependency("glfw", .{}); - glfw_util.install(glfw_dep, b); + const optimize = b.standardOptimizeOption(.{}); const vk = b.dependency("vulkan-zig", .{ .registry = @as([]const u8, b.pathFromRoot("reg/vk.xml")), }); + const vkmod = vk.module("vulkan-zig"); const cimgui = b.dependency("cimgui", .{}); const exe = b.addExecutable(.{ - .name = "zig-glfw-vulkan", + .name = "scratchzig", .root_source_file = b.path("src/main.zig"), .target = target, .optimize = optimize, }); - exe.root_module.addImport("vk", vk.module("vulkan-zig")); exe.root_module.addImport("cimgui", cimgui.module("cimgui")); - glfw_util.link(glfw_dep, exe); - b.installArtifact(exe); const shaders = vkgen.ShaderCompileStep.create( b, @@ -35,6 +30,18 @@ pub fn build(b: *std.Build) void { shaders.add("triangle_frag", "src/shaders/triangle.frag", .{}); exe.root_module.addImport("shaders", shaders.getModule()); + // this requires PKG_CONFIG_PATH to be set. something like: + // ~/.local/lib/pkgconfig/ + exe.linkSystemLibrary2("glfw3", .{ + .needed = true, + .preferred_link_mode = .static, + .use_pkg_config = .force, + }); + exe.linkLibC(); + exe.root_module.addImport("vk", vkmod); + + b.installArtifact(exe); + const run_cmd = b.addRunArtifact(exe); run_cmd.step.dependOn(b.getInstallStep()); @@ -44,4 +51,51 @@ pub fn build(b: *std.Build) void { const run_step = b.step("run", "Run the app"); run_step.dependOn(&run_cmd.step); + + const exe_unit_tests = b.addTest(.{ + .root_source_file = b.path("src/main.zig"), + .target = target, + .optimize = optimize, + }); + exe_unit_tests.linkSystemLibrary2("glfw3", .{ + .needed = true, + .preferred_link_mode = .static, + .use_pkg_config = .force, + }); + exe_unit_tests.linkLibC(); + const run_exe_unit_tests = b.addRunArtifact(exe_unit_tests); + + const dsa_unit_tests = b.addTest(.{ + .name = "dsa.zig tests", + .root_source_file = b.path("src/dsa.zig"), + .target = target, + .optimize = optimize, + }); + const run_dsa_unit_tests = b.addRunArtifact(dsa_unit_tests); + + const test_step = b.step("test", "Run unit tests"); + test_step.dependOn(&run_exe_unit_tests.step); + test_step.dependOn(&run_dsa_unit_tests.step); + + const inspect = b.addExecutable(.{ + .name = "vkinspect", + .root_source_file = b.path("src/inspect.zig"), + .target = target, + .optimize = optimize, + }); + inspect.linkSystemLibrary2("vulkan", .{ + .needed = true, + .preferred_link_mode = .dynamic, + }); + inspect.linkSystemLibrary2("glfw3", .{ + .needed = true, + .preferred_link_mode = .static, + .use_pkg_config = .force, + }); + exe_unit_tests.linkLibC(); + inspect.root_module.addImport("vk", vkmod); + inspect.linkLibC(); + const run_inspect = b.addRunArtifact(inspect); + const inspect_step = b.step("vki", "Vulkan Inspect"); + inspect_step.dependOn(&run_inspect.step); } diff --git a/build.zig.zon b/build.zig.zon index 
eb9a305..6942b2e 100644 --- a/build.zig.zon +++ b/build.zig.zon @@ -1,5 +1,5 @@ .{ - .name = "zig-glfw-vulkan", + .name = "scratchzig", .version = "0.0.0", .dependencies = .{ @@ -7,11 +7,8 @@ .url = "https://github.com/Snektron/vulkan-zig/archive/f2c2e0ff80374563357cc4fe72bf7d8a2c956824.tar.gz", .hash = "1220cf0972c6fe05437c1a8689b955084385eb7ca1f8c14010d49ca5a89570a5d90d", }, - .glfw = .{ - .path = "glfw", - }, .cimgui = .{ - .path = "cimgui", + .path="cimgui", }, }, diff --git a/cimgui/build.zig b/cimgui/build.zig index 19072a6..11349b1 100644 --- a/cimgui/build.zig +++ b/cimgui/build.zig @@ -1,6 +1,5 @@ const std = @import("std"); const vkgen = @import("vulkan-zig"); -const glfw_util = @import("glfw"); pub fn build(b: *std.Build) !void { const target = b.standardTargetOptions(.{}); @@ -8,7 +7,6 @@ pub fn build(b: *std.Build) !void { const imgui_dep = b.dependency("imgui", .{}); const cimgui_dep = b.dependency("cimgui", .{}); - const glfw_dep = b.dependency("glfw", .{}); const luajit = try b.findProgram(&.{"luajit"}, &.{}); @@ -76,7 +74,13 @@ pub fn build(b: *std.Build) !void { }); cimgui.installHeader(copy.getDirectory().path(b, "cimgui.h"), "cimgui.h"); cimgui.installHeader(copy.getDirectory().path(b, "cimgui_impl.h"), "cimgui_impl.h"); - glfw_util.link(glfw_dep, cimgui); + + // todo separate impls into different shared libraries for easier linkage + cimgui.linkSystemLibrary2("glfw3", .{ + .needed = true, + .preferred_link_mode = .static, + .use_pkg_config = .force, + }); b.installArtifact(cimgui); diff --git a/cimgui/build.zig.zon b/cimgui/build.zig.zon index e14b49c..935c753 100644 --- a/cimgui/build.zig.zon +++ b/cimgui/build.zig.zon @@ -11,9 +11,6 @@ .url = "https://github.com/ocornut/imgui/archive/refs/tags/v1.90.8-docking.tar.gz", .hash = "122065151b97161e25abb71c9df2fd9fba42aaca8c33d689a480b883d82411c8fabe", }, - .glfw = .{ - .path = "../glfw", - } }, .paths = .{ diff --git a/glfw/build.zig b/glfw/build.zig deleted file mode 100644 index f399342..0000000 --- a/glfw/build.zig +++ /dev/null @@ -1,82 +0,0 @@ -const std = @import("std"); - -pub fn link(dep: *std.Build.Dependency, compile: *std.Build.Step.Compile) void { - compile.step.dependOn(dep.builder.getInstallStep()); - compile.linkLibC(); - compile.root_module.addLibraryPath(.{ .cwd_relative = dep.builder.getInstallPath(.lib, "") }); - compile.root_module.addIncludePath(.{ .cwd_relative = dep.builder.getInstallPath(.header, "") }); - compile.root_module.addRPathSpecial("$ORIGIN/../lib"); - - compile.root_module.linkSystemLibrary("glfw", .{}); - compile.root_module.linkSystemLibrary("rt", .{}); - compile.root_module.linkSystemLibrary("m", .{}); - compile.root_module.linkSystemLibrary("dl", .{}); -} - -pub fn install(dep: *std.Build.Dependency, owner: *std.Build) void { - const install_libs = owner.addInstallDirectory(.{ - .include_extensions = &.{ ".so", ".dll" }, - .install_subdir = "", - .install_dir = .lib, - .source_dir = .{ - .cwd_relative = dep.builder.getInstallPath(.lib, ""), - }, - }); - install_libs.step.dependOn(dep.builder.getInstallStep()); - owner.getInstallStep().dependOn(&install_libs.step); -} - -pub fn build(b: *std.Build) void { - // todo target into toolchain file - // const target = b.standardTargetOptions(.{}); - const optimize = b.standardOptimizeOption(.{}); - - const cmake = b.findProgram(&.{"cmake"}, &.{}) catch @panic("missing cmake"); - - const cmake_build_type = switch (optimize) { - .ReleaseFast => "Release", - .ReleaseSafe => "RelWithDebInfo", - .ReleaseSmall => "MinSizeRel", - .Debug => "Debug", 
- }; - - const config_step = b.addSystemCommand(&.{ - cmake, - "-DBUILD_SHARED_LIBS=ON", - b.fmt("-DCMAKE_BUILD_TYPE={s}", .{cmake_build_type}), - b.fmt("-DCMAKE_INSTALL_PREFIX={s}", .{b.getInstallPath(.prefix, "")}), - "-DGLFW_BUILD_DOCS=OFF", - "-DGLFW_BUILD_EXAMPLES=OFF", - "-DGLFW_BUILD_TESTS=OFF", - "-DGLFW_BUILD_X11=ON", // todo arg - "-DGLFW_BUILD_WAYLAND=OFF", // todo arg - "-DGLFW_INSTALL=ON", - }); - // config_step.setEnvironmentVariable("CC", b.fmt("{s} cc", .{b.graph.zig_exe})); - // config_step.setEnvironmentVariable("CXX", b.fmt("{s} c++", .{b.graph.zig_exe})); - config_step.addArg("-S"); - config_step.addDirectoryArg(b.dependency("glfw_real", .{}).path("")); - config_step.addArg("-B"); - const build_dir = config_step.addOutputDirectoryArg("glfw_build"); - - _ = config_step.captureStdOut(); - config_step.has_side_effects = true; - - const build_step = b.addSystemCommand(&.{cmake}); - build_step.step.dependOn(&config_step.step); - build_step.addArg("--build"); - build_step.addDirectoryArg(build_dir); - - _ = build_step.captureStdOut(); - build_step.has_side_effects = true; - - const install_step = b.addSystemCommand(&.{cmake}); - install_step.step.dependOn(&build_step.step); - install_step.addArg("--install"); - install_step.addDirectoryArg(build_dir); - - _ = install_step.captureStdOut(); - install_step.has_side_effects = true; - - b.getInstallStep().dependOn(&install_step.step); -} diff --git a/glfw/build.zig.zon b/glfw/build.zig.zon deleted file mode 100644 index e5e7776..0000000 --- a/glfw/build.zig.zon +++ /dev/null @@ -1,14 +0,0 @@ -.{ - .name = "glfw", - .version = "3.4.0", - .dependencies = .{ - .glfw_real = .{ - .url = "https://github.com/glfw/glfw/releases/download/3.4/glfw-3.4.zip", - .hash = "1220625fa7ce79733c6889844cb02ea1f6e4b81b46a3fabacec181714879947f4abd", - }, - }, - .paths = .{ - "build.zig", - "build.zig.zon", - }, -} From 1c54bdb1ad493c0348b50111b8c4bdd81e224041 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Wed, 3 Jul 2024 13:13:19 -0400 Subject: [PATCH 072/113] static link --- build.zig | 2 -- cimgui/build.zig | 3 +-- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/build.zig b/build.zig index c6d8fbb..27f0961 100644 --- a/build.zig +++ b/build.zig @@ -30,8 +30,6 @@ pub fn build(b: *std.Build) void { shaders.add("triangle_frag", "src/shaders/triangle.frag", .{}); exe.root_module.addImport("shaders", shaders.getModule()); - // this requires PKG_CONFIG_PATH to be set. 
something like: - // ~/.local/lib/pkgconfig/ exe.linkSystemLibrary2("glfw3", .{ .needed = true, .preferred_link_mode = .static, diff --git a/cimgui/build.zig b/cimgui/build.zig index 11349b1..60d88df 100644 --- a/cimgui/build.zig +++ b/cimgui/build.zig @@ -43,7 +43,7 @@ pub fn build(b: *std.Build) !void { _ = copy.addCopyFile(cimgui_dep.path("cimgui.cpp"), "cimgui.cpp"); _ = copy.addCopyFile(cimgui_dep.path("generator/output/cimgui_impl.h"), "cimgui_impl.h"); - const cimgui = b.addSharedLibrary(.{ + const cimgui = b.addStaticLibrary(.{ .name = "cimgui", .target = target, .optimize = optimize, @@ -75,7 +75,6 @@ pub fn build(b: *std.Build) !void { cimgui.installHeader(copy.getDirectory().path(b, "cimgui.h"), "cimgui.h"); cimgui.installHeader(copy.getDirectory().path(b, "cimgui_impl.h"), "cimgui_impl.h"); - // todo separate impls into different shared libraries for easier linkage cimgui.linkSystemLibrary2("glfw3", .{ .needed = true, .preferred_link_mode = .static, From ac95264f0896f726ebc1912b286e018b5ff1812b Mon Sep 17 00:00:00 2001 From: David Allemang Date: Wed, 3 Jul 2024 13:50:55 -0400 Subject: [PATCH 073/113] remove vestigial build commands --- build.zig | 31 ------------------------------- 1 file changed, 31 deletions(-) diff --git a/build.zig b/build.zig index 27f0961..d22f07e 100644 --- a/build.zig +++ b/build.zig @@ -63,37 +63,6 @@ pub fn build(b: *std.Build) void { exe_unit_tests.linkLibC(); const run_exe_unit_tests = b.addRunArtifact(exe_unit_tests); - const dsa_unit_tests = b.addTest(.{ - .name = "dsa.zig tests", - .root_source_file = b.path("src/dsa.zig"), - .target = target, - .optimize = optimize, - }); - const run_dsa_unit_tests = b.addRunArtifact(dsa_unit_tests); - const test_step = b.step("test", "Run unit tests"); test_step.dependOn(&run_exe_unit_tests.step); - test_step.dependOn(&run_dsa_unit_tests.step); - - const inspect = b.addExecutable(.{ - .name = "vkinspect", - .root_source_file = b.path("src/inspect.zig"), - .target = target, - .optimize = optimize, - }); - inspect.linkSystemLibrary2("vulkan", .{ - .needed = true, - .preferred_link_mode = .dynamic, - }); - inspect.linkSystemLibrary2("glfw3", .{ - .needed = true, - .preferred_link_mode = .static, - .use_pkg_config = .force, - }); - exe_unit_tests.linkLibC(); - inspect.root_module.addImport("vk", vkmod); - inspect.linkLibC(); - const run_inspect = b.addRunArtifact(inspect); - const inspect_step = b.step("vki", "Vulkan Inspect"); - inspect_step.dependOn(&run_inspect.step); } From 61ce4c16d8648d6399963888ee3ba0ec49e117de Mon Sep 17 00:00:00 2001 From: David Allemang Date: Fri, 5 Jul 2024 09:43:37 -0400 Subject: [PATCH 074/113] create pipeline --- src/main.zig | 111 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 111 insertions(+) diff --git a/src/main.zig b/src/main.zig index 1f6ad39..eb4d34c 100644 --- a/src/main.zig +++ b/src/main.zig @@ -232,6 +232,117 @@ pub fn main() !void { _ = im.c.ImGui_ImplVulkan_CreateFontsTexture(); defer im.c.ImGui_ImplVulkan_Shutdown(); + const vert = try au.D.createShaderModule(&vk.ShaderModuleCreateInfo{ + .code_size = shaders.triangle_vert.len, + .p_code = @ptrCast(&shaders.triangle_vert), + }, null); + defer au.D.destroyShaderModule(vert, null); + + const frag = try au.D.createShaderModule(&vk.ShaderModuleCreateInfo{ + .code_size = shaders.triangle_frag.len, + .p_code = @ptrCast(&shaders.triangle_frag), + }, null); + defer au.D.destroyShaderModule(frag, null); + + const cache = try au.D.createPipelineCache(&vk.PipelineCacheCreateInfo{}, null); + defer 
au.D.destroyPipelineCache(cache, null); + + const layout = try au.D.createPipelineLayout(&vk.PipelineLayoutCreateInfo{ + // todo + }, null); + defer au.D.destroyPipelineLayout(layout, null); + + const gpci: vk.GraphicsPipelineCreateInfo = .{ + .stage_count = 2, + .p_stages = &.{ + vk.PipelineShaderStageCreateInfo{ .stage = .{ .vertex_bit = true }, .module = vert, .p_name = "main" }, + vk.PipelineShaderStageCreateInfo{ .stage = .{ .fragment_bit = true }, .module = frag, .p_name = "main" }, + }, + .p_vertex_input_state = &vk.PipelineVertexInputStateCreateInfo{ + .vertex_binding_description_count = 1, + .p_vertex_binding_descriptions = @ptrCast(&Vertex.binding_description), + .vertex_attribute_description_count = Vertex.attribute_description.len, + .p_vertex_attribute_descriptions = &Vertex.attribute_description, + }, + .p_input_assembly_state = &vk.PipelineInputAssemblyStateCreateInfo{ + .topology = .triangle_list, + .primitive_restart_enable = vk.FALSE, + }, + .p_tessellation_state = null, + .p_viewport_state = &vk.PipelineViewportStateCreateInfo{ + .viewport_count = 1, + .scissor_count = 1, + }, + .p_rasterization_state = &vk.PipelineRasterizationStateCreateInfo{ + .depth_clamp_enable = vk.FALSE, + .rasterizer_discard_enable = vk.FALSE, + .polygon_mode = .fill, + .cull_mode = .{ .back_bit = true }, + .front_face = .counter_clockwise, + .depth_bias_enable = vk.FALSE, + .depth_bias_constant_factor = 0.0, + .depth_bias_clamp = 0.0, + .depth_bias_slope_factor = 0.0, + .line_width = 1.0, + }, + .p_multisample_state = &vk.PipelineMultisampleStateCreateInfo{ + .rasterization_samples = .{ .@"1_bit" = true }, + .sample_shading_enable = vk.FALSE, + .min_sample_shading = 1, + .alpha_to_coverage_enable = vk.FALSE, + .alpha_to_one_enable = vk.FALSE, + }, + .p_depth_stencil_state = null, + .p_color_blend_state = &vk.PipelineColorBlendStateCreateInfo{ + .logic_op_enable = vk.FALSE, + .logic_op = .copy, + .blend_constants = [_]f32{ 0, 0, 0, 0 }, + .attachment_count = 1, + .p_attachments = &.{ + vk.PipelineColorBlendAttachmentState{ + .blend_enable = vk.FALSE, + .color_blend_op = .add, + .src_color_blend_factor = .one, + .dst_color_blend_factor = .zero, + .alpha_blend_op = .add, + .src_alpha_blend_factor = .one, + .dst_alpha_blend_factor = .zero, + .color_write_mask = .{ .r_bit = true, .g_bit = true, .b_bit = true, .a_bit = true }, + }, + }, + }, + .p_dynamic_state = &vk.PipelineDynamicStateCreateInfo{ + .flags = .{}, + .dynamic_state_count = 2, + .p_dynamic_states = &.{ + .viewport, + .scissor, + }, + }, + .layout = layout, + .render_pass = .null_handle, // set via dynamic rendering + .subpass = 0, + .base_pipeline_handle = .null_handle, + .base_pipeline_index = -1, + .p_next = &vk.PipelineRenderingCreateInfo{ + .color_attachment_count = 1, + .p_color_attachment_formats = &.{au.device_config.format.format}, + .depth_attachment_format = .undefined, + .stencil_attachment_format = .undefined, + .view_mask = 0, + }, + }; + + var pipeline: vk.Pipeline = undefined; + _ = try au.D.createGraphicsPipelines( + cache, + 1, + @ptrCast(&gpci), + null, + @ptrCast(&pipeline), + ); + defer au.D.destroyPipeline(pipeline, null); + while (!au.W.should_close()) { im.c.ImGui_ImplGlfw_NewFrame(); im.c.ImGui_ImplVulkan_NewFrame(); From 3b5eb6efaba869eaa70e29e9f0e266a40df87d3f Mon Sep 17 00:00:00 2001 From: David Allemang Date: Fri, 5 Jul 2024 15:26:20 -0400 Subject: [PATCH 075/113] render directly from mapped vertex buffer --- src/au.zig | 1 + src/au/VkAllocator.zig | 43 ++++++++++++++++++++++++ src/main.zig | 75 
++++++++++++++++++++++++++++++++---------- 3 files changed, 102 insertions(+), 17 deletions(-) create mode 100644 src/au/VkAllocator.zig diff --git a/src/au.zig b/src/au.zig index 79e1a37..658fc98 100644 --- a/src/au.zig +++ b/src/au.zig @@ -7,6 +7,7 @@ const c = @import("c.zig"); pub const Bus = @import("au/Bus.zig"); pub const SwapChain = @import("au/SwapChain.zig"); pub const Flights = @import("au/flights.zig").Flights; +pub const VkAllocator = @import("au/VkAllocator.zig"); pub const use_debug_messenger = switch (builtin.mode) { .Debug, .ReleaseSafe => true, diff --git a/src/au/VkAllocator.zig b/src/au/VkAllocator.zig new file mode 100644 index 0000000..0533c65 --- /dev/null +++ b/src/au/VkAllocator.zig @@ -0,0 +1,43 @@ +const std = @import("std"); +const vk = @import("vk"); +const au = @import("../au.zig"); + +const Self = @This(); + +props: vk.PhysicalDeviceMemoryProperties, + +pub fn init() Self { + return .{ + .props = au.I.getPhysicalDeviceMemoryProperties(au.device_config.pdev), + }; +} + +pub fn heaps(self: Self) []const vk.MemoryHeap { + return self.props.memory_heaps[0..self.props.memory_heap_count]; +} + +pub fn types(self: Self) []const vk.MemoryType { + return self.props.memory_types[0..self.props.memory_type_count]; +} + +pub fn alloc(self: Self, reqs: vk.MemoryRequirements, flags: vk.MemoryPropertyFlags) !vk.DeviceMemory { + const memory_type_bits: std.bit_set.IntegerBitSet(vk.MAX_MEMORY_TYPES) = .{ + .mask = reqs.memory_type_bits, + }; + + for (self.types(), 0..) |typ, idx| { + if (!memory_type_bits.isSet(idx)) continue; + if (!typ.property_flags.contains(flags)) continue; + + return try au.D.allocateMemory(&.{ + .allocation_size = reqs.size, + .memory_type_index = @intCast(idx), + }, null); + } + + return error.NoSuitableMemoryType; +} + +pub fn free(_: Self, memory: vk.DeviceMemory) void { + au.D.freeMemory(memory, null); +} diff --git a/src/main.zig b/src/main.zig index eb4d34c..af3c540 100644 --- a/src/main.zig +++ b/src/main.zig @@ -64,6 +64,9 @@ const Frame = struct { image: vk.Image, view: vk.ImageView, scissor: vk.Rect2D, + pipeline: vk.Pipeline, + vertex_buffer: vk.Buffer, + index_buffer: vk.Buffer, ) !void { _ = self; @@ -107,7 +110,7 @@ const Frame = struct { .resolve_image_layout = .undefined, .load_op = .clear, .store_op = .store, - .clear_value = .{ .color = .{ .float_32 = .{ 1, 0, 0, 1 } } }, + .clear_value = .{ .color = .{ .float_32 = .{ 0, 0, 0, 1 } } }, }}, }; @@ -123,6 +126,11 @@ const Frame = struct { cmd.beginRendering(&info); + cmd.bindPipeline(.graphics, pipeline); + cmd.bindVertexBuffers(0, 1, &.{vertex_buffer}, &.{0}); + cmd.bindIndexBuffer(index_buffer, 0, .uint16); + cmd.drawIndexed(indices.len, 1, 0, 0, 0); + // todo // vkd.cmdBindPipeline(cmdbuf, .graphics, pipeline); // const offset = [_]vk.DeviceSize{0}; @@ -184,9 +192,6 @@ pub fn main() !void { const ctx = im.c.igCreateContext(null) orelse return error.igCreateContextFailed; defer im.c.igDestroyContext(ctx); - // _ = im.c.ImGui_ImplGlfw_InitForOther(@ptrCast(au.W.handle), true); - // defer im.c.ImGui_ImplGlfw_Shutdown(); - const descriptorPool = try au.D.createDescriptorPool(&vk.DescriptorPoolCreateInfo{ .flags = .{ .free_descriptor_set_bit = true }, .pool_size_count = 1, @@ -247,11 +252,56 @@ pub fn main() !void { const cache = try au.D.createPipelineCache(&vk.PipelineCacheCreateInfo{}, null); defer au.D.destroyPipelineCache(cache, null); + // for descriptor sets const layout = try au.D.createPipelineLayout(&vk.PipelineLayoutCreateInfo{ - // todo + .flags = .{}, + .set_layout_count = 0, + 
.p_set_layouts = null, + .push_constant_range_count = 0, + .p_push_constant_ranges = null, }, null); defer au.D.destroyPipelineLayout(layout, null); + const vkalloc = au.VkAllocator.init(); + + const vertex_buffer = try au.D.createBuffer(&vk.BufferCreateInfo{ + .size = @sizeOf(@TypeOf(vertices)), + .usage = .{ .vertex_buffer_bit = true }, + .sharing_mode = .exclusive, + }, null); + defer au.D.destroyBuffer(vertex_buffer, null); + const vertex_memory = try vkalloc.alloc( + au.D.getBufferMemoryRequirements(vertex_buffer), + .{ .host_visible_bit = true, .host_coherent_bit = true }, + ); + defer vkalloc.free(vertex_memory); + try au.D.bindBufferMemory(vertex_buffer, vertex_memory, 0); + + const vertex_data: [*]Vertex = @ptrCast(@alignCast(try au.D.mapMemory(vertex_memory, 0, vk.WHOLE_SIZE, .{}))); + defer au.D.unmapMemory(vertex_memory); + + @memcpy(vertex_data[0..vertices.len], &vertices); + + const index_buffer = try au.D.createBuffer(&vk.BufferCreateInfo{ + .size = @sizeOf(@TypeOf(indices)), + .usage = .{ .index_buffer_bit = true }, + .sharing_mode = .exclusive, + }, null); + defer au.D.destroyBuffer(index_buffer, null); + const index_memory = try vkalloc.alloc( + au.D.getBufferMemoryRequirements(index_buffer), + .{ .host_visible_bit = true, .host_coherent_bit = true }, + ); + defer vkalloc.free(index_memory); + try au.D.bindBufferMemory(index_buffer, index_memory, 0); + + const index_data: [*]Index = @ptrCast(@alignCast(try au.D.mapMemory(index_memory, 0, vk.WHOLE_SIZE, .{}))); + defer au.D.unmapMemory(index_memory); + + @memcpy(index_data[0..indices.len], &indices); + + try au.D.deviceWaitIdle(); + const gpci: vk.GraphicsPipelineCreateInfo = .{ .stage_count = 2, .p_stages = &.{ @@ -390,6 +440,9 @@ pub fn main() !void { image, view, vk.Rect2D{ .offset = .{ .x = 0, .y = 0 }, .extent = sc.cinfo.image_extent }, + pipeline, + vertex_buffer, + index_buffer, ); try cmd.endCommandBuffer(); @@ -427,18 +480,6 @@ pub fn main() !void { try au.D.deviceWaitIdle(); - // const pipeline_layout = try dev.vkd.createPipelineLayout(dev.dev, &.{ - // .flags = .{}, - // .set_layout_count = 0, - // .p_set_layouts = undefined, - // .push_constant_range_count = 0, - // .p_push_constant_ranges = undefined, - // }, null); - // defer dev.vkd.destroyPipelineLayout(dev.dev, pipeline_layout, null); - // - // const pipeline = try createPipeline(dev.dev, pipeline_layout, dev.format, dev.vkd); - // defer dev.vkd.destroyPipeline(dev.dev, pipeline, null); - // const vertex_buffer = try dev.vkd.createBuffer(dev.dev, &.{ // .size = @sizeOf(@TypeOf(vertices)), // .usage = .{ .transfer_dst_bit = true, .vertex_buffer_bit = true }, From a89ee99fd4b390e48ccf73f6d4478647e2cb83cd Mon Sep 17 00:00:00 2001 From: David Allemang Date: Fri, 5 Jul 2024 15:30:49 -0400 Subject: [PATCH 076/113] remove vestigial copy code --- src/main.zig | 155 --------------------------------------------------- 1 file changed, 155 deletions(-) diff --git a/src/main.zig b/src/main.zig index af3c540..38e6ab9 100644 --- a/src/main.zig +++ b/src/main.zig @@ -131,13 +131,6 @@ const Frame = struct { cmd.bindIndexBuffer(index_buffer, 0, .uint16); cmd.drawIndexed(indices.len, 1, 0, 0, 0); - // todo - // vkd.cmdBindPipeline(cmdbuf, .graphics, pipeline); - // const offset = [_]vk.DeviceSize{0}; - // vkd.cmdBindVertexBuffers(cmdbuf, 0, 1, @ptrCast(&vertex_buffer), &offset); - // vkd.cmdBindIndexBuffer(cmdbuf, index_buffer, 0, .uint16); - // vkd.cmdDrawIndexed(cmdbuf, indices.len, 1, 0, 0, 0); - im.c.ImGui_ImplVulkan_RenderDrawData(im.c.igGetDrawData(), 
@ptrFromInt(@intFromEnum(cmd.handle)), null); cmd.endRendering(); @@ -479,152 +472,4 @@ pub fn main() !void { } try au.D.deviceWaitIdle(); - - // const vertex_buffer = try dev.vkd.createBuffer(dev.dev, &.{ - // .size = @sizeOf(@TypeOf(vertices)), - // .usage = .{ .transfer_dst_bit = true, .vertex_buffer_bit = true }, - // .sharing_mode = .exclusive, - // }, null); - // defer dev.vkd.destroyBuffer(dev.dev, vertex_buffer, null); - // const vertex_mem_reqs = dev.vkd.getBufferMemoryRequirements(dev.dev, vertex_buffer); - // const vertex_memory = try device_local.alloc(dev.dev, dev.vkd, vertex_mem_reqs, .{ .device_local_bit = true }); - // defer dev.vkd.freeMemory(dev.dev, vertex_memory, null); - // try dev.vkd.bindBufferMemory(dev.dev, vertex_buffer, vertex_memory, 0); - // try gfx.uploadData(Vertex, dev.pdev, inst.vki, dev.dev, dev.vkd, dev.queue, dev.pool, vertex_buffer, &vertices); - - // const index_buffer = try dev.vkd.createBuffer(dev.dev, &.{ - // .size = @sizeOf(@TypeOf(indices)), - // .usage = .{ .transfer_dst_bit = true, .index_buffer_bit = true }, - // .sharing_mode = .exclusive, - // }, null); - // defer dev.vkd.destroyBuffer(dev.dev, index_buffer, null); - // const index_mem_reqs = dev.vkd.getBufferMemoryRequirements(dev.dev, index_buffer); - // const index_memory = try device_local.alloc(dev.dev, dev.vkd, index_mem_reqs, .{ .device_local_bit = true }); - // defer dev.vkd.freeMemory(dev.dev, index_memory, null); - // try dev.vkd.bindBufferMemory(dev.dev, index_buffer, index_memory, 0); - // try gfx.uploadData(Index, dev.pdev, inst.vki, dev.dev, dev.vkd, dev.queue, dev.pool, index_buffer, &indices); } - -// fn createPipeline( -// dev: vk.Device, -// layout: vk.PipelineLayout, -// format: vk.SurfaceFormatKHR, -// vkd: gfx.Device.Wrapper, -// ) !vk.Pipeline { -// const vert = try vkd.createShaderModule(dev, &.{ -// .code_size = shaders.triangle_vert.len, -// .p_code = @as([*]const u32, @ptrCast(&shaders.triangle_vert)), -// }, null); -// defer vkd.destroyShaderModule(dev, vert, null); -// -// const frag = try vkd.createShaderModule(dev, &.{ -// .code_size = shaders.triangle_frag.len, -// .p_code = @as([*]const u32, @ptrCast(&shaders.triangle_frag)), -// }, null); -// defer vkd.destroyShaderModule(dev, frag, null); -// -// const pssci = [_]vk.PipelineShaderStageCreateInfo{ -// .{ -// .stage = .{ .vertex_bit = true }, -// .module = vert, -// .p_name = "main", -// }, -// .{ -// .stage = .{ .fragment_bit = true }, -// .module = frag, -// .p_name = "main", -// }, -// }; -// -// const color_blend_attachment_states = [_]vk.PipelineColorBlendAttachmentState{ -// vk.PipelineColorBlendAttachmentState{ -// .blend_enable = vk.FALSE, -// .src_color_blend_factor = .one, -// .dst_color_blend_factor = .zero, -// .color_blend_op = .add, -// .src_alpha_blend_factor = .one, -// .dst_alpha_blend_factor = .zero, -// .alpha_blend_op = .add, -// .color_write_mask = .{ .r_bit = true, .g_bit = true, .b_bit = true, .a_bit = true }, -// }, -// }; -// -// const dynamic_states = [_]vk.DynamicState{ -// .viewport, -// .scissor, -// }; -// -// const create_infos = [_]vk.GraphicsPipelineCreateInfo{ -// .{ -// .flags = .{}, -// .stage_count = @intCast(pssci.len), -// .p_stages = &pssci, -// .p_vertex_input_state = &vk.PipelineVertexInputStateCreateInfo{ -// .vertex_binding_description_count = 1, -// .p_vertex_binding_descriptions = @ptrCast(&Vertex.binding_description), -// .vertex_attribute_description_count = Vertex.attribute_description.len, -// .p_vertex_attribute_descriptions = &Vertex.attribute_description, 
-// }, -// .p_input_assembly_state = &vk.PipelineInputAssemblyStateCreateInfo{ -// .topology = .triangle_list, -// .primitive_restart_enable = vk.FALSE, -// }, -// .p_tessellation_state = null, -// .p_viewport_state = &vk.PipelineViewportStateCreateInfo{ -// .viewport_count = 1, -// .p_viewports = undefined, // set in createCommandBuffers with cmdSetViewport -// .scissor_count = 1, -// .p_scissors = undefined, // set in createCommandBuffers with cmdSetScissor -// }, -// .p_rasterization_state = &vk.PipelineRasterizationStateCreateInfo{ -// .depth_clamp_enable = vk.FALSE, -// .rasterizer_discard_enable = vk.FALSE, -// .polygon_mode = .fill, -// .cull_mode = .{ .back_bit = true }, -// .front_face = .counter_clockwise, -// .depth_bias_enable = vk.FALSE, -// .depth_bias_constant_factor = 0, -// .depth_bias_clamp = 0, -// .depth_bias_slope_factor = 0, -// .line_width = 1, -// }, -// .p_multisample_state = &vk.PipelineMultisampleStateCreateInfo{ -// .rasterization_samples = .{ .@"1_bit" = true }, -// .sample_shading_enable = vk.FALSE, -// .min_sample_shading = 1, -// .alpha_to_coverage_enable = vk.FALSE, -// .alpha_to_one_enable = vk.FALSE, -// }, -// .p_depth_stencil_state = null, -// .p_color_blend_state = &vk.PipelineColorBlendStateCreateInfo{ -// .logic_op_enable = vk.FALSE, -// .logic_op = .copy, -// .attachment_count = @intCast(color_blend_attachment_states.len), -// .p_attachments = &color_blend_attachment_states, -// .blend_constants = [_]f32{ 0, 0, 0, 0 }, -// }, -// .p_dynamic_state = &vk.PipelineDynamicStateCreateInfo{ -// .flags = .{}, -// .dynamic_state_count = @intCast(dynamic_states.len), -// .p_dynamic_states = &dynamic_states, -// }, -// .layout = layout, -// .render_pass = .null_handle, -// .subpass = 0, -// .base_pipeline_handle = .null_handle, -// .base_pipeline_index = -1, -// .p_next = &vk.PipelineRenderingCreateInfoKHR{ -// .color_attachment_count = 1, -// .p_color_attachment_formats = @ptrCast(&format), -// .depth_attachment_format = .undefined, -// .stencil_attachment_format = .undefined, -// .view_mask = 0, -// }, -// }, -// }; -// -// var pipelines: [create_infos.len]vk.Pipeline = undefined; -// _ = try vkd.createGraphicsPipelines(dev, .null_handle, @intCast(create_infos.len), &create_infos, null, &pipelines); -// std.debug.assert(pipelines.len == 1); -// return pipelines[0]; -// } From 03c099d1bb036b3076b3ab499d0d7f5ee28dc938 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Fri, 5 Jul 2024 17:22:31 -0400 Subject: [PATCH 077/113] update vertex data each frame --- src/main.zig | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/src/main.zig b/src/main.zig index 38e6ab9..d43f809 100644 --- a/src/main.zig +++ b/src/main.zig @@ -270,10 +270,9 @@ pub fn main() !void { defer vkalloc.free(vertex_memory); try au.D.bindBufferMemory(vertex_buffer, vertex_memory, 0); - const vertex_data: [*]Vertex = @ptrCast(@alignCast(try au.D.mapMemory(vertex_memory, 0, vk.WHOLE_SIZE, .{}))); + const vertex_data: *align(1) @TypeOf(vertices) = @ptrCast(try au.D.mapMemory(vertex_memory, 0, vk.WHOLE_SIZE, .{})); defer au.D.unmapMemory(vertex_memory); - - @memcpy(vertex_data[0..vertices.len], &vertices); + vertex_data.* = vertices; const index_buffer = try au.D.createBuffer(&vk.BufferCreateInfo{ .size = @sizeOf(@TypeOf(indices)), @@ -287,11 +286,9 @@ pub fn main() !void { ); defer vkalloc.free(index_memory); try au.D.bindBufferMemory(index_buffer, index_memory, 0); - - const index_data: [*]Index = @ptrCast(@alignCast(try au.D.mapMemory(index_memory, 0, 
vk.WHOLE_SIZE, .{}))); + const index_data: *align(1) @TypeOf(indices) = @ptrCast(try au.D.mapMemory(index_memory, 0, vk.WHOLE_SIZE, .{})); defer au.D.unmapMemory(index_memory); - - @memcpy(index_data[0..indices.len], &indices); + index_data.* = indices; try au.D.deviceWaitIdle(); @@ -386,6 +383,9 @@ pub fn main() !void { ); defer au.D.destroyPipeline(pipeline, null); + var prng = std.Random.Sfc64.init(std.crypto.random.int(u64)); + const rand = prng.random(); + while (!au.W.should_close()) { im.c.ImGui_ImplGlfw_NewFrame(); im.c.ImGui_ImplVulkan_NewFrame(); @@ -438,6 +438,12 @@ pub fn main() !void { index_buffer, ); + for (vertex_data) |*v| { + for (v.pos[0..2]) |*f| { + f.* += (rand.float(f32) - 0.5) * 0.01; + } + } + try cmd.endCommandBuffer(); au.Q.submit( From 6d15b8d2833bf3bd7fe7ece228cc7c143c1c3be7 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Fri, 5 Jul 2024 18:55:57 -0400 Subject: [PATCH 078/113] memory barrier: graphics read -> host write --- src/main.zig | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/src/main.zig b/src/main.zig index d43f809..625f2c9 100644 --- a/src/main.zig +++ b/src/main.zig @@ -135,6 +135,30 @@ const Frame = struct { cmd.endRendering(); + // vulkan implicitly ensures the host writes all data before the host reads it + // be sure the shader reads all the vertex data before the host might modify it + cmd.pipelineBarrier( + .{ .all_graphics_bit = true }, + .{ .host_bit = true }, + .{}, + 0, + null, + 1, + &.{ + vk.BufferMemoryBarrier{ + .buffer = vertex_buffer, + .src_access_mask = .{ .shader_read_bit = true }, + .dst_access_mask = .{ .host_write_bit = true }, + .offset = 0, + .size = vk.WHOLE_SIZE, + .src_queue_family_index = 0, + .dst_queue_family_index = 0, + }, + }, + 0, + null, + ); + cmd.pipelineBarrier( .{ .color_attachment_output_bit = true }, .{ .bottom_of_pipe_bit = true }, From 29cd7fa5e5c9e48030cc2996dd93471a0e1a6914 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Fri, 5 Jul 2024 23:26:57 -0400 Subject: [PATCH 079/113] start migrating to uber-pipeline with descriptors --- src/Uber.zig | 178 ++++++++++++++++++++++++++++++++++++++++++++++++ src/main.zig | 187 +++++++++++---------------------------------------- 2 files changed, 218 insertions(+), 147 deletions(-) create mode 100644 src/Uber.zig diff --git a/src/Uber.zig b/src/Uber.zig new file mode 100644 index 0000000..1a64888 --- /dev/null +++ b/src/Uber.zig @@ -0,0 +1,178 @@ +const std = @import("std"); +const au = @import("au.zig"); +const vk = @import("vk"); +const shaders = @import("shaders"); + +const Self = @This(); + +set_layout: vk.DescriptorSetLayout, +layout: vk.PipelineLayout, +pipeline: vk.Pipeline, + +pub const Index = u16; + +pub const Vertex = extern struct { + pos: [4]f32, + color: [3]f32, + + const InputStateInfo = vk.PipelineVertexInputStateCreateInfo{ + .vertex_binding_description_count = 1, + .p_vertex_binding_descriptions = &.{ + vk.VertexInputBindingDescription{ + .binding = 0, + .stride = @sizeOf(Vertex), + .input_rate = .vertex, + }, + }, + + .vertex_attribute_description_count = 2, + .p_vertex_attribute_descriptions = &.{ + vk.VertexInputAttributeDescription{ + .binding = 0, + .location = 0, + .format = .r32g32b32a32_sfloat, + .offset = @offsetOf(Vertex, "pos"), + }, + vk.VertexInputAttributeDescription{ + .binding = 0, + .location = 1, + .format = .r32g32b32_sfloat, + .offset = @offsetOf(Vertex, "color"), + }, + }, + }; +}; + +pub const Uniform = extern struct { + proj: [16]f32 = .{ + 0.5, 0, 0, 0, + 0, 1, 0, 0, + 0, 0, 1, 0, + 0, 0, 0, 
1, + }, + + const DescriptorLayoutInfo = vk.DescriptorSetLayoutCreateInfo{ + .flags = .{}, + .binding_count = 1, + .p_bindings = &.{ + vk.DescriptorSetLayoutBinding{ + .binding = 0, + .descriptor_type = .uniform_buffer, + .descriptor_count = 1, + .stage_flags = .{ .vertex_bit = true }, + }, + }, + }; +}; + +pub fn init(cache: vk.PipelineCache) !Self { + const vert = try au.D.createShaderModule(&.{ + .code_size = shaders.triangle_vert.len, + .p_code = @ptrCast(&shaders.triangle_vert), + }, null); + defer au.D.destroyShaderModule(vert, null); + + const frag = try au.D.createShaderModule(&.{ + .code_size = shaders.triangle_frag.len, + .p_code = @ptrCast(&shaders.triangle_frag), + }, null); + defer au.D.destroyShaderModule(frag, null); + + const set_layout = try au.D.createDescriptorSetLayout(&Uniform.DescriptorLayoutInfo, null); + errdefer au.D.destroyDescriptorSetLayout(set_layout, null); + + const layout = try au.D.createPipelineLayout(&vk.PipelineLayoutCreateInfo{ + .push_constant_range_count = 0, + .set_layout_count = 1, + .p_set_layouts = &.{set_layout}, + }, null); + errdefer au.D.destroyPipelineLayout(layout, null); + + var pipeline: vk.Pipeline = .null_handle; + _ = try au.D.createGraphicsPipelines(cache, 1, &[1]vk.GraphicsPipelineCreateInfo{ + vk.GraphicsPipelineCreateInfo{ + .stage_count = 2, + .p_stages = &.{ + vk.PipelineShaderStageCreateInfo{ .stage = .{ .vertex_bit = true }, .module = vert, .p_name = "main" }, + vk.PipelineShaderStageCreateInfo{ .stage = .{ .fragment_bit = true }, .module = frag, .p_name = "main" }, + }, + .layout = layout, + .render_pass = .null_handle, + .subpass = 0, + .base_pipeline_handle = .null_handle, + .base_pipeline_index = -1, + .p_vertex_input_state = &Vertex.InputStateInfo, + .p_input_assembly_state = &vk.PipelineInputAssemblyStateCreateInfo{ + .topology = .triangle_list, + .primitive_restart_enable = vk.FALSE, + }, + .p_tessellation_state = null, + .p_viewport_state = &vk.PipelineViewportStateCreateInfo{ + .viewport_count = 1, + .scissor_count = 1, + }, + .p_rasterization_state = &vk.PipelineRasterizationStateCreateInfo{ + .depth_clamp_enable = vk.FALSE, + .rasterizer_discard_enable = vk.FALSE, + .polygon_mode = .fill, + .cull_mode = .{ .back_bit = true }, + .front_face = .counter_clockwise, + .depth_bias_enable = vk.FALSE, + .depth_bias_constant_factor = 0.0, + .depth_bias_clamp = 0.0, + .depth_bias_slope_factor = 0.0, + .line_width = 1.0, + }, + .p_multisample_state = &vk.PipelineMultisampleStateCreateInfo{ + .rasterization_samples = .{ .@"1_bit" = true }, + .sample_shading_enable = vk.FALSE, + .min_sample_shading = 1, + .alpha_to_coverage_enable = vk.FALSE, + .alpha_to_one_enable = vk.FALSE, + }, + .p_depth_stencil_state = null, + .p_color_blend_state = &vk.PipelineColorBlendStateCreateInfo{ + .logic_op_enable = vk.FALSE, + .logic_op = .copy, + .blend_constants = [_]f32{ 0, 0, 0, 0 }, + .attachment_count = 1, + .p_attachments = &.{ + vk.PipelineColorBlendAttachmentState{ + .blend_enable = vk.FALSE, + .color_blend_op = .add, + .src_color_blend_factor = .one, + .dst_color_blend_factor = .zero, + .alpha_blend_op = .add, + .src_alpha_blend_factor = .one, + .dst_alpha_blend_factor = .zero, + .color_write_mask = .{ .r_bit = true, .g_bit = true, .b_bit = true, .a_bit = true }, + }, + }, + }, + .p_dynamic_state = &vk.PipelineDynamicStateCreateInfo{ + .flags = .{}, + .dynamic_state_count = 2, + .p_dynamic_states = &.{ + .viewport, + .scissor, + }, + }, + .p_next = &vk.PipelineRenderingCreateInfo{ + .color_attachment_count = 1, + 
.p_color_attachment_formats = &.{au.device_config.format.format}, + .depth_attachment_format = .undefined, + .stencil_attachment_format = .undefined, + .view_mask = 0, + }, + }, + }, null, @ptrCast(&pipeline)); + errdefer au.D.destroyPipeline(pipeline, null); + + return .{ .pipeline = pipeline, .layout = layout, .set_layout = set_layout }; +} + +pub fn deinit(self: Self) void { + au.D.destroyPipeline(self.pipeline, null); + au.D.destroyPipelineLayout(self.layout, null); + au.D.destroyDescriptorSetLayout(self.set_layout, null); +} diff --git a/src/main.zig b/src/main.zig index 625f2c9..203eb21 100644 --- a/src/main.zig +++ b/src/main.zig @@ -7,35 +7,9 @@ const Allocator = std.mem.Allocator; const au = @import("au.zig"); const im = @import("cimgui"); -const Vertex = extern struct { - const binding_description = vk.VertexInputBindingDescription{ - .binding = 0, - .stride = @sizeOf(Vertex), - .input_rate = .vertex, - }; +const Uber = @import("Uber.zig"); - const attribute_description = [_]vk.VertexInputAttributeDescription{ - .{ - .binding = 0, - .location = 0, - .format = .r32g32b32a32_sfloat, - .offset = @offsetOf(Vertex, "pos"), - }, - .{ - .binding = 0, - .location = 1, - .format = .r32g32b32_sfloat, - .offset = @offsetOf(Vertex, "color"), - }, - }; - - pos: [4]f32, - color: [3]f32, -}; - -const Index = u16; - -const vertices = [_]Vertex{ +const vertices = [_]Uber.Vertex{ // Vulkan depth range is 0, 1 instead of OpenGL -1, 1 .{ .pos = .{ -0.5, -0.5, -0.5, 1.0 }, .color = .{ 1, 0, 0 } }, .{ .pos = .{ -0.5, 0.5, -0.5, 1.0 }, .color = .{ 0, 1, 0 } }, @@ -47,7 +21,9 @@ const vertices = [_]Vertex{ .{ .pos = .{ 0.5, 0.5, 0.5, 1.0 }, .color = .{ 1, 1, 0 } }, }; -const indices = [_]Index{ 4, 5, 6, 6, 5, 7 }; +const indices = [_]Uber.Index{ 4, 5, 6, 6, 5, 7 }; + +const uniform = Uber.Uniform{}; const Frame = struct { pub fn init() !Frame { @@ -67,6 +43,7 @@ const Frame = struct { pipeline: vk.Pipeline, vertex_buffer: vk.Buffer, index_buffer: vk.Buffer, + uniform_buffer: vk.Buffer, ) !void { _ = self; @@ -143,7 +120,7 @@ const Frame = struct { .{}, 0, null, - 1, + 2, &.{ vk.BufferMemoryBarrier{ .buffer = vertex_buffer, @@ -154,6 +131,15 @@ const Frame = struct { .src_queue_family_index = 0, .dst_queue_family_index = 0, }, + vk.BufferMemoryBarrier{ + .buffer = uniform_buffer, + .src_access_mask = .{ .shader_read_bit = true }, + .dst_access_mask = .{ .host_write_bit = true }, + .offset = 0, + .size = vk.WHOLE_SIZE, + .src_queue_family_index = 0, + .dst_queue_family_index = 0, + }, }, 0, null, @@ -254,30 +240,11 @@ pub fn main() !void { _ = im.c.ImGui_ImplVulkan_CreateFontsTexture(); defer im.c.ImGui_ImplVulkan_Shutdown(); - const vert = try au.D.createShaderModule(&vk.ShaderModuleCreateInfo{ - .code_size = shaders.triangle_vert.len, - .p_code = @ptrCast(&shaders.triangle_vert), - }, null); - defer au.D.destroyShaderModule(vert, null); - - const frag = try au.D.createShaderModule(&vk.ShaderModuleCreateInfo{ - .code_size = shaders.triangle_frag.len, - .p_code = @ptrCast(&shaders.triangle_frag), - }, null); - defer au.D.destroyShaderModule(frag, null); - const cache = try au.D.createPipelineCache(&vk.PipelineCacheCreateInfo{}, null); defer au.D.destroyPipelineCache(cache, null); - // for descriptor sets - const layout = try au.D.createPipelineLayout(&vk.PipelineLayoutCreateInfo{ - .flags = .{}, - .set_layout_count = 0, - .p_set_layouts = null, - .push_constant_range_count = 0, - .p_push_constant_ranges = null, - }, null); - defer au.D.destroyPipelineLayout(layout, null); + const uber = try 
Uber.init(cache); + defer uber.deinit(); const vkalloc = au.VkAllocator.init(); @@ -293,8 +260,8 @@ pub fn main() !void { ); defer vkalloc.free(vertex_memory); try au.D.bindBufferMemory(vertex_buffer, vertex_memory, 0); - - const vertex_data: *align(1) @TypeOf(vertices) = @ptrCast(try au.D.mapMemory(vertex_memory, 0, vk.WHOLE_SIZE, .{})); + const vertex_data: *align(1) @TypeOf(vertices) = + @ptrCast(try au.D.mapMemory(vertex_memory, 0, vk.WHOLE_SIZE, .{})); defer au.D.unmapMemory(vertex_memory); vertex_data.* = vertices; @@ -310,102 +277,27 @@ pub fn main() !void { ); defer vkalloc.free(index_memory); try au.D.bindBufferMemory(index_buffer, index_memory, 0); - const index_data: *align(1) @TypeOf(indices) = @ptrCast(try au.D.mapMemory(index_memory, 0, vk.WHOLE_SIZE, .{})); + const index_data: *align(1) @TypeOf(indices) = + @ptrCast(try au.D.mapMemory(index_memory, 0, vk.WHOLE_SIZE, .{})); defer au.D.unmapMemory(index_memory); index_data.* = indices; - try au.D.deviceWaitIdle(); - - const gpci: vk.GraphicsPipelineCreateInfo = .{ - .stage_count = 2, - .p_stages = &.{ - vk.PipelineShaderStageCreateInfo{ .stage = .{ .vertex_bit = true }, .module = vert, .p_name = "main" }, - vk.PipelineShaderStageCreateInfo{ .stage = .{ .fragment_bit = true }, .module = frag, .p_name = "main" }, - }, - .p_vertex_input_state = &vk.PipelineVertexInputStateCreateInfo{ - .vertex_binding_description_count = 1, - .p_vertex_binding_descriptions = @ptrCast(&Vertex.binding_description), - .vertex_attribute_description_count = Vertex.attribute_description.len, - .p_vertex_attribute_descriptions = &Vertex.attribute_description, - }, - .p_input_assembly_state = &vk.PipelineInputAssemblyStateCreateInfo{ - .topology = .triangle_list, - .primitive_restart_enable = vk.FALSE, - }, - .p_tessellation_state = null, - .p_viewport_state = &vk.PipelineViewportStateCreateInfo{ - .viewport_count = 1, - .scissor_count = 1, - }, - .p_rasterization_state = &vk.PipelineRasterizationStateCreateInfo{ - .depth_clamp_enable = vk.FALSE, - .rasterizer_discard_enable = vk.FALSE, - .polygon_mode = .fill, - .cull_mode = .{ .back_bit = true }, - .front_face = .counter_clockwise, - .depth_bias_enable = vk.FALSE, - .depth_bias_constant_factor = 0.0, - .depth_bias_clamp = 0.0, - .depth_bias_slope_factor = 0.0, - .line_width = 1.0, - }, - .p_multisample_state = &vk.PipelineMultisampleStateCreateInfo{ - .rasterization_samples = .{ .@"1_bit" = true }, - .sample_shading_enable = vk.FALSE, - .min_sample_shading = 1, - .alpha_to_coverage_enable = vk.FALSE, - .alpha_to_one_enable = vk.FALSE, - }, - .p_depth_stencil_state = null, - .p_color_blend_state = &vk.PipelineColorBlendStateCreateInfo{ - .logic_op_enable = vk.FALSE, - .logic_op = .copy, - .blend_constants = [_]f32{ 0, 0, 0, 0 }, - .attachment_count = 1, - .p_attachments = &.{ - vk.PipelineColorBlendAttachmentState{ - .blend_enable = vk.FALSE, - .color_blend_op = .add, - .src_color_blend_factor = .one, - .dst_color_blend_factor = .zero, - .alpha_blend_op = .add, - .src_alpha_blend_factor = .one, - .dst_alpha_blend_factor = .zero, - .color_write_mask = .{ .r_bit = true, .g_bit = true, .b_bit = true, .a_bit = true }, - }, - }, - }, - .p_dynamic_state = &vk.PipelineDynamicStateCreateInfo{ - .flags = .{}, - .dynamic_state_count = 2, - .p_dynamic_states = &.{ - .viewport, - .scissor, - }, - }, - .layout = layout, - .render_pass = .null_handle, // set via dynamic rendering - .subpass = 0, - .base_pipeline_handle = .null_handle, - .base_pipeline_index = -1, - .p_next = &vk.PipelineRenderingCreateInfo{ - 
.color_attachment_count = 1, - .p_color_attachment_formats = &.{au.device_config.format.format}, - .depth_attachment_format = .undefined, - .stencil_attachment_format = .undefined, - .view_mask = 0, - }, - }; - - var pipeline: vk.Pipeline = undefined; - _ = try au.D.createGraphicsPipelines( - cache, - 1, - @ptrCast(&gpci), - null, - @ptrCast(&pipeline), + const uniform_buffer = try au.D.createBuffer(&vk.BufferCreateInfo{ + .size = @sizeOf(@TypeOf(uniform)), + .usage = .{ .uniform_buffer_bit = true }, + .sharing_mode = .exclusive, + }, null); + defer au.D.destroyBuffer(uniform_buffer, null); + const uniform_memory = try vkalloc.alloc( + au.D.getBufferMemoryRequirements(uniform_buffer), + .{ .host_visible_bit = true, .host_coherent_bit = true }, ); - defer au.D.destroyPipeline(pipeline, null); + defer vkalloc.free(uniform_memory); + try au.D.bindBufferMemory(uniform_buffer, uniform_memory, 0); + const uniform_data: *align(1) @TypeOf(uniform) = + @ptrCast(try au.D.mapMemory(uniform_memory, 0, vk.WHOLE_SIZE, .{})); + defer au.D.unmapMemory(uniform_memory); + uniform_data.* = uniform; var prng = std.Random.Sfc64.init(std.crypto.random.int(u64)); const rand = prng.random(); @@ -457,14 +349,15 @@ pub fn main() !void { image, view, vk.Rect2D{ .offset = .{ .x = 0, .y = 0 }, .extent = sc.cinfo.image_extent }, - pipeline, + uber.pipeline, vertex_buffer, index_buffer, + uniform_buffer, ); for (vertex_data) |*v| { for (v.pos[0..2]) |*f| { - f.* += (rand.float(f32) - 0.5) * 0.01; + f.* += (rand.float(f32) - 0.5) * 0.001; } } From 53d063246b03c4fd50f31bc981b1f0c9f9a9b000 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Mon, 8 Jul 2024 10:51:42 -0400 Subject: [PATCH 080/113] pass a uniform --- src/Uber.zig | 9 ++------ src/main.zig | 48 ++++++++++++++++++++++++++++++++++++++- src/shaders/triangle.vert | 12 ++++++---- 3 files changed, 57 insertions(+), 12 deletions(-) diff --git a/src/Uber.zig b/src/Uber.zig index 1a64888..3144a25 100644 --- a/src/Uber.zig +++ b/src/Uber.zig @@ -44,14 +44,9 @@ pub const Vertex = extern struct { }; pub const Uniform = extern struct { - proj: [16]f32 = .{ - 0.5, 0, 0, 0, - 0, 1, 0, 0, - 0, 0, 1, 0, - 0, 0, 0, 1, - }, + proj: [16]f32, - const DescriptorLayoutInfo = vk.DescriptorSetLayoutCreateInfo{ + pub const DescriptorLayoutInfo = vk.DescriptorSetLayoutCreateInfo{ .flags = .{}, .binding_count = 1, .p_bindings = &.{ diff --git a/src/main.zig b/src/main.zig index 203eb21..ccd4d87 100644 --- a/src/main.zig +++ b/src/main.zig @@ -23,7 +23,14 @@ const vertices = [_]Uber.Vertex{ const indices = [_]Uber.Index{ 4, 5, 6, 6, 5, 7 }; -const uniform = Uber.Uniform{}; +const uniform = Uber.Uniform{ + .proj = .{ + 0.5, 0.0, 0.0, 0.0, + 0.0, 1.0, 0.0, 0.0, + 0.0, 0.0, 1.0, 0.0, + 0.0, 0.0, 0.0, 1.0, + }, +}; const Frame = struct { pub fn init() !Frame { @@ -41,9 +48,11 @@ const Frame = struct { view: vk.ImageView, scissor: vk.Rect2D, pipeline: vk.Pipeline, + layout: vk.PipelineLayout, vertex_buffer: vk.Buffer, index_buffer: vk.Buffer, uniform_buffer: vk.Buffer, + descriptor_set: vk.DescriptorSet, ) !void { _ = self; @@ -103,6 +112,7 @@ const Frame = struct { cmd.beginRendering(&info); + cmd.bindDescriptorSets(.graphics, layout, 0, 1, &.{descriptor_set}, 0, null); cmd.bindPipeline(.graphics, pipeline); cmd.bindVertexBuffers(0, 1, &.{vertex_buffer}, &.{0}); cmd.bindIndexBuffer(index_buffer, 0, .uint16); @@ -282,6 +292,8 @@ pub fn main() !void { defer au.D.unmapMemory(index_memory); index_data.* = indices; + // todo ring buffer for frames in flight. 
need to use an offset when binding + // use dynamic offset - descriptor type VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC const uniform_buffer = try au.D.createBuffer(&vk.BufferCreateInfo{ .size = @sizeOf(@TypeOf(uniform)), .usage = .{ .uniform_buffer_bit = true }, @@ -299,6 +311,38 @@ pub fn main() !void { defer au.D.unmapMemory(uniform_memory); uniform_data.* = uniform; + var descriptorSet: vk.DescriptorSet = undefined; + try au.D.allocateDescriptorSets(&vk.DescriptorSetAllocateInfo{ + .descriptor_pool = descriptorPool, + .descriptor_set_count = 1, + .p_set_layouts = &.{uber.set_layout}, + }, @ptrCast(&descriptorSet)); + defer au.D.freeDescriptorSets(descriptorPool, 1, &.{descriptorSet}) catch unreachable; // todo handle this? + + au.D.updateDescriptorSets( + 1, + &.{ + vk.WriteDescriptorSet{ + .dst_set = descriptorSet, + .dst_binding = 0, + .dst_array_element = 0, + .descriptor_type = .uniform_buffer, + .descriptor_count = 1, + .p_image_info = &[0]vk.DescriptorImageInfo{}, + .p_texel_buffer_view = &[0]vk.BufferView{}, + .p_buffer_info = &.{ + vk.DescriptorBufferInfo{ + .buffer = uniform_buffer, + .offset = 0, + .range = vk.WHOLE_SIZE, + }, + }, + }, + }, + 0, + null, + ); + var prng = std.Random.Sfc64.init(std.crypto.random.int(u64)); const rand = prng.random(); @@ -350,9 +394,11 @@ pub fn main() !void { view, vk.Rect2D{ .offset = .{ .x = 0, .y = 0 }, .extent = sc.cinfo.image_extent }, uber.pipeline, + uber.layout, vertex_buffer, index_buffer, uniform_buffer, + descriptorSet, ); for (vertex_data) |*v| { diff --git a/src/shaders/triangle.vert b/src/shaders/triangle.vert index f820750..f98be1c 100644 --- a/src/shaders/triangle.vert +++ b/src/shaders/triangle.vert @@ -1,11 +1,15 @@ #version 450 -layout(location = 0) in vec4 a_pos; -layout(location = 1) in vec3 a_color; +layout (set = 0, binding = 0) uniform CameraBuffer { + mat4 viewproj; +} cam; -layout(location = 0) out vec3 v_color; +layout (location = 0) in vec4 a_pos; +layout (location = 1) in vec3 a_color; + +layout (location = 0) out vec3 v_color; void main() { - gl_Position = a_pos; + gl_Position = a_pos * cam.viewproj; v_color = a_color; } From 8fd94e631d2d6a4800c24d39b6f7d0c7178e4bb1 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Mon, 8 Jul 2024 14:25:10 -0400 Subject: [PATCH 081/113] better separation of swapchain, flight, and uber --- src/Uber.zig | 79 ++++++++------- src/au.zig | 2 +- src/au/Flights.zig | 57 +++++++++++ src/au/SwapChain.zig | 83 ++++++++++++++++ src/au/flights.zig | 64 ------------ src/main.zig | 202 +++++++------------------------------- src/shaders/triangle.vert | 4 +- 7 files changed, 220 insertions(+), 271 deletions(-) create mode 100644 src/au/Flights.zig delete mode 100644 src/au/flights.zig diff --git a/src/Uber.zig b/src/Uber.zig index 3144a25..b1f59f8 100644 --- a/src/Uber.zig +++ b/src/Uber.zig @@ -11,51 +11,41 @@ pipeline: vk.Pipeline, pub const Index = u16; +pub const Uniform = extern struct { + mat: [16]f32, + + const Bindings = [_]vk.DescriptorSetLayoutBinding{.{ + .binding = 0, + .descriptor_type = .uniform_buffer, + .descriptor_count = 1, + .stage_flags = .{ .vertex_bit = true }, + }}; +}; + pub const Vertex = extern struct { pos: [4]f32, color: [3]f32, - const InputStateInfo = vk.PipelineVertexInputStateCreateInfo{ - .vertex_binding_description_count = 1, - .p_vertex_binding_descriptions = &.{ - vk.VertexInputBindingDescription{ - .binding = 0, - .stride = @sizeOf(Vertex), - .input_rate = .vertex, - }, - }, - - .vertex_attribute_description_count = 2, - .p_vertex_attribute_descriptions = &.{ 
- vk.VertexInputAttributeDescription{ - .binding = 0, - .location = 0, - .format = .r32g32b32a32_sfloat, - .offset = @offsetOf(Vertex, "pos"), - }, - vk.VertexInputAttributeDescription{ - .binding = 0, - .location = 1, - .format = .r32g32b32_sfloat, - .offset = @offsetOf(Vertex, "color"), - }, + const Bindings = [_]vk.VertexInputBindingDescription{ + .{ + .binding = 0, + .stride = @sizeOf(Vertex), + .input_rate = .vertex, }, }; -}; -pub const Uniform = extern struct { - proj: [16]f32, - - pub const DescriptorLayoutInfo = vk.DescriptorSetLayoutCreateInfo{ - .flags = .{}, - .binding_count = 1, - .p_bindings = &.{ - vk.DescriptorSetLayoutBinding{ - .binding = 0, - .descriptor_type = .uniform_buffer, - .descriptor_count = 1, - .stage_flags = .{ .vertex_bit = true }, - }, + const Attributes = [_]vk.VertexInputAttributeDescription{ + .{ + .binding = 0, + .location = 0, + .format = .r32g32b32a32_sfloat, + .offset = @offsetOf(Vertex, "pos"), + }, + .{ + .binding = 0, + .location = 1, + .format = .r32g32b32_sfloat, + .offset = @offsetOf(Vertex, "color"), }, }; }; @@ -73,7 +63,11 @@ pub fn init(cache: vk.PipelineCache) !Self { }, null); defer au.D.destroyShaderModule(frag, null); - const set_layout = try au.D.createDescriptorSetLayout(&Uniform.DescriptorLayoutInfo, null); + const set_layout = try au.D.createDescriptorSetLayout(&vk.DescriptorSetLayoutCreateInfo{ + .flags = .{}, + .binding_count = @intCast(Uniform.Bindings.len), + .p_bindings = &Uniform.Bindings, + }, null); errdefer au.D.destroyDescriptorSetLayout(set_layout, null); const layout = try au.D.createPipelineLayout(&vk.PipelineLayoutCreateInfo{ @@ -96,7 +90,12 @@ pub fn init(cache: vk.PipelineCache) !Self { .subpass = 0, .base_pipeline_handle = .null_handle, .base_pipeline_index = -1, - .p_vertex_input_state = &Vertex.InputStateInfo, + .p_vertex_input_state = &vk.PipelineVertexInputStateCreateInfo{ + .vertex_binding_description_count = @intCast(Vertex.Bindings.len), + .p_vertex_binding_descriptions = &Vertex.Bindings, + .vertex_attribute_description_count = @intCast(Vertex.Attributes.len), + .p_vertex_attribute_descriptions = &Vertex.Attributes, + }, .p_input_assembly_state = &vk.PipelineInputAssemblyStateCreateInfo{ .topology = .triangle_list, .primitive_restart_enable = vk.FALSE, diff --git a/src/au.zig b/src/au.zig index 658fc98..4720b03 100644 --- a/src/au.zig +++ b/src/au.zig @@ -6,7 +6,7 @@ const c = @import("c.zig"); pub const Bus = @import("au/Bus.zig"); pub const SwapChain = @import("au/SwapChain.zig"); -pub const Flights = @import("au/flights.zig").Flights; +pub const Flights = @import("au/Flights.zig"); pub const VkAllocator = @import("au/VkAllocator.zig"); pub const use_debug_messenger = switch (builtin.mode) { diff --git a/src/au/Flights.zig b/src/au/Flights.zig new file mode 100644 index 0000000..d758170 --- /dev/null +++ b/src/au/Flights.zig @@ -0,0 +1,57 @@ +const std = @import("std"); +const vk = @import("vk"); +const au = @import("../au.zig"); + +const Self = @This(); + +const Flight = struct { + acquire: vk.Semaphore = .null_handle, + complete: vk.Semaphore = .null_handle, + fence: vk.Fence = .null_handle, + pool: vk.CommandPool = .null_handle, + cmd: vk.CommandBuffer = .null_handle, +}; + +alloc: std.mem.Allocator, +flights: []Flight, +idx: usize, + +pub fn init(alloc: std.mem.Allocator, n: usize) !Self { + var self: Self = .{ + .alloc = alloc, + .flights = try alloc.alloc(Flight, n), + .idx = 0, + }; + errdefer self.deinit(); + + for (self.flights) |*flight| { + flight.acquire = try au.D.createSemaphore(&.{}, null); + 
flight.complete = try au.D.createSemaphore(&.{}, null); + flight.fence = try au.D.createFence(&.{ .flags = .{ .signaled_bit = true } }, null); + flight.pool = try au.D.createCommandPool(&.{ .queue_family_index = au.device_config.family }, null); + try au.D.allocateCommandBuffers(&vk.CommandBufferAllocateInfo{ + .command_buffer_count = 1, + .command_pool = flight.pool, + .level = .primary, + }, @ptrCast(&flight.cmd)); + } + + return self; +} + +pub fn deinit(self: Self) void { + for (self.flights) |flight| { + au.D.destroySemaphore(flight.acquire, null); + au.D.destroySemaphore(flight.complete, null); + au.D.destroyFence(flight.fence, null); + au.D.freeCommandBuffers(flight.pool, 1, &.{flight.cmd}); + au.D.destroyCommandPool(flight.pool, null); + } + self.alloc.free(self.flights); +} + +pub fn next(self: *Self) Flight { + const idx = self.idx; + self.idx = (self.idx + 1) % self.flights.len; + return self.flights[idx]; +} diff --git a/src/au/SwapChain.zig b/src/au/SwapChain.zig index 9f451d2..bca6d60 100644 --- a/src/au/SwapChain.zig +++ b/src/au/SwapChain.zig @@ -101,3 +101,86 @@ pub fn getImage(self: Self, idx: u32) vk.Image { pub fn getView(self: Self, idx: u32) vk.ImageView { return self.views.items[idx]; } + +pub fn beginRendering(self: Self, cmd: au.CommandBufferProxy, area: vk.Rect2D, idx: u32) void { + cmd.pipelineBarrier( + .{ .top_of_pipe_bit = true }, + .{ .color_attachment_output_bit = true }, + .{}, + 0, + null, + 0, + null, + 1, + &.{ + vk.ImageMemoryBarrier{ + .src_access_mask = .{}, + .dst_access_mask = .{ .color_attachment_write_bit = true }, + .old_layout = .undefined, + .new_layout = .color_attachment_optimal, + .src_queue_family_index = 0, + .dst_queue_family_index = 0, + .image = self.getImage(idx), + .subresource_range = .{ + .aspect_mask = .{ .color_bit = true }, + .base_mip_level = 0, + .level_count = 1, + .base_array_layer = 0, + .layer_count = 1, + }, + }, + }, + ); + + cmd.beginRendering(&vk.RenderingInfo{ + .render_area = area, + .layer_count = 1, + .view_mask = 0, + .color_attachment_count = 1, + .p_color_attachments = &.{ + vk.RenderingAttachmentInfo{ + .image_view = self.getView(idx), + .image_layout = .color_attachment_optimal, + .resolve_mode = .{}, + .resolve_image_view = .null_handle, + .resolve_image_layout = .undefined, + .load_op = .clear, + .store_op = .store, + .clear_value = .{ .color = .{ .float_32 = .{ 0, 0, 0, 1 } } }, + }, + }, + }); +} + +pub fn endRendering(self: Self, cmd: au.CommandBufferProxy, idx: u32) void { + cmd.endRendering(); + + cmd.pipelineBarrier( + .{ .color_attachment_output_bit = true }, + .{ .bottom_of_pipe_bit = true }, + .{}, + 0, + null, + 0, + null, + 1, + &.{ + vk.ImageMemoryBarrier{ + .src_access_mask = .{ .color_attachment_write_bit = true }, + .dst_access_mask = .{}, + .old_layout = .color_attachment_optimal, + .new_layout = .present_src_khr, + .src_queue_family_index = 0, + .dst_queue_family_index = 0, + .image = self.getImage(idx), + .subresource_range = .{ + .aspect_mask = .{ .color_bit = true }, + .base_mip_level = 0, + .level_count = 1, + .base_array_layer = 0, + .layer_count = 1, + }, + }, + }, + ); +} diff --git a/src/au/flights.zig b/src/au/flights.zig deleted file mode 100644 index 86825c3..0000000 --- a/src/au/flights.zig +++ /dev/null @@ -1,64 +0,0 @@ -const std = @import("std"); -const vk = @import("vk"); -const au = @import("../au.zig"); - -pub fn Flights(T: type) type { - return struct { - const Self = @This(); - - const Flight = struct { - acquire: vk.Semaphore = .null_handle, - complete: vk.Semaphore = 
.null_handle, - fence: vk.Fence = .null_handle, - pool: vk.CommandPool = .null_handle, - cmd: vk.CommandBuffer = .null_handle, - ctx: T, - }; - - alloc: std.mem.Allocator, - flights: []Flight, - idx: usize, - - pub fn init(alloc: std.mem.Allocator, n: usize) !Self { - var self: Self = .{ - .alloc = alloc, - .flights = try alloc.alloc(Flight, n), - .idx = 0, - }; - errdefer self.deinit(); - - for (self.flights) |*flight| { - flight.acquire = try au.D.createSemaphore(&.{}, null); - flight.complete = try au.D.createSemaphore(&.{}, null); - flight.fence = try au.D.createFence(&.{ .flags = .{ .signaled_bit = true } }, null); - flight.pool = try au.D.createCommandPool(&.{ .queue_family_index = au.device_config.family }, null); - try au.D.allocateCommandBuffers(&vk.CommandBufferAllocateInfo{ - .command_buffer_count = 1, - .command_pool = flight.pool, - .level = .primary, - }, @ptrCast(&flight.cmd)); - flight.ctx = try T.init(); - } - - return self; - } - - pub fn deinit(self: Self) void { - for (self.flights) |flight| { - au.D.destroySemaphore(flight.acquire, null); - au.D.destroySemaphore(flight.complete, null); - au.D.destroyFence(flight.fence, null); - au.D.freeCommandBuffers(flight.pool, 1, &.{flight.cmd}); - au.D.destroyCommandPool(flight.pool, null); - flight.ctx.deinit(); - } - self.alloc.free(self.flights); - } - - pub fn next(self: *Self) Flight { - const idx = self.idx; - self.idx = (self.idx + 1) % self.flights.len; - return self.flights[idx]; - } - }; -} diff --git a/src/main.zig b/src/main.zig index ccd4d87..6fd5d86 100644 --- a/src/main.zig +++ b/src/main.zig @@ -24,7 +24,7 @@ const vertices = [_]Uber.Vertex{ const indices = [_]Uber.Index{ 4, 5, 6, 6, 5, 7 }; const uniform = Uber.Uniform{ - .proj = .{ + .mat = .{ 0.5, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, @@ -32,157 +32,30 @@ const uniform = Uber.Uniform{ }, }; -const Frame = struct { - pub fn init() !Frame { - return .{}; - } +fn record_render( + cmd: au.CommandBufferProxy, + uber: Uber, + area: vk.Rect2D, // render area, scissor, and viewport. 
+ vertex_buffer: vk.Buffer, + index_buffer: vk.Buffer, + descriptor_set: vk.DescriptorSet, +) void { + cmd.setViewport(0, 1, &.{.{ + .x = @floatFromInt(area.offset.x), + .y = @floatFromInt(area.offset.y), + .width = @floatFromInt(area.extent.width), + .height = @floatFromInt(area.extent.height), + .min_depth = 0, + .max_depth = 1, + }}); + cmd.setScissor(0, 1, &.{area}); - pub fn deinit(self: Frame) void { - _ = self; - } - - pub fn record_render( - self: Frame, - cmd: au.CommandBufferProxy, - image: vk.Image, - view: vk.ImageView, - scissor: vk.Rect2D, - pipeline: vk.Pipeline, - layout: vk.PipelineLayout, - vertex_buffer: vk.Buffer, - index_buffer: vk.Buffer, - uniform_buffer: vk.Buffer, - descriptor_set: vk.DescriptorSet, - ) !void { - _ = self; - - cmd.pipelineBarrier( - .{ .top_of_pipe_bit = true }, - .{ .color_attachment_output_bit = true }, - .{}, - 0, - null, - 0, - null, - 1, - @ptrCast(&vk.ImageMemoryBarrier{ - .src_access_mask = .{}, - .dst_access_mask = .{ .color_attachment_write_bit = true }, - .old_layout = .undefined, - .new_layout = .color_attachment_optimal, - .src_queue_family_index = 0, - .dst_queue_family_index = 0, - .image = image, - .subresource_range = .{ - .aspect_mask = .{ .color_bit = true }, - .base_mip_level = 0, - .level_count = 1, - .base_array_layer = 0, - .layer_count = 1, - }, - }), - ); - - const info = vk.RenderingInfoKHR{ - .render_area = scissor, - .layer_count = 1, - .view_mask = 0, - .color_attachment_count = 1, - .p_color_attachments = &.{vk.RenderingAttachmentInfo{ - .image_view = view, - .image_layout = .color_attachment_optimal, - .resolve_mode = .{}, - .resolve_image_view = .null_handle, - .resolve_image_layout = .undefined, - .load_op = .clear, - .store_op = .store, - .clear_value = .{ .color = .{ .float_32 = .{ 0, 0, 0, 1 } } }, - }}, - }; - - cmd.setViewport(0, 1, &.{.{ - .x = @floatFromInt(scissor.offset.x), - .y = @floatFromInt(scissor.offset.y), - .width = @floatFromInt(scissor.extent.width), - .height = @floatFromInt(scissor.extent.height), - .min_depth = 0, - .max_depth = 1, - }}); - cmd.setScissor(0, 1, &.{scissor}); - - cmd.beginRendering(&info); - - cmd.bindDescriptorSets(.graphics, layout, 0, 1, &.{descriptor_set}, 0, null); - cmd.bindPipeline(.graphics, pipeline); - cmd.bindVertexBuffers(0, 1, &.{vertex_buffer}, &.{0}); - cmd.bindIndexBuffer(index_buffer, 0, .uint16); - cmd.drawIndexed(indices.len, 1, 0, 0, 0); - - im.c.ImGui_ImplVulkan_RenderDrawData(im.c.igGetDrawData(), @ptrFromInt(@intFromEnum(cmd.handle)), null); - - cmd.endRendering(); - - // vulkan implicitly ensures the host writes all data before the host reads it - // be sure the shader reads all the vertex data before the host might modify it - cmd.pipelineBarrier( - .{ .all_graphics_bit = true }, - .{ .host_bit = true }, - .{}, - 0, - null, - 2, - &.{ - vk.BufferMemoryBarrier{ - .buffer = vertex_buffer, - .src_access_mask = .{ .shader_read_bit = true }, - .dst_access_mask = .{ .host_write_bit = true }, - .offset = 0, - .size = vk.WHOLE_SIZE, - .src_queue_family_index = 0, - .dst_queue_family_index = 0, - }, - vk.BufferMemoryBarrier{ - .buffer = uniform_buffer, - .src_access_mask = .{ .shader_read_bit = true }, - .dst_access_mask = .{ .host_write_bit = true }, - .offset = 0, - .size = vk.WHOLE_SIZE, - .src_queue_family_index = 0, - .dst_queue_family_index = 0, - }, - }, - 0, - null, - ); - - cmd.pipelineBarrier( - .{ .color_attachment_output_bit = true }, - .{ .bottom_of_pipe_bit = true }, - .{}, - 0, - null, - 0, - null, - 1, - @ptrCast(&vk.ImageMemoryBarrier{ - 
.src_access_mask = .{ .color_attachment_write_bit = true }, - .dst_access_mask = .{}, - .old_layout = .color_attachment_optimal, - .new_layout = .present_src_khr, - .src_queue_family_index = 0, - .dst_queue_family_index = 0, - .image = image, - .subresource_range = .{ - .aspect_mask = .{ .color_bit = true }, - .base_mip_level = 0, - .level_count = 1, - .base_array_layer = 0, - .layer_count = 1, - }, - }), - ); - } -}; + cmd.bindPipeline(.graphics, uber.pipeline); + cmd.bindDescriptorSets(.graphics, uber.layout, 0, 1, &.{descriptor_set}, 0, null); + cmd.bindVertexBuffers(0, 1, &.{vertex_buffer}, &.{0}); + cmd.bindIndexBuffer(index_buffer, 0, .uint16); + cmd.drawIndexed(indices.len, 1, 0, 0, 0); +} pub fn loader_wrapper(procname: [*c]const u8, _: ?*anyopaque) callconv(.C) vk.PfnVoidFunction { return c.glfwGetInstanceProcAddress(au.I.handle, procname); @@ -199,7 +72,7 @@ pub fn main() !void { var sc = try au.SwapChain.init(alloc); defer sc.deinit(); - var flights = try au.Flights(Frame).init(alloc, 3); // FRAMES IN FLIGHT + var flights = try au.Flights.init(alloc, 3); // FRAMES IN FLIGHT defer flights.deinit(); const ctx = im.c.igCreateContext(null) orelse return error.igCreateContextFailed; @@ -328,8 +201,8 @@ pub fn main() !void { .dst_array_element = 0, .descriptor_type = .uniform_buffer, .descriptor_count = 1, - .p_image_info = &[0]vk.DescriptorImageInfo{}, - .p_texel_buffer_view = &[0]vk.BufferView{}, + .p_image_info = undefined, + .p_texel_buffer_view = undefined, .p_buffer_info = &.{ vk.DescriptorBufferInfo{ .buffer = uniform_buffer, @@ -381,25 +254,26 @@ pub fn main() !void { flight.acquire, .null_handle, ); - const image = sc.getImage(acq.image_index); - const view = sc.getView(acq.image_index); var cmd = au.CommandBufferProxy.init(flight.cmd, au.D.wrapper); - try cmd.beginCommandBuffer(&.{ .flags = .{ .one_time_submit_bit = true } }); - try flight.ctx.record_render( + const render_area: vk.Rect2D = .{ + .offset = .{ .x = 0, .y = 0 }, + .extent = sc.cinfo.image_extent, + }; + + sc.beginRendering(cmd, render_area, acq.image_index); + record_render( cmd, - image, - view, - vk.Rect2D{ .offset = .{ .x = 0, .y = 0 }, .extent = sc.cinfo.image_extent }, - uber.pipeline, - uber.layout, + uber, + render_area, vertex_buffer, index_buffer, - uniform_buffer, descriptorSet, ); + im.c.ImGui_ImplVulkan_RenderDrawData(im.c.igGetDrawData(), @ptrFromInt(@intFromEnum(cmd.handle)), null); + sc.endRendering(cmd, acq.image_index); for (vertex_data) |*v| { for (v.pos[0..2]) |*f| { diff --git a/src/shaders/triangle.vert b/src/shaders/triangle.vert index f98be1c..cf7aed7 100644 --- a/src/shaders/triangle.vert +++ b/src/shaders/triangle.vert @@ -1,7 +1,7 @@ #version 450 layout (set = 0, binding = 0) uniform CameraBuffer { - mat4 viewproj; + mat4 mat; } cam; layout (location = 0) in vec4 a_pos; @@ -10,6 +10,6 @@ layout (location = 1) in vec3 a_color; layout (location = 0) out vec3 v_color; void main() { - gl_Position = a_pos * cam.viewproj; + gl_Position = a_pos * cam.mat; v_color = a_color; } From c6ed235e518bff2bea7491fdc00808f5e6a3df41 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Mon, 8 Jul 2024 14:54:44 -0400 Subject: [PATCH 082/113] clean up acquire/present --- src/au/SwapChain.zig | 182 +++++++++++++++++++++++-------------------- src/main.zig | 20 +---- 2 files changed, 103 insertions(+), 99 deletions(-) diff --git a/src/au/SwapChain.zig b/src/au/SwapChain.zig index bca6d60..2d4e62e 100644 --- a/src/au/SwapChain.zig +++ b/src/au/SwapChain.zig @@ -89,98 +89,114 @@ pub fn rebuild(self: *Self) !bool 
{ } }, null); } - // todo repopulate images and synchronization - return true; } -pub fn getImage(self: Self, idx: u32) vk.Image { - return self.images.items[idx]; +pub fn acquire(self: Self, semaphore: vk.Semaphore, fence: vk.Fence) !Target { + const acq = try au.D.acquireNextImageKHR(self.handle, std.math.maxInt(u64), semaphore, fence); + return .{ + .idx = acq.image_index, + .image = self.images.items[acq.image_index], + .view = self.views.items[acq.image_index], + }; } -pub fn getView(self: Self, idx: u32) vk.ImageView { - return self.views.items[idx]; -} +const Target = struct { + idx: u32, + image: vk.Image, + view: vk.ImageView, -pub fn beginRendering(self: Self, cmd: au.CommandBufferProxy, area: vk.Rect2D, idx: u32) void { - cmd.pipelineBarrier( - .{ .top_of_pipe_bit = true }, - .{ .color_attachment_output_bit = true }, - .{}, - 0, - null, - 0, - null, - 1, - &.{ - vk.ImageMemoryBarrier{ - .src_access_mask = .{}, - .dst_access_mask = .{ .color_attachment_write_bit = true }, - .old_layout = .undefined, - .new_layout = .color_attachment_optimal, - .src_queue_family_index = 0, - .dst_queue_family_index = 0, - .image = self.getImage(idx), - .subresource_range = .{ - .aspect_mask = .{ .color_bit = true }, - .base_mip_level = 0, - .level_count = 1, - .base_array_layer = 0, - .layer_count = 1, + pub fn begin_rendering(self: Target, cmd: au.CommandBufferProxy, area: vk.Rect2D) void { + cmd.pipelineBarrier( + .{ .top_of_pipe_bit = true }, + .{ .color_attachment_output_bit = true }, + .{}, + 0, + null, + 0, + null, + 1, + &.{ + vk.ImageMemoryBarrier{ + .src_access_mask = .{}, + .dst_access_mask = .{ .color_attachment_write_bit = true }, + .old_layout = .undefined, + .new_layout = .color_attachment_optimal, + .src_queue_family_index = 0, + .dst_queue_family_index = 0, + .image = self.image, + .subresource_range = .{ + .aspect_mask = .{ .color_bit = true }, + .base_mip_level = 0, + .level_count = 1, + .base_array_layer = 0, + .layer_count = 1, + }, }, }, - }, - ); + ); - cmd.beginRendering(&vk.RenderingInfo{ - .render_area = area, - .layer_count = 1, - .view_mask = 0, - .color_attachment_count = 1, - .p_color_attachments = &.{ - vk.RenderingAttachmentInfo{ - .image_view = self.getView(idx), - .image_layout = .color_attachment_optimal, - .resolve_mode = .{}, - .resolve_image_view = .null_handle, - .resolve_image_layout = .undefined, - .load_op = .clear, - .store_op = .store, - .clear_value = .{ .color = .{ .float_32 = .{ 0, 0, 0, 1 } } }, + cmd.beginRendering(&vk.RenderingInfo{ + .render_area = area, + .layer_count = 1, + .view_mask = 0, + .color_attachment_count = 1, + .p_color_attachments = &.{ + vk.RenderingAttachmentInfo{ + .image_view = self.view, + .image_layout = .color_attachment_optimal, + .resolve_mode = .{}, + .resolve_image_view = .null_handle, + .resolve_image_layout = .undefined, + .load_op = .clear, + .store_op = .store, + .clear_value = .{ .color = .{ .float_32 = .{ 0, 0, 0, 1 } } }, + }, }, - }, + }); + } + + pub fn end_rendering(self: Target, cmd: au.CommandBufferProxy) void { + cmd.endRendering(); + + cmd.pipelineBarrier( + .{ .color_attachment_output_bit = true }, + .{ .bottom_of_pipe_bit = true }, + .{}, + 0, + null, + 0, + null, + 1, + &.{ + vk.ImageMemoryBarrier{ + .src_access_mask = .{ .color_attachment_write_bit = true }, + .dst_access_mask = .{}, + .old_layout = .color_attachment_optimal, + .new_layout = .present_src_khr, + .src_queue_family_index = 0, + .dst_queue_family_index = 0, + .image = self.image, + .subresource_range = .{ + .aspect_mask = .{ .color_bit = true 
}, + .base_mip_level = 0, + .level_count = 1, + .base_array_layer = 0, + .layer_count = 1, + }, + }, + }, + ); + } +}; + +pub fn present(self: Self, wait_semaphores: []const vk.Semaphore, target: Target) !vk.Result { + return try au.Q.presentKHR(&vk.PresentInfoKHR{ + .wait_semaphore_count = @intCast(wait_semaphores.len), + .p_wait_semaphores = wait_semaphores.ptr, + .swapchain_count = 1, + .p_swapchains = &.{self.handle}, + .p_image_indices = &.{target.idx}, + .p_results = null, }); } - -pub fn endRendering(self: Self, cmd: au.CommandBufferProxy, idx: u32) void { - cmd.endRendering(); - - cmd.pipelineBarrier( - .{ .color_attachment_output_bit = true }, - .{ .bottom_of_pipe_bit = true }, - .{}, - 0, - null, - 0, - null, - 1, - &.{ - vk.ImageMemoryBarrier{ - .src_access_mask = .{ .color_attachment_write_bit = true }, - .dst_access_mask = .{}, - .old_layout = .color_attachment_optimal, - .new_layout = .present_src_khr, - .src_queue_family_index = 0, - .dst_queue_family_index = 0, - .image = self.getImage(idx), - .subresource_range = .{ - .aspect_mask = .{ .color_bit = true }, - .base_mip_level = 0, - .level_count = 1, - .base_array_layer = 0, - .layer_count = 1, - }, - }, - }, - ); -} diff --git a/src/main.zig b/src/main.zig index 6fd5d86..7808765 100644 --- a/src/main.zig +++ b/src/main.zig @@ -248,12 +248,7 @@ pub fn main() !void { try au.D.resetFences(1, &.{flight.fence}); try au.D.resetCommandPool(flight.pool, .{}); - const acq = try au.D.acquireNextImageKHR( - sc.handle, - std.math.maxInt(u64), - flight.acquire, - .null_handle, - ); + const tgt = try sc.acquire(flight.acquire, .null_handle); var cmd = au.CommandBufferProxy.init(flight.cmd, au.D.wrapper); try cmd.beginCommandBuffer(&.{ .flags = .{ .one_time_submit_bit = true } }); @@ -263,7 +258,7 @@ pub fn main() !void { .extent = sc.cinfo.image_extent, }; - sc.beginRendering(cmd, render_area, acq.image_index); + tgt.begin_rendering(cmd, render_area); record_render( cmd, uber, @@ -273,7 +268,7 @@ pub fn main() !void { descriptorSet, ); im.c.ImGui_ImplVulkan_RenderDrawData(im.c.igGetDrawData(), @ptrFromInt(@intFromEnum(cmd.handle)), null); - sc.endRendering(cmd, acq.image_index); + tgt.end_rendering(cmd); for (vertex_data) |*v| { for (v.pos[0..2]) |*f| { @@ -304,14 +299,7 @@ pub fn main() !void { @panic("Submission failed"); }; - _ = try au.Q.presentKHR(&vk.PresentInfoKHR{ - .wait_semaphore_count = 1, - .p_wait_semaphores = &.{flight.complete}, - .swapchain_count = 1, - .p_swapchains = &.{sc.handle}, - .p_image_indices = &.{acq.image_index}, - .p_results = null, - }); // todo suboptimal? + _ = try sc.present(&.{flight.complete}, tgt); // todo suboptimal? 
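        // A hedged sketch for the "todo suboptimal?" note above: keep the vk.Result
        // instead of discarding it and request a swapchain rebuild when the images are
        // merely suboptimal for the surface (this assumes sc.mark() schedules a rebuild,
        // as it does for framebufferSize events in the event loop):
        //
        //     const present_result = try sc.present(&.{flight.complete}, tgt);
        //     if (present_result == .suboptimal_khr) sc.mark();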
} try au.D.deviceWaitIdle(); From 2f678f273f3f79bc8fd3485b27b625154a90ced3 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Mon, 8 Jul 2024 16:53:27 -0400 Subject: [PATCH 083/113] extract imgui setup --- build.zig | 5 +-- src/au.zig | 12 ++++++- src/au/ui.zig | 90 +++++++++++++++++++++++++++++++++++++++++++++++++++ src/main.zig | 61 ++++++++++------------------------ 4 files changed, 121 insertions(+), 47 deletions(-) create mode 100644 src/au/ui.zig diff --git a/build.zig b/build.zig index d22f07e..1177ab2 100644 --- a/build.zig +++ b/build.zig @@ -3,8 +3,9 @@ const vkgen = @import("vulkan-zig"); pub fn build(b: *std.Build) void { const target = b.standardTargetOptions(.{}); - - const optimize = b.standardOptimizeOption(.{}); + const optimize = b.standardOptimizeOption(.{ + .preferred_optimize_mode = .ReleaseSafe, + }); const vk = b.dependency("vulkan-zig", .{ .registry = @as([]const u8, b.pathFromRoot("reg/vk.xml")), diff --git a/src/au.zig b/src/au.zig index 4720b03..785db8e 100644 --- a/src/au.zig +++ b/src/au.zig @@ -32,7 +32,7 @@ pub const device_extensions: []const [*:0]const u8 = &.{ }; pub const app_info: vk.ApplicationInfo = .{ - .p_application_name = "hey tildes!", + .p_application_name = "zig-glfw-vulkan", .application_version = vk.makeApiVersion(0, 0, 0, 0), .p_engine_name = "zig-glfw-vulkan", .engine_version = vk.makeApiVersion(0, 0, 0, 0), @@ -209,6 +209,13 @@ const CandidateDeviceInfo = struct { res.pdev = pdev; + const props = I.getPhysicalDeviceProperties(pdev); + score += switch (props.device_type) { + vk.PhysicalDeviceType.discrete_gpu => 1000, + vk.PhysicalDeviceType.integrated_gpu => 500, + else => 0, + }; + var format_count: u32 = undefined; _ = try I.getPhysicalDeviceSurfaceFormatsKHR(pdev, W.surface, &format_count, null); if (format_count == 0) return error.NoSurfaceFormats; @@ -233,8 +240,11 @@ const CandidateDeviceInfo = struct { defer alloc.free(modes); _ = try I.getPhysicalDeviceSurfacePresentModesKHR(pdev, W.surface, &mode_count, modes.ptr); + std.debug.print("Modes ({s}): {any}\n", .{props.device_name, modes}); + if (std.mem.indexOfAny(vk.PresentModeKHR, modes, &.{ vk.PresentModeKHR.mailbox_khr, + vk.PresentModeKHR.immediate_khr, })) |idx| { res.mode = modes[idx]; } else { diff --git a/src/au/ui.zig b/src/au/ui.zig new file mode 100644 index 0000000..9d4c5f3 --- /dev/null +++ b/src/au/ui.zig @@ -0,0 +1,90 @@ +const std = @import("std"); +const vk = @import("vk"); +const im = @import("cimgui"); +const au = @import("../au.zig"); +const c = @import("../c.zig"); + +pub usingnamespace im.c; + +pub fn loader_wrapper(procname: [*c]const u8, _: ?*anyopaque) callconv(.C) vk.PfnVoidFunction { + return c.glfwGetInstanceProcAddress(au.I.handle, procname); +} + +var descriptor_pool: vk.DescriptorPool = undefined; + +pub fn init(frames_in_flight: usize) !*im.c.ImGuiContext { + const ctx = im.c.igCreateContext(null) orelse return error.igCreateContextFailed; + errdefer im.c.igDestroyContext(ctx); + + if (im.c.ImGui_ImplVulkan_LoadFunctions(loader_wrapper, null) != true) { + return error.igVulkanLoadFunctionsFailed; + } + + if (im.c.ImGui_ImplGlfw_InitForVulkan(@ptrCast(au.W.handle), true) != true) { + return error.igGlfwInitFailed; + } + errdefer im.c.ImGui_ImplGlfw_Shutdown(); + + descriptor_pool = try au.D.createDescriptorPool(&vk.DescriptorPoolCreateInfo{ + .flags = .{ .free_descriptor_set_bit = true }, + .pool_size_count = 1, + .p_pool_sizes = &.{vk.DescriptorPoolSize{ .descriptor_count = 32, .type = .combined_image_sampler }}, + .max_sets = 32, + }, null); + 
errdefer au.D.destroyDescriptorPool(descriptor_pool, null); + + if (im.c.ImGui_ImplVulkan_Init(@constCast(&im.c.ImGui_ImplVulkan_InitInfo{ + .Instance = @ptrFromInt(@intFromEnum(au.I.handle)), + .PhysicalDevice = @ptrFromInt(@intFromEnum(au.device_config.pdev)), + .Device = @ptrFromInt(@intFromEnum(au.D.handle)), + .QueueFamily = au.device_config.family, + .Queue = @ptrFromInt(@intFromEnum(au.Q.handle)), + .DescriptorPool = @ptrFromInt(@intFromEnum(descriptor_pool)), + .RenderPass = null, + .MinImageCount = 2, + .ImageCount = @intCast(frames_in_flight), + .PipelineRenderingCreateInfo = @bitCast(vk.PipelineRenderingCreateInfo{ + .view_mask = 0, + .depth_attachment_format = .undefined, + .stencil_attachment_format = .undefined, + .color_attachment_count = 1, + .p_color_attachment_formats = &.{au.device_config.format.format}, + }), + .MSAASamples = 0, + .PipelineCache = null, + .Subpass = 0, + .UseDynamicRendering = true, + .Allocator = null, + })) != true) { + return error.igVulkanInitFailed; + } + errdefer im.c.ImGui_ImplVulkan_Shutdown(); + + if (im.c.ImGui_ImplVulkan_CreateFontsTexture() != true) { + return error.igVulkanFontTextureFailed; + } + + return ctx; +} + +pub fn deinit(ctx: *im.c.ImGuiContext) void { + im.c.ImGui_ImplVulkan_Shutdown(); + au.D.destroyDescriptorPool(descriptor_pool, null); + im.c.ImGui_ImplGlfw_Shutdown(); + im.c.igDestroyContext(ctx); +} + +pub fn NewFrame() void { + im.c.ImGui_ImplGlfw_NewFrame(); + im.c.ImGui_ImplVulkan_NewFrame(); + im.c.igNewFrame(); +} + +pub fn EndFrame() void { + im.c.igEndFrame(); + im.c.igRender(); +} + +pub fn Draw(cmd: au.CommandBufferProxy) void { + im.c.ImGui_ImplVulkan_RenderDrawData(im.c.igGetDrawData(), @ptrFromInt(@intFromEnum(cmd.handle)), null); +} diff --git a/src/main.zig b/src/main.zig index 7808765..8ae5116 100644 --- a/src/main.zig +++ b/src/main.zig @@ -5,10 +5,10 @@ const shaders = @import("shaders"); const Allocator = std.mem.Allocator; const au = @import("au.zig"); -const im = @import("cimgui"); - const Uber = @import("Uber.zig"); +const ui = @import("au/ui.zig"); + const vertices = [_]Uber.Vertex{ // Vulkan depth range is 0, 1 instead of OpenGL -1, 1 .{ .pos = .{ -0.5, -0.5, -0.5, 1.0 }, .color = .{ 1, 0, 0 } }, @@ -69,14 +69,22 @@ pub fn main() !void { try au.init(alloc); defer au.deinit(); + { + const props = au.I.getPhysicalDeviceProperties(au.device_config.pdev); + std.debug.print( + "Selected Device:\n {s}\n mode: {}\n", + .{ props.device_name, au.device_config.mode }, + ); + } + var sc = try au.SwapChain.init(alloc); defer sc.deinit(); var flights = try au.Flights.init(alloc, 3); // FRAMES IN FLIGHT defer flights.deinit(); - const ctx = im.c.igCreateContext(null) orelse return error.igCreateContextFailed; - defer im.c.igDestroyContext(ctx); + const ctx = try ui.init(flights.flights.len); + defer ui.deinit(ctx); const descriptorPool = try au.D.createDescriptorPool(&vk.DescriptorPoolCreateInfo{ .flags = .{ .free_descriptor_set_bit = true }, @@ -88,41 +96,8 @@ pub fn main() !void { }, null); defer au.D.destroyDescriptorPool(descriptorPool, null); - _ = im.c.ImGui_ImplVulkan_LoadFunctions(loader_wrapper, null); - _ = im.c.ImGui_ImplGlfw_InitForVulkan(@ptrCast(au.W.handle), true); - defer im.c.ImGui_ImplGlfw_Shutdown(); - _ = try sc.rebuild(); - const prci: vk.PipelineRenderingCreateInfo = .{ - .view_mask = 0, - .depth_attachment_format = .undefined, - .stencil_attachment_format = .undefined, - .color_attachment_count = 1, - .p_color_attachment_formats = &.{au.device_config.format.format}, - }; - - var info: 
im.c.ImGui_ImplVulkan_InitInfo = .{ - .Instance = @ptrFromInt(@intFromEnum(au.I.handle)), - .PhysicalDevice = @ptrFromInt(@intFromEnum(au.device_config.pdev)), - .Device = @ptrFromInt(@intFromEnum(au.D.handle)), - .QueueFamily = au.device_config.family, - .Queue = @ptrFromInt(@intFromEnum(au.Q.handle)), - .DescriptorPool = @ptrFromInt(@intFromEnum(descriptorPool)), - .RenderPass = null, - .MinImageCount = 2, - .ImageCount = @intCast(flights.flights.len), - .PipelineRenderingCreateInfo = @bitCast(prci), - .MSAASamples = 0, - .PipelineCache = null, - .Subpass = 0, - .UseDynamicRendering = true, - .Allocator = null, - }; - _ = im.c.ImGui_ImplVulkan_Init(&info); - _ = im.c.ImGui_ImplVulkan_CreateFontsTexture(); - defer im.c.ImGui_ImplVulkan_Shutdown(); - const cache = try au.D.createPipelineCache(&vk.PipelineCacheCreateInfo{}, null); defer au.D.destroyPipelineCache(cache, null); @@ -130,6 +105,7 @@ pub fn main() !void { defer uber.deinit(); const vkalloc = au.VkAllocator.init(); + std.debug.print("heaps: {any}\ntypes: {any}\n", .{ vkalloc.heaps(), vkalloc.types() }); const vertex_buffer = try au.D.createBuffer(&vk.BufferCreateInfo{ .size = @sizeOf(@TypeOf(vertices)), @@ -220,12 +196,9 @@ pub fn main() !void { const rand = prng.random(); while (!au.W.should_close()) { - im.c.ImGui_ImplGlfw_NewFrame(); - im.c.ImGui_ImplVulkan_NewFrame(); - im.c.igNewFrame(); - im.c.igShowDemoWindow(null); - im.c.igEndFrame(); - im.c.igRender(); + ui.NewFrame(); + ui.igShowMetricsWindow(null); + ui.EndFrame(); const flight = flights.next(); @@ -267,7 +240,7 @@ pub fn main() !void { index_buffer, descriptorSet, ); - im.c.ImGui_ImplVulkan_RenderDrawData(im.c.igGetDrawData(), @ptrFromInt(@intFromEnum(cmd.handle)), null); + ui.Draw(cmd); tgt.end_rendering(cmd); for (vertex_data) |*v| { From 1cb340e154ff959c5bac8a2743378fc768f2aa5d Mon Sep 17 00:00:00 2001 From: David Allemang Date: Mon, 8 Jul 2024 17:21:09 -0400 Subject: [PATCH 084/113] fix crash in immediate present mode --- src/au.zig | 2 - src/main.zig | 125 ++++++++++++++++++++++++++------------------------- 2 files changed, 63 insertions(+), 64 deletions(-) diff --git a/src/au.zig b/src/au.zig index 785db8e..4732283 100644 --- a/src/au.zig +++ b/src/au.zig @@ -240,8 +240,6 @@ const CandidateDeviceInfo = struct { defer alloc.free(modes); _ = try I.getPhysicalDeviceSurfacePresentModesKHR(pdev, W.surface, &mode_count, modes.ptr); - std.debug.print("Modes ({s}): {any}\n", .{props.device_name, modes}); - if (std.mem.indexOfAny(vk.PresentModeKHR, modes, &.{ vk.PresentModeKHR.mailbox_khr, vk.PresentModeKHR.immediate_khr, diff --git a/src/main.zig b/src/main.zig index 8ae5116..c66ba38 100644 --- a/src/main.zig +++ b/src/main.zig @@ -57,10 +57,6 @@ fn record_render( cmd.drawIndexed(indices.len, 1, 0, 0, 0); } -pub fn loader_wrapper(procname: [*c]const u8, _: ?*anyopaque) callconv(.C) vk.PfnVoidFunction { - return c.glfwGetInstanceProcAddress(au.I.handle, procname); -} - pub fn main() !void { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; defer _ = gpa.detectLeaks(); @@ -69,14 +65,6 @@ pub fn main() !void { try au.init(alloc); defer au.deinit(); - { - const props = au.I.getPhysicalDeviceProperties(au.device_config.pdev); - std.debug.print( - "Selected Device:\n {s}\n mode: {}\n", - .{ props.device_name, au.device_config.mode }, - ); - } - var sc = try au.SwapChain.init(alloc); defer sc.deinit(); @@ -105,7 +93,6 @@ pub fn main() !void { defer uber.deinit(); const vkalloc = au.VkAllocator.init(); - std.debug.print("heaps: {any}\ntypes: {any}\n", .{ vkalloc.heaps(), 
vkalloc.types() }); const vertex_buffer = try au.D.createBuffer(&vk.BufferCreateInfo{ .size = @sizeOf(@TypeOf(vertices)), @@ -215,64 +202,78 @@ pub fn main() !void { } } - _ = try sc.rebuild(); - _ = try au.D.waitForFences(1, &.{flight.fence}, vk.TRUE, std.math.maxInt(u64)); try au.D.resetFences(1, &.{flight.fence}); - try au.D.resetCommandPool(flight.pool, .{}); - const tgt = try sc.acquire(flight.acquire, .null_handle); + // TODO need to check the standard to see what happens to a fence or semaphore on OutOfDateKHR error. + // acquire, submit, and present. - var cmd = au.CommandBufferProxy.init(flight.cmd, au.D.wrapper); - try cmd.beginCommandBuffer(&.{ .flags = .{ .one_time_submit_bit = true } }); + while (true) { + _ = try sc.rebuild(); - const render_area: vk.Rect2D = .{ - .offset = .{ .x = 0, .y = 0 }, - .extent = sc.cinfo.image_extent, - }; + const target = sc.acquire(flight.acquire, .null_handle) catch |err| switch (err) { + error.OutOfDateKHR => { + sc.mark(); + continue; + }, + else => return err, + }; - tgt.begin_rendering(cmd, render_area); - record_render( - cmd, - uber, - render_area, - vertex_buffer, - index_buffer, - descriptorSet, - ); - ui.Draw(cmd); - tgt.end_rendering(cmd); + try au.D.resetCommandPool(flight.pool, .{}); + var cmd = au.CommandBufferProxy.init(flight.cmd, au.D.wrapper); + try cmd.beginCommandBuffer(&.{ .flags = .{ .one_time_submit_bit = true } }); - for (vertex_data) |*v| { - for (v.pos[0..2]) |*f| { - f.* += (rand.float(f32) - 0.5) * 0.001; + const render_area: vk.Rect2D = .{ + .offset = .{ .x = 0, .y = 0 }, + .extent = sc.cinfo.image_extent, + }; + + target.begin_rendering(cmd, render_area); + record_render( + cmd, + uber, + render_area, + vertex_buffer, + index_buffer, + descriptorSet, + ); + ui.Draw(cmd); + target.end_rendering(cmd); + + for (vertex_data) |*v| { + for (v.pos[0..2]) |*f| { + f.* += (rand.float(f32) - 0.5) * 0.001; + } + } + + try cmd.endCommandBuffer(); + + try au.Q.submit( + 1, + &.{ + vk.SubmitInfo{ + .wait_semaphore_count = 1, + .p_wait_semaphores = @ptrCast(&flight.acquire), + .p_wait_dst_stage_mask = @ptrCast(&vk.PipelineStageFlags{ .color_attachment_output_bit = true }), + .command_buffer_count = 1, + .p_command_buffers = @ptrCast(&cmd.handle), + .signal_semaphore_count = 1, + .p_signal_semaphores = @ptrCast(&flight.complete), + }, + }, + flight.fence, + ); + + if (sc.present(&.{flight.complete}, target)) |_| { + break; + } else |err| switch (err) { + error.OutOfDateKHR => { + sc.mark(); + continue; + }, + else => return err, } } - - try cmd.endCommandBuffer(); - - au.Q.submit( - 1, - &.{ - vk.SubmitInfo{ - .wait_semaphore_count = 1, - .p_wait_semaphores = @ptrCast(&flight.acquire), - .p_wait_dst_stage_mask = @ptrCast(&vk.PipelineStageFlags{ .color_attachment_output_bit = true }), - .command_buffer_count = 1, - .p_command_buffers = @ptrCast(&cmd.handle), - .signal_semaphore_count = 1, - .p_signal_semaphores = @ptrCast(&flight.complete), - }, - }, - flight.fence, - ) catch { - std.debug.print("Failed to submit.\nWaiting for idle...", .{}); - au.D.deviceWaitIdle() catch - std.debug.print("deviceWaitIdle failed\n", .{}); - @panic("Submission failed"); - }; - - _ = try sc.present(&.{flight.complete}, tgt); // todo suboptimal? 
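        // A hedged note on the TODO above about fences/semaphores and OutOfDateKHR:
        // as far as the spec goes, vkAcquireNextImageKHR signals the given semaphore and
        // fence only when it actually acquires an image (success codes, including
        // VK_SUBOPTIMAL_KHR), so on VK_ERROR_OUT_OF_DATE_KHR flight.acquire stays
        // unsignaled and can be reused for the retried acquire. A rejected present, by
        // contrast, still executes its semaphore waits, and the preceding queue submit
        // still signals flight.fence, so the fence still has to be waited on before the
        // flight is reused.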
} try au.D.deviceWaitIdle(); From 260f19e13c944ae36328b43e8db86e4689380f2a Mon Sep 17 00:00:00 2001 From: David Allemang Date: Mon, 8 Jul 2024 23:28:34 -0400 Subject: [PATCH 085/113] debug device and present mode --- src/au/SwapChain.zig | 20 +++++++++++++------- src/main.zig | 5 +++++ 2 files changed, 18 insertions(+), 7 deletions(-) diff --git a/src/au/SwapChain.zig b/src/au/SwapChain.zig index 2d4e62e..77a28a9 100644 --- a/src/au/SwapChain.zig +++ b/src/au/SwapChain.zig @@ -80,13 +80,19 @@ pub fn rebuild(self: *Self) !bool { } try self.views.resize(self.alloc, count); for (self.images.items, self.views.items) |image, *view| { - view.* = try au.D.createImageView(&vk.ImageViewCreateInfo{ .image = image, .view_type = .@"2d", .format = self.cinfo.image_format, .components = .{ .r = .identity, .g = .identity, .b = .identity, .a = .identity }, .subresource_range = .{ - .aspect_mask = .{ .color_bit = true }, - .base_mip_level = 0, - .level_count = 1, - .base_array_layer = 0, - .layer_count = 1, - } }, null); + view.* = try au.D.createImageView(&vk.ImageViewCreateInfo{ + .image = image, + .view_type = .@"2d", + .format = self.cinfo.image_format, + .components = .{ .r = .identity, .g = .identity, .b = .identity, .a = .identity }, + .subresource_range = .{ + .aspect_mask = .{ .color_bit = true }, + .base_mip_level = 0, + .level_count = 1, + .base_array_layer = 0, + .layer_count = 1, + }, + }, null); } return true; diff --git a/src/main.zig b/src/main.zig index c66ba38..734ef6d 100644 --- a/src/main.zig +++ b/src/main.zig @@ -65,6 +65,11 @@ pub fn main() !void { try au.init(alloc); defer au.deinit(); + { + const pdev_prop = au.I.getPhysicalDeviceProperties(au.device_config.pdev); + std.debug.print("Selected '{s}' in mode '{any}'\n", .{ pdev_prop.device_name, au.device_config.mode }); + } + var sc = try au.SwapChain.init(alloc); defer sc.deinit(); From eaf97a306f0a29d263dc7794cd4051dd0f2beb8a Mon Sep 17 00:00:00 2001 From: David Allemang Date: Tue, 9 Jul 2024 09:39:46 -0400 Subject: [PATCH 086/113] fix running on laptop --- src/main.zig | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/main.zig b/src/main.zig index 734ef6d..0482c77 100644 --- a/src/main.zig +++ b/src/main.zig @@ -273,6 +273,8 @@ pub fn main() !void { break; } else |err| switch (err) { error.OutOfDateKHR => { + _ = try au.D.waitForFences(1, &.{flight.fence}, vk.TRUE, std.math.maxInt(u64)); + try au.D.resetFences(1, &.{flight.fence}); sc.mark(); continue; }, From 1269018e61894762cc23715d57455f12e4bc2949 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Tue, 9 Jul 2024 13:19:56 -0400 Subject: [PATCH 087/113] refactor stub --- src/App.zig | 16 +++ src/au.zig | 1 + src/main.zig | 288 +++------------------------------------------ src/nu.zig | 22 ++++ src/nu/Bus.zig | 294 ++++++++++++++++++++++++++++++++++++++++++++++ src/nu/ImGui.zig | 20 ++++ src/nu/Render.zig | 23 ++++ src/nu/Window.zig | 77 ++++++++++++ 8 files changed, 468 insertions(+), 273 deletions(-) create mode 100644 src/App.zig create mode 100644 src/nu.zig create mode 100644 src/nu/Bus.zig create mode 100644 src/nu/ImGui.zig create mode 100644 src/nu/Render.zig create mode 100644 src/nu/Window.zig diff --git a/src/App.zig b/src/App.zig new file mode 100644 index 0000000..69ab7ca --- /dev/null +++ b/src/App.zig @@ -0,0 +1,16 @@ +const std = @import("std"); +const nu = @import("nu.zig"); + +const Self = @This(); + +pub fn init(alloc: std.mem.Allocator, render: *nu.Render, imgui: *nu.ImGui) !Self { + _ = alloc; + _ = render; + _ = imgui; + + return .{}; +} + +pub fn 
deinit(self: *Self) void { + _ = self; +} diff --git a/src/au.zig b/src/au.zig index 4732283..2ce76d9 100644 --- a/src/au.zig +++ b/src/au.zig @@ -105,6 +105,7 @@ fn init_glfw() !void { return error.glfwInitFailed; errdefer c.glfwTerminate(); + // todo move to render if (c.glfwVulkanSupported() != c.GLFW_TRUE) return error.glfwNoVulkan; } diff --git a/src/main.zig b/src/main.zig index 0482c77..59eae20 100644 --- a/src/main.zig +++ b/src/main.zig @@ -1,287 +1,29 @@ const std = @import("std"); -const vk = @import("vk"); -const c = @import("c.zig"); -const shaders = @import("shaders"); -const Allocator = std.mem.Allocator; -const au = @import("au.zig"); -const Uber = @import("Uber.zig"); +const nu = @import("nu.zig"); -const ui = @import("au/ui.zig"); - -const vertices = [_]Uber.Vertex{ - // Vulkan depth range is 0, 1 instead of OpenGL -1, 1 - .{ .pos = .{ -0.5, -0.5, -0.5, 1.0 }, .color = .{ 1, 0, 0 } }, - .{ .pos = .{ -0.5, 0.5, -0.5, 1.0 }, .color = .{ 0, 1, 0 } }, - .{ .pos = .{ 0.5, -0.5, -0.5, 1.0 }, .color = .{ 0, 0, 1 } }, - .{ .pos = .{ 0.5, 0.5, -0.5, 1.0 }, .color = .{ 1, 1, 0 } }, - .{ .pos = .{ -0.5, -0.5, 0.5, 1.0 }, .color = .{ 1, 0, 0 } }, - .{ .pos = .{ -0.5, 0.5, 0.5, 1.0 }, .color = .{ 0, 1, 0 } }, - .{ .pos = .{ 0.5, -0.5, 0.5, 1.0 }, .color = .{ 0, 0, 1 } }, - .{ .pos = .{ 0.5, 0.5, 0.5, 1.0 }, .color = .{ 1, 1, 0 } }, -}; - -const indices = [_]Uber.Index{ 4, 5, 6, 6, 5, 7 }; - -const uniform = Uber.Uniform{ - .mat = .{ - 0.5, 0.0, 0.0, 0.0, - 0.0, 1.0, 0.0, 0.0, - 0.0, 0.0, 1.0, 0.0, - 0.0, 0.0, 0.0, 1.0, - }, -}; - -fn record_render( - cmd: au.CommandBufferProxy, - uber: Uber, - area: vk.Rect2D, // render area, scissor, and viewport. - vertex_buffer: vk.Buffer, - index_buffer: vk.Buffer, - descriptor_set: vk.DescriptorSet, -) void { - cmd.setViewport(0, 1, &.{.{ - .x = @floatFromInt(area.offset.x), - .y = @floatFromInt(area.offset.y), - .width = @floatFromInt(area.extent.width), - .height = @floatFromInt(area.extent.height), - .min_depth = 0, - .max_depth = 1, - }}); - cmd.setScissor(0, 1, &.{area}); - - cmd.bindPipeline(.graphics, uber.pipeline); - cmd.bindDescriptorSets(.graphics, uber.layout, 0, 1, &.{descriptor_set}, 0, null); - cmd.bindVertexBuffers(0, 1, &.{vertex_buffer}, &.{0}); - cmd.bindIndexBuffer(index_buffer, 0, .uint16); - cmd.drawIndexed(indices.len, 1, 0, 0, 0); -} +const App = @import("App.zig"); pub fn main() !void { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; defer _ = gpa.detectLeaks(); const alloc = gpa.allocator(); - try au.init(alloc); - defer au.deinit(); + var window = try nu.Window.init(alloc, .{ .title = "Hello World" }); + defer window.deinit(); - { - const pdev_prop = au.I.getPhysicalDeviceProperties(au.device_config.pdev); - std.debug.print("Selected '{s}' in mode '{any}'\n", .{ pdev_prop.device_name, au.device_config.mode }); - } + var render = try nu.Render.init(alloc, &window); + defer render.deinit(); - var sc = try au.SwapChain.init(alloc); - defer sc.deinit(); + var imgui = try nu.ImGui.init(alloc, &window, &render); + defer imgui.deinit(); - var flights = try au.Flights.init(alloc, 3); // FRAMES IN FLIGHT - defer flights.deinit(); + var app = try App.init(alloc, &render, &imgui); + defer app.deinit(); - const ctx = try ui.init(flights.flights.len); - defer ui.deinit(ctx); - - const descriptorPool = try au.D.createDescriptorPool(&vk.DescriptorPoolCreateInfo{ - .flags = .{ .free_descriptor_set_bit = true }, - .pool_size_count = 1, - .p_pool_sizes = &.{ - vk.DescriptorPoolSize{ .descriptor_count = 32, .type = 
.combined_image_sampler }, - }, - .max_sets = 32, - }, null); - defer au.D.destroyDescriptorPool(descriptorPool, null); - - _ = try sc.rebuild(); - - const cache = try au.D.createPipelineCache(&vk.PipelineCacheCreateInfo{}, null); - defer au.D.destroyPipelineCache(cache, null); - - const uber = try Uber.init(cache); - defer uber.deinit(); - - const vkalloc = au.VkAllocator.init(); - - const vertex_buffer = try au.D.createBuffer(&vk.BufferCreateInfo{ - .size = @sizeOf(@TypeOf(vertices)), - .usage = .{ .vertex_buffer_bit = true }, - .sharing_mode = .exclusive, - }, null); - defer au.D.destroyBuffer(vertex_buffer, null); - const vertex_memory = try vkalloc.alloc( - au.D.getBufferMemoryRequirements(vertex_buffer), - .{ .host_visible_bit = true, .host_coherent_bit = true }, - ); - defer vkalloc.free(vertex_memory); - try au.D.bindBufferMemory(vertex_buffer, vertex_memory, 0); - const vertex_data: *align(1) @TypeOf(vertices) = - @ptrCast(try au.D.mapMemory(vertex_memory, 0, vk.WHOLE_SIZE, .{})); - defer au.D.unmapMemory(vertex_memory); - vertex_data.* = vertices; - - const index_buffer = try au.D.createBuffer(&vk.BufferCreateInfo{ - .size = @sizeOf(@TypeOf(indices)), - .usage = .{ .index_buffer_bit = true }, - .sharing_mode = .exclusive, - }, null); - defer au.D.destroyBuffer(index_buffer, null); - const index_memory = try vkalloc.alloc( - au.D.getBufferMemoryRequirements(index_buffer), - .{ .host_visible_bit = true, .host_coherent_bit = true }, - ); - defer vkalloc.free(index_memory); - try au.D.bindBufferMemory(index_buffer, index_memory, 0); - const index_data: *align(1) @TypeOf(indices) = - @ptrCast(try au.D.mapMemory(index_memory, 0, vk.WHOLE_SIZE, .{})); - defer au.D.unmapMemory(index_memory); - index_data.* = indices; - - // todo ring buffer for frames in flight. need to use an offset when binding - // use dynamic offset - descriptor type VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC - const uniform_buffer = try au.D.createBuffer(&vk.BufferCreateInfo{ - .size = @sizeOf(@TypeOf(uniform)), - .usage = .{ .uniform_buffer_bit = true }, - .sharing_mode = .exclusive, - }, null); - defer au.D.destroyBuffer(uniform_buffer, null); - const uniform_memory = try vkalloc.alloc( - au.D.getBufferMemoryRequirements(uniform_buffer), - .{ .host_visible_bit = true, .host_coherent_bit = true }, - ); - defer vkalloc.free(uniform_memory); - try au.D.bindBufferMemory(uniform_buffer, uniform_memory, 0); - const uniform_data: *align(1) @TypeOf(uniform) = - @ptrCast(try au.D.mapMemory(uniform_memory, 0, vk.WHOLE_SIZE, .{})); - defer au.D.unmapMemory(uniform_memory); - uniform_data.* = uniform; - - var descriptorSet: vk.DescriptorSet = undefined; - try au.D.allocateDescriptorSets(&vk.DescriptorSetAllocateInfo{ - .descriptor_pool = descriptorPool, - .descriptor_set_count = 1, - .p_set_layouts = &.{uber.set_layout}, - }, @ptrCast(&descriptorSet)); - defer au.D.freeDescriptorSets(descriptorPool, 1, &.{descriptorSet}) catch unreachable; // todo handle this? 
- - au.D.updateDescriptorSets( - 1, - &.{ - vk.WriteDescriptorSet{ - .dst_set = descriptorSet, - .dst_binding = 0, - .dst_array_element = 0, - .descriptor_type = .uniform_buffer, - .descriptor_count = 1, - .p_image_info = undefined, - .p_texel_buffer_view = undefined, - .p_buffer_info = &.{ - vk.DescriptorBufferInfo{ - .buffer = uniform_buffer, - .offset = 0, - .range = vk.WHOLE_SIZE, - }, - }, - }, - }, - 0, - null, - ); - - var prng = std.Random.Sfc64.init(std.crypto.random.int(u64)); - const rand = prng.random(); - - while (!au.W.should_close()) { - ui.NewFrame(); - ui.igShowMetricsWindow(null); - ui.EndFrame(); - - const flight = flights.next(); - - const events = if (au.W.focused()) - au.poll_events() - else - au.wait_events_timeout(0.5); - - for (events) |u| { - switch (u) { - .framebufferSize => sc.mark(), - .cursorPos, .windowPos, .windowSize, .windowRefresh => {}, - else => |e| std.debug.print("{any}\n", .{e}), - } - } - - _ = try au.D.waitForFences(1, &.{flight.fence}, vk.TRUE, std.math.maxInt(u64)); - try au.D.resetFences(1, &.{flight.fence}); - - // TODO need to check the standard to see what happens to a fence or semaphore on OutOfDateKHR error. - // acquire, submit, and present. - - while (true) { - _ = try sc.rebuild(); - - const target = sc.acquire(flight.acquire, .null_handle) catch |err| switch (err) { - error.OutOfDateKHR => { - sc.mark(); - continue; - }, - else => return err, - }; - - try au.D.resetCommandPool(flight.pool, .{}); - var cmd = au.CommandBufferProxy.init(flight.cmd, au.D.wrapper); - try cmd.beginCommandBuffer(&.{ .flags = .{ .one_time_submit_bit = true } }); - - const render_area: vk.Rect2D = .{ - .offset = .{ .x = 0, .y = 0 }, - .extent = sc.cinfo.image_extent, - }; - - target.begin_rendering(cmd, render_area); - record_render( - cmd, - uber, - render_area, - vertex_buffer, - index_buffer, - descriptorSet, - ); - ui.Draw(cmd); - target.end_rendering(cmd); - - for (vertex_data) |*v| { - for (v.pos[0..2]) |*f| { - f.* += (rand.float(f32) - 0.5) * 0.001; - } - } - - try cmd.endCommandBuffer(); - - try au.Q.submit( - 1, - &.{ - vk.SubmitInfo{ - .wait_semaphore_count = 1, - .p_wait_semaphores = @ptrCast(&flight.acquire), - .p_wait_dst_stage_mask = @ptrCast(&vk.PipelineStageFlags{ .color_attachment_output_bit = true }), - .command_buffer_count = 1, - .p_command_buffers = @ptrCast(&cmd.handle), - .signal_semaphore_count = 1, - .p_signal_semaphores = @ptrCast(&flight.complete), - }, - }, - flight.fence, - ); - - if (sc.present(&.{flight.complete}, target)) |_| { - break; - } else |err| switch (err) { - error.OutOfDateKHR => { - _ = try au.D.waitForFences(1, &.{flight.fence}, vk.TRUE, std.math.maxInt(u64)); - try au.D.resetFences(1, &.{flight.fence}); - sc.mark(); - continue; - }, - else => return err, - } - } - } - - try au.D.deviceWaitIdle(); + try nu.run(&window, .{ + &app, + &imgui, + &render, + }); } diff --git a/src/nu.zig b/src/nu.zig new file mode 100644 index 0000000..3bafb54 --- /dev/null +++ b/src/nu.zig @@ -0,0 +1,22 @@ +const std = @import("std"); + +pub const Window = @import("nu/Window.zig"); +pub const Render = @import("nu/Render.zig"); +pub const ImGui = @import("nu/ImGui.zig"); + +pub fn run( + driver: anytype, + modules: anytype, +) !void { + while (driver.next()) |events| { + // todo event handler + _ = events; + + inline for (modules) |module| { + if (std.meta.hasMethod(@TypeOf(module), "frame")) + module.frame(); + } + + // todo fixed timestep + } +} diff --git a/src/nu/Bus.zig b/src/nu/Bus.zig new file mode 100644 index 0000000..0dc8e33 --- 
/dev/null +++ b/src/nu/Bus.zig @@ -0,0 +1,294 @@ +const std = @import("std"); +// const vk = @import("vk"); +// const c = @import("../c.zig"); +// const Window = @import("../au.zig").Window; +const Self = @This(); + +const Window = @import("Window.zig"); +const c = Window.c; + +alloc: std.mem.Allocator, +events: std.ArrayListUnmanaged(Event) = .{}, // todo bounded array? +drops: std.ArrayListUnmanaged([][]const u8) = .{}, // todo bounded array? + +pub fn init(alloc: std.mem.Allocator) Self { + return .{ + .alloc = alloc, + }; +} + +pub fn deinit(self: *Self) void { + self.clear(); + + self.events.deinit(self.alloc); + self.drops.deinit(self.alloc); +} + +pub fn connect(self: *Self, handle: *c.GLFWwindow) void { + // todo somehow prevent double-connect? + c.glfwSetWindowUserPointer(handle, self); + _ = c.glfwSetWindowPosCallback(handle, onWindowPos); + _ = c.glfwSetWindowSizeCallback(handle, onWindowSize); + _ = c.glfwSetWindowCloseCallback(handle, onWindowClose); + _ = c.glfwSetWindowRefreshCallback(handle, onWindowRefresh); + _ = c.glfwSetWindowFocusCallback(handle, onWindowFocus); + _ = c.glfwSetWindowIconifyCallback(handle, onWindowIconify); + _ = c.glfwSetWindowMaximizeCallback(handle, onWindowMaximize); + _ = c.glfwSetFramebufferSizeCallback(handle, onFramebufferSize); + _ = c.glfwSetWindowContentScaleCallback(handle, onWindowContentScale); + _ = c.glfwSetMouseButtonCallback(handle, onMouseButton); + _ = c.glfwSetCursorPosCallback(handle, onCursorPos); + _ = c.glfwSetCursorEnterCallback(handle, onCursorEnter); + _ = c.glfwSetScrollCallback(handle, onScroll); + _ = c.glfwSetKeyCallback(handle, onKey); + _ = c.glfwSetCharModsCallback(handle, onCharMods); + _ = c.glfwSetDropCallback(handle, onDrop); +} + +pub fn disconnect(_: *Self, handle: *c.GLFWwindow) void { + // todo somehow prevent double-disconnect? 
+ c.glfwSetWindowUserPointer(handle, null); + _ = c.glfwSetWindowPosCallback(handle, null); + _ = c.glfwSetWindowSizeCallback(handle, null); + _ = c.glfwSetWindowCloseCallback(handle, null); + _ = c.glfwSetWindowRefreshCallback(handle, null); + _ = c.glfwSetWindowFocusCallback(handle, null); + _ = c.glfwSetWindowIconifyCallback(handle, null); + _ = c.glfwSetWindowMaximizeCallback(handle, null); + _ = c.glfwSetFramebufferSizeCallback(handle, null); + _ = c.glfwSetWindowContentScaleCallback(handle, null); + _ = c.glfwSetMouseButtonCallback(handle, null); + _ = c.glfwSetCursorPosCallback(handle, null); + _ = c.glfwSetCursorEnterCallback(handle, null); + _ = c.glfwSetScrollCallback(handle, null); + _ = c.glfwSetKeyCallback(handle, null); + _ = c.glfwSetCharModsCallback(handle, null); + _ = c.glfwSetDropCallback(handle, null); +} + +pub fn clear(self: *Self) void { + for (self.drops.items) |drop| { + for (drop) |path| { + self.alloc.free(path); + } + self.alloc.free(drop); + } + self.drops.clearAndFree(self.alloc); + + self.events.clearRetainingCapacity(); +} + +fn getBus(handle: ?*c.GLFWwindow) *Self { + return @alignCast(@ptrCast(c.glfwGetWindowUserPointer(handle))); +} + +pub const Event = union(enum) { + const WindowPos = struct { x: i32, y: i32 }; + const WindowSize = struct { x: i32, y: i32 }; + const WindowClose = struct {}; + const WindowRefresh = struct {}; + const WindowFocus = struct { focused: bool }; + const WindowIconify = struct { iconified: bool }; + const WindowMaximize = struct { maximized: bool }; + const FramebufferSize = struct { width: u32, height: u32 }; + const WindowContentScale = struct { x: f32, y: f32 }; + const MouseButton = struct { + button: c_int, // todo enum + action: c_int, // todo enum + mods: c_int, // todo bitmask + }; + const CursorPos = struct { x: f64, y: f64 }; + const CursorEnter = struct { entered: bool }; + const Scroll = struct { dx: f64, dy: f64 }; + const Key = struct { + key: c_int, // todo enum + scan: c_int, // todo ??? 
+ action: c_int, // todo enum + mods: c_int, // todo bitmask + }; + const Char = struct { + code: u21, + }; + const CharMods = struct { + code: u21, + mods: c_int, // todo bitmask + }; + const Drop = struct { + paths: []const []const u8, + }; + + windowPos: WindowPos, + windowSize: WindowSize, + windowClose: WindowClose, + windowRefresh: WindowRefresh, + windowFocus: WindowFocus, + windowIconify: WindowIconify, + windowMaximize: WindowMaximize, + framebufferSize: FramebufferSize, + windowContentScale: WindowContentScale, + mouseButton: MouseButton, + cursorPos: CursorPos, + cursorEnter: CursorEnter, + scroll: Scroll, + key: Key, + char: Char, + charMods: CharMods, + drop: Drop, +}; + +fn onWindowPos(handle: ?*c.GLFWwindow, x: c_int, y: c_int) callconv(.C) void { + const bus = getBus(handle); + bus.events.append(bus.alloc, .{ + .windowPos = .{ + .x = @intCast(x), + .y = @intCast(y), + }, + }) catch unreachable; // todo circular queue; warn +} + +fn onWindowSize(handle: ?*c.GLFWwindow, x: c_int, y: c_int) callconv(.C) void { + const bus = getBus(handle); + bus.events.append(bus.alloc, .{ + .windowSize = .{ + .x = @intCast(x), + .y = @intCast(y), + }, + }) catch unreachable; // todo circular queue; warn +} + +fn onWindowClose(handle: ?*c.GLFWwindow) callconv(.C) void { + const bus = getBus(handle); + bus.events.append(bus.alloc, .{ + .windowClose = .{}, + }) catch unreachable; // todo circular queue; warn +} + +fn onWindowRefresh(handle: ?*c.GLFWwindow) callconv(.C) void { + const bus = getBus(handle); + bus.events.append(bus.alloc, .{ + .windowRefresh = .{}, + }) catch unreachable; // todo circular queue; warn +} + +fn onWindowFocus(handle: ?*c.GLFWwindow, focused: c_int) callconv(.C) void { + const bus = getBus(handle); + bus.events.append(bus.alloc, .{ + .windowFocus = .{ + .focused = focused == c.GLFW_TRUE, + }, + }) catch unreachable; // todo circular queue; warn +} + +fn onWindowIconify(handle: ?*c.GLFWwindow, iconified: c_int) callconv(.C) void { + const bus = getBus(handle); + bus.events.append(bus.alloc, .{ + .windowIconify = .{ + .iconified = iconified == c.GLFW_TRUE, + }, + }) catch unreachable; // todo circular queue; warn +} + +fn onWindowMaximize(handle: ?*c.GLFWwindow, maximized: c_int) callconv(.C) void { + const bus = getBus(handle); + bus.events.append(bus.alloc, .{ + .windowMaximize = .{ + .maximized = maximized == c.GLFW_TRUE, + }, + }) catch unreachable; // todo circular queue; warn +} + +fn onFramebufferSize(handle: ?*c.GLFWwindow, width: c_int, height: c_int) callconv(.C) void { + const bus = getBus(handle); + bus.events.append(bus.alloc, .{ + .framebufferSize = .{ + .width = @intCast(width), + .height = @intCast(height), + }, + }) catch unreachable; // todo circular queue; warn +} + +fn onWindowContentScale(handle: ?*c.GLFWwindow, x: f32, y: f32) callconv(.C) void { + const bus = getBus(handle); + bus.events.append(bus.alloc, .{ + .windowContentScale = .{ + .x = x, + .y = y, + }, + }) catch unreachable; // todo circular queue; warn +} + +fn onMouseButton(handle: ?*c.GLFWwindow, button: c_int, action: c_int, mods: c_int) callconv(.C) void { + const bus = getBus(handle); + bus.events.append(bus.alloc, .{ + .mouseButton = .{ + .button = button, + .action = action, + .mods = mods, + }, + }) catch unreachable; // todo circular queue; warn +} + +fn onCursorPos(handle: ?*c.GLFWwindow, x: f64, y: f64) callconv(.C) void { + const bus = getBus(handle); + bus.events.append(bus.alloc, .{ + .cursorPos = .{ + .x = x, + .y = y, + }, + }) catch unreachable; // todo circular queue; 
warn +} + +fn onCursorEnter(handle: ?*c.GLFWwindow, entered: c_int) callconv(.C) void { + const bus = getBus(handle); + bus.events.append(bus.alloc, .{ + .cursorEnter = .{ + .entered = entered == c.GLFW_TRUE, + }, + }) catch unreachable; // todo circular queue; warn +} + +fn onScroll(handle: ?*c.GLFWwindow, dx: f64, dy: f64) callconv(.C) void { + const bus = getBus(handle); + bus.events.append(bus.alloc, .{ + .scroll = .{ + .dx = dx, + .dy = dy, + }, + }) catch unreachable; // todo circular queue; warn +} + +fn onKey(handle: ?*c.GLFWwindow, key: c_int, scan: c_int, action: c_int, mods: c_int) callconv(.C) void { + const bus = getBus(handle); + bus.events.append(bus.alloc, .{ + .key = .{ + .key = key, + .scan = scan, + .action = action, + .mods = mods, + }, + }) catch unreachable; // todo circular queue; warn +} + +fn onCharMods(handle: ?*c.GLFWwindow, code: c_uint, mods: c_int) callconv(.C) void { + const bus = getBus(handle); + bus.events.append(bus.alloc, .{ + .charMods = .{ + .code = @intCast(code), + .mods = mods, + }, + }) catch unreachable; // todo circular queue; warn +} + +fn onDrop(handle: ?*c.GLFWwindow, count: c_int, paths: [*c][*c]const u8) callconv(.C) void { + const bus = getBus(handle); + + const drops = bus.alloc.alloc([]const u8, @intCast(count)) catch unreachable; // todo warn + for (drops, paths) |*dst, src| { + dst.* = bus.alloc.dupe(u8, std.mem.sliceTo(src, 0)) catch unreachable; // todo warn + } + bus.drops.append(bus.alloc, drops) catch unreachable; // todo warn + + bus.events.append(bus.alloc, .{ + .drop = .{ .paths = drops }, + }) catch unreachable; // todo circular queue; warn +} diff --git a/src/nu/ImGui.zig b/src/nu/ImGui.zig new file mode 100644 index 0000000..8dbdf6a --- /dev/null +++ b/src/nu/ImGui.zig @@ -0,0 +1,20 @@ +//! all imgui code through this path + +const std = @import("std"); + +const Self = @This(); + +const Window = @import("Window.zig"); +const Render = @import("Render.zig"); + +pub fn init(alloc: std.mem.Allocator, window: *Window, render: *Render) !Self { + _ = alloc; + _ = window; + _ = render; + + return .{}; +} + +pub fn deinit(self: *Self) void { + _ = self; +} diff --git a/src/nu/Render.zig b/src/nu/Render.zig new file mode 100644 index 0000000..54dc41b --- /dev/null +++ b/src/nu/Render.zig @@ -0,0 +1,23 @@ +const std = @import("std"); + +const Window = @import("Window.zig"); + +const Self = @This(); + +// isolate all the vulkan code through this path +// except for imgui code + +// const au = @import("au.zig"); + +pub fn init(alloc: std.mem.Allocator, window: *Window) !Self { + _ = alloc; + _ = window; + + // todo check vulkan supported + // todo create window surface + return .{}; +} + +pub fn deinit(self: *Self) void { + _ = self; +} diff --git a/src/nu/Window.zig b/src/nu/Window.zig new file mode 100644 index 0000000..52234ca --- /dev/null +++ b/src/nu/Window.zig @@ -0,0 +1,77 @@ +//! 
GLFW Adaptor + +const std = @import("std"); + +pub const c = @cImport({ + @cDefine("GLFW_INCLUDE_NONE", {}); + @cInclude("GLFW/glfw3.h"); +}); + +const Self = @This(); + +pub const Bus = @import("Bus.zig"); + +pub const Options = struct { + title: [*:0]const u8, + width: u32 = 1280, + height: u32 = 720, + x11_class_name: [*:0]const u8 = "floating_window", + x11_instance_name: [*:0]const u8 = "floating_window", +}; + +alloc: std.mem.Allocator, +bus: *Bus, +handle: *c.GLFWwindow, + +pub fn init(alloc: std.mem.Allocator, options: Options) !Self { + if (c.glfwInit() != c.GLFW_TRUE) + return error.glfwInitFailed; + errdefer c.glfwTerminate(); + + const bus: *Bus = try alloc.create(Bus); + errdefer alloc.destroy(bus); + bus.* = Bus.init(alloc); + errdefer bus.deinit(); + + c.glfwWindowHint(c.GLFW_CLIENT_API, c.GLFW_NO_API); + c.glfwWindowHintString(c.GLFW_X11_CLASS_NAME, options.x11_class_name); + c.glfwWindowHintString(c.GLFW_X11_INSTANCE_NAME, options.x11_instance_name); + + const handle: *c.GLFWwindow = c.glfwCreateWindow( + @intCast(options.width), + @intCast(options.height), + options.title, + null, + null, + ) orelse + return error.glfWCreateWindowFailed; + errdefer c.glfwDestroyWindow(handle); + + bus.connect(handle); + errdefer bus.disconnect(handle); + + return .{ + .alloc = alloc, + .bus = bus, + .handle = handle, + }; +} + +pub fn deinit(self: *Self) void { + self.bus.deinit(); + self.alloc.destroy(self.bus); + c.glfwDestroyWindow(self.handle); + c.glfwTerminate(); +} + +pub fn next(self: *Self) ?[]Bus.Event { + self.bus.clear(); + + if (c.glfwWindowShouldClose(self.handle) == c.GLFW_TRUE) + return null; + + // c.glfwPollEvents(); + c.glfwWaitEvents(); + + return self.bus.events.items; +} From 4d3d4e6ee6c50d35717615bf39acc1cf0a422c6d Mon Sep 17 00:00:00 2001 From: David Allemang Date: Tue, 9 Jul 2024 13:57:08 -0400 Subject: [PATCH 088/113] modules are global --- src/App.zig | 9 ++------- src/main.zig | 28 ++++++++++++++++------------ src/nu.zig | 2 +- src/nu/ImGui.zig | 12 ++---------- src/nu/Render.zig | 14 +++++++------- src/nu/Window.zig | 46 ++++++++++++++++++++-------------------------- 6 files changed, 48 insertions(+), 63 deletions(-) diff --git a/src/App.zig b/src/App.zig index 69ab7ca..c0bd5ce 100644 --- a/src/App.zig +++ b/src/App.zig @@ -3,14 +3,9 @@ const nu = @import("nu.zig"); const Self = @This(); -pub fn init(alloc: std.mem.Allocator, render: *nu.Render, imgui: *nu.ImGui) !Self { +pub fn init(alloc: std.mem.Allocator) !void { _ = alloc; - _ = render; - _ = imgui; - - return .{}; } -pub fn deinit(self: *Self) void { - _ = self; +pub fn deinit() void { } diff --git a/src/main.zig b/src/main.zig index 59eae20..a075d3e 100644 --- a/src/main.zig +++ b/src/main.zig @@ -9,21 +9,25 @@ pub fn main() !void { defer _ = gpa.detectLeaks(); const alloc = gpa.allocator(); - var window = try nu.Window.init(alloc, .{ .title = "Hello World" }); - defer window.deinit(); + // todo declare or infer module dependencies, topological sort for init order + // problem: how to specify runtime options, like Window title? + // problem: where should gpa go? 
probably some "Engine" structure in nu.zig - var render = try nu.Render.init(alloc, &window); - defer render.deinit(); + try nu.Window.init(alloc, .{ .title = "Hello World" }); + defer nu.Window.deinit(); - var imgui = try nu.ImGui.init(alloc, &window, &render); - defer imgui.deinit(); + try nu.Render.init(alloc); + defer nu.Render.deinit(); - var app = try App.init(alloc, &render, &imgui); - defer app.deinit(); + try nu.ImGui.init(alloc); + defer nu.ImGui.deinit(); - try nu.run(&window, .{ - &app, - &imgui, - &render, + try App.init(alloc); + defer App.deinit(); + + try nu.run(nu.Window, .{ + App, + nu.ImGui, + nu.Render, }); } diff --git a/src/nu.zig b/src/nu.zig index 3bafb54..f3e7dbc 100644 --- a/src/nu.zig +++ b/src/nu.zig @@ -13,7 +13,7 @@ pub fn run( _ = events; inline for (modules) |module| { - if (std.meta.hasMethod(@TypeOf(module), "frame")) + if (@hasDecl(module, "frame")) module.frame(); } diff --git a/src/nu/ImGui.zig b/src/nu/ImGui.zig index 8dbdf6a..e5f5c61 100644 --- a/src/nu/ImGui.zig +++ b/src/nu/ImGui.zig @@ -2,19 +2,11 @@ const std = @import("std"); -const Self = @This(); - -const Window = @import("Window.zig"); const Render = @import("Render.zig"); -pub fn init(alloc: std.mem.Allocator, window: *Window, render: *Render) !Self { +pub fn init(alloc: std.mem.Allocator) !void { _ = alloc; - _ = window; - _ = render; - - return .{}; } -pub fn deinit(self: *Self) void { - _ = self; +pub fn deinit() void { } diff --git a/src/nu/Render.zig b/src/nu/Render.zig index 54dc41b..fa48ff7 100644 --- a/src/nu/Render.zig +++ b/src/nu/Render.zig @@ -2,22 +2,22 @@ const std = @import("std"); const Window = @import("Window.zig"); -const Self = @This(); - // isolate all the vulkan code through this path // except for imgui code // const au = @import("au.zig"); -pub fn init(alloc: std.mem.Allocator, window: *Window) !Self { +pub fn init(alloc: std.mem.Allocator) !void { _ = alloc; - _ = window; + + std.debug.print("Init Render\n", .{}); // todo check vulkan supported // todo create window surface - return .{}; } -pub fn deinit(self: *Self) void { - _ = self; +pub fn frame() void { + std.debug.print("frame\n", .{ }); } + +pub fn deinit() void {} diff --git a/src/nu/Window.zig b/src/nu/Window.zig index 52234ca..dd03900 100644 --- a/src/nu/Window.zig +++ b/src/nu/Window.zig @@ -1,5 +1,7 @@ //! 
GLFW Adaptor +// todo restructure to handle multiple windows + const std = @import("std"); pub const c = @cImport({ @@ -7,8 +9,6 @@ pub const c = @cImport({ @cInclude("GLFW/glfw3.h"); }); -const Self = @This(); - pub const Bus = @import("Bus.zig"); pub const Options = struct { @@ -19,25 +19,23 @@ pub const Options = struct { x11_instance_name: [*:0]const u8 = "floating_window", }; -alloc: std.mem.Allocator, -bus: *Bus, -handle: *c.GLFWwindow, +var bus: Bus = undefined; +var handle: *c.GLFWwindow = undefined; +var unfocused_rate: f32 = 1.0 / 20.0; -pub fn init(alloc: std.mem.Allocator, options: Options) !Self { +pub fn init(alloc: std.mem.Allocator, options: Options) !void { if (c.glfwInit() != c.GLFW_TRUE) return error.glfwInitFailed; errdefer c.glfwTerminate(); - const bus: *Bus = try alloc.create(Bus); - errdefer alloc.destroy(bus); - bus.* = Bus.init(alloc); + bus = Bus.init(alloc); errdefer bus.deinit(); c.glfwWindowHint(c.GLFW_CLIENT_API, c.GLFW_NO_API); c.glfwWindowHintString(c.GLFW_X11_CLASS_NAME, options.x11_class_name); c.glfwWindowHintString(c.GLFW_X11_INSTANCE_NAME, options.x11_instance_name); - const handle: *c.GLFWwindow = c.glfwCreateWindow( + handle = c.glfwCreateWindow( @intCast(options.width), @intCast(options.height), options.title, @@ -49,29 +47,25 @@ pub fn init(alloc: std.mem.Allocator, options: Options) !Self { bus.connect(handle); errdefer bus.disconnect(handle); - - return .{ - .alloc = alloc, - .bus = bus, - .handle = handle, - }; } -pub fn deinit(self: *Self) void { - self.bus.deinit(); - self.alloc.destroy(self.bus); - c.glfwDestroyWindow(self.handle); +pub fn deinit() void { + bus.deinit(); + c.glfwDestroyWindow(handle); c.glfwTerminate(); } -pub fn next(self: *Self) ?[]Bus.Event { - self.bus.clear(); +pub fn next() ?[]Bus.Event { + bus.clear(); - if (c.glfwWindowShouldClose(self.handle) == c.GLFW_TRUE) + if (c.glfwWindowShouldClose(handle) == c.GLFW_TRUE) return null; - // c.glfwPollEvents(); - c.glfwWaitEvents(); + if (c.glfwGetWindowAttrib(handle, c.GLFW_FOCUSED) == c.GLFW_TRUE) { + c.glfwPollEvents(); + } else { + c.glfwWaitEventsTimeout(unfocused_rate); + } - return self.bus.events.items; + return bus.events.items; } From 59c38925fdb18cc70923035e142fad6de6223f33 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Tue, 9 Jul 2024 14:14:28 -0400 Subject: [PATCH 089/113] start port render system --- src/nu/Render.zig | 382 ++++++++++++++++++++++++++++++++++++++++++++-- src/nu/Window.zig | 2 +- 2 files changed, 372 insertions(+), 12 deletions(-) diff --git a/src/nu/Render.zig b/src/nu/Render.zig index fa48ff7..0c54891 100644 --- a/src/nu/Render.zig +++ b/src/nu/Render.zig @@ -1,23 +1,383 @@ +//! Isolate vulkan code (except for ImGui) through this module. +//! +//! Requires that Window module already be initialized. 
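+//!
+//! Render follows the same global-module convention as the other nu modules:
+//! file-scope `init`/`deinit` called once from main in dependency order, plus
+//! an optional `frame` that nu.run invokes each iteration when the declaration
+//! exists (checked with `@hasDecl`). A minimal module of this shape, purely
+//! for illustration:
+//!
+//!     pub fn init(alloc: std.mem.Allocator) !void {
+//!         _ = alloc; // acquire resources here
+//!     }
+//!     pub fn frame() void {} // optional; skipped by nu.run if not declared
+//!     pub fn deinit() void {} // release resources in reverse order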
+ const std = @import("std"); +const builtin = @import("builtin"); +const vk = @import("vk"); const Window = @import("Window.zig"); -// isolate all the vulkan code through this path -// except for imgui code +pub const RenderOptions = struct { + app_name: []const u8, + engine_name: []const u8 = "nu-au", +}; -// const au = @import("au.zig"); +pub const use_debug_messenger = switch (builtin.mode) { + .Debug, .ReleaseSafe => true, + .ReleaseSmall, .ReleaseFast => false, +}; -pub fn init(alloc: std.mem.Allocator) !void { - _ = alloc; +pub const apis: []const vk.ApiInfo = &.{ + vk.features.version_1_0, + vk.features.version_1_1, + vk.features.version_1_2, + vk.features.version_1_3, + vk.extensions.khr_surface, + vk.extensions.khr_swapchain, + vk.extensions.khr_dynamic_rendering, + if (use_debug_messenger) vk.extensions.ext_debug_utils else .{}, +}; - std.debug.print("Init Render\n", .{}); +pub const device_extensions: []const [*:0]const u8 = &.{ + // todo somehow sync this with APIs above? + vk.extensions.khr_swapchain.name, + vk.extensions.khr_dynamic_rendering.name, +}; - // todo check vulkan supported - // todo create window surface +// // todo check vulkan supported +// pub fn frame() void { +// std.debug.print("frame\n", .{}); +// } + +pub const BaseWrapper = vk.BaseWrapper(apis); +pub const InstanceWrapper = vk.InstanceWrapper(apis); +pub const DeviceWrapper = vk.DeviceWrapper(apis); + +pub const InstanceProxy = vk.InstanceProxy(apis); +pub const DeviceProxy = vk.DeviceProxy(apis); +pub const QueueProxy = vk.QueueProxy(apis); +pub const CommandBufferProxy = vk.CommandBufferProxy(apis); + +pub const B: *const BaseWrapper = &_bw; +pub const I: *const InstanceProxy = &_ip; +pub const D: *const DeviceProxy = &_dp; +pub const Q: *const QueueProxy = &_qp; + +pub const device_config: *const CandidateDeviceInfo = &_dconfig; + +var _bw: BaseWrapper = undefined; +var _iw: InstanceWrapper = undefined; +var _dw: DeviceWrapper = undefined; + +var _ip: InstanceProxy = undefined; +var _dp: DeviceProxy = undefined; +var _qp: QueueProxy = undefined; + +var _instance: vk.Instance = undefined; +var _device: vk.Device = undefined; +var _dconfig: CandidateDeviceInfo = undefined; +var _queue: vk.Queue = undefined; +var _surface: vk.SurfaceKHR = undefined; + +pub fn init( + alloc: std.mem.Allocator, +) !void { + try init_base(); + errdefer deinit_base(); + + try init_instance(alloc); + errdefer deinit_instance(); + + try init_device(alloc); + errdefer deinit_device(); } -pub fn frame() void { - std.debug.print("frame\n", .{ }); +pub fn deinit() void { + deinit_device(); + deinit_instance(); + deinit_base(); } -pub fn deinit() void {} +fn init_base() !void { + if (use_debug_messenger) { + _bw = try BaseWrapper.load(glfwGetInstanceProcAddress); + } else { + _bw = BaseWrapper.loadNoFail(glfwGetInstanceProcAddress); + } +} + +fn deinit_base() void {} + +fn init_instance(alloc: std.mem.Allocator) !void { + var extensions = std.ArrayList([*:0]const u8).init(alloc); + defer extensions.deinit(); + + var layers = std.ArrayList([*:0]const u8).init(alloc); + defer layers.deinit(); + + if (use_debug_messenger) { + try extensions.appendSlice(&.{ + vk.extensions.ext_debug_utils.name, + }); + + try layers.appendSlice(&.{ + "VK_LAYER_KHRONOS_validation", + }); + } + + var glfw_exts_count: u32 = 0; + const glfw_exts: [*]const [*:0]const u8 = + @ptrCast(glfwGetRequiredInstanceExtensions(&glfw_exts_count)); + try extensions.appendSlice(glfw_exts[0..glfw_exts_count]); + + const mci: vk.DebugUtilsMessengerCreateInfoEXT = .{ + 
.message_severity = .{ + .error_bit_ext = true, + .info_bit_ext = true, + .verbose_bit_ext = true, + .warning_bit_ext = true, + }, + .message_type = .{ + .device_address_binding_bit_ext = true, + .general_bit_ext = false, + .performance_bit_ext = true, + .validation_bit_ext = true, + }, + .pfn_user_callback = &debug_callback, + .p_user_data = null, + }; + + _instance = try B.createInstance(&.{ + .p_application_info = &.{ + .p_application_name = "zig-glfw-vulkan", // todo RenderOptions + .application_version = vk.makeApiVersion(0, 0, 0, 0), + .p_engine_name = "nu-au", // todo RenderOptions + .engine_version = vk.makeApiVersion(0, 0, 0, 0), + .api_version = vk.API_VERSION_1_3, + }, + .enabled_extension_count = @intCast(extensions.items.len), + .pp_enabled_extension_names = extensions.items.ptr, + .enabled_layer_count = @intCast(layers.items.len), + .pp_enabled_layer_names = layers.items.ptr, + .p_next = if (use_debug_messenger) &mci else null, + }, null); + + if (use_debug_messenger) { + _iw = try InstanceWrapper.load(_instance, _bw.dispatch.vkGetInstanceProcAddr); + } else { + _iw = InstanceWrapper.loadNoFail(_instance, _bw.dispatch.vkGetInstanceProcAddr); + } + + _ip = InstanceProxy.init(_instance, &_iw); + + if (glfwCreateWindowSurface(_instance, Window.handle, null, &_surface) != .success) { + return error.glfwCreateWindowSurfaceFailed; + } +} + +fn deinit_instance() void { + _ip.destroySurfaceKHR(_surface, null); + _ip.destroyInstance(null); +} + +const CandidateDeviceInfo = struct { + pdev: vk.PhysicalDevice, + format: vk.SurfaceFormatKHR, + mode: vk.PresentModeKHR, + family: u32, // must support graphics and present for now + + fn init(alloc: std.mem.Allocator, pdev: vk.PhysicalDevice) !struct { i32, CandidateDeviceInfo } { + var score: i32 = 0; + var res: CandidateDeviceInfo = undefined; + + res.pdev = pdev; + + const props = I.getPhysicalDeviceProperties(pdev); + score += switch (props.device_type) { + vk.PhysicalDeviceType.discrete_gpu => 1000, + vk.PhysicalDeviceType.integrated_gpu => 500, + else => 0, + }; + + var format_count: u32 = undefined; + _ = try I.getPhysicalDeviceSurfaceFormatsKHR(pdev, _surface, &format_count, null); + if (format_count == 0) return error.NoSurfaceFormats; + const formats = try alloc.alloc(vk.SurfaceFormatKHR, format_count); + defer alloc.free(formats); + _ = try I.getPhysicalDeviceSurfaceFormatsKHR(pdev, _surface, &format_count, formats.ptr); + + for (formats) |fmt| { + if (fmt.color_space == .srgb_nonlinear_khr) { + res.format = fmt; + break; + } + } else { + res.format = formats[0]; + score -= 100; + } + + var mode_count: u32 = undefined; + _ = try I.getPhysicalDeviceSurfacePresentModesKHR(pdev, _surface, &mode_count, null); + if (mode_count == 0) return error.NoSurfacePresentModes; + const modes = try alloc.alloc(vk.PresentModeKHR, mode_count); + defer alloc.free(modes); + _ = try I.getPhysicalDeviceSurfacePresentModesKHR(pdev, _surface, &mode_count, modes.ptr); + + if (std.mem.indexOfAny(vk.PresentModeKHR, modes, &.{ + vk.PresentModeKHR.mailbox_khr, + vk.PresentModeKHR.immediate_khr, + })) |idx| { + res.mode = modes[idx]; + } else { + score -= 50; + res.mode = .fifo_khr; // this is guaranteed + } + + var ext_count: u32 = undefined; + _ = try I.enumerateDeviceExtensionProperties(pdev, null, &ext_count, null); + const exts = try alloc.alloc(vk.ExtensionProperties, ext_count); + defer alloc.free(exts); + _ = try I.enumerateDeviceExtensionProperties(pdev, null, &ext_count, exts.ptr); + + for (device_extensions) |needle| { + for (exts) |ext| { + if 
(std.mem.eql( + u8, + std.mem.span(needle), + std.mem.sliceTo(&ext.extension_name, 0), + )) + break; + } else { + return error.MissingDeviceExtension; + } + } + + var family_count: u32 = undefined; + I.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, null); + const families = try alloc.alloc(vk.QueueFamilyProperties, family_count); + defer alloc.free(families); + I.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, families.ptr); + + for (families, 0..) |prop, idx| { + const graphics_support = prop.queue_flags.graphics_bit; + const present_support = try I.getPhysicalDeviceSurfaceSupportKHR(pdev, @intCast(idx), _surface) == vk.TRUE; + + if (graphics_support and present_support) { + res.family = @intCast(idx); + break; + } + } else { + return error.NoSuitableFamily; + } + + return .{ score, res }; + } +}; + +fn init_device(alloc: std.mem.Allocator) !void { + var pdev_count: u32 = undefined; + _ = try I.enumeratePhysicalDevices(&pdev_count, null); + if (pdev_count == 0) return error.NoDevice; + const pdevs = try alloc.alloc(vk.PhysicalDevice, pdev_count); + defer alloc.free(pdevs); + _ = try I.enumeratePhysicalDevices(&pdev_count, pdevs.ptr); + + // const scores = std.ArrayList(i32). + var scores: std.MultiArrayList(struct { score: i32, ci: CandidateDeviceInfo }) = .{}; + defer scores.deinit(alloc); + + for (pdevs) |pdev| { + const score, const ci = CandidateDeviceInfo.init(alloc, pdev) catch continue; + try scores.append(alloc, .{ .score = score, .ci = ci }); + } + + const idx = std.sort.argMax(i32, scores.items(.score), {}, std.sort.asc(i32)) orelse + return error.NoSuitableDevice; + _dconfig = scores.get(idx).ci; + + const qci: []const vk.DeviceQueueCreateInfo = &.{ + vk.DeviceQueueCreateInfo{ + .queue_family_index = _dconfig.family, + .queue_count = 1, + .p_queue_priorities = &[_]f32{1.0}, + }, + }; + + _device = try I.createDevice(_dconfig.pdev, &.{ + .queue_create_info_count = @intCast(qci.len), + .p_queue_create_infos = qci.ptr, + .enabled_extension_count = @intCast(device_extensions.len), + .pp_enabled_extension_names = device_extensions.ptr, + .p_next = &vk.PhysicalDeviceDynamicRenderingFeaturesKHR{ + .dynamic_rendering = vk.TRUE, + }, + }, null); + + if (use_debug_messenger) { + _dw = try DeviceWrapper.load(_device, _iw.dispatch.vkGetDeviceProcAddr); + } else { + _dw = DeviceWrapper.loadNoFail(_device, _iw.dispatch.vkGetDeviceProcAddr); + } + _dp = DeviceProxy.init(_device, &_dw); + errdefer D.destroyDevice(null); + + _queue = D.getDeviceQueue(_dconfig.family, 0); + + _qp = QueueProxy.init(_queue, &_dw); + + // todo i'm thinking this needs to be a more complex pointer structure... i'm making assumptions here about how the + // command pools are meant to work. probably I am cooking too much. 
+} + +fn deinit_device() void { + D.destroyDevice(null); +} + +pub fn debug_callback( + msg_severity: vk.DebugUtilsMessageSeverityFlagsEXT, + msg_type: vk.DebugUtilsMessageTypeFlagsEXT, + p_data: ?*const vk.DebugUtilsMessengerCallbackDataEXT, + _: ?*anyopaque, +) callconv(vk.vulkan_call_conv) vk.Bool32 { + // ripped from std.log.defaultLog + + const data = p_data orelse return vk.FALSE; + const message = data.p_message orelse return vk.FALSE; + + const severity_prefix = if (msg_severity.verbose_bit_ext) + "verbose:" + else if (msg_severity.info_bit_ext) + "info:" + else if (msg_severity.warning_bit_ext) + "warning:" + else if (msg_severity.error_bit_ext) + "error:" + else + "?:"; + + const type_prefix = if (msg_type.general_bit_ext) + "" + else if (msg_type.validation_bit_ext) + "validation:" + else if (msg_type.performance_bit_ext) + "performance:" + else if (msg_type.device_address_binding_bit_ext) + "device_address_binding:" + else + "?:"; + + const stderr = std.io.getStdErr().writer(); + var bw = std.io.bufferedWriter(stderr); + const writer = bw.writer(); + + std.debug.lockStdErr(); + defer std.debug.unlockStdErr(); + nosuspend { + writer.print("vk-{s}{s} {s}\n", .{ severity_prefix, type_prefix, message }) catch return vk.FALSE; + bw.flush() catch return vk.FALSE; + } + + return vk.FALSE; +} + +extern fn glfwGetInstanceProcAddress(instance: vk.Instance, procname: [*:0]const u8) vk.PfnVoidFunction; + +extern fn glfwGetRequiredInstanceExtensions(count: *u32) [*]const [*:0]const u8; + +extern fn glfwCreateWindowSurface( + instance: vk.Instance, + window: *Window.c.GLFWwindow, + allocation_callbacks: ?*const vk.AllocationCallbacks, + surface: *vk.SurfaceKHR, +) vk.Result; diff --git a/src/nu/Window.zig b/src/nu/Window.zig index dd03900..1701c83 100644 --- a/src/nu/Window.zig +++ b/src/nu/Window.zig @@ -20,7 +20,7 @@ pub const Options = struct { }; var bus: Bus = undefined; -var handle: *c.GLFWwindow = undefined; +pub var handle: *c.GLFWwindow = undefined; var unfocused_rate: f32 = 1.0 / 20.0; pub fn init(alloc: std.mem.Allocator, options: Options) !void { From 1613b90ac5b5bc73b4a8adab15ba345301a57f65 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Tue, 9 Jul 2024 14:39:16 -0400 Subject: [PATCH 090/113] root module nu_options --- src/main.zig | 11 +- src/nu.zig | 9 + src/nu/Render.zig | 389 +++----------------------------------- src/nu/Render/Context.zig | 388 +++++++++++++++++++++++++++++++++++++ src/nu/Window.zig | 16 +- 5 files changed, 438 insertions(+), 375 deletions(-) create mode 100644 src/nu/Render/Context.zig diff --git a/src/main.zig b/src/main.zig index a075d3e..3a3a79e 100644 --- a/src/main.zig +++ b/src/main.zig @@ -1,9 +1,13 @@ const std = @import("std"); - const nu = @import("nu.zig"); const App = @import("App.zig"); +pub const nu_options: nu.Options = .{ + .window = .{ .title = "Hello World" }, + .render = .{ .app_name = "hello-world" }, +}; + pub fn main() !void { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; defer _ = gpa.detectLeaks(); @@ -13,7 +17,10 @@ pub fn main() !void { // problem: how to specify runtime options, like Window title? // problem: where should gpa go? probably some "Engine" structure in nu.zig - try nu.Window.init(alloc, .{ .title = "Hello World" }); + // don't necessarily need to declare topological sort - depth-first traversal + // of each module's dependencies without repeats would do. 
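+
+    // How the pieces below fit together: nu.options is resolved at comptime in
+    // nu.zig from this root module's `nu_options` declaration (via
+    // @import("root") and @hasDecl), falling back to defaults when absent. The
+    // init order stays explicit for now: Window before Render, since Render
+    // creates its surface from Window.handle; ImGui after Render; App last.
+    // The defers unwind everything in reverse order.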
+ + try nu.Window.init(alloc); defer nu.Window.deinit(); try nu.Render.init(alloc); diff --git a/src/nu.zig b/src/nu.zig index f3e7dbc..759e098 100644 --- a/src/nu.zig +++ b/src/nu.zig @@ -1,9 +1,18 @@ const std = @import("std"); +const root = @import("root"); pub const Window = @import("nu/Window.zig"); pub const Render = @import("nu/Render.zig"); pub const ImGui = @import("nu/ImGui.zig"); +pub const Options = struct { + window: Window.Options = .{}, + render: Render.Options = .{}, + // imgui: ImGui.Options = .{}, +}; + +pub const options: Options = if (@hasDecl(root, "nu_options")) root.nu_options else .{}; + pub fn run( driver: anytype, modules: anytype, diff --git a/src/nu/Render.zig b/src/nu/Render.zig index 0c54891..47fa3f7 100644 --- a/src/nu/Render.zig +++ b/src/nu/Render.zig @@ -6,378 +6,35 @@ const std = @import("std"); const builtin = @import("builtin"); const vk = @import("vk"); -const Window = @import("Window.zig"); +const ctx = @import("Render/Context.zig"); -pub const RenderOptions = struct { - app_name: []const u8, +pub const Options = struct { + app_name: []const u8 = "nu-au-app", + app_version: struct { + variant: u3 = 0, + major: u7 = 0, + minor: u10 = 0, + patch: u12 = 0, + } = .{}, engine_name: []const u8 = "nu-au", + engine_version: struct { + variant: u3 = 0, + major: u7 = 0, + minor: u10 = 0, + patch: u12 = 0, + } = .{}, + frames_in_flight: u8 = 3, }; -pub const use_debug_messenger = switch (builtin.mode) { - .Debug, .ReleaseSafe => true, - .ReleaseSmall, .ReleaseFast => false, -}; +pub fn init(alloc: std.mem.Allocator) !void { + // todo make ctx not globals -pub const apis: []const vk.ApiInfo = &.{ - vk.features.version_1_0, - vk.features.version_1_1, - vk.features.version_1_2, - vk.features.version_1_3, - vk.extensions.khr_surface, - vk.extensions.khr_swapchain, - vk.extensions.khr_dynamic_rendering, - if (use_debug_messenger) vk.extensions.ext_debug_utils else .{}, -}; - -pub const device_extensions: []const [*:0]const u8 = &.{ - // todo somehow sync this with APIs above? 
- vk.extensions.khr_swapchain.name, - vk.extensions.khr_dynamic_rendering.name, -}; - -// // todo check vulkan supported -// pub fn frame() void { -// std.debug.print("frame\n", .{}); -// } - -pub const BaseWrapper = vk.BaseWrapper(apis); -pub const InstanceWrapper = vk.InstanceWrapper(apis); -pub const DeviceWrapper = vk.DeviceWrapper(apis); - -pub const InstanceProxy = vk.InstanceProxy(apis); -pub const DeviceProxy = vk.DeviceProxy(apis); -pub const QueueProxy = vk.QueueProxy(apis); -pub const CommandBufferProxy = vk.CommandBufferProxy(apis); - -pub const B: *const BaseWrapper = &_bw; -pub const I: *const InstanceProxy = &_ip; -pub const D: *const DeviceProxy = &_dp; -pub const Q: *const QueueProxy = &_qp; - -pub const device_config: *const CandidateDeviceInfo = &_dconfig; - -var _bw: BaseWrapper = undefined; -var _iw: InstanceWrapper = undefined; -var _dw: DeviceWrapper = undefined; - -var _ip: InstanceProxy = undefined; -var _dp: DeviceProxy = undefined; -var _qp: QueueProxy = undefined; - -var _instance: vk.Instance = undefined; -var _device: vk.Device = undefined; -var _dconfig: CandidateDeviceInfo = undefined; -var _queue: vk.Queue = undefined; -var _surface: vk.SurfaceKHR = undefined; - -pub fn init( - alloc: std.mem.Allocator, -) !void { - try init_base(); - errdefer deinit_base(); - - try init_instance(alloc); - errdefer deinit_instance(); - - try init_device(alloc); - errdefer deinit_device(); + try ctx.init(alloc); + errdefer ctx.deinit(); } +pub fn frame() void {} + pub fn deinit() void { - deinit_device(); - deinit_instance(); - deinit_base(); + ctx.deinit(); } - -fn init_base() !void { - if (use_debug_messenger) { - _bw = try BaseWrapper.load(glfwGetInstanceProcAddress); - } else { - _bw = BaseWrapper.loadNoFail(glfwGetInstanceProcAddress); - } -} - -fn deinit_base() void {} - -fn init_instance(alloc: std.mem.Allocator) !void { - var extensions = std.ArrayList([*:0]const u8).init(alloc); - defer extensions.deinit(); - - var layers = std.ArrayList([*:0]const u8).init(alloc); - defer layers.deinit(); - - if (use_debug_messenger) { - try extensions.appendSlice(&.{ - vk.extensions.ext_debug_utils.name, - }); - - try layers.appendSlice(&.{ - "VK_LAYER_KHRONOS_validation", - }); - } - - var glfw_exts_count: u32 = 0; - const glfw_exts: [*]const [*:0]const u8 = - @ptrCast(glfwGetRequiredInstanceExtensions(&glfw_exts_count)); - try extensions.appendSlice(glfw_exts[0..glfw_exts_count]); - - const mci: vk.DebugUtilsMessengerCreateInfoEXT = .{ - .message_severity = .{ - .error_bit_ext = true, - .info_bit_ext = true, - .verbose_bit_ext = true, - .warning_bit_ext = true, - }, - .message_type = .{ - .device_address_binding_bit_ext = true, - .general_bit_ext = false, - .performance_bit_ext = true, - .validation_bit_ext = true, - }, - .pfn_user_callback = &debug_callback, - .p_user_data = null, - }; - - _instance = try B.createInstance(&.{ - .p_application_info = &.{ - .p_application_name = "zig-glfw-vulkan", // todo RenderOptions - .application_version = vk.makeApiVersion(0, 0, 0, 0), - .p_engine_name = "nu-au", // todo RenderOptions - .engine_version = vk.makeApiVersion(0, 0, 0, 0), - .api_version = vk.API_VERSION_1_3, - }, - .enabled_extension_count = @intCast(extensions.items.len), - .pp_enabled_extension_names = extensions.items.ptr, - .enabled_layer_count = @intCast(layers.items.len), - .pp_enabled_layer_names = layers.items.ptr, - .p_next = if (use_debug_messenger) &mci else null, - }, null); - - if (use_debug_messenger) { - _iw = try InstanceWrapper.load(_instance, 
_bw.dispatch.vkGetInstanceProcAddr); - } else { - _iw = InstanceWrapper.loadNoFail(_instance, _bw.dispatch.vkGetInstanceProcAddr); - } - - _ip = InstanceProxy.init(_instance, &_iw); - - if (glfwCreateWindowSurface(_instance, Window.handle, null, &_surface) != .success) { - return error.glfwCreateWindowSurfaceFailed; - } -} - -fn deinit_instance() void { - _ip.destroySurfaceKHR(_surface, null); - _ip.destroyInstance(null); -} - -const CandidateDeviceInfo = struct { - pdev: vk.PhysicalDevice, - format: vk.SurfaceFormatKHR, - mode: vk.PresentModeKHR, - family: u32, // must support graphics and present for now - - fn init(alloc: std.mem.Allocator, pdev: vk.PhysicalDevice) !struct { i32, CandidateDeviceInfo } { - var score: i32 = 0; - var res: CandidateDeviceInfo = undefined; - - res.pdev = pdev; - - const props = I.getPhysicalDeviceProperties(pdev); - score += switch (props.device_type) { - vk.PhysicalDeviceType.discrete_gpu => 1000, - vk.PhysicalDeviceType.integrated_gpu => 500, - else => 0, - }; - - var format_count: u32 = undefined; - _ = try I.getPhysicalDeviceSurfaceFormatsKHR(pdev, _surface, &format_count, null); - if (format_count == 0) return error.NoSurfaceFormats; - const formats = try alloc.alloc(vk.SurfaceFormatKHR, format_count); - defer alloc.free(formats); - _ = try I.getPhysicalDeviceSurfaceFormatsKHR(pdev, _surface, &format_count, formats.ptr); - - for (formats) |fmt| { - if (fmt.color_space == .srgb_nonlinear_khr) { - res.format = fmt; - break; - } - } else { - res.format = formats[0]; - score -= 100; - } - - var mode_count: u32 = undefined; - _ = try I.getPhysicalDeviceSurfacePresentModesKHR(pdev, _surface, &mode_count, null); - if (mode_count == 0) return error.NoSurfacePresentModes; - const modes = try alloc.alloc(vk.PresentModeKHR, mode_count); - defer alloc.free(modes); - _ = try I.getPhysicalDeviceSurfacePresentModesKHR(pdev, _surface, &mode_count, modes.ptr); - - if (std.mem.indexOfAny(vk.PresentModeKHR, modes, &.{ - vk.PresentModeKHR.mailbox_khr, - vk.PresentModeKHR.immediate_khr, - })) |idx| { - res.mode = modes[idx]; - } else { - score -= 50; - res.mode = .fifo_khr; // this is guaranteed - } - - var ext_count: u32 = undefined; - _ = try I.enumerateDeviceExtensionProperties(pdev, null, &ext_count, null); - const exts = try alloc.alloc(vk.ExtensionProperties, ext_count); - defer alloc.free(exts); - _ = try I.enumerateDeviceExtensionProperties(pdev, null, &ext_count, exts.ptr); - - for (device_extensions) |needle| { - for (exts) |ext| { - if (std.mem.eql( - u8, - std.mem.span(needle), - std.mem.sliceTo(&ext.extension_name, 0), - )) - break; - } else { - return error.MissingDeviceExtension; - } - } - - var family_count: u32 = undefined; - I.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, null); - const families = try alloc.alloc(vk.QueueFamilyProperties, family_count); - defer alloc.free(families); - I.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, families.ptr); - - for (families, 0..) 
|prop, idx| { - const graphics_support = prop.queue_flags.graphics_bit; - const present_support = try I.getPhysicalDeviceSurfaceSupportKHR(pdev, @intCast(idx), _surface) == vk.TRUE; - - if (graphics_support and present_support) { - res.family = @intCast(idx); - break; - } - } else { - return error.NoSuitableFamily; - } - - return .{ score, res }; - } -}; - -fn init_device(alloc: std.mem.Allocator) !void { - var pdev_count: u32 = undefined; - _ = try I.enumeratePhysicalDevices(&pdev_count, null); - if (pdev_count == 0) return error.NoDevice; - const pdevs = try alloc.alloc(vk.PhysicalDevice, pdev_count); - defer alloc.free(pdevs); - _ = try I.enumeratePhysicalDevices(&pdev_count, pdevs.ptr); - - // const scores = std.ArrayList(i32). - var scores: std.MultiArrayList(struct { score: i32, ci: CandidateDeviceInfo }) = .{}; - defer scores.deinit(alloc); - - for (pdevs) |pdev| { - const score, const ci = CandidateDeviceInfo.init(alloc, pdev) catch continue; - try scores.append(alloc, .{ .score = score, .ci = ci }); - } - - const idx = std.sort.argMax(i32, scores.items(.score), {}, std.sort.asc(i32)) orelse - return error.NoSuitableDevice; - _dconfig = scores.get(idx).ci; - - const qci: []const vk.DeviceQueueCreateInfo = &.{ - vk.DeviceQueueCreateInfo{ - .queue_family_index = _dconfig.family, - .queue_count = 1, - .p_queue_priorities = &[_]f32{1.0}, - }, - }; - - _device = try I.createDevice(_dconfig.pdev, &.{ - .queue_create_info_count = @intCast(qci.len), - .p_queue_create_infos = qci.ptr, - .enabled_extension_count = @intCast(device_extensions.len), - .pp_enabled_extension_names = device_extensions.ptr, - .p_next = &vk.PhysicalDeviceDynamicRenderingFeaturesKHR{ - .dynamic_rendering = vk.TRUE, - }, - }, null); - - if (use_debug_messenger) { - _dw = try DeviceWrapper.load(_device, _iw.dispatch.vkGetDeviceProcAddr); - } else { - _dw = DeviceWrapper.loadNoFail(_device, _iw.dispatch.vkGetDeviceProcAddr); - } - _dp = DeviceProxy.init(_device, &_dw); - errdefer D.destroyDevice(null); - - _queue = D.getDeviceQueue(_dconfig.family, 0); - - _qp = QueueProxy.init(_queue, &_dw); - - // todo i'm thinking this needs to be a more complex pointer structure... i'm making assumptions here about how the - // command pools are meant to work. probably I am cooking too much. 
-} - -fn deinit_device() void { - D.destroyDevice(null); -} - -pub fn debug_callback( - msg_severity: vk.DebugUtilsMessageSeverityFlagsEXT, - msg_type: vk.DebugUtilsMessageTypeFlagsEXT, - p_data: ?*const vk.DebugUtilsMessengerCallbackDataEXT, - _: ?*anyopaque, -) callconv(vk.vulkan_call_conv) vk.Bool32 { - // ripped from std.log.defaultLog - - const data = p_data orelse return vk.FALSE; - const message = data.p_message orelse return vk.FALSE; - - const severity_prefix = if (msg_severity.verbose_bit_ext) - "verbose:" - else if (msg_severity.info_bit_ext) - "info:" - else if (msg_severity.warning_bit_ext) - "warning:" - else if (msg_severity.error_bit_ext) - "error:" - else - "?:"; - - const type_prefix = if (msg_type.general_bit_ext) - "" - else if (msg_type.validation_bit_ext) - "validation:" - else if (msg_type.performance_bit_ext) - "performance:" - else if (msg_type.device_address_binding_bit_ext) - "device_address_binding:" - else - "?:"; - - const stderr = std.io.getStdErr().writer(); - var bw = std.io.bufferedWriter(stderr); - const writer = bw.writer(); - - std.debug.lockStdErr(); - defer std.debug.unlockStdErr(); - nosuspend { - writer.print("vk-{s}{s} {s}\n", .{ severity_prefix, type_prefix, message }) catch return vk.FALSE; - bw.flush() catch return vk.FALSE; - } - - return vk.FALSE; -} - -extern fn glfwGetInstanceProcAddress(instance: vk.Instance, procname: [*:0]const u8) vk.PfnVoidFunction; - -extern fn glfwGetRequiredInstanceExtensions(count: *u32) [*]const [*:0]const u8; - -extern fn glfwCreateWindowSurface( - instance: vk.Instance, - window: *Window.c.GLFWwindow, - allocation_callbacks: ?*const vk.AllocationCallbacks, - surface: *vk.SurfaceKHR, -) vk.Result; diff --git a/src/nu/Render/Context.zig b/src/nu/Render/Context.zig new file mode 100644 index 0000000..5c32c7a --- /dev/null +++ b/src/nu/Render/Context.zig @@ -0,0 +1,388 @@ +const std = @import("std"); +const builtin = @import("builtin"); +const vk = @import("vk"); + +const nu = @import("../../nu.zig"); +const Window = @import("../Window.zig"); + +pub const use_debug_messenger = switch (builtin.mode) { + .Debug, .ReleaseSafe => true, + .ReleaseSmall, .ReleaseFast => false, +}; + +pub const apis: []const vk.ApiInfo = &.{ + vk.features.version_1_0, + vk.features.version_1_1, + vk.features.version_1_2, + vk.features.version_1_3, + vk.extensions.khr_surface, + vk.extensions.khr_swapchain, + vk.extensions.khr_dynamic_rendering, + if (use_debug_messenger) vk.extensions.ext_debug_utils else .{}, +}; + +pub const device_extensions: []const [*:0]const u8 = &.{ + // todo somehow sync this with APIs above? 
+ vk.extensions.khr_swapchain.name, + vk.extensions.khr_dynamic_rendering.name, +}; + +pub const app_info: vk.ApplicationInfo = .{ + .p_application_name = nu.options.render.app_name, + .application_version = vk.makeApiVersion( + nu.options.render.app_version.variant, + nu.options.render.app_version.major, + nu.options.render.app_version.minor, + nu.options.render.app_version.patch, + ), + .p_engine_name = nu.options.render.engine_name, + .engine_version = vk.makeApiVersion( + nu.options.render.engine_version.variant, + nu.options.render.engine_version.major, + nu.options.render.engine_version.minor, + nu.options.render.engine_version.patch, + ), + .api_version = vk.API_VERSION_1_3, +}; + +pub const BaseWrapper = vk.BaseWrapper(apis); +pub const InstanceWrapper = vk.InstanceWrapper(apis); +pub const DeviceWrapper = vk.DeviceWrapper(apis); + +pub const InstanceProxy = vk.InstanceProxy(apis); +pub const DeviceProxy = vk.DeviceProxy(apis); +pub const QueueProxy = vk.QueueProxy(apis); +pub const CommandBufferProxy = vk.CommandBufferProxy(apis); + +pub const B: *const BaseWrapper = &_bw; +pub const I: *const InstanceProxy = &_ip; +pub const D: *const DeviceProxy = &_dp; +pub const Q: *const QueueProxy = &_qp; + +pub const device_config: *const CandidateDeviceInfo = &_dconfig; + +var _bw: BaseWrapper = undefined; +var _iw: InstanceWrapper = undefined; +var _dw: DeviceWrapper = undefined; + +var _ip: InstanceProxy = undefined; +var _dp: DeviceProxy = undefined; +var _qp: QueueProxy = undefined; + +var _instance: vk.Instance = undefined; +var _device: vk.Device = undefined; +var _dconfig: CandidateDeviceInfo = undefined; +var _queue: vk.Queue = undefined; +var _surface: vk.SurfaceKHR = undefined; + +pub fn init( + alloc: std.mem.Allocator, +) !void { + try init_base(); + errdefer deinit_base(); + + try init_instance(alloc); + errdefer deinit_instance(); + + try init_device(alloc); + errdefer deinit_device(); +} + +pub fn deinit() void { + deinit_device(); + deinit_instance(); + deinit_base(); +} + +fn init_base() !void { + if (use_debug_messenger) { + _bw = try BaseWrapper.load(glfwGetInstanceProcAddress); + } else { + _bw = BaseWrapper.loadNoFail(glfwGetInstanceProcAddress); + } +} + +fn deinit_base() void {} + +fn init_instance(alloc: std.mem.Allocator) !void { + var extensions = std.ArrayList([*:0]const u8).init(alloc); + defer extensions.deinit(); + + var layers = std.ArrayList([*:0]const u8).init(alloc); + defer layers.deinit(); + + if (use_debug_messenger) { + try extensions.appendSlice(&.{ + vk.extensions.ext_debug_utils.name, + }); + + try layers.appendSlice(&.{ + "VK_LAYER_KHRONOS_validation", + }); + } + + var glfw_exts_count: u32 = 0; + const glfw_exts: [*]const [*:0]const u8 = + @ptrCast(glfwGetRequiredInstanceExtensions(&glfw_exts_count)); + try extensions.appendSlice(glfw_exts[0..glfw_exts_count]); + + const mci: vk.DebugUtilsMessengerCreateInfoEXT = .{ + .message_severity = .{ + .error_bit_ext = true, + .info_bit_ext = true, + .verbose_bit_ext = true, + .warning_bit_ext = true, + }, + .message_type = .{ + .device_address_binding_bit_ext = true, + .general_bit_ext = false, + .performance_bit_ext = true, + .validation_bit_ext = true, + }, + .pfn_user_callback = &debug_callback, + .p_user_data = null, + }; + + _instance = try B.createInstance(&.{ + .p_application_info = &.{ + .p_application_name = "zig-glfw-vulkan", // todo RenderOptions + .application_version = vk.makeApiVersion(0, 0, 0, 0), + .p_engine_name = "nu-au", // todo RenderOptions + .engine_version = vk.makeApiVersion(0, 0, 
0, 0), + .api_version = vk.API_VERSION_1_3, + }, + .enabled_extension_count = @intCast(extensions.items.len), + .pp_enabled_extension_names = extensions.items.ptr, + .enabled_layer_count = @intCast(layers.items.len), + .pp_enabled_layer_names = layers.items.ptr, + .p_next = if (use_debug_messenger) &mci else null, + }, null); + + if (use_debug_messenger) { + _iw = try InstanceWrapper.load(_instance, _bw.dispatch.vkGetInstanceProcAddr); + } else { + _iw = InstanceWrapper.loadNoFail(_instance, _bw.dispatch.vkGetInstanceProcAddr); + } + + _ip = InstanceProxy.init(_instance, &_iw); + + if (glfwCreateWindowSurface(_instance, Window.handle, null, &_surface) != .success) { + return error.glfwCreateWindowSurfaceFailed; + } +} + +fn deinit_instance() void { + _ip.destroySurfaceKHR(_surface, null); + _ip.destroyInstance(null); +} + +const CandidateDeviceInfo = struct { + pdev: vk.PhysicalDevice, + format: vk.SurfaceFormatKHR, + mode: vk.PresentModeKHR, + family: u32, // must support graphics and present for now + + fn init(alloc: std.mem.Allocator, pdev: vk.PhysicalDevice) !struct { i32, CandidateDeviceInfo } { + var score: i32 = 0; + var res: CandidateDeviceInfo = undefined; + + res.pdev = pdev; + + const props = I.getPhysicalDeviceProperties(pdev); + score += switch (props.device_type) { + vk.PhysicalDeviceType.discrete_gpu => 1000, + vk.PhysicalDeviceType.integrated_gpu => 500, + else => 0, + }; + + var format_count: u32 = undefined; + _ = try I.getPhysicalDeviceSurfaceFormatsKHR(pdev, _surface, &format_count, null); + if (format_count == 0) return error.NoSurfaceFormats; + const formats = try alloc.alloc(vk.SurfaceFormatKHR, format_count); + defer alloc.free(formats); + _ = try I.getPhysicalDeviceSurfaceFormatsKHR(pdev, _surface, &format_count, formats.ptr); + + for (formats) |fmt| { + if (fmt.color_space == .srgb_nonlinear_khr) { + res.format = fmt; + break; + } + } else { + res.format = formats[0]; + score -= 100; + } + + var mode_count: u32 = undefined; + _ = try I.getPhysicalDeviceSurfacePresentModesKHR(pdev, _surface, &mode_count, null); + if (mode_count == 0) return error.NoSurfacePresentModes; + const modes = try alloc.alloc(vk.PresentModeKHR, mode_count); + defer alloc.free(modes); + _ = try I.getPhysicalDeviceSurfacePresentModesKHR(pdev, _surface, &mode_count, modes.ptr); + + if (std.mem.indexOfAny(vk.PresentModeKHR, modes, &.{ + vk.PresentModeKHR.mailbox_khr, + vk.PresentModeKHR.immediate_khr, + })) |idx| { + res.mode = modes[idx]; + } else { + score -= 50; + res.mode = .fifo_khr; // this is guaranteed + } + + var ext_count: u32 = undefined; + _ = try I.enumerateDeviceExtensionProperties(pdev, null, &ext_count, null); + const exts = try alloc.alloc(vk.ExtensionProperties, ext_count); + defer alloc.free(exts); + _ = try I.enumerateDeviceExtensionProperties(pdev, null, &ext_count, exts.ptr); + + for (device_extensions) |needle| { + for (exts) |ext| { + if (std.mem.eql( + u8, + std.mem.span(needle), + std.mem.sliceTo(&ext.extension_name, 0), + )) + break; + } else { + return error.MissingDeviceExtension; + } + } + + var family_count: u32 = undefined; + I.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, null); + const families = try alloc.alloc(vk.QueueFamilyProperties, family_count); + defer alloc.free(families); + I.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, families.ptr); + + for (families, 0..) 
|prop, idx| { + const graphics_support = prop.queue_flags.graphics_bit; + const present_support = try I.getPhysicalDeviceSurfaceSupportKHR(pdev, @intCast(idx), _surface) == vk.TRUE; + + if (graphics_support and present_support) { + res.family = @intCast(idx); + break; + } + } else { + return error.NoSuitableFamily; + } + + return .{ score, res }; + } +}; + +fn init_device(alloc: std.mem.Allocator) !void { + var pdev_count: u32 = undefined; + _ = try I.enumeratePhysicalDevices(&pdev_count, null); + if (pdev_count == 0) return error.NoDevice; + const pdevs = try alloc.alloc(vk.PhysicalDevice, pdev_count); + defer alloc.free(pdevs); + _ = try I.enumeratePhysicalDevices(&pdev_count, pdevs.ptr); + + // const scores = std.ArrayList(i32). + var scores: std.MultiArrayList(struct { score: i32, ci: CandidateDeviceInfo }) = .{}; + defer scores.deinit(alloc); + + for (pdevs) |pdev| { + const score, const ci = CandidateDeviceInfo.init(alloc, pdev) catch continue; + try scores.append(alloc, .{ .score = score, .ci = ci }); + } + + const idx = std.sort.argMax(i32, scores.items(.score), {}, std.sort.asc(i32)) orelse + return error.NoSuitableDevice; + _dconfig = scores.get(idx).ci; + + const qci: []const vk.DeviceQueueCreateInfo = &.{ + vk.DeviceQueueCreateInfo{ + .queue_family_index = _dconfig.family, + .queue_count = 1, + .p_queue_priorities = &[_]f32{1.0}, + }, + }; + + _device = try I.createDevice(_dconfig.pdev, &.{ + .queue_create_info_count = @intCast(qci.len), + .p_queue_create_infos = qci.ptr, + .enabled_extension_count = @intCast(device_extensions.len), + .pp_enabled_extension_names = device_extensions.ptr, + .p_next = &vk.PhysicalDeviceDynamicRenderingFeaturesKHR{ + .dynamic_rendering = vk.TRUE, + }, + }, null); + + if (use_debug_messenger) { + _dw = try DeviceWrapper.load(_device, _iw.dispatch.vkGetDeviceProcAddr); + } else { + _dw = DeviceWrapper.loadNoFail(_device, _iw.dispatch.vkGetDeviceProcAddr); + } + _dp = DeviceProxy.init(_device, &_dw); + errdefer D.destroyDevice(null); + + _queue = D.getDeviceQueue(_dconfig.family, 0); + + _qp = QueueProxy.init(_queue, &_dw); + + // todo i'm thinking this needs to be a more complex pointer structure... i'm making assumptions here about how the + // command pools are meant to work. probably I am cooking too much. 
+} + +fn deinit_device() void { + D.destroyDevice(null); +} + +pub fn debug_callback( + msg_severity: vk.DebugUtilsMessageSeverityFlagsEXT, + msg_type: vk.DebugUtilsMessageTypeFlagsEXT, + p_data: ?*const vk.DebugUtilsMessengerCallbackDataEXT, + _: ?*anyopaque, +) callconv(vk.vulkan_call_conv) vk.Bool32 { + // ripped from std.log.defaultLog + + const data = p_data orelse return vk.FALSE; + const message = data.p_message orelse return vk.FALSE; + + const severity_prefix = if (msg_severity.verbose_bit_ext) + "verbose:" + else if (msg_severity.info_bit_ext) + "info:" + else if (msg_severity.warning_bit_ext) + "warning:" + else if (msg_severity.error_bit_ext) + "error:" + else + "?:"; + + const type_prefix = if (msg_type.general_bit_ext) + "" + else if (msg_type.validation_bit_ext) + "validation:" + else if (msg_type.performance_bit_ext) + "performance:" + else if (msg_type.device_address_binding_bit_ext) + "device_address_binding:" + else + "?:"; + + const stderr = std.io.getStdErr().writer(); + var bw = std.io.bufferedWriter(stderr); + const writer = bw.writer(); + + std.debug.lockStdErr(); + defer std.debug.unlockStdErr(); + nosuspend { + writer.print("vk-{s}{s} {s}\n", .{ severity_prefix, type_prefix, message }) catch return vk.FALSE; + bw.flush() catch return vk.FALSE; + } + + return vk.FALSE; +} + +extern fn glfwGetInstanceProcAddress(instance: vk.Instance, procname: [*:0]const u8) vk.PfnVoidFunction; + +extern fn glfwGetRequiredInstanceExtensions(count: *u32) [*]const [*:0]const u8; + +extern fn glfwCreateWindowSurface( + instance: vk.Instance, + window: *Window.c.GLFWwindow, + allocation_callbacks: ?*const vk.AllocationCallbacks, + surface: *vk.SurfaceKHR, +) vk.Result; diff --git a/src/nu/Window.zig b/src/nu/Window.zig index 1701c83..e2363ff 100644 --- a/src/nu/Window.zig +++ b/src/nu/Window.zig @@ -4,6 +4,8 @@ const std = @import("std"); +const nu = @import("../nu.zig"); + pub const c = @cImport({ @cDefine("GLFW_INCLUDE_NONE", {}); @cInclude("GLFW/glfw3.h"); @@ -12,7 +14,7 @@ pub const c = @cImport({ pub const Bus = @import("Bus.zig"); pub const Options = struct { - title: [*:0]const u8, + title: [*:0]const u8 = "Hello World", width: u32 = 1280, height: u32 = 720, x11_class_name: [*:0]const u8 = "floating_window", @@ -23,7 +25,7 @@ var bus: Bus = undefined; pub var handle: *c.GLFWwindow = undefined; var unfocused_rate: f32 = 1.0 / 20.0; -pub fn init(alloc: std.mem.Allocator, options: Options) !void { +pub fn init(alloc: std.mem.Allocator) !void { if (c.glfwInit() != c.GLFW_TRUE) return error.glfwInitFailed; errdefer c.glfwTerminate(); @@ -32,13 +34,13 @@ pub fn init(alloc: std.mem.Allocator, options: Options) !void { errdefer bus.deinit(); c.glfwWindowHint(c.GLFW_CLIENT_API, c.GLFW_NO_API); - c.glfwWindowHintString(c.GLFW_X11_CLASS_NAME, options.x11_class_name); - c.glfwWindowHintString(c.GLFW_X11_INSTANCE_NAME, options.x11_instance_name); + c.glfwWindowHintString(c.GLFW_X11_CLASS_NAME, nu.options.window.x11_class_name); + c.glfwWindowHintString(c.GLFW_X11_INSTANCE_NAME, nu.options.window.x11_instance_name); handle = c.glfwCreateWindow( - @intCast(options.width), - @intCast(options.height), - options.title, + @intCast(nu.options.window.width), + @intCast(nu.options.window.height), + nu.options.window.title, null, null, ) orelse From 4f9a1541761aa5f7e7097372955b63fae7884987 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Tue, 9 Jul 2024 15:17:26 -0400 Subject: [PATCH 091/113] move au into nu/render --- src/App.zig | 5 +- src/au.zig | 474 ------------------------- src/main.zig | 5 
+- src/nu/Render.zig | 12 +- src/nu/Render/{Context.zig => au.zig} | 74 ++-- src/{ => nu/Render}/au/Bus.zig | 0 src/{ => nu/Render}/au/Flights.zig | 0 src/{ => nu/Render}/au/SwapChain.zig | 0 src/{ => nu/Render}/au/VkAllocator.zig | 0 src/{ => nu/Render}/au/ui.zig | 0 10 files changed, 53 insertions(+), 517 deletions(-) delete mode 100644 src/au.zig rename src/nu/Render/{Context.zig => au.zig} (88%) rename src/{ => nu/Render}/au/Bus.zig (100%) rename src/{ => nu/Render}/au/Flights.zig (100%) rename src/{ => nu/Render}/au/SwapChain.zig (100%) rename src/{ => nu/Render}/au/VkAllocator.zig (100%) rename src/{ => nu/Render}/au/ui.zig (100%) diff --git a/src/App.zig b/src/App.zig index c0bd5ce..95a830a 100644 --- a/src/App.zig +++ b/src/App.zig @@ -1,11 +1,8 @@ const std = @import("std"); const nu = @import("nu.zig"); -const Self = @This(); - pub fn init(alloc: std.mem.Allocator) !void { _ = alloc; } -pub fn deinit() void { -} +pub fn deinit() void {} diff --git a/src/au.zig b/src/au.zig deleted file mode 100644 index 2ce76d9..0000000 --- a/src/au.zig +++ /dev/null @@ -1,474 +0,0 @@ -const std = @import("std"); -const builtin = @import("builtin"); - -const vk = @import("vk"); -const c = @import("c.zig"); - -pub const Bus = @import("au/Bus.zig"); -pub const SwapChain = @import("au/SwapChain.zig"); -pub const Flights = @import("au/Flights.zig"); -pub const VkAllocator = @import("au/VkAllocator.zig"); - -pub const use_debug_messenger = switch (builtin.mode) { - .Debug, .ReleaseSafe => true, - .ReleaseSmall, .ReleaseFast => false, -}; - -pub const apis: []const vk.ApiInfo = &.{ - vk.features.version_1_0, - vk.features.version_1_1, - vk.features.version_1_2, - vk.features.version_1_3, - vk.extensions.khr_surface, - vk.extensions.khr_swapchain, - vk.extensions.khr_dynamic_rendering, - if (use_debug_messenger) vk.extensions.ext_debug_utils else .{}, -}; - -pub const device_extensions: []const [*:0]const u8 = &.{ - // todo somehow sync this with APIs above? 
- vk.extensions.khr_swapchain.name, - vk.extensions.khr_dynamic_rendering.name, -}; - -pub const app_info: vk.ApplicationInfo = .{ - .p_application_name = "zig-glfw-vulkan", - .application_version = vk.makeApiVersion(0, 0, 0, 0), - .p_engine_name = "zig-glfw-vulkan", - .engine_version = vk.makeApiVersion(0, 0, 0, 0), - .api_version = vk.API_VERSION_1_3, -}; - -pub const BaseWrapper = vk.BaseWrapper(apis); -pub const InstanceWrapper = vk.InstanceWrapper(apis); -pub const DeviceWrapper = vk.DeviceWrapper(apis); - -pub const InstanceProxy = vk.InstanceProxy(apis); -pub const DeviceProxy = vk.DeviceProxy(apis); -pub const QueueProxy = vk.QueueProxy(apis); -pub const CommandBufferProxy = vk.CommandBufferProxy(apis); - -pub const B: *const BaseWrapper = &_bw; -pub const I: *const InstanceProxy = &_ip; -pub const D: *const DeviceProxy = &_dp; -pub const W: *const Window = &_window; -pub const Q: *const QueueProxy = &_qp; - -pub const device_config: *const CandidateDeviceInfo = &_dconfig; - -var _bw: BaseWrapper = undefined; -var _iw: InstanceWrapper = undefined; -var _dw: DeviceWrapper = undefined; - -var _ip: InstanceProxy = undefined; -var _dp: DeviceProxy = undefined; -var _qp: QueueProxy = undefined; - -var _instance: vk.Instance = undefined; -var _window: Window = undefined; -var _bus: Bus = undefined; -var _device: vk.Device = undefined; -var _dconfig: CandidateDeviceInfo = undefined; -var _queue: vk.Queue = undefined; - -pub fn init(alloc: std.mem.Allocator) !void { - try init_glfw(); - errdefer deinit_glfw(); - - try init_base(); - errdefer deinit_base(); - - try init_instance(alloc); - errdefer deinit_instance(); - - try init_window(alloc); - errdefer deinit_window(); - - try init_device(alloc); - errdefer deinit_device(); - - try init_event_bus(alloc); - errdefer deinit_event_bus(); -} - -pub fn deinit() void { - deinit_event_bus(); - deinit_device(); - deinit_window(); - deinit_instance(); - deinit_base(); - deinit_glfw(); -} - -fn init_glfw() !void { - if (c.glfwInit() != c.GLFW_TRUE) - return error.glfwInitFailed; - errdefer c.glfwTerminate(); - - // todo move to render - if (c.glfwVulkanSupported() != c.GLFW_TRUE) - return error.glfwNoVulkan; -} - -fn deinit_glfw() void { - c.glfwTerminate(); -} - -fn init_base() !void { - if (use_debug_messenger) { - _bw = try BaseWrapper.load(c.glfwGetInstanceProcAddress); - } else { - _bw = BaseWrapper.loadNoFail(c.glfwGetInstanceProcAddress); - } -} - -fn deinit_base() void {} - -fn init_instance(alloc: std.mem.Allocator) !void { - var extensions = std.ArrayList([*:0]const u8).init(alloc); - defer extensions.deinit(); - - var layers = std.ArrayList([*:0]const u8).init(alloc); - defer layers.deinit(); - - if (use_debug_messenger) { - try extensions.appendSlice(&.{ - vk.extensions.ext_debug_utils.name, - }); - - try layers.appendSlice(&.{ - "VK_LAYER_KHRONOS_validation", - }); - } - - var glfw_exts_count: u32 = 0; - const glfw_exts: [*]const [*:0]const u8 = - @ptrCast(c.glfwGetRequiredInstanceExtensions(&glfw_exts_count)); - try extensions.appendSlice(glfw_exts[0..glfw_exts_count]); - - const mci: vk.DebugUtilsMessengerCreateInfoEXT = .{ - .message_severity = .{ - .error_bit_ext = true, - .info_bit_ext = true, - .verbose_bit_ext = true, - .warning_bit_ext = true, - }, - .message_type = .{ - .device_address_binding_bit_ext = true, - .general_bit_ext = false, - .performance_bit_ext = true, - .validation_bit_ext = true, - }, - .pfn_user_callback = &debug_callback, - .p_user_data = null, - }; - - _instance = try B.createInstance(&.{ - 
.p_application_info = &app_info, - .enabled_extension_count = @intCast(extensions.items.len), - .pp_enabled_extension_names = extensions.items.ptr, - .enabled_layer_count = @intCast(layers.items.len), - .pp_enabled_layer_names = layers.items.ptr, - .p_next = if (use_debug_messenger) &mci else null, - }, null); - - if (use_debug_messenger) { - _iw = try InstanceWrapper.load(_instance, _bw.dispatch.vkGetInstanceProcAddr); - } else { - _iw = InstanceWrapper.loadNoFail(_instance, _bw.dispatch.vkGetInstanceProcAddr); - } - - _ip = InstanceProxy.init(_instance, &_iw); -} - -fn deinit_instance() void { - _ip.destroyInstance(null); -} - -fn init_window(alloc: std.mem.Allocator) !void { - _window = try Window.init( - alloc, - app_info.p_application_name orelse "Au Window", - .{ .height = 720, .width = 1280 }, - ); - errdefer _window.deinit(); -} - -fn deinit_window() void { - _window.deinit(); -} - -const CandidateDeviceInfo = struct { - pdev: vk.PhysicalDevice, - format: vk.SurfaceFormatKHR, - mode: vk.PresentModeKHR, - family: u32, // must support graphics and present for now - - fn init(alloc: std.mem.Allocator, pdev: vk.PhysicalDevice) !struct { i32, CandidateDeviceInfo } { - var score: i32 = 0; - var res: CandidateDeviceInfo = undefined; - - res.pdev = pdev; - - const props = I.getPhysicalDeviceProperties(pdev); - score += switch (props.device_type) { - vk.PhysicalDeviceType.discrete_gpu => 1000, - vk.PhysicalDeviceType.integrated_gpu => 500, - else => 0, - }; - - var format_count: u32 = undefined; - _ = try I.getPhysicalDeviceSurfaceFormatsKHR(pdev, W.surface, &format_count, null); - if (format_count == 0) return error.NoSurfaceFormats; - const formats = try alloc.alloc(vk.SurfaceFormatKHR, format_count); - defer alloc.free(formats); - _ = try I.getPhysicalDeviceSurfaceFormatsKHR(pdev, W.surface, &format_count, formats.ptr); - - for (formats) |fmt| { - if (fmt.color_space == .srgb_nonlinear_khr) { - res.format = fmt; - break; - } - } else { - res.format = formats[0]; - score -= 100; - } - - var mode_count: u32 = undefined; - _ = try I.getPhysicalDeviceSurfacePresentModesKHR(pdev, W.surface, &mode_count, null); - if (mode_count == 0) return error.NoSurfacePresentModes; - const modes = try alloc.alloc(vk.PresentModeKHR, mode_count); - defer alloc.free(modes); - _ = try I.getPhysicalDeviceSurfacePresentModesKHR(pdev, W.surface, &mode_count, modes.ptr); - - if (std.mem.indexOfAny(vk.PresentModeKHR, modes, &.{ - vk.PresentModeKHR.mailbox_khr, - vk.PresentModeKHR.immediate_khr, - })) |idx| { - res.mode = modes[idx]; - } else { - score -= 50; - res.mode = .fifo_khr; // this is guaranteed - } - - var ext_count: u32 = undefined; - _ = try I.enumerateDeviceExtensionProperties(pdev, null, &ext_count, null); - const exts = try alloc.alloc(vk.ExtensionProperties, ext_count); - defer alloc.free(exts); - _ = try I.enumerateDeviceExtensionProperties(pdev, null, &ext_count, exts.ptr); - - for (device_extensions) |needle| { - for (exts) |ext| { - if (std.mem.eql( - u8, - std.mem.span(needle), - std.mem.sliceTo(&ext.extension_name, 0), - )) - break; - } else { - return error.MissingDeviceExtension; - } - } - - var family_count: u32 = undefined; - I.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, null); - const families = try alloc.alloc(vk.QueueFamilyProperties, family_count); - defer alloc.free(families); - I.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, families.ptr); - - for (families, 0..) 
|prop, idx| { - const graphics_support = prop.queue_flags.graphics_bit; - const present_support = try I.getPhysicalDeviceSurfaceSupportKHR(pdev, @intCast(idx), W.surface) == vk.TRUE; - - if (graphics_support and present_support) { - res.family = @intCast(idx); - break; - } - } else { - return error.NoSuitableFamily; - } - - return .{ score, res }; - } -}; - -fn init_device(alloc: std.mem.Allocator) !void { - var pdev_count: u32 = undefined; - _ = try I.enumeratePhysicalDevices(&pdev_count, null); - if (pdev_count == 0) return error.NoDevice; - const pdevs = try alloc.alloc(vk.PhysicalDevice, pdev_count); - defer alloc.free(pdevs); - _ = try I.enumeratePhysicalDevices(&pdev_count, pdevs.ptr); - - // const scores = std.ArrayList(i32). - var scores: std.MultiArrayList(struct { score: i32, ci: CandidateDeviceInfo }) = .{}; - defer scores.deinit(alloc); - - for (pdevs) |pdev| { - const score, const ci = CandidateDeviceInfo.init(alloc, pdev) catch continue; - try scores.append(alloc, .{ .score = score, .ci = ci }); - } - - const idx = std.sort.argMax(i32, scores.items(.score), {}, std.sort.asc(i32)) orelse - return error.NoSuitableDevice; - _dconfig = scores.get(idx).ci; - - const qci: []const vk.DeviceQueueCreateInfo = &.{ - vk.DeviceQueueCreateInfo{ - .queue_family_index = _dconfig.family, - .queue_count = 1, - .p_queue_priorities = &[_]f32{1.0}, - }, - }; - - _device = try I.createDevice(_dconfig.pdev, &.{ - .queue_create_info_count = @intCast(qci.len), - .p_queue_create_infos = qci.ptr, - .enabled_extension_count = @intCast(device_extensions.len), - .pp_enabled_extension_names = device_extensions.ptr, - .p_next = &vk.PhysicalDeviceDynamicRenderingFeaturesKHR{ - .dynamic_rendering = vk.TRUE, - }, - }, null); - - if (use_debug_messenger) { - _dw = try DeviceWrapper.load(_device, _iw.dispatch.vkGetDeviceProcAddr); - } else { - _dw = DeviceWrapper.loadNoFail(_device, _iw.dispatch.vkGetDeviceProcAddr); - } - _dp = DeviceProxy.init(_device, &_dw); - errdefer D.destroyDevice(null); - - _queue = D.getDeviceQueue(_dconfig.family, 0); - - _qp = QueueProxy.init(_queue, &_dw); - - // todo i'm thinking this needs to be a more complex pointer structure... i'm making assumptions here about how the - // command pools are meant to work. probably I am cooking too much. 
-} - -fn deinit_device() void { - D.destroyDevice(null); -} - -pub fn debug_callback( - msg_severity: vk.DebugUtilsMessageSeverityFlagsEXT, - msg_type: vk.DebugUtilsMessageTypeFlagsEXT, - p_data: ?*const vk.DebugUtilsMessengerCallbackDataEXT, - _: ?*anyopaque, -) callconv(vk.vulkan_call_conv) vk.Bool32 { - // ripped from std.log.defaultLog - - const data = p_data orelse return vk.FALSE; - const message = data.p_message orelse return vk.FALSE; - - const severity_prefix = if (msg_severity.verbose_bit_ext) - "verbose:" - else if (msg_severity.info_bit_ext) - "info:" - else if (msg_severity.warning_bit_ext) - "warning:" - else if (msg_severity.error_bit_ext) - "error:" - else - "?:"; - - const type_prefix = if (msg_type.general_bit_ext) - "" - else if (msg_type.validation_bit_ext) - "validation:" - else if (msg_type.performance_bit_ext) - "performance:" - else if (msg_type.device_address_binding_bit_ext) - "device_address_binding:" - else - "?:"; - - const stderr = std.io.getStdErr().writer(); - var bw = std.io.bufferedWriter(stderr); - const writer = bw.writer(); - - std.debug.lockStdErr(); - defer std.debug.unlockStdErr(); - nosuspend { - writer.print("vk-{s}{s} {s}\n", .{ severity_prefix, type_prefix, message }) catch return vk.FALSE; - bw.flush() catch return vk.FALSE; - } - - return vk.FALSE; -} - -pub const Window = struct { - const Self = @This(); - - alloc: std.mem.Allocator, - handle: *c.GLFWwindow, - surface: vk.SurfaceKHR, - - pub fn init(alloc: std.mem.Allocator, title: [*:0]const u8, extent: vk.Extent2D) !Self { - var self: Self = undefined; - self.alloc = alloc; - - c.glfwWindowHintString(c.GLFW_X11_CLASS_NAME, "floating_window"); - c.glfwWindowHintString(c.GLFW_X11_INSTANCE_NAME, "floating_window"); - c.glfwWindowHint(c.GLFW_CLIENT_API, c.GLFW_NO_API); - - self.handle = c.glfwCreateWindow( - @intCast(extent.width), - @intCast(extent.height), - title, - null, - null, - ) orelse return error.glfwWindowFailed; - errdefer c.glfwDestroyWindow(self.handle); - - if (c.glfwCreateWindowSurface(_instance, self.handle, null, &self.surface) != .success) { - return error.glfwSurfaceFailed; - } - errdefer I.destroySurfaceKHR(self.surface, null); - - return self; - } - - pub fn deinit(self: Self) void { - I.destroySurfaceKHR(self.surface, null); - c.glfwDestroyWindow(self.handle); - } - - pub fn should_close(self: Self) bool { - return c.glfwWindowShouldClose(self.handle) == c.GLFW_TRUE; - } - - pub fn focused(self: Self) bool { - return c.glfwGetWindowAttrib(self.handle, c.GLFW_FOCUSED) == c.GLFW_TRUE; - } -}; - -pub fn wait_events() []const Bus.Event { - _bus.clear(); - c.glfwWaitEvents(); - return _bus.events.items; -} - -pub fn poll_events() []const Bus.Event { - _bus.clear(); - c.glfwPollEvents(); - return _bus.events.items; -} - -pub fn wait_events_timeout(seconds: f64) []const Bus.Event { - _bus.clear(); - c.glfwWaitEventsTimeout(seconds); - return _bus.events.items; -} - -fn init_event_bus(alloc: std.mem.Allocator) !void { - _bus = Bus.init(alloc); - errdefer _bus.deinit(); - try _bus.connect(&_window); -} - -fn deinit_event_bus() void { - try _bus.disconnect(&_window); - _bus.deinit(); -} diff --git a/src/main.zig b/src/main.zig index 3a3a79e..2404851 100644 --- a/src/main.zig +++ b/src/main.zig @@ -5,7 +5,10 @@ const App = @import("App.zig"); pub const nu_options: nu.Options = .{ .window = .{ .title = "Hello World" }, - .render = .{ .app_name = "hello-world" }, + .render = .{ + .app_name = "hello-world", + .frames_in_flight = 3, + }, }; pub fn main() !void { diff --git 
a/src/nu/Render.zig b/src/nu/Render.zig index 47fa3f7..972f4e8 100644 --- a/src/nu/Render.zig +++ b/src/nu/Render.zig @@ -6,17 +6,17 @@ const std = @import("std"); const builtin = @import("builtin"); const vk = @import("vk"); -const ctx = @import("Render/Context.zig"); +const au = @import("Render/au.zig"); pub const Options = struct { - app_name: []const u8 = "nu-au-app", + app_name: [*:0]const u8 = "nu-au-app", app_version: struct { variant: u3 = 0, major: u7 = 0, minor: u10 = 0, patch: u12 = 0, } = .{}, - engine_name: []const u8 = "nu-au", + engine_name: [*:0]const u8 = "nu-au", engine_version: struct { variant: u3 = 0, major: u7 = 0, @@ -29,12 +29,12 @@ pub const Options = struct { pub fn init(alloc: std.mem.Allocator) !void { // todo make ctx not globals - try ctx.init(alloc); - errdefer ctx.deinit(); + try au.init(alloc); + errdefer au.deinit(); } pub fn frame() void {} pub fn deinit() void { - ctx.deinit(); + au.deinit(); } diff --git a/src/nu/Render/Context.zig b/src/nu/Render/au.zig similarity index 88% rename from src/nu/Render/Context.zig rename to src/nu/Render/au.zig index 5c32c7a..94705b4 100644 --- a/src/nu/Render/Context.zig +++ b/src/nu/Render/au.zig @@ -1,9 +1,12 @@ const std = @import("std"); const builtin = @import("builtin"); -const vk = @import("vk"); +const vk = @import("vk"); const nu = @import("../../nu.zig"); -const Window = @import("../Window.zig"); + +pub const SwapChain = @import("au/SwapChain.zig"); +pub const Flights = @import("au/Flights.zig"); +pub const VkAllocator = @import("au/VkAllocator.zig"); pub const use_debug_messenger = switch (builtin.mode) { .Debug, .ReleaseSafe => true, @@ -27,24 +30,6 @@ pub const device_extensions: []const [*:0]const u8 = &.{ vk.extensions.khr_dynamic_rendering.name, }; -pub const app_info: vk.ApplicationInfo = .{ - .p_application_name = nu.options.render.app_name, - .application_version = vk.makeApiVersion( - nu.options.render.app_version.variant, - nu.options.render.app_version.major, - nu.options.render.app_version.minor, - nu.options.render.app_version.patch, - ), - .p_engine_name = nu.options.render.engine_name, - .engine_version = vk.makeApiVersion( - nu.options.render.engine_version.variant, - nu.options.render.engine_version.major, - nu.options.render.engine_version.minor, - nu.options.render.engine_version.patch, - ), - .api_version = vk.API_VERSION_1_3, -}; - pub const BaseWrapper = vk.BaseWrapper(apis); pub const InstanceWrapper = vk.InstanceWrapper(apis); pub const DeviceWrapper = vk.DeviceWrapper(apis); @@ -58,6 +43,7 @@ pub const B: *const BaseWrapper = &_bw; pub const I: *const InstanceProxy = &_ip; pub const D: *const DeviceProxy = &_dp; pub const Q: *const QueueProxy = &_qp; +pub const S: *const vk.SurfaceKHR = &_surface; pub const device_config: *const CandidateDeviceInfo = &_dconfig; @@ -70,14 +56,12 @@ var _dp: DeviceProxy = undefined; var _qp: QueueProxy = undefined; var _instance: vk.Instance = undefined; +var _surface: vk.SurfaceKHR = undefined; var _device: vk.Device = undefined; var _dconfig: CandidateDeviceInfo = undefined; var _queue: vk.Queue = undefined; -var _surface: vk.SurfaceKHR = undefined; -pub fn init( - alloc: std.mem.Allocator, -) !void { +pub fn init(alloc: std.mem.Allocator) !void { try init_base(); errdefer deinit_base(); @@ -95,6 +79,9 @@ pub fn deinit() void { } fn init_base() !void { + if (glfwVulkanSupported() != nu.Window.c.GLFW_TRUE) + return error.glfwNoVulkan; + if (use_debug_messenger) { _bw = try BaseWrapper.load(glfwGetInstanceProcAddress); } else { @@ -145,10 +132,20 @@ fn 
init_instance(alloc: std.mem.Allocator) !void { _instance = try B.createInstance(&.{ .p_application_info = &.{ - .p_application_name = "zig-glfw-vulkan", // todo RenderOptions - .application_version = vk.makeApiVersion(0, 0, 0, 0), - .p_engine_name = "nu-au", // todo RenderOptions - .engine_version = vk.makeApiVersion(0, 0, 0, 0), + .p_application_name = nu.options.render.app_name, + .application_version = vk.makeApiVersion( + nu.options.render.app_version.variant, + nu.options.render.app_version.major, + nu.options.render.app_version.minor, + nu.options.render.app_version.patch, + ), + .p_engine_name = nu.options.render.engine_name, + .engine_version = vk.makeApiVersion( + nu.options.render.engine_version.variant, + nu.options.render.engine_version.major, + nu.options.render.engine_version.minor, + nu.options.render.engine_version.patch, + ), .api_version = vk.API_VERSION_1_3, }, .enabled_extension_count = @intCast(extensions.items.len), @@ -166,7 +163,7 @@ fn init_instance(alloc: std.mem.Allocator) !void { _ip = InstanceProxy.init(_instance, &_iw); - if (glfwCreateWindowSurface(_instance, Window.handle, null, &_surface) != .success) { + if (glfwCreateWindowSurface(_instance, nu.Window.handle, null, &_surface) != .success) { return error.glfwCreateWindowSurfaceFailed; } } @@ -376,13 +373,26 @@ pub fn debug_callback( return vk.FALSE; } -extern fn glfwGetInstanceProcAddress(instance: vk.Instance, procname: [*:0]const u8) vk.PfnVoidFunction; +extern fn glfwVulkanSupported() c_int; -extern fn glfwGetRequiredInstanceExtensions(count: *u32) [*]const [*:0]const u8; +extern fn glfwGetInstanceProcAddress( + instance: vk.Instance, + procname: [*:0]const u8, +) vk.PfnVoidFunction; + +extern fn glfwGetPhysicalDevicePresentationSupport( + instance: vk.Instance, + pdev: vk.PhysicalDevice, + queuefamily: u32, +) c_int; extern fn glfwCreateWindowSurface( instance: vk.Instance, - window: *Window.c.GLFWwindow, + window: *nu.Window.c.GLFWwindow, allocation_callbacks: ?*const vk.AllocationCallbacks, surface: *vk.SurfaceKHR, ) vk.Result; + +extern fn glfwGetRequiredInstanceExtensions( + count: *u32, +) [*][*:0]const u8; diff --git a/src/au/Bus.zig b/src/nu/Render/au/Bus.zig similarity index 100% rename from src/au/Bus.zig rename to src/nu/Render/au/Bus.zig diff --git a/src/au/Flights.zig b/src/nu/Render/au/Flights.zig similarity index 100% rename from src/au/Flights.zig rename to src/nu/Render/au/Flights.zig diff --git a/src/au/SwapChain.zig b/src/nu/Render/au/SwapChain.zig similarity index 100% rename from src/au/SwapChain.zig rename to src/nu/Render/au/SwapChain.zig diff --git a/src/au/VkAllocator.zig b/src/nu/Render/au/VkAllocator.zig similarity index 100% rename from src/au/VkAllocator.zig rename to src/nu/Render/au/VkAllocator.zig diff --git a/src/au/ui.zig b/src/nu/Render/au/ui.zig similarity index 100% rename from src/au/ui.zig rename to src/nu/Render/au/ui.zig From 59912a4bc6f266d46526ef80cdde0b8b0166160d Mon Sep 17 00:00:00 2001 From: David Allemang Date: Tue, 9 Jul 2024 15:42:08 -0400 Subject: [PATCH 092/113] render with swapchain --- src/nu.zig | 12 +++++- src/nu/Render.zig | 71 ++++++++++++++++++++++++++++++++-- src/nu/Render/au/Flights.zig | 7 +++- src/nu/Render/au/SwapChain.zig | 4 +- 4 files changed, 86 insertions(+), 8 deletions(-) diff --git a/src/nu.zig b/src/nu.zig index 759e098..26be0d6 100644 --- a/src/nu.zig +++ b/src/nu.zig @@ -22,8 +22,16 @@ pub fn run( _ = events; inline for (modules) |module| { - if (@hasDecl(module, "frame")) - module.frame(); + if (@hasDecl(module, "frame")) { + if 
(@typeInfo(@TypeOf(module.frame)).Fn.return_type) |R| { + switch (@typeInfo(R)) { + .ErrorUnion => try module.frame(), + else => @compileError("frame must be void or !void."), + } + } else { + module.frame(); + } + } } // todo fixed timestep diff --git a/src/nu/Render.zig b/src/nu/Render.zig index 972f4e8..43dc9f0 100644 --- a/src/nu/Render.zig +++ b/src/nu/Render.zig @@ -6,6 +6,7 @@ const std = @import("std"); const builtin = @import("builtin"); const vk = @import("vk"); +const nu = @import("../nu.zig"); const au = @import("Render/au.zig"); pub const Options = struct { @@ -26,15 +27,79 @@ pub const Options = struct { frames_in_flight: u8 = 3, }; -pub fn init(alloc: std.mem.Allocator) !void { - // todo make ctx not globals +var sc: au.SwapChain = undefined; +var flights: au.Flights = undefined; +pub fn init(alloc: std.mem.Allocator) !void { + // todo pick apart au into helpers; not a sub-module filled with its own globals. try au.init(alloc); errdefer au.deinit(); + + sc = try au.SwapChain.init(alloc); + errdefer sc.deinit(); + + flights = try au.Flights.init(alloc, nu.options.render.frames_in_flight); + errdefer flights.deinit(); } -pub fn frame() void {} +pub fn frame() !void { + const flight: au.Flights.Flight = flights.next(); + try flight.wait(); + + while (true) { + _ = try sc.rebuild(); + + const target = sc.acquire(flight.acquire, .null_handle) catch |err| switch (err) { + error.OutOfDateKHR => { + sc.mark(); + continue; + }, + else => return err, + }; + + const render_area: vk.Rect2D = .{ + .offset = .{ .x = 0, .y = 0 }, + .extent = sc.cinfo.image_extent, + }; + + try au.D.resetCommandPool(flight.pool, .{}); + var cmd = au.CommandBufferProxy.init(flight.cmd, au.D.wrapper); + + try cmd.beginCommandBuffer(&.{ .flags = .{ .one_time_submit_bit = true } }); + target.begin_rendering(cmd, render_area); + target.end_rendering(cmd); + try cmd.endCommandBuffer(); + + try au.Q.submit(1, &.{ + vk.SubmitInfo{ + .wait_semaphore_count = 1, + .p_wait_semaphores = &.{flight.acquire}, + .p_wait_dst_stage_mask = &.{ + vk.PipelineStageFlags{ .color_attachment_output_bit = true }, + }, + .command_buffer_count = 1, + .p_command_buffers = &.{cmd.handle}, + .signal_semaphore_count = 1, + .p_signal_semaphores = &.{flight.complete}, + }, + }, flight.fence); + + if (sc.present(&.{flight.complete}, target)) |_| { + return; + } else |err| switch (err) { + error.OutOfDateKHR => { + try flight.wait(); + sc.mark(); + continue; + }, + else => return err, + } + } +} pub fn deinit() void { + au.D.deviceWaitIdle() catch {}; + flights.deinit(); + sc.deinit(); au.deinit(); } diff --git a/src/nu/Render/au/Flights.zig b/src/nu/Render/au/Flights.zig index d758170..4662e08 100644 --- a/src/nu/Render/au/Flights.zig +++ b/src/nu/Render/au/Flights.zig @@ -4,12 +4,17 @@ const au = @import("../au.zig"); const Self = @This(); -const Flight = struct { +pub const Flight = struct { acquire: vk.Semaphore = .null_handle, complete: vk.Semaphore = .null_handle, fence: vk.Fence = .null_handle, pool: vk.CommandPool = .null_handle, cmd: vk.CommandBuffer = .null_handle, + + pub fn wait(self: Flight) !void { + _ = try au.D.waitForFences(1, &.{self.fence}, vk.TRUE, std.math.maxInt(u64)); + try au.D.resetFences(1, &.{self.fence}); + } }; alloc: std.mem.Allocator, diff --git a/src/nu/Render/au/SwapChain.zig b/src/nu/Render/au/SwapChain.zig index 77a28a9..204316d 100644 --- a/src/nu/Render/au/SwapChain.zig +++ b/src/nu/Render/au/SwapChain.zig @@ -11,7 +11,7 @@ images: std.ArrayListUnmanaged(vk.Image) = .{}, views: 
std.ArrayListUnmanaged(vk.ImageView) = .{}, pub fn init(alloc: std.mem.Allocator) !Self { - const caps = try au.I.getPhysicalDeviceSurfaceCapabilitiesKHR(au.device_config.pdev, au.W.surface); + const caps = try au.I.getPhysicalDeviceSurfaceCapabilitiesKHR(au.device_config.pdev, au.S.*); var min_image_count = @max(3, caps.min_image_count + 1); // todo magic numbers if (caps.max_image_count > 0) { @@ -24,7 +24,7 @@ pub fn init(alloc: std.mem.Allocator) !Self { return .{ .alloc = alloc, .cinfo = .{ - .surface = au.W.surface, + .surface = au.S.*, .min_image_count = min_image_count, .image_format = format.format, .image_color_space = format.color_space, From 2bb3b71b2b14edc1053e9283443a2027a4c91fee Mon Sep 17 00:00:00 2001 From: David Allemang Date: Tue, 9 Jul 2024 16:09:22 -0400 Subject: [PATCH 093/113] good but wrong teardown order; device wait idle --- src/main.zig | 2 +- src/nu.zig | 6 ++- src/nu/ImGui.zig | 98 +++++++++++++++++++++++++++++++++++++++++++- src/nu/Render.zig | 26 ++++++++++++ src/nu/Render/au.zig | 10 ++--- 5 files changed, 133 insertions(+), 9 deletions(-) diff --git a/src/main.zig b/src/main.zig index 2404851..6e82cc7 100644 --- a/src/main.zig +++ b/src/main.zig @@ -29,7 +29,7 @@ pub fn main() !void { try nu.Render.init(alloc); defer nu.Render.deinit(); - try nu.ImGui.init(alloc); + try nu.ImGui.init(); defer nu.ImGui.deinit(); try App.init(alloc); diff --git a/src/nu.zig b/src/nu.zig index 26be0d6..a977e4e 100644 --- a/src/nu.zig +++ b/src/nu.zig @@ -26,7 +26,11 @@ pub fn run( if (@typeInfo(@TypeOf(module.frame)).Fn.return_type) |R| { switch (@typeInfo(R)) { .ErrorUnion => try module.frame(), - else => @compileError("frame must be void or !void."), + .Void => module.frame(), + else => { + @compileLog(module.frame, @typeInfo(R)); + @compileError("frame must be void or !void."); + }, } } else { module.frame(); diff --git a/src/nu/ImGui.zig b/src/nu/ImGui.zig index e5f5c61..e5231bd 100644 --- a/src/nu/ImGui.zig +++ b/src/nu/ImGui.zig @@ -2,11 +2,105 @@ const std = @import("std"); +const vk = @import("vk"); +const nu = @import("../nu.zig"); +const au = @import("Render/au.zig"); const Render = @import("Render.zig"); +const Window = @import("Window.zig"); -pub fn init(alloc: std.mem.Allocator) !void { - _ = alloc; +const im = @import("cimgui"); + +pub fn loader_wrapper(procname: [*c]const u8, _: ?*anyopaque) callconv(.C) vk.PfnVoidFunction { + return au.glfwGetInstanceProcAddress(au.I.handle, procname); +} + +var ctx: *im.c.ImGuiContext = undefined; +var descriptor_pool: vk.DescriptorPool = undefined; + +pub fn init() !void { + ctx = im.c.igCreateContext(null) orelse { + return error.igCreateContextFailed; + }; + errdefer im.c.igDestroyContext(ctx); + + if (!im.c.ImGui_ImplVulkan_LoadFunctions(loader_wrapper, null)) { + return error.igVulkanLoadFunctionsFailed; + } + + if (!im.c.ImGui_ImplGlfw_InitForVulkan(@ptrCast(Window.handle), true)) { + return error.igGlfwInitFailed; + } + errdefer im.c.ImGui_ImplGlfw_Shutdown(); + + descriptor_pool = try au.D.createDescriptorPool(&vk.DescriptorPoolCreateInfo{ + .flags = .{ .free_descriptor_set_bit = true }, + .pool_size_count = 1, + .p_pool_sizes = &.{vk.DescriptorPoolSize{ + .descriptor_count = 32, + .type = .combined_image_sampler, + }}, + .max_sets = 32, + }, null); + errdefer au.D.destroyDescriptorPool(descriptor_pool, null); + + if (im.c.ImGui_ImplVulkan_Init(@constCast(&im.c.ImGui_ImplVulkan_InitInfo{ + .Instance = @ptrFromInt(@intFromEnum(au.I.handle)), + .PhysicalDevice = @ptrFromInt(@intFromEnum(au.device_config.pdev)), + .Device 
= @ptrFromInt(@intFromEnum(au.D.handle)), + .QueueFamily = au.device_config.family, + .Queue = @ptrFromInt(@intFromEnum(au.Q.handle)), + .DescriptorPool = @ptrFromInt(@intFromEnum(descriptor_pool)), + .RenderPass = null, + .MinImageCount = 2, + .ImageCount = @intCast(nu.options.render.frames_in_flight), + .PipelineRenderingCreateInfo = @bitCast(vk.PipelineRenderingCreateInfo{ + .view_mask = 0, + .depth_attachment_format = .undefined, + .stencil_attachment_format = .undefined, + .color_attachment_count = 1, + .p_color_attachment_formats = &.{au.device_config.format.format}, + }), + .MSAASamples = 0, + .PipelineCache = null, + .Subpass = 0, + .UseDynamicRendering = true, + .Allocator = null, + })) != true) { + return error.igVulkanInitFailed; + } + errdefer im.c.ImGui_ImplVulkan_Shutdown(); + + if (!im.c.ImGui_ImplVulkan_CreateFontsTexture()) { + return error.igVulkanFontTextureFailed; + } + + try Render.add_present_callback(present); + errdefer Render.remove_present_callback(present); +} + +pub fn frame() void { + im.c.ImGui_ImplGlfw_NewFrame(); + im.c.ImGui_ImplVulkan_NewFrame(); + im.c.igNewFrame(); + + im.c.igShowDemoWindow(null); + + im.c.igEndFrame(); + im.c.igRender(); } pub fn deinit() void { + Render.remove_present_callback(present); + im.c.ImGui_ImplVulkan_Shutdown(); + au.D.destroyDescriptorPool(descriptor_pool, null); + im.c.ImGui_ImplGlfw_Shutdown(); + im.c.igDestroyContext(ctx); +} + +pub fn present(cmd: au.CommandBufferProxy) void { + im.c.ImGui_ImplVulkan_RenderDrawData( + im.c.igGetDrawData(), + @ptrFromInt(@intFromEnum(cmd.handle)), + null, + ); } diff --git a/src/nu/Render.zig b/src/nu/Render.zig index 43dc9f0..952db90 100644 --- a/src/nu/Render.zig +++ b/src/nu/Render.zig @@ -30,11 +30,33 @@ pub const Options = struct { var sc: au.SwapChain = undefined; var flights: au.Flights = undefined; +const PresentCallback = *const fn (au.CommandBufferProxy) void; +var present_callbacks: std.ArrayList(PresentCallback) = undefined; + +pub fn add_present_callback(cb: PresentCallback) !void { + if (std.mem.indexOfScalar(PresentCallback, present_callbacks.items, cb)) |_| { + return; + } else { + try present_callbacks.append(cb); + } +} + +pub fn remove_present_callback(cb: PresentCallback) void { + if (std.mem.indexOfScalar(PresentCallback, present_callbacks.items, cb)) |idx| { + _ = present_callbacks.orderedRemove(idx); + } else { + return; + } +} + pub fn init(alloc: std.mem.Allocator) !void { // todo pick apart au into helpers; not a sub-module filled with its own globals. 
try au.init(alloc); errdefer au.deinit(); + present_callbacks = std.ArrayList(PresentCallback).init(alloc); + errdefer present_callbacks.deinit(); + sc = try au.SwapChain.init(alloc); errdefer sc.deinit(); @@ -67,6 +89,9 @@ pub fn frame() !void { try cmd.beginCommandBuffer(&.{ .flags = .{ .one_time_submit_bit = true } }); target.begin_rendering(cmd, render_area); + for (present_callbacks.items) |cb| { + cb(cmd); + } target.end_rendering(cmd); try cmd.endCommandBuffer(); @@ -98,6 +123,7 @@ pub fn frame() !void { } pub fn deinit() void { + present_callbacks.deinit(); au.D.deviceWaitIdle() catch {}; flights.deinit(); sc.deinit(); diff --git a/src/nu/Render/au.zig b/src/nu/Render/au.zig index 94705b4..2b54aa9 100644 --- a/src/nu/Render/au.zig +++ b/src/nu/Render/au.zig @@ -373,26 +373,26 @@ pub fn debug_callback( return vk.FALSE; } -extern fn glfwVulkanSupported() c_int; +pub extern fn glfwVulkanSupported() c_int; -extern fn glfwGetInstanceProcAddress( +pub extern fn glfwGetInstanceProcAddress( instance: vk.Instance, procname: [*:0]const u8, ) vk.PfnVoidFunction; -extern fn glfwGetPhysicalDevicePresentationSupport( +pub extern fn glfwGetPhysicalDevicePresentationSupport( instance: vk.Instance, pdev: vk.PhysicalDevice, queuefamily: u32, ) c_int; -extern fn glfwCreateWindowSurface( +pub extern fn glfwCreateWindowSurface( instance: vk.Instance, window: *nu.Window.c.GLFWwindow, allocation_callbacks: ?*const vk.AllocationCallbacks, surface: *vk.SurfaceKHR, ) vk.Result; -extern fn glfwGetRequiredInstanceExtensions( +pub extern fn glfwGetRequiredInstanceExtensions( count: *u32, ) [*][*:0]const u8; From 4e2cf3eb3b676523ded4971d975114326bd15301 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Tue, 9 Jul 2024 22:30:55 -0400 Subject: [PATCH 094/113] notes about structure --- src/main.zig | 4 +++- src/nu/Render.zig | 5 +++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/src/main.zig b/src/main.zig index 6e82cc7..9063123 100644 --- a/src/main.zig +++ b/src/main.zig @@ -17,12 +17,14 @@ pub fn main() !void { const alloc = gpa.allocator(); // todo declare or infer module dependencies, topological sort for init order - // problem: how to specify runtime options, like Window title? // problem: where should gpa go? probably some "Engine" structure in nu.zig // don't necessarily need to declare topological sort - depth-first traversal // of each module's dependencies without repeats would do. + // idea - use a structure like std.Build.Step where the polymorphic part is a + // component of the larger structure. + try nu.Window.init(alloc); defer nu.Window.deinit(); diff --git a/src/nu/Render.zig b/src/nu/Render.zig index 952db90..9dcd518 100644 --- a/src/nu/Render.zig +++ b/src/nu/Render.zig @@ -92,6 +92,11 @@ pub fn frame() !void { for (present_callbacks.items) |cb| { cb(cmd); } + + // todo really don't like this. + // there should be some comptime means for a module to invoke hooks on other modules. 
eg there should be some + // "record" hook that for each module that gets called here; but if the render module is never added then that + // hook never gets called target.end_rendering(cmd); try cmd.endCommandBuffer(); From cb8684cbf3189a1f7e4e0c35db29033e23d35951 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Tue, 9 Jul 2024 23:02:24 -0400 Subject: [PATCH 095/113] clean up hook invocation --- cimgui/src/root.zig | 7 +++- src/main.zig | 14 ++++--- src/nu.zig | 45 ++++++++++++--------- src/nu/ImGui.zig | 42 ++++++++++--------- src/nu/Render/au/ui.zig | 90 ----------------------------------------- 5 files changed, 62 insertions(+), 136 deletions(-) delete mode 100644 src/nu/Render/au/ui.zig diff --git a/cimgui/src/root.zig b/cimgui/src/root.zig index bd5d960..f12ad07 100644 --- a/cimgui/src/root.zig +++ b/cimgui/src/root.zig @@ -1,4 +1,9 @@ -pub const c = @cImport({ +pub usingnamespace @cImport({ + @cDefine("CIMGUI_DEFINE_ENUMS_AND_STRUCTS", {}); + @cInclude("cimgui.h"); +}); + +pub const impl = @cImport({ @cDefine("CIMGUI_DEFINE_ENUMS_AND_STRUCTS", {}); @cInclude("cimgui.h"); diff --git a/src/main.zig b/src/main.zig index 9063123..21cff7a 100644 --- a/src/main.zig +++ b/src/main.zig @@ -11,6 +11,14 @@ pub const nu_options: nu.Options = .{ }, }; +pub const nu_driver = nu.Window; + +pub const nu_modules = .{ + App, + nu.ImGui, + nu.Render, +}; + pub fn main() !void { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; defer _ = gpa.detectLeaks(); @@ -37,9 +45,5 @@ pub fn main() !void { try App.init(alloc); defer App.deinit(); - try nu.run(nu.Window, .{ - App, - nu.ImGui, - nu.Render, - }); + try nu.run(); } diff --git a/src/nu.zig b/src/nu.zig index a977e4e..6ceccb9 100644 --- a/src/nu.zig +++ b/src/nu.zig @@ -12,31 +12,36 @@ pub const Options = struct { }; pub const options: Options = if (@hasDecl(root, "nu_options")) root.nu_options else .{}; +pub const modules = root.nu_modules; +pub const driver = root.nu_driver; -pub fn run( - driver: anytype, - modules: anytype, -) !void { +fn invoke(func: anytype, args: anytype) !void { + if (@typeInfo(@TypeOf(func)).Fn.return_type) |R| { + switch (@typeInfo(R)) { + .ErrorUnion => try @call(.auto, func, args), + .Void => @call(.auto, func, args), + else => { + @compileLog(func, @typeInfo(R)); + @compileError("Invalid hook return type. 
Must be void or !void."); + }, + } + } +} + +pub fn invoke_hook(comptime name: []const u8, args: anytype) !void { + inline for (modules) |module| { + if (@hasDecl(module, name)) { + try invoke(@field(module, name), args); + } + } +} + +pub fn run() !void { while (driver.next()) |events| { // todo event handler _ = events; - inline for (modules) |module| { - if (@hasDecl(module, "frame")) { - if (@typeInfo(@TypeOf(module.frame)).Fn.return_type) |R| { - switch (@typeInfo(R)) { - .ErrorUnion => try module.frame(), - .Void => module.frame(), - else => { - @compileLog(module.frame, @typeInfo(R)); - @compileError("frame must be void or !void."); - }, - } - } else { - module.frame(); - } - } - } + try invoke_hook("frame", .{}); // todo fixed timestep } diff --git a/src/nu/ImGui.zig b/src/nu/ImGui.zig index e5231bd..ee9bfa0 100644 --- a/src/nu/ImGui.zig +++ b/src/nu/ImGui.zig @@ -5,32 +5,34 @@ const std = @import("std"); const vk = @import("vk"); const nu = @import("../nu.zig"); const au = @import("Render/au.zig"); + const Render = @import("Render.zig"); const Window = @import("Window.zig"); const im = @import("cimgui"); +pub usingnamespace im; pub fn loader_wrapper(procname: [*c]const u8, _: ?*anyopaque) callconv(.C) vk.PfnVoidFunction { return au.glfwGetInstanceProcAddress(au.I.handle, procname); } -var ctx: *im.c.ImGuiContext = undefined; +var ctx: *im.ImGuiContext = undefined; var descriptor_pool: vk.DescriptorPool = undefined; pub fn init() !void { - ctx = im.c.igCreateContext(null) orelse { + ctx = im.igCreateContext(null) orelse { return error.igCreateContextFailed; }; - errdefer im.c.igDestroyContext(ctx); + errdefer im.igDestroyContext(ctx); - if (!im.c.ImGui_ImplVulkan_LoadFunctions(loader_wrapper, null)) { + if (!im.impl.ImGui_ImplVulkan_LoadFunctions(loader_wrapper, null)) { return error.igVulkanLoadFunctionsFailed; } - if (!im.c.ImGui_ImplGlfw_InitForVulkan(@ptrCast(Window.handle), true)) { + if (!im.impl.ImGui_ImplGlfw_InitForVulkan(@ptrCast(Window.handle), true)) { return error.igGlfwInitFailed; } - errdefer im.c.ImGui_ImplGlfw_Shutdown(); + errdefer im.impl.ImGui_ImplGlfw_Shutdown(); descriptor_pool = try au.D.createDescriptorPool(&vk.DescriptorPoolCreateInfo{ .flags = .{ .free_descriptor_set_bit = true }, @@ -43,7 +45,7 @@ pub fn init() !void { }, null); errdefer au.D.destroyDescriptorPool(descriptor_pool, null); - if (im.c.ImGui_ImplVulkan_Init(@constCast(&im.c.ImGui_ImplVulkan_InitInfo{ + if (im.impl.ImGui_ImplVulkan_Init(@constCast(&im.impl.ImGui_ImplVulkan_InitInfo{ .Instance = @ptrFromInt(@intFromEnum(au.I.handle)), .PhysicalDevice = @ptrFromInt(@intFromEnum(au.device_config.pdev)), .Device = @ptrFromInt(@intFromEnum(au.D.handle)), @@ -68,9 +70,9 @@ pub fn init() !void { })) != true) { return error.igVulkanInitFailed; } - errdefer im.c.ImGui_ImplVulkan_Shutdown(); + errdefer im.impl.ImGui_ImplVulkan_Shutdown(); - if (!im.c.ImGui_ImplVulkan_CreateFontsTexture()) { + if (!im.impl.ImGui_ImplVulkan_CreateFontsTexture()) { return error.igVulkanFontTextureFailed; } @@ -79,27 +81,27 @@ pub fn init() !void { } pub fn frame() void { - im.c.ImGui_ImplGlfw_NewFrame(); - im.c.ImGui_ImplVulkan_NewFrame(); - im.c.igNewFrame(); + im.impl.ImGui_ImplGlfw_NewFrame(); + im.impl.ImGui_ImplVulkan_NewFrame(); + im.igNewFrame(); - im.c.igShowDemoWindow(null); + im.igShowDemoWindow(null); - im.c.igEndFrame(); - im.c.igRender(); + im.igEndFrame(); + im.igRender(); } pub fn deinit() void { Render.remove_present_callback(present); - im.c.ImGui_ImplVulkan_Shutdown(); + 
im.impl.ImGui_ImplVulkan_Shutdown(); au.D.destroyDescriptorPool(descriptor_pool, null); - im.c.ImGui_ImplGlfw_Shutdown(); - im.c.igDestroyContext(ctx); + im.impl.ImGui_ImplGlfw_Shutdown(); + im.igDestroyContext(ctx); } pub fn present(cmd: au.CommandBufferProxy) void { - im.c.ImGui_ImplVulkan_RenderDrawData( - im.c.igGetDrawData(), + im.impl.ImGui_ImplVulkan_RenderDrawData( + @ptrCast(im.igGetDrawData()), @ptrFromInt(@intFromEnum(cmd.handle)), null, ); diff --git a/src/nu/Render/au/ui.zig b/src/nu/Render/au/ui.zig deleted file mode 100644 index 9d4c5f3..0000000 --- a/src/nu/Render/au/ui.zig +++ /dev/null @@ -1,90 +0,0 @@ -const std = @import("std"); -const vk = @import("vk"); -const im = @import("cimgui"); -const au = @import("../au.zig"); -const c = @import("../c.zig"); - -pub usingnamespace im.c; - -pub fn loader_wrapper(procname: [*c]const u8, _: ?*anyopaque) callconv(.C) vk.PfnVoidFunction { - return c.glfwGetInstanceProcAddress(au.I.handle, procname); -} - -var descriptor_pool: vk.DescriptorPool = undefined; - -pub fn init(frames_in_flight: usize) !*im.c.ImGuiContext { - const ctx = im.c.igCreateContext(null) orelse return error.igCreateContextFailed; - errdefer im.c.igDestroyContext(ctx); - - if (im.c.ImGui_ImplVulkan_LoadFunctions(loader_wrapper, null) != true) { - return error.igVulkanLoadFunctionsFailed; - } - - if (im.c.ImGui_ImplGlfw_InitForVulkan(@ptrCast(au.W.handle), true) != true) { - return error.igGlfwInitFailed; - } - errdefer im.c.ImGui_ImplGlfw_Shutdown(); - - descriptor_pool = try au.D.createDescriptorPool(&vk.DescriptorPoolCreateInfo{ - .flags = .{ .free_descriptor_set_bit = true }, - .pool_size_count = 1, - .p_pool_sizes = &.{vk.DescriptorPoolSize{ .descriptor_count = 32, .type = .combined_image_sampler }}, - .max_sets = 32, - }, null); - errdefer au.D.destroyDescriptorPool(descriptor_pool, null); - - if (im.c.ImGui_ImplVulkan_Init(@constCast(&im.c.ImGui_ImplVulkan_InitInfo{ - .Instance = @ptrFromInt(@intFromEnum(au.I.handle)), - .PhysicalDevice = @ptrFromInt(@intFromEnum(au.device_config.pdev)), - .Device = @ptrFromInt(@intFromEnum(au.D.handle)), - .QueueFamily = au.device_config.family, - .Queue = @ptrFromInt(@intFromEnum(au.Q.handle)), - .DescriptorPool = @ptrFromInt(@intFromEnum(descriptor_pool)), - .RenderPass = null, - .MinImageCount = 2, - .ImageCount = @intCast(frames_in_flight), - .PipelineRenderingCreateInfo = @bitCast(vk.PipelineRenderingCreateInfo{ - .view_mask = 0, - .depth_attachment_format = .undefined, - .stencil_attachment_format = .undefined, - .color_attachment_count = 1, - .p_color_attachment_formats = &.{au.device_config.format.format}, - }), - .MSAASamples = 0, - .PipelineCache = null, - .Subpass = 0, - .UseDynamicRendering = true, - .Allocator = null, - })) != true) { - return error.igVulkanInitFailed; - } - errdefer im.c.ImGui_ImplVulkan_Shutdown(); - - if (im.c.ImGui_ImplVulkan_CreateFontsTexture() != true) { - return error.igVulkanFontTextureFailed; - } - - return ctx; -} - -pub fn deinit(ctx: *im.c.ImGuiContext) void { - im.c.ImGui_ImplVulkan_Shutdown(); - au.D.destroyDescriptorPool(descriptor_pool, null); - im.c.ImGui_ImplGlfw_Shutdown(); - im.c.igDestroyContext(ctx); -} - -pub fn NewFrame() void { - im.c.ImGui_ImplGlfw_NewFrame(); - im.c.ImGui_ImplVulkan_NewFrame(); - im.c.igNewFrame(); -} - -pub fn EndFrame() void { - im.c.igEndFrame(); - im.c.igRender(); -} - -pub fn Draw(cmd: au.CommandBufferProxy) void { - im.c.ImGui_ImplVulkan_RenderDrawData(im.c.igGetDrawData(), @ptrFromInt(@intFromEnum(cmd.handle)), null); -} From 
cc1a9fdabc78c44b6afd294c5f3e4a88f478f7c1 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Tue, 9 Jul 2024 23:35:40 -0400 Subject: [PATCH 096/113] easy comptime hooks --- src/App.zig | 16 ++++++++++++++++ src/main.zig | 20 ++++++++++---------- src/nu.zig | 34 ++++++++++++++++++++++------------ src/nu/ImGui.zig | 26 ++++++++++++-------------- src/nu/Render.zig | 41 +++++++++-------------------------------- 5 files changed, 69 insertions(+), 68 deletions(-) diff --git a/src/App.zig b/src/App.zig index 95a830a..cf4d0b7 100644 --- a/src/App.zig +++ b/src/App.zig @@ -1,8 +1,24 @@ const std = @import("std"); const nu = @import("nu.zig"); +const Bus = @import("nu/Bus.zig"); +const Render = @import("nu/Render.zig"); +const ImGui = @import("nu/ImGui.zig"); + pub fn init(alloc: std.mem.Allocator) !void { _ = alloc; } pub fn deinit() void {} + +pub fn nu_frame() void {} + +// pub fn nu_events(events: []const Bus.Event) void { +// std.debug.print("{any}\n", .{events}); +// } + +// pub fn nu_render_present(_: Render.au.CommandBufferProxy) void {} + +pub fn nu_imgui_frame() void { + ImGui.igShowMetricsWindow(null); +} diff --git a/src/main.zig b/src/main.zig index 21cff7a..b8848d5 100644 --- a/src/main.zig +++ b/src/main.zig @@ -11,8 +11,17 @@ pub const nu_options: nu.Options = .{ }, }; -pub const nu_driver = nu.Window; +// todo declare or infer module dependencies, topological sort for init order. clean up "init" lines in main. +// +// problem: where should gpa go? probably some "Engine" structure in nu.zig +// +// don't necessarily need to declare topological sort - depth-first traversal +// of each module's dependencies without repeats would do. +// +// idea - use a structure like std.Build.Step where the polymorphic part is a +// component of the larger structure. +pub const nu_driver = nu.Window; pub const nu_modules = .{ App, nu.ImGui, @@ -24,15 +33,6 @@ pub fn main() !void { defer _ = gpa.detectLeaks(); const alloc = gpa.allocator(); - // todo declare or infer module dependencies, topological sort for init order - // problem: where should gpa go? probably some "Engine" structure in nu.zig - - // don't necessarily need to declare topological sort - depth-first traversal - // of each module's dependencies without repeats would do. - - // idea - use a structure like std.Build.Step where the polymorphic part is a - // component of the larger structure. - try nu.Window.init(alloc); defer nu.Window.deinit(); diff --git a/src/nu.zig b/src/nu.zig index 6ceccb9..887515b 100644 --- a/src/nu.zig +++ b/src/nu.zig @@ -8,13 +8,30 @@ pub const ImGui = @import("nu/ImGui.zig"); pub const Options = struct { window: Window.Options = .{}, render: Render.Options = .{}, - // imgui: ImGui.Options = .{}, + imgui: ImGui.Options = .{}, }; pub const options: Options = if (@hasDecl(root, "nu_options")) root.nu_options else .{}; pub const modules = root.nu_modules; pub const driver = root.nu_driver; +pub fn run() !void { + try invoke_hook("nu_enter", .{}); + + while (driver.next()) |events| { + if (events.len > 0) { + try invoke_hook("nu_events", .{events}); + } + + // todo frame timer + try invoke_hook("nu_frame", .{}); + + // todo fixed timestep + } + + try invoke_hook("nu_close", .{}); +} + fn invoke(func: anytype, args: anytype) !void { if (@typeInfo(@TypeOf(func)).Fn.return_type) |R| { switch (@typeInfo(R)) { @@ -28,6 +45,10 @@ fn invoke(func: anytype, args: anytype) !void { } } +// todo specify hook type. 
+// - special handling for error unions +// - allow per-hook state somehow declared in the handler + pub fn invoke_hook(comptime name: []const u8, args: anytype) !void { inline for (modules) |module| { if (@hasDecl(module, name)) { @@ -35,14 +56,3 @@ pub fn invoke_hook(comptime name: []const u8, args: anytype) !void { } } } - -pub fn run() !void { - while (driver.next()) |events| { - // todo event handler - _ = events; - - try invoke_hook("frame", .{}); - - // todo fixed timestep - } -} diff --git a/src/nu/ImGui.zig b/src/nu/ImGui.zig index ee9bfa0..ed69fb1 100644 --- a/src/nu/ImGui.zig +++ b/src/nu/ImGui.zig @@ -12,6 +12,8 @@ const Window = @import("Window.zig"); const im = @import("cimgui"); pub usingnamespace im; +pub const Options = struct {}; + pub fn loader_wrapper(procname: [*c]const u8, _: ?*anyopaque) callconv(.C) vk.PfnVoidFunction { return au.glfwGetInstanceProcAddress(au.I.handle, procname); } @@ -75,34 +77,30 @@ pub fn init() !void { if (!im.impl.ImGui_ImplVulkan_CreateFontsTexture()) { return error.igVulkanFontTextureFailed; } - - try Render.add_present_callback(present); - errdefer Render.remove_present_callback(present); } -pub fn frame() void { +pub fn nu_frame() !void { im.impl.ImGui_ImplGlfw_NewFrame(); im.impl.ImGui_ImplVulkan_NewFrame(); im.igNewFrame(); - im.igShowDemoWindow(null); + try nu.invoke_hook("nu_imgui_frame", .{}); im.igEndFrame(); im.igRender(); } -pub fn deinit() void { - Render.remove_present_callback(present); - im.impl.ImGui_ImplVulkan_Shutdown(); - au.D.destroyDescriptorPool(descriptor_pool, null); - im.impl.ImGui_ImplGlfw_Shutdown(); - im.igDestroyContext(ctx); -} - -pub fn present(cmd: au.CommandBufferProxy) void { +pub fn nu_render_present(cmd: au.CommandBufferProxy) void { im.impl.ImGui_ImplVulkan_RenderDrawData( @ptrCast(im.igGetDrawData()), @ptrFromInt(@intFromEnum(cmd.handle)), null, ); } + +pub fn deinit() void { + im.impl.ImGui_ImplVulkan_Shutdown(); + au.D.destroyDescriptorPool(descriptor_pool, null); + im.impl.ImGui_ImplGlfw_Shutdown(); + im.igDestroyContext(ctx); +} diff --git a/src/nu/Render.zig b/src/nu/Render.zig index 9dcd518..0d4d843 100644 --- a/src/nu/Render.zig +++ b/src/nu/Render.zig @@ -30,33 +30,11 @@ pub const Options = struct { var sc: au.SwapChain = undefined; var flights: au.Flights = undefined; -const PresentCallback = *const fn (au.CommandBufferProxy) void; -var present_callbacks: std.ArrayList(PresentCallback) = undefined; - -pub fn add_present_callback(cb: PresentCallback) !void { - if (std.mem.indexOfScalar(PresentCallback, present_callbacks.items, cb)) |_| { - return; - } else { - try present_callbacks.append(cb); - } -} - -pub fn remove_present_callback(cb: PresentCallback) void { - if (std.mem.indexOfScalar(PresentCallback, present_callbacks.items, cb)) |idx| { - _ = present_callbacks.orderedRemove(idx); - } else { - return; - } -} - pub fn init(alloc: std.mem.Allocator) !void { // todo pick apart au into helpers; not a sub-module filled with its own globals. 
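    The TODO just above is only a note in this series; one possible direction, sketched here with entirely hypothetical names (nothing called Context exists in these patches), is for au.init to build and return a context value instead of filling module-level globals, so Render owns the handles it uses. Inside au.zig, reusing its existing wrapper and proxy types, that could look roughly like:

    pub const Context = struct {
        iw: InstanceWrapper,
        dw: DeviceWrapper,
        instance: vk.Instance,
        surface: vk.SurfaceKHR,
        device: vk.Device,
        queue: vk.Queue,

        pub fn deinit(self: *Context) void {
            const ip = InstanceProxy.init(self.instance, &self.iw);
            const dp = DeviceProxy.init(self.device, &self.dw);
            // Tear down in reverse creation order: device, surface, instance.
            dp.destroyDevice(null);
            ip.destroySurfaceKHR(self.surface, null);
            ip.destroyInstance(null);
        }
    };

    init would then return a Context (or an error), and the S/I/D/Q pointers would become fields or accessors on that value rather than globals.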
try au.init(alloc); errdefer au.deinit(); - present_callbacks = std.ArrayList(PresentCallback).init(alloc); - errdefer present_callbacks.deinit(); - sc = try au.SwapChain.init(alloc); errdefer sc.deinit(); @@ -64,7 +42,7 @@ pub fn init(alloc: std.mem.Allocator) !void { errdefer flights.deinit(); } -pub fn frame() !void { +pub fn nu_frame() !void { const flight: au.Flights.Flight = flights.next(); try flight.wait(); @@ -89,14 +67,11 @@ pub fn frame() !void { try cmd.beginCommandBuffer(&.{ .flags = .{ .one_time_submit_bit = true } }); target.begin_rendering(cmd, render_area); - for (present_callbacks.items) |cb| { - cb(cmd); - } - // todo really don't like this. - // there should be some comptime means for a module to invoke hooks on other modules. eg there should be some - // "record" hook that for each module that gets called here; but if the render module is never added then that - // hook never gets called + // todo manage frame in flight state for each hook; pass the current flight in as context. + // will need some comptime -> anytype mapping. + try nu.invoke_hook("nu_render_present", .{cmd}); + target.end_rendering(cmd); try cmd.endCommandBuffer(); @@ -127,9 +102,11 @@ pub fn frame() !void { } } +pub fn nu_close() !void { + try au.D.deviceWaitIdle(); +} + pub fn deinit() void { - present_callbacks.deinit(); - au.D.deviceWaitIdle() catch {}; flights.deinit(); sc.deinit(); au.deinit(); From aaea1fcf2ab98d853fcbd3f14796e060ca847832 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Tue, 9 Jul 2024 23:37:19 -0400 Subject: [PATCH 097/113] style --- src/nu.zig | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/src/nu.zig b/src/nu.zig index 887515b..72638e4 100644 --- a/src/nu.zig +++ b/src/nu.zig @@ -32,6 +32,18 @@ pub fn run() !void { try invoke_hook("nu_close", .{}); } +// todo specify hook type. +// - special handling for error unions +// - allow per-hook state somehow declared in the handler + +pub fn invoke_hook(comptime name: []const u8, args: anytype) !void { + inline for (modules) |module| { + if (@hasDecl(module, name)) { + try invoke(@field(module, name), args); + } + } +} + fn invoke(func: anytype, args: anytype) !void { if (@typeInfo(@TypeOf(func)).Fn.return_type) |R| { switch (@typeInfo(R)) { @@ -44,15 +56,3 @@ fn invoke(func: anytype, args: anytype) !void { } } } - -// todo specify hook type. 
-// - special handling for error unions -// - allow per-hook state somehow declared in the handler - -pub fn invoke_hook(comptime name: []const u8, args: anytype) !void { - inline for (modules) |module| { - if (@hasDecl(module, name)) { - try invoke(@field(module, name), args); - } - } -} From e297865e93abd0603dc82d6762350dc31068fc37 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Wed, 10 Jul 2024 12:25:39 -0400 Subject: [PATCH 098/113] better hook type --- src/nu/hooks.zig | 174 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 174 insertions(+) create mode 100644 src/nu/hooks.zig diff --git a/src/nu/hooks.zig b/src/nu/hooks.zig new file mode 100644 index 0000000..79debaf --- /dev/null +++ b/src/nu/hooks.zig @@ -0,0 +1,174 @@ +const std = @import("std"); + +pub fn Hook(ftype: type) type { + const F: std.builtin.Type.Fn = @typeInfo(ftype).Fn; + const Result: type = F.return_type.?; + + return struct { + const Self = @This(); + + handlers: std.AutoArrayHashMap(*const ftype, void), + + pub fn init(alloc: std.mem.Allocator) Self { + return Self{ + .handlers = std.AutoArrayHashMap(*const ftype, void).init(alloc), + }; + } + + pub fn deinit(self: *Self) void { + self.handlers.deinit(); + } + + pub fn register(self: *Self, f: ftype) !void { + try self.handlers.putNoClobber(f, {}); + } + + pub fn unregister(self: *Self, f: ftype) void { + _ = self.handlers.orderedRemove(f); + } + + fn invoke_alloc_results(self: Self, alloc: std.mem.Allocator, args: anytype) ![]Result { + const results = try alloc.alloc(Result, self.handlers.count()); + for (self.handlers.keys(), results) |handler, *result| { + result.* = @call(.auto, handler, args); + } + return results; + } + + fn invoke_void(self: Self, args: anytype) void { + for (self.handlers.keys()) |handler| { + @call(.auto, handler, args); + } + } + + pub const invoke = switch (@typeInfo(Result)) { + .Void => invoke_void, + else => invoke_alloc_results, + }; + }; +} + +test "void hooks" { + const hooks = struct { + pub fn set_one(f: *usize) void { + f.* |= 0b01; + } + + pub fn set_two(f: *usize) void { + f.* |= 0b10; + } + }; + + var set_flags = Hook(fn (*usize) void).init(std.testing.allocator); + defer set_flags.deinit(); + + var flag: usize = undefined; + + flag = 0; + set_flags.invoke(.{&flag}); + try std.testing.expect(flag == 0b00); + + try set_flags.register(hooks.set_one); + + flag = 0; + set_flags.invoke(.{&flag}); + try std.testing.expect(flag == 0b01); + + try set_flags.register(hooks.set_two); + + flag = 0; + set_flags.invoke(.{&flag}); + try std.testing.expect(flag == 0b11); + + set_flags.unregister(hooks.set_one); + + flag = 0; + set_flags.invoke(.{&flag}); + try std.testing.expect(flag == 0b10); + + set_flags.unregister(hooks.set_two); + + flag = 0; + set_flags.invoke(.{&flag}); + try std.testing.expect(flag == 0b00); +} + +test "collect hooks" { + const hooks = struct { + pub fn double(f: usize) usize { + return f * 2; + } + + pub fn square(f: usize) usize { + return f * f; + } + }; + + var collect = Hook(fn (usize) usize).init(std.testing.allocator); + defer collect.deinit(); + + { + const result = try collect.invoke(std.testing.allocator, .{3}); + defer std.testing.allocator.free(result); + try std.testing.expectEqualSlices(usize, &.{}, result); + } + + try collect.register(hooks.double); + + { + const result = try collect.invoke(std.testing.allocator, .{4}); + defer std.testing.allocator.free(result); + try std.testing.expectEqualSlices(usize, &.{8}, result); + } + + try collect.register(hooks.square); + + { + const 
result = try collect.invoke(std.testing.allocator, .{5}); + defer std.testing.allocator.free(result); + try std.testing.expectEqualSlices(usize, &.{ 10, 25 }, result); + } + + collect.unregister(hooks.double); + + { + const result = try collect.invoke(std.testing.allocator, .{6}); + defer std.testing.allocator.free(result); + try std.testing.expectEqualSlices(usize, &.{36}, result); + } + + collect.unregister(hooks.square); + + { + const result = try collect.invoke(std.testing.allocator, .{7}); + defer std.testing.allocator.free(result); + try std.testing.expectEqualSlices(usize, &.{}, result); + } +} + +test "error_hooks" { + const CollectError = error{Fail}; + const Collect = Hook(fn (usize) CollectError!usize); + var collect = Collect.init(std.testing.allocator); + defer collect.deinit(); + + const hooks = struct { + pub fn halve(f: usize) !usize { + if (f % 2 == 0) return f / 2; + return CollectError.Fail; + } + + pub fn third(f: usize) !usize { + if (f % 3 == 0) return f / 3; + return CollectError.Fail; + } + }; + + try collect.register(hooks.halve); + try collect.register(hooks.third); + + const result = try collect.invoke(std.testing.allocator, .{4}); + defer std.testing.allocator.free(result); + try std.testing.expectEqual(2, try result[0]); + try std.testing.expectError(CollectError.Fail, result[1]); +} From fd1bd9dbf59ea427e508150076f4b372d5c92433 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Wed, 10 Jul 2024 13:06:44 -0400 Subject: [PATCH 099/113] runtime hooks --- src/App.zig | 6 ++- src/main.zig | 7 +++- src/nu.zig | 98 +++++++++++++++++++++++++++++++++-------------- src/nu/ImGui.zig | 37 +++++++++++++----- src/nu/Render.zig | 39 ++++++++++++++----- src/nu/Window.zig | 2 +- 6 files changed, 138 insertions(+), 51 deletions(-) diff --git a/src/App.zig b/src/App.zig index cf4d0b7..855902f 100644 --- a/src/App.zig +++ b/src/App.zig @@ -9,9 +9,13 @@ pub fn init(alloc: std.mem.Allocator) !void { _ = alloc; } +pub fn connect() !void { + try ImGui.hooks.frame.register(nu_imgui_frame); +} + pub fn deinit() void {} -pub fn nu_frame() void {} +// pub fn nu_frame() void {} // pub fn nu_events(events: []const Bus.Event) void { // std.debug.print("{any}\n", .{events}); diff --git a/src/main.zig b/src/main.zig index b8848d5..6f9602d 100644 --- a/src/main.zig +++ b/src/main.zig @@ -33,17 +33,20 @@ pub fn main() !void { defer _ = gpa.detectLeaks(); const alloc = gpa.allocator(); + nu.init(alloc); + defer nu.deinit(); + try nu.Window.init(alloc); defer nu.Window.deinit(); try nu.Render.init(alloc); defer nu.Render.deinit(); - try nu.ImGui.init(); + try nu.ImGui.init(alloc); defer nu.ImGui.deinit(); try App.init(alloc); defer App.deinit(); - try nu.run(); + try nu.run(alloc); } diff --git a/src/nu.zig b/src/nu.zig index 72638e4..c1f15ee 100644 --- a/src/nu.zig +++ b/src/nu.zig @@ -5,6 +5,9 @@ pub const Window = @import("nu/Window.zig"); pub const Render = @import("nu/Render.zig"); pub const ImGui = @import("nu/ImGui.zig"); +pub const Bus = @import("nu/Bus.zig"); +pub const Hook = @import("nu/hooks.zig").Hook; + pub const Options = struct { window: Window.Options = .{}, render: Render.Options = .{}, @@ -15,44 +18,81 @@ pub const options: Options = if (@hasDecl(root, "nu_options")) root.nu_options e pub const modules = root.nu_modules; pub const driver = root.nu_driver; -pub fn run() !void { - try invoke_hook("nu_enter", .{}); +pub const Hooks = struct { + pub const Enter = Hook(fn () void); + pub const Events = Hook(fn ([]Bus.Event) void); + pub const Frame = Hook(fn () anyerror!void); + 
pub const Close = Hook(fn () void); + + enter: Enter, + events: Events, + frame: Frame, + close: Close, +}; + +pub var hooks: Hooks = undefined; + +pub fn init(alloc: std.mem.Allocator) void { + hooks = .{ + .enter = Hooks.Enter.init(alloc), + .events = Hooks.Events.init(alloc), + .frame = Hooks.Frame.init(alloc), + .close = Hooks.Close.init(alloc), + }; +} + +pub fn deinit() void { + hooks.enter.deinit(); + hooks.events.deinit(); + hooks.frame.deinit(); + hooks.close.deinit(); +} + +pub fn run(alloc: std.mem.Allocator) !void { + // todo hooks should execute in priority order; then hook.register accepts a "priority" argument. + inline for (modules) |module| { + try module.connect(); + } + + hooks.enter.invoke(.{}); while (driver.next()) |events| { if (events.len > 0) { - try invoke_hook("nu_events", .{events}); + hooks.events.invoke(.{events}); } // todo frame timer - try invoke_hook("nu_frame", .{}); + const frame_results = try hooks.frame.invoke(alloc, .{}); + defer alloc.free(frame_results); + for (frame_results) |result| result catch |err| return err; // todo fixed timestep } - try invoke_hook("nu_close", .{}); + hooks.close.invoke(.{}); } -// todo specify hook type. -// - special handling for error unions -// - allow per-hook state somehow declared in the handler - -pub fn invoke_hook(comptime name: []const u8, args: anytype) !void { - inline for (modules) |module| { - if (@hasDecl(module, name)) { - try invoke(@field(module, name), args); - } - } -} - -fn invoke(func: anytype, args: anytype) !void { - if (@typeInfo(@TypeOf(func)).Fn.return_type) |R| { - switch (@typeInfo(R)) { - .ErrorUnion => try @call(.auto, func, args), - .Void => @call(.auto, func, args), - else => { - @compileLog(func, @typeInfo(R)); - @compileError("Invalid hook return type. Must be void or !void."); - }, - } - } -} +// // todo specify hook type. +// // - special handling for error unions +// // - allow per-hook state somehow declared in the handler +// +// pub fn invoke_hook(comptime name: []const u8, args: anytype) !void { +// inline for (modules) |module| { +// if (@hasDecl(module, name)) { +// try invoke(@field(module, name), args); +// } +// } +// } +// +// fn invoke(func: anytype, args: anytype) !void { +// if (@typeInfo(@TypeOf(func)).Fn.return_type) |R| { +// switch (@typeInfo(R)) { +// .ErrorUnion => try @call(.auto, func, args), +// .Void => @call(.auto, func, args), +// else => { +// @compileLog(func, @typeInfo(R)); +// @compileError("Invalid hook return type. 
Must be void or !void."); +// }, +// } +// } +// } diff --git a/src/nu/ImGui.zig b/src/nu/ImGui.zig index ed69fb1..66ad5a3 100644 --- a/src/nu/ImGui.zig +++ b/src/nu/ImGui.zig @@ -6,6 +6,7 @@ const vk = @import("vk"); const nu = @import("../nu.zig"); const au = @import("Render/au.zig"); +const Hook = @import("hooks.zig").Hook; const Render = @import("Render.zig"); const Window = @import("Window.zig"); @@ -14,6 +15,11 @@ pub usingnamespace im; pub const Options = struct {}; +pub const Hooks = struct { + pub const Frame = Hook(fn () void); + frame: Frame, +}; + pub fn loader_wrapper(procname: [*c]const u8, _: ?*anyopaque) callconv(.C) vk.PfnVoidFunction { return au.glfwGetInstanceProcAddress(au.I.handle, procname); } @@ -21,7 +27,9 @@ pub fn loader_wrapper(procname: [*c]const u8, _: ?*anyopaque) callconv(.C) vk.Pf var ctx: *im.ImGuiContext = undefined; var descriptor_pool: vk.DescriptorPool = undefined; -pub fn init() !void { +pub var hooks: Hooks = undefined; + +pub fn init(alloc: std.mem.Allocator) !void { ctx = im.igCreateContext(null) orelse { return error.igCreateContextFailed; }; @@ -77,6 +85,24 @@ pub fn init() !void { if (!im.impl.ImGui_ImplVulkan_CreateFontsTexture()) { return error.igVulkanFontTextureFailed; } + + hooks = .{ + .frame = Hooks.Frame.init(alloc), + }; + errdefer hooks.frame.deinit(); +} + +pub fn deinit() void { + hooks.frame.deinit(); + im.impl.ImGui_ImplVulkan_Shutdown(); + au.D.destroyDescriptorPool(descriptor_pool, null); + im.impl.ImGui_ImplGlfw_Shutdown(); + im.igDestroyContext(ctx); +} + +pub fn connect() !void { + try nu.hooks.frame.register(nu_frame); + try Render.hooks.present.register(nu_render_present); } pub fn nu_frame() !void { @@ -84,7 +110,7 @@ pub fn nu_frame() !void { im.impl.ImGui_ImplVulkan_NewFrame(); im.igNewFrame(); - try nu.invoke_hook("nu_imgui_frame", .{}); + hooks.frame.invoke(.{}); im.igEndFrame(); im.igRender(); @@ -97,10 +123,3 @@ pub fn nu_render_present(cmd: au.CommandBufferProxy) void { null, ); } - -pub fn deinit() void { - im.impl.ImGui_ImplVulkan_Shutdown(); - au.D.destroyDescriptorPool(descriptor_pool, null); - im.impl.ImGui_ImplGlfw_Shutdown(); - im.igDestroyContext(ctx); -} diff --git a/src/nu/Render.zig b/src/nu/Render.zig index 0d4d843..697f8c8 100644 --- a/src/nu/Render.zig +++ b/src/nu/Render.zig @@ -9,6 +9,8 @@ const vk = @import("vk"); const nu = @import("../nu.zig"); const au = @import("Render/au.zig"); +const Hook = @import("hooks.zig").Hook; + pub const Options = struct { app_name: [*:0]const u8 = "nu-au-app", app_version: struct { @@ -27,6 +29,14 @@ pub const Options = struct { frames_in_flight: u8 = 3, }; +pub const Hooks = struct { + pub const Present = Hook(fn (au.CommandBufferProxy) void); + + present: Present, +}; + +pub var hooks: Hooks = undefined; + var sc: au.SwapChain = undefined; var flights: au.Flights = undefined; @@ -40,6 +50,21 @@ pub fn init(alloc: std.mem.Allocator) !void { flights = try au.Flights.init(alloc, nu.options.render.frames_in_flight); errdefer flights.deinit(); + + hooks = .{ .present = Hooks.Present.init(alloc) }; + errdefer hooks.present.deinit(); +} + +pub fn deinit() void { + hooks.present.deinit(); + flights.deinit(); + sc.deinit(); + au.deinit(); +} + +pub fn connect() !void { + try nu.hooks.frame.register(nu_frame); + try nu.hooks.close.register(nu_close); } pub fn nu_frame() !void { @@ -70,7 +95,7 @@ pub fn nu_frame() !void { // todo manage frame in flight state for each hook; pass the current flight in as context. // will need some comptime -> anytype mapping. 
- try nu.invoke_hook("nu_render_present", .{cmd}); + hooks.present.invoke(.{cmd}); target.end_rendering(cmd); try cmd.endCommandBuffer(); @@ -102,12 +127,8 @@ pub fn nu_frame() !void { } } -pub fn nu_close() !void { - try au.D.deviceWaitIdle(); -} - -pub fn deinit() void { - flights.deinit(); - sc.deinit(); - au.deinit(); +pub fn nu_close() void { + au.D.deviceWaitIdle() catch |err| { + std.debug.panic("Device wait failed: {!}", .{err}); + }; } diff --git a/src/nu/Window.zig b/src/nu/Window.zig index e2363ff..303bc5b 100644 --- a/src/nu/Window.zig +++ b/src/nu/Window.zig @@ -21,7 +21,7 @@ pub const Options = struct { x11_instance_name: [*:0]const u8 = "floating_window", }; -var bus: Bus = undefined; +var bus: Bus = undefined; // todo bus should probably move to engine. pub var handle: *c.GLFWwindow = undefined; var unfocused_rate: f32 = 1.0 / 20.0; From 53cbe35a97e1bdf3ef1872f09705003f10858b1c Mon Sep 17 00:00:00 2001 From: David Allemang Date: Wed, 10 Jul 2024 14:32:37 -0400 Subject: [PATCH 100/113] clean up hook implementation --- src/nu/hooks.zig | 102 ++++++++++++++++++++++++++++------------------- 1 file changed, 61 insertions(+), 41 deletions(-) diff --git a/src/nu/hooks.zig b/src/nu/hooks.zig index 79debaf..ca8fef8 100644 --- a/src/nu/hooks.zig +++ b/src/nu/hooks.zig @@ -4,51 +4,74 @@ pub fn Hook(ftype: type) type { const F: std.builtin.Type.Fn = @typeInfo(ftype).Fn; const Result: type = F.return_type.?; - return struct { - const Self = @This(); + return switch (@typeInfo(Result)) { + .Void => struct { + const Self = @This(); - handlers: std.AutoArrayHashMap(*const ftype, void), + handlers: std.AutoArrayHashMap(*const ftype, void), - pub fn init(alloc: std.mem.Allocator) Self { - return Self{ - .handlers = std.AutoArrayHashMap(*const ftype, void).init(alloc), - }; - } - - pub fn deinit(self: *Self) void { - self.handlers.deinit(); - } - - pub fn register(self: *Self, f: ftype) !void { - try self.handlers.putNoClobber(f, {}); - } - - pub fn unregister(self: *Self, f: ftype) void { - _ = self.handlers.orderedRemove(f); - } - - fn invoke_alloc_results(self: Self, alloc: std.mem.Allocator, args: anytype) ![]Result { - const results = try alloc.alloc(Result, self.handlers.count()); - for (self.handlers.keys(), results) |handler, *result| { - result.* = @call(.auto, handler, args); + pub fn init(alloc: std.mem.Allocator) Self { + return Self{ + .handlers = std.AutoArrayHashMap(*const ftype, void).init(alloc), + }; } - return results; - } - fn invoke_void(self: Self, args: anytype) void { - for (self.handlers.keys()) |handler| { - @call(.auto, handler, args); + pub fn deinit(self: *Self) void { + self.handlers.deinit(); } - } - pub const invoke = switch (@typeInfo(Result)) { - .Void => invoke_void, - else => invoke_alloc_results, - }; + pub fn register(self: *Self, f: ftype) !void { + try self.handlers.putNoClobber(f, {}); + } + + pub fn unregister(self: *Self, f: ftype) void { + _ = self.handlers.orderedRemove(f); + } + + fn invoke(self: Self, args: anytype) void { + for (self.handlers.keys()) |handler| { + @call(.auto, handler, args); + } + } + }, + else => struct { + const Self = @This(); + + handlers: std.AutoArrayHashMap(*const ftype, void), + + pub fn init(alloc: std.mem.Allocator) Self { + return Self{ + .handlers = std.AutoArrayHashMap(*const ftype, void).init(alloc), + }; + } + + pub fn deinit(self: *Self) void { + self.handlers.deinit(); + } + + pub fn register(self: *Self, f: ftype) !void { + try self.handlers.putNoClobber(f, {}); + } + + pub fn unregister(self: *Self, f: ftype) 
void { + _ = self.handlers.orderedRemove(f); + } + + fn invoke(self: Self, alloc: std.mem.Allocator, args: anytype) ![]Result { + const results = try alloc.alloc(Result, self.handlers.count()); + for (self.handlers.keys(), results) |handler, *result| { + result.* = @call(.auto, handler, args); + } + return results; + } + }, }; } test "void hooks" { + var set_flags = Hook(fn (*usize) void).init(std.testing.allocator); + defer set_flags.deinit(); + const hooks = struct { pub fn set_one(f: *usize) void { f.* |= 0b01; @@ -59,9 +82,6 @@ test "void hooks" { } }; - var set_flags = Hook(fn (*usize) void).init(std.testing.allocator); - defer set_flags.deinit(); - var flag: usize = undefined; flag = 0; @@ -94,6 +114,9 @@ test "void hooks" { } test "collect hooks" { + var collect = Hook(fn (usize) usize).init(std.testing.allocator); + defer collect.deinit(); + const hooks = struct { pub fn double(f: usize) usize { return f * 2; @@ -104,9 +127,6 @@ test "collect hooks" { } }; - var collect = Hook(fn (usize) usize).init(std.testing.allocator); - defer collect.deinit(); - { const result = try collect.invoke(std.testing.allocator, .{3}); defer std.testing.allocator.free(result); From 0efc9310067e7b68d39cc56aed2520912a58b548 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Wed, 10 Jul 2024 17:27:40 -0400 Subject: [PATCH 101/113] wip polymorphic modules --- src/App.zig | 26 ++++----- src/main.zig | 48 ++++++++-------- src/nu.zig | 133 ++++++++++++++++++++----------------------- src/nu/ImGui.zig | 48 ++++++++-------- src/nu/Render.zig | 56 +++++++++--------- src/nu/Render/au.zig | 37 ++++++------ src/nu/Window.zig | 75 ++++++++++++------------ src/nu/hooks.zig | 4 +- 8 files changed, 208 insertions(+), 219 deletions(-) diff --git a/src/App.zig b/src/App.zig index 855902f..ae5db5b 100644 --- a/src/App.zig +++ b/src/App.zig @@ -5,24 +5,24 @@ const Bus = @import("nu/Bus.zig"); const Render = @import("nu/Render.zig"); const ImGui = @import("nu/ImGui.zig"); -pub fn init(alloc: std.mem.Allocator) !void { - _ = alloc; +pub fn module() nu.Module { + return nu.Module{ + .name = "App", + // .dependencies = &.{ Render.module(), ImGui.module() }, + .setup = setup, + .teardown = teardown, + .update = update, + }; } -pub fn connect() !void { - try ImGui.hooks.frame.register(nu_imgui_frame); +pub fn setup(_: std.mem.Allocator) !void { + try ImGui.gui.register(gui); } -pub fn deinit() void {} +pub fn teardown() void {} -// pub fn nu_frame() void {} +pub fn update() !void {} -// pub fn nu_events(events: []const Bus.Event) void { -// std.debug.print("{any}\n", .{events}); -// } - -// pub fn nu_render_present(_: Render.au.CommandBufferProxy) void {} - -pub fn nu_imgui_frame() void { +pub fn gui() void { ImGui.igShowMetricsWindow(null); } diff --git a/src/main.zig b/src/main.zig index 6f9602d..6c1751c 100644 --- a/src/main.zig +++ b/src/main.zig @@ -23,30 +23,32 @@ pub const nu_options: nu.Options = .{ pub const nu_driver = nu.Window; pub const nu_modules = .{ - App, - nu.ImGui, nu.Render, + nu.ImGui, + App, }; -pub fn main() !void { - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; - defer _ = gpa.detectLeaks(); - const alloc = gpa.allocator(); +pub const main = nu.main; - nu.init(alloc); - defer nu.deinit(); - - try nu.Window.init(alloc); - defer nu.Window.deinit(); - - try nu.Render.init(alloc); - defer nu.Render.deinit(); - - try nu.ImGui.init(alloc); - defer nu.ImGui.deinit(); - - try App.init(alloc); - defer App.deinit(); - - try nu.run(alloc); -} +// pub fn main() !void { +// var gpa = 
std.heap.GeneralPurposeAllocator(.{}){}; +// defer _ = gpa.detectLeaks(); +// const alloc = gpa.allocator(); +// +// nu.init(alloc); +// defer nu.deinit(); +// +// try nu.Window.init(alloc); +// defer nu.Window.deinit(); +// +// try nu.Render.init(alloc); +// defer nu.Render.deinit(); +// +// try nu.ImGui.init(alloc); +// defer nu.ImGui.deinit(); +// +// try App.init(alloc); +// defer App.deinit(); +// +// try nu.run(alloc); +// } diff --git a/src/nu.zig b/src/nu.zig index c1f15ee..09a2020 100644 --- a/src/nu.zig +++ b/src/nu.zig @@ -8,91 +8,80 @@ pub const ImGui = @import("nu/ImGui.zig"); pub const Bus = @import("nu/Bus.zig"); pub const Hook = @import("nu/hooks.zig").Hook; -pub const Options = struct { - window: Window.Options = .{}, - render: Render.Options = .{}, - imgui: ImGui.Options = .{}, +const Config = struct { + window: Window.Config = .{}, + render: Render.Config = .{}, }; -pub const options: Options = if (@hasDecl(root, "nu_options")) root.nu_options else .{}; -pub const modules = root.nu_modules; -pub const driver = root.nu_driver; +pub const config: Config = if (@hasDecl(root, "nu_config")) root.nu_config else .{}; -pub const Hooks = struct { - pub const Enter = Hook(fn () void); - pub const Events = Hook(fn ([]Bus.Event) void); - pub const Frame = Hook(fn () anyerror!void); - pub const Close = Hook(fn () void); - - enter: Enter, - events: Events, - frame: Frame, - close: Close, +pub const Module = struct { + // todo dependencies + name: []const u8, + setup: *const fn (alloc: std.mem.Allocator) anyerror!void, + teardown: *const fn () void, + update: ?*const fn () anyerror!void = null, + frame: ?*const fn () anyerror!void = null, + dependencies: []const Module = &.{}, }; -pub var hooks: Hooks = undefined; +pub const Driver = struct { + module: Module, + next: *const fn () bool, // events? callbacks? +}; -pub fn init(alloc: std.mem.Allocator) void { - hooks = .{ - .enter = Hooks.Enter.init(alloc), - .events = Hooks.Events.init(alloc), - .frame = Hooks.Frame.init(alloc), - .close = Hooks.Close.init(alloc), - }; +fn enable(modules: *std.StringArrayHashMap(Module), module: Module) void { + // this doesn't handle dependencies correctly. need to do real topological sort. + + if (modules.contains(module.name)) return; + for (module.dependencies) |dep| enable(modules, dep); + modules.putNoClobber(module.name, module) catch @panic("OOM"); } -pub fn deinit() void { - hooks.enter.deinit(); - hooks.events.deinit(); - hooks.frame.deinit(); - hooks.close.deinit(); -} +pub fn main() void { + var gpa = std.heap.GeneralPurposeAllocator(.{}){}; + const alloc = gpa.allocator(); -pub fn run(alloc: std.mem.Allocator) !void { - // todo hooks should execute in priority order; then hook.register accepts a "priority" argument. 
- inline for (modules) |module| { - try module.connect(); + var modules = std.StringArrayHashMap(Module).init(alloc); + defer modules.deinit(); + + const driver = root.nu_driver.driver(); + enable(&modules, driver.module); + inline for (root.nu_modules) |mod| { + enable(&modules, mod.module()); } - hooks.enter.invoke(.{}); + for (modules.values()) |mod| { + std.log.debug("{s}.setup", .{mod.name}); + mod.setup(alloc) catch |err| { + std.debug.panic("Module {s} setup error: {!}", .{ mod.name, err }); + }; + } + defer { + var rev = std.mem.reverseIterator(modules.values()); + while (rev.next()) |mod| { + std.log.debug("{s}.teardown", .{mod.name}); + mod.teardown(); + } + } - while (driver.next()) |events| { - if (events.len > 0) { - hooks.events.invoke(.{events}); + std.log.info("Loaded modules: {s}", .{modules.keys()}); + + while (driver.next()) { + for (modules.values()) |mod| { + if (mod.update) |update| { + update() catch |err| { + std.debug.panic("Module {s} update event error: {!}", .{ mod.name, err }); + }; + } } - // todo frame timer - const frame_results = try hooks.frame.invoke(alloc, .{}); - defer alloc.free(frame_results); - for (frame_results) |result| result catch |err| return err; - - // todo fixed timestep + for (modules.values()) |mod| { + if (mod.frame) |frame| { + frame() catch |err| { + std.debug.panic("Module {s} frame event error: {!}", .{ mod.name, err }); + }; + } + } } - - hooks.close.invoke(.{}); } - -// // todo specify hook type. -// // - special handling for error unions -// // - allow per-hook state somehow declared in the handler -// -// pub fn invoke_hook(comptime name: []const u8, args: anytype) !void { -// inline for (modules) |module| { -// if (@hasDecl(module, name)) { -// try invoke(@field(module, name), args); -// } -// } -// } -// -// fn invoke(func: anytype, args: anytype) !void { -// if (@typeInfo(@TypeOf(func)).Fn.return_type) |R| { -// switch (@typeInfo(R)) { -// .ErrorUnion => try @call(.auto, func, args), -// .Void => @call(.auto, func, args), -// else => { -// @compileLog(func, @typeInfo(R)); -// @compileError("Invalid hook return type. 
Must be void or !void."); -// }, -// } -// } -// } diff --git a/src/nu/ImGui.zig b/src/nu/ImGui.zig index 66ad5a3..d96a0de 100644 --- a/src/nu/ImGui.zig +++ b/src/nu/ImGui.zig @@ -13,23 +13,33 @@ const Window = @import("Window.zig"); const im = @import("cimgui"); pub usingnamespace im; -pub const Options = struct {}; +pub const Config = struct {}; +const config = nu.config.imgui; -pub const Hooks = struct { - pub const Frame = Hook(fn () void); - frame: Frame, -}; +pub fn module() nu.Module { + return nu.Module{ + .name = "ImGui", + // .dependencies = &.{Render.module()}, + .setup = setup, + .teardown = teardown, + .update = update, + }; +} pub fn loader_wrapper(procname: [*c]const u8, _: ?*anyopaque) callconv(.C) vk.PfnVoidFunction { return au.glfwGetInstanceProcAddress(au.I.handle, procname); } +pub var gui: Hook(fn () void) = undefined; var ctx: *im.ImGuiContext = undefined; var descriptor_pool: vk.DescriptorPool = undefined; -pub var hooks: Hooks = undefined; +pub fn setup(alloc: std.mem.Allocator) !void { + gui = @TypeOf(gui).init(alloc); + errdefer gui.deinit(); + + try Render.present.register(present); -pub fn init(alloc: std.mem.Allocator) !void { ctx = im.igCreateContext(null) orelse { return error.igCreateContextFailed; }; @@ -64,7 +74,7 @@ pub fn init(alloc: std.mem.Allocator) !void { .DescriptorPool = @ptrFromInt(@intFromEnum(descriptor_pool)), .RenderPass = null, .MinImageCount = 2, - .ImageCount = @intCast(nu.options.render.frames_in_flight), + .ImageCount = @intCast(nu.config.render.frames_in_flight), .PipelineRenderingCreateInfo = @bitCast(vk.PipelineRenderingCreateInfo{ .view_mask = 0, .depth_attachment_format = .undefined, @@ -85,38 +95,30 @@ pub fn init(alloc: std.mem.Allocator) !void { if (!im.impl.ImGui_ImplVulkan_CreateFontsTexture()) { return error.igVulkanFontTextureFailed; } - - hooks = .{ - .frame = Hooks.Frame.init(alloc), - }; - errdefer hooks.frame.deinit(); } -pub fn deinit() void { - hooks.frame.deinit(); +pub fn teardown() void { + au.D.deviceWaitIdle() catch |err| std.debug.panic("Device wait failed: {!}", .{err}); + im.impl.ImGui_ImplVulkan_Shutdown(); au.D.destroyDescriptorPool(descriptor_pool, null); im.impl.ImGui_ImplGlfw_Shutdown(); im.igDestroyContext(ctx); + gui.deinit(); } -pub fn connect() !void { - try nu.hooks.frame.register(nu_frame); - try Render.hooks.present.register(nu_render_present); -} - -pub fn nu_frame() !void { +pub fn update() !void { im.impl.ImGui_ImplGlfw_NewFrame(); im.impl.ImGui_ImplVulkan_NewFrame(); im.igNewFrame(); - hooks.frame.invoke(.{}); + gui.invoke(.{}); im.igEndFrame(); im.igRender(); } -pub fn nu_render_present(cmd: au.CommandBufferProxy) void { +pub fn present(cmd: au.CommandBufferProxy) void { im.impl.ImGui_ImplVulkan_RenderDrawData( @ptrCast(im.igGetDrawData()), @ptrFromInt(@intFromEnum(cmd.handle)), diff --git a/src/nu/Render.zig b/src/nu/Render.zig index 697f8c8..3e4ddac 100644 --- a/src/nu/Render.zig +++ b/src/nu/Render.zig @@ -9,9 +9,7 @@ const vk = @import("vk"); const nu = @import("../nu.zig"); const au = @import("Render/au.zig"); -const Hook = @import("hooks.zig").Hook; - -pub const Options = struct { +pub const Config = struct { app_name: [*:0]const u8 = "nu-au-app", app_version: struct { variant: u3 = 0, @@ -27,20 +25,29 @@ pub const Options = struct { patch: u12 = 0, } = .{}, frames_in_flight: u8 = 3, + use_debug_messenger: bool = switch (builtin.mode) { + .Debug, .ReleaseSafe => true, + .ReleaseSmall, .ReleaseFast => false, + }, }; +const config = nu.config.render; -pub const Hooks = struct { - pub const 
Present = Hook(fn (au.CommandBufferProxy) void); - - present: Present, -}; - -pub var hooks: Hooks = undefined; +pub fn module() nu.Module { + return nu.Module{ + .name = "Render", + .setup = setup, + .teardown = teardown, + .frame = frame, + .dependencies = &.{nu.Window.driver().module}, + }; +} var sc: au.SwapChain = undefined; var flights: au.Flights = undefined; -pub fn init(alloc: std.mem.Allocator) !void { +pub var present: nu.Hook(fn (au.CommandBufferProxy) void) = undefined; + +pub fn setup(alloc: std.mem.Allocator) !void { // todo pick apart au into helpers; not a sub-module filled with its own globals. try au.init(alloc); errdefer au.deinit(); @@ -48,26 +55,22 @@ pub fn init(alloc: std.mem.Allocator) !void { sc = try au.SwapChain.init(alloc); errdefer sc.deinit(); - flights = try au.Flights.init(alloc, nu.options.render.frames_in_flight); + flights = try au.Flights.init(alloc, config.frames_in_flight); errdefer flights.deinit(); - hooks = .{ .present = Hooks.Present.init(alloc) }; - errdefer hooks.present.deinit(); + present = @TypeOf(present).init(alloc); + errdefer present.deinit(); } -pub fn deinit() void { - hooks.present.deinit(); +pub fn teardown() void { + au.D.deviceWaitIdle() catch |err| std.debug.panic("Device wait failed: {!}", .{err}); + errdefer present.deinit(); flights.deinit(); sc.deinit(); au.deinit(); } -pub fn connect() !void { - try nu.hooks.frame.register(nu_frame); - try nu.hooks.close.register(nu_close); -} - -pub fn nu_frame() !void { +pub fn frame() !void { const flight: au.Flights.Flight = flights.next(); try flight.wait(); @@ -95,7 +98,8 @@ pub fn nu_frame() !void { // todo manage frame in flight state for each hook; pass the current flight in as context. // will need some comptime -> anytype mapping. - hooks.present.invoke(.{cmd}); + + present.invoke(.{cmd}); target.end_rendering(cmd); try cmd.endCommandBuffer(); @@ -126,9 +130,3 @@ pub fn nu_frame() !void { } } } - -pub fn nu_close() void { - au.D.deviceWaitIdle() catch |err| { - std.debug.panic("Device wait failed: {!}", .{err}); - }; -} diff --git a/src/nu/Render/au.zig b/src/nu/Render/au.zig index 2b54aa9..e12cbf5 100644 --- a/src/nu/Render/au.zig +++ b/src/nu/Render/au.zig @@ -8,10 +8,7 @@ pub const SwapChain = @import("au/SwapChain.zig"); pub const Flights = @import("au/Flights.zig"); pub const VkAllocator = @import("au/VkAllocator.zig"); -pub const use_debug_messenger = switch (builtin.mode) { - .Debug, .ReleaseSafe => true, - .ReleaseSmall, .ReleaseFast => false, -}; +const config = nu.config.render; pub const apis: []const vk.ApiInfo = &.{ vk.features.version_1_0, @@ -21,7 +18,7 @@ pub const apis: []const vk.ApiInfo = &.{ vk.extensions.khr_surface, vk.extensions.khr_swapchain, vk.extensions.khr_dynamic_rendering, - if (use_debug_messenger) vk.extensions.ext_debug_utils else .{}, + if (config.use_debug_messenger) vk.extensions.ext_debug_utils else .{}, }; pub const device_extensions: []const [*:0]const u8 = &.{ @@ -82,7 +79,7 @@ fn init_base() !void { if (glfwVulkanSupported() != nu.Window.c.GLFW_TRUE) return error.glfwNoVulkan; - if (use_debug_messenger) { + if (config.use_debug_messenger) { _bw = try BaseWrapper.load(glfwGetInstanceProcAddress); } else { _bw = BaseWrapper.loadNoFail(glfwGetInstanceProcAddress); @@ -98,7 +95,7 @@ fn init_instance(alloc: std.mem.Allocator) !void { var layers = std.ArrayList([*:0]const u8).init(alloc); defer layers.deinit(); - if (use_debug_messenger) { + if (config.use_debug_messenger) { try extensions.appendSlice(&.{ vk.extensions.ext_debug_utils.name, }); @@ 
-132,19 +129,19 @@ fn init_instance(alloc: std.mem.Allocator) !void { _instance = try B.createInstance(&.{ .p_application_info = &.{ - .p_application_name = nu.options.render.app_name, + .p_application_name = config.app_name, .application_version = vk.makeApiVersion( - nu.options.render.app_version.variant, - nu.options.render.app_version.major, - nu.options.render.app_version.minor, - nu.options.render.app_version.patch, + config.app_version.variant, + config.app_version.major, + config.app_version.minor, + config.app_version.patch, ), - .p_engine_name = nu.options.render.engine_name, + .p_engine_name = config.engine_name, .engine_version = vk.makeApiVersion( - nu.options.render.engine_version.variant, - nu.options.render.engine_version.major, - nu.options.render.engine_version.minor, - nu.options.render.engine_version.patch, + config.engine_version.variant, + config.engine_version.major, + config.engine_version.minor, + config.engine_version.patch, ), .api_version = vk.API_VERSION_1_3, }, @@ -152,10 +149,10 @@ fn init_instance(alloc: std.mem.Allocator) !void { .pp_enabled_extension_names = extensions.items.ptr, .enabled_layer_count = @intCast(layers.items.len), .pp_enabled_layer_names = layers.items.ptr, - .p_next = if (use_debug_messenger) &mci else null, + .p_next = if (config.use_debug_messenger) &mci else null, }, null); - if (use_debug_messenger) { + if (config.use_debug_messenger) { _iw = try InstanceWrapper.load(_instance, _bw.dispatch.vkGetInstanceProcAddr); } else { _iw = InstanceWrapper.loadNoFail(_instance, _bw.dispatch.vkGetInstanceProcAddr); @@ -306,7 +303,7 @@ fn init_device(alloc: std.mem.Allocator) !void { }, }, null); - if (use_debug_messenger) { + if (config.use_debug_messenger) { _dw = try DeviceWrapper.load(_device, _iw.dispatch.vkGetDeviceProcAddr); } else { _dw = DeviceWrapper.loadNoFail(_device, _iw.dispatch.vkGetDeviceProcAddr); diff --git a/src/nu/Window.zig b/src/nu/Window.zig index 303bc5b..bc13dc9 100644 --- a/src/nu/Window.zig +++ b/src/nu/Window.zig @@ -13,61 +13,62 @@ pub const c = @cImport({ pub const Bus = @import("Bus.zig"); -pub const Options = struct { +pub const Config = struct { title: [*:0]const u8 = "Hello World", width: u32 = 1280, height: u32 = 720, x11_class_name: [*:0]const u8 = "floating_window", x11_instance_name: [*:0]const u8 = "floating_window", + unfocused_wait: f32 = 1.0 / 20.0, }; +const config: Config = nu.config.window; -var bus: Bus = undefined; // todo bus should probably move to engine. 
-pub var handle: *c.GLFWwindow = undefined; -var unfocused_rate: f32 = 1.0 / 20.0; - -pub fn init(alloc: std.mem.Allocator) !void { - if (c.glfwInit() != c.GLFW_TRUE) - return error.glfwInitFailed; - errdefer c.glfwTerminate(); - - bus = Bus.init(alloc); - errdefer bus.deinit(); - - c.glfwWindowHint(c.GLFW_CLIENT_API, c.GLFW_NO_API); - c.glfwWindowHintString(c.GLFW_X11_CLASS_NAME, nu.options.window.x11_class_name); - c.glfwWindowHintString(c.GLFW_X11_INSTANCE_NAME, nu.options.window.x11_instance_name); - - handle = c.glfwCreateWindow( - @intCast(nu.options.window.width), - @intCast(nu.options.window.height), - nu.options.window.title, - null, - null, - ) orelse - return error.glfWCreateWindowFailed; - errdefer c.glfwDestroyWindow(handle); - - bus.connect(handle); - errdefer bus.disconnect(handle); +pub fn driver() nu.Driver { + return nu.Driver{ + .module = .{ + .name = "Window", + .dependencies = &.{}, // todo bus + .setup = setup, + .teardown = teardown, + }, + .next = next, + }; } -pub fn deinit() void { - bus.deinit(); +pub var handle: *c.GLFWwindow = undefined; + +pub fn setup(_: std.mem.Allocator) !void { + if (c.glfwInit() != c.GLFW_TRUE) std.debug.panic("GLFW Init Failed", .{}); + + c.glfwWindowHint(c.GLFW_CLIENT_API, c.GLFW_NO_API); + c.glfwWindowHintString(c.GLFW_X11_CLASS_NAME, config.x11_class_name); + c.glfwWindowHintString(c.GLFW_X11_INSTANCE_NAME, config.x11_instance_name); + handle = c.glfwCreateWindow( + @intCast(config.width), + @intCast(config.height), + config.title, + null, + null, + ) orelse std.debug.panic("GLFW Create Window Failed", .{}); + + // bus.connect(handle); + // errdefer bus.disconnect(handle); +} + +pub fn teardown() void { c.glfwDestroyWindow(handle); c.glfwTerminate(); } -pub fn next() ?[]Bus.Event { - bus.clear(); - +pub fn next() bool { if (c.glfwWindowShouldClose(handle) == c.GLFW_TRUE) - return null; + return false; if (c.glfwGetWindowAttrib(handle, c.GLFW_FOCUSED) == c.GLFW_TRUE) { c.glfwPollEvents(); } else { - c.glfwWaitEventsTimeout(unfocused_rate); + c.glfwWaitEventsTimeout(config.unfocused_wait); } - return bus.events.items; + return true; } diff --git a/src/nu/hooks.zig b/src/nu/hooks.zig index ca8fef8..8e6b4e1 100644 --- a/src/nu/hooks.zig +++ b/src/nu/hooks.zig @@ -28,7 +28,7 @@ pub fn Hook(ftype: type) type { _ = self.handlers.orderedRemove(f); } - fn invoke(self: Self, args: anytype) void { + pub fn invoke(self: Self, args: anytype) void { for (self.handlers.keys()) |handler| { @call(.auto, handler, args); } @@ -57,7 +57,7 @@ pub fn Hook(ftype: type) type { _ = self.handlers.orderedRemove(f); } - fn invoke(self: Self, alloc: std.mem.Allocator, args: anytype) ![]Result { + pub fn invoke(self: Self, alloc: std.mem.Allocator, args: anytype) ![]Result { const results = try alloc.alloc(Result, self.handlers.count()); for (self.handlers.keys(), results) |handler, *result| { result.* = @call(.auto, handler, args); From 8c3e65fced8fdc92a2b6fa9c3837d47f2fea4d51 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Mon, 15 Jul 2024 15:38:57 -0400 Subject: [PATCH 102/113] comptime topological sort of dependencies --- src/App.zig | 20 +++---- src/main.zig | 2 +- src/nu.zig | 139 ++++++++++++++++++++++++++-------------------- src/nu/ImGui.zig | 29 +++------- src/nu/Render.zig | 21 +------ 5 files changed, 96 insertions(+), 115 deletions(-) diff --git a/src/App.zig b/src/App.zig index ae5db5b..a2b0349 100644 --- a/src/App.zig +++ b/src/App.zig @@ -5,24 +5,18 @@ const Bus = @import("nu/Bus.zig"); const Render = @import("nu/Render.zig"); const ImGui = 
@import("nu/ImGui.zig"); -pub fn module() nu.Module { - return nu.Module{ - .name = "App", - // .dependencies = &.{ Render.module(), ImGui.module() }, - .setup = setup, - .teardown = teardown, - .update = update, - }; -} +pub const depends = .{ ImGui, Render }; -pub fn setup(_: std.mem.Allocator) !void { - try ImGui.gui.register(gui); -} +pub fn setup(_: std.mem.Allocator) !void {} pub fn teardown() void {} pub fn update() !void {} -pub fn gui() void { +pub fn frame() !void { ImGui.igShowMetricsWindow(null); } + +// pub fn gui() void { +// ImGui.igShowMetricsWindow(null); +// } diff --git a/src/main.zig b/src/main.zig index 6c1751c..92139b5 100644 --- a/src/main.zig +++ b/src/main.zig @@ -22,8 +22,8 @@ pub const nu_options: nu.Options = .{ // component of the larger structure. pub const nu_driver = nu.Window; +pub const nu_present = nu.Render; pub const nu_modules = .{ - nu.Render, nu.ImGui, App, }; diff --git a/src/nu.zig b/src/nu.zig index 09a2020..d0df829 100644 --- a/src/nu.zig +++ b/src/nu.zig @@ -15,73 +15,90 @@ const Config = struct { pub const config: Config = if (@hasDecl(root, "nu_config")) root.nu_config else .{}; -pub const Module = struct { - // todo dependencies - name: []const u8, - setup: *const fn (alloc: std.mem.Allocator) anyerror!void, - teardown: *const fn () void, - update: ?*const fn () anyerror!void = null, - frame: ?*const fn () anyerror!void = null, - dependencies: []const Module = &.{}, -}; - -pub const Driver = struct { - module: Module, - next: *const fn () bool, // events? callbacks? -}; - -fn enable(modules: *std.StringArrayHashMap(Module), module: Module) void { - // this doesn't handle dependencies correctly. need to do real topological sort. - - if (modules.contains(module.name)) return; - for (module.dependencies) |dep| enable(modules, dep); - modules.putNoClobber(module.name, module) catch @panic("OOM"); -} - pub fn main() void { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; const alloc = gpa.allocator(); - var modules = std.StringArrayHashMap(Module).init(alloc); - defer modules.deinit(); + const engine = Engine(root.nu_driver, root.nu_present, root.nu_modules); - const driver = root.nu_driver.driver(); - enable(&modules, driver.module); - inline for (root.nu_modules) |mod| { - enable(&modules, mod.module()); - } + engine.try_invoke("setup", .{alloc}) catch @panic("Startup failed"); + defer engine.rinvoke("teardown", .{}); - for (modules.values()) |mod| { - std.log.debug("{s}.setup", .{mod.name}); - mod.setup(alloc) catch |err| { - std.debug.panic("Module {s} setup error: {!}", .{ mod.name, err }); - }; - } - defer { - var rev = std.mem.reverseIterator(modules.values()); - while (rev.next()) |mod| { - std.log.debug("{s}.teardown", .{mod.name}); - mod.teardown(); - } - } + while (engine.next()) { + engine.try_invoke("fixed", .{}) catch @panic("fixed failed!"); + engine.try_invoke("frame", .{}) catch @panic("frame failed!"); - std.log.info("Loaded modules: {s}", .{modules.keys()}); - - while (driver.next()) { - for (modules.values()) |mod| { - if (mod.update) |update| { - update() catch |err| { - std.debug.panic("Module {s} update event error: {!}", .{ mod.name, err }); - }; - } - } - - for (modules.values()) |mod| { - if (mod.frame) |frame| { - frame() catch |err| { - std.debug.panic("Module {s} frame event error: {!}", .{ mod.name, err }); - }; - } - } + engine.render() catch @panic("render failed!"); } } + +pub fn Engine(comptime D: type, comptime R: type, comptime M: anytype) type { + return struct { + pub const modules = Graph.sort(.{ D, R } ++ 
M); + + pub fn next() bool { + return D.next(); + } + + pub fn render() !void { + return R.render(@This()); + } + + pub fn try_invoke(comptime name: []const u8, args: anytype) !void { + inline for (modules) |mod| { + if (@hasDecl(mod, name)) { + try @call(.auto, @field(mod, name), args); + } + } + } + + pub fn invoke(comptime name: []const u8, args: anytype) void { + inline for (modules) |mod| { + if (@hasDecl(mod, name)) { + @call(.auto, @field(mod, name), args); + } + } + } + + pub fn try_rinvoke(comptime name: []const u8, args: anytype) !void { + comptime var it = std.mem.reverseIterator(modules); + inline while (it.next()) |mod| { + if (@hasDecl(mod, name)) { + try @call(.auto, @field(mod, name), args); + } + } + } + + pub fn rinvoke(comptime name: []const u8, args: anytype) void { + comptime var it = std.mem.reverseIterator(modules); + inline while (it.next()) |mod| { + if (@hasDecl(mod, name)) { + @call(.auto, @field(mod, name), args); + } + } + } + }; +} + +const Graph = struct { + fn visit( + comptime mod: type, + comptime mark: *[]const type, + comptime result: *[]const type, + ) void { + if (std.mem.indexOfScalar(type, result.*, mod)) |_| return; + if (std.mem.indexOfScalar(type, mark.*, mod)) |_| @compileError("Cycle"); + mark.* = mark.* ++ .{mod}; + if (@hasDecl(mod, "depends")) + for (mod.depends) |dep| + visit(dep, mark, result); + result.* = result.* ++ .{mod}; + } + + fn sort(comptime modules: anytype) []const type { + var mark: []const type = &.{}; + var result: []const type = &.{}; + for (modules) |mod| visit(mod, &mark, &result); + return result; + } +}; diff --git a/src/nu/ImGui.zig b/src/nu/ImGui.zig index d96a0de..a65b513 100644 --- a/src/nu/ImGui.zig +++ b/src/nu/ImGui.zig @@ -16,29 +16,17 @@ pub usingnamespace im; pub const Config = struct {}; const config = nu.config.imgui; -pub fn module() nu.Module { - return nu.Module{ - .name = "ImGui", - // .dependencies = &.{Render.module()}, - .setup = setup, - .teardown = teardown, - .update = update, - }; -} +pub const depends = .{ Render, Window }; pub fn loader_wrapper(procname: [*c]const u8, _: ?*anyopaque) callconv(.C) vk.PfnVoidFunction { return au.glfwGetInstanceProcAddress(au.I.handle, procname); } -pub var gui: Hook(fn () void) = undefined; var ctx: *im.ImGuiContext = undefined; var descriptor_pool: vk.DescriptorPool = undefined; -pub fn setup(alloc: std.mem.Allocator) !void { - gui = @TypeOf(gui).init(alloc); - errdefer gui.deinit(); - - try Render.present.register(present); +pub fn setup(_: std.mem.Allocator) !void { + // try Render.present.register(present); ctx = im.igCreateContext(null) orelse { return error.igCreateContextFailed; @@ -104,21 +92,18 @@ pub fn teardown() void { au.D.destroyDescriptorPool(descriptor_pool, null); im.impl.ImGui_ImplGlfw_Shutdown(); im.igDestroyContext(ctx); - gui.deinit(); } -pub fn update() !void { +pub fn frame() !void { im.impl.ImGui_ImplGlfw_NewFrame(); im.impl.ImGui_ImplVulkan_NewFrame(); im.igNewFrame(); - - gui.invoke(.{}); - - im.igEndFrame(); - im.igRender(); } pub fn present(cmd: au.CommandBufferProxy) void { + im.igEndFrame(); + im.igRender(); + im.impl.ImGui_ImplVulkan_RenderDrawData( @ptrCast(im.igGetDrawData()), @ptrFromInt(@intFromEnum(cmd.handle)), diff --git a/src/nu/Render.zig b/src/nu/Render.zig index 3e4ddac..3a6694f 100644 --- a/src/nu/Render.zig +++ b/src/nu/Render.zig @@ -32,21 +32,11 @@ pub const Config = struct { }; const config = nu.config.render; -pub fn module() nu.Module { - return nu.Module{ - .name = "Render", - .setup = setup, - .teardown = teardown, 
- .frame = frame, - .dependencies = &.{nu.Window.driver().module}, - }; -} +pub const depends = .{nu.Window}; var sc: au.SwapChain = undefined; var flights: au.Flights = undefined; -pub var present: nu.Hook(fn (au.CommandBufferProxy) void) = undefined; - pub fn setup(alloc: std.mem.Allocator) !void { // todo pick apart au into helpers; not a sub-module filled with its own globals. try au.init(alloc); @@ -57,20 +47,16 @@ pub fn setup(alloc: std.mem.Allocator) !void { flights = try au.Flights.init(alloc, config.frames_in_flight); errdefer flights.deinit(); - - present = @TypeOf(present).init(alloc); - errdefer present.deinit(); } pub fn teardown() void { au.D.deviceWaitIdle() catch |err| std.debug.panic("Device wait failed: {!}", .{err}); - errdefer present.deinit(); flights.deinit(); sc.deinit(); au.deinit(); } -pub fn frame() !void { +pub fn render(engine: anytype) !void { const flight: au.Flights.Flight = flights.next(); try flight.wait(); @@ -97,9 +83,8 @@ pub fn frame() !void { target.begin_rendering(cmd, render_area); // todo manage frame in flight state for each hook; pass the current flight in as context. - // will need some comptime -> anytype mapping. - present.invoke(.{cmd}); + engine.invoke("present", .{cmd}); target.end_rendering(cmd); try cmd.endCommandBuffer(); From b5eb8fb5daaea466968e09867c65a4cf59229e91 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Mon, 15 Jul 2024 17:14:24 -0400 Subject: [PATCH 103/113] misc cleanup --- src/App.zig | 22 ---------- src/main.zig | 108 ++++++++++++++++++++++++++++++---------------- src/nu.zig | 11 ++++- src/nu/ImGui.zig | 3 -- src/nu/Render.zig | 4 +- 5 files changed, 81 insertions(+), 67 deletions(-) delete mode 100644 src/App.zig diff --git a/src/App.zig b/src/App.zig deleted file mode 100644 index a2b0349..0000000 --- a/src/App.zig +++ /dev/null @@ -1,22 +0,0 @@ -const std = @import("std"); -const nu = @import("nu.zig"); - -const Bus = @import("nu/Bus.zig"); -const Render = @import("nu/Render.zig"); -const ImGui = @import("nu/ImGui.zig"); - -pub const depends = .{ ImGui, Render }; - -pub fn setup(_: std.mem.Allocator) !void {} - -pub fn teardown() void {} - -pub fn update() !void {} - -pub fn frame() !void { - ImGui.igShowMetricsWindow(null); -} - -// pub fn gui() void { -// ImGui.igShowMetricsWindow(null); -// } diff --git a/src/main.zig b/src/main.zig index 92139b5..ef9b065 100644 --- a/src/main.zig +++ b/src/main.zig @@ -1,7 +1,11 @@ const std = @import("std"); const nu = @import("nu.zig"); -const App = @import("App.zig"); +pub const nu_modules = .{ + App, + // UI, +}; +pub const main = nu.main; pub const nu_options: nu.Options = .{ .window = .{ .title = "Hello World" }, @@ -11,44 +15,72 @@ pub const nu_options: nu.Options = .{ }, }; -// todo declare or infer module dependencies, topological sort for init order. clean up "init" lines in main. -// -// problem: where should gpa go? probably some "Engine" structure in nu.zig -// -// don't necessarily need to declare topological sort - depth-first traversal -// of each module's dependencies without repeats would do. -// -// idea - use a structure like std.Build.Step where the polymorphic part is a -// component of the larger structure. 
+pub const UI = struct { + const im = nu.ImGui; -pub const nu_driver = nu.Window; -pub const nu_present = nu.Render; -pub const nu_modules = .{ - nu.ImGui, - App, + pub const depends = .{im}; + + var color: @Vector(4, f32) = @splat(1); + + pub fn setup(_: std.mem.Allocator) !void { + const io: *nu.ImGui.ImGuiIO = @ptrCast(nu.ImGui.igGetIO()); + io.ConfigFlags |= nu.ImGui.ImGuiConfigFlags_DockingEnable; + } + + pub fn frame() !void { + nu.ImGui.igShowMetricsWindow(null); + + { + const viewport = im.igGetMainViewport(); + im.igSetNextWindowPos(viewport.*.WorkPos, 0, .{ .x = 0, .y = 0 }); + im.igSetNextWindowSize(viewport.*.WorkSize, 0); + im.igSetNextWindowViewport(viewport.*.ID); + im.igPushStyleVar_Float(im.ImGuiStyleVar_WindowRounding, 0); + im.igPushStyleVar_Float(im.ImGuiStyleVar_WindowBorderSize, 0); + im.igPushStyleVar_Vec2(im.ImGuiStyleVar_WindowPadding, .{ .x = 0, .y = 0 }); + defer im.igPopStyleVar(3); + + const window_flags = + im.ImGuiWindowFlags_MenuBar | + im.ImGuiWindowFlags_NoDocking | + im.ImGuiWindowFlags_NoTitleBar | + im.ImGuiWindowFlags_NoCollapse | + im.ImGuiWindowFlags_NoResize | + im.ImGuiWindowFlags_NoMove | + im.ImGuiWindowFlags_NoBringToFrontOnFocus | + im.ImGuiWindowFlags_NoNavFocus | + im.ImGuiWindowFlags_NoBackground; + + const dock_flags = + im.ImGuiDockNodeFlags_PassthruCentralNode | + im.ImGuiDockNodeFlags_NoDockingOverCentralNode; + + _ = im.igBegin("Main Dockspace", null, window_flags); + const id = im.igGetID_Str("maindockspace"); + _ = im.igDockSpace(id, .{ .x = 0, .y = 0 }, dock_flags, null); + im.igEnd(); + } + + if (nu.ImGui.igBegin("Color", null, nu.ImGui.ImGuiWindowFlags_None)) { + if (nu.ImGui.igColorEdit4("color", @ptrCast(&color), nu.ImGui.ImGuiColorEditFlags_AlphaPreviewHalf)) {} + } + nu.ImGui.igEnd(); + } }; -pub const main = nu.main; +const App = struct { + const vk = @import("vk"); + const au = @import("nu/Render/au.zig"); -// pub fn main() !void { -// var gpa = std.heap.GeneralPurposeAllocator(.{}){}; -// defer _ = gpa.detectLeaks(); -// const alloc = gpa.allocator(); -// -// nu.init(alloc); -// defer nu.deinit(); -// -// try nu.Window.init(alloc); -// defer nu.Window.deinit(); -// -// try nu.Render.init(alloc); -// defer nu.Render.deinit(); -// -// try nu.ImGui.init(alloc); -// defer nu.ImGui.deinit(); -// -// try App.init(alloc); -// defer App.deinit(); -// -// try nu.run(alloc); -// } + pub const depends = .{nu.Render}; + + // todo timeline semaphore + + pub fn setup(_: std.mem.Allocator) !void {} + + pub fn teardown() void {} + + pub fn frame() !void {} + + pub fn present(_: au.CommandBufferProxy) void {} +}; diff --git a/src/nu.zig b/src/nu.zig index d0df829..b94a17a 100644 --- a/src/nu.zig +++ b/src/nu.zig @@ -14,16 +14,23 @@ const Config = struct { }; pub const config: Config = if (@hasDecl(root, "nu_config")) root.nu_config else .{}; +pub const engine = Engine(Window, Render, root.nu_modules); + +// Hooks: setup, teardown, fixed, frame, present pub fn main() void { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; const alloc = gpa.allocator(); - const engine = Engine(root.nu_driver, root.nu_present, root.nu_modules); + std.debug.print("Engine modules: ", .{}); + inline for (engine.modules) |module| std.debug.print("{s} ", .{@typeName(module)}); + std.debug.print("\n", .{}); engine.try_invoke("setup", .{alloc}) catch @panic("Startup failed"); defer engine.rinvoke("teardown", .{}); + std.debug.print("Setup complete.\n", .{}); + while (engine.next()) { engine.try_invoke("fixed", .{}) catch @panic("fixed failed!"); 
engine.try_invoke("frame", .{}) catch @panic("frame failed!"); @@ -41,7 +48,7 @@ pub fn Engine(comptime D: type, comptime R: type, comptime M: anytype) type { } pub fn render() !void { - return R.render(@This()); + return R.render(); } pub fn try_invoke(comptime name: []const u8, args: anytype) !void { diff --git a/src/nu/ImGui.zig b/src/nu/ImGui.zig index a65b513..a7d5f72 100644 --- a/src/nu/ImGui.zig +++ b/src/nu/ImGui.zig @@ -26,8 +26,6 @@ var ctx: *im.ImGuiContext = undefined; var descriptor_pool: vk.DescriptorPool = undefined; pub fn setup(_: std.mem.Allocator) !void { - // try Render.present.register(present); - ctx = im.igCreateContext(null) orelse { return error.igCreateContextFailed; }; @@ -87,7 +85,6 @@ pub fn setup(_: std.mem.Allocator) !void { pub fn teardown() void { au.D.deviceWaitIdle() catch |err| std.debug.panic("Device wait failed: {!}", .{err}); - im.impl.ImGui_ImplVulkan_Shutdown(); au.D.destroyDescriptorPool(descriptor_pool, null); im.impl.ImGui_ImplGlfw_Shutdown(); diff --git a/src/nu/Render.zig b/src/nu/Render.zig index 3a6694f..ec49598 100644 --- a/src/nu/Render.zig +++ b/src/nu/Render.zig @@ -56,7 +56,7 @@ pub fn teardown() void { au.deinit(); } -pub fn render(engine: anytype) !void { +pub fn render() !void { const flight: au.Flights.Flight = flights.next(); try flight.wait(); @@ -84,7 +84,7 @@ pub fn render(engine: anytype) !void { // todo manage frame in flight state for each hook; pass the current flight in as context. - engine.invoke("present", .{cmd}); + nu.engine.invoke("present", .{cmd}); target.end_rendering(cmd); try cmd.endCommandBuffer(); From 03662d8063356e730d63aa03a7d90bc5ec0b4360 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Thu, 21 Nov 2024 11:27:41 -0500 Subject: [PATCH 104/113] improve build.zig; run nu tests --- .gitignore | 2 ++ build.zig | 53 ++++++++++++++++++++++++++++++++++++++-------------- src/main.zig | 2 +- src/nu.zig | 7 ++++++- 4 files changed, 48 insertions(+), 16 deletions(-) diff --git a/.gitignore b/.gitignore index c33569d..2206cc9 100644 --- a/.gitignore +++ b/.gitignore @@ -45,5 +45,7 @@ build-*/ docgen_tmp/ .idea/ +.direnv/ +.envrc imgui.ini diff --git a/build.zig b/build.zig index 1177ab2..9ab99d6 100644 --- a/build.zig +++ b/build.zig @@ -14,12 +14,28 @@ pub fn build(b: *std.Build) void { const cimgui = b.dependency("cimgui", .{}); + const nu = b.addModule("nu", .{ + .root_source_file = b.path("src/nu.zig"), + .target = target, + .optimize = optimize, + .link_libc = true, + }); + nu.addImport("cimgui", cimgui.module("cimgui")); + nu.addImport("vk", vkmod); + nu.linkSystemLibrary("glfw3", .{ + .needed = true, + .preferred_link_mode = .static, + .use_pkg_config = .force, + }); + const exe = b.addExecutable(.{ .name = "scratchzig", .root_source_file = b.path("src/main.zig"), .target = target, .optimize = optimize, }); + exe.root_module.addImport("nu", nu); + exe.root_module.addImport("vk", vkmod); exe.root_module.addImport("cimgui", cimgui.module("cimgui")); const shaders = vkgen.ShaderCompileStep.create( @@ -31,14 +47,6 @@ pub fn build(b: *std.Build) void { shaders.add("triangle_frag", "src/shaders/triangle.frag", .{}); exe.root_module.addImport("shaders", shaders.getModule()); - exe.linkSystemLibrary2("glfw3", .{ - .needed = true, - .preferred_link_mode = .static, - .use_pkg_config = .force, - }); - exe.linkLibC(); - exe.root_module.addImport("vk", vkmod); - b.installArtifact(exe); const run_cmd = b.addRunArtifact(exe); @@ -51,19 +59,36 @@ pub fn build(b: *std.Build) void { const run_step = b.step("run", "Run the app"); 
run_step.dependOn(&run_cmd.step); - const exe_unit_tests = b.addTest(.{ - .root_source_file = b.path("src/main.zig"), + const nu_unit_tests = b.addTest(.{ + .root_source_file = b.path("src/nu.zig"), .target = target, .optimize = optimize, }); - exe_unit_tests.linkSystemLibrary2("glfw3", .{ + nu_unit_tests.root_module.addImport("cimgui", cimgui.module("cimgui")); + nu_unit_tests.root_module.addImport("vk", vkmod); + nu_unit_tests.root_module.linkSystemLibrary("glfw3", .{ .needed = true, .preferred_link_mode = .static, .use_pkg_config = .force, }); - exe_unit_tests.linkLibC(); - const run_exe_unit_tests = b.addRunArtifact(exe_unit_tests); + nu_unit_tests.linkLibC(); + + const run_nu_unit_tests = b.addRunArtifact(nu_unit_tests); + + // const exe_unit_tests = b.addTest(.{ + // .root_source_file = b.path("src/main.zig"), + // .target = target, + // .optimize = optimize, + // }); + // exe_unit_tests.linkSystemLibrary2("glfw3", .{ + // .needed = true, + // .preferred_link_mode = .static, + // .use_pkg_config = .force, + // }); + // exe_unit_tests.linkLibC(); + // const run_exe_unit_tests = b.addRunArtifact(exe_unit_tests); const test_step = b.step("test", "Run unit tests"); - test_step.dependOn(&run_exe_unit_tests.step); + // test_step.dependOn(&run_exe_unit_tests.step); + test_step.dependOn(&run_nu_unit_tests.step); } diff --git a/src/main.zig b/src/main.zig index ef9b065..887ab29 100644 --- a/src/main.zig +++ b/src/main.zig @@ -3,7 +3,7 @@ const nu = @import("nu.zig"); pub const nu_modules = .{ App, - // UI, + UI, }; pub const main = nu.main; diff --git a/src/nu.zig b/src/nu.zig index b94a17a..8e60c18 100644 --- a/src/nu.zig +++ b/src/nu.zig @@ -1,12 +1,13 @@ const std = @import("std"); const root = @import("root"); +const hooks = @import("nu/hooks.zig"); pub const Window = @import("nu/Window.zig"); pub const Render = @import("nu/Render.zig"); pub const ImGui = @import("nu/ImGui.zig"); pub const Bus = @import("nu/Bus.zig"); -pub const Hook = @import("nu/hooks.zig").Hook; +pub const Hook = hooks.Hook; const Config = struct { window: Window.Config = .{}, @@ -109,3 +110,7 @@ const Graph = struct { return result; } }; + +test { + std.testing.refAllDecls(hooks); +} \ No newline at end of file From bad7ce0d2511b1625e9f30f5d3a3308e8948d524 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Thu, 21 Nov 2024 11:42:44 -0500 Subject: [PATCH 105/113] separate step for installing "dev" tools allows building and debugging test runner. 
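With the default install prefix (zig-out) the artifact should land at
zig-out/dev/nu_test_runner, so the test runner can be built and then run or
attached to a debugger directly with something like:

    zig build dev
    ./zig-out/dev/nu_test_runner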
--- build.zig | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/build.zig b/build.zig index 9ab99d6..ee624d3 100644 --- a/build.zig +++ b/build.zig @@ -73,7 +73,13 @@ pub fn build(b: *std.Build) void { }); nu_unit_tests.linkLibC(); - const run_nu_unit_tests = b.addRunArtifact(nu_unit_tests); + const nu_test_runner = b.addInstallArtifact(nu_unit_tests, .{ + .dest_dir = .{ .override = .{ .custom = "dev" } }, + .dest_sub_path = "nu_test_runner", + }); + + const devel_step = b.step("dev", "Build development tools and test runners"); + devel_step.dependOn(&nu_test_runner.step); // const exe_unit_tests = b.addTest(.{ // .root_source_file = b.path("src/main.zig"), @@ -88,7 +94,7 @@ pub fn build(b: *std.Build) void { // exe_unit_tests.linkLibC(); // const run_exe_unit_tests = b.addRunArtifact(exe_unit_tests); - const test_step = b.step("test", "Run unit tests"); + // const test_step = b.step("test", "Run unit tests"); // test_step.dependOn(&run_exe_unit_tests.step); - test_step.dependOn(&run_nu_unit_tests.step); + // test_step.dependOn(&run_nu_unit_tests.step); } From 2c773bef71cdfd1a628b5ffb68d4a272a59c3505 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Thu, 21 Nov 2024 11:43:19 -0500 Subject: [PATCH 106/113] only support void hooks --- src/nu/hooks.zig | 166 ++++++++--------------------------------------- 1 file changed, 27 insertions(+), 139 deletions(-) diff --git a/src/nu/hooks.zig b/src/nu/hooks.zig index 8e6b4e1..1039d1e 100644 --- a/src/nu/hooks.zig +++ b/src/nu/hooks.zig @@ -2,69 +2,37 @@ const std = @import("std"); pub fn Hook(ftype: type) type { const F: std.builtin.Type.Fn = @typeInfo(ftype).Fn; - const Result: type = F.return_type.?; + const R: std.builtin.Type = @typeInfo(F.return_type.?); + comptime if (R != .Void) @compileError("Hook signature must return void."); - return switch (@typeInfo(Result)) { - .Void => struct { - const Self = @This(); + return struct { + const Self = @This(); - handlers: std.AutoArrayHashMap(*const ftype, void), + handlers: std.AutoArrayHashMap(*const ftype, void), - pub fn init(alloc: std.mem.Allocator) Self { - return Self{ - .handlers = std.AutoArrayHashMap(*const ftype, void).init(alloc), - }; + pub fn init(alloc: std.mem.Allocator) Self { + return Self{ + .handlers = std.AutoArrayHashMap(*const ftype, void).init(alloc), + }; + } + + pub fn deinit(self: *Self) void { + self.handlers.deinit(); + } + + pub fn register(self: *Self, f: ftype) !void { + try self.handlers.putNoClobber(f, {}); + } + + pub fn unregister(self: *Self, f: ftype) void { + _ = self.handlers.orderedRemove(f); + } + + pub fn invoke(self: Self, args: anytype) void { + for (self.handlers.keys()) |handler| { + @call(.auto, handler, args); } - - pub fn deinit(self: *Self) void { - self.handlers.deinit(); - } - - pub fn register(self: *Self, f: ftype) !void { - try self.handlers.putNoClobber(f, {}); - } - - pub fn unregister(self: *Self, f: ftype) void { - _ = self.handlers.orderedRemove(f); - } - - pub fn invoke(self: Self, args: anytype) void { - for (self.handlers.keys()) |handler| { - @call(.auto, handler, args); - } - } - }, - else => struct { - const Self = @This(); - - handlers: std.AutoArrayHashMap(*const ftype, void), - - pub fn init(alloc: std.mem.Allocator) Self { - return Self{ - .handlers = std.AutoArrayHashMap(*const ftype, void).init(alloc), - }; - } - - pub fn deinit(self: *Self) void { - self.handlers.deinit(); - } - - pub fn register(self: *Self, f: ftype) !void { - try self.handlers.putNoClobber(f, {}); - } - - pub fn unregister(self: 
*Self, f: ftype) void { - _ = self.handlers.orderedRemove(f); - } - - pub fn invoke(self: Self, alloc: std.mem.Allocator, args: anytype) ![]Result { - const results = try alloc.alloc(Result, self.handlers.count()); - for (self.handlers.keys(), results) |handler, *result| { - result.* = @call(.auto, handler, args); - } - return results; - } - }, + } }; } @@ -112,83 +80,3 @@ test "void hooks" { set_flags.invoke(.{&flag}); try std.testing.expect(flag == 0b00); } - -test "collect hooks" { - var collect = Hook(fn (usize) usize).init(std.testing.allocator); - defer collect.deinit(); - - const hooks = struct { - pub fn double(f: usize) usize { - return f * 2; - } - - pub fn square(f: usize) usize { - return f * f; - } - }; - - { - const result = try collect.invoke(std.testing.allocator, .{3}); - defer std.testing.allocator.free(result); - try std.testing.expectEqualSlices(usize, &.{}, result); - } - - try collect.register(hooks.double); - - { - const result = try collect.invoke(std.testing.allocator, .{4}); - defer std.testing.allocator.free(result); - try std.testing.expectEqualSlices(usize, &.{8}, result); - } - - try collect.register(hooks.square); - - { - const result = try collect.invoke(std.testing.allocator, .{5}); - defer std.testing.allocator.free(result); - try std.testing.expectEqualSlices(usize, &.{ 10, 25 }, result); - } - - collect.unregister(hooks.double); - - { - const result = try collect.invoke(std.testing.allocator, .{6}); - defer std.testing.allocator.free(result); - try std.testing.expectEqualSlices(usize, &.{36}, result); - } - - collect.unregister(hooks.square); - - { - const result = try collect.invoke(std.testing.allocator, .{7}); - defer std.testing.allocator.free(result); - try std.testing.expectEqualSlices(usize, &.{}, result); - } -} - -test "error_hooks" { - const CollectError = error{Fail}; - const Collect = Hook(fn (usize) CollectError!usize); - var collect = Collect.init(std.testing.allocator); - defer collect.deinit(); - - const hooks = struct { - pub fn halve(f: usize) !usize { - if (f % 2 == 0) return f / 2; - return CollectError.Fail; - } - - pub fn third(f: usize) !usize { - if (f % 3 == 0) return f / 3; - return CollectError.Fail; - } - }; - - try collect.register(hooks.halve); - try collect.register(hooks.third); - - const result = try collect.invoke(std.testing.allocator, .{4}); - defer std.testing.allocator.free(result); - try std.testing.expectEqual(2, try result[0]); - try std.testing.expectError(CollectError.Fail, result[1]); -} From 9150224734218cba2dee213982fa8caa8be7387c Mon Sep 17 00:00:00 2001 From: David Allemang Date: Thu, 21 Nov 2024 19:44:42 -0500 Subject: [PATCH 107/113] Remove hooks --- src/nu.zig | 3 -- src/nu/ImGui.zig | 1 - src/nu/hooks.zig | 82 ------------------------------------------------ 3 files changed, 86 deletions(-) delete mode 100644 src/nu/hooks.zig diff --git a/src/nu.zig b/src/nu.zig index 8e60c18..2786121 100644 --- a/src/nu.zig +++ b/src/nu.zig @@ -1,13 +1,11 @@ const std = @import("std"); const root = @import("root"); -const hooks = @import("nu/hooks.zig"); pub const Window = @import("nu/Window.zig"); pub const Render = @import("nu/Render.zig"); pub const ImGui = @import("nu/ImGui.zig"); pub const Bus = @import("nu/Bus.zig"); -pub const Hook = hooks.Hook; const Config = struct { window: Window.Config = .{}, @@ -112,5 +110,4 @@ const Graph = struct { }; test { - std.testing.refAllDecls(hooks); } \ No newline at end of file diff --git a/src/nu/ImGui.zig b/src/nu/ImGui.zig index a7d5f72..b891211 100644 --- a/src/nu/ImGui.zig 
+++ b/src/nu/ImGui.zig @@ -6,7 +6,6 @@ const vk = @import("vk"); const nu = @import("../nu.zig"); const au = @import("Render/au.zig"); -const Hook = @import("hooks.zig").Hook; const Render = @import("Render.zig"); const Window = @import("Window.zig"); diff --git a/src/nu/hooks.zig b/src/nu/hooks.zig deleted file mode 100644 index 1039d1e..0000000 --- a/src/nu/hooks.zig +++ /dev/null @@ -1,82 +0,0 @@ -const std = @import("std"); - -pub fn Hook(ftype: type) type { - const F: std.builtin.Type.Fn = @typeInfo(ftype).Fn; - const R: std.builtin.Type = @typeInfo(F.return_type.?); - comptime if (R != .Void) @compileError("Hook signature must return void."); - - return struct { - const Self = @This(); - - handlers: std.AutoArrayHashMap(*const ftype, void), - - pub fn init(alloc: std.mem.Allocator) Self { - return Self{ - .handlers = std.AutoArrayHashMap(*const ftype, void).init(alloc), - }; - } - - pub fn deinit(self: *Self) void { - self.handlers.deinit(); - } - - pub fn register(self: *Self, f: ftype) !void { - try self.handlers.putNoClobber(f, {}); - } - - pub fn unregister(self: *Self, f: ftype) void { - _ = self.handlers.orderedRemove(f); - } - - pub fn invoke(self: Self, args: anytype) void { - for (self.handlers.keys()) |handler| { - @call(.auto, handler, args); - } - } - }; -} - -test "void hooks" { - var set_flags = Hook(fn (*usize) void).init(std.testing.allocator); - defer set_flags.deinit(); - - const hooks = struct { - pub fn set_one(f: *usize) void { - f.* |= 0b01; - } - - pub fn set_two(f: *usize) void { - f.* |= 0b10; - } - }; - - var flag: usize = undefined; - - flag = 0; - set_flags.invoke(.{&flag}); - try std.testing.expect(flag == 0b00); - - try set_flags.register(hooks.set_one); - - flag = 0; - set_flags.invoke(.{&flag}); - try std.testing.expect(flag == 0b01); - - try set_flags.register(hooks.set_two); - - flag = 0; - set_flags.invoke(.{&flag}); - try std.testing.expect(flag == 0b11); - - set_flags.unregister(hooks.set_one); - - flag = 0; - set_flags.invoke(.{&flag}); - try std.testing.expect(flag == 0b10); - - set_flags.unregister(hooks.set_two); - - flag = 0; - set_flags.invoke(.{&flag}); - try std.testing.expect(flag == 0b00); -} From ccee3733b079c9e37076ec5e45cde8e789bdfb27 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Thu, 21 Nov 2024 23:03:08 -0500 Subject: [PATCH 108/113] nu simplified; working swapchain, queue, and validators. it compiles, but initialization is not done create debug messenger Use proxies wip swapchain wip swapchain - stub usage get device queue wip swapchain - scaffolded with segfault wip swapchain - fix segfault wip swapchain - working, but resize broken. 
semaphore issue with naive handling satisfy validation --- build.zig.zon | 2 +- src/main.zig | 110 ++++++------ src/nu.zig | 5 +- src/nu/ImGui.zig | 45 ++--- src/nu/Render.zig | 208 ++++++++++++++++------- src/nu/Render/Debug.zig | 66 ++++++++ src/nu/Render/au/SwapChain.zig | 7 +- src/nu/Render/ctx.zig | 298 +++++++++++++++++++++++++++++++++ src/nu/Render/swap_chain.zig | 283 +++++++++++++++++++++++++++++++ src/nu/Window.zig | 3 + 10 files changed, 890 insertions(+), 137 deletions(-) create mode 100644 src/nu/Render/Debug.zig create mode 100644 src/nu/Render/ctx.zig create mode 100644 src/nu/Render/swap_chain.zig diff --git a/build.zig.zon b/build.zig.zon index 6942b2e..4873873 100644 --- a/build.zig.zon +++ b/build.zig.zon @@ -8,7 +8,7 @@ .hash = "1220cf0972c6fe05437c1a8689b955084385eb7ca1f8c14010d49ca5a89570a5d90d", }, .cimgui = .{ - .path="cimgui", + .path = "cimgui", }, }, diff --git a/src/main.zig b/src/main.zig index 887ab29..17627fb 100644 --- a/src/main.zig +++ b/src/main.zig @@ -3,7 +3,7 @@ const nu = @import("nu.zig"); pub const nu_modules = .{ App, - UI, + // UI, }; pub const main = nu.main; @@ -15,62 +15,62 @@ pub const nu_options: nu.Options = .{ }, }; -pub const UI = struct { - const im = nu.ImGui; - - pub const depends = .{im}; - - var color: @Vector(4, f32) = @splat(1); - - pub fn setup(_: std.mem.Allocator) !void { - const io: *nu.ImGui.ImGuiIO = @ptrCast(nu.ImGui.igGetIO()); - io.ConfigFlags |= nu.ImGui.ImGuiConfigFlags_DockingEnable; - } - - pub fn frame() !void { - nu.ImGui.igShowMetricsWindow(null); - - { - const viewport = im.igGetMainViewport(); - im.igSetNextWindowPos(viewport.*.WorkPos, 0, .{ .x = 0, .y = 0 }); - im.igSetNextWindowSize(viewport.*.WorkSize, 0); - im.igSetNextWindowViewport(viewport.*.ID); - im.igPushStyleVar_Float(im.ImGuiStyleVar_WindowRounding, 0); - im.igPushStyleVar_Float(im.ImGuiStyleVar_WindowBorderSize, 0); - im.igPushStyleVar_Vec2(im.ImGuiStyleVar_WindowPadding, .{ .x = 0, .y = 0 }); - defer im.igPopStyleVar(3); - - const window_flags = - im.ImGuiWindowFlags_MenuBar | - im.ImGuiWindowFlags_NoDocking | - im.ImGuiWindowFlags_NoTitleBar | - im.ImGuiWindowFlags_NoCollapse | - im.ImGuiWindowFlags_NoResize | - im.ImGuiWindowFlags_NoMove | - im.ImGuiWindowFlags_NoBringToFrontOnFocus | - im.ImGuiWindowFlags_NoNavFocus | - im.ImGuiWindowFlags_NoBackground; - - const dock_flags = - im.ImGuiDockNodeFlags_PassthruCentralNode | - im.ImGuiDockNodeFlags_NoDockingOverCentralNode; - - _ = im.igBegin("Main Dockspace", null, window_flags); - const id = im.igGetID_Str("maindockspace"); - _ = im.igDockSpace(id, .{ .x = 0, .y = 0 }, dock_flags, null); - im.igEnd(); - } - - if (nu.ImGui.igBegin("Color", null, nu.ImGui.ImGuiWindowFlags_None)) { - if (nu.ImGui.igColorEdit4("color", @ptrCast(&color), nu.ImGui.ImGuiColorEditFlags_AlphaPreviewHalf)) {} - } - nu.ImGui.igEnd(); - } -}; +// pub const UI = struct { +// const im = nu.ImGui; +// +// pub const depends = .{im}; +// +// var color: @Vector(4, f32) = @splat(1); +// +// pub fn setup(_: std.mem.Allocator) !void { +// const io: *nu.ImGui.ImGuiIO = @ptrCast(nu.ImGui.igGetIO()); +// io.ConfigFlags |= nu.ImGui.ImGuiConfigFlags_DockingEnable; +// } +// +// pub fn frame() !void { +// nu.ImGui.igShowMetricsWindow(null); +// +// { +// const viewport = im.igGetMainViewport(); +// im.igSetNextWindowPos(viewport.*.WorkPos, 0, .{ .x = 0, .y = 0 }); +// im.igSetNextWindowSize(viewport.*.WorkSize, 0); +// im.igSetNextWindowViewport(viewport.*.ID); +// im.igPushStyleVar_Float(im.ImGuiStyleVar_WindowRounding, 0); +// 
im.igPushStyleVar_Float(im.ImGuiStyleVar_WindowBorderSize, 0); +// im.igPushStyleVar_Vec2(im.ImGuiStyleVar_WindowPadding, .{ .x = 0, .y = 0 }); +// defer im.igPopStyleVar(3); +// +// const window_flags = +// im.ImGuiWindowFlags_MenuBar | +// im.ImGuiWindowFlags_NoDocking | +// im.ImGuiWindowFlags_NoTitleBar | +// im.ImGuiWindowFlags_NoCollapse | +// im.ImGuiWindowFlags_NoResize | +// im.ImGuiWindowFlags_NoMove | +// im.ImGuiWindowFlags_NoBringToFrontOnFocus | +// im.ImGuiWindowFlags_NoNavFocus | +// im.ImGuiWindowFlags_NoBackground; +// +// const dock_flags = +// im.ImGuiDockNodeFlags_PassthruCentralNode | +// im.ImGuiDockNodeFlags_NoDockingOverCentralNode; +// +// _ = im.igBegin("Main Dockspace", null, window_flags); +// const id = im.igGetID_Str("maindockspace"); +// _ = im.igDockSpace(id, .{ .x = 0, .y = 0 }, dock_flags, null); +// im.igEnd(); +// } +// +// if (nu.ImGui.igBegin("Color", null, nu.ImGui.ImGuiWindowFlags_None)) { +// if (nu.ImGui.igColorEdit4("color", @ptrCast(&color), nu.ImGui.ImGuiColorEditFlags_AlphaPreviewHalf)) {} +// } +// nu.ImGui.igEnd(); +// } +// }; const App = struct { const vk = @import("vk"); - const au = @import("nu/Render/au.zig"); + // const au = @import("nu/Render/au.zig"); pub const depends = .{nu.Render}; @@ -82,5 +82,5 @@ const App = struct { pub fn frame() !void {} - pub fn present(_: au.CommandBufferProxy) void {} + // pub fn present(_: au.CommandBufferProxy) void {} }; diff --git a/src/nu.zig b/src/nu.zig index 2786121..aaacbb2 100644 --- a/src/nu.zig +++ b/src/nu.zig @@ -3,7 +3,7 @@ const root = @import("root"); pub const Window = @import("nu/Window.zig"); pub const Render = @import("nu/Render.zig"); -pub const ImGui = @import("nu/ImGui.zig"); +// pub const ImGui = @import("nu/ImGui.zig"); pub const Bus = @import("nu/Bus.zig"); @@ -109,5 +109,4 @@ const Graph = struct { } }; -test { -} \ No newline at end of file +test {} diff --git a/src/nu/ImGui.zig b/src/nu/ImGui.zig index b891211..38e1caf 100644 --- a/src/nu/ImGui.zig +++ b/src/nu/ImGui.zig @@ -4,7 +4,6 @@ const std = @import("std"); const vk = @import("vk"); const nu = @import("../nu.zig"); -const au = @import("Render/au.zig"); const Render = @import("Render.zig"); const Window = @import("Window.zig"); @@ -18,7 +17,7 @@ const config = nu.config.imgui; pub const depends = .{ Render, Window }; pub fn loader_wrapper(procname: [*c]const u8, _: ?*anyopaque) callconv(.C) vk.PfnVoidFunction { - return au.glfwGetInstanceProcAddress(au.I.handle, procname); + return nu.glfwGetInstanceProcAddress(nu.I.handle, procname); } var ctx: *im.ImGuiContext = undefined; @@ -39,23 +38,27 @@ pub fn setup(_: std.mem.Allocator) !void { } errdefer im.impl.ImGui_ImplGlfw_Shutdown(); - descriptor_pool = try au.D.createDescriptorPool(&vk.DescriptorPoolCreateInfo{ - .flags = .{ .free_descriptor_set_bit = true }, - .pool_size_count = 1, - .p_pool_sizes = &.{vk.DescriptorPoolSize{ - .descriptor_count = 32, - .type = .combined_image_sampler, - }}, - .max_sets = 32, - }, null); - errdefer au.D.destroyDescriptorPool(descriptor_pool, null); + descriptor_pool = try Render.ctx.dw.createDescriptorPool( + Render.ctx.device, + &vk.DescriptorPoolCreateInfo{ + .flags = .{ .free_descriptor_set_bit = true }, + .pool_size_count = 1, + .p_pool_sizes = &.{vk.DescriptorPoolSize{ + .descriptor_count = 32, + .type = .combined_image_sampler, + }}, + .max_sets = 32, + }, + null, + ); + errdefer Render.ctx.dw.destroyDescriptorPool(Render.ctx.device, descriptor_pool, null); if 
(im.impl.ImGui_ImplVulkan_Init(@constCast(&im.impl.ImGui_ImplVulkan_InitInfo{ - .Instance = @ptrFromInt(@intFromEnum(au.I.handle)), - .PhysicalDevice = @ptrFromInt(@intFromEnum(au.device_config.pdev)), - .Device = @ptrFromInt(@intFromEnum(au.D.handle)), - .QueueFamily = au.device_config.family, - .Queue = @ptrFromInt(@intFromEnum(au.Q.handle)), + .Instance = @ptrFromInt(@intFromEnum(Render.ctx.instance)), + .PhysicalDevice = @ptrFromInt(@intFromEnum(Render.ctx.pdevice)), + .Device = @ptrFromInt(@intFromEnum(Render.ctx.device)), + .QueueFamily = au.device_config.family, // todo + .Queue = @ptrFromInt(@intFromEnum(au.Q.handle)), // todo .DescriptorPool = @ptrFromInt(@intFromEnum(descriptor_pool)), .RenderPass = null, .MinImageCount = 2, @@ -65,7 +68,7 @@ pub fn setup(_: std.mem.Allocator) !void { .depth_attachment_format = .undefined, .stencil_attachment_format = .undefined, .color_attachment_count = 1, - .p_color_attachment_formats = &.{au.device_config.format.format}, + .p_color_attachment_formats = &.{au.device_config.format.format}, // todo }), .MSAASamples = 0, .PipelineCache = null, @@ -83,9 +86,9 @@ pub fn setup(_: std.mem.Allocator) !void { } pub fn teardown() void { - au.D.deviceWaitIdle() catch |err| std.debug.panic("Device wait failed: {!}", .{err}); + Render.ctx.dw.deviceWaitIdle(Render.ctx.device) catch |err| std.debug.panic("Device wait failed: {!}", .{err}); im.impl.ImGui_ImplVulkan_Shutdown(); - au.D.destroyDescriptorPool(descriptor_pool, null); + Render.ctx.dw.destroyDescriptorPool(Render.ctx.device, descriptor_pool, null); im.impl.ImGui_ImplGlfw_Shutdown(); im.igDestroyContext(ctx); } @@ -96,7 +99,7 @@ pub fn frame() !void { im.igNewFrame(); } -pub fn present(cmd: au.CommandBufferProxy) void { +pub fn present(cmd: au.CommandBufferProxy) void { // todo im.igEndFrame(); im.igRender(); diff --git a/src/nu/Render.zig b/src/nu/Render.zig index ec49598..ed243a4 100644 --- a/src/nu/Render.zig +++ b/src/nu/Render.zig @@ -7,7 +7,10 @@ const builtin = @import("builtin"); const vk = @import("vk"); const nu = @import("../nu.zig"); -const au = @import("Render/au.zig"); +// const au = @import("Render/au.zig"); + +const ctx = @import("Render/ctx.zig"); +const swap_chain = @import("Render/swap_chain.zig"); pub const Config = struct { app_name: [*:0]const u8 = "nu-au-app", @@ -24,7 +27,6 @@ pub const Config = struct { minor: u10 = 0, patch: u12 = 0, } = .{}, - frames_in_flight: u8 = 3, use_debug_messenger: bool = switch (builtin.mode) { .Debug, .ReleaseSafe => true, .ReleaseSmall, .ReleaseFast => false, @@ -34,84 +36,178 @@ const config = nu.config.render; pub const depends = .{nu.Window}; -var sc: au.SwapChain = undefined; -var flights: au.Flights = undefined; +const SwapChain = swap_chain.SwapChain(Flight); +const Flight = struct { + pool: vk.CommandPool = .null_handle, + cmd: vk.CommandBuffer = .null_handle, + + pub fn init() !Flight { + const pool = try ctx.D.createCommandPool( + &.{ .queue_family_index = ctx.family.* }, + null, + ); + errdefer ctx.D.destroyCommandPool(pool, null); + + var cmds: [1]vk.CommandBuffer = undefined; + try ctx.D.allocateCommandBuffers( + &vk.CommandBufferAllocateInfo{ + .command_buffer_count = 1, + .command_pool = pool, + .level = .primary, + }, + &cmds, + ); + errdefer ctx.D.freeCommandBuffers(pool, 1, &cmds); + + return .{ + .pool = pool, + .cmd = cmds[0], + }; + } + + pub fn deinit(self: Flight) void { + const cmds: [1]vk.CommandBuffer = .{self.cmd}; + ctx.D.freeCommandBuffers(self.pool, 1, &cmds); + ctx.D.destroyCommandPool(self.pool, null); + } +}; + 
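+// Note (editorial comment, not part of the original patch): each Flight bundles the
+// per-frame command resources -- one command pool plus a single primary command buffer
+// allocated from it. render() below resets the whole pool at the start of the frame
+// instead of resetting the buffer individually, and setup() allocates a fixed three
+// flights now that the frames_in_flight option has been dropped from Config.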
+var _sc: SwapChain = undefined; +var _flights: []Flight = undefined; pub fn setup(alloc: std.mem.Allocator) !void { - // todo pick apart au into helpers; not a sub-module filled with its own globals. - try au.init(alloc); - errdefer au.deinit(); + try ctx.init(alloc); + errdefer ctx.deinit(); - sc = try au.SwapChain.init(alloc); - errdefer sc.deinit(); + _flights = try alloc.alloc(Flight, 3); + errdefer alloc.free(_flights); + errdefer for (_flights) |flight| flight.deinit(); + for (_flights) |*flight| flight.* = try Flight.init(); - flights = try au.Flights.init(alloc, config.frames_in_flight); - errdefer flights.deinit(); + _sc = try SwapChain.init(alloc, _flights); + errdefer _sc.deinit(); } pub fn teardown() void { - au.D.deviceWaitIdle() catch |err| std.debug.panic("Device wait failed: {!}", .{err}); - flights.deinit(); - sc.deinit(); - au.deinit(); + _sc.deinit(); + for (_flights) |flight| flight.deinit(); + ctx.deinit(); } pub fn render() !void { - const flight: au.Flights.Flight = flights.next(); - try flight.wait(); + const target = try _sc.acquire(); - while (true) { - _ = try sc.rebuild(); + const render_area: vk.Rect2D = .{ + .offset = .{ .x = 0, .y = 0 }, + .extent = _sc.cinfo.image_extent, + }; - const target = sc.acquire(flight.acquire, .null_handle) catch |err| switch (err) { - error.OutOfDateKHR => { - sc.mark(); - continue; + try ctx.D.resetCommandPool(target.flight.pool, .{}); + var cmd = ctx.CommandBufferProxy.init(target.flight.cmd, ctx.dw); + + try cmd.beginCommandBuffer(&.{ .flags = .{ .one_time_submit_bit = true } }); + { + cmd.pipelineBarrier( + .{ .top_of_pipe_bit = true }, + .{ .color_attachment_output_bit = true }, + .{}, + 0, + null, + 0, + null, + 1, + &.{ + vk.ImageMemoryBarrier{ + .src_access_mask = .{}, + .dst_access_mask = .{ .color_attachment_write_bit = true }, + .old_layout = .undefined, + .new_layout = .color_attachment_optimal, + .src_queue_family_index = 0, // values are the same; no transfer occurs + .dst_queue_family_index = 0, + .image = target.image, + .subresource_range = .{ + .aspect_mask = .{ .color_bit = true }, + .base_mip_level = 0, + .level_count = 1, + .base_array_layer = 0, + .layer_count = 1, + }, + }, }, - else => return err, - }; + ); - const render_area: vk.Rect2D = .{ - .offset = .{ .x = 0, .y = 0 }, - .extent = sc.cinfo.image_extent, - }; + cmd.beginRendering(&vk.RenderingInfo{ + .render_area = render_area, + .layer_count = 1, + .view_mask = 0, + .color_attachment_count = 1, + .p_color_attachments = &.{ + vk.RenderingAttachmentInfo{ + .image_view = target.view, + .image_layout = .color_attachment_optimal, + .resolve_mode = .{}, + .resolve_image_view = .null_handle, + .resolve_image_layout = .undefined, + .load_op = .clear, + .store_op = .store, + .clear_value = .{ .color = .{ .float_32 = .{ 1, 0, 0, 1 } } }, + }, + }, + }); - try au.D.resetCommandPool(flight.pool, .{}); - var cmd = au.CommandBufferProxy.init(flight.cmd, au.D.wrapper); + cmd.endRendering(); - try cmd.beginCommandBuffer(&.{ .flags = .{ .one_time_submit_bit = true } }); - target.begin_rendering(cmd, render_area); + cmd.pipelineBarrier( + .{ .color_attachment_output_bit = true }, + .{ .bottom_of_pipe_bit = true }, + .{}, + 0, + null, + 0, + null, + 1, + &.{ + vk.ImageMemoryBarrier{ + .src_access_mask = .{ .color_attachment_write_bit = true }, + .dst_access_mask = .{}, + .old_layout = .color_attachment_optimal, + .new_layout = .present_src_khr, + .src_queue_family_index = 0, // values are the same; no transfer occurs. 
+ .dst_queue_family_index = 0, + .image = target.image, + .subresource_range = .{ + .aspect_mask = .{ .color_bit = true }, + .base_mip_level = 0, + .level_count = 1, + .base_array_layer = 0, + .layer_count = 1, + }, + }, + }, + ); + } + try cmd.endCommandBuffer(); - // todo manage frame in flight state for each hook; pass the current flight in as context. - - nu.engine.invoke("present", .{cmd}); - - target.end_rendering(cmd); - try cmd.endCommandBuffer(); - - try au.Q.submit(1, &.{ + try ctx.Q.submit( + 1, + &.{ vk.SubmitInfo{ .wait_semaphore_count = 1, - .p_wait_semaphores = &.{flight.acquire}, + // don't start writing to color attachment until the swapchain image has been acquired. + .p_wait_semaphores = &.{ + target.acquired, + }, .p_wait_dst_stage_mask = &.{ vk.PipelineStageFlags{ .color_attachment_output_bit = true }, }, .command_buffer_count = 1, - .p_command_buffers = &.{cmd.handle}, + .p_command_buffers = &.{target.flight.cmd}, .signal_semaphore_count = 1, - .p_signal_semaphores = &.{flight.complete}, + .p_signal_semaphores = &.{target.complete}, }, - }, flight.fence); + }, + target.available, // target will become available again once these finish + ); - if (sc.present(&.{flight.complete}, target)) |_| { - return; - } else |err| switch (err) { - error.OutOfDateKHR => { - try flight.wait(); - sc.mark(); - continue; - }, - else => return err, - } - } + try _sc.present(target); } diff --git a/src/nu/Render/Debug.zig b/src/nu/Render/Debug.zig new file mode 100644 index 0000000..1cbb4b1 --- /dev/null +++ b/src/nu/Render/Debug.zig @@ -0,0 +1,66 @@ +const std = @import("std"); +const vk = @import("vk"); + +pub const ci: vk.DebugUtilsMessengerCreateInfoEXT = .{ + .message_severity = .{ + .error_bit_ext = true, + .info_bit_ext = true, + .verbose_bit_ext = true, + .warning_bit_ext = true, + }, + .message_type = .{ + .device_address_binding_bit_ext = true, + .general_bit_ext = true, + .performance_bit_ext = true, + .validation_bit_ext = true, + }, + .pfn_user_callback = &debug_callback, + .p_user_data = null, +}; + +pub fn debug_callback( + msg_severity: vk.DebugUtilsMessageSeverityFlagsEXT, + msg_type: vk.DebugUtilsMessageTypeFlagsEXT, + p_data: ?*const vk.DebugUtilsMessengerCallbackDataEXT, + _: ?*anyopaque, +) callconv(vk.vulkan_call_conv) vk.Bool32 { + // ripped from std.log.defaultLog + + const data = p_data orelse return vk.FALSE; + const message = data.p_message orelse return vk.FALSE; + + const severity_prefix = if (msg_severity.verbose_bit_ext) + "verbose:" + else if (msg_severity.info_bit_ext) + "info:" + else if (msg_severity.warning_bit_ext) + "warning:" + else if (msg_severity.error_bit_ext) + "error:" + else + "?:"; + + const type_prefix = if (msg_type.general_bit_ext) + "" + else if (msg_type.validation_bit_ext) + "validation:" + else if (msg_type.performance_bit_ext) + "performance:" + else if (msg_type.device_address_binding_bit_ext) + "device_address_binding:" + else + "?:"; + + const stderr = std.io.getStdErr().writer(); + var bw = std.io.bufferedWriter(stderr); + const writer = bw.writer(); + + std.debug.lockStdErr(); + defer std.debug.unlockStdErr(); + nosuspend { + writer.print("vk-{s}{s} {s}\n", .{ severity_prefix, type_prefix, message }) catch return vk.FALSE; + bw.flush() catch return vk.FALSE; + } + + return vk.FALSE; +} diff --git a/src/nu/Render/au/SwapChain.zig b/src/nu/Render/au/SwapChain.zig index 204316d..17c2bd8 100644 --- a/src/nu/Render/au/SwapChain.zig +++ b/src/nu/Render/au/SwapChain.zig @@ -99,7 +99,12 @@ pub fn rebuild(self: *Self) !bool { } pub fn 
acquire(self: Self, semaphore: vk.Semaphore, fence: vk.Fence) !Target { - const acq = try au.D.acquireNextImageKHR(self.handle, std.math.maxInt(u64), semaphore, fence); + const acq = try au.D.acquireNextImageKHR( + self.handle, + std.math.maxInt(u64), + semaphore, + fence, + ); return .{ .idx = acq.image_index, .image = self.images.items[acq.image_index], diff --git a/src/nu/Render/ctx.zig b/src/nu/Render/ctx.zig new file mode 100644 index 0000000..170331d --- /dev/null +++ b/src/nu/Render/ctx.zig @@ -0,0 +1,298 @@ +const std = @import("std"); +const builtin = @import("builtin"); +const vk = @import("vk"); +const nu = @import("../../nu.zig"); + +const Debug = @import("Debug.zig"); + +const config = nu.config.render; + +pub const versions: []const vk.ApiInfo = &.{ + vk.features.version_1_0, + vk.features.version_1_1, + vk.features.version_1_2, + vk.features.version_1_3, +}; + +pub const instance_exts: []const vk.ApiInfo = if (config.use_debug_messenger) &.{ + vk.extensions.ext_debug_utils, + vk.extensions.khr_surface, +} else &.{}; + +pub const device_exts: []const vk.ApiInfo = &.{ + vk.extensions.khr_swapchain, + vk.extensions.khr_dynamic_rendering, + vk.extensions.khr_timeline_semaphore, +}; + +pub const apis = versions ++ instance_exts ++ device_exts; + +pub const layers: []const [*:0]const u8 = + if (config.use_debug_messenger) &.{ + "VK_LAYER_KHRONOS_validation", +} else &.{}; + +pub const BaseWrapper = vk.BaseWrapper(apis); +pub const InstanceWrapper = vk.InstanceWrapper(apis); +pub const DeviceWrapper = vk.DeviceWrapper(apis); +pub const InstanceProxy = vk.InstanceProxy(apis); +pub const DeviceProxy = vk.DeviceProxy(apis); +pub const QueueProxy = vk.QueueProxy(apis); +pub const CommandBufferProxy = vk.CommandBufferProxy(apis); + +var _iw: InstanceWrapper = undefined; +pub const iw: *const InstanceWrapper = &_iw; +var _dw: DeviceWrapper = undefined; +pub const dw: *const DeviceWrapper = &_dw; + +var _B: BaseWrapper = undefined; +pub const B: *const BaseWrapper = &_B; +var _I: InstanceProxy = undefined; +pub const I: *const InstanceProxy = &_I; +var _D: DeviceProxy = undefined; +pub const D: *const DeviceProxy = &_D; +var _Q: QueueProxy = undefined; +pub const Q: *const QueueProxy = &_Q; + +var _instance: vk.Instance = undefined; +pub const instance: *const vk.Instance = &_instance; +var _messenger: if (config.use_debug_messenger) vk.DebugUtilsMessengerEXT else void = undefined; +pub const messenger: *const if (config.use_debug_messenger) vk.DebugUtilsMessengerEXT else void = &_messenger; +var _surface: vk.SurfaceKHR = undefined; +pub const surface: *const vk.SurfaceKHR = &_surface; +var _pdevice: vk.PhysicalDevice = undefined; +pub const pdevice: *const vk.PhysicalDevice = &_pdevice; +var _device: vk.Device = undefined; +pub const device: *const vk.Device = &_device; +var _family: u32 = undefined; +pub const family: *const u32 = &_family; +var _queue: vk.Queue = undefined; +pub const queue: *const vk.Queue = &_queue; + +pub fn init(alloc: std.mem.Allocator) !void { + _B = try BaseWrapper.load(glfwGetInstanceProcAddress); + + _instance = try _create_instance(alloc); + _iw = try InstanceWrapper.load(_instance, glfwGetInstanceProcAddress); + errdefer _destroy_instance(); + _I = InstanceProxy.init(_instance, iw); + + if (config.use_debug_messenger) _messenger = try _create_messenger(); + errdefer if (config.use_debug_messenger) _destroy_messenger(); + + _surface = try _create_surface(); + errdefer _destroy_surface(); + + _pdevice = try _select_pdevice(alloc); + _family = try 
_select_queue_family_index(alloc); // only one queue supported + _device = try _create_device(alloc); + _dw = try DeviceWrapper.load(_device, iw.dispatch.vkGetDeviceProcAddr); + errdefer _destroy_device(); + _D = DeviceProxy.init(_device, dw); + _queue = D.getDeviceQueue(_family, 0); // only one queue supported + _Q = QueueProxy.init(_queue, dw); +} + +pub fn deinit() void { + _destroy_device(); + _destroy_surface(); + if (config.use_debug_messenger) _destroy_messenger(); + _destroy_instance(); +} + +fn _create_instance(alloc: std.mem.Allocator) !vk.Instance { + var extnames = std.ArrayList([*:0]const u8).init(alloc); + defer extnames.deinit(); + + for (instance_exts) |ext| + try extnames.append(ext.name); + + var glfw_exts_count: u32 = 0; + const glfw_exts: [*]const [*:0]const u8 = + glfwGetRequiredInstanceExtensions(&glfw_exts_count); + try extnames.appendSlice(glfw_exts[0..glfw_exts_count]); + + var ci: vk.InstanceCreateInfo = .{ + .p_application_info = &vk.ApplicationInfo{ + .p_application_name = config.app_name, + .application_version = vk.makeApiVersion( + config.app_version.variant, + config.app_version.major, + config.app_version.minor, + config.app_version.patch, + ), + .p_engine_name = config.engine_name, + .engine_version = vk.makeApiVersion( + config.engine_version.variant, + config.engine_version.major, + config.engine_version.minor, + config.engine_version.patch, + ), + .api_version = vk.features.version_1_3.version, + }, + .enabled_extension_count = @intCast(extnames.items.len), + .pp_enabled_extension_names = extnames.items.ptr, + .enabled_layer_count = @intCast(layers.len), + .pp_enabled_layer_names = layers.ptr, + }; + + if (config.use_debug_messenger) ci.p_next = &Debug.ci; + + return try B.createInstance(&ci, null); +} + +fn _destroy_instance() void { + I.destroyInstance(null); +} + +fn _create_messenger() !vk.DebugUtilsMessengerEXT { + return try I.createDebugUtilsMessengerEXT(&Debug.ci, null); +} + +fn _destroy_messenger() void { + I.destroyDebugUtilsMessengerEXT(_messenger, null); +} + +fn _create_surface() !vk.SurfaceKHR { + var res: vk.SurfaceKHR = undefined; + if (glfwCreateWindowSurface( + _instance, + nu.Window.handle, + null, + &res, + ) != .success) { + return error.CreateWindowSurfaceFailed; + } + return res; +} + +fn _destroy_surface() void { + I.destroySurfaceKHR(_surface, null); +} + +fn _select_pdevice(alloc: std.mem.Allocator) !vk.PhysicalDevice { + var count: u32 = undefined; + _ = try I.enumeratePhysicalDevices( + &count, + null, + ); + const pdevs = try alloc.alloc(vk.PhysicalDevice, count); + defer alloc.free(pdevs); + _ = try I.enumeratePhysicalDevices( + &count, + pdevs.ptr, + ); + + const scores = try alloc.alloc(i32, count); + @memset(scores, 0); + defer alloc.free(scores); + + for (pdevs, scores) |pdev, *score| { + const props = I.getPhysicalDeviceProperties(pdev); + score.* += switch (props.device_type) { + .discrete_gpu => 1000, + .integrated_gpu => 500, + else => 0, + }; + } + + const idx = std.mem.indexOfMax(i32, scores); + + return pdevs[idx]; +} + +fn _select_queue_family_index(alloc: std.mem.Allocator) !u32 { + var count: u32 = undefined; + I.getPhysicalDeviceQueueFamilyProperties( + _pdevice, + &count, + null, + ); + const families = try alloc.alloc(vk.QueueFamilyProperties, count); + defer alloc.free(families); + I.getPhysicalDeviceQueueFamilyProperties( + _pdevice, + &count, + families.ptr, + ); + + for (families, 0..) 
|prop, idx| { + if (!prop.queue_flags.graphics_bit) continue; + + if (!prop.queue_flags.transfer_bit) continue; + + if (try I.getPhysicalDeviceSurfaceSupportKHR( + _pdevice, + @intCast(idx), + _surface, + ) != vk.TRUE) continue; + + return @intCast(idx); + } + + return error.NoSuitableQueueFamily; +} + +fn _create_device(alloc: std.mem.Allocator) !vk.Device { + const qci: []const vk.DeviceQueueCreateInfo = &.{ + vk.DeviceQueueCreateInfo{ + .queue_count = 1, + .queue_family_index = @intCast(_family), + .p_queue_priorities = &[_]f32{1.0}, + }, + }; + + var extnames = std.ArrayList([*:0]const u8).init(alloc); + defer extnames.deinit(); + + for (device_exts) |ext| + try extnames.append(ext.name); + + const ci: vk.DeviceCreateInfo = .{ + .queue_create_info_count = @intCast(qci.len), + .p_queue_create_infos = qci.ptr, + .enabled_extension_count = @intCast(extnames.items.len), + .pp_enabled_extension_names = extnames.items.ptr, + .p_next = &vk.PhysicalDeviceDynamicRenderingFeaturesKHR{ + .dynamic_rendering = vk.TRUE, + }, + }; + + return try I.createDevice(_pdevice, &ci, null); +} + +fn _destroy_device() void { + D.deviceWaitIdle() catch |err| switch (err) { + error.OutOfHostMemory, + error.OutOfDeviceMemory, + error.DeviceLost, + => { + // In these cases we would destroy the device anyway, so just fall through. Note any child objects must + // already be destroyed. This assumes normal cleanup has been done before _destroy_device was called. + }, + else => unreachable, + }; + D.destroyDevice(null); +} + +pub extern fn glfwGetInstanceProcAddress( + instance: vk.Instance, + procname: [*:0]const u8, +) vk.PfnVoidFunction; + +pub extern fn glfwGetPhysicalDevicePresentationSupport( + instance: vk.Instance, + pdev: vk.PhysicalDevice, + queuefamily: u32, +) c_int; + +pub extern fn glfwCreateWindowSurface( + instance: vk.Instance, + window: *nu.Window.c.GLFWwindow, + allocation_callbacks: ?*const vk.AllocationCallbacks, + surface: *vk.SurfaceKHR, +) vk.Result; + +pub extern fn glfwGetRequiredInstanceExtensions( + count: *u32, +) [*][*:0]const u8; diff --git a/src/nu/Render/swap_chain.zig b/src/nu/Render/swap_chain.zig new file mode 100644 index 0000000..c87f665 --- /dev/null +++ b/src/nu/Render/swap_chain.zig @@ -0,0 +1,283 @@ +const std = @import("std"); +const vk = @import("vk"); +const ctx = @import("ctx.zig"); + +fn _choose_format(alloc: std.mem.Allocator) !vk.SurfaceFormatKHR { + var count: u32 = undefined; + std.debug.assert(.success == try ctx.I.getPhysicalDeviceSurfaceFormatsKHR( + ctx.pdevice.*, + ctx.surface.*, + &count, + null, + )); + const formats = try alloc.alloc(vk.SurfaceFormatKHR, count); + defer alloc.free(formats); + std.debug.assert(.success == try ctx.I.getPhysicalDeviceSurfaceFormatsKHR( + ctx.pdevice.*, + ctx.surface.*, + &count, + formats.ptr, + )); + + for (formats) |format| { + if (format.color_space == .srgb_nonlinear_khr) return format; + } else { + return formats[0]; + } +} + +fn _choose_mode(alloc: std.mem.Allocator) !vk.PresentModeKHR { + _ = ctx; + _ = alloc; + + return .fifo_khr; +} + +pub fn SwapChain(F: type) type { + return struct { + const Self = @This(); + + pub const Target = struct { + image_index: u32, + flight_index: u32, + image: vk.Image, + view: vk.ImageView, + flight: *F, + acquired: vk.Semaphore, // this semaphore will be signaled when the target is acquired + complete: vk.Semaphore, // this semaphore should be signaled when the render is complete + available: vk.Fence, // this fence should be signaled when the target flight is available + }; + + alloc: 
std.mem.Allocator, + + cur: u8 = 0, + flights: []F, + acquired_sems: []vk.Semaphore, + complete_sems: []vk.Semaphore, + available_fncs: []vk.Fence, + + cinfo: vk.SwapchainCreateInfoKHR, + handle: vk.SwapchainKHR, + + images: std.ArrayListUnmanaged(vk.Image), + views: std.ArrayListUnmanaged(vk.ImageView), + + pub fn init(alloc: std.mem.Allocator, flights: []F) !Self { + const acquired_sems = try alloc.alloc(vk.Semaphore, flights.len); + errdefer alloc.free(acquired_sems); + @memset(acquired_sems, .null_handle); + errdefer for (acquired_sems) |semaphore| ctx.D.destroySemaphore(semaphore, null); + for (acquired_sems) |*sem| { + sem.* = try ctx.D.createSemaphore(&vk.SemaphoreCreateInfo{}, null); + } + + const complete_sems = try alloc.alloc(vk.Semaphore, flights.len); + errdefer alloc.free(complete_sems); + @memset(complete_sems, .null_handle); + errdefer for (complete_sems) |semaphore| ctx.D.destroySemaphore(semaphore, null); + for (complete_sems) |*sem| { + sem.* = try ctx.D.createSemaphore(&vk.SemaphoreCreateInfo{}, null); + } + + const available_fncs = try alloc.alloc(vk.Fence, flights.len); + errdefer alloc.free(available_fncs); + @memset(available_fncs, .null_handle); + errdefer for (available_fncs) |fence| ctx.D.destroyFence(fence, null); + for (available_fncs) |*fnc| { + fnc.* = try ctx.D.createFence(&vk.FenceCreateInfo{ .flags = .{ .signaled_bit = true } }, null); + } + + const capabilities = try ctx.I.getPhysicalDeviceSurfaceCapabilitiesKHR(ctx.pdevice.*, ctx.surface.*); + const format = try _choose_format(alloc); + const mode = try _choose_mode(alloc); + + var min_image_count = @min(3, capabilities.min_image_count + 1); + if (capabilities.max_image_count > 0) { + min_image_count = @min(min_image_count, capabilities.max_image_count); + } + + const cinfo: vk.SwapchainCreateInfoKHR = .{ + .surface = ctx.surface.*, + .min_image_count = min_image_count, + .image_format = format.format, + .image_color_space = format.color_space, + .image_extent = undefined, // set in rebuild + .image_array_layers = 1, + .image_usage = .{ .color_attachment_bit = true }, + .image_sharing_mode = .exclusive, + .pre_transform = .{ .identity_bit_khr = true }, + .composite_alpha = .{ .opaque_bit_khr = true }, + .present_mode = mode, + .clipped = vk.TRUE, + .old_swapchain = .null_handle, + }; + + return .{ + .alloc = alloc, + .flights = flights, + .acquired_sems = acquired_sems, + .complete_sems = complete_sems, + .available_fncs = available_fncs, + .cinfo = cinfo, + .handle = .null_handle, + .images = .{}, + .views = .{}, + }; + } + + pub fn deinit(self: *Self) void { + for (self.views.items) |view| ctx.D.destroyImageView(view, null); + self.views.deinit(self.alloc); + + // images are owned by swapchain and not explicitly destroyed + self.images.deinit(self.alloc); + + ctx.D.destroySwapchainKHR(self.handle, null); + + // The easiest way to ensure fences and semaphores are not in use for deletion. + ctx.D.deviceWaitIdle() catch |err| switch (err) { + error.OutOfHostMemory, + error.OutOfDeviceMemory, + => {}, + error.DeviceLost, + => return, // If the devices is lost there isn't much I know to do. I guess deinit is not needed? 
+ else => unreachable, + }; + + for (self.available_fncs) |fnc| ctx.D.destroyFence(fnc, null); + self.alloc.free(self.available_fncs); + + for (self.complete_sems) |sem| ctx.D.destroySemaphore(sem, null); + self.alloc.free(self.complete_sems); + + for (self.acquired_sems) |sem| ctx.D.destroySemaphore(sem, null); + self.alloc.free(self.acquired_sems); + } + + pub fn acquire(self: *Self) !Target { + const flight_index = self.cur; + const acquired = self.acquired_sems[flight_index]; + const complete = self.complete_sems[flight_index]; + const available = self.available_fncs[flight_index]; + + const timeout = std.math.maxInt(u64); + + for (0..5) |_| { + if (self.handle == .null_handle) { + try self.rebuild(); + std.debug.assert(self.handle != .null_handle); + } + + const fences: [1]vk.Fence = .{available}; + std.debug.assert(.success == try ctx.D.waitForFences( + 1, + &fences, + vk.TRUE, + std.math.maxInt(u64), + )); + + if (ctx.D.acquireNextImageKHR( + self.handle, + timeout, + acquired, + .null_handle, + )) |res| { + switch (res.result) { + .success, .suboptimal_khr => {}, + else => unreachable, + } + + try ctx.D.resetFences(1, &.{available}); + self.cur = @intCast(@mod(self.cur + 1, self.flights.len)); + + return Target{ + .image_index = res.image_index, + .flight_index = flight_index, + .image = self.images.items[res.image_index], + .view = self.views.items[res.image_index], + .flight = &self.flights[flight_index], + .acquired = acquired, + .complete = complete, + .available = available, + }; + } else |err| switch (err) { + error.OutOfDateKHR => { + self.handle = .null_handle; + continue; + }, + else => return err, + } + } else { + return error.CannotRecreateSwapchain; + } + } + + pub fn present(self: *Self, target: Target) !void { + if (ctx.Q.presentKHR(&vk.PresentInfoKHR{ + .wait_semaphore_count = 1, // todo extra semaphores? 
+ .p_wait_semaphores = &.{target.complete}, + .swapchain_count = 1, + .p_swapchains = &.{self.handle}, + .p_image_indices = &.{target.image_index}, + .p_results = null, + })) |res| { + switch (res) { + .success => {}, + .suboptimal_khr => { + self.handle = .null_handle; + return; + }, + else => unreachable, + } + } else |err| switch (err) { + error.OutOfDateKHR => { + self.handle = .null_handle; + std.log.debug("Dropped frame", .{}); + return; + }, + else => return err, + } + } + + fn rebuild(self: *Self) !void { + std.debug.assert(self.handle == .null_handle); + + const capabilities = try ctx.I.getPhysicalDeviceSurfaceCapabilitiesKHR( + ctx.pdevice.*, + ctx.surface.*, + ); + self.cinfo.image_extent = capabilities.current_extent; + self.handle = try ctx.D.createSwapchainKHR(&self.cinfo, null); + ctx.D.destroySwapchainKHR(self.cinfo.old_swapchain, null); + errdefer ctx.D.destroySwapchainKHR(self.handle, null); + self.cinfo.old_swapchain = self.handle; + + for (self.views.items) |view| ctx.D.destroyImageView(view, null); + + var count: u32 = undefined; + std.debug.assert(.success == try ctx.D.getSwapchainImagesKHR(self.handle, &count, null)); + try self.images.resize(self.alloc, count); + try self.views.resize(self.alloc, count); + std.debug.assert(.success == try ctx.D.getSwapchainImagesKHR(self.handle, &count, self.images.items.ptr)); + + @memset(self.views.items, .null_handle); + errdefer for (self.views.items) |view| ctx.D.destroyImageView(view, null); + + for (self.images.items, self.views.items) |image, *view| { + view.* = try ctx.D.createImageView(&vk.ImageViewCreateInfo{ + .image = image, + .view_type = .@"2d", + .format = self.cinfo.image_format, + .components = .{ .r = .identity, .g = .identity, .b = .identity, .a = .identity }, + .subresource_range = .{ + .aspect_mask = .{ .color_bit = true }, + .base_mip_level = 0, + .level_count = 1, + .base_array_layer = 0, + .layer_count = 1, + }, + }, null); + } + } + }; +} diff --git a/src/nu/Window.zig b/src/nu/Window.zig index bc13dc9..3d10aed 100644 --- a/src/nu/Window.zig +++ b/src/nu/Window.zig @@ -40,6 +40,9 @@ pub var handle: *c.GLFWwindow = undefined; pub fn setup(_: std.mem.Allocator) !void { if (c.glfwInit() != c.GLFW_TRUE) std.debug.panic("GLFW Init Failed", .{}); + if (c.glfwVulkanSupported() != c.GLFW_TRUE) + std.debug.panic("GLFW Vulkan not supported", .{}); + c.glfwWindowHint(c.GLFW_CLIENT_API, c.GLFW_NO_API); c.glfwWindowHintString(c.GLFW_X11_CLASS_NAME, config.x11_class_name); c.glfwWindowHintString(c.GLFW_X11_INSTANCE_NAME, config.x11_instance_name); From f6969589b72d321543fe9ea177b05bfec7034788 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Sun, 24 Nov 2024 22:25:28 -0500 Subject: [PATCH 109/113] Remove old au work --- src/Uber.zig | 1 - src/nu/Render/au.zig | 395 ------------------------------- src/nu/Render/au/Flights.zig | 62 ----- src/nu/Render/au/VkAllocator.zig | 2 + src/nu/Render/swap_chain.zig | 283 ---------------------- 5 files changed, 2 insertions(+), 741 deletions(-) delete mode 100644 src/nu/Render/au.zig delete mode 100644 src/nu/Render/au/Flights.zig delete mode 100644 src/nu/Render/swap_chain.zig diff --git a/src/Uber.zig b/src/Uber.zig index b1f59f8..702aa3c 100644 --- a/src/Uber.zig +++ b/src/Uber.zig @@ -1,5 +1,4 @@ const std = @import("std"); -const au = @import("au.zig"); const vk = @import("vk"); const shaders = @import("shaders"); diff --git a/src/nu/Render/au.zig b/src/nu/Render/au.zig deleted file mode 100644 index e12cbf5..0000000 --- a/src/nu/Render/au.zig +++ /dev/null @@ -1,395 +0,0 @@ 
-const std = @import("std"); -const builtin = @import("builtin"); - -const vk = @import("vk"); -const nu = @import("../../nu.zig"); - -pub const SwapChain = @import("au/SwapChain.zig"); -pub const Flights = @import("au/Flights.zig"); -pub const VkAllocator = @import("au/VkAllocator.zig"); - -const config = nu.config.render; - -pub const apis: []const vk.ApiInfo = &.{ - vk.features.version_1_0, - vk.features.version_1_1, - vk.features.version_1_2, - vk.features.version_1_3, - vk.extensions.khr_surface, - vk.extensions.khr_swapchain, - vk.extensions.khr_dynamic_rendering, - if (config.use_debug_messenger) vk.extensions.ext_debug_utils else .{}, -}; - -pub const device_extensions: []const [*:0]const u8 = &.{ - // todo somehow sync this with APIs above? - vk.extensions.khr_swapchain.name, - vk.extensions.khr_dynamic_rendering.name, -}; - -pub const BaseWrapper = vk.BaseWrapper(apis); -pub const InstanceWrapper = vk.InstanceWrapper(apis); -pub const DeviceWrapper = vk.DeviceWrapper(apis); - -pub const InstanceProxy = vk.InstanceProxy(apis); -pub const DeviceProxy = vk.DeviceProxy(apis); -pub const QueueProxy = vk.QueueProxy(apis); -pub const CommandBufferProxy = vk.CommandBufferProxy(apis); - -pub const B: *const BaseWrapper = &_bw; -pub const I: *const InstanceProxy = &_ip; -pub const D: *const DeviceProxy = &_dp; -pub const Q: *const QueueProxy = &_qp; -pub const S: *const vk.SurfaceKHR = &_surface; - -pub const device_config: *const CandidateDeviceInfo = &_dconfig; - -var _bw: BaseWrapper = undefined; -var _iw: InstanceWrapper = undefined; -var _dw: DeviceWrapper = undefined; - -var _ip: InstanceProxy = undefined; -var _dp: DeviceProxy = undefined; -var _qp: QueueProxy = undefined; - -var _instance: vk.Instance = undefined; -var _surface: vk.SurfaceKHR = undefined; -var _device: vk.Device = undefined; -var _dconfig: CandidateDeviceInfo = undefined; -var _queue: vk.Queue = undefined; - -pub fn init(alloc: std.mem.Allocator) !void { - try init_base(); - errdefer deinit_base(); - - try init_instance(alloc); - errdefer deinit_instance(); - - try init_device(alloc); - errdefer deinit_device(); -} - -pub fn deinit() void { - deinit_device(); - deinit_instance(); - deinit_base(); -} - -fn init_base() !void { - if (glfwVulkanSupported() != nu.Window.c.GLFW_TRUE) - return error.glfwNoVulkan; - - if (config.use_debug_messenger) { - _bw = try BaseWrapper.load(glfwGetInstanceProcAddress); - } else { - _bw = BaseWrapper.loadNoFail(glfwGetInstanceProcAddress); - } -} - -fn deinit_base() void {} - -fn init_instance(alloc: std.mem.Allocator) !void { - var extensions = std.ArrayList([*:0]const u8).init(alloc); - defer extensions.deinit(); - - var layers = std.ArrayList([*:0]const u8).init(alloc); - defer layers.deinit(); - - if (config.use_debug_messenger) { - try extensions.appendSlice(&.{ - vk.extensions.ext_debug_utils.name, - }); - - try layers.appendSlice(&.{ - "VK_LAYER_KHRONOS_validation", - }); - } - - var glfw_exts_count: u32 = 0; - const glfw_exts: [*]const [*:0]const u8 = - @ptrCast(glfwGetRequiredInstanceExtensions(&glfw_exts_count)); - try extensions.appendSlice(glfw_exts[0..glfw_exts_count]); - - const mci: vk.DebugUtilsMessengerCreateInfoEXT = .{ - .message_severity = .{ - .error_bit_ext = true, - .info_bit_ext = true, - .verbose_bit_ext = true, - .warning_bit_ext = true, - }, - .message_type = .{ - .device_address_binding_bit_ext = true, - .general_bit_ext = false, - .performance_bit_ext = true, - .validation_bit_ext = true, - }, - .pfn_user_callback = &debug_callback, - .p_user_data = null, 
- }; - - _instance = try B.createInstance(&.{ - .p_application_info = &.{ - .p_application_name = config.app_name, - .application_version = vk.makeApiVersion( - config.app_version.variant, - config.app_version.major, - config.app_version.minor, - config.app_version.patch, - ), - .p_engine_name = config.engine_name, - .engine_version = vk.makeApiVersion( - config.engine_version.variant, - config.engine_version.major, - config.engine_version.minor, - config.engine_version.patch, - ), - .api_version = vk.API_VERSION_1_3, - }, - .enabled_extension_count = @intCast(extensions.items.len), - .pp_enabled_extension_names = extensions.items.ptr, - .enabled_layer_count = @intCast(layers.items.len), - .pp_enabled_layer_names = layers.items.ptr, - .p_next = if (config.use_debug_messenger) &mci else null, - }, null); - - if (config.use_debug_messenger) { - _iw = try InstanceWrapper.load(_instance, _bw.dispatch.vkGetInstanceProcAddr); - } else { - _iw = InstanceWrapper.loadNoFail(_instance, _bw.dispatch.vkGetInstanceProcAddr); - } - - _ip = InstanceProxy.init(_instance, &_iw); - - if (glfwCreateWindowSurface(_instance, nu.Window.handle, null, &_surface) != .success) { - return error.glfwCreateWindowSurfaceFailed; - } -} - -fn deinit_instance() void { - _ip.destroySurfaceKHR(_surface, null); - _ip.destroyInstance(null); -} - -const CandidateDeviceInfo = struct { - pdev: vk.PhysicalDevice, - format: vk.SurfaceFormatKHR, - mode: vk.PresentModeKHR, - family: u32, // must support graphics and present for now - - fn init(alloc: std.mem.Allocator, pdev: vk.PhysicalDevice) !struct { i32, CandidateDeviceInfo } { - var score: i32 = 0; - var res: CandidateDeviceInfo = undefined; - - res.pdev = pdev; - - const props = I.getPhysicalDeviceProperties(pdev); - score += switch (props.device_type) { - vk.PhysicalDeviceType.discrete_gpu => 1000, - vk.PhysicalDeviceType.integrated_gpu => 500, - else => 0, - }; - - var format_count: u32 = undefined; - _ = try I.getPhysicalDeviceSurfaceFormatsKHR(pdev, _surface, &format_count, null); - if (format_count == 0) return error.NoSurfaceFormats; - const formats = try alloc.alloc(vk.SurfaceFormatKHR, format_count); - defer alloc.free(formats); - _ = try I.getPhysicalDeviceSurfaceFormatsKHR(pdev, _surface, &format_count, formats.ptr); - - for (formats) |fmt| { - if (fmt.color_space == .srgb_nonlinear_khr) { - res.format = fmt; - break; - } - } else { - res.format = formats[0]; - score -= 100; - } - - var mode_count: u32 = undefined; - _ = try I.getPhysicalDeviceSurfacePresentModesKHR(pdev, _surface, &mode_count, null); - if (mode_count == 0) return error.NoSurfacePresentModes; - const modes = try alloc.alloc(vk.PresentModeKHR, mode_count); - defer alloc.free(modes); - _ = try I.getPhysicalDeviceSurfacePresentModesKHR(pdev, _surface, &mode_count, modes.ptr); - - if (std.mem.indexOfAny(vk.PresentModeKHR, modes, &.{ - vk.PresentModeKHR.mailbox_khr, - vk.PresentModeKHR.immediate_khr, - })) |idx| { - res.mode = modes[idx]; - } else { - score -= 50; - res.mode = .fifo_khr; // this is guaranteed - } - - var ext_count: u32 = undefined; - _ = try I.enumerateDeviceExtensionProperties(pdev, null, &ext_count, null); - const exts = try alloc.alloc(vk.ExtensionProperties, ext_count); - defer alloc.free(exts); - _ = try I.enumerateDeviceExtensionProperties(pdev, null, &ext_count, exts.ptr); - - for (device_extensions) |needle| { - for (exts) |ext| { - if (std.mem.eql( - u8, - std.mem.span(needle), - std.mem.sliceTo(&ext.extension_name, 0), - )) - break; - } else { - return 
error.MissingDeviceExtension; - } - } - - var family_count: u32 = undefined; - I.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, null); - const families = try alloc.alloc(vk.QueueFamilyProperties, family_count); - defer alloc.free(families); - I.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, families.ptr); - - for (families, 0..) |prop, idx| { - const graphics_support = prop.queue_flags.graphics_bit; - const present_support = try I.getPhysicalDeviceSurfaceSupportKHR(pdev, @intCast(idx), _surface) == vk.TRUE; - - if (graphics_support and present_support) { - res.family = @intCast(idx); - break; - } - } else { - return error.NoSuitableFamily; - } - - return .{ score, res }; - } -}; - -fn init_device(alloc: std.mem.Allocator) !void { - var pdev_count: u32 = undefined; - _ = try I.enumeratePhysicalDevices(&pdev_count, null); - if (pdev_count == 0) return error.NoDevice; - const pdevs = try alloc.alloc(vk.PhysicalDevice, pdev_count); - defer alloc.free(pdevs); - _ = try I.enumeratePhysicalDevices(&pdev_count, pdevs.ptr); - - // const scores = std.ArrayList(i32). - var scores: std.MultiArrayList(struct { score: i32, ci: CandidateDeviceInfo }) = .{}; - defer scores.deinit(alloc); - - for (pdevs) |pdev| { - const score, const ci = CandidateDeviceInfo.init(alloc, pdev) catch continue; - try scores.append(alloc, .{ .score = score, .ci = ci }); - } - - const idx = std.sort.argMax(i32, scores.items(.score), {}, std.sort.asc(i32)) orelse - return error.NoSuitableDevice; - _dconfig = scores.get(idx).ci; - - const qci: []const vk.DeviceQueueCreateInfo = &.{ - vk.DeviceQueueCreateInfo{ - .queue_family_index = _dconfig.family, - .queue_count = 1, - .p_queue_priorities = &[_]f32{1.0}, - }, - }; - - _device = try I.createDevice(_dconfig.pdev, &.{ - .queue_create_info_count = @intCast(qci.len), - .p_queue_create_infos = qci.ptr, - .enabled_extension_count = @intCast(device_extensions.len), - .pp_enabled_extension_names = device_extensions.ptr, - .p_next = &vk.PhysicalDeviceDynamicRenderingFeaturesKHR{ - .dynamic_rendering = vk.TRUE, - }, - }, null); - - if (config.use_debug_messenger) { - _dw = try DeviceWrapper.load(_device, _iw.dispatch.vkGetDeviceProcAddr); - } else { - _dw = DeviceWrapper.loadNoFail(_device, _iw.dispatch.vkGetDeviceProcAddr); - } - _dp = DeviceProxy.init(_device, &_dw); - errdefer D.destroyDevice(null); - - _queue = D.getDeviceQueue(_dconfig.family, 0); - - _qp = QueueProxy.init(_queue, &_dw); - - // todo i'm thinking this needs to be a more complex pointer structure... i'm making assumptions here about how the - // command pools are meant to work. probably I am cooking too much. 
-} - -fn deinit_device() void { - D.destroyDevice(null); -} - -pub fn debug_callback( - msg_severity: vk.DebugUtilsMessageSeverityFlagsEXT, - msg_type: vk.DebugUtilsMessageTypeFlagsEXT, - p_data: ?*const vk.DebugUtilsMessengerCallbackDataEXT, - _: ?*anyopaque, -) callconv(vk.vulkan_call_conv) vk.Bool32 { - // ripped from std.log.defaultLog - - const data = p_data orelse return vk.FALSE; - const message = data.p_message orelse return vk.FALSE; - - const severity_prefix = if (msg_severity.verbose_bit_ext) - "verbose:" - else if (msg_severity.info_bit_ext) - "info:" - else if (msg_severity.warning_bit_ext) - "warning:" - else if (msg_severity.error_bit_ext) - "error:" - else - "?:"; - - const type_prefix = if (msg_type.general_bit_ext) - "" - else if (msg_type.validation_bit_ext) - "validation:" - else if (msg_type.performance_bit_ext) - "performance:" - else if (msg_type.device_address_binding_bit_ext) - "device_address_binding:" - else - "?:"; - - const stderr = std.io.getStdErr().writer(); - var bw = std.io.bufferedWriter(stderr); - const writer = bw.writer(); - - std.debug.lockStdErr(); - defer std.debug.unlockStdErr(); - nosuspend { - writer.print("vk-{s}{s} {s}\n", .{ severity_prefix, type_prefix, message }) catch return vk.FALSE; - bw.flush() catch return vk.FALSE; - } - - return vk.FALSE; -} - -pub extern fn glfwVulkanSupported() c_int; - -pub extern fn glfwGetInstanceProcAddress( - instance: vk.Instance, - procname: [*:0]const u8, -) vk.PfnVoidFunction; - -pub extern fn glfwGetPhysicalDevicePresentationSupport( - instance: vk.Instance, - pdev: vk.PhysicalDevice, - queuefamily: u32, -) c_int; - -pub extern fn glfwCreateWindowSurface( - instance: vk.Instance, - window: *nu.Window.c.GLFWwindow, - allocation_callbacks: ?*const vk.AllocationCallbacks, - surface: *vk.SurfaceKHR, -) vk.Result; - -pub extern fn glfwGetRequiredInstanceExtensions( - count: *u32, -) [*][*:0]const u8; diff --git a/src/nu/Render/au/Flights.zig b/src/nu/Render/au/Flights.zig deleted file mode 100644 index 4662e08..0000000 --- a/src/nu/Render/au/Flights.zig +++ /dev/null @@ -1,62 +0,0 @@ -const std = @import("std"); -const vk = @import("vk"); -const au = @import("../au.zig"); - -const Self = @This(); - -pub const Flight = struct { - acquire: vk.Semaphore = .null_handle, - complete: vk.Semaphore = .null_handle, - fence: vk.Fence = .null_handle, - pool: vk.CommandPool = .null_handle, - cmd: vk.CommandBuffer = .null_handle, - - pub fn wait(self: Flight) !void { - _ = try au.D.waitForFences(1, &.{self.fence}, vk.TRUE, std.math.maxInt(u64)); - try au.D.resetFences(1, &.{self.fence}); - } -}; - -alloc: std.mem.Allocator, -flights: []Flight, -idx: usize, - -pub fn init(alloc: std.mem.Allocator, n: usize) !Self { - var self: Self = .{ - .alloc = alloc, - .flights = try alloc.alloc(Flight, n), - .idx = 0, - }; - errdefer self.deinit(); - - for (self.flights) |*flight| { - flight.acquire = try au.D.createSemaphore(&.{}, null); - flight.complete = try au.D.createSemaphore(&.{}, null); - flight.fence = try au.D.createFence(&.{ .flags = .{ .signaled_bit = true } }, null); - flight.pool = try au.D.createCommandPool(&.{ .queue_family_index = au.device_config.family }, null); - try au.D.allocateCommandBuffers(&vk.CommandBufferAllocateInfo{ - .command_buffer_count = 1, - .command_pool = flight.pool, - .level = .primary, - }, @ptrCast(&flight.cmd)); - } - - return self; -} - -pub fn deinit(self: Self) void { - for (self.flights) |flight| { - au.D.destroySemaphore(flight.acquire, null); - au.D.destroySemaphore(flight.complete, null); 
- au.D.destroyFence(flight.fence, null); - au.D.freeCommandBuffers(flight.pool, 1, &.{flight.cmd}); - au.D.destroyCommandPool(flight.pool, null); - } - self.alloc.free(self.flights); -} - -pub fn next(self: *Self) Flight { - const idx = self.idx; - self.idx = (self.idx + 1) % self.flights.len; - return self.flights[idx]; -} diff --git a/src/nu/Render/au/VkAllocator.zig b/src/nu/Render/au/VkAllocator.zig index 0533c65..de51884 100644 --- a/src/nu/Render/au/VkAllocator.zig +++ b/src/nu/Render/au/VkAllocator.zig @@ -1,3 +1,5 @@ +// todo look into Vulkan Memory Allocator + const std = @import("std"); const vk = @import("vk"); const au = @import("../au.zig"); diff --git a/src/nu/Render/swap_chain.zig b/src/nu/Render/swap_chain.zig deleted file mode 100644 index c87f665..0000000 --- a/src/nu/Render/swap_chain.zig +++ /dev/null @@ -1,283 +0,0 @@ -const std = @import("std"); -const vk = @import("vk"); -const ctx = @import("ctx.zig"); - -fn _choose_format(alloc: std.mem.Allocator) !vk.SurfaceFormatKHR { - var count: u32 = undefined; - std.debug.assert(.success == try ctx.I.getPhysicalDeviceSurfaceFormatsKHR( - ctx.pdevice.*, - ctx.surface.*, - &count, - null, - )); - const formats = try alloc.alloc(vk.SurfaceFormatKHR, count); - defer alloc.free(formats); - std.debug.assert(.success == try ctx.I.getPhysicalDeviceSurfaceFormatsKHR( - ctx.pdevice.*, - ctx.surface.*, - &count, - formats.ptr, - )); - - for (formats) |format| { - if (format.color_space == .srgb_nonlinear_khr) return format; - } else { - return formats[0]; - } -} - -fn _choose_mode(alloc: std.mem.Allocator) !vk.PresentModeKHR { - _ = ctx; - _ = alloc; - - return .fifo_khr; -} - -pub fn SwapChain(F: type) type { - return struct { - const Self = @This(); - - pub const Target = struct { - image_index: u32, - flight_index: u32, - image: vk.Image, - view: vk.ImageView, - flight: *F, - acquired: vk.Semaphore, // this semaphore will be signaled when the target is acquired - complete: vk.Semaphore, // this semaphore should be signaled when the render is complete - available: vk.Fence, // this fence should be signaled when the target flight is available - }; - - alloc: std.mem.Allocator, - - cur: u8 = 0, - flights: []F, - acquired_sems: []vk.Semaphore, - complete_sems: []vk.Semaphore, - available_fncs: []vk.Fence, - - cinfo: vk.SwapchainCreateInfoKHR, - handle: vk.SwapchainKHR, - - images: std.ArrayListUnmanaged(vk.Image), - views: std.ArrayListUnmanaged(vk.ImageView), - - pub fn init(alloc: std.mem.Allocator, flights: []F) !Self { - const acquired_sems = try alloc.alloc(vk.Semaphore, flights.len); - errdefer alloc.free(acquired_sems); - @memset(acquired_sems, .null_handle); - errdefer for (acquired_sems) |semaphore| ctx.D.destroySemaphore(semaphore, null); - for (acquired_sems) |*sem| { - sem.* = try ctx.D.createSemaphore(&vk.SemaphoreCreateInfo{}, null); - } - - const complete_sems = try alloc.alloc(vk.Semaphore, flights.len); - errdefer alloc.free(complete_sems); - @memset(complete_sems, .null_handle); - errdefer for (complete_sems) |semaphore| ctx.D.destroySemaphore(semaphore, null); - for (complete_sems) |*sem| { - sem.* = try ctx.D.createSemaphore(&vk.SemaphoreCreateInfo{}, null); - } - - const available_fncs = try alloc.alloc(vk.Fence, flights.len); - errdefer alloc.free(available_fncs); - @memset(available_fncs, .null_handle); - errdefer for (available_fncs) |fence| ctx.D.destroyFence(fence, null); - for (available_fncs) |*fnc| { - fnc.* = try ctx.D.createFence(&vk.FenceCreateInfo{ .flags = .{ .signaled_bit = true } }, null); - } - - 
const capabilities = try ctx.I.getPhysicalDeviceSurfaceCapabilitiesKHR(ctx.pdevice.*, ctx.surface.*); - const format = try _choose_format(alloc); - const mode = try _choose_mode(alloc); - - var min_image_count = @min(3, capabilities.min_image_count + 1); - if (capabilities.max_image_count > 0) { - min_image_count = @min(min_image_count, capabilities.max_image_count); - } - - const cinfo: vk.SwapchainCreateInfoKHR = .{ - .surface = ctx.surface.*, - .min_image_count = min_image_count, - .image_format = format.format, - .image_color_space = format.color_space, - .image_extent = undefined, // set in rebuild - .image_array_layers = 1, - .image_usage = .{ .color_attachment_bit = true }, - .image_sharing_mode = .exclusive, - .pre_transform = .{ .identity_bit_khr = true }, - .composite_alpha = .{ .opaque_bit_khr = true }, - .present_mode = mode, - .clipped = vk.TRUE, - .old_swapchain = .null_handle, - }; - - return .{ - .alloc = alloc, - .flights = flights, - .acquired_sems = acquired_sems, - .complete_sems = complete_sems, - .available_fncs = available_fncs, - .cinfo = cinfo, - .handle = .null_handle, - .images = .{}, - .views = .{}, - }; - } - - pub fn deinit(self: *Self) void { - for (self.views.items) |view| ctx.D.destroyImageView(view, null); - self.views.deinit(self.alloc); - - // images are owned by swapchain and not explicitly destroyed - self.images.deinit(self.alloc); - - ctx.D.destroySwapchainKHR(self.handle, null); - - // The easiest way to ensure fences and semaphores are not in use for deletion. - ctx.D.deviceWaitIdle() catch |err| switch (err) { - error.OutOfHostMemory, - error.OutOfDeviceMemory, - => {}, - error.DeviceLost, - => return, // If the devices is lost there isn't much I know to do. I guess deinit is not needed? - else => unreachable, - }; - - for (self.available_fncs) |fnc| ctx.D.destroyFence(fnc, null); - self.alloc.free(self.available_fncs); - - for (self.complete_sems) |sem| ctx.D.destroySemaphore(sem, null); - self.alloc.free(self.complete_sems); - - for (self.acquired_sems) |sem| ctx.D.destroySemaphore(sem, null); - self.alloc.free(self.acquired_sems); - } - - pub fn acquire(self: *Self) !Target { - const flight_index = self.cur; - const acquired = self.acquired_sems[flight_index]; - const complete = self.complete_sems[flight_index]; - const available = self.available_fncs[flight_index]; - - const timeout = std.math.maxInt(u64); - - for (0..5) |_| { - if (self.handle == .null_handle) { - try self.rebuild(); - std.debug.assert(self.handle != .null_handle); - } - - const fences: [1]vk.Fence = .{available}; - std.debug.assert(.success == try ctx.D.waitForFences( - 1, - &fences, - vk.TRUE, - std.math.maxInt(u64), - )); - - if (ctx.D.acquireNextImageKHR( - self.handle, - timeout, - acquired, - .null_handle, - )) |res| { - switch (res.result) { - .success, .suboptimal_khr => {}, - else => unreachable, - } - - try ctx.D.resetFences(1, &.{available}); - self.cur = @intCast(@mod(self.cur + 1, self.flights.len)); - - return Target{ - .image_index = res.image_index, - .flight_index = flight_index, - .image = self.images.items[res.image_index], - .view = self.views.items[res.image_index], - .flight = &self.flights[flight_index], - .acquired = acquired, - .complete = complete, - .available = available, - }; - } else |err| switch (err) { - error.OutOfDateKHR => { - self.handle = .null_handle; - continue; - }, - else => return err, - } - } else { - return error.CannotRecreateSwapchain; - } - } - - pub fn present(self: *Self, target: Target) !void { - if 
(ctx.Q.presentKHR(&vk.PresentInfoKHR{ - .wait_semaphore_count = 1, // todo extra semaphores? - .p_wait_semaphores = &.{target.complete}, - .swapchain_count = 1, - .p_swapchains = &.{self.handle}, - .p_image_indices = &.{target.image_index}, - .p_results = null, - })) |res| { - switch (res) { - .success => {}, - .suboptimal_khr => { - self.handle = .null_handle; - return; - }, - else => unreachable, - } - } else |err| switch (err) { - error.OutOfDateKHR => { - self.handle = .null_handle; - std.log.debug("Dropped frame", .{}); - return; - }, - else => return err, - } - } - - fn rebuild(self: *Self) !void { - std.debug.assert(self.handle == .null_handle); - - const capabilities = try ctx.I.getPhysicalDeviceSurfaceCapabilitiesKHR( - ctx.pdevice.*, - ctx.surface.*, - ); - self.cinfo.image_extent = capabilities.current_extent; - self.handle = try ctx.D.createSwapchainKHR(&self.cinfo, null); - ctx.D.destroySwapchainKHR(self.cinfo.old_swapchain, null); - errdefer ctx.D.destroySwapchainKHR(self.handle, null); - self.cinfo.old_swapchain = self.handle; - - for (self.views.items) |view| ctx.D.destroyImageView(view, null); - - var count: u32 = undefined; - std.debug.assert(.success == try ctx.D.getSwapchainImagesKHR(self.handle, &count, null)); - try self.images.resize(self.alloc, count); - try self.views.resize(self.alloc, count); - std.debug.assert(.success == try ctx.D.getSwapchainImagesKHR(self.handle, &count, self.images.items.ptr)); - - @memset(self.views.items, .null_handle); - errdefer for (self.views.items) |view| ctx.D.destroyImageView(view, null); - - for (self.images.items, self.views.items) |image, *view| { - view.* = try ctx.D.createImageView(&vk.ImageViewCreateInfo{ - .image = image, - .view_type = .@"2d", - .format = self.cinfo.image_format, - .components = .{ .r = .identity, .g = .identity, .b = .identity, .a = .identity }, - .subresource_range = .{ - .aspect_mask = .{ .color_bit = true }, - .base_mip_level = 0, - .level_count = 1, - .base_array_layer = 0, - .layer_count = 1, - }, - }, null); - } - } - }; -} From ae37fc2ad367aef8ad7eb35be9470b43144f2f66 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Sun, 24 Nov 2024 22:25:42 -0500 Subject: [PATCH 110/113] Clean up swapchain --- src/nu/Render.zig | 50 +----- src/nu/Render/SwapChain.zig | 301 +++++++++++++++++++++++++++++++++ src/nu/Render/au/SwapChain.zig | 213 ----------------------- src/nu/Render/ctx.zig | 4 + 4 files changed, 313 insertions(+), 255 deletions(-) create mode 100644 src/nu/Render/SwapChain.zig delete mode 100644 src/nu/Render/au/SwapChain.zig diff --git a/src/nu/Render.zig b/src/nu/Render.zig index ed243a4..b74c49a 100644 --- a/src/nu/Render.zig +++ b/src/nu/Render.zig @@ -10,7 +10,7 @@ const nu = @import("../nu.zig"); // const au = @import("Render/au.zig"); const ctx = @import("Render/ctx.zig"); -const swap_chain = @import("Render/swap_chain.zig"); +const SwapChain = @import("Render/SwapChain.zig"); pub const Config = struct { app_name: [*:0]const u8 = "nu-au-app", @@ -36,7 +36,6 @@ const config = nu.config.render; pub const depends = .{nu.Window}; -const SwapChain = swap_chain.SwapChain(Flight); const Flight = struct { pool: vk.CommandPool = .null_handle, cmd: vk.CommandBuffer = .null_handle, @@ -84,7 +83,7 @@ pub fn setup(alloc: std.mem.Allocator) !void { errdefer for (_flights) |flight| flight.deinit(); for (_flights) |*flight| flight.* = try Flight.init(); - _sc = try SwapChain.init(alloc, _flights); + _sc = try SwapChain.init(alloc, _flights.len); errdefer _sc.deinit(); } @@ -96,14 +95,15 @@ pub fn teardown() 
void { pub fn render() !void { const target = try _sc.acquire(); + const flight = &_flights[target.flight_index]; const render_area: vk.Rect2D = .{ .offset = .{ .x = 0, .y = 0 }, .extent = _sc.cinfo.image_extent, }; - try ctx.D.resetCommandPool(target.flight.pool, .{}); - var cmd = ctx.CommandBufferProxy.init(target.flight.cmd, ctx.dw); + try ctx.D.resetCommandPool(flight.pool, .{}); + var cmd = ctx.CommandBufferProxy.init(flight.cmd, ctx.dw); try cmd.beginCommandBuffer(&.{ .flags = .{ .one_time_submit_bit = true } }); { @@ -116,24 +116,7 @@ pub fn render() !void { 0, null, 1, - &.{ - vk.ImageMemoryBarrier{ - .src_access_mask = .{}, - .dst_access_mask = .{ .color_attachment_write_bit = true }, - .old_layout = .undefined, - .new_layout = .color_attachment_optimal, - .src_queue_family_index = 0, // values are the same; no transfer occurs - .dst_queue_family_index = 0, - .image = target.image, - .subresource_range = .{ - .aspect_mask = .{ .color_bit = true }, - .base_mip_level = 0, - .level_count = 1, - .base_array_layer = 0, - .layer_count = 1, - }, - }, - }, + &.{target.top_of_pipe()}, ); cmd.beginRendering(&vk.RenderingInfo{ @@ -166,24 +149,7 @@ pub fn render() !void { 0, null, 1, - &.{ - vk.ImageMemoryBarrier{ - .src_access_mask = .{ .color_attachment_write_bit = true }, - .dst_access_mask = .{}, - .old_layout = .color_attachment_optimal, - .new_layout = .present_src_khr, - .src_queue_family_index = 0, // values are the same; no transfer occurs. - .dst_queue_family_index = 0, - .image = target.image, - .subresource_range = .{ - .aspect_mask = .{ .color_bit = true }, - .base_mip_level = 0, - .level_count = 1, - .base_array_layer = 0, - .layer_count = 1, - }, - }, - }, + &.{target.bottom_of_pipe()}, ); } try cmd.endCommandBuffer(); @@ -201,7 +167,7 @@ pub fn render() !void { vk.PipelineStageFlags{ .color_attachment_output_bit = true }, }, .command_buffer_count = 1, - .p_command_buffers = &.{target.flight.cmd}, + .p_command_buffers = &.{flight.cmd}, .signal_semaphore_count = 1, .p_signal_semaphores = &.{target.complete}, }, diff --git a/src/nu/Render/SwapChain.zig b/src/nu/Render/SwapChain.zig new file mode 100644 index 0000000..b2df68d --- /dev/null +++ b/src/nu/Render/SwapChain.zig @@ -0,0 +1,301 @@ +const std = @import("std"); +const vk = @import("vk"); +const ctx = @import("ctx.zig"); + +fn _choose_format(alloc: std.mem.Allocator) !vk.SurfaceFormatKHR { + var count: u32 = undefined; + std.debug.assert(.success == try ctx.I.getPhysicalDeviceSurfaceFormatsKHR( + ctx.pdevice.*, + ctx.surface.*, + &count, + null, + )); + const formats = try alloc.alloc(vk.SurfaceFormatKHR, count); + defer alloc.free(formats); + std.debug.assert(.success == try ctx.I.getPhysicalDeviceSurfaceFormatsKHR( + ctx.pdevice.*, + ctx.surface.*, + &count, + formats.ptr, + )); + + for (formats) |format| { + if (format.color_space == .srgb_nonlinear_khr) return format; + } else { + return formats[0]; + } +} + +fn _choose_mode(alloc: std.mem.Allocator) !vk.PresentModeKHR { + _ = ctx; + _ = alloc; + + return .fifo_khr; +} + +const Self = @This(); + +pub const Target = struct { + image_index: u32, + flight_index: u32, + image: vk.Image, + view: vk.ImageView, + acquired: vk.Semaphore, // this semaphore will be signaled when the target is acquired + complete: vk.Semaphore, // this semaphore should be signaled when the render is complete + available: vk.Fence, // this fence should be signaled when the target flight is available + + pub fn top_of_pipe(target: Target) vk.ImageMemoryBarrier { + return vk.ImageMemoryBarrier{ + 
.src_access_mask = .{}, + .dst_access_mask = .{ .color_attachment_write_bit = true }, + .old_layout = .undefined, + .new_layout = .color_attachment_optimal, + .src_queue_family_index = 0, + .dst_queue_family_index = 0, + .image = target.image, + .subresource_range = .{ + .aspect_mask = .{ .color_bit = true }, + .base_mip_level = 0, + .level_count = 1, + .base_array_layer = 0, + .layer_count = 1, + }, + }; + } + + pub fn bottom_of_pipe(target: Target) vk.ImageMemoryBarrier { + return vk.ImageMemoryBarrier{ + .src_access_mask = .{ .color_attachment_write_bit = true }, + .dst_access_mask = .{}, + .old_layout = .color_attachment_optimal, + .new_layout = .present_src_khr, + .src_queue_family_index = 0, + .dst_queue_family_index = 0, + .image = target.image, + .subresource_range = .{ + .aspect_mask = .{ .color_bit = true }, + .base_mip_level = 0, + .level_count = 1, + .base_array_layer = 0, + .layer_count = 1, + }, + }; + } +}; + +const Sync = struct { + acquired: vk.Semaphore = .null_handle, + complete: vk.Semaphore = .null_handle, + available: vk.Fence = .null_handle, +}; + +const View = struct { + image: vk.Image, + view: vk.ImageView, +}; + +alloc: std.mem.Allocator, + +flight_index: usize = 0, +flight_syncs: std.MultiArrayList(Sync) = .{}, + +cinfo: vk.SwapchainCreateInfoKHR = undefined, +handle: vk.SwapchainKHR = .null_handle, + +chain: std.MultiArrayList(View) = .{}, + +pub fn init(alloc: std.mem.Allocator, flight_count: usize) !Self { + var self: Self = .{ + .alloc = alloc, + }; + errdefer self.deinit(); + + try self.flight_syncs.resize(alloc, flight_count); + for (self.flight_syncs.items(.acquired)) |*sem| + sem.* = try ctx.D.createSemaphore(&.{}, null); + for (self.flight_syncs.items(.complete)) |*sem| + sem.* = try ctx.D.createSemaphore(&.{}, null); + for (self.flight_syncs.items(.available)) |*fnc| + fnc.* = try ctx.D.createFence(&.{ .flags = .{ .signaled_bit = true } }, null); + + const caps = try ctx.getPhysicalDeviceSurfaceCapabilities(); + const format = try _choose_format(alloc); + const mode = try _choose_mode(alloc); + + self.cinfo = .{ + .surface = ctx.surface.*, + .min_image_count = std.math.clamp( + @min(3, caps.min_image_count + 1), + caps.min_image_count, + if (caps.max_image_count > 0) caps.max_image_count else 127, + ), + .image_format = format.format, + .image_color_space = format.color_space, + .image_extent = undefined, // set in rebuild + .image_array_layers = 1, + .image_usage = .{ .color_attachment_bit = true }, + .image_sharing_mode = .exclusive, + .pre_transform = .{ .identity_bit_khr = true }, + .composite_alpha = .{ .opaque_bit_khr = true }, + .present_mode = mode, + .clipped = vk.TRUE, + .old_swapchain = .null_handle, + }; + + return self; +} + +fn rebuild(self: *Self) !void { + std.debug.assert(self.handle == .null_handle); // don't rebuild if we weren't marked + + const caps = try ctx.getPhysicalDeviceSurfaceCapabilities(); + self.cinfo.image_extent = caps.current_extent; + + self.handle = try ctx.D.createSwapchainKHR(&self.cinfo, null); + ctx.D.destroySwapchainKHR(self.cinfo.old_swapchain, null); + self.cinfo.old_swapchain = self.handle; + + for (self.chain.items(.view)) |view| ctx.D.destroyImageView(view, null); + @memset(self.chain.items(.view), .null_handle); + @memset(self.chain.items(.image), .null_handle); + + var count: u32 = undefined; + std.debug.assert( + .success == try ctx.D.getSwapchainImagesKHR( + self.handle, + &count, + null, + ), + ); + try self.chain.resize(self.alloc, count); + std.debug.assert( + .success == try 
ctx.D.getSwapchainImagesKHR( + self.handle, + &count, + self.chain.items(.image).ptr, + ), + ); + + for (self.chain.items(.image), self.chain.items(.view)) |image, *view| { + view.* = try ctx.D.createImageView(&vk.ImageViewCreateInfo{ + .image = image, + .view_type = .@"2d", + .format = self.cinfo.image_format, + .components = .{ + .r = .identity, + .g = .identity, + .b = .identity, + .a = .identity, + }, + .subresource_range = .{ + .aspect_mask = .{ .color_bit = true }, + .base_mip_level = 0, + .level_count = 1, + .base_array_layer = 0, + .layer_count = 1, + }, + }, null); + } +} + +pub fn deinit(self: *Self) void { + // The easiest way to ensure fences and semaphores are not in use for deletion. If it fails, the device must + // have failed somehow and sync is not necessary so just continue with cleanup. + ctx.D.deviceWaitIdle() catch {}; + + // images are owned by swapchain and not explicitly destroyed + for (self.chain.items(.view)) |view| ctx.D.destroyImageView(view, null); + self.chain.deinit(self.alloc); + + ctx.D.destroySwapchainKHR(self.handle, null); + + for (self.flight_syncs.items(.acquired)) |sem| ctx.D.destroySemaphore(sem, null); + for (self.flight_syncs.items(.complete)) |sem| ctx.D.destroySemaphore(sem, null); + for (self.flight_syncs.items(.available)) |fnc| ctx.D.destroyFence(fnc, null); + self.flight_syncs.deinit(self.alloc); +} + +pub fn acquire(self: *Self) !Target { + var target: Target = .{ + .flight_index = @intCast(self.flight_index), + .acquired = self.flight_syncs.items(.acquired)[self.flight_index], + .complete = self.flight_syncs.items(.complete)[self.flight_index], + .available = self.flight_syncs.items(.available)[self.flight_index], + .image = undefined, + .view = undefined, + .image_index = undefined, + }; + + const timeout = std.math.maxInt(u64); + + for (0..5) |_| { + if (self.handle == .null_handle) { + try self.rebuild(); + std.debug.assert(self.handle != .null_handle); + } + + const fences: [1]vk.Fence = .{target.available}; + std.debug.assert(.success == try ctx.D.waitForFences( + 1, + &fences, + vk.TRUE, + std.math.maxInt(u64), + )); + + if (ctx.D.acquireNextImageKHR( + self.handle, + timeout, + target.acquired, + .null_handle, + )) |res| { + switch (res.result) { + .success, .suboptimal_khr => {}, + else => unreachable, + } + + target.image_index = res.image_index; + target.image = self.chain.items(.image)[res.image_index]; + target.view = self.chain.items(.view)[res.image_index]; + + try ctx.D.resetFences(1, &.{target.available}); + self.flight_index = @mod(self.flight_index + 1, self.flight_syncs.len); + + return target; + } else |err| switch (err) { + error.OutOfDateKHR => { + self.handle = .null_handle; + continue; + }, + else => return err, + } + } else { + return error.CannotRecreateSwapchain; + } +} + +pub fn present(self: *Self, target: Target) !void { + if (ctx.Q.presentKHR(&vk.PresentInfoKHR{ + .wait_semaphore_count = 1, // todo extra semaphores? 
+ .p_wait_semaphores = &.{target.complete}, + .swapchain_count = 1, + .p_swapchains = &.{self.handle}, + .p_image_indices = &.{target.image_index}, + .p_results = null, + })) |res| { + switch (res) { + .success => {}, + .suboptimal_khr => { + self.handle = .null_handle; + return; + }, + else => unreachable, + } + } else |err| switch (err) { + error.OutOfDateKHR => { + self.handle = .null_handle; + std.log.debug("Dropped frame", .{}); + return; + }, + else => return err, + } +} diff --git a/src/nu/Render/au/SwapChain.zig b/src/nu/Render/au/SwapChain.zig deleted file mode 100644 index 17c2bd8..0000000 --- a/src/nu/Render/au/SwapChain.zig +++ /dev/null @@ -1,213 +0,0 @@ -const std = @import("std"); -const au = @import("../au.zig"); -const vk = @import("vk"); - -const Self = @This(); - -alloc: std.mem.Allocator, -cinfo: vk.SwapchainCreateInfoKHR, -handle: vk.SwapchainKHR = .null_handle, -images: std.ArrayListUnmanaged(vk.Image) = .{}, -views: std.ArrayListUnmanaged(vk.ImageView) = .{}, - -pub fn init(alloc: std.mem.Allocator) !Self { - const caps = try au.I.getPhysicalDeviceSurfaceCapabilitiesKHR(au.device_config.pdev, au.S.*); - - var min_image_count = @max(3, caps.min_image_count + 1); // todo magic numbers - if (caps.max_image_count > 0) { - min_image_count = @min(min_image_count, caps.max_image_count); - } - - // determine format - const format = au.device_config.format; - - return .{ - .alloc = alloc, - .cinfo = .{ - .surface = au.S.*, - .min_image_count = min_image_count, - .image_format = format.format, - .image_color_space = format.color_space, - .image_extent = undefined, // set in rebuild - .image_array_layers = 1, - .image_usage = .{ .color_attachment_bit = true }, - .image_sharing_mode = .exclusive, - .pre_transform = .{ .identity_bit_khr = true }, - .composite_alpha = .{ .opaque_bit_khr = true }, - .present_mode = au.device_config.mode, - .clipped = vk.TRUE, - .old_swapchain = .null_handle, - }, - }; -} - -pub fn deinit(self: *Self) void { - for (self.views.items) |view| { - au.D.destroyImageView(view, null); - } - self.views.deinit(self.alloc); - - self.images.deinit(self.alloc); - - au.D.destroySwapchainKHR(self.handle, null); -} - -/// mark that the swapchain _should_ be rebuilt with the given extent -/// this function is reentrant, so the swapchain can be marked multiple times -/// and only one rebuild occur -pub fn mark(self: *Self) void { - self.handle = .null_handle; -} - -/// rebuild the swapchain only if it is marked. return true if the swapchain was rebuilt. 
-pub fn rebuild(self: *Self) !bool { - if (self.handle != .null_handle) return false; - - const caps = try au.I.getPhysicalDeviceSurfaceCapabilitiesKHR(au.device_config.pdev, self.cinfo.surface); - self.cinfo.image_extent = caps.current_extent; - - self.handle = try au.D.createSwapchainKHR(&self.cinfo, null); - au.D.destroySwapchainKHR(self.cinfo.old_swapchain, null); - self.cinfo.old_swapchain = self.handle; - - var count: u32 = undefined; - _ = try au.D.getSwapchainImagesKHR(self.handle, &count, null); - try self.images.resize(self.alloc, count); - _ = try au.D.getSwapchainImagesKHR(self.handle, &count, self.images.items.ptr); - - for (self.views.items) |view| { - au.D.destroyImageView(view, null); - } - try self.views.resize(self.alloc, count); - for (self.images.items, self.views.items) |image, *view| { - view.* = try au.D.createImageView(&vk.ImageViewCreateInfo{ - .image = image, - .view_type = .@"2d", - .format = self.cinfo.image_format, - .components = .{ .r = .identity, .g = .identity, .b = .identity, .a = .identity }, - .subresource_range = .{ - .aspect_mask = .{ .color_bit = true }, - .base_mip_level = 0, - .level_count = 1, - .base_array_layer = 0, - .layer_count = 1, - }, - }, null); - } - - return true; -} - -pub fn acquire(self: Self, semaphore: vk.Semaphore, fence: vk.Fence) !Target { - const acq = try au.D.acquireNextImageKHR( - self.handle, - std.math.maxInt(u64), - semaphore, - fence, - ); - return .{ - .idx = acq.image_index, - .image = self.images.items[acq.image_index], - .view = self.views.items[acq.image_index], - }; -} - -const Target = struct { - idx: u32, - image: vk.Image, - view: vk.ImageView, - - pub fn begin_rendering(self: Target, cmd: au.CommandBufferProxy, area: vk.Rect2D) void { - cmd.pipelineBarrier( - .{ .top_of_pipe_bit = true }, - .{ .color_attachment_output_bit = true }, - .{}, - 0, - null, - 0, - null, - 1, - &.{ - vk.ImageMemoryBarrier{ - .src_access_mask = .{}, - .dst_access_mask = .{ .color_attachment_write_bit = true }, - .old_layout = .undefined, - .new_layout = .color_attachment_optimal, - .src_queue_family_index = 0, - .dst_queue_family_index = 0, - .image = self.image, - .subresource_range = .{ - .aspect_mask = .{ .color_bit = true }, - .base_mip_level = 0, - .level_count = 1, - .base_array_layer = 0, - .layer_count = 1, - }, - }, - }, - ); - - cmd.beginRendering(&vk.RenderingInfo{ - .render_area = area, - .layer_count = 1, - .view_mask = 0, - .color_attachment_count = 1, - .p_color_attachments = &.{ - vk.RenderingAttachmentInfo{ - .image_view = self.view, - .image_layout = .color_attachment_optimal, - .resolve_mode = .{}, - .resolve_image_view = .null_handle, - .resolve_image_layout = .undefined, - .load_op = .clear, - .store_op = .store, - .clear_value = .{ .color = .{ .float_32 = .{ 0, 0, 0, 1 } } }, - }, - }, - }); - } - - pub fn end_rendering(self: Target, cmd: au.CommandBufferProxy) void { - cmd.endRendering(); - - cmd.pipelineBarrier( - .{ .color_attachment_output_bit = true }, - .{ .bottom_of_pipe_bit = true }, - .{}, - 0, - null, - 0, - null, - 1, - &.{ - vk.ImageMemoryBarrier{ - .src_access_mask = .{ .color_attachment_write_bit = true }, - .dst_access_mask = .{}, - .old_layout = .color_attachment_optimal, - .new_layout = .present_src_khr, - .src_queue_family_index = 0, - .dst_queue_family_index = 0, - .image = self.image, - .subresource_range = .{ - .aspect_mask = .{ .color_bit = true }, - .base_mip_level = 0, - .level_count = 1, - .base_array_layer = 0, - .layer_count = 1, - }, - }, - }, - ); - } -}; - -pub fn present(self: Self, 
wait_semaphores: []const vk.Semaphore, target: Target) !vk.Result { - return try au.Q.presentKHR(&vk.PresentInfoKHR{ - .wait_semaphore_count = @intCast(wait_semaphores.len), - .p_wait_semaphores = wait_semaphores.ptr, - .swapchain_count = 1, - .p_swapchains = &.{self.handle}, - .p_image_indices = &.{target.idx}, - .p_results = null, - }); -} diff --git a/src/nu/Render/ctx.zig b/src/nu/Render/ctx.zig index 170331d..e3a2397 100644 --- a/src/nu/Render/ctx.zig +++ b/src/nu/Render/ctx.zig @@ -275,6 +275,10 @@ fn _destroy_device() void { D.destroyDevice(null); } +pub fn getPhysicalDeviceSurfaceCapabilities() !vk.SurfaceCapabilitiesKHR { + return try I.getPhysicalDeviceSurfaceCapabilitiesKHR(_pdevice, _surface); +} + pub extern fn glfwGetInstanceProcAddress( instance: vk.Instance, procname: [*:0]const u8, From 3e4b70573c1c88e71138a9d018e6eda265bf10db Mon Sep 17 00:00:00 2001 From: David Allemang Date: Mon, 25 Nov 2024 12:06:27 -0500 Subject: [PATCH 111/113] well-behaved swapchain --- build.zig | 2 +- src/nu.zig | 2 + src/nu/Render.zig | 15 ++++--- src/nu/Render/SwapChain.zig | 88 ++++++++++++++++--------------------- src/nu/Render/ctx.zig | 19 ++++++-- src/nu/Window.zig | 18 ++++++++ 6 files changed, 83 insertions(+), 61 deletions(-) diff --git a/build.zig b/build.zig index ee624d3..c23f4de 100644 --- a/build.zig +++ b/build.zig @@ -4,7 +4,7 @@ const vkgen = @import("vulkan-zig"); pub fn build(b: *std.Build) void { const target = b.standardTargetOptions(.{}); const optimize = b.standardOptimizeOption(.{ - .preferred_optimize_mode = .ReleaseSafe, + // .preferred_optimize_mode = .ReleaseSafe, }); const vk = b.dependency("vulkan-zig", .{ diff --git a/src/nu.zig b/src/nu.zig index aaacbb2..88c0a4b 100644 --- a/src/nu.zig +++ b/src/nu.zig @@ -18,6 +18,8 @@ pub const engine = Engine(Window, Render, root.nu_modules); // Hooks: setup, teardown, fixed, frame, present pub fn main() void { + std.log.info("use_debug_messenger: {}", .{config.render.use_debug_messenger}); + var gpa = std.heap.GeneralPurposeAllocator(.{}){}; const alloc = gpa.allocator(); diff --git a/src/nu/Render.zig b/src/nu/Render.zig index b74c49a..5960530 100644 --- a/src/nu/Render.zig +++ b/src/nu/Render.zig @@ -7,7 +7,6 @@ const builtin = @import("builtin"); const vk = @import("vk"); const nu = @import("../nu.zig"); -// const au = @import("Render/au.zig"); const ctx = @import("Render/ctx.zig"); const SwapChain = @import("Render/SwapChain.zig"); @@ -85,6 +84,12 @@ pub fn setup(alloc: std.mem.Allocator) !void { _sc = try SwapChain.init(alloc, _flights.len); errdefer _sc.deinit(); + + nu.Window.add_resize_callback(&on_resize); +} + +fn on_resize(_: u32, _: u32) void { + _sc.rebuild() catch @panic("rebuild on resize failed"); } pub fn teardown() void { @@ -94,7 +99,7 @@ pub fn teardown() void { } pub fn render() !void { - const target = try _sc.acquire(); + const target = try _sc.acquire() orelse return; const flight = &_flights[target.flight_index]; const render_area: vk.Rect2D = .{ @@ -116,7 +121,7 @@ pub fn render() !void { 0, null, 1, - &.{target.top_of_pipe()}, + &.{target.top_of_pipe_barrier()}, ); cmd.beginRendering(&vk.RenderingInfo{ @@ -133,7 +138,7 @@ pub fn render() !void { .resolve_image_layout = .undefined, .load_op = .clear, .store_op = .store, - .clear_value = .{ .color = .{ .float_32 = .{ 1, 0, 0, 1 } } }, + .clear_value = .{ .color = .{ .float_32 = .{ 0.1, 0.1, 0.1, 1 } } }, }, }, }); @@ -149,7 +154,7 @@ pub fn render() !void { 0, null, 1, - &.{target.bottom_of_pipe()}, + &.{target.bottom_of_pipe_barrier()}, ); } try 
cmd.endCommandBuffer(); diff --git a/src/nu/Render/SwapChain.zig b/src/nu/Render/SwapChain.zig index b2df68d..5ab0969 100644 --- a/src/nu/Render/SwapChain.zig +++ b/src/nu/Render/SwapChain.zig @@ -44,7 +44,7 @@ pub const Target = struct { complete: vk.Semaphore, // this semaphore should be signaled when the render is complete available: vk.Fence, // this fence should be signaled when the target flight is available - pub fn top_of_pipe(target: Target) vk.ImageMemoryBarrier { + pub fn top_of_pipe_barrier(target: Target) vk.ImageMemoryBarrier { return vk.ImageMemoryBarrier{ .src_access_mask = .{}, .dst_access_mask = .{ .color_attachment_write_bit = true }, @@ -63,7 +63,7 @@ pub const Target = struct { }; } - pub fn bottom_of_pipe(target: Target) vk.ImageMemoryBarrier { + pub fn bottom_of_pipe_barrier(target: Target) vk.ImageMemoryBarrier { return vk.ImageMemoryBarrier{ .src_access_mask = .{ .color_attachment_write_bit = true }, .dst_access_mask = .{}, @@ -142,15 +142,16 @@ pub fn init(alloc: std.mem.Allocator, flight_count: usize) !Self { .old_swapchain = .null_handle, }; + // try self.rebuild(); + return self; } -fn rebuild(self: *Self) !void { - std.debug.assert(self.handle == .null_handle); // don't rebuild if we weren't marked - +pub fn rebuild(self: *Self) !void { const caps = try ctx.getPhysicalDeviceSurfaceCapabilities(); self.cinfo.image_extent = caps.current_extent; + try ctx.D.queueWaitIdle(ctx.queue.*); self.handle = try ctx.D.createSwapchainKHR(&self.cinfo, null); ctx.D.destroySwapchainKHR(self.cinfo.old_swapchain, null); self.cinfo.old_swapchain = self.handle; @@ -215,7 +216,7 @@ pub fn deinit(self: *Self) void { self.flight_syncs.deinit(self.alloc); } -pub fn acquire(self: *Self) !Target { +pub fn acquire(self: *Self) !?Target { var target: Target = .{ .flight_index = @intCast(self.flight_index), .acquired = self.flight_syncs.items(.acquired)[self.flight_index], @@ -228,74 +229,59 @@ pub fn acquire(self: *Self) !Target { const timeout = std.math.maxInt(u64); - for (0..5) |_| { - if (self.handle == .null_handle) { - try self.rebuild(); - std.debug.assert(self.handle != .null_handle); - } + std.debug.assert(.success == try ctx.D.waitForFences( + 1, + &.{target.available}, + vk.TRUE, + std.math.maxInt(u64), + )); - const fences: [1]vk.Fence = .{target.available}; - std.debug.assert(.success == try ctx.D.waitForFences( - 1, - &fences, - vk.TRUE, - std.math.maxInt(u64), - )); + // two attempts + + target.image_index = for (0..2) |_| { + if (self.handle == .null_handle) try self.rebuild(); if (ctx.D.acquireNextImageKHR( self.handle, timeout, target.acquired, .null_handle, - )) |res| { - switch (res.result) { - .success, .suboptimal_khr => {}, - else => unreachable, - } - - target.image_index = res.image_index; - target.image = self.chain.items(.image)[res.image_index]; - target.view = self.chain.items(.view)[res.image_index]; - - try ctx.D.resetFences(1, &.{target.available}); - self.flight_index = @mod(self.flight_index + 1, self.flight_syncs.len); - - return target; + )) |res| switch (res.result) { + .success, .suboptimal_khr => break res.image_index, + else => unreachable, } else |err| switch (err) { error.OutOfDateKHR => { self.handle = .null_handle; - continue; }, else => return err, } } else { - return error.CannotRecreateSwapchain; - } + return null; + }; + + target.image = self.chain.items(.image)[target.image_index]; + target.view = self.chain.items(.view)[target.image_index]; + + try ctx.D.resetFences(1, &.{target.available}); + self.flight_index = @mod(self.flight_index + 
1, self.flight_syncs.len); + + return target; } pub fn present(self: *Self, target: Target) !void { if (ctx.Q.presentKHR(&vk.PresentInfoKHR{ - .wait_semaphore_count = 1, // todo extra semaphores? + .wait_semaphore_count = 1, .p_wait_semaphores = &.{target.complete}, .swapchain_count = 1, .p_swapchains = &.{self.handle}, .p_image_indices = &.{target.image_index}, .p_results = null, - })) |res| { - switch (res) { - .success => {}, - .suboptimal_khr => { - self.handle = .null_handle; - return; - }, - else => unreachable, - } - } else |err| switch (err) { - error.OutOfDateKHR => { - self.handle = .null_handle; - std.log.debug("Dropped frame", .{}); - return; - }, + })) |res| switch(res) { + .success => {}, + .suboptimal_khr => self.handle = .null_handle, + else => unreachable, + } else |err| switch(err) { + error.OutOfDateKHR => self.handle = .null_handle, else => return err, } } diff --git a/src/nu/Render/ctx.zig b/src/nu/Render/ctx.zig index e3a2397..eb7343e 100644 --- a/src/nu/Render/ctx.zig +++ b/src/nu/Render/ctx.zig @@ -17,7 +17,9 @@ pub const versions: []const vk.ApiInfo = &.{ pub const instance_exts: []const vk.ApiInfo = if (config.use_debug_messenger) &.{ vk.extensions.ext_debug_utils, vk.extensions.khr_surface, -} else &.{}; +} else &.{ + vk.extensions.khr_surface, +}; pub const device_exts: []const vk.ApiInfo = &.{ vk.extensions.khr_swapchain, @@ -70,10 +72,16 @@ var _queue: vk.Queue = undefined; pub const queue: *const vk.Queue = &_queue; pub fn init(alloc: std.mem.Allocator) !void { - _B = try BaseWrapper.load(glfwGetInstanceProcAddress); + _B = if (config.use_debug_messenger) + try BaseWrapper.load(glfwGetInstanceProcAddress) + else + BaseWrapper.loadNoFail(glfwGetInstanceProcAddress); _instance = try _create_instance(alloc); - _iw = try InstanceWrapper.load(_instance, glfwGetInstanceProcAddress); + _iw = if (config.use_debug_messenger) + try InstanceWrapper.load(_instance, glfwGetInstanceProcAddress) + else + InstanceWrapper.loadNoFail(_instance, glfwGetInstanceProcAddress); errdefer _destroy_instance(); _I = InstanceProxy.init(_instance, iw); @@ -86,7 +94,10 @@ pub fn init(alloc: std.mem.Allocator) !void { _pdevice = try _select_pdevice(alloc); _family = try _select_queue_family_index(alloc); // only one queue supported _device = try _create_device(alloc); - _dw = try DeviceWrapper.load(_device, iw.dispatch.vkGetDeviceProcAddr); + _dw = if (config.use_debug_messenger) + try DeviceWrapper.load(_device, iw.dispatch.vkGetDeviceProcAddr) + else + DeviceWrapper.loadNoFail(_device, iw.dispatch.vkGetDeviceProcAddr); errdefer _destroy_device(); _D = DeviceProxy.init(_device, dw); _queue = D.getDeviceQueue(_family, 0); // only one queue supported diff --git a/src/nu/Window.zig b/src/nu/Window.zig index 3d10aed..9ae74e5 100644 --- a/src/nu/Window.zig +++ b/src/nu/Window.zig @@ -54,6 +54,8 @@ pub fn setup(_: std.mem.Allocator) !void { null, ) orelse std.debug.panic("GLFW Create Window Failed", .{}); + _ = c.glfwSetFramebufferSizeCallback(handle, &resize_callback); + // bus.connect(handle); // errdefer bus.disconnect(handle); } @@ -75,3 +77,19 @@ pub fn next() bool { return true; } + +var _resize_callbacks: std.BoundedArray(*const fn (u32, u32) void, 16) = .{}; + +fn resize_callback(_: ?*c.GLFWwindow, w: c_int, h: c_int) callconv(.C) void { + for (_resize_callbacks.slice()) |cb| { + cb(@intCast(w), @intCast(h)); + } +} + +pub fn add_resize_callback(cb: *const fn (u32, u32) void) void { + _resize_callbacks.appendAssumeCapacity(cb); +} + +pub fn set_title(title: [:0]const u8) void { + 
c.glfwSetWindowTitle(handle, title); +} From b8d89122abc06450c23bd0a433dc15bdfdc7a545 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Mon, 25 Nov 2024 20:09:57 -0500 Subject: [PATCH 112/113] remove queue wait idle from swapchain rebuild --- src/nu/Render/SwapChain.zig | 1 - 1 file changed, 1 deletion(-) diff --git a/src/nu/Render/SwapChain.zig b/src/nu/Render/SwapChain.zig index 5ab0969..183eedb 100644 --- a/src/nu/Render/SwapChain.zig +++ b/src/nu/Render/SwapChain.zig @@ -151,7 +151,6 @@ pub fn rebuild(self: *Self) !void { const caps = try ctx.getPhysicalDeviceSurfaceCapabilities(); self.cinfo.image_extent = caps.current_extent; - try ctx.D.queueWaitIdle(ctx.queue.*); self.handle = try ctx.D.createSwapchainKHR(&self.cinfo, null); ctx.D.destroySwapchainKHR(self.cinfo.old_swapchain, null); self.cinfo.old_swapchain = self.handle; From ce8109c1aa73e1785c78acec5a9ec1bc9fa25099 Mon Sep 17 00:00:00 2001 From: David Allemang Date: Mon, 25 Nov 2024 20:54:45 -0500 Subject: [PATCH 113/113] colorful box --- build.zig | 66 ++------- src/Uber.zig | 31 ++-- src/main.zig | 281 ++++++++++++++++++++++++++---------- src/nu.zig | 9 +- src/nu/ImGui.zig | 27 ++-- src/nu/Render.zig | 15 +- src/nu/Render/SwapChain.zig | 10 +- src/nu/Window.zig | 7 + 8 files changed, 280 insertions(+), 166 deletions(-) diff --git a/build.zig b/build.zig index c23f4de..1fa459e 100644 --- a/build.zig +++ b/build.zig @@ -14,19 +14,11 @@ pub fn build(b: *std.Build) void { const cimgui = b.dependency("cimgui", .{}); - const nu = b.addModule("nu", .{ - .root_source_file = b.path("src/nu.zig"), - .target = target, - .optimize = optimize, - .link_libc = true, - }); - nu.addImport("cimgui", cimgui.module("cimgui")); - nu.addImport("vk", vkmod); - nu.linkSystemLibrary("glfw3", .{ - .needed = true, - .preferred_link_mode = .static, - .use_pkg_config = .force, - }); + // nu.linkSystemLibrary("glfw3", .{ + // .needed = true, + // .preferred_link_mode = .static, + // .use_pkg_config = .force, + // }); const exe = b.addExecutable(.{ .name = "scratchzig", @@ -34,9 +26,15 @@ pub fn build(b: *std.Build) void { .target = target, .optimize = optimize, }); - exe.root_module.addImport("nu", nu); + // exe.root_module.addImport("nu", nu); exe.root_module.addImport("vk", vkmod); exe.root_module.addImport("cimgui", cimgui.module("cimgui")); + exe.root_module.linkSystemLibrary("glfw3", .{ + .needed = true, + .preferred_link_mode = .static, + .use_pkg_config = .force, + }); + exe.linkLibC(); const shaders = vkgen.ShaderCompileStep.create( b, @@ -59,42 +57,6 @@ pub fn build(b: *std.Build) void { const run_step = b.step("run", "Run the app"); run_step.dependOn(&run_cmd.step); - const nu_unit_tests = b.addTest(.{ - .root_source_file = b.path("src/nu.zig"), - .target = target, - .optimize = optimize, - }); - nu_unit_tests.root_module.addImport("cimgui", cimgui.module("cimgui")); - nu_unit_tests.root_module.addImport("vk", vkmod); - nu_unit_tests.root_module.linkSystemLibrary("glfw3", .{ - .needed = true, - .preferred_link_mode = .static, - .use_pkg_config = .force, - }); - nu_unit_tests.linkLibC(); - - const nu_test_runner = b.addInstallArtifact(nu_unit_tests, .{ - .dest_dir = .{ .override = .{ .custom = "dev" } }, - .dest_sub_path = "nu_test_runner", - }); - - const devel_step = b.step("dev", "Build development tools and test runners"); - devel_step.dependOn(&nu_test_runner.step); - - // const exe_unit_tests = b.addTest(.{ - // .root_source_file = b.path("src/main.zig"), - // .target = target, - // .optimize = optimize, - // }); - // 
exe_unit_tests.linkSystemLibrary2("glfw3", .{ - // .needed = true, - // .preferred_link_mode = .static, - // .use_pkg_config = .force, - // }); - // exe_unit_tests.linkLibC(); - // const run_exe_unit_tests = b.addRunArtifact(exe_unit_tests); - - // const test_step = b.step("test", "Run unit tests"); - // test_step.dependOn(&run_exe_unit_tests.step); - // test_step.dependOn(&run_nu_unit_tests.step); + // const devel_step = b.step("dev", "Build development tools and test runners"); + // devel_step.dependOn(&nu_test_runner.step); } diff --git a/src/Uber.zig b/src/Uber.zig index 702aa3c..efde228 100644 --- a/src/Uber.zig +++ b/src/Uber.zig @@ -2,8 +2,11 @@ const std = @import("std"); const vk = @import("vk"); const shaders = @import("shaders"); +const nu = @import("nu.zig"); const Self = @This(); +const ctx = nu.Render.ctx; + set_layout: vk.DescriptorSetLayout, layout: vk.PipelineLayout, pipeline: vk.Pipeline, @@ -50,34 +53,34 @@ pub const Vertex = extern struct { }; pub fn init(cache: vk.PipelineCache) !Self { - const vert = try au.D.createShaderModule(&.{ + const vert = try ctx.D.createShaderModule(&.{ .code_size = shaders.triangle_vert.len, .p_code = @ptrCast(&shaders.triangle_vert), }, null); - defer au.D.destroyShaderModule(vert, null); + defer ctx.D.destroyShaderModule(vert, null); - const frag = try au.D.createShaderModule(&.{ + const frag = try ctx.D.createShaderModule(&.{ .code_size = shaders.triangle_frag.len, .p_code = @ptrCast(&shaders.triangle_frag), }, null); - defer au.D.destroyShaderModule(frag, null); + defer ctx.D.destroyShaderModule(frag, null); - const set_layout = try au.D.createDescriptorSetLayout(&vk.DescriptorSetLayoutCreateInfo{ + const set_layout = try ctx.D.createDescriptorSetLayout(&vk.DescriptorSetLayoutCreateInfo{ .flags = .{}, .binding_count = @intCast(Uniform.Bindings.len), .p_bindings = &Uniform.Bindings, }, null); - errdefer au.D.destroyDescriptorSetLayout(set_layout, null); + errdefer ctx.D.destroyDescriptorSetLayout(set_layout, null); - const layout = try au.D.createPipelineLayout(&vk.PipelineLayoutCreateInfo{ + const layout = try ctx.D.createPipelineLayout(&vk.PipelineLayoutCreateInfo{ .push_constant_range_count = 0, .set_layout_count = 1, .p_set_layouts = &.{set_layout}, }, null); - errdefer au.D.destroyPipelineLayout(layout, null); + errdefer ctx.D.destroyPipelineLayout(layout, null); var pipeline: vk.Pipeline = .null_handle; - _ = try au.D.createGraphicsPipelines(cache, 1, &[1]vk.GraphicsPipelineCreateInfo{ + _ = try ctx.D.createGraphicsPipelines(cache, 1, &[1]vk.GraphicsPipelineCreateInfo{ vk.GraphicsPipelineCreateInfo{ .stage_count = 2, .p_stages = &.{ @@ -152,20 +155,20 @@ pub fn init(cache: vk.PipelineCache) !Self { }, .p_next = &vk.PipelineRenderingCreateInfo{ .color_attachment_count = 1, - .p_color_attachment_formats = &.{au.device_config.format.format}, + .p_color_attachment_formats = &.{nu.Render.sc.cinfo.image_format}, .depth_attachment_format = .undefined, .stencil_attachment_format = .undefined, .view_mask = 0, }, }, }, null, @ptrCast(&pipeline)); - errdefer au.D.destroyPipeline(pipeline, null); + errdefer ctx.D.destroyPipeline(pipeline, null); return .{ .pipeline = pipeline, .layout = layout, .set_layout = set_layout }; } pub fn deinit(self: Self) void { - au.D.destroyPipeline(self.pipeline, null); - au.D.destroyPipelineLayout(self.layout, null); - au.D.destroyDescriptorSetLayout(self.set_layout, null); + ctx.D.destroyPipeline(self.pipeline, null); + ctx.D.destroyPipelineLayout(self.layout, null); + 
ctx.D.destroyDescriptorSetLayout(self.set_layout, null); } diff --git a/src/main.zig b/src/main.zig index 17627fb..e61048f 100644 --- a/src/main.zig +++ b/src/main.zig @@ -1,86 +1,219 @@ const std = @import("std"); const nu = @import("nu.zig"); +const Uber = @import("Uber.zig"); -pub const nu_modules = .{ - App, - // UI, -}; +pub const nu_modules = .{@This()}; pub const main = nu.main; -pub const nu_options: nu.Options = .{ - .window = .{ .title = "Hello World" }, +pub const nu_config: nu.Config = .{ + .window = nu.Window.Config{ + .title = "au", + }, .render = .{ - .app_name = "hello-world", - .frames_in_flight = 3, + .app_name = "au", + .frames_in_flight = 2, }, }; -// pub const UI = struct { -// const im = nu.ImGui; -// -// pub const depends = .{im}; -// -// var color: @Vector(4, f32) = @splat(1); -// -// pub fn setup(_: std.mem.Allocator) !void { -// const io: *nu.ImGui.ImGuiIO = @ptrCast(nu.ImGui.igGetIO()); -// io.ConfigFlags |= nu.ImGui.ImGuiConfigFlags_DockingEnable; -// } -// -// pub fn frame() !void { -// nu.ImGui.igShowMetricsWindow(null); -// -// { -// const viewport = im.igGetMainViewport(); -// im.igSetNextWindowPos(viewport.*.WorkPos, 0, .{ .x = 0, .y = 0 }); -// im.igSetNextWindowSize(viewport.*.WorkSize, 0); -// im.igSetNextWindowViewport(viewport.*.ID); -// im.igPushStyleVar_Float(im.ImGuiStyleVar_WindowRounding, 0); -// im.igPushStyleVar_Float(im.ImGuiStyleVar_WindowBorderSize, 0); -// im.igPushStyleVar_Vec2(im.ImGuiStyleVar_WindowPadding, .{ .x = 0, .y = 0 }); -// defer im.igPopStyleVar(3); -// -// const window_flags = -// im.ImGuiWindowFlags_MenuBar | -// im.ImGuiWindowFlags_NoDocking | -// im.ImGuiWindowFlags_NoTitleBar | -// im.ImGuiWindowFlags_NoCollapse | -// im.ImGuiWindowFlags_NoResize | -// im.ImGuiWindowFlags_NoMove | -// im.ImGuiWindowFlags_NoBringToFrontOnFocus | -// im.ImGuiWindowFlags_NoNavFocus | -// im.ImGuiWindowFlags_NoBackground; -// -// const dock_flags = -// im.ImGuiDockNodeFlags_PassthruCentralNode | -// im.ImGuiDockNodeFlags_NoDockingOverCentralNode; -// -// _ = im.igBegin("Main Dockspace", null, window_flags); -// const id = im.igGetID_Str("maindockspace"); -// _ = im.igDockSpace(id, .{ .x = 0, .y = 0 }, dock_flags, null); -// im.igEnd(); -// } -// -// if (nu.ImGui.igBegin("Color", null, nu.ImGui.ImGuiWindowFlags_None)) { -// if (nu.ImGui.igColorEdit4("color", @ptrCast(&color), nu.ImGui.ImGuiColorEditFlags_AlphaPreviewHalf)) {} -// } -// nu.ImGui.igEnd(); -// } -// }; +const im = nu.ImGui; +const ctx = nu.Render.ctx; +const vk = @import("vk"); -const App = struct { - const vk = @import("vk"); - // const au = @import("nu/Render/au.zig"); +pub const depends = .{nu.ImGui}; - pub const depends = .{nu.Render}; - - // todo timeline semaphore - - pub fn setup(_: std.mem.Allocator) !void {} - - pub fn teardown() void {} - - pub fn frame() !void {} - - // pub fn present(_: au.CommandBufferProxy) void {} +const vertices: []const Uber.Vertex = &.{ + .{ .pos = .{ 0.0, 0.0, 0.0, 1.0 }, .color = .{ 1.0, 1.0, 0.0 } }, + .{ .pos = .{ 1.0, 0.0, 0.0, 1.0 }, .color = .{ 1.0, 0.0, 1.0 } }, + .{ .pos = .{ 0.0, 1.0, 0.0, 1.0 }, .color = .{ 0.0, 1.0, 1.0 } }, + .{ .pos = .{ 1.0, 1.0, 0.0, 1.0 }, .color = .{ 1.0, 1.0, 1.0 } }, }; + +const Index = u16; +const indices: []const Index = &.{ + 1, 0, 2, + 2, 3, 1, +}; + +const uniform: Uber.Uniform = .{ + .mat = .{ + 1, 0, 0, 0, + 0, 1, 0, 0, + 0, 0, 1, 0, + 0, 0, 0, 1, + }, +}; + +var mem: vk.DeviceMemory = .null_handle; +var vbo: vk.Buffer = .null_handle; +var ibo: vk.Buffer = .null_handle; +var ubo: vk.Buffer = 
.null_handle; + +var pool: vk.DescriptorPool = .null_handle; +var descriptor_set: vk.DescriptorSet = .null_handle; + +var cache: vk.PipelineCache = .null_handle; +var uber: Uber = undefined; + +pub fn setup(_: std.mem.Allocator) !void { + errdefer teardown(); + const props = ctx.I.getPhysicalDeviceMemoryProperties(ctx.pdevice.*); + const memory_type_index: u32 = for (0..props.memory_type_count) |idx| { + const t = props.memory_types[idx]; + if (t.property_flags.host_coherent_bit and t.property_flags.host_visible_bit) { + break @intCast(idx); + } + } else { + unreachable; + }; + + const SIZE = 0x10000; + mem = try ctx.D.allocateMemory(&vk.MemoryAllocateInfo{ + .allocation_size = SIZE, + .memory_type_index = memory_type_index, + }, null); + + const raw: [*]u8 = @ptrCast(try ctx.D.mapMemory(mem, 0, vk.WHOLE_SIZE, .{}) orelse unreachable); + errdefer ctx.D.unmapMemory(mem); + + // todo VMA. This doesn't work for some reason to do with alignment. + // var fba = std.heap.FixedBufferAllocator.init(raw[0..SIZE]); + // const aa = fba.allocator(); + + var bump: usize = 0; + + const vbytes = std.mem.sliceAsBytes(vertices); + vbo = try ctx.D.createBuffer(&vk.BufferCreateInfo{ + .queue_family_index_count = 1, + .p_queue_family_indices = &.{ctx.family.*}, + .sharing_mode = .exclusive, + .size = vbytes.len, + .usage = .{ .vertex_buffer_bit = true }, + }, null); + const vreq = ctx.D.getBufferMemoryRequirements(vbo); + bump = std.mem.alignForward(usize, bump, vreq.alignment); + @memcpy(raw + bump, vbytes); + try ctx.D.bindBufferMemory(vbo, mem, bump); + bump += vreq.size; + + const ibytes = std.mem.sliceAsBytes(indices); + ibo = try ctx.D.createBuffer(&vk.BufferCreateInfo{ + .queue_family_index_count = 1, + .p_queue_family_indices = &.{ctx.family.*}, + .sharing_mode = .exclusive, + .size = ibytes.len, + .usage = .{ .index_buffer_bit = true }, + }, null); + const ireq = ctx.D.getBufferMemoryRequirements(ibo); + bump = std.mem.alignForward(usize, bump, ireq.alignment); + @memcpy(raw + bump, ibytes); + try ctx.D.bindBufferMemory(ibo, mem, bump); + bump += ireq.size; + + const ubytes = std.mem.asBytes(&uniform); + ubo = try ctx.D.createBuffer(&vk.BufferCreateInfo{ + .queue_family_index_count = 1, + .p_queue_family_indices = &.{ctx.family.*}, + .sharing_mode = .exclusive, + .size = ubytes.len, + .usage = .{ .uniform_buffer_bit = true }, + }, null); + const ureq = ctx.D.getBufferMemoryRequirements(ubo); + bump = std.mem.alignForward(usize, bump, ureq.alignment); + @memcpy(raw + bump, ubytes); + try ctx.D.bindBufferMemory(ubo, mem, bump); + bump += ureq.size; + + cache = try ctx.D.createPipelineCache(&vk.PipelineCacheCreateInfo{}, null); + + uber = try Uber.init(cache); + + const pool_sizes: []const vk.DescriptorPoolSize = &.{vk.DescriptorPoolSize{ + .descriptor_count = 8, + .type = .uniform_buffer, + }}; + + pool = try ctx.D.createDescriptorPool(&vk.DescriptorPoolCreateInfo{ + .flags = .{ .free_descriptor_set_bit = true }, + .pool_size_count = @intCast(pool_sizes.len), + .p_pool_sizes = pool_sizes.ptr, + .max_sets = 32, + }, null); + + var sets: [1]vk.DescriptorSet = .{.null_handle}; + try ctx.D.allocateDescriptorSets(&vk.DescriptorSetAllocateInfo{ + .descriptor_pool = pool, + .descriptor_set_count = 1, + .p_set_layouts = &.{uber.set_layout}, + }, &sets); + descriptor_set = sets[0]; + + ctx.D.updateDescriptorSets( + 1, + &.{ + vk.WriteDescriptorSet{ + .dst_set = descriptor_set, + .dst_binding = 0, + .dst_array_element = 0, + .descriptor_count = 1, + .descriptor_type = .uniform_buffer, + .p_buffer_info = &.{ + 
vk.DescriptorBufferInfo{ + .buffer = ubo, + .offset = 0, + .range = vk.WHOLE_SIZE, + }, + }, + .p_image_info = undefined, + .p_texel_buffer_view = undefined, + }, + }, + 0, + null, + ); +} + +pub fn teardown() void { + ctx.Q.waitIdle() catch {}; + uber.deinit(); + ctx.D.destroyPipelineCache(cache, null); + ctx.D.destroyBuffer(ubo, null); + ctx.D.destroyBuffer(ibo, null); + ctx.D.destroyBuffer(vbo, null); + ctx.D.freeMemory(mem, null); + ctx.D.freeDescriptorSets(pool, 1, &.{descriptor_set}) catch unreachable; + ctx.D.destroyDescriptorPool(pool, null); +} + +pub fn frame() !void { + im.igShowMetricsWindow(null); +} + +pub fn present(cmd: ctx.CommandBufferProxy) void { + const w, const h = nu.Window.size(); + + cmd.bindPipeline(.graphics, uber.pipeline); + cmd.setScissor(0, 1, &.{vk.Rect2D{ + .offset = .{ .x = 0, .y = 0 }, + .extent = .{ .width = w, .height = h }, + }}); + cmd.setViewport(0, 1, &.{vk.Viewport{ + .x = 0, + .y = 0, + .width = @floatFromInt(w), + .height = @floatFromInt(h), + .min_depth = 0, + .max_depth = 1, + }}); + cmd.bindIndexBuffer(ibo, 0, .uint16); + cmd.bindVertexBuffers(0, 1, &.{vbo}, &.{0}); + cmd.bindDescriptorSets( + .graphics, + uber.layout, + 0, + 1, + &.{descriptor_set}, + 0, + null, + ); + cmd.drawIndexed(@intCast(indices.len), 1, 0, 0, 0); +} diff --git a/src/nu.zig b/src/nu.zig index 88c0a4b..27695a4 100644 --- a/src/nu.zig +++ b/src/nu.zig @@ -3,11 +3,12 @@ const root = @import("root"); pub const Window = @import("nu/Window.zig"); pub const Render = @import("nu/Render.zig"); -// pub const ImGui = @import("nu/ImGui.zig"); +pub const ImGui = @import("nu/ImGui.zig"); +pub const ctx = @import("nu/Render/ctx.zig"); -pub const Bus = @import("nu/Bus.zig"); +// pub const Bus = @import("nu/Bus.zig"); -const Config = struct { +pub const Config = struct { window: Window.Config = .{}, render: Render.Config = .{}, }; @@ -52,6 +53,7 @@ pub fn Engine(comptime D: type, comptime R: type, comptime M: anytype) type { return R.render(); } + // todo remove this pub fn try_invoke(comptime name: []const u8, args: anytype) !void { inline for (modules) |mod| { if (@hasDecl(mod, name)) { @@ -68,6 +70,7 @@ pub fn Engine(comptime D: type, comptime R: type, comptime M: anytype) type { } } + // todo remove this pub fn try_rinvoke(comptime name: []const u8, args: anytype) !void { comptime var it = std.mem.reverseIterator(modules); inline while (it.next()) |mod| { diff --git a/src/nu/ImGui.zig b/src/nu/ImGui.zig index 38e1caf..07a94a0 100644 --- a/src/nu/ImGui.zig +++ b/src/nu/ImGui.zig @@ -17,7 +17,7 @@ const config = nu.config.imgui; pub const depends = .{ Render, Window }; pub fn loader_wrapper(procname: [*c]const u8, _: ?*anyopaque) callconv(.C) vk.PfnVoidFunction { - return nu.glfwGetInstanceProcAddress(nu.I.handle, procname); + return Render.ctx.glfwGetInstanceProcAddress(Render.ctx.instance.*, procname); } var ctx: *im.ImGuiContext = undefined; @@ -38,8 +38,7 @@ pub fn setup(_: std.mem.Allocator) !void { } errdefer im.impl.ImGui_ImplGlfw_Shutdown(); - descriptor_pool = try Render.ctx.dw.createDescriptorPool( - Render.ctx.device, + descriptor_pool = try Render.ctx.D.createDescriptorPool( &vk.DescriptorPoolCreateInfo{ .flags = .{ .free_descriptor_set_bit = true }, .pool_size_count = 1, @@ -51,24 +50,24 @@ pub fn setup(_: std.mem.Allocator) !void { }, null, ); - errdefer Render.ctx.dw.destroyDescriptorPool(Render.ctx.device, descriptor_pool, null); + errdefer Render.ctx.D.destroyDescriptorPool(descriptor_pool, null); if 
(im.impl.ImGui_ImplVulkan_Init(@constCast(&im.impl.ImGui_ImplVulkan_InitInfo{ - .Instance = @ptrFromInt(@intFromEnum(Render.ctx.instance)), - .PhysicalDevice = @ptrFromInt(@intFromEnum(Render.ctx.pdevice)), - .Device = @ptrFromInt(@intFromEnum(Render.ctx.device)), - .QueueFamily = au.device_config.family, // todo - .Queue = @ptrFromInt(@intFromEnum(au.Q.handle)), // todo + .Instance = @ptrFromInt(@intFromEnum(Render.ctx.instance.*)), + .PhysicalDevice = @ptrFromInt(@intFromEnum(Render.ctx.pdevice.*)), + .Device = @ptrFromInt(@intFromEnum(Render.ctx.device.*)), + .QueueFamily = Render.ctx.family.*, // todo + .Queue = @ptrFromInt(@intFromEnum(Render.ctx.Q.handle)), // todo .DescriptorPool = @ptrFromInt(@intFromEnum(descriptor_pool)), .RenderPass = null, .MinImageCount = 2, - .ImageCount = @intCast(nu.config.render.frames_in_flight), + .ImageCount = @intCast(Render.sc.frames_in_flight), .PipelineRenderingCreateInfo = @bitCast(vk.PipelineRenderingCreateInfo{ .view_mask = 0, .depth_attachment_format = .undefined, .stencil_attachment_format = .undefined, .color_attachment_count = 1, - .p_color_attachment_formats = &.{au.device_config.format.format}, // todo + .p_color_attachment_formats = &.{Render.sc.cinfo.image_format}, // todo }), .MSAASamples = 0, .PipelineCache = null, @@ -86,9 +85,9 @@ pub fn setup(_: std.mem.Allocator) !void { } pub fn teardown() void { - Render.ctx.dw.deviceWaitIdle(Render.ctx.device) catch |err| std.debug.panic("Device wait failed: {!}", .{err}); + Render.ctx.D.deviceWaitIdle() catch |err| std.debug.panic("Device wait failed: {!}", .{err}); im.impl.ImGui_ImplVulkan_Shutdown(); - Render.ctx.dw.destroyDescriptorPool(Render.ctx.device, descriptor_pool, null); + Render.ctx.D.destroyDescriptorPool(descriptor_pool, null); im.impl.ImGui_ImplGlfw_Shutdown(); im.igDestroyContext(ctx); } @@ -99,7 +98,7 @@ pub fn frame() !void { im.igNewFrame(); } -pub fn present(cmd: au.CommandBufferProxy) void { // todo +pub fn rpresent(cmd: Render.ctx.CommandBufferProxy) void { // todo im.igEndFrame(); im.igRender(); diff --git a/src/nu/Render.zig b/src/nu/Render.zig index 5960530..fd868d2 100644 --- a/src/nu/Render.zig +++ b/src/nu/Render.zig @@ -8,7 +8,8 @@ const vk = @import("vk"); const nu = @import("../nu.zig"); -const ctx = @import("Render/ctx.zig"); +pub const ctx = @import("Render/ctx.zig"); + const SwapChain = @import("Render/SwapChain.zig"); pub const Config = struct { @@ -30,6 +31,7 @@ pub const Config = struct { .Debug, .ReleaseSafe => true, .ReleaseSmall, .ReleaseFast => false, }, + frames_in_flight: u8 = 2, }; const config = nu.config.render; @@ -70,17 +72,17 @@ const Flight = struct { } }; +pub const sc: *const SwapChain = &_sc; + var _sc: SwapChain = undefined; -var _flights: []Flight = undefined; +var _flights: [nu.config.render.frames_in_flight]Flight = undefined; pub fn setup(alloc: std.mem.Allocator) !void { try ctx.init(alloc); errdefer ctx.deinit(); - _flights = try alloc.alloc(Flight, 3); - errdefer alloc.free(_flights); errdefer for (_flights) |flight| flight.deinit(); - for (_flights) |*flight| flight.* = try Flight.init(); + for (&_flights) |*flight| flight.* = try Flight.init(); _sc = try SwapChain.init(alloc, _flights.len); errdefer _sc.deinit(); @@ -143,6 +145,9 @@ pub fn render() !void { }, }); + nu.engine.invoke("present", .{cmd}); + nu.engine.rinvoke("rpresent", .{cmd}); + cmd.endRendering(); cmd.pipelineBarrier( diff --git a/src/nu/Render/SwapChain.zig b/src/nu/Render/SwapChain.zig index 183eedb..01356e4 100644 --- a/src/nu/Render/SwapChain.zig +++ 
b/src/nu/Render/SwapChain.zig @@ -96,6 +96,7 @@ const View = struct { alloc: std.mem.Allocator, +frames_in_flight: u32, flight_index: usize = 0, flight_syncs: std.MultiArrayList(Sync) = .{}, @@ -104,13 +105,14 @@ handle: vk.SwapchainKHR = .null_handle, chain: std.MultiArrayList(View) = .{}, -pub fn init(alloc: std.mem.Allocator, flight_count: usize) !Self { +pub fn init(alloc: std.mem.Allocator, frames_in_flight: usize) !Self { var self: Self = .{ .alloc = alloc, + .frames_in_flight = @intCast(frames_in_flight), }; errdefer self.deinit(); - try self.flight_syncs.resize(alloc, flight_count); + try self.flight_syncs.resize(alloc, frames_in_flight); for (self.flight_syncs.items(.acquired)) |*sem| sem.* = try ctx.D.createSemaphore(&.{}, null); for (self.flight_syncs.items(.complete)) |*sem| @@ -275,11 +277,11 @@ pub fn present(self: *Self, target: Target) !void { .p_swapchains = &.{self.handle}, .p_image_indices = &.{target.image_index}, .p_results = null, - })) |res| switch(res) { + })) |res| switch (res) { .success => {}, .suboptimal_khr => self.handle = .null_handle, else => unreachable, - } else |err| switch(err) { + } else |err| switch (err) { error.OutOfDateKHR => self.handle = .null_handle, else => return err, } diff --git a/src/nu/Window.zig b/src/nu/Window.zig index 9ae74e5..0c749d6 100644 --- a/src/nu/Window.zig +++ b/src/nu/Window.zig @@ -93,3 +93,10 @@ pub fn add_resize_callback(cb: *const fn (u32, u32) void) void { pub fn set_title(title: [:0]const u8) void { c.glfwSetWindowTitle(handle, title); } + +pub fn size() std.meta.Tuple(&[_]type{ u32, u32 }) { + var w: c_int = undefined; + var h: c_int = undefined; + c.glfwGetFramebufferSize(handle, &w, &h); + return .{ @intCast(w), @intCast(h) }; +}