24 Commits

Author SHA1 Message Date
Robin Voetter
d87813312e Make Vulkan enums always be 32-bit (fixes #26) 2021-11-08 13:51:35 +01:00
InKryption
e17c3593d1 Update graphics_context.zig 2021-11-08 13:51:24 +01:00
InKryption
9513d33bf8 Replace anytype with explicit []const {s}Command 2021-11-08 13:51:12 +01:00
Robin Voetter
59c5b88d17 Add mach-glfw and mach-glfw-vulkan-example readme links 2021-11-08 13:48:27 +01:00
Stephen Gutekanst
4588c0fcad examples: do not call glfwSwapBuffers
I am porting this example to [mach-glfw](https://github.com/hexops/mach-glfw), and noticed that no GLFW error handling callback is registered, because in my port there are a lot of GLFW errors :)

`glfwSwapBuffers` here is emitting `GLFW_NO_WINDOW_CONTEXT` errors constantly, because calling it without a valid OpenGL context is illegal. It's not needed for Vulkan.
2021-11-08 13:48:03 +01:00
Robin Voetter
f55409f98a Make command enums lower camel case to reflect command function name style 2021-10-25 14:38:00 +02:00
Robin Voetter
cbf06a8d42 CI: Enable CI for zig-0.8.1-compat 2021-10-25 13:59:04 +02:00
Robin Voetter
c5bb254766 Make sure there are no errors after parsing generated Zig 2021-10-25 13:58:44 +02:00
Robin Voetter
5980bac303 CI: Bump vulkan sdk to 189 2021-10-25 13:55:18 +02:00
Robin Voetter
3bfacc7e16 Fix some allocation bugs, replace everything by arena (#18) 2021-10-25 13:54:48 +02:00
Marten Ringwelski
1e594c0f09 examples/swapchain: Fix typo 2021-10-25 13:54:31 +02:00
ashpil
397e663296 adds defaults for previously undetected feature struct 2021-10-25 13:54:12 +02:00
Robin Voetter
933010cfff Update readme with new build.zig usage 2021-10-25 13:53:04 +02:00
ashpil
0eccd593ce implements default for feature structs 2021-10-25 13:46:52 +02:00
Robin Voetter
6a2c379146 Update readme 2021-07-06 11:00:13 +02:00
Robin Voetter
4429151d9c CI: Also test 0.8.0 branch 2021-07-06 10:46:44 +02:00
Robin Voetter
77651872ab Render error set constant for wrappers 2021-07-06 10:37:39 +02:00
Robin Voetter
5a51d18bda CI: Upload vk.zig as artifact 2021-07-06 10:37:39 +02:00
Robin Voetter
6feeeac109 Merge pull request #12 from ashpil/master
camel -> snake for command enums + fixes
2021-07-06 10:37:39 +02:00
ashpil
8f10cec149 camel -> snake for command enums + fixes 2021-07-06 10:37:39 +02:00
ashpil
0e65efd9d6 less verbose interface via @Type 2021-07-06 10:37:39 +02:00
Robin Voetter
b3c71d69ea Fix a whole bunch of issues exposed by ziglang/zig#9191
This also includes a workaround for the fact that @"type" refers to the builtin
and not to a variable called "type". See ziglang/zig#2897.
2021-07-06 10:37:39 +02:00
Robin Voetter
b63533d95b Fix another vk.xml moment 2021-07-06 10:37:39 +02:00
Robin Voetter
419e541a16 zig fmt **.zig 2021-07-06 10:37:39 +02:00
30 changed files with 19716 additions and 4575 deletions


@@ -2,59 +2,45 @@ name: Build
 on:
   push:
-    branches: [ master ]
+    branches: [ zig-0.8.1-compat ]
   pull_request:
-    branches: [ master ]
+    branches: [ zig-0.8.1-compat ]
   schedule:
     - cron: '0 6 * * *'
 jobs:
   build:
-    runs-on: ubuntu-22.04
+    runs-on: ubuntu-20.04
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v2
       - name: Setup Zig
-        uses: mlugg/setup-zig@v2
+        uses: goto-bus-stop/setup-zig@v1.3.0
         with:
-          version: master
-      - name: Check formatting
-        run: zig fmt --check .
+          version: 0.8.0
+      - name: Test
+        run: |
+          zig build test
-      - name: Fetch latest Vulkan SDK
+      - name: Fetch Vulkan SDK
         run: |
           wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | sudo apt-key add -
-          sudo wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list
+          sudo wget -qO /etc/apt/sources.list.d/lunarg-vulkan-1.2.189-focal.list https://packages.lunarg.com/vulkan/1.2.189/lunarg-vulkan-1.2.189-focal.list
           sudo apt update
           sudo apt install shaderc libglfw3 libglfw3-dev
       - name: Fetch latest vk.xml
         run: |
           wget https://raw.githubusercontent.com/KhronosGroup/Vulkan-Docs/main/xml/vk.xml
-          wget https://raw.githubusercontent.com/KhronosGroup/Vulkan-Docs/main/xml/video.xml
-      - name: Test and install with latest zig & latest vk.xml
-        run: zig build test install -Dregistry=$(pwd)/vk.xml
-      - name: Test and install with latest zig & latest vk.xml & latest video.xml
-        run: zig build test install -p zig-out-video -Dregistry=$(pwd)/vk.xml -Dvideo=$(pwd)/video.xml
-      - name: Build example with latest zig & vk.xml from dependency
-        run: zig build --build-file $(pwd)/examples/build.zig
-      - name: Build example with latest zig & latest vk.xml
-        run: zig build --build-file $(pwd)/examples/build.zig -Doverride-registry=$(pwd)/vk.xml
-      - name: Build example with latest zig & vk.xml from dependency & use zig shaders
-        run: zig build --build-file $(pwd)/examples/build.zig -Dzig-shader
-      - name: Archive vk.zig
-        uses: actions/upload-artifact@v4
+      - name: Build with latest zig & vk.xml
+        run: |
+          zig build -Dvulkan-registry=./vk.xml
+      - name: Archive vk.xml
+        uses: actions/upload-artifact@v2
         with:
           name: vk.zig
-          path: |
-            zig-out/src/vk.zig
-            zig-out-video/src/vk.zig
-          if-no-files-found: error
+          path: zig-cache/vk.zig

.gitignore (vendored), 3 lines changed

@@ -1,5 +1,2 @@
 zig-cache/
 zig-out/
-.vscode/.zig-cache/
-.zig-cache/
-examples/.zig-cache


@@ -1,4 +1,4 @@
-Copyright © Robin Voetter
+Copyright © 2020 Robin Voetter
 Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

README.md, 334 lines changed

@@ -10,113 +10,38 @@ vulkan-zig attempts to provide a better experience to programming Vulkan applica
vulkan-zig is automatically tested daily against the latest vk.xml and zig, and supports vk.xml from version 1.x.163.
## Example
A partial implementation of https://vulkan-tutorial.com is implemented in [examples/triangle.zig](examples/triangle.zig). This example can be run by executing `zig build --build-file $(pwd)/examples/build.zig run-triangle` in vulkan-zig's root. See in particular the [build file](examples/build.zig), which contains a concrete example of how to use vulkan-zig as a dependency.
### Zig versions
vulkan-zig aims to always be compatible with the ever-changing Zig master branch (however, development may lag a few days behind). However, the Zig master branch sometimes breaks a bunch of functionality, which may make the latest version of vulkan-zig incompatible with older releases of Zig. This repository aims to have a version compatible with both the latest Zig master and the latest Zig release. The `master` branch is compatible with the `master` branch of Zig, and versions for older versions of Zig are maintained in the `zig-<version>-compat` branch.
`master` is compatible and tested with the Zig self-hosted compiler. The `zig-stage1-compat` branch contains a version which is compatible with the Zig stage 1 compiler.
## Features
### CLI-interface
A CLI-interface is provided to generate vk.zig from the [Vulkan XML registry](https://github.com/KhronosGroup/Vulkan-Docs/blob/master/xml), which is built by default when invoking `zig build` in the project root. To generate vk.zig, simply invoke the program as follows:
A CLI-interface is provided to generate vk.zig from the [Vulkan XML registry](https://github.com/KhronosGroup/Vulkan-Docs/blob/main/xml), which is built by default when invoking `zig build` in the project root. To generate vk.zig, simply invoke the program as follows:
```
$ zig-out/bin/vulkan-zig-generator path/to/vk.xml output/path/to/vk.zig
$ zig-cache/bin/vulkan-zig-generator path/to/vk.xml output/path/to/vk.zig
```
This reads the xml file, parses its contents, renders the Vulkan bindings, and formats the file, before writing the result to the output path. While the intended usage of vulkan-zig is through direct generation from build.zig (see below), the CLI-interface can be used for one-off generation and vendoring the result.
`path/to/vk.xml` can be obtained from several sources:
- From the LunarG Vulkan SDK. This can either be obtained from [LunarG](https://www.lunarg.com/vulkan-sdk) or usually using the package manager. The registry can then be found at `$VULKAN_SDK/share/vulkan/registry/vk.xml`.
- Directly from the [Vulkan-Headers GitHub repository](https://github.com/KhronosGroup/Vulkan-Headers/blob/main/registry/vk.xml).
### Generation from build.zig
Vulkan bindings can be generated from the Vulkan XML registry at compile time with build.zig, by using the provided Vulkan generation step:
```zig
const vkgen = @import("vulkan-zig/generator/index.zig");

pub fn build(b: *Builder) void {
    ...
    const exe = b.addExecutable("my-executable", "src/main.zig");

    // Create a step that generates vk.zig (stored in zig-cache) from the provided vulkan registry.
    const gen = vkgen.VkGenerateStep.init(b, "path/to/vk.xml", "vk.zig");
    exe.step.dependOn(&gen.step);

    // Add the generated file as package to the final executable
    exe.addPackage(gen.package);
}
```
This reads vk.xml, parses its contents, and renders the Vulkan bindings to "vk.zig", which is then formatted and placed in `zig-cache`. The resulting file can then be added to an executable by using `addPackage`, after which the bindings will be made available to the executable under the name `vulkan`.
### Generation with the package manager from build.zig
There is also support for adding this project as a dependency through the zig package manager in its current form. In order to do this, add this repo as a dependency in your build.zig.zon:
```zig
.{
    // -- snip --
    .dependencies = .{
        // -- snip --
        .vulkan_zig = .{
            .url = "https://github.com/Snektron/vulkan-zig/archive/<commit SHA>.tar.gz",
            .hash = "<dependency hash>",
        },
    },
}
```
And then in your build.zig file, you'll need to add a line like this to your build function:
```zig
const vulkan = b.dependency("vulkan_zig", .{
.registry = b.path("path/to/vk.xml"),
}).module("vulkan-zig");
exe.root_module.addImport("vulkan", vulkan);
```
That will allow you to `@import("vulkan")` in your executable's source.
#### Generating bindings directly from Vulkan-Headers
Bindings can be generated directly from the Vulkan-Headers repository by adding Vulkan-Headers as a dependency, and then passing the path to `vk.xml` from that dependency:
```zig
.{
// -- snip --
.dependencies = .{
// -- snip --
.vulkan_headers = .{
.url = "https://github.com/KhronosGroup/Vulkan-Headers/archive/v1.3.283.tar.gz",
.hash = "<dependency hash>",
},
},
}
```
```zig
const vulkan = b.dependency("vulkan_zig", .{
.registry = b.dependency("vulkan_headers", .{}).path("registry/vk.xml"),
}).module("vulkan-zig");
exe.root_module.addImport("vulkan", vulkan);
```
### Manual generation with the package manager from build.zig
Bindings can also be generated by invoking the generator directly. This may be useful in some special cases; for example, it integrates particularly well with fetching the registry via the package manager. This can be done by adding the Vulkan-Headers repository to your dependencies, and then passing the `vk.xml` inside it to vulkan-zig-generator:
```zig
.{
// -- snip --
.dependencies = .{
// -- snip --
.vulkan_headers = .{
.url = "https://github.com/KhronosGroup/Vulkan-Headers/archive/<commit SHA>.tar.gz",
.hash = "<dependency hash>",
},
},
}
```
And then pass `vk.xml` to vulkan-zig-generator as follows:
```zig
// Get the (lazy) path to vk.xml:
const registry = b.dependency("vulkan_headers", .{}).path("registry/vk.xml");
// Get generator executable reference
const vk_gen = b.dependency("vulkan_zig", .{}).artifact("vulkan-zig-generator");
// Set up a run step to generate the bindings
const vk_generate_cmd = b.addRunArtifact(vk_gen);
// Pass the registry to the generator
vk_generate_cmd.addFileArg(registry);
// Create a module from the generator's output...
const vulkan_zig = b.addModule("vulkan-zig", .{
.root_source_file = vk_generate_cmd.addOutputFileArg("vk.zig"),
});
// ... and pass it as a module to your executable's build command
exe.root_module.addImport("vulkan", vulkan_zig);
```
See [examples/build.zig](examples/build.zig) and [examples/build.zig.zon](examples/build.zig.zon) for a concrete example.
### Function & field renaming
Functions and fields are renamed to be more or less in line with [Zig's standard library style](https://ziglang.org/documentation/master/#Style-Guide):
* The vk prefix is removed everywhere
* Structs like `VkInstanceCreateInfo` are renamed to `InstanceCreateInfo`.
@@ -127,9 +52,8 @@ Functions and fields are renamed to be more or less in line with [Zig's standard
* Container fields and function parameter names are generated in (lower) snake case in a similar manner: `ppEnabledLayerNames` becomes `pp_enabled_layer_names`.
* Any name which is either an illegal Zig name or a reserved identifier is rendered using `@"name"` syntax. For example, `VK_IMAGE_TYPE_2D` is translated to `@"2d"`.
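As a rough sketch of how these rules look from the consumer side (the exact declarations depend on the vk.xml used to generate the bindings):
```zig
const vk = @import("vulkan");

// VkInstanceCreateInfo becomes vk.InstanceCreateInfo, and its fields use snake case.
const info = vk.InstanceCreateInfo{
    .flags = .{},
    .p_application_info = null,
    .enabled_layer_count = 0,
    .pp_enabled_layer_names = undefined,
    .enabled_extension_count = 0,
    .pp_enabled_extension_names = undefined,
};

// VK_IMAGE_TYPE_2D is not a legal Zig identifier, so it is rendered as .@"2d".
const image_type: vk.ImageType = .@"2d";
```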
### Dispatch Tables
### Function pointers & Wrappers
vulkan-zig provides no integration for statically linking libvulkan, and these symbols are not generated at all. Instead, vulkan functions are to be loaded dynamically. For each Vulkan function, a function pointer type is generated using the exact parameters and return types as defined by the Vulkan specification:
```zig
pub const PfnCreateInstance = fn (
    p_create_info: *const InstanceCreateInfo,
@@ -138,58 +62,62 @@ pub const PfnCreateInstance = fn (
) callconv(vulkan_call_conv) Result;
```
A set of _dispatch table_ structures is generated. A dispatch table simply contains a set of (optional) function pointers to Vulkan API functions, and not much else. Function pointers are grouped by the nature of the function as follows:
* Vulkan functions which are loaded by `vkGetInstanceProcAddr` without the need for passing an instance are placed in `BaseDispatch`.
* Vulkan functions which are loaded by `vkGetInstanceProcAddr` but do need an instance are placed in `InstanceDispatch`.
* Vulkan functions which are loaded by `vkGetDeviceProcAddr` are placed in `DeviceDispatch`.
For each function, a wrapper is generated into one of three structs:
* BaseWrapper. This contains wrappers for functions which are loaded by `vkGetInstanceProcAddr` without an instance, such as `vkCreateInstance`, `vkEnumerateInstanceVersion`, etc.
* InstanceWrapper. This contains wrappers for functions which are otherwise loaded by `vkGetInstanceProcAddr`.
* DeviceWrapper. This contains wrappers for functions which are loaded by `vkGetDeviceProcAddr`.
### Wrappers
To provide more interesting functionality, a set of _wrapper_ types is also generated, one for each dispatch table type. These contain the Zig-versions of each Vulkan API function, along with corresponding error set definitions, return type definitions, etc, where appropriate.
Each wrapper struct can be called with an array of the appropriate enums:
```zig
const vk = @import("vulkan");
const BaseDispatch = vk.BaseWrapper(.{
.CreateInstance,
});
```
The wrapper struct then provides wrapper functions for each function pointer in the dispatch struct:
```zig
pub const BaseWrapper = struct {
pub const BaseWrapper(comptime cmds: anytype) type {
const Self = @This();
const Dispatch = CreateDispatchStruct(cmds);
dispatch: Dispatch,
pub const CreateInstanceError = error{
OutOfHostMemory,
OutOfDeviceMemory,
InitializationFailed,
LayerNotPresent,
ExtensionNotPresent,
IncompatibleDriver,
Unknown,
};
pub fn createInstance(
self: Self,
create_info: InstanceCreateInfo,
p_allocator: ?*const AllocationCallbacks,
) CreateInstanceError!Instance {
var instance: Instance = undefined;
const result = self.dispatch.vkCreateInstance.?(
&create_info,
p_allocator,
&instance,
);
switch (result) {
.success => {},
.error_out_of_host_memory => return error.OutOfHostMemory,
.error_out_of_device_memory => return error.OutOfDeviceMemory,
.error_initialization_failed => return error.InitializationFailed,
.error_layer_not_present => return error.LayerNotPresent,
.error_extension_not_present => return error.ExtensionNotPresent,
.error_incompatible_driver => return error.IncompatibleDriver,
else => return error.Unknown,
}
return instance;
}
... ...
};
const Dispatch = CreateDispatchStruct(cmds);
return struct {
dispatch: Dispatch,
pub const CreateInstanceError = error{
OutOfHostMemory,
OutOfDeviceMemory,
InitializationFailed,
LayerNotPresent,
ExtensionNotPresent,
IncompatibleDriver,
Unknown,
};
pub fn createInstance(
self: Self,
create_info: InstanceCreateInfo,
p_allocator: ?*const AllocationCallbacks,
) CreateInstanceError!Instance {
var instance: Instance = undefined;
const result = self.dispatch.vkCreateInstance(
&create_info,
p_allocator,
&instance,
);
switch (result) {
.success => {},
.error_out_of_host_memory => return error.OutOfHostMemory,
.error_out_of_device_memory => return error.OutOfDeviceMemory,
.error_initialization_failed => return error.InitializationFailed,
.error_layer_not_present => return error.LayerNotPresent,
.error_extension_not_present => return error.ExtensionNotPresent,
.error_incompatible_driver => return error.IncompatibleDriver,
else => return error.Unknown,
}
return instance;
}
...
}
}
```
Wrappers are generated according to the following rules:
* The return type is determined from the original return type and the parameters.
@@ -200,56 +128,14 @@ Wrappers are generated according to the following rules:
* Error codes are translated into Zig errors.
* As of yet, there is no specific handling of enumeration style commands or other commands which accept slices.
#### Initializing Wrappers
Furthermore, each wrapper contains a function to load each function pointer member when passed either `PfnGetInstanceProcAddr` or `PfnGetDeviceProcAddr`, which attempts to load each member as a function pointer and casts it to the appropriate type. These functions are loaded literally, and any wrongly named member or member with a wrong function pointer type will result in problems.
* For `BaseWrapper`, this function has signature `fn load(loader: anytype) !Self`, where the type of `loader` must resemble `PfnGetInstanceProcAddr` (with optionally having a different calling convention).
* For `InstanceWrapper`, this function has signature `fn load(instance: Instance, loader: anytype) !Self`, where the type of `loader` must resemble `PfnGetInstanceProcAddr`.
* For `DeviceWrapper`, this function has signature `fn load(device: Device, loader: anytype) !Self`, where the type of `loader` must resemble `PfnGetDeviceProcAddr`.
Wrapper types are initialized by the `load` function, which must be passed a _loader_: a function which loads a function pointer by name.
One can access the underlying unwrapped C functions by doing `wrapper.dispatch.vkFuncYouWant(..)`.
* For `BaseWrapper`, this function has signature `fn load(loader: anytype) Self`, where the type of `loader` must resemble `PfnGetInstanceProcAddr` (with optionally having a different calling convention).
* For `InstanceWrapper`, this function has signature `fn load(instance: Instance, loader: anytype) Self`, where the type of `loader` must resemble `PfnGetInstanceProcAddr`.
* For `DeviceWrapper`, this function has signature `fn load(device: Device, loader: anytype) Self`, where the type of `loader` must resemble `PfnGetDeviceProcAddr`.
Note that these functions accept a loader with the signature of `anytype` instead of `PfnGetInstanceProcAddr`. This is because it is valid for `vkGetInstanceProcAddr` to load itself, in which case the returned function is to be called with the vulkan calling convention. This calling convention is not required for loading vulkan-zig itself, though, and a loader using any calling convention supported by the target architecture may be passed in. This is particularly useful when interacting with C libraries that provide `vkGetInstanceProcAddr`.
```zig
// vkGetInstanceProcAddr as provided by GLFW.
// Note that vk.Instance and vk.PfnVoidFunction are ABI compatible with VkInstance,
// and that `extern` implies the C calling convention.
pub extern fn glfwGetInstanceProcAddress(instance: vk.Instance, procname: [*:0]const u8) vk.PfnVoidFunction;
// Or provide a custom implementation.
// This function is called with the unspecified Zig-internal calling convention.
fn customGetInstanceProcAddress(instance: vk.Instance, procname: [*:0]const u8) vk.PfnVoidFunction {
...
}
// Both calls are valid.
const vkb = BaseWrapper.load(glfwGetInstanceProcAddress);
const vkb = BaseWrapper.load(customGetInstanceProcAddress);
```
The `load` function tries to load all function pointers unconditionally, regardless of enabled extensions or platform. If a function pointer could not be loaded, its entry in the dispatch table is set to `null`. When invoking a function on a wrapper table, the function pointer is checked for null, and there will be a crash or undefined behavior if it was not loaded properly. That means that **it is up to the programmer to ensure that a function pointer is valid for the platform before calling it**, either by checking whether the associated extension or Vulkan version is supported or simply by checking whether the function pointer is non-null.
One can access the underlying unwrapped C functions by doing `wrapper.dispatch.vkFuncYouWant.?(..)`.
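For example, a minimal sketch of such a check, assuming a device wrapper `vkd` and a `dev` handle and `create_info` that were set up elsewhere (the swapchain function is just an illustrative pick):
```zig
const vk = @import("vulkan");

fn createSwapchainChecked(
    vkd: vk.DeviceWrapper,
    dev: vk.Device,
    create_info: vk.SwapchainCreateInfoKHR,
) !vk.SwapchainKHR {
    // The loader leaves entry points it could not resolve as null, so check
    // before calling into the wrapper (which would otherwise unwrap a null).
    if (vkd.dispatch.vkCreateSwapchainKHR == null) return error.ExtensionNotPresent;
    return vkd.createSwapchainKHR(dev, &create_info, null);
}
```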
#### Proxying Wrappers
Proxying wrappers wrap a handle and a pointer to the associated wrapper in a single struct, and automatically pass this handle to commands as appropriate. Besides the proxying wrappers for instances and devices, there are also proxying wrappers for queues and command buffers. Proxying wrapper types are constructed in the same way as a regular wrapper, by passing an api specification to them. To initialize a proxying wrapper, it must be passed a handle and a pointer to an appropriate wrapper. For queue and command buffer proxying wrappers, a pointer to a device wrapper must be passed.
```zig
const InstanceWrapper = vk.InstanceWrapper;
const Instance = vk.InstanceProxy;
const instance_handle = try vkb.createInstance(...);
const vki = try InstanceWrapper.load(instance_handle, vkb.dispatch.vkGetInstanceProcAddr.?);
const instance = Instance.load(instance_handle, &vki);
defer instance.destroyInstance(null);
```
For queue and command buffer proxying wrappers, the `queue` and `cmd` prefixes are removed from functions where appropriate. Note that the device proxying wrappers also have the queue and command buffer functions made available for convenience, but there the prefix is not stripped.
Note that the proxy must be passed a _pointer_ to a wrapper. This is because there was a limitation with LLVM in the past, where a struct with an object pointer and its associated function pointers wouldn't be optimized properly. By using a separate function pointer, LLVM knows that the "vtable" dispatch struct can never be modified and so it can subject each call to vtable optimizations.
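A hedged sketch of the prefix difference, assuming a device proxy and a raw `vk.CommandBuffer` handle obtained from `allocateCommandBuffers` (the exact proxy initializer may differ):
```zig
const vk = @import("vulkan");

// Sketch only: record a trivial draw through both flavours of wrapper.
fn recordDraw(device: vk.DeviceProxy, cmdbuf_handle: vk.CommandBuffer) void {
    // On the command buffer proxy the `cmd` prefix is stripped...
    const cmdbuf = vk.CommandBufferProxy.init(cmdbuf_handle, device.wrapper);
    cmdbuf.draw(3, 1, 0, 0);

    // ...while the device proxy keeps the full name and takes the handle explicitly.
    device.cmdDraw(cmdbuf_handle, 3, 1, 0, 0);
}
```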
### Bitflags
Packed structs of bools are used for bit flags in vulkan-zig, instead of both a `FlagBits` and `Flags` variant. Places where either of these variants are used are both replaced by this packed struct instead. This means that even in places where just one flag would normally be accepted, the packed struct is accepted. The programmer is responsible for only enabling a single bit.
Each bit is defaulted to `false`, and the first `bool` is aligned to guarantee the overall alignment
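For illustration, a small sketch of what using such flags looks like (the field names follow the usual `*_bit` pattern but depend on the registry):
```zig
const vk = @import("vulkan");

// A "Flags"-style parameter and a "FlagBits"-style parameter both take the
// same packed struct; unset fields default to false.
const usage = vk.BufferUsageFlags{
    .transfer_dst_bit = true,
    .vertex_buffer_bit = true,
};

// Where logically only one bit is allowed, it is up to the caller to set
// exactly one field.
const wait_stage = vk.PipelineStageFlags{ .color_attachment_output_bit = true };
```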
@@ -298,7 +184,6 @@ pub fn FlagsMixin(comptime FlagsType: type) type {
### Handles
Handles are generated to a non-exhaustive enum, backed by a `u64` for non-dispatchable handles and `usize` for dispatchable ones:
```zig
const Instance = extern enum(usize) { null_handle = 0, _ };
```
@@ -306,7 +191,6 @@ const Instance = extern enum(usize) { null_handle = 0, _ };
This means that handles are type-safe even when compiling for a 32-bit target.
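For instance (a sketch, nothing registry-specific):
```zig
const vk = @import("vulkan");

// Each handle is its own enum type, so they cannot be mixed up by accident,
// even though both are plain integers at the ABI level.
const buffer: vk.Buffer = .null_handle;
const image: vk.Image = .null_handle;

// This would be a compile error: expected 'vk.Buffer', found 'vk.Image'.
// const wrong: vk.Buffer = image;
```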
### Struct defaults
Defaults are generated for certain fields of structs:
* sType is defaulted to the appropriate value.
* pNext is defaulted to `null`.
@@ -314,14 +198,13 @@ Defaults are generated for certain fields of structs:
```zig
pub const InstanceCreateInfo = extern struct {
    s_type: StructureType = .instance_create_info,
    p_next: ?*const anyopaque = null,
    p_next: ?*const c_void = null,
    flags: InstanceCreateFlags,
    ...
};
```
### Pointer types
Pointer types in both commands (wrapped and function pointers) and struct fields are augmented with the following information, where available in the registry:
* Pointer optional-ness.
* Pointer const-ness.
@@ -330,63 +213,46 @@ Pointer types in both commands (wrapped and function pointers) and struct fields
Note that this information is not everywhere as useful in the registry, leading to places where optional-ness is not correct. Most notably, CreateInfo type structures which take a slice often have the item count marked as optional, but the pointer itself not. As of yet, this is not fixed in vulkan-zig. If drivers properly follow the Vulkan specification, these can be initialized to `undefined`, however, [that is not always the case](https://zeux.io/2019/07/17/serializing-pipeline-cache/).
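As a sketch, a hypothetical struct (not one from the registry) showing how that information typically surfaces in the generated code:
```zig
// `const void* pNext` -> optional, const, opaque pointee.
// `const char* const* ppEnabledLayerNames` plus a length attribute -> a
// many-item pointer of null-terminated strings, counted by the field above it.
pub const ExampleCreateInfo = extern struct {
    p_next: ?*const anyopaque = null,
    enabled_layer_count: u32,
    pp_enabled_layer_names: [*]const [*:0]const u8,
};
```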
### Platform types
Defaults with the same ABI layout are generated for most platform-defined types. These can either be bitcast to, or overridden by defining them in the project root:
```zig
pub const xcb_connection_t = if (@hasDecl(root, "xcb_connection_t")) root.xcb_connection_t else opaque{};
pub const xcb_connection_t = if (@hasDecl(root, "xcb_connection_t")) root.xcb_connection_t else @Type(.Opaque);
```
For some types (such as those from Google Games Platform) no default is known, but an `opaque{}` will be used by default. Usage of these without providing a concrete type in the project root is likely an error.
For some types (such as those from Google Games Platform) no default is known. Usage of these without providing a concrete type in the project root generates a compile error.
### Shader compilation
Shaders should be compiled by invoking a shader compiler via the build system. For example:
```zig
pub fn build(b: *Builder) void {
    ...
    const vert_cmd = b.addSystemCommand(&.{
        "glslc",
        "--target-env=vulkan1.2",
        "-o"
    });
    const vert_spv = vert_cmd.addOutputFileArg("vert.spv");
    vert_cmd.addFileArg(b.path("shaders/triangle.vert"));
    exe.root_module.addAnonymousImport("vertex_shader", .{
        .root_source_file = vert_spv
    });
    ...
}
```
vulkan-zig also provides functionality to help compile shaders using glslc. It can be used from build.zig as follows:
```zig
const vkgen = @import("vulkan-zig/generator/index.zig");

pub fn build(b: *Builder) void {
    ...
    const exe = b.addExecutable("my-executable", "src/main.zig");
    const gen = vkgen.VkGenerateStep(b, "path/to/vk.xml", "vk.zig");
    exe.step.dependOn(&gen.step);
    exe.addPackage(gen.package);

    const shader_comp = vkgen.ShaderCompileStep.init(
        builder,
        &[_][]const u8{"glslc", "--target-env=vulkan1.2"}, // Path to glslc and additional parameters
    );
    exe.step.dependOn(&shader_comp.step);
    const spv_path = shader_comp.addShader("path/to/shader.frag");
}
```
Upon compilation, glslc is then invoked to compile each shader, and the result is placed within `zig-cache`. `addShader` returns the full path to the compiled shader code. This file can then be included in the project, as is done in [build.zig for the example](build.zig) by generating an additional file which uses `@embedFile`.
Note that SPIR-V must be 32-bit aligned when fed to Vulkan. The easiest way to do this is to dereference the shader's bytecode and manually align it as follows:
```zig
const vert_spv align(@alignOf(u32)) = @embedFile("vertex_shader").*;
```
See [examples/build.zig](examples/build.zig) for a working example.
For more advanced shader compiler usage, one may consider a library such as [shader_compiler](https://github.com/Games-by-Mason/shader_compiler).
### Vulkan Video
Vulkan-zig also supports generating Vulkan Video bindings. To do this, additionally pass `--video <video.xml>` to the generator, or pass `-Dvideo=<video.xml>` to build.zig. If using vulkan-zig via the Zig package manager, the following also works:
```zig
const vulkan_headers = b.dependency("vulkan_headers", .{});
const vulkan = b.dependency("vulkan_zig", .{
    .registry = vulkan_headers.path("registry/vk.xml"),
    .video = vulkan_headers.path("registry/video.xml"),
}).module("vulkan-zig");
```
If no video registry is passed, the Vulkan Video bindings are not generated, and the relevant definitions must then be supplied by the user. See [platform types](#platform-types) for how this is done.
## Limitations
* Currently, the self-hosted version of Zig's cache-hash API is not yet ready for usage, which means that the bindings are regenerated every time an executable is built.
* vulkan-zig has as of yet no functionality for selecting feature levels and extensions when generating bindings. This is because when an extension is promoted to Vulkan core, its fields and commands are renamed to lose the extension's author tag (for example, VkSemaphoreWaitFlagsKHR was renamed to VkSemaphoreWaitFlags when it was promoted from an extension to Vulkan 1.2 core). This leads to inconsistencies when only items from up to a certain feature level are included, as these promoted items then need to re-gain a tag.
## Example
A partial implementation of https://vulkan-tutorial.com is implemented in [examples/triangle.zig](examples/triangle.zig). This example can be run by executing `zig build run-triangle` in vulkan-zig's root.
## See also
* Implementation of https://vulkan-tutorial.com using `@cImport`'ed bindings: https://github.com/andrewrk/zig-vulkan-triangle.
* Alternative binding generator: https://github.com/SpexGuy/Zig-Vulkan-Headers
* Zig bindings for GLFW: https://github.com/hexops/mach-glfw
* With vulkan-zig integration example: https://github.com/hexops/mach-glfw-vulkan-example
* Advanced shader compilation: https://github.com/Games-by-Mason/shader_compiler

build.zig, 157 lines changed

@@ -1,66 +1,107 @@
const std = @import("std");
const vkgen = @import("generator/index.zig");
const Step = std.build.Step;
const Builder = std.build.Builder;
pub fn build(b: *std.Build) void { pub const ResourceGenStep = struct {
const target = b.standardTargetOptions(.{}); step: Step,
const optimize = b.standardOptimizeOption(.{}); shader_step: *vkgen.ShaderCompileStep,
const maybe_registry = b.option(std.Build.LazyPath, "registry", "Set the path to the Vulkan registry (vk.xml)"); builder: *Builder,
const maybe_video = b.option(std.Build.LazyPath, "video", "Set the path to the Vulkan Video registry (video.xml)"); package: std.build.Pkg,
const test_step = b.step("test", "Run all the tests"); resources: std.ArrayList(u8),
const root_module = b.createModule(.{ pub fn init(builder: *Builder, out: []const u8) *ResourceGenStep {
.root_source_file = b.path("src/main.zig"), const self = builder.allocator.create(ResourceGenStep) catch unreachable;
.target = target, const full_out_path = std.fs.path.join(builder.allocator, &[_][]const u8{
.optimize = optimize, builder.build_root,
}); builder.cache_root,
out,
}) catch unreachable;
// Using the package manager, this artifact can be obtained by the user self.* = .{
// through `b.dependency(<name in build.zig.zon>, .{}).artifact("vulkan-zig-generator")`. .step = Step.init(.Custom, "resources", builder.allocator, make),
// with that, the user need only `.addArg("path/to/vk.xml")`, and then obtain .shader_step = vkgen.ShaderCompileStep.init(builder, &[_][]const u8{"glslc", "--target-env=vulkan1.2"}),
// a file source to the generated code with `.addOutputArg("vk.zig")` .builder = builder,
const generator_exe = b.addExecutable(.{ .package = .{
.name = "vulkan-zig-generator", .name = "resources",
.root_module = root_module, .path = full_out_path,
}); .dependencies = null,
b.installArtifact(generator_exe); },
.resources = std.ArrayList(u8).init(builder.allocator),
};
// Or they can skip all that, and just make sure to pass `.registry = "path/to/vk.xml"` to `b.dependency`, self.step.dependOn(&self.shader_step.step);
// and then obtain the module directly via `.module("vulkan-zig")`. return self;
if (maybe_registry) |registry| {
const vk_generate_cmd = b.addRunArtifact(generator_exe);
if (maybe_video) |video| {
vk_generate_cmd.addArg("--video");
vk_generate_cmd.addFileArg(video);
}
vk_generate_cmd.addFileArg(registry);
const vk_zig = vk_generate_cmd.addOutputFileArg("vk.zig");
const vk_zig_module = b.addModule("vulkan-zig", .{
.root_source_file = vk_zig,
});
// Also install vk.zig, if passed.
const vk_zig_install_step = b.addInstallFile(vk_zig, "src/vk.zig");
b.getInstallStep().dependOn(&vk_zig_install_step.step);
// And run tests on this vk.zig too.
// This test needs to be an object so that vulkan-zig can import types from the root.
// It does not need to run anyway.
const ref_all_decls_test = b.addObject(.{
.name = "ref-all-decls-test",
.root_module = b.createModule(.{
.root_source_file = b.path("test/ref_all_decls.zig"),
.target = target,
.optimize = optimize,
}),
});
ref_all_decls_test.root_module.addImport("vulkan", vk_zig_module);
test_step.dependOn(&ref_all_decls_test.step);
}
const test_target = b.addTest(.{ .root_module = root_module }); fn renderPath(path: []const u8, writer: anytype) void {
test_step.dependOn(&b.addRunArtifact(test_target).step); const separators = &[_]u8{ std.fs.path.sep_windows, std.fs.path.sep_posix };
var i: usize = 0;
while (std.mem.indexOfAnyPos(u8, path, i, separators)) |j| {
writer.writeAll(path[i..j]) catch unreachable;
switch (std.fs.path.sep) {
std.fs.path.sep_windows => writer.writeAll("\\\\") catch unreachable,
std.fs.path.sep_posix => writer.writeByte(std.fs.path.sep_posix) catch unreachable,
else => unreachable,
}
i = j + 1;
}
writer.writeAll(path[i..]) catch unreachable;
}
pub fn addShader(self: *ResourceGenStep, name: []const u8, source: []const u8) void {
const shader_out_path = self.shader_step.add(source);
var writer = self.resources.writer();
writer.print("pub const {s} = @embedFile(\"", .{name}) catch unreachable;
renderPath(shader_out_path, writer);
writer.writeAll("\");\n") catch unreachable;
}
fn make(step: *Step) !void {
const self = @fieldParentPtr(ResourceGenStep, "step", step);
const cwd = std.fs.cwd();
const dir = std.fs.path.dirname(self.package.path).?;
try cwd.makePath(dir);
try cwd.writeFile(self.package.path, self.resources.items);
}
};
pub fn build(b: *Builder) void {
var test_step = b.step("test", "Run all the tests");
test_step.dependOn(&b.addTest("generator/index.zig").step);
const target = b.standardTargetOptions(.{});
const mode = b.standardReleaseOptions();
const generator_exe = b.addExecutable("vulkan-zig-generator", "generator/main.zig");
generator_exe.setTarget(target);
generator_exe.setBuildMode(mode);
generator_exe.install();
const triangle_exe = b.addExecutable("triangle", "examples/triangle.zig");
triangle_exe.setTarget(target);
triangle_exe.setBuildMode(mode);
triangle_exe.install();
triangle_exe.linkLibC();
triangle_exe.linkSystemLibrary("glfw");
const vk_xml_path = b.option([]const u8, "vulkan-registry", "Override the path to the Vulkan registry") orelse "examples/vk.xml";
const gen = vkgen.VkGenerateStep.init(b, vk_xml_path, "vk.zig");
triangle_exe.step.dependOn(&gen.step);
triangle_exe.addPackage(gen.package);
const res = ResourceGenStep.init(b, "resources.zig");
res.addShader("triangle_vert", "examples/shaders/triangle.vert");
res.addShader("triangle_frag", "examples/shaders/triangle.frag");
triangle_exe.step.dependOn(&res.step);
triangle_exe.addPackage(res.package);
const triangle_run_cmd = triangle_exe.run();
triangle_run_cmd.step.dependOn(b.getInstallStep());
const triangle_run_step = b.step("run-triangle", "Run the triangle example");
triangle_run_step.dependOn(&triangle_run_cmd.step);
}


@@ -1,12 +0,0 @@
.{
.name = .vulkan,
.fingerprint = 0xbe155a03c72db6af,
.version = "0.0.0",
.minimum_zig_version = "0.15.1",
.paths = .{
"build.zig",
"LICENSE",
"README.md",
"src",
},
}


@@ -1,100 +0,0 @@
const std = @import("std");
const vkgen = @import("vulkan_zig");
pub fn build(b: *std.Build) void {
const target = b.standardTargetOptions(.{});
const optimize = b.standardOptimizeOption(.{});
const maybe_override_registry = b.option([]const u8, "override-registry", "Override the path to the Vulkan registry used for the examples");
const use_zig_shaders = b.option(bool, "zig-shader", "Use Zig shaders instead of GLSL") orelse false;
const registry = b.dependency("vulkan_headers", .{}).path("registry/vk.xml");
const triangle_exe = b.addExecutable(.{
.name = "triangle",
.root_module = b.createModule(.{
.root_source_file = b.path("triangle.zig"),
.target = target,
.link_libc = true,
.optimize = optimize,
}),
// TODO: Remove this once x86_64 is stable
.use_llvm = true,
});
b.installArtifact(triangle_exe);
triangle_exe.linkSystemLibrary("glfw");
const registry_path: std.Build.LazyPath = if (maybe_override_registry) |override_registry|
.{ .cwd_relative = override_registry }
else
registry;
const vulkan = b.dependency("vulkan_zig", .{
.registry = registry_path,
}).module("vulkan-zig");
triangle_exe.root_module.addImport("vulkan", vulkan);
if (use_zig_shaders) {
const spirv_target = b.resolveTargetQuery(.{
.cpu_arch = .spirv32,
.os_tag = .vulkan,
.cpu_model = .{ .explicit = &std.Target.spirv.cpu.vulkan_v1_2 },
.ofmt = .spirv,
});
const vert_spv = b.addObject(.{
.name = "vertex_shader",
.root_module = b.createModule(.{
.root_source_file = b.path("shaders/vertex.zig"),
.target = spirv_target,
}),
.use_llvm = false,
});
triangle_exe.root_module.addAnonymousImport(
"vertex_shader",
.{ .root_source_file = vert_spv.getEmittedBin() },
);
const frag_spv = b.addObject(.{
.name = "fragment_shader",
.root_module = b.createModule(.{
.root_source_file = b.path("shaders/fragment.zig"),
.target = spirv_target,
}),
.use_llvm = false,
});
triangle_exe.root_module.addAnonymousImport(
"fragment_shader",
.{ .root_source_file = frag_spv.getEmittedBin() },
);
} else {
const vert_cmd = b.addSystemCommand(&.{
"glslc",
"--target-env=vulkan1.2",
"-o",
});
const vert_spv = vert_cmd.addOutputFileArg("vert.spv");
vert_cmd.addFileArg(b.path("shaders/triangle.vert"));
triangle_exe.root_module.addAnonymousImport("vertex_shader", .{
.root_source_file = vert_spv,
});
const frag_cmd = b.addSystemCommand(&.{
"glslc",
"--target-env=vulkan1.2",
"-o",
});
const frag_spv = frag_cmd.addOutputFileArg("frag.spv");
frag_cmd.addFileArg(b.path("shaders/triangle.frag"));
triangle_exe.root_module.addAnonymousImport("fragment_shader", .{
.root_source_file = frag_spv,
});
}
const triangle_run_cmd = b.addRunArtifact(triangle_exe);
triangle_run_cmd.step.dependOn(b.getInstallStep());
const triangle_run_step = b.step("run-triangle", "Run the triangle example");
triangle_run_step.dependOn(&triangle_run_cmd.step);
}


@@ -1,15 +0,0 @@
.{
.name = .vulkan_zig_examples,
.fingerprint = 0x60508bcca14cfc6d,
.version = "0.1.0",
.dependencies = .{
.vulkan_zig = .{
.path = "..",
},
.vulkan_headers = .{
.url = "https://github.com/KhronosGroup/Vulkan-Headers/archive/v1.3.283.tar.gz",
.hash = "N-V-__8AAAkkoQGn5z1yoNVrwqZfnYmZp8AZ5CJgoHRMQI0c",
},
},
.paths = .{""},
}


@@ -1,31 +1,12 @@
const c = @cImport({
pub usingnamespace @cImport({
@cDefine("GLFW_INCLUDE_NONE", {});
@cInclude("GLFW/glfw3.h");
});
const vk = @import("vulkan");
// Re-export the GLFW things that we need
pub const GLFW_TRUE = c.GLFW_TRUE;
pub const GLFW_FALSE = c.GLFW_FALSE;
pub const GLFW_CLIENT_API = c.GLFW_CLIENT_API;
pub const GLFW_NO_API = c.GLFW_NO_API;
pub const GLFWwindow = c.GLFWwindow;
pub const glfwInit = c.glfwInit;
pub const glfwTerminate = c.glfwTerminate;
pub const glfwVulkanSupported = c.glfwVulkanSupported;
pub const glfwWindowHint = c.glfwWindowHint;
pub const glfwCreateWindow = c.glfwCreateWindow;
pub const glfwDestroyWindow = c.glfwDestroyWindow;
pub const glfwWindowShouldClose = c.glfwWindowShouldClose;
pub const glfwGetRequiredInstanceExtensions = c.glfwGetRequiredInstanceExtensions;
pub const glfwGetFramebufferSize = c.glfwGetFramebufferSize;
pub const glfwPollEvents = c.glfwPollEvents;
// usually the GLFW vulkan functions are exported if Vulkan is included,
// but since that's not the case here, they are manually imported.
pub extern fn glfwGetInstanceProcAddress(instance: vk.Instance, procname: [*:0]const u8) vk.PfnVoidFunction;
pub extern fn glfwGetPhysicalDevicePresentationSupport(instance: vk.Instance, pdev: vk.PhysicalDevice, queuefamily: u32) c_int;


@@ -3,144 +3,156 @@ const vk = @import("vulkan");
const c = @import("c.zig");
const Allocator = std.mem.Allocator;
const required_device_extensions = [_][*:0]const u8{vk.extensions.khr_swapchain.name}; const required_device_extensions = [_][]const u8{vk.extension_info.khr_swapchain.name};
/// There are 3 levels of bindings in vulkan-zig: const BaseDispatch = vk.BaseWrapper(&.{
/// - The Dispatch types (vk.BaseDispatch, vk.InstanceDispatch, vk.DeviceDispatch) .createInstance,
/// are "plain" structs which just contain the function pointers for a particular });
/// object.
/// - The Wrapper types (vk.Basewrapper, vk.InstanceWrapper, vk.DeviceWrapper) contains
/// the Dispatch type, as well as Ziggified Vulkan functions - these return Zig errors,
/// etc.
/// - The Proxy types (vk.InstanceProxy, vk.DeviceProxy, vk.CommandBufferProxy,
/// vk.QueueProxy) contain a pointer to a Wrapper and also contain the object's handle.
/// Calling Ziggified functions on these types automatically passes the handle as
/// the first parameter of each function. Note that this type accepts a pointer to
/// a wrapper struct as there is a problem with LLVM where embedding function pointers
/// and object pointer in the same struct leads to missed optimizations. If the wrapper
/// member is a pointer, LLVM will try to optimize it as any other vtable.
/// The wrappers contain
const BaseWrapper = vk.BaseWrapper;
const InstanceWrapper = vk.InstanceWrapper;
const DeviceWrapper = vk.DeviceWrapper;
const Instance = vk.InstanceProxy; const InstanceDispatch = vk.InstanceWrapper(&.{
const Device = vk.DeviceProxy; .destroyInstance,
.createDevice,
.destroySurfaceKHR,
.enumeratePhysicalDevices,
.getPhysicalDeviceProperties,
.enumerateDeviceExtensionProperties,
.getPhysicalDeviceSurfaceFormatsKHR,
.getPhysicalDeviceSurfacePresentModesKHR,
.getPhysicalDeviceSurfaceCapabilitiesKHR,
.getPhysicalDeviceQueueFamilyProperties,
.getPhysicalDeviceSurfaceSupportKHR,
.getPhysicalDeviceMemoryProperties,
.getDeviceProcAddr,
});
const DeviceDispatch = vk.DeviceWrapper(&.{
.destroyDevice,
.getDeviceQueue,
.createSemaphore,
.createFence,
.createImageView,
.destroyImageView,
.destroySemaphore,
.destroyFence,
.getSwapchainImagesKHR,
.createSwapchainKHR,
.destroySwapchainKHR,
.acquireNextImageKHR,
.deviceWaitIdle,
.waitForFences,
.resetFences,
.queueSubmit,
.queuePresentKHR,
.createCommandPool,
.destroyCommandPool,
.allocateCommandBuffers,
.freeCommandBuffers,
.queueWaitIdle,
.createShaderModule,
.destroyShaderModule,
.createPipelineLayout,
.destroyPipelineLayout,
.createRenderPass,
.destroyRenderPass,
.createGraphicsPipelines,
.destroyPipeline,
.createFramebuffer,
.destroyFramebuffer,
.beginCommandBuffer,
.endCommandBuffer,
.allocateMemory,
.freeMemory,
.createBuffer,
.destroyBuffer,
.getBufferMemoryRequirements,
.mapMemory,
.unmapMemory,
.bindBufferMemory,
.cmdBeginRenderPass,
.cmdEndRenderPass,
.cmdBindPipeline,
.cmdDraw,
.cmdSetViewport,
.cmdSetScissor,
.cmdBindVertexBuffers,
.cmdCopyBuffer,
});
pub const GraphicsContext = struct { pub const GraphicsContext = struct {
pub const CommandBuffer = vk.CommandBufferProxy; vkb: BaseDispatch,
vki: InstanceDispatch,
vkd: DeviceDispatch,
allocator: Allocator, instance: vk.Instance,
vkb: BaseWrapper,
instance: Instance,
debug_messenger: vk.DebugUtilsMessengerEXT,
surface: vk.SurfaceKHR, surface: vk.SurfaceKHR,
pdev: vk.PhysicalDevice,
props: vk.PhysicalDeviceProperties,
mem_props: vk.PhysicalDeviceMemoryProperties,
dev: Device, dev: vk.Device,
graphics_queue: Queue,
present_queue: Queue,
pub fn init(allocator: Allocator, app_name: [*:0]const u8, window: *c.GLFWwindow) !GraphicsContext { pub fn init(allocator: *Allocator, app_name: [*:0]const u8, window: *c.GLFWwindow) !GraphicsContext {
var self: GraphicsContext = undefined;
self.allocator = allocator; self.vkb = try BaseDispatch.load(c.glfwGetInstanceProcAddress);
self.vkb = BaseWrapper.load(c.glfwGetInstanceProcAddress);
var extension_names: std.ArrayList([*:0]const u8) = .empty;
defer extension_names.deinit(allocator);
try extension_names.append(allocator, vk.extensions.ext_debug_utils.name);
// the following extensions are to support vulkan in mac os
// see https://github.com/glfw/glfw/issues/2335
try extension_names.append(allocator, vk.extensions.khr_portability_enumeration.name);
try extension_names.append(allocator, vk.extensions.khr_get_physical_device_properties_2.name);
var glfw_exts_count: u32 = 0;
const glfw_exts = c.glfwGetRequiredInstanceExtensions(&glfw_exts_count);
try extension_names.appendSlice(allocator, @ptrCast(glfw_exts[0..glfw_exts_count]));
const instance = try self.vkb.createInstance(&.{ const app_info = vk.ApplicationInfo{
.p_application_info = &.{ .p_application_name = app_name,
.p_application_name = app_name, .application_version = vk.makeApiVersion(0, 0, 0, 0),
.application_version = @bitCast(vk.makeApiVersion(0, 0, 0, 0)), .p_engine_name = app_name,
.p_engine_name = app_name, .engine_version = vk.makeApiVersion(0, 0, 0, 0),
.engine_version = @bitCast(vk.makeApiVersion(0, 0, 0, 0)), .api_version = vk.API_VERSION_1_2,
.api_version = @bitCast(vk.API_VERSION_1_2), };
},
.enabled_extension_count = @intCast(extension_names.items.len), self.instance = try self.vkb.createInstance(.{
.pp_enabled_extension_names = extension_names.items.ptr, .flags = .{},
// enumerate_portability_bit_khr to support vulkan in mac os .p_application_info = &app_info,
// see https://github.com/glfw/glfw/issues/2335 .enabled_layer_count = 0,
.flags = .{ .enumerate_portability_bit_khr = true }, .pp_enabled_layer_names = undefined,
.enabled_extension_count = glfw_exts_count,
.pp_enabled_extension_names = @ptrCast([*]const [*:0]const u8, glfw_exts),
}, null); }, null);
const vki = try allocator.create(InstanceWrapper); self.vki = try InstanceDispatch.load(self.instance, c.glfwGetInstanceProcAddress);
errdefer allocator.destroy(vki); errdefer self.vki.destroyInstance(self.instance, null);
vki.* = InstanceWrapper.load(instance, self.vkb.dispatch.vkGetInstanceProcAddr.?);
self.instance = Instance.init(instance, vki);
errdefer self.instance.destroyInstance(null);
self.debug_messenger = try self.instance.createDebugUtilsMessengerEXT(&.{
.message_severity = .{
//.verbose_bit_ext = true,
//.info_bit_ext = true,
.warning_bit_ext = true,
.error_bit_ext = true,
},
.message_type = .{
.general_bit_ext = true,
.validation_bit_ext = true,
.performance_bit_ext = true,
},
.pfn_user_callback = &debugUtilsMessengerCallback,
.p_user_data = null,
}, null);
self.surface = try createSurface(self.instance, window); self.surface = try createSurface(self.instance, window);
errdefer self.instance.destroySurfaceKHR(self.surface, null); errdefer self.vki.destroySurfaceKHR(self.instance, self.surface, null);
const candidate = try pickPhysicalDevice(self.instance, allocator, self.surface); const candidate = try pickPhysicalDevice(self.vki, self.instance, allocator, self.surface);
self.pdev = candidate.pdev;
self.props = candidate.props;
self.dev = try initializeCandidate(self.vki, candidate);
self.vkd = try DeviceDispatch.load(self.dev, self.vki.dispatch.vkGetDeviceProcAddr);
errdefer self.vkd.destroyDevice(self.dev, null);
const dev = try initializeCandidate(self.instance, candidate); self.graphics_queue = Queue.init(self.vkd, self.dev, candidate.queues.graphics_family);
self.present_queue = Queue.init(self.vkd, self.dev, candidate.queues.graphics_family);
const vkd = try allocator.create(DeviceWrapper); self.mem_props = self.vki.getPhysicalDeviceMemoryProperties(self.pdev);
errdefer allocator.destroy(vkd);
vkd.* = DeviceWrapper.load(dev, self.instance.wrapper.dispatch.vkGetDeviceProcAddr.?);
self.dev = Device.init(dev, vkd);
errdefer self.dev.destroyDevice(null);
self.graphics_queue = Queue.init(self.dev, candidate.queues.graphics_family);
self.present_queue = Queue.init(self.dev, candidate.queues.present_family);
self.mem_props = self.instance.getPhysicalDeviceMemoryProperties(self.pdev);
return self;
}
pub fn deinit(self: GraphicsContext) void {
self.dev.destroyDevice(null); self.vkd.destroyDevice(self.dev, null);
self.instance.destroySurfaceKHR(self.surface, null); self.vki.destroySurfaceKHR(self.instance, self.surface, null);
self.instance.destroyDebugUtilsMessengerEXT(self.debug_messenger, null); self.vki.destroyInstance(self.instance, null);
self.instance.destroyInstance(null);
// Don't forget to free the tables to prevent a memory leak.
self.allocator.destroy(self.dev.wrapper);
self.allocator.destroy(self.instance.wrapper);
} }
pub fn deviceName(self: *const GraphicsContext) []const u8 { pub fn deviceName(self: GraphicsContext) []const u8 {
return std.mem.sliceTo(&self.props.device_name, 0); const len = std.mem.indexOfScalar(u8, &self.props.device_name, 0).?;
return self.props.device_name[0..len];
} }
pub fn findMemoryTypeIndex(self: GraphicsContext, memory_type_bits: u32, flags: vk.MemoryPropertyFlags) !u32 {
for (self.mem_props.memory_types[0..self.mem_props.memory_type_count], 0..) |mem_type, i| { for (self.mem_props.memory_types[0..self.mem_props.memory_type_count]) |mem_type, i| {
if (memory_type_bits & (@as(u32, 1) << @truncate(i)) != 0 and mem_type.property_flags.contains(flags)) { if (memory_type_bits & (@as(u32, 1) << @truncate(u5, i)) != 0 and mem_type.property_flags.contains(flags)) {
return @truncate(i); return @truncate(u32, i);
} }
} }
@@ -148,7 +160,7 @@ pub const GraphicsContext = struct {
} }
pub fn allocate(self: GraphicsContext, requirements: vk.MemoryRequirements, flags: vk.MemoryPropertyFlags) !vk.DeviceMemory { pub fn allocate(self: GraphicsContext, requirements: vk.MemoryRequirements, flags: vk.MemoryPropertyFlags) !vk.DeviceMemory {
return try self.dev.allocateMemory(&.{ return try self.vkd.allocateMemory(self.dev, .{
.allocation_size = requirements.size, .allocation_size = requirements.size,
.memory_type_index = try self.findMemoryTypeIndex(requirements.memory_type_bits, flags), .memory_type_index = try self.findMemoryTypeIndex(requirements.memory_type_bits, flags),
}, null); }, null);
@@ -159,32 +171,34 @@ pub const Queue = struct {
handle: vk.Queue, handle: vk.Queue,
family: u32, family: u32,
fn init(device: Device, family: u32) Queue { fn init(vkd: DeviceDispatch, dev: vk.Device, family: u32) Queue {
return .{ return .{
.handle = device.getDeviceQueue(family, 0), .handle = vkd.getDeviceQueue(dev, family, 0),
.family = family, .family = family,
}; };
} }
}; };
fn createSurface(instance: Instance, window: *c.GLFWwindow) !vk.SurfaceKHR { fn createSurface(instance: vk.Instance, window: *c.GLFWwindow) !vk.SurfaceKHR {
var surface: vk.SurfaceKHR = undefined; var surface: vk.SurfaceKHR = undefined;
if (c.glfwCreateWindowSurface(instance.handle, window, null, &surface) != .success) { if (c.glfwCreateWindowSurface(instance, window, null, &surface) != .success) {
return error.SurfaceInitFailed; return error.SurfaceInitFailed;
} }
return surface; return surface;
} }
fn initializeCandidate(instance: Instance, candidate: DeviceCandidate) !vk.Device { fn initializeCandidate(vki: InstanceDispatch, candidate: DeviceCandidate) !vk.Device {
const priority = [_]f32{1}; const priority = [_]f32{1};
const qci = [_]vk.DeviceQueueCreateInfo{ const qci = [_]vk.DeviceQueueCreateInfo{
.{ .{
.flags = .{},
.queue_family_index = candidate.queues.graphics_family, .queue_family_index = candidate.queues.graphics_family,
.queue_count = 1, .queue_count = 1,
.p_queue_priorities = &priority, .p_queue_priorities = &priority,
}, },
.{ .{
.flags = .{},
.queue_family_index = candidate.queues.present_family, .queue_family_index = candidate.queues.present_family,
.queue_count = 1, .queue_count = 1,
.p_queue_priorities = &priority, .p_queue_priorities = &priority,
@@ -196,11 +210,15 @@ fn initializeCandidate(instance: Instance, candidate: DeviceCandidate) !vk.Devic
else else
2; 2;
return try instance.createDevice(candidate.pdev, &.{ return try vki.createDevice(candidate.pdev, .{
.flags = .{},
.queue_create_info_count = queue_count, .queue_create_info_count = queue_count,
.p_queue_create_infos = &qci, .p_queue_create_infos = &qci,
.enabled_layer_count = 0,
.pp_enabled_layer_names = undefined,
.enabled_extension_count = required_device_extensions.len, .enabled_extension_count = required_device_extensions.len,
.pp_enabled_extension_names = @ptrCast(&required_device_extensions), .pp_enabled_extension_names = @ptrCast([*]const [*:0]const u8, &required_device_extensions),
.p_enabled_features = null,
}, null); }, null);
} }
@@ -215,27 +233,22 @@ const QueueAllocation = struct {
present_family: u32, present_family: u32,
}; };
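// Logs messages from the validation layers; registered as .pfn_user_callback in the
// createDebugUtilsMessengerEXT call in GraphicsContext.init above.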
fn debugUtilsMessengerCallback(severity: vk.DebugUtilsMessageSeverityFlagsEXT, msg_type: vk.DebugUtilsMessageTypeFlagsEXT, callback_data: ?*const vk.DebugUtilsMessengerCallbackDataEXT, _: ?*anyopaque) callconv(.c) vk.Bool32 {
const severity_str = if (severity.verbose_bit_ext) "verbose" else if (severity.info_bit_ext) "info" else if (severity.warning_bit_ext) "warning" else if (severity.error_bit_ext) "error" else "unknown";
const type_str = if (msg_type.general_bit_ext) "general" else if (msg_type.validation_bit_ext) "validation" else if (msg_type.performance_bit_ext) "performance" else if (msg_type.device_address_binding_bit_ext) "device addr" else "unknown";
const message: [*c]const u8 = if (callback_data) |cb_data| cb_data.p_message else "NO MESSAGE!";
std.debug.print("[{s}][{s}]. Message:\n {s}\n", .{ severity_str, type_str, message });
return .false;
}
fn pickPhysicalDevice( fn pickPhysicalDevice(
instance: Instance, vki: InstanceDispatch,
allocator: Allocator, instance: vk.Instance,
allocator: *Allocator,
surface: vk.SurfaceKHR, surface: vk.SurfaceKHR,
) !DeviceCandidate { ) !DeviceCandidate {
const pdevs = try instance.enumeratePhysicalDevicesAlloc(allocator); var device_count: u32 = undefined;
_ = try vki.enumeratePhysicalDevices(instance, &device_count, null);
const pdevs = try allocator.alloc(vk.PhysicalDevice, device_count);
defer allocator.free(pdevs); defer allocator.free(pdevs);
_ = try vki.enumeratePhysicalDevices(instance, &device_count, pdevs.ptr);
for (pdevs) |pdev| { for (pdevs) |pdev| {
if (try checkSuitable(instance, pdev, allocator, surface)) |candidate| { if (try checkSuitable(vki, pdev, allocator, surface)) |candidate| {
return candidate; return candidate;
} }
} }
@@ -244,21 +257,22 @@ fn pickPhysicalDevice(
} }
fn checkSuitable( fn checkSuitable(
instance: Instance, vki: InstanceDispatch,
pdev: vk.PhysicalDevice, pdev: vk.PhysicalDevice,
allocator: Allocator, allocator: *Allocator,
surface: vk.SurfaceKHR, surface: vk.SurfaceKHR,
) !?DeviceCandidate { ) !?DeviceCandidate {
if (!try checkExtensionSupport(instance, pdev, allocator)) { const props = vki.getPhysicalDeviceProperties(pdev);
if (!try checkExtensionSupport(vki, pdev, allocator)) {
return null; return null;
} }
if (!try checkSurfaceSupport(instance, pdev, surface)) { if (!try checkSurfaceSupport(vki, pdev, surface)) {
return null; return null;
} }
if (try allocateQueues(instance, pdev, allocator, surface)) |allocation| { if (try allocateQueues(vki, pdev, allocator, surface)) |allocation| {
const props = instance.getPhysicalDeviceProperties(pdev);
return DeviceCandidate{ return DeviceCandidate{
.pdev = pdev, .pdev = pdev,
.props = props, .props = props,
@@ -269,21 +283,25 @@ fn checkSuitable(
return null; return null;
} }
fn allocateQueues(instance: Instance, pdev: vk.PhysicalDevice, allocator: Allocator, surface: vk.SurfaceKHR) !?QueueAllocation { fn allocateQueues(vki: InstanceDispatch, pdev: vk.PhysicalDevice, allocator: *Allocator, surface: vk.SurfaceKHR) !?QueueAllocation {
const families = try instance.getPhysicalDeviceQueueFamilyPropertiesAlloc(pdev, allocator); var family_count: u32 = undefined;
vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, null);
const families = try allocator.alloc(vk.QueueFamilyProperties, family_count);
defer allocator.free(families); defer allocator.free(families);
vki.getPhysicalDeviceQueueFamilyProperties(pdev, &family_count, families.ptr);
var graphics_family: ?u32 = null; var graphics_family: ?u32 = null;
var present_family: ?u32 = null; var present_family: ?u32 = null;
for (families, 0..) |properties, i| { for (families) |properties, i| {
const family: u32 = @intCast(i); const family = @intCast(u32, i);
if (graphics_family == null and properties.queue_flags.graphics_bit) { if (graphics_family == null and properties.queue_flags.graphics_bit) {
graphics_family = family; graphics_family = family;
} }
if (present_family == null and (try instance.getPhysicalDeviceSurfaceSupportKHR(pdev, family, surface)) == .true) { if (present_family == null and (try vki.getPhysicalDeviceSurfaceSupportKHR(pdev, family, surface)) == vk.TRUE) {
present_family = family; present_family = family;
} }
} }
@@ -298,27 +316,34 @@ fn allocateQueues(instance: Instance, pdev: vk.PhysicalDevice, allocator: Alloca
return null; return null;
} }
fn checkSurfaceSupport(instance: Instance, pdev: vk.PhysicalDevice, surface: vk.SurfaceKHR) !bool { fn checkSurfaceSupport(vki: InstanceDispatch, pdev: vk.PhysicalDevice, surface: vk.SurfaceKHR) !bool {
var format_count: u32 = undefined; var format_count: u32 = undefined;
_ = try instance.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &format_count, null); _ = try vki.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &format_count, null);
var present_mode_count: u32 = undefined; var present_mode_count: u32 = undefined;
_ = try instance.getPhysicalDeviceSurfacePresentModesKHR(pdev, surface, &present_mode_count, null); _ = try vki.getPhysicalDeviceSurfacePresentModesKHR(pdev, surface, &present_mode_count, null);
return format_count > 0 and present_mode_count > 0; return format_count > 0 and present_mode_count > 0;
} }
fn checkExtensionSupport( fn checkExtensionSupport(
instance: Instance, vki: InstanceDispatch,
pdev: vk.PhysicalDevice, pdev: vk.PhysicalDevice,
allocator: Allocator, allocator: *Allocator,
) !bool { ) !bool {
const propsv = try instance.enumerateDeviceExtensionPropertiesAlloc(pdev, null, allocator); var count: u32 = undefined;
_ = try vki.enumerateDeviceExtensionProperties(pdev, null, &count, null);
const propsv = try allocator.alloc(vk.ExtensionProperties, count);
defer allocator.free(propsv); defer allocator.free(propsv);
_ = try vki.enumerateDeviceExtensionProperties(pdev, null, &count, propsv.ptr);
for (required_device_extensions) |ext| { for (required_device_extensions) |ext| {
for (propsv) |props| { for (propsv) |props| {
if (std.mem.eql(u8, std.mem.span(ext), std.mem.sliceTo(&props.extension_name, 0))) { const len = std.mem.indexOfScalar(u8, &props.extension_name, 0).?;
const prop_ext_name = props.extension_name[0..len];
if (std.mem.eql(u8, ext, prop_ext_name)) {
break; break;
} }
} else { } else {

View File

@@ -1,12 +0,0 @@
const std = @import("std");
const gpu = std.gpu;
extern const v_color: @Vector(3, f32) addrspace(.input);
extern var f_color: @Vector(4, f32) addrspace(.output);
export fn main() callconv(.spirv_fragment) void {
gpu.location(&v_color, 0);
gpu.location(&f_color, 0);
f_color = .{ v_color[0], v_color[1], v_color[2], 1.0 };
}

View File

@@ -1,16 +0,0 @@
const std = @import("std");
const gpu = std.gpu;
extern const a_pos: @Vector(2, f32) addrspace(.input);
extern const a_color: @Vector(3, f32) addrspace(.input);
extern var v_color: @Vector(3, f32) addrspace(.output);
export fn main() callconv(.spirv_vertex) void {
gpu.location(&a_pos, 0);
gpu.location(&a_color, 1);
gpu.location(&v_color, 0);
gpu.position_out.* = .{ a_pos[0], a_pos[1], 0.0, 1.0 };
v_color = a_color;
}

View File

@@ -10,7 +10,7 @@ pub const Swapchain = struct {
}; };
gc: *const GraphicsContext, gc: *const GraphicsContext,
allocator: Allocator, allocator: *Allocator,
surface_format: vk.SurfaceFormatKHR, surface_format: vk.SurfaceFormatKHR,
present_mode: vk.PresentModeKHR, present_mode: vk.PresentModeKHR,
@@ -21,12 +21,12 @@ pub const Swapchain = struct {
image_index: u32, image_index: u32,
next_image_acquired: vk.Semaphore, next_image_acquired: vk.Semaphore,
pub fn init(gc: *const GraphicsContext, allocator: Allocator, extent: vk.Extent2D) !Swapchain { pub fn init(gc: *const GraphicsContext, allocator: *Allocator, extent: vk.Extent2D) !Swapchain {
return try initRecycle(gc, allocator, extent, .null_handle); return try initRecycle(gc, allocator, extent, .null_handle);
} }
pub fn initRecycle(gc: *const GraphicsContext, allocator: Allocator, extent: vk.Extent2D, old_handle: vk.SwapchainKHR) !Swapchain { pub fn initRecycle(gc: *const GraphicsContext, allocator: *Allocator, extent: vk.Extent2D, old_handle: vk.SwapchainKHR) !Swapchain {
const caps = try gc.instance.getPhysicalDeviceSurfaceCapabilitiesKHR(gc.pdev, gc.surface); const caps = try gc.vki.getPhysicalDeviceSurfaceCapabilitiesKHR(gc.pdev, gc.surface);
const actual_extent = findActualExtent(caps, extent); const actual_extent = findActualExtent(caps, extent);
if (actual_extent.width == 0 or actual_extent.height == 0) { if (actual_extent.width == 0 or actual_extent.height == 0) {
return error.InvalidSurfaceDimensions; return error.InvalidSurfaceDimensions;
@@ -37,16 +37,14 @@ pub const Swapchain = struct {
var image_count = caps.min_image_count + 1; var image_count = caps.min_image_count + 1;
if (caps.max_image_count > 0) { if (caps.max_image_count > 0) {
image_count = @min(image_count, caps.max_image_count); image_count = std.math.min(image_count, caps.max_image_count);
} }
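// If the graphics and present queues come from different families, the swapchain images must be
// shared between them (.concurrent); otherwise .exclusive avoids the sharing overhead.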
const concurrent = gc.graphics_queue.family != gc.present_queue.family;
const qfi = [_]u32{ gc.graphics_queue.family, gc.present_queue.family }; const qfi = [_]u32{ gc.graphics_queue.family, gc.present_queue.family };
const sharing_mode: vk.SharingMode = if (gc.graphics_queue.family != gc.present_queue.family)
.concurrent
else
.exclusive;
const handle = gc.dev.createSwapchainKHR(&.{ const handle = try gc.vkd.createSwapchainKHR(gc.dev, .{
.flags = .{},
.surface = gc.surface, .surface = gc.surface,
.min_image_count = image_count, .min_image_count = image_count,
.image_format = surface_format.format, .image_format = surface_format.format,
@@ -54,39 +52,30 @@ pub const Swapchain = struct {
.image_extent = actual_extent, .image_extent = actual_extent,
.image_array_layers = 1, .image_array_layers = 1,
.image_usage = .{ .color_attachment_bit = true, .transfer_dst_bit = true }, .image_usage = .{ .color_attachment_bit = true, .transfer_dst_bit = true },
.image_sharing_mode = sharing_mode, .image_sharing_mode = if (concurrent) .concurrent else .exclusive,
.queue_family_index_count = qfi.len, .queue_family_index_count = qfi.len,
.p_queue_family_indices = &qfi, .p_queue_family_indices = &qfi,
.pre_transform = caps.current_transform, .pre_transform = caps.current_transform,
.composite_alpha = .{ .opaque_bit_khr = true }, .composite_alpha = .{ .opaque_bit_khr = true },
.present_mode = present_mode, .present_mode = present_mode,
.clipped = .true, .clipped = vk.TRUE,
.old_swapchain = old_handle, .old_swapchain = old_handle,
}, null) catch { }, null);
return error.SwapchainCreationFailed; errdefer gc.vkd.destroySwapchainKHR(gc.dev, handle, null);
};
errdefer gc.dev.destroySwapchainKHR(handle, null);
if (old_handle != .null_handle) { if (old_handle != .null_handle) {
// Apparently, the old swapchain handle still needs to be destroyed after recreating. // Apparently, the old swapchain handle still needs to be destroyed after recreating.
gc.dev.destroySwapchainKHR(old_handle, null); gc.vkd.destroySwapchainKHR(gc.dev, old_handle, null);
} }
const swap_images = try initSwapchainImages(gc, handle, surface_format.format, allocator); const swap_images = try initSwapchainImages(gc, handle, surface_format.format, allocator);
errdefer { errdefer for (swap_images) |si| si.deinit(gc);
for (swap_images) |si| si.deinit(gc);
allocator.free(swap_images);
}
var next_image_acquired = try gc.dev.createSemaphore(&.{}, null); var next_image_acquired = try gc.vkd.createSemaphore(gc.dev, .{ .flags = .{} }, null);
errdefer gc.dev.destroySemaphore(next_image_acquired, null); errdefer gc.vkd.destroySemaphore(gc.dev, next_image_acquired, null);
const result = try gc.dev.acquireNextImageKHR(handle, std.math.maxInt(u64), next_image_acquired, .null_handle); const result = try gc.vkd.acquireNextImageKHR(gc.dev, handle, std.math.maxInt(u64), next_image_acquired, .null_handle);
// even with a .suboptimal_khr we can still go on to present if (result.result != .success) {
// if we error even for .suboptimal_khr the example will crash and segfault
// on resize, since even the recreated swapchain can be suboptimal during a
// resize.
if (result.result == .not_ready or result.result == .timeout) {
return error.ImageAcquireFailed; return error.ImageAcquireFailed;
} }
@@ -106,8 +95,7 @@ pub const Swapchain = struct {
fn deinitExceptSwapchain(self: Swapchain) void { fn deinitExceptSwapchain(self: Swapchain) void {
for (self.swap_images) |si| si.deinit(self.gc); for (self.swap_images) |si| si.deinit(self.gc);
self.allocator.free(self.swap_images); self.gc.vkd.destroySemaphore(self.gc.dev, self.next_image_acquired, null);
self.gc.dev.destroySemaphore(self.next_image_acquired, null);
} }
pub fn waitForAllFences(self: Swapchain) !void { pub fn waitForAllFences(self: Swapchain) !void {
@@ -115,10 +103,8 @@ pub const Swapchain = struct {
} }
pub fn deinit(self: Swapchain) void { pub fn deinit(self: Swapchain) void {
// if we have no swapchain, none of these should exist and we can just return
if (self.handle == .null_handle) return;
self.deinitExceptSwapchain(); self.deinitExceptSwapchain();
self.gc.dev.destroySwapchainKHR(self.handle, null); self.gc.vkd.destroySwapchainKHR(self.gc.dev, self.handle, null);
} }
pub fn recreate(self: *Swapchain, new_extent: vk.Extent2D) !void { pub fn recreate(self: *Swapchain, new_extent: vk.Extent2D) !void {
@@ -126,18 +112,7 @@ pub const Swapchain = struct {
const allocator = self.allocator; const allocator = self.allocator;
const old_handle = self.handle; const old_handle = self.handle;
self.deinitExceptSwapchain(); self.deinitExceptSwapchain();
// set the current handle to .null_handle to signal that the current swapchain no longer needs to be self.* = try initRecycle(gc, allocator, new_extent, old_handle);
// de-initialized if we fail to recreate it.
self.handle = .null_handle;
self.* = initRecycle(gc, allocator, new_extent, old_handle) catch |err| switch (err) {
error.SwapchainCreationFailed => {
// we failed while recreating so our current handle still exists,
// but we won't destroy it in the deferred deinit of this object.
gc.dev.destroySwapchainKHR(old_handle, null);
return err;
},
else => return err,
};
} }
pub fn currentImage(self: Swapchain) vk.Image { pub fn currentImage(self: Swapchain) vk.Image {
@@ -169,31 +144,33 @@ pub const Swapchain = struct {
// Step 1: Make sure the current frame has finished rendering // Step 1: Make sure the current frame has finished rendering
const current = self.currentSwapImage(); const current = self.currentSwapImage();
try current.waitForFence(self.gc); try current.waitForFence(self.gc);
try self.gc.dev.resetFences(1, @ptrCast(&current.frame_fence)); try self.gc.vkd.resetFences(self.gc.dev, 1, @ptrCast([*]const vk.Fence, &current.frame_fence));
// Step 2: Submit the command buffer // Step 2: Submit the command buffer
const wait_stage = [_]vk.PipelineStageFlags{.{ .top_of_pipe_bit = true }}; const wait_stage = [_]vk.PipelineStageFlags{.{ .top_of_pipe_bit = true }};
try self.gc.dev.queueSubmit(self.gc.graphics_queue.handle, 1, &[_]vk.SubmitInfo{.{ try self.gc.vkd.queueSubmit(self.gc.graphics_queue.handle, 1, &[_]vk.SubmitInfo{.{
.wait_semaphore_count = 1, .wait_semaphore_count = 1,
.p_wait_semaphores = @ptrCast(&current.image_acquired), .p_wait_semaphores = @ptrCast([*]const vk.Semaphore, &current.image_acquired),
.p_wait_dst_stage_mask = &wait_stage, .p_wait_dst_stage_mask = &wait_stage,
.command_buffer_count = 1, .command_buffer_count = 1,
.p_command_buffers = @ptrCast(&cmdbuf), .p_command_buffers = @ptrCast([*]const vk.CommandBuffer, &cmdbuf),
.signal_semaphore_count = 1, .signal_semaphore_count = 1,
.p_signal_semaphores = @ptrCast(&current.render_finished), .p_signal_semaphores = @ptrCast([*]const vk.Semaphore, &current.render_finished),
}}, current.frame_fence); }}, current.frame_fence);
// Step 3: Present the current frame // Step 3: Present the current frame
_ = try self.gc.dev.queuePresentKHR(self.gc.present_queue.handle, &.{ _ = try self.gc.vkd.queuePresentKHR(self.gc.present_queue.handle, .{
.wait_semaphore_count = 1, .wait_semaphore_count = 1,
.p_wait_semaphores = @ptrCast(&current.render_finished), .p_wait_semaphores = @ptrCast([*]const vk.Semaphore, &current.render_finished),
.swapchain_count = 1, .swapchain_count = 1,
.p_swapchains = @ptrCast(&self.handle), .p_swapchains = @ptrCast([*]const vk.SwapchainKHR, &self.handle),
.p_image_indices = @ptrCast(&self.image_index), .p_image_indices = @ptrCast([*]const u32, &self.image_index),
.p_results = null,
}); });
// Step 4: Acquire next frame // Step 4: Acquire next frame
const result = try self.gc.dev.acquireNextImageKHR( const result = try self.gc.vkd.acquireNextImageKHR(
self.gc.dev,
self.handle, self.handle,
std.math.maxInt(u64), std.math.maxInt(u64),
self.next_image_acquired, self.next_image_acquired,
@@ -219,7 +196,8 @@ const SwapImage = struct {
frame_fence: vk.Fence, frame_fence: vk.Fence,
fn init(gc: *const GraphicsContext, image: vk.Image, format: vk.Format) !SwapImage { fn init(gc: *const GraphicsContext, image: vk.Image, format: vk.Format) !SwapImage {
const view = try gc.dev.createImageView(&.{ const view = try gc.vkd.createImageView(gc.dev, .{
.flags = .{},
.image = image, .image = image,
.view_type = .@"2d", .view_type = .@"2d",
.format = format, .format = format,
@@ -232,16 +210,16 @@ const SwapImage = struct {
.layer_count = 1, .layer_count = 1,
}, },
}, null); }, null);
errdefer gc.dev.destroyImageView(view, null); errdefer gc.vkd.destroyImageView(gc.dev, view, null);
const image_acquired = try gc.dev.createSemaphore(&.{}, null); const image_acquired = try gc.vkd.createSemaphore(gc.dev, .{ .flags = .{} }, null);
errdefer gc.dev.destroySemaphore(image_acquired, null); errdefer gc.vkd.destroySemaphore(gc.dev, image_acquired, null);
const render_finished = try gc.dev.createSemaphore(&.{}, null); const render_finished = try gc.vkd.createSemaphore(gc.dev, .{ .flags = .{} }, null);
errdefer gc.dev.destroySemaphore(render_finished, null); errdefer gc.vkd.destroySemaphore(gc.dev, render_finished, null);
const frame_fence = try gc.dev.createFence(&.{ .flags = .{ .signaled_bit = true } }, null); const frame_fence = try gc.vkd.createFence(gc.dev, .{ .flags = .{ .signaled_bit = true } }, null);
errdefer gc.dev.destroyFence(frame_fence, null); errdefer gc.vkd.destroyFence(gc.dev, frame_fence, null);
return SwapImage{ return SwapImage{
.image = image, .image = image,
@@ -254,22 +232,25 @@ const SwapImage = struct {
fn deinit(self: SwapImage, gc: *const GraphicsContext) void { fn deinit(self: SwapImage, gc: *const GraphicsContext) void {
self.waitForFence(gc) catch return; self.waitForFence(gc) catch return;
gc.dev.destroyImageView(self.view, null); gc.vkd.destroyImageView(gc.dev, self.view, null);
gc.dev.destroySemaphore(self.image_acquired, null); gc.vkd.destroySemaphore(gc.dev, self.image_acquired, null);
gc.dev.destroySemaphore(self.render_finished, null); gc.vkd.destroySemaphore(gc.dev, self.render_finished, null);
gc.dev.destroyFence(self.frame_fence, null); gc.vkd.destroyFence(gc.dev, self.frame_fence, null);
} }
fn waitForFence(self: SwapImage, gc: *const GraphicsContext) !void { fn waitForFence(self: SwapImage, gc: *const GraphicsContext) !void {
_ = try gc.dev.waitForFences(1, @ptrCast(&self.frame_fence), .true, std.math.maxInt(u64)); _ = try gc.vkd.waitForFences(gc.dev, 1, @ptrCast([*]const vk.Fence, &self.frame_fence), vk.TRUE, std.math.maxInt(u64));
} }
}; };
fn initSwapchainImages(gc: *const GraphicsContext, swapchain: vk.SwapchainKHR, format: vk.Format, allocator: Allocator) ![]SwapImage { fn initSwapchainImages(gc: *const GraphicsContext, swapchain: vk.SwapchainKHR, format: vk.Format, allocator: *Allocator) ![]SwapImage {
const images = try gc.dev.getSwapchainImagesAllocKHR(swapchain, allocator); var count: u32 = undefined;
_ = try gc.vkd.getSwapchainImagesKHR(gc.dev, swapchain, &count, null);
const images = try allocator.alloc(vk.Image, count);
defer allocator.free(images); defer allocator.free(images);
_ = try gc.vkd.getSwapchainImagesKHR(gc.dev, swapchain, &count, images.ptr);
const swap_images = try allocator.alloc(SwapImage, images.len); const swap_images = try allocator.alloc(SwapImage, count);
errdefer allocator.free(swap_images); errdefer allocator.free(swap_images);
var i: usize = 0; var i: usize = 0;
@@ -283,14 +264,17 @@ fn initSwapchainImages(gc: *const GraphicsContext, swapchain: vk.SwapchainKHR, f
return swap_images; return swap_images;
} }
fn findSurfaceFormat(gc: *const GraphicsContext, allocator: Allocator) !vk.SurfaceFormatKHR { fn findSurfaceFormat(gc: *const GraphicsContext, allocator: *Allocator) !vk.SurfaceFormatKHR {
const preferred = vk.SurfaceFormatKHR{ const preferred = vk.SurfaceFormatKHR{
.format = .b8g8r8a8_srgb, .format = .b8g8r8a8_srgb,
.color_space = .srgb_nonlinear_khr, .color_space = .srgb_nonlinear_khr,
}; };
const surface_formats = try gc.instance.getPhysicalDeviceSurfaceFormatsAllocKHR(gc.pdev, gc.surface, allocator); var count: u32 = undefined;
_ = try gc.vki.getPhysicalDeviceSurfaceFormatsKHR(gc.pdev, gc.surface, &count, null);
const surface_formats = try allocator.alloc(vk.SurfaceFormatKHR, count);
defer allocator.free(surface_formats); defer allocator.free(surface_formats);
_ = try gc.vki.getPhysicalDeviceSurfaceFormatsKHR(gc.pdev, gc.surface, &count, surface_formats.ptr);
for (surface_formats) |sfmt| { for (surface_formats) |sfmt| {
if (std.meta.eql(sfmt, preferred)) { if (std.meta.eql(sfmt, preferred)) {
@@ -301,9 +285,12 @@ fn findSurfaceFormat(gc: *const GraphicsContext, allocator: Allocator) !vk.Surfa
return surface_formats[0]; // There must always be at least one supported surface format return surface_formats[0]; // There must always be at least one supported surface format
} }
fn findPresentMode(gc: *const GraphicsContext, allocator: Allocator) !vk.PresentModeKHR { fn findPresentMode(gc: *const GraphicsContext, allocator: *Allocator) !vk.PresentModeKHR {
const present_modes = try gc.instance.getPhysicalDeviceSurfacePresentModesAllocKHR(gc.pdev, gc.surface, allocator); var count: u32 = undefined;
_ = try gc.vki.getPhysicalDeviceSurfacePresentModesKHR(gc.pdev, gc.surface, &count, null);
const present_modes = try allocator.alloc(vk.PresentModeKHR, count);
defer allocator.free(present_modes); defer allocator.free(present_modes);
_ = try gc.vki.getPhysicalDeviceSurfacePresentModesKHR(gc.pdev, gc.surface, &count, present_modes.ptr);
const preferred = [_]vk.PresentModeKHR{ const preferred = [_]vk.PresentModeKHR{
.mailbox_khr, .mailbox_khr,

View File

@@ -1,13 +1,11 @@
const std = @import("std"); const std = @import("std");
const vk = @import("vulkan"); const vk = @import("vulkan");
const c = @import("c.zig"); const c = @import("c.zig");
const resources = @import("resources");
const GraphicsContext = @import("graphics_context.zig").GraphicsContext; const GraphicsContext = @import("graphics_context.zig").GraphicsContext;
const Swapchain = @import("swapchain.zig").Swapchain; const Swapchain = @import("swapchain.zig").Swapchain;
const Allocator = std.mem.Allocator; const Allocator = std.mem.Allocator;
const vert_spv align(@alignOf(u32)) = @embedFile("vertex_shader").*;
const frag_spv align(@alignOf(u32)) = @embedFile("fragment_shader").*;
const app_name = "vulkan-zig triangle example"; const app_name = "vulkan-zig triangle example";
const Vertex = struct { const Vertex = struct {
@@ -22,13 +20,13 @@ const Vertex = struct {
.binding = 0, .binding = 0,
.location = 0, .location = 0,
.format = .r32g32_sfloat, .format = .r32g32_sfloat,
.offset = @offsetOf(Vertex, "pos"), .offset = @byteOffsetOf(Vertex, "pos"),
}, },
.{ .{
.binding = 0, .binding = 0,
.location = 1, .location = 1,
.format = .r32g32b32_sfloat, .format = .r32g32b32_sfloat,
.offset = @offsetOf(Vertex, "color"), .offset = @byteOffsetOf(Vertex, "color"),
}, },
}; };
@@ -46,85 +44,65 @@ pub fn main() !void {
if (c.glfwInit() != c.GLFW_TRUE) return error.GlfwInitFailed; if (c.glfwInit() != c.GLFW_TRUE) return error.GlfwInitFailed;
defer c.glfwTerminate(); defer c.glfwTerminate();
if (c.glfwVulkanSupported() != c.GLFW_TRUE) {
std.log.err("GLFW could not find libvulkan", .{});
return error.NoVulkan;
}
var extent = vk.Extent2D{ .width = 800, .height = 600 }; var extent = vk.Extent2D{ .width = 800, .height = 600 };
c.glfwWindowHint(c.GLFW_CLIENT_API, c.GLFW_NO_API); c.glfwWindowHint(c.GLFW_CLIENT_API, c.GLFW_NO_API);
const window = c.glfwCreateWindow( const window = c.glfwCreateWindow(
@intCast(extent.width), @intCast(c_int, extent.width),
@intCast(extent.height), @intCast(c_int, extent.height),
app_name, app_name,
null, null,
null, null,
) orelse return error.WindowInitFailed; ) orelse return error.WindowInitFailed;
defer c.glfwDestroyWindow(window); defer c.glfwDestroyWindow(window);
// According to the GLFW docs: const allocator = std.heap.page_allocator;
//
// > Window systems put limits on window sizes. Very large or very small window dimensions
// > may be overridden by the window system on creation. Check the actual size after creation.
// -- https://www.glfw.org/docs/3.3/group__window.html#ga3555a418df92ad53f917597fe2f64aeb
//
// This happens in practice, for example, when using Wayland with a scaling factor that is not a
// divisor of the initial window size (see https://github.com/Snektron/vulkan-zig/pull/192).
// To fix it, just fetch the actual size here, after the windowing system has had the time to
// update the window.
extent.width, extent.height = blk: {
var w: c_int = undefined;
var h: c_int = undefined;
c.glfwGetFramebufferSize(window, &w, &h);
break :blk .{ @intCast(w), @intCast(h) };
};
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
defer _ = gpa.deinit();
const allocator = gpa.allocator();
const gc = try GraphicsContext.init(allocator, app_name, window); const gc = try GraphicsContext.init(allocator, app_name, window);
defer gc.deinit(); defer gc.deinit();
std.log.debug("Using device: {s}", .{gc.deviceName()}); std.debug.print("Using device: {s}\n", .{gc.deviceName()});
var swapchain = try Swapchain.init(&gc, allocator, extent); var swapchain = try Swapchain.init(&gc, allocator, extent);
defer swapchain.deinit(); defer swapchain.deinit();
const pipeline_layout = try gc.dev.createPipelineLayout(&.{ const pipeline_layout = try gc.vkd.createPipelineLayout(gc.dev, .{
.flags = .{}, .flags = .{},
.set_layout_count = 0, .set_layout_count = 0,
.p_set_layouts = undefined, .p_set_layouts = undefined,
.push_constant_range_count = 0, .push_constant_range_count = 0,
.p_push_constant_ranges = undefined, .p_push_constant_ranges = undefined,
}, null); }, null);
defer gc.dev.destroyPipelineLayout(pipeline_layout, null); defer gc.vkd.destroyPipelineLayout(gc.dev, pipeline_layout, null);
const render_pass = try createRenderPass(&gc, swapchain); const render_pass = try createRenderPass(&gc, swapchain);
defer gc.dev.destroyRenderPass(render_pass, null); defer gc.vkd.destroyRenderPass(gc.dev, render_pass, null);
const pipeline = try createPipeline(&gc, pipeline_layout, render_pass); var pipeline = try createPipeline(&gc, pipeline_layout, render_pass);
defer gc.dev.destroyPipeline(pipeline, null); defer gc.vkd.destroyPipeline(gc.dev, pipeline, null);
var framebuffers = try createFramebuffers(&gc, allocator, render_pass, swapchain); var framebuffers = try createFramebuffers(&gc, allocator, render_pass, swapchain);
defer destroyFramebuffers(&gc, allocator, framebuffers); defer destroyFramebuffers(&gc, allocator, framebuffers);
const pool = try gc.dev.createCommandPool(&.{ const pool = try gc.vkd.createCommandPool(gc.dev, .{
.flags = .{},
.queue_family_index = gc.graphics_queue.family, .queue_family_index = gc.graphics_queue.family,
}, null); }, null);
defer gc.dev.destroyCommandPool(pool, null); defer gc.vkd.destroyCommandPool(gc.dev, pool, null);
const buffer = try gc.dev.createBuffer(&.{ const buffer = try gc.vkd.createBuffer(gc.dev, .{
.flags = .{},
.size = @sizeOf(@TypeOf(vertices)), .size = @sizeOf(@TypeOf(vertices)),
.usage = .{ .transfer_dst_bit = true, .vertex_buffer_bit = true }, .usage = .{ .transfer_dst_bit = true, .vertex_buffer_bit = true },
.sharing_mode = .exclusive, .sharing_mode = .exclusive,
.queue_family_index_count = 0,
.p_queue_family_indices = undefined,
}, null); }, null);
defer gc.dev.destroyBuffer(buffer, null); defer gc.vkd.destroyBuffer(gc.dev, buffer, null);
const mem_reqs = gc.dev.getBufferMemoryRequirements(buffer); const mem_reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, buffer);
const memory = try gc.allocate(mem_reqs, .{ .device_local_bit = true }); const memory = try gc.allocate(mem_reqs, .{ .device_local_bit = true });
defer gc.dev.freeMemory(memory, null); defer gc.vkd.freeMemory(gc.dev, memory, null);
try gc.dev.bindBufferMemory(buffer, memory, 0); try gc.vkd.bindBufferMemory(gc.dev, buffer, memory, 0);
try uploadVertices(&gc, pool, buffer); try uploadVertices(&gc, pool, buffer);
@@ -140,23 +118,20 @@ pub fn main() !void {
); );
defer destroyCommandBuffers(&gc, pool, allocator, cmdbufs); defer destroyCommandBuffers(&gc, pool, allocator, cmdbufs);
var state: Swapchain.PresentState = .optimal;
while (c.glfwWindowShouldClose(window) == c.GLFW_FALSE) { while (c.glfwWindowShouldClose(window) == c.GLFW_FALSE) {
var w: c_int = undefined;
var h: c_int = undefined;
c.glfwGetFramebufferSize(window, &w, &h);
// Don't present or resize swapchain while the window is minimized
if (w == 0 or h == 0) {
c.glfwPollEvents();
continue;
}
const cmdbuf = cmdbufs[swapchain.image_index]; const cmdbuf = cmdbufs[swapchain.image_index];
if (state == .suboptimal or extent.width != @as(u32, @intCast(w)) or extent.height != @as(u32, @intCast(h))) { const state = swapchain.present(cmdbuf) catch |err| switch (err) {
extent.width = @intCast(w); error.OutOfDateKHR => Swapchain.PresentState.suboptimal,
extent.height = @intCast(h); else => |narrow| return narrow,
};
if (state == .suboptimal) {
var w: c_int = undefined;
var h: c_int = undefined;
c.glfwGetWindowSize(window, &w, &h);
extent.width = @intCast(u32, w);
extent.height = @intCast(u32, h);
try swapchain.recreate(extent); try swapchain.recreate(extent);
destroyFramebuffers(&gc, allocator, framebuffers); destroyFramebuffers(&gc, allocator, framebuffers);
@@ -174,54 +149,53 @@ pub fn main() !void {
framebuffers, framebuffers,
); );
} }
state = swapchain.present(cmdbuf) catch |err| switch (err) {
error.OutOfDateKHR => Swapchain.PresentState.suboptimal,
else => |narrow| return narrow,
};
c.glfwPollEvents(); c.glfwPollEvents();
} }
try swapchain.waitForAllFences(); try swapchain.waitForAllFences();
try gc.dev.deviceWaitIdle();
} }
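// The vertex data is first written into a host-visible staging buffer and then copied into the
// device-local vertex buffer with a one-shot command buffer (see copyBuffer below).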
fn uploadVertices(gc: *const GraphicsContext, pool: vk.CommandPool, buffer: vk.Buffer) !void { fn uploadVertices(gc: *const GraphicsContext, pool: vk.CommandPool, buffer: vk.Buffer) !void {
const staging_buffer = try gc.dev.createBuffer(&.{ const staging_buffer = try gc.vkd.createBuffer(gc.dev, .{
.flags = .{},
.size = @sizeOf(@TypeOf(vertices)), .size = @sizeOf(@TypeOf(vertices)),
.usage = .{ .transfer_src_bit = true }, .usage = .{ .transfer_src_bit = true },
.sharing_mode = .exclusive, .sharing_mode = .exclusive,
.queue_family_index_count = 0,
.p_queue_family_indices = undefined,
}, null); }, null);
defer gc.dev.destroyBuffer(staging_buffer, null); defer gc.vkd.destroyBuffer(gc.dev, staging_buffer, null);
const mem_reqs = gc.dev.getBufferMemoryRequirements(staging_buffer); const mem_reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, staging_buffer);
const staging_memory = try gc.allocate(mem_reqs, .{ .host_visible_bit = true, .host_coherent_bit = true }); const staging_memory = try gc.allocate(mem_reqs, .{ .host_visible_bit = true, .host_coherent_bit = true });
defer gc.dev.freeMemory(staging_memory, null); defer gc.vkd.freeMemory(gc.dev, staging_memory, null);
try gc.dev.bindBufferMemory(staging_buffer, staging_memory, 0); try gc.vkd.bindBufferMemory(gc.dev, staging_buffer, staging_memory, 0);
{ {
const data = try gc.dev.mapMemory(staging_memory, 0, vk.WHOLE_SIZE, .{}); const data = try gc.vkd.mapMemory(gc.dev, staging_memory, 0, vk.WHOLE_SIZE, .{});
defer gc.dev.unmapMemory(staging_memory); defer gc.vkd.unmapMemory(gc.dev, staging_memory);
const gpu_vertices: [*]Vertex = @ptrCast(@alignCast(data)); const gpu_vertices = @ptrCast([*]Vertex, @alignCast(@alignOf(Vertex), data));
@memcpy(gpu_vertices, vertices[0..]); for (vertices) |vertex, i| {
gpu_vertices[i] = vertex;
}
} }
try copyBuffer(gc, pool, buffer, staging_buffer, @sizeOf(@TypeOf(vertices))); try copyBuffer(gc, pool, buffer, staging_buffer, @sizeOf(@TypeOf(vertices)));
} }
fn copyBuffer(gc: *const GraphicsContext, pool: vk.CommandPool, dst: vk.Buffer, src: vk.Buffer, size: vk.DeviceSize) !void { fn copyBuffer(gc: *const GraphicsContext, pool: vk.CommandPool, dst: vk.Buffer, src: vk.Buffer, size: vk.DeviceSize) !void {
var cmdbuf_handle: vk.CommandBuffer = undefined; var cmdbuf: vk.CommandBuffer = undefined;
try gc.dev.allocateCommandBuffers(&.{ try gc.vkd.allocateCommandBuffers(gc.dev, .{
.command_pool = pool, .command_pool = pool,
.level = .primary, .level = .primary,
.command_buffer_count = 1, .command_buffer_count = 1,
}, @ptrCast(&cmdbuf_handle)); }, @ptrCast([*]vk.CommandBuffer, &cmdbuf));
defer gc.dev.freeCommandBuffers(pool, 1, @ptrCast(&cmdbuf_handle)); defer gc.vkd.freeCommandBuffers(gc.dev, pool, 1, @ptrCast([*]const vk.CommandBuffer, &cmdbuf));
const cmdbuf = GraphicsContext.CommandBuffer.init(cmdbuf_handle, gc.dev.wrapper); try gc.vkd.beginCommandBuffer(cmdbuf, .{
try cmdbuf.beginCommandBuffer(&.{
.flags = .{ .one_time_submit_bit = true }, .flags = .{ .one_time_submit_bit = true },
.p_inheritance_info = null,
}); });
const region = vk.BufferCopy{ const region = vk.BufferCopy{
@@ -229,23 +203,27 @@ fn copyBuffer(gc: *const GraphicsContext, pool: vk.CommandPool, dst: vk.Buffer,
.dst_offset = 0, .dst_offset = 0,
.size = size, .size = size,
}; };
cmdbuf.copyBuffer(src, dst, 1, @ptrCast(&region)); gc.vkd.cmdCopyBuffer(cmdbuf, src, dst, 1, @ptrCast([*]const vk.BufferCopy, &region));
try cmdbuf.endCommandBuffer(); try gc.vkd.endCommandBuffer(cmdbuf);
const si = vk.SubmitInfo{ const si = vk.SubmitInfo{
.command_buffer_count = 1, .wait_semaphore_count = 0,
.p_command_buffers = (&cmdbuf.handle)[0..1], .p_wait_semaphores = undefined,
.p_wait_dst_stage_mask = undefined, .p_wait_dst_stage_mask = undefined,
.command_buffer_count = 1,
.p_command_buffers = @ptrCast([*]const vk.CommandBuffer, &cmdbuf),
.signal_semaphore_count = 0,
.p_signal_semaphores = undefined,
}; };
try gc.dev.queueSubmit(gc.graphics_queue.handle, 1, @ptrCast(&si), .null_handle); try gc.vkd.queueSubmit(gc.graphics_queue.handle, 1, @ptrCast([*]const vk.SubmitInfo, &si), .null_handle);
try gc.dev.queueWaitIdle(gc.graphics_queue.handle); try gc.vkd.queueWaitIdle(gc.graphics_queue.handle);
} }
fn createCommandBuffers( fn createCommandBuffers(
gc: *const GraphicsContext, gc: *const GraphicsContext,
pool: vk.CommandPool, pool: vk.CommandPool,
allocator: Allocator, allocator: *Allocator,
buffer: vk.Buffer, buffer: vk.Buffer,
extent: vk.Extent2D, extent: vk.Extent2D,
render_pass: vk.RenderPass, render_pass: vk.RenderPass,
@@ -255,12 +233,12 @@ fn createCommandBuffers(
const cmdbufs = try allocator.alloc(vk.CommandBuffer, framebuffers.len); const cmdbufs = try allocator.alloc(vk.CommandBuffer, framebuffers.len);
errdefer allocator.free(cmdbufs); errdefer allocator.free(cmdbufs);
try gc.dev.allocateCommandBuffers(&.{ try gc.vkd.allocateCommandBuffers(gc.dev, .{
.command_pool = pool, .command_pool = pool,
.level = .primary, .level = .primary,
.command_buffer_count = @intCast(cmdbufs.len), .command_buffer_count = @truncate(u32, cmdbufs.len),
}, cmdbufs.ptr); }, cmdbufs.ptr);
errdefer gc.dev.freeCommandBuffers(pool, @intCast(cmdbufs.len), cmdbufs.ptr); errdefer gc.vkd.freeCommandBuffers(gc.dev, pool, @truncate(u32, cmdbufs.len), cmdbufs.ptr);
const clear = vk.ClearValue{ const clear = vk.ClearValue{
.color = .{ .float_32 = .{ 0, 0, 0, 1 } }, .color = .{ .float_32 = .{ 0, 0, 0, 1 } },
@@ -269,8 +247,8 @@ fn createCommandBuffers(
const viewport = vk.Viewport{ const viewport = vk.Viewport{
.x = 0, .x = 0,
.y = 0, .y = 0,
.width = @floatFromInt(extent.width), .width = @intToFloat(f32, extent.width),
.height = @floatFromInt(extent.height), .height = @intToFloat(f32, extent.height),
.min_depth = 0, .min_depth = 0,
.max_depth = 1, .max_depth = 1,
}; };
@@ -280,55 +258,56 @@ fn createCommandBuffers(
.extent = extent, .extent = extent,
}; };
for (cmdbufs, framebuffers) |cmdbuf, framebuffer| { for (cmdbufs) |cmdbuf, i| {
try gc.dev.beginCommandBuffer(cmdbuf, &.{}); try gc.vkd.beginCommandBuffer(cmdbuf, .{
.flags = .{},
.p_inheritance_info = null,
});
gc.dev.cmdSetViewport(cmdbuf, 0, 1, @ptrCast(&viewport)); gc.vkd.cmdSetViewport(cmdbuf, 0, 1, @ptrCast([*]const vk.Viewport, &viewport));
gc.dev.cmdSetScissor(cmdbuf, 0, 1, @ptrCast(&scissor)); gc.vkd.cmdSetScissor(cmdbuf, 0, 1, @ptrCast([*]const vk.Rect2D, &scissor));
// This needs to be a separate definition - see https://github.com/ziglang/zig/issues/7627. gc.vkd.cmdBeginRenderPass(cmdbuf, .{
const render_area = vk.Rect2D{
.offset = .{ .x = 0, .y = 0 },
.extent = extent,
};
gc.dev.cmdBeginRenderPass(cmdbuf, &.{
.render_pass = render_pass, .render_pass = render_pass,
.framebuffer = framebuffer, .framebuffer = framebuffers[i],
.render_area = render_area, .render_area = .{
.offset = .{ .x = 0, .y = 0 },
.extent = extent,
},
.clear_value_count = 1, .clear_value_count = 1,
.p_clear_values = @ptrCast(&clear), .p_clear_values = @ptrCast([*]const vk.ClearValue, &clear),
}, .@"inline"); }, .@"inline");
gc.dev.cmdBindPipeline(cmdbuf, .graphics, pipeline); gc.vkd.cmdBindPipeline(cmdbuf, .graphics, pipeline);
const offset = [_]vk.DeviceSize{0}; const offset = [_]vk.DeviceSize{0};
gc.dev.cmdBindVertexBuffers(cmdbuf, 0, 1, @ptrCast(&buffer), &offset); gc.vkd.cmdBindVertexBuffers(cmdbuf, 0, 1, @ptrCast([*]const vk.Buffer, &buffer), &offset);
gc.dev.cmdDraw(cmdbuf, vertices.len, 1, 0, 0); gc.vkd.cmdDraw(cmdbuf, vertices.len, 1, 0, 0);
gc.dev.cmdEndRenderPass(cmdbuf); gc.vkd.cmdEndRenderPass(cmdbuf);
try gc.dev.endCommandBuffer(cmdbuf); try gc.vkd.endCommandBuffer(cmdbuf);
} }
return cmdbufs; return cmdbufs;
} }
fn destroyCommandBuffers(gc: *const GraphicsContext, pool: vk.CommandPool, allocator: Allocator, cmdbufs: []vk.CommandBuffer) void { fn destroyCommandBuffers(gc: *const GraphicsContext, pool: vk.CommandPool, allocator: *Allocator, cmdbufs: []vk.CommandBuffer) void {
gc.dev.freeCommandBuffers(pool, @truncate(cmdbufs.len), cmdbufs.ptr); gc.vkd.freeCommandBuffers(gc.dev, pool, @truncate(u32, cmdbufs.len), cmdbufs.ptr);
allocator.free(cmdbufs); allocator.free(cmdbufs);
} }
fn createFramebuffers(gc: *const GraphicsContext, allocator: Allocator, render_pass: vk.RenderPass, swapchain: Swapchain) ![]vk.Framebuffer { fn createFramebuffers(gc: *const GraphicsContext, allocator: *Allocator, render_pass: vk.RenderPass, swapchain: Swapchain) ![]vk.Framebuffer {
const framebuffers = try allocator.alloc(vk.Framebuffer, swapchain.swap_images.len); const framebuffers = try allocator.alloc(vk.Framebuffer, swapchain.swap_images.len);
errdefer allocator.free(framebuffers); errdefer allocator.free(framebuffers);
var i: usize = 0; var i: usize = 0;
errdefer for (framebuffers[0..i]) |fb| gc.dev.destroyFramebuffer(fb, null); errdefer for (framebuffers[0..i]) |fb| gc.vkd.destroyFramebuffer(gc.dev, fb, null);
for (framebuffers) |*fb| { for (framebuffers) |*fb| {
fb.* = try gc.dev.createFramebuffer(&.{ fb.* = try gc.vkd.createFramebuffer(gc.dev, .{
.flags = .{},
.render_pass = render_pass, .render_pass = render_pass,
.attachment_count = 1, .attachment_count = 1,
.p_attachments = @ptrCast(&swapchain.swap_images[i].view), .p_attachments = @ptrCast([*]const vk.ImageView, &swapchain.swap_images[i].view),
.width = swapchain.extent.width, .width = swapchain.extent.width,
.height = swapchain.extent.height, .height = swapchain.extent.height,
.layers = 1, .layers = 1,
@@ -339,20 +318,21 @@ fn createFramebuffers(gc: *const GraphicsContext, allocator: Allocator, render_p
return framebuffers; return framebuffers;
} }
fn destroyFramebuffers(gc: *const GraphicsContext, allocator: Allocator, framebuffers: []const vk.Framebuffer) void { fn destroyFramebuffers(gc: *const GraphicsContext, allocator: *Allocator, framebuffers: []const vk.Framebuffer) void {
for (framebuffers) |fb| gc.dev.destroyFramebuffer(fb, null); for (framebuffers) |fb| gc.vkd.destroyFramebuffer(gc.dev, fb, null);
allocator.free(framebuffers); allocator.free(framebuffers);
} }
fn createRenderPass(gc: *const GraphicsContext, swapchain: Swapchain) !vk.RenderPass { fn createRenderPass(gc: *const GraphicsContext, swapchain: Swapchain) !vk.RenderPass {
const color_attachment = vk.AttachmentDescription{ const color_attachment = vk.AttachmentDescription{
.flags = .{},
.format = swapchain.surface_format.format, .format = swapchain.surface_format.format,
.samples = .{ .@"1_bit" = true }, .samples = .{ .@"1_bit" = true },
.load_op = .clear, .load_op = .clear,
.store_op = .store, .store_op = .store,
.stencil_load_op = .dont_care, .stencil_load_op = .dont_care,
.stencil_store_op = .dont_care, .stencil_store_op = .dont_care,
.initial_layout = .undefined, .initial_layout = .@"undefined",
.final_layout = .present_src_khr, .final_layout = .present_src_khr,
}; };
@@ -362,16 +342,26 @@ fn createRenderPass(gc: *const GraphicsContext, swapchain: Swapchain) !vk.Render
}; };
const subpass = vk.SubpassDescription{ const subpass = vk.SubpassDescription{
.flags = .{},
.pipeline_bind_point = .graphics, .pipeline_bind_point = .graphics,
.input_attachment_count = 0,
.p_input_attachments = undefined,
.color_attachment_count = 1, .color_attachment_count = 1,
.p_color_attachments = @ptrCast(&color_attachment_ref), .p_color_attachments = @ptrCast([*]const vk.AttachmentReference, &color_attachment_ref),
.p_resolve_attachments = null,
.p_depth_stencil_attachment = null,
.preserve_attachment_count = 0,
.p_preserve_attachments = undefined,
}; };
return try gc.dev.createRenderPass(&.{ return try gc.vkd.createRenderPass(gc.dev, .{
.flags = .{},
.attachment_count = 1, .attachment_count = 1,
.p_attachments = @ptrCast(&color_attachment), .p_attachments = @ptrCast([*]const vk.AttachmentDescription, &color_attachment),
.subpass_count = 1, .subpass_count = 1,
.p_subpasses = @ptrCast(&subpass), .p_subpasses = @ptrCast([*]const vk.SubpassDescription, &subpass),
.dependency_count = 0,
.p_dependencies = undefined,
}, null); }, null);
} }
@@ -380,44 +370,53 @@ fn createPipeline(
layout: vk.PipelineLayout, layout: vk.PipelineLayout,
render_pass: vk.RenderPass, render_pass: vk.RenderPass,
) !vk.Pipeline { ) !vk.Pipeline {
const vert = try gc.dev.createShaderModule(&.{ const vert = try gc.vkd.createShaderModule(gc.dev, .{
.code_size = vert_spv.len, .flags = .{},
.p_code = @ptrCast(&vert_spv), .code_size = resources.triangle_vert.len,
.p_code = @ptrCast([*]const u32, resources.triangle_vert),
}, null); }, null);
defer gc.dev.destroyShaderModule(vert, null); defer gc.vkd.destroyShaderModule(gc.dev, vert, null);
const frag = try gc.dev.createShaderModule(&.{ const frag = try gc.vkd.createShaderModule(gc.dev, .{
.code_size = frag_spv.len, .flags = .{},
.p_code = @ptrCast(&frag_spv), .code_size = resources.triangle_frag.len,
.p_code = @ptrCast([*]const u32, resources.triangle_frag),
}, null); }, null);
defer gc.dev.destroyShaderModule(frag, null); defer gc.vkd.destroyShaderModule(gc.dev, frag, null);
const pssci = [_]vk.PipelineShaderStageCreateInfo{ const pssci = [_]vk.PipelineShaderStageCreateInfo{
.{ .{
.flags = .{},
.stage = .{ .vertex_bit = true }, .stage = .{ .vertex_bit = true },
.module = vert, .module = vert,
.p_name = "main", .p_name = "main",
.p_specialization_info = null,
}, },
.{ .{
.flags = .{},
.stage = .{ .fragment_bit = true }, .stage = .{ .fragment_bit = true },
.module = frag, .module = frag,
.p_name = "main", .p_name = "main",
.p_specialization_info = null,
}, },
}; };
const pvisci = vk.PipelineVertexInputStateCreateInfo{ const pvisci = vk.PipelineVertexInputStateCreateInfo{
.flags = .{},
.vertex_binding_description_count = 1, .vertex_binding_description_count = 1,
.p_vertex_binding_descriptions = @ptrCast(&Vertex.binding_description), .p_vertex_binding_descriptions = @ptrCast([*]const vk.VertexInputBindingDescription, &Vertex.binding_description),
.vertex_attribute_description_count = Vertex.attribute_description.len, .vertex_attribute_description_count = Vertex.attribute_description.len,
.p_vertex_attribute_descriptions = &Vertex.attribute_description, .p_vertex_attribute_descriptions = &Vertex.attribute_description,
}; };
const piasci = vk.PipelineInputAssemblyStateCreateInfo{ const piasci = vk.PipelineInputAssemblyStateCreateInfo{
.flags = .{},
.topology = .triangle_list, .topology = .triangle_list,
.primitive_restart_enable = .false, .primitive_restart_enable = vk.FALSE,
}; };
const pvsci = vk.PipelineViewportStateCreateInfo{ const pvsci = vk.PipelineViewportStateCreateInfo{
.flags = .{},
.viewport_count = 1, .viewport_count = 1,
.p_viewports = undefined, // set in createCommandBuffers with cmdSetViewport .p_viewports = undefined, // set in createCommandBuffers with cmdSetViewport
.scissor_count = 1, .scissor_count = 1,
@@ -425,12 +424,13 @@ fn createPipeline(
}; };
const prsci = vk.PipelineRasterizationStateCreateInfo{ const prsci = vk.PipelineRasterizationStateCreateInfo{
.depth_clamp_enable = .false, .flags = .{},
.rasterizer_discard_enable = .false, .depth_clamp_enable = vk.FALSE,
.rasterizer_discard_enable = vk.FALSE,
.polygon_mode = .fill, .polygon_mode = .fill,
.cull_mode = .{ .back_bit = true }, .cull_mode = .{ .back_bit = true },
.front_face = .clockwise, .front_face = .clockwise,
.depth_bias_enable = .false, .depth_bias_enable = vk.FALSE,
.depth_bias_constant_factor = 0, .depth_bias_constant_factor = 0,
.depth_bias_clamp = 0, .depth_bias_clamp = 0,
.depth_bias_slope_factor = 0, .depth_bias_slope_factor = 0,
@@ -438,15 +438,17 @@ fn createPipeline(
}; };
const pmsci = vk.PipelineMultisampleStateCreateInfo{ const pmsci = vk.PipelineMultisampleStateCreateInfo{
.flags = .{},
.rasterization_samples = .{ .@"1_bit" = true }, .rasterization_samples = .{ .@"1_bit" = true },
.sample_shading_enable = .false, .sample_shading_enable = vk.FALSE,
.min_sample_shading = 1, .min_sample_shading = 1,
.alpha_to_coverage_enable = .false, .p_sample_mask = null,
.alpha_to_one_enable = .false, .alpha_to_coverage_enable = vk.FALSE,
.alpha_to_one_enable = vk.FALSE,
}; };
const pcbas = vk.PipelineColorBlendAttachmentState{ const pcbas = vk.PipelineColorBlendAttachmentState{
.blend_enable = .false, .blend_enable = vk.FALSE,
.src_color_blend_factor = .one, .src_color_blend_factor = .one,
.dst_color_blend_factor = .zero, .dst_color_blend_factor = .zero,
.color_blend_op = .add, .color_blend_op = .add,
@@ -457,10 +459,11 @@ fn createPipeline(
}; };
const pcbsci = vk.PipelineColorBlendStateCreateInfo{ const pcbsci = vk.PipelineColorBlendStateCreateInfo{
.logic_op_enable = .false, .flags = .{},
.logic_op_enable = vk.FALSE,
.logic_op = .copy, .logic_op = .copy,
.attachment_count = 1, .attachment_count = 1,
.p_attachments = @ptrCast(&pcbas), .p_attachments = @ptrCast([*]const vk.PipelineColorBlendAttachmentState, &pcbas),
.blend_constants = [_]f32{ 0, 0, 0, 0 }, .blend_constants = [_]f32{ 0, 0, 0, 0 },
}; };
@@ -492,12 +495,13 @@ fn createPipeline(
}; };
var pipeline: vk.Pipeline = undefined; var pipeline: vk.Pipeline = undefined;
_ = try gc.dev.createGraphicsPipelines( _ = try gc.vkd.createGraphicsPipelines(
gc.dev,
.null_handle, .null_handle,
1, 1,
@ptrCast(&gpci), @ptrCast([*]const vk.GraphicsPipelineCreateInfo, &gpci),
null, null,
@ptrCast(&pipeline), @ptrCast([*]vk.Pipeline, &pipeline),
); );
return pipeline; return pipeline;
} }

examples/vk.xml Normal file, 16562 lines (file diff suppressed because it is too large)

View File

@@ -0,0 +1,75 @@
const std = @import("std");
const path = std.fs.path;
const Builder = std.build.Builder;
const Step = std.build.Step;
/// Utility functionality to help with compiling shaders from build.zig.
/// Invokes glslc (or another shader compiler passed to `init`) for each shader
/// added via `add`.
pub const ShaderCompileStep = struct {
/// Structure representing a shader to be compiled.
const Shader = struct {
/// The path to the shader, relative to the current build root.
source_path: []const u8,
/// The full output path where the compiled shader binary is placed.
full_out_path: []const u8,
};
step: Step,
builder: *Builder,
/// The command and optional arguments used to invoke the shader compiler.
glslc_cmd: []const []const u8,
/// List of shaders that are to be compiled.
shaders: std.ArrayList(Shader),
/// Create a ShaderCompileStep for `builder`. When this step is invoked by the build
/// system, `<glslc_cmd...> <shader_source> -o <output path>` is invoked for each shader.
pub fn init(builder: *Builder, glslc_cmd: []const []const u8) *ShaderCompileStep {
const self = builder.allocator.create(ShaderCompileStep) catch unreachable;
self.* = .{
.step = Step.init(.Custom, "shader-compile", builder.allocator, make),
.builder = builder,
.glslc_cmd = glslc_cmd,
.shaders = std.ArrayList(Shader).init(builder.allocator),
};
return self;
}
/// Add a shader to be compiled. `src` is the shader source path, relative to the project root.
/// Returns the full path where the compiled binary will be stored upon successful compilation.
/// This path can then be used to include the binary in an executable, for example by passing it
/// to @embedFile via an additional generated file (see the usage sketch after this struct).
pub fn add(self: *ShaderCompileStep, src: []const u8) []const u8 {
const full_out_path = path.join(self.builder.allocator, &[_][]const u8{
self.builder.build_root,
self.builder.cache_root,
"shaders",
src,
}) catch unreachable;
self.shaders.append(.{ .source_path = src, .full_out_path = full_out_path }) catch unreachable;
return full_out_path;
}
/// Internal build function.
fn make(step: *Step) !void {
const self = @fieldParentPtr(ShaderCompileStep, "step", step);
const cwd = std.fs.cwd();
const cmd = try self.builder.allocator.alloc([]const u8, self.glslc_cmd.len + 3);
for (self.glslc_cmd) |part, i| {
cmd[i] = part;
}
cmd[cmd.len - 2] = "-o";
for (self.shaders.items) |shader| {
const dir = path.dirname(shader.full_out_path).?;
try cwd.makePath(dir);
cmd[cmd.len - 3] = shader.source_path;
cmd[cmd.len - 1] = shader.full_out_path;
try self.builder.spawnChild(cmd);
}
}
};
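// Illustrative usage sketch, not part of this diff: one way to wire ShaderCompileStep into a
// build.zig, assuming glslc is on PATH, a hypothetical shaders/triangle.vert source, and that
// `builder` (*Builder) and `exe` (the executable step) already exist in the surrounding build
// function. The returned paths could then reach the executable e.g. through a generated file
// that wraps them in @embedFile, as the doc comment on `add` describes.
const shader_step = ShaderCompileStep.init(builder, &[_][]const u8{ "glslc", "--target-env=vulkan1.2" });
const triangle_vert_path = shader_step.add("shaders/triangle.vert");
const triangle_frag_path = shader_step.add("shaders/triangle.frag");
exe.step.dependOn(&shader_step.step);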

View File

@@ -7,9 +7,10 @@ pub fn isZigPrimitiveType(name: []const u8) bool {
for (name[1..]) |c| { for (name[1..]) |c| {
switch (c) { switch (c) {
'0'...'9' => {}, '0'...'9' => {},
else => break, else => return false,
} }
} else return true; }
return true;
} }
const primitives = [_][]const u8{ const primitives = [_][]const u8{
@@ -23,6 +24,7 @@ pub fn isZigPrimitiveType(name: []const u8) bool {
"f32", "f32",
"f64", "f64",
"f128", "f128",
"c_longdouble",
"noreturn", "noreturn",
"type", "type",
"anyerror", "anyerror",
@@ -34,13 +36,6 @@ pub fn isZigPrimitiveType(name: []const u8) bool {
"c_ulong", "c_ulong",
"c_longlong", "c_longlong",
"c_ulonglong", "c_ulonglong",
"c_longdouble",
// Removed in stage 2 in https://github.com/ziglang/zig/commit/05cf44933d753f7a5a53ab289ea60fd43761de57,
// but these are still invalid identifiers in stage 1.
"undefined",
"true",
"false",
"null",
}; };
for (primitives) |reserved| { for (primitives) |reserved| {
@@ -52,8 +47,17 @@ pub fn isZigPrimitiveType(name: []const u8) bool {
return false; return false;
} }
pub fn writeIdentifier(w: *std.Io.Writer, id: []const u8) !void { fn needZigEscape(name: []const u8) bool {
try w.print("{f}", .{std.zig.fmtId(id)}); return !std.zig.fmt.isValidId(name) or isZigPrimitiveType(name);
}
pub fn writeIdentifier(out: anytype, id: []const u8) !void {
// https://github.com/ziglang/zig/issues/2897
if (isZigPrimitiveType(id)) {
try out.print("{s}_", .{id});
} else {
try out.print("{}", .{std.zig.fmtId(id)});
}
} }
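// Illustrative sketch, not part of this diff: expected behavior of the 0.8-era writeIdentifier
// above, assuming std is imported at the top of this file. Primitive type names get a trailing
// underscore (the ziglang/zig#2897 workaround); ordinary identifiers pass through std.zig.fmtId.
test "writeIdentifier escapes Zig primitive type names" {
    var buf = std.ArrayList(u8).init(std.testing.allocator);
    defer buf.deinit();
    try writeIdentifier(buf.writer(), "type");
    try std.testing.expectEqualStrings("type_", buf.items);
    buf.items.len = 0;
    try writeIdentifier(buf.writer(), "imageType");
    try std.testing.expectEqualStrings("imageType", buf.items);
}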
pub const CaseStyle = enum { pub const CaseStyle = enum {
@@ -121,12 +125,12 @@ pub const SegmentIterator = struct {
pub const IdRenderer = struct { pub const IdRenderer = struct {
tags: []const []const u8, tags: []const []const u8,
text_cache: std.Io.Writer.Allocating, text_cache: std.ArrayList(u8),
pub fn init(allocator: Allocator, tags: []const []const u8) IdRenderer { pub fn init(allocator: *Allocator, tags: []const []const u8) IdRenderer {
return .{ return .{
.tags = tags, .tags = tags,
.text_cache = .init(allocator), .text_cache = std.ArrayList(u8).init(allocator),
}; };
} }
@@ -137,24 +141,25 @@ pub const IdRenderer = struct {
fn renderSnake(self: *IdRenderer, screaming: bool, id: []const u8, tag: ?[]const u8) !void { fn renderSnake(self: *IdRenderer, screaming: bool, id: []const u8, tag: ?[]const u8) !void {
var it = SegmentIterator.init(id); var it = SegmentIterator.init(id);
var first = true; var first = true;
const transform = if (screaming) std.ascii.toUpper else std.ascii.toLower;
while (it.next()) |segment| { while (it.next()) |segment| {
if (first) { if (first) {
first = false; first = false;
} else { } else {
try self.text_cache.writer.writeByte('_'); try self.text_cache.append('_');
} }
for (segment) |c| { for (segment) |c| {
try self.text_cache.writer.writeByte(if (screaming) std.ascii.toUpper(c) else std.ascii.toLower(c)); try self.text_cache.append(transform(c));
} }
} }
if (tag) |name| { if (tag) |name| {
try self.text_cache.writer.writeByte('_'); try self.text_cache.append('_');
for (name) |c| { for (name) |c| {
try self.text_cache.writer.writeByte(if (screaming) std.ascii.toUpper(c) else std.ascii.toLower(c)); try self.text_cache.append(transform(c));
} }
} }
} }
@@ -166,7 +171,7 @@ pub const IdRenderer = struct {
while (it.next()) |segment| { while (it.next()) |segment| {
var i: usize = 0; var i: usize = 0;
while (i < segment.len and std.ascii.isDigit(segment[i])) { while (i < segment.len and std.ascii.isDigit(segment[i])) {
try self.text_cache.writer.writeByte(segment[i]); try self.text_cache.append(segment[i]);
i += 1; i += 1;
} }
@@ -175,34 +180,34 @@ pub const IdRenderer = struct {
} }
if (i == 0 and lower_first) { if (i == 0 and lower_first) {
try self.text_cache.writer.writeByte(std.ascii.toLower(segment[i])); try self.text_cache.append(std.ascii.toLower(segment[i]));
} else { } else {
try self.text_cache.writer.writeByte(std.ascii.toUpper(segment[i])); try self.text_cache.append(std.ascii.toUpper(segment[i]));
} }
lower_first = false; lower_first = false;
for (segment[i + 1 ..]) |c| { for (segment[i + 1 ..]) |c| {
try self.text_cache.writer.writeByte(std.ascii.toLower(c)); try self.text_cache.append(std.ascii.toLower(c));
} }
} }
if (tag) |name| { if (tag) |name| {
try self.text_cache.writer.writeAll(name); try self.text_cache.appendSlice(name);
} }
} }
pub fn renderFmt(self: *IdRenderer, out: *std.Io.Writer, comptime fmt: []const u8, args: anytype) !void { pub fn renderFmt(self: *IdRenderer, out: anytype, comptime fmt: []const u8, args: anytype) !void {
_ = self.text_cache.writer.consumeAll(); self.text_cache.items.len = 0;
try self.text_cache.writer.print(fmt, args); try std.fmt.format(self.text_cache.writer(), fmt, args);
try writeIdentifier(out, self.text_cache.writer.buffered()); try writeIdentifier(out, self.text_cache.items);
} }
pub fn renderWithCase(self: *IdRenderer, out: *std.Io.Writer, case_style: CaseStyle, id: []const u8) !void { pub fn renderWithCase(self: *IdRenderer, out: anytype, case_style: CaseStyle, id: []const u8) !void {
const tag = self.getAuthorTag(id); const tag = self.getAuthorTag(id);
// The trailing underscore doesn't need to be removed here, as it's removed by the SegmentIterator.
const adjusted_id = if (tag) |name| id[0 .. id.len - name.len] else id; const adjusted_id = if (tag) |name| id[0 .. id.len - name.len] else id;
_ = self.text_cache.writer.consumeAll(); self.text_cache.items.len = 0;
switch (case_style) { switch (case_style) {
.snake => try self.renderSnake(false, adjusted_id, tag), .snake => try self.renderSnake(false, adjusted_id, tag),
@@ -211,7 +216,7 @@ pub const IdRenderer = struct {
.camel => try self.renderCamel(false, adjusted_id, tag), .camel => try self.renderCamel(false, adjusted_id, tag),
} }
try writeIdentifier(out, self.text_cache.writer.buffered()); try writeIdentifier(out, self.text_cache.items);
} }
pub fn getAuthorTag(self: IdRenderer, id: []const u8) ?[]const u8 { pub fn getAuthorTag(self: IdRenderer, id: []const u8) ?[]const u8 {
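To make the identifier escaping above concrete, here is a minimal sketch, assuming the zig-0.8.1-compat variant of `writeIdentifier` (the `anytype` writer version) is in scope; the helper name and allocator handling are illustrative:

const std = @import("std");

// Illustrative sketch: Zig primitive/reserved names get a trailing underscore,
// everything else goes through std.zig.fmtId unchanged.
fn demo(allocator: *std.mem.Allocator) !void {
    var buf = std.ArrayList(u8).init(allocator);
    defer buf.deinit();
    try writeIdentifier(buf.writer(), "instance"); // appends "instance"
    try writeIdentifier(buf.writer(), "type"); // appends "type_", since "type" is a Zig primitive
}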

generator/index.zig (new file, 8 lines)

@@ -0,0 +1,8 @@
pub const generateVk = @import("vulkan/generator.zig").generate;
pub const VkGenerateStep = @import("vulkan/build_integration.zig").GenerateStep;
pub const ShaderCompileStep = @import("build_integration.zig").ShaderCompileStep;
test "main" {
_ = @import("xml.zig");
_ = @import("vulkan/c_parse.zig");
}

generator/main.zig (new file, 80 lines)

@@ -0,0 +1,80 @@
const std = @import("std");
const generate = @import("vulkan/generator.zig").generate;
const usage = "Usage: {s} [-h|--help] <spec xml path> <output zig source>\n";
pub fn main() !void {
const stderr = std.io.getStdErr();
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const allocator = &arena.allocator;
var args = std.process.args();
const prog_name = try args.next(allocator) orelse return error.ExecutableNameMissing;
var maybe_xml_path: ?[]const u8 = null;
var maybe_out_path: ?[]const u8 = null;
while (args.next(allocator)) |err_or_arg| {
const arg = try err_or_arg;
if (std.mem.eql(u8, arg, "--help") or std.mem.eql(u8, arg, "-h")) {
@setEvalBranchQuota(2000);
try stderr.writer().print(
\\Utility to generate a Zig binding from the Vulkan XML API registry.
\\
\\The most recent Vulkan XML API registry can be obtained from
\\https://github.com/KhronosGroup/Vulkan-Docs/blob/master/xml/vk.xml,
\\and the most recent LunarG Vulkan SDK version can be found at
\\$VULKAN_SDK/x86_64/share/vulkan/registry/vk.xml.
\\
\\
++ usage,
.{prog_name},
);
return;
} else if (maybe_xml_path == null) {
maybe_xml_path = arg;
} else if (maybe_out_path == null) {
maybe_out_path = arg;
} else {
try stderr.writer().print("Error: Superfluous argument '{s}'\n", .{arg});
}
}
const xml_path = maybe_xml_path orelse {
try stderr.writer().print("Error: Missing required argument <spec xml path>\n" ++ usage, .{prog_name});
return;
};
const out_path = maybe_out_path orelse {
try stderr.writer().print("Error: Missing required argument <output zig source>\n" ++ usage, .{prog_name});
return;
};
const cwd = std.fs.cwd();
const xml_src = cwd.readFileAlloc(allocator, xml_path, std.math.maxInt(usize)) catch |err| {
try stderr.writer().print("Error: Failed to open input file '{s}' ({s})\n", .{ xml_path, @errorName(err) });
return;
};
var out_buffer = std.ArrayList(u8).init(allocator);
try generate(allocator, xml_src, out_buffer.writer());
const tree = try std.zig.parse(allocator, out_buffer.items);
const formatted = try tree.render(allocator);
defer allocator.free(formatted);
if (std.fs.path.dirname(out_path)) |dir| {
cwd.makePath(dir) catch |err| {
try stderr.writer().print("Error: Failed to create output directory '{s}' ({s})\n", .{ dir, @errorName(err) });
return;
};
}
cwd.writeFile(out_path, formatted) catch |err| {
try stderr.writer().print("Error: Failed to write to output file '{s}' ({s})\n", .{ out_path, @errorName(err) });
return;
};
}
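An invocation of the resulting binary therefore looks roughly like `vulkan-zig-generator $VULKAN_SDK/x86_64/share/vulkan/registry/vk.xml src/vk.zig`; the binary name and paths are illustrative, only the two positional arguments matter.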


@@ -0,0 +1,83 @@
const std = @import("std");
const generate = @import("generator.zig").generate;
const path = std.fs.path;
const Builder = std.build.Builder;
const Step = std.build.Step;
/// build.zig integration for Vulkan binding generation. This step can be used to generate
/// Vulkan bindings at compile time from vk.xml, by providing the path to vk.xml and the output
/// path relative to zig-cache. The generated bindings are exposed through the `package` member,
/// which can be added to the project using `std.build.Builder.addPackage`.
pub const GenerateStep = struct {
step: Step,
builder: *Builder,
/// The path to vk.xml
spec_path: []const u8,
/// The package representing the generated bindings. The generated bindings will be placed
/// in `package.path`. When using this step, this member should be passed to
/// `std.build.Builder.addPackage`, which causes the bindings to become available under the
/// name `vulkan`.
package: std.build.Pkg,
/// Initialize a Vulkan generation step, for `builder`. `spec_path` is the path to
/// vk.xml, relative to the project root. The generated bindings will be placed at
/// `out_path`, which is relative to the zig-cache directory.
pub fn init(builder: *Builder, spec_path: []const u8, out_path: []const u8) *GenerateStep {
const self = builder.allocator.create(GenerateStep) catch unreachable;
const full_out_path = path.join(builder.allocator, &[_][]const u8{
builder.build_root,
builder.cache_root,
out_path,
}) catch unreachable;
self.* = .{
.step = Step.init(.Custom, "vulkan-generate", builder.allocator, make),
.builder = builder,
.spec_path = spec_path,
.package = .{
.name = "vulkan",
.path = full_out_path,
.dependencies = null,
}
};
return self;
}
/// Initialize a Vulkan generation step for `builder`, by extracting vk.xml from the LunarG installation
/// root. Typically, the location of the LunarG SDK root can be retrieved by querying for the VULKAN_SDK
/// environment variable, set by activating the environment setup script located in the SDK root.
/// `builder` and `out_path` are used in the same manner as `init`.
pub fn initFromSdk(builder: *Builder, sdk_path: []const u8, out_path: []const u8) *GenerateStep {
const spec_path = std.fs.path.join(
builder.allocator,
&[_][]const u8{ sdk_path, "share/vulkan/registry/vk.xml" },
) catch unreachable;
return init(builder, spec_path, out_path);
}
/// Internal build function. This reads `vk.xml`, and passes it to `generate`, which then generates
/// the final bindings. The resulting generated bindings are not formatted, which is why an ArrayList
/// writer is passed instead of a file writer. This is then formatted into standard formatting
/// by parsing it and rendering with `std.zig.parse` and `std.zig.render` respectively.
fn make(step: *Step) !void {
const self = @fieldParentPtr(GenerateStep, "step", step);
const cwd = std.fs.cwd();
const spec = try cwd.readFileAlloc(self.builder.allocator, self.spec_path, std.math.maxInt(usize));
var out_buffer = std.ArrayList(u8).init(self.builder.allocator);
try generate(self.builder.allocator, spec, out_buffer.writer());
const tree = try std.zig.parse(self.builder.allocator, out_buffer.items);
std.debug.assert(tree.errors.len == 0); // If this triggers, vulkan-zig produced invalid code.
var formatted = try tree.render(self.builder.allocator);
const dir = path.dirname(self.package.path).?;
try cwd.makePath(dir);
try cwd.writeFile(self.package.path, formatted);
}
};
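As a rough sketch of the wiring these doc comments describe, assuming the generator is vendored under `generator/` and targeting the std.build API of this Zig version (the executable name and paths are illustrative):

const vkgen = @import("generator/index.zig");
const Builder = @import("std").build.Builder;

pub fn build(b: *Builder) void {
    const exe = b.addExecutable("triangle", "src/main.zig");
    exe.setBuildMode(b.standardReleaseOptions());

    // Generate vk.zig from a vendored vk.xml into zig-cache...
    const gen = vkgen.VkGenerateStep.init(b, "deps/vk.xml", "vk.zig");
    exe.step.dependOn(&gen.step);
    // ...and expose the generated bindings to the executable as the `vulkan` package.
    exe.addPackage(gen.package);

    exe.install();
}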


@@ -90,18 +90,10 @@ pub const CTokenizer = struct {
const start = self.offset; const start = self.offset;
_ = self.consumeNoEof(); _ = self.consumeNoEof();
const hex = self.peek() == 'x';
if (hex) {
_ = self.consumeNoEof();
}
while (true) { while (true) {
switch (self.peek() orelse break) { const c = self.peek() orelse break;
switch (c) {
'0'...'9' => _ = self.consumeNoEof(), '0'...'9' => _ = self.consumeNoEof(),
'A'...'F', 'a'...'f' => {
if (!hex) break;
_ = self.consumeNoEof();
},
else => break, else => break,
} }
} }
@@ -172,22 +164,19 @@ pub const XmlCTokenizer = struct {
} }
fn elemToToken(elem: *xml.Element) !?Token { fn elemToToken(elem: *xml.Element) !?Token {
// Sometimes we encounter empty comment tags. Filter those out if (elem.children.items.len != 1 or elem.children.items[0] != .CharData) {
// by early returning here, otherwise the next check will
// determine that the input is not valid XML.
if (mem.eql(u8, elem.tag, "comment")) {
return null;
} else if (elem.children.len != 1 or elem.children[0] != .char_data) {
return error.InvalidXml; return error.InvalidXml;
} }
const text = elem.children[0].char_data; const text = elem.children.items[0].CharData;
if (mem.eql(u8, elem.tag, "type")) { if (mem.eql(u8, elem.tag, "type")) {
return Token{ .kind = .type_name, .text = text }; return Token{ .kind = .type_name, .text = text };
} else if (mem.eql(u8, elem.tag, "enum")) { } else if (mem.eql(u8, elem.tag, "enum")) {
return Token{ .kind = .enum_name, .text = text }; return Token{ .kind = .enum_name, .text = text };
} else if (mem.eql(u8, elem.tag, "name")) { } else if (mem.eql(u8, elem.tag, "name")) {
return Token{ .kind = .name, .text = text }; return Token{ .kind = .name, .text = text };
} else if (mem.eql(u8, elem.tag, "comment")) {
return null;
} else { } else {
return error.InvalidTag; return error.InvalidTag;
} }
@@ -214,9 +203,9 @@ pub const XmlCTokenizer = struct {
if (self.it.next()) |child| { if (self.it.next()) |child| {
switch (child.*) { switch (child.*) {
.char_data => |cdata| self.ctok = CTokenizer{ .source = cdata, .in_comment = in_comment }, .CharData => |cdata| self.ctok = CTokenizer{ .source = cdata, .in_comment = in_comment },
.comment => {}, // xml comment .Comment => {}, // xml comment
.element => |elem| if (!in_comment) if (try elemToToken(elem)) |tok| return tok, .Element => |elem| if (!in_comment) if (try elemToToken(elem)) |tok| return tok,
} }
} else { } else {
return null; return null;
@@ -252,9 +241,9 @@ pub const XmlCTokenizer = struct {
}; };
// TYPEDEF = kw_typedef DECLARATION ';' // TYPEDEF = kw_typedef DECLARATION ';'
pub fn parseTypedef(allocator: Allocator, xctok: *XmlCTokenizer, ptrs_optional: bool) !registry.Declaration { pub fn parseTypedef(allocator: *Allocator, xctok: *XmlCTokenizer) !registry.Declaration {
_ = try xctok.expect(.kw_typedef); _ = try xctok.expect(.kw_typedef);
const decl = try parseDeclaration(allocator, xctok, ptrs_optional); const decl = try parseDeclaration(allocator, xctok);
_ = try xctok.expect(.semicolon); _ = try xctok.expect(.semicolon);
if (try xctok.peek()) |_| { if (try xctok.peek()) |_| {
return error.InvalidSyntax; return error.InvalidSyntax;
@@ -267,14 +256,13 @@ pub fn parseTypedef(allocator: Allocator, xctok: *XmlCTokenizer, ptrs_optional:
} }
// MEMBER = DECLARATION (':' int)? // MEMBER = DECLARATION (':' int)?
pub fn parseMember(allocator: Allocator, xctok: *XmlCTokenizer, ptrs_optional: bool) !registry.Container.Field { pub fn parseMember(allocator: *Allocator, xctok: *XmlCTokenizer) !registry.Container.Field {
const decl = try parseDeclaration(allocator, xctok, ptrs_optional); const decl = try parseDeclaration(allocator, xctok);
var field = registry.Container.Field{ var field = registry.Container.Field{
.name = decl.name orelse return error.MissingTypeIdentifier, .name = decl.name orelse return error.MissingTypeIdentifier,
.field_type = decl.decl_type, .field_type = decl.decl_type,
.bits = null, .bits = null,
.is_buffer_len = false, .is_buffer_len = false,
.is_optional = false,
}; };
if (try xctok.peek()) |tok| { if (try xctok.peek()) |tok| {
@@ -296,30 +284,11 @@ pub fn parseMember(allocator: Allocator, xctok: *XmlCTokenizer, ptrs_optional: b
return field; return field;
} }
pub fn parseParamOrProto(allocator: Allocator, xctok: *XmlCTokenizer, ptrs_optional: bool) !registry.Declaration { pub fn parseParamOrProto(allocator: *Allocator, xctok: *XmlCTokenizer) !registry.Declaration {
var decl = try parseDeclaration(allocator, xctok, ptrs_optional); const decl = try parseDeclaration(allocator, xctok);
if (try xctok.peek()) |_| { if (try xctok.peek()) |_| {
return error.InvalidSyntax; return error.InvalidSyntax;
} }
// Decay pointers
switch (decl.decl_type) {
.array => {
const child = try allocator.create(TypeInfo);
child.* = decl.decl_type;
decl.decl_type = .{
.pointer = .{
.is_const = decl.is_const,
.is_optional = false,
.size = .one,
.child = child,
},
};
},
else => {},
}
return registry.Declaration{ return registry.Declaration{
.name = decl.name orelse return error.MissingTypeIdentifier, .name = decl.name orelse return error.MissingTypeIdentifier,
.decl_type = .{ .typedef = decl.decl_type }, .decl_type = .{ .typedef = decl.decl_type },
@@ -329,7 +298,6 @@ pub fn parseParamOrProto(allocator: Allocator, xctok: *XmlCTokenizer, ptrs_optio
pub const Declaration = struct { pub const Declaration = struct {
name: ?[]const u8, // Parameter names may be optional, especially in case of func(void) name: ?[]const u8, // Parameter names may be optional, especially in case of func(void)
decl_type: TypeInfo, decl_type: TypeInfo,
is_const: bool,
}; };
pub const ParseError = error{ pub const ParseError = error{
@@ -347,7 +315,7 @@ pub const ParseError = error{
// DECLARATION = kw_const? type_name DECLARATOR // DECLARATION = kw_const? type_name DECLARATOR
// DECLARATOR = POINTERS (id | name)? ('[' ARRAY_DECLARATOR ']')* // DECLARATOR = POINTERS (id | name)? ('[' ARRAY_DECLARATOR ']')*
// | POINTERS '(' FNPTRSUFFIX // | POINTERS '(' FNPTRSUFFIX
fn parseDeclaration(allocator: Allocator, xctok: *XmlCTokenizer, ptrs_optional: bool) ParseError!Declaration { fn parseDeclaration(allocator: *Allocator, xctok: *XmlCTokenizer) ParseError!Declaration {
// Parse declaration constness // Parse declaration constness
var tok = try xctok.nextNoEof(); var tok = try xctok.nextNoEof();
const inner_is_const = tok.kind == .kw_const; const inner_is_const = tok.kind == .kw_const;
@@ -365,16 +333,12 @@ fn parseDeclaration(allocator: Allocator, xctok: *XmlCTokenizer, ptrs_optional:
var type_info = TypeInfo{ .name = type_name }; var type_info = TypeInfo{ .name = type_name };
// Parse pointers // Parse pointers
type_info = try parsePointers(allocator, xctok, inner_is_const, type_info, ptrs_optional); type_info = try parsePointers(allocator, xctok, inner_is_const, type_info);
// Parse name / fn ptr // Parse name / fn ptr
if (try parseFnPtrSuffix(allocator, xctok, type_info, ptrs_optional)) |decl| { if (try parseFnPtrSuffix(allocator, xctok, type_info)) |decl| {
return Declaration{ return decl;
.name = decl.name,
.decl_type = decl.decl_type,
.is_const = inner_is_const,
};
} }
const name = blk: { const name = blk: {
@@ -397,8 +361,6 @@ fn parseDeclaration(allocator: Allocator, xctok: *XmlCTokenizer, ptrs_optional:
inner_type.* = .{ inner_type.* = .{
.array = .{ .array = .{
.size = array_size, .size = array_size,
.valid_size = .all, // Refined later
.is_optional = true,
.child = child, .child = child,
}, },
}; };
@@ -411,12 +373,11 @@ fn parseDeclaration(allocator: Allocator, xctok: *XmlCTokenizer, ptrs_optional:
return Declaration{ return Declaration{
.name = name, .name = name,
.decl_type = type_info, .decl_type = type_info,
.is_const = inner_is_const,
}; };
} }
// FNPTRSUFFIX = kw_vkapi_ptr '*' name' ')' '(' ('void' | (DECLARATION (',' DECLARATION)*)?) ')' // FNPTRSUFFIX = kw_vkapi_ptr '*' name' ')' '(' ('void' | (DECLARATION (',' DECLARATION)*)?) ')'
fn parseFnPtrSuffix(allocator: Allocator, xctok: *XmlCTokenizer, return_type: TypeInfo, ptrs_optional: bool) !?Declaration { fn parseFnPtrSuffix(allocator: *Allocator, xctok: *XmlCTokenizer, return_type: TypeInfo) !?Declaration {
const lparen = try xctok.peek(); const lparen = try xctok.peek();
if (lparen == null or lparen.?.kind != .lparen) { if (lparen == null or lparen.?.kind != .lparen) {
return null; return null;
@@ -441,10 +402,9 @@ fn parseFnPtrSuffix(allocator: Allocator, xctok: *XmlCTokenizer, return_type: Ty
.error_codes = &[_][]const u8{}, .error_codes = &[_][]const u8{},
}, },
}, },
.is_const = false,
}; };
const first_param = try parseDeclaration(allocator, xctok, ptrs_optional); const first_param = try parseDeclaration(allocator, xctok);
if (first_param.name == null) { if (first_param.name == null) {
if (first_param.decl_type != .name or !mem.eql(u8, first_param.decl_type.name, "void")) { if (first_param.decl_type != .name or !mem.eql(u8, first_param.decl_type.name, "void")) {
return error.InvalidSyntax; return error.InvalidSyntax;
@@ -457,12 +417,11 @@ fn parseFnPtrSuffix(allocator: Allocator, xctok: *XmlCTokenizer, return_type: Ty
// There is no good way to estimate the number of parameters beforehand. // There is no good way to estimate the number of parameters beforehand.
// Fortunately, there are usually a relatively low number of parameters to a function pointer, // Fortunately, there are usually a relatively low number of parameters to a function pointer,
// so an ArrayList backed by an arena allocator is good enough. // so an ArrayList backed by an arena allocator is good enough.
var params: std.ArrayList(registry.Command.Param) = .empty; var params = std.ArrayList(registry.Command.Param).init(allocator);
try params.append(allocator, .{ try params.append(.{
.name = first_param.name.?, .name = first_param.name.?,
.param_type = first_param.decl_type, .param_type = first_param.decl_type,
.is_buffer_len = false, .is_buffer_len = false,
.is_optional = false,
}); });
while (true) { while (true) {
@@ -472,22 +431,21 @@ fn parseFnPtrSuffix(allocator: Allocator, xctok: *XmlCTokenizer, return_type: Ty
else => return error.InvalidSyntax, else => return error.InvalidSyntax,
} }
const decl = try parseDeclaration(allocator, xctok, ptrs_optional); const decl = try parseDeclaration(allocator, xctok);
try params.append(allocator, .{ try params.append(.{
.name = decl.name orelse return error.MissingTypeIdentifier, .name = decl.name orelse return error.MissingTypeIdentifier,
.param_type = decl.decl_type, .param_type = decl.decl_type,
.is_buffer_len = false, .is_buffer_len = false,
.is_optional = false,
}); });
} }
_ = try xctok.nextNoEof(); _ = try xctok.nextNoEof();
command_ptr.decl_type.command_ptr.params = try params.toOwnedSlice(allocator); command_ptr.decl_type.command_ptr.params = params.toOwnedSlice();
return command_ptr; return command_ptr;
} }
// POINTERS = (kw_const? '*')* // POINTERS = (kw_const? '*')*
fn parsePointers(allocator: Allocator, xctok: *XmlCTokenizer, inner_const: bool, inner: TypeInfo, ptrs_optional: bool) !TypeInfo { fn parsePointers(allocator: *Allocator, xctok: *XmlCTokenizer, inner_const: bool, inner: TypeInfo) !TypeInfo {
var type_info = inner; var type_info = inner;
var first_const = inner_const; var first_const = inner_const;
@@ -516,7 +474,7 @@ fn parsePointers(allocator: Allocator, xctok: *XmlCTokenizer, inner_const: bool,
type_info = .{ type_info = .{
.pointer = .{ .pointer = .{
.is_const = is_const or first_const, .is_const = is_const or first_const,
.is_optional = ptrs_optional, // set elsewhere .is_optional = false, // set elsewhere
.size = .one, // set elsewhere .size = .one, // set elsewhere
.child = child, .child = child,
}, },
@@ -541,10 +499,7 @@ fn parseArrayDeclarator(xctok: *XmlCTokenizer) !?ArraySize {
error.InvalidCharacter => unreachable, error.InvalidCharacter => unreachable,
}, },
}, },
-            // Sometimes, arrays are declared as `<type>T</type> <name>aa</name>[<enum>SIZE</enum>]`,
-            // and sometimes just as `<type>T</type> <name>aa</name>[SIZE]`, so we have to account
-            // for both `.enum_name` and `.id` here.
-            .enum_name, .id => .{ .alias = size_tok.text },
+            .enum_name => .{ .alias = size_tok.text },
else => return error.InvalidSyntax, else => return error.InvalidSyntax,
}; };
@@ -552,7 +507,7 @@ fn parseArrayDeclarator(xctok: *XmlCTokenizer) !?ArraySize {
return size; return size;
} }
pub fn parseVersion(xctok: *XmlCTokenizer) !registry.ApiConstant.Value { pub fn parseVersion(xctok: *XmlCTokenizer) ![4][]const u8 {
_ = try xctok.expect(.hash); _ = try xctok.expect(.hash);
const define = try xctok.expect(.id); const define = try xctok.expect(.id);
if (!mem.eql(u8, define.text, "define")) { if (!mem.eql(u8, define.text, "define")) {
@@ -561,23 +516,13 @@ pub fn parseVersion(xctok: *XmlCTokenizer) !registry.ApiConstant.Value {
_ = try xctok.expect(.name); _ = try xctok.expect(.name);
const vk_make_version = try xctok.expect(.type_name); const vk_make_version = try xctok.expect(.type_name);
if (mem.eql(u8, vk_make_version.text, "VK_MAKE_API_VERSION")) { if (!mem.eql(u8, vk_make_version.text, "VK_MAKE_API_VERSION")) {
return .{
.version = try parseVersionValues(xctok, 4),
};
} else if (mem.eql(u8, vk_make_version.text, "VK_MAKE_VIDEO_STD_VERSION")) {
return .{
.video_std_version = try parseVersionValues(xctok, 3),
};
} else {
return error.NotVersion; return error.NotVersion;
} }
}
fn parseVersionValues(xctok: *XmlCTokenizer, comptime count: usize) ![count][]const u8 {
_ = try xctok.expect(.lparen); _ = try xctok.expect(.lparen);
var version: [count][]const u8 = undefined; var version: [4][]const u8 = undefined;
for (&version, 0..) |*part, i| { for (version) |*part, i| {
if (i != 0) { if (i != 0) {
_ = try xctok.expect(.comma); _ = try xctok.expect(.comma);
} }
@@ -661,7 +606,7 @@ test "parseTypedef" {
defer arena.deinit(); defer arena.deinit();
var xctok = XmlCTokenizer.init(document.root); var xctok = XmlCTokenizer.init(document.root);
const decl = try parseTypedef(arena.allocator(), &xctok, false); const decl = try parseTypedef(&arena.allocator, &xctok);
try testing.expectEqualSlices(u8, "pythons", decl.name); try testing.expectEqualSlices(u8, "pythons", decl.name);
const array = decl.decl_type.typedef.array; const array = decl.decl_type.typedef.array;
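For orientation, the tokenizers and parsers in this file operate on the registry's mixed text/element encoding of C declarations, for example a member of the shape `const <type>char</type>* <name>pApplicationName</name>` (illustrative, modelled on typical vk.xml entries); `XmlCTokenizer` flattens such an element into C tokens interleaved with `type`/`name`/`enum` tokens before the declaration parser runs.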


@@ -10,21 +10,18 @@ const FeatureLevel = reg.FeatureLevel;
const EnumFieldMerger = struct { const EnumFieldMerger = struct {
const EnumExtensionMap = std.StringArrayHashMapUnmanaged(std.ArrayListUnmanaged(reg.Enum.Field)); const EnumExtensionMap = std.StringArrayHashMapUnmanaged(std.ArrayListUnmanaged(reg.Enum.Field));
const ApiConstantMap = std.StringArrayHashMapUnmanaged(reg.ApiConstant);
const FieldSet = std.StringArrayHashMapUnmanaged(void); const FieldSet = std.StringArrayHashMapUnmanaged(void);
arena: Allocator, arena: *Allocator,
registry: *reg.Registry, registry: *reg.Registry,
enum_extensions: EnumExtensionMap, enum_extensions: EnumExtensionMap,
api_constants: ApiConstantMap,
field_set: FieldSet, field_set: FieldSet,
fn init(arena: Allocator, registry: *reg.Registry) EnumFieldMerger { fn init(arena: *Allocator, registry: *reg.Registry) EnumFieldMerger {
return .{ return .{
.arena = arena, .arena = arena,
.registry = registry, .registry = registry,
.enum_extensions = .{}, .enum_extensions = .{},
.api_constants = .{},
.field_set = .{}, .field_set = .{},
}; };
} }
@@ -32,7 +29,7 @@ const EnumFieldMerger = struct {
fn putEnumExtension(self: *EnumFieldMerger, enum_name: []const u8, field: reg.Enum.Field) !void { fn putEnumExtension(self: *EnumFieldMerger, enum_name: []const u8, field: reg.Enum.Field) !void {
const res = try self.enum_extensions.getOrPut(self.arena, enum_name); const res = try self.enum_extensions.getOrPut(self.arena, enum_name);
if (!res.found_existing) { if (!res.found_existing) {
res.value_ptr.* = .empty; res.value_ptr.* = std.ArrayListUnmanaged(reg.Enum.Field){};
} }
try res.value_ptr.append(self.arena, field); try res.value_ptr.append(self.arena, field);
@@ -41,17 +38,7 @@ const EnumFieldMerger = struct {
fn addRequires(self: *EnumFieldMerger, reqs: []const reg.Require) !void { fn addRequires(self: *EnumFieldMerger, reqs: []const reg.Require) !void {
for (reqs) |req| { for (reqs) |req| {
for (req.extends) |enum_ext| { for (req.extends) |enum_ext| {
switch (enum_ext.value) { try self.putEnumExtension(enum_ext.extends, enum_ext.field);
.field => try self.putEnumExtension(enum_ext.extends, enum_ext.value.field),
.new_api_constant_expr => |expr| try self.api_constants.put(
self.arena,
enum_ext.extends,
.{
.name = enum_ext.extends,
.value = .{ .expr = expr },
},
),
}
} }
} }
} }
@@ -85,14 +72,10 @@ const EnumFieldMerger = struct {
// Existing base_enum.fields was allocated by `self.arena`, so // Existing base_enum.fields was allocated by `self.arena`, so
// it gets cleaned up whenever that is deinited. // it gets cleaned up whenever that is deinited.
base_enum.fields = new_fields[0..i]; base_enum.fields = self.arena.shrink(new_fields, i);
} }
fn merge(self: *EnumFieldMerger) !void { fn merge(self: *EnumFieldMerger) !void {
for (self.registry.api_constants) |api_constant| {
try self.api_constants.put(self.arena, api_constant.name, api_constant);
}
for (self.registry.features) |feature| { for (self.registry.features) |feature| {
try self.addRequires(feature.requires); try self.addRequires(feature.requires);
} }
@@ -108,8 +91,6 @@ const EnumFieldMerger = struct {
try self.mergeEnumFields(decl.name, &decl.decl_type.enumeration); try self.mergeEnumFields(decl.name, &decl.decl_type.enumeration);
} }
} }
self.registry.api_constants = self.api_constants.values();
} }
}; };
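For context, the require entries merged here come from feature and extension blocks in the registry of roughly the shape `<require><enum offset="0" extends="VkStructureType" name="VK_STRUCTURE_TYPE_..."/></require>` (illustrative); the merger groups such fields by the enum they extend so they can be appended to the base enum's own fields.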
@@ -117,19 +98,17 @@ pub const Generator = struct {
arena: std.heap.ArenaAllocator, arena: std.heap.ArenaAllocator,
registry: reg.Registry, registry: reg.Registry,
id_renderer: IdRenderer, id_renderer: IdRenderer,
have_video: bool,
fn init(allocator: Allocator, spec: *xml.Element, maybe_video_spec: ?*xml.Element, api: reg.Api) !Generator { fn init(allocator: *Allocator, spec: *xml.Element) !Generator {
const result = try parseXml(allocator, spec, maybe_video_spec, api); const result = try parseXml(allocator, spec);
const tags = try allocator.alloc([]const u8, result.registry.tags.len); const tags = try allocator.alloc([]const u8, result.registry.tags.len);
for (tags, result.registry.tags) |*tag, registry_tag| tag.* = registry_tag.name; for (tags) |*tag, i| tag.* = result.registry.tags[i].name;
return Generator{ return Generator{
.arena = result.arena, .arena = result.arena,
.registry = result.registry, .registry = result.registry,
.id_renderer = IdRenderer.init(allocator, tags), .id_renderer = IdRenderer.init(allocator, tags),
.have_video = maybe_video_spec != null,
}; };
} }
@@ -149,13 +128,13 @@ pub const Generator = struct {
// Solve `registry.declarations` according to `registry.extensions` and `registry.features`. // Solve `registry.declarations` according to `registry.extensions` and `registry.features`.
fn mergeEnumFields(self: *Generator) !void { fn mergeEnumFields(self: *Generator) !void {
var merger = EnumFieldMerger.init(self.arena.allocator(), &self.registry); var merger = EnumFieldMerger.init(&self.arena.allocator, &self.registry);
try merger.merge(); try merger.merge();
} }
// https://github.com/KhronosGroup/Vulkan-Docs/pull/1556 // https://github.com/KhronosGroup/Vulkan-Docs/pull/1556
fn fixupBitFlags(self: *Generator) !void { fn fixupBitFlags(self: *Generator) !void {
var seen_bits = std.StringArrayHashMap(void).init(self.arena.allocator()); var seen_bits = std.StringArrayHashMap(void).init(&self.arena.allocator);
defer seen_bits.deinit(); defer seen_bits.deinit();
for (self.registry.decls) |decl| { for (self.registry.decls) |decl| {
@@ -186,88 +165,23 @@ pub const Generator = struct {
self.registry.decls.len = i; self.registry.decls.len = i;
} }
fn render(self: *Generator, writer: *std.Io.Writer) !void { fn render(self: *Generator, writer: anytype) !void {
try renderRegistry(writer, self.arena.allocator(), &self.registry, &self.id_renderer, self.have_video); try renderRegistry(writer, &self.arena.allocator, &self.registry, &self.id_renderer);
} }
}; };
/// The vulkan registry contains the specification for multiple APIs: Vulkan and VulkanSC. This enum
/// describes applicable APIs.
pub const Api = reg.Api;
/// Main function for generating the Vulkan bindings. vk.xml is to be provided via `spec_xml`, /// Main function for generating the Vulkan bindings. vk.xml is to be provided via `spec_xml`,
/// and the resulting binding is written to `writer`. `allocator` will be used to allocate temporary /// and the resulting binding is written to `writer`. `allocator` will be used to allocate temporary
/// internal datastructures - mostly via an ArenaAllocator, but sometimes a hashmap uses this allocator /// internal datastructures - mostly via an ArenaAllocator, but sometimes a hashmap uses this allocator
/// directly. `api` is the API to generate the bindings for, usually `.vulkan`. /// directly.
pub fn generate( pub fn generate(allocator: *Allocator, spec_xml: []const u8, writer: anytype) !void {
allocator: Allocator, const spec = try xml.parse(allocator, spec_xml);
api: Api,
spec_xml: []const u8,
maybe_video_spec_xml: ?[]const u8,
writer: *std.Io.Writer,
) !void {
const spec = xml.parse(allocator, spec_xml) catch |err| switch (err) {
error.InvalidDocument,
error.UnexpectedEof,
error.UnexpectedCharacter,
error.IllegalCharacter,
error.InvalidEntity,
error.InvalidName,
error.InvalidStandaloneValue,
error.NonMatchingClosingTag,
error.UnclosedComment,
error.UnclosedValue,
=> return error.InvalidXml,
error.OutOfMemory => return error.OutOfMemory,
};
defer spec.deinit(); defer spec.deinit();
const maybe_video_spec_root = if (maybe_video_spec_xml) |video_spec_xml| blk: { var gen = try Generator.init(allocator, spec.root);
const video_spec = xml.parse(allocator, video_spec_xml) catch |err| switch (err) {
error.InvalidDocument,
error.UnexpectedEof,
error.UnexpectedCharacter,
error.IllegalCharacter,
error.InvalidEntity,
error.InvalidName,
error.InvalidStandaloneValue,
error.NonMatchingClosingTag,
error.UnclosedComment,
error.UnclosedValue,
=> return error.InvalidXml,
error.OutOfMemory => return error.OutOfMemory,
};
break :blk video_spec.root;
} else null;
var gen = Generator.init(allocator, spec.root, maybe_video_spec_root, api) catch |err| switch (err) {
error.InvalidXml,
error.InvalidCharacter,
error.Overflow,
error.InvalidFeatureLevel,
error.InvalidSyntax,
error.InvalidTag,
error.MissingTypeIdentifier,
error.UnexpectedCharacter,
error.UnexpectedEof,
error.UnexpectedToken,
error.InvalidRegistry,
=> return error.InvalidRegistry,
error.OutOfMemory => return error.OutOfMemory,
};
defer gen.deinit(); defer gen.deinit();
try gen.mergeEnumFields(); try gen.mergeEnumFields();
try gen.fixupBitFlags(); try gen.fixupBitFlags();
gen.render(writer) catch |err| switch (err) { try gen.render(writer);
error.InvalidApiConstant,
error.InvalidConstantExpr,
error.InvalidRegistry,
error.UnexpectedCharacter,
error.InvalidCharacter,
error.Overflow,
=> return error.InvalidRegistry,
else => |others| return others,
};
} }
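The zig-0.8.1-compat `generate` entry point can also be driven directly; a minimal sketch mirroring generator/main.zig (the spec path and output handling are illustrative):

const std = @import("std");
const generate = @import("vulkan/generator.zig").generate;

pub fn main() !void {
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    const allocator = &arena.allocator;

    // Read the registry and emit the binding into an in-memory buffer.
    const spec_xml = try std.fs.cwd().readFileAlloc(allocator, "vk.xml", std.math.maxInt(usize));
    var out_buffer = std.ArrayList(u8).init(allocator);
    try generate(allocator, spec_xml, out_buffer.writer());

    // out_buffer.items now holds the unformatted binding; generator/main.zig
    // additionally runs it through std.zig.parse and render before writing vk.zig.
    try std.fs.cwd().writeFile("vk.zig", out_buffer.items);
}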


@@ -17,43 +17,19 @@ pub const ParseResult = struct {
} }
}; };
pub fn parseXml( pub fn parseXml(backing_allocator: *Allocator, root: *xml.Element) !ParseResult {
backing_allocator: Allocator,
root: *xml.Element,
maybe_video_root: ?*xml.Element,
api: registry.Api,
) !ParseResult {
var arena = ArenaAllocator.init(backing_allocator); var arena = ArenaAllocator.init(backing_allocator);
errdefer arena.deinit(); errdefer arena.deinit();
const allocator = arena.allocator(); const allocator = &arena.allocator;
var decls: std.ArrayList(registry.Declaration) = .empty; var reg = registry.Registry{
var api_constants: std.ArrayList(registry.ApiConstant) = .empty; .copyright = root.getCharData("comment") orelse return error.InvalidRegistry,
var tags: std.ArrayList(registry.Tag) = .empty; .decls = try parseDeclarations(allocator, root),
var features: std.ArrayList(registry.Feature) = .empty; .api_constants = try parseApiConstants(allocator, root),
var extensions: std.ArrayList(registry.Extension) = .empty; .tags = try parseTags(allocator, root),
.features = try parseFeatures(allocator, root),
try parseDeclarations(allocator, root, api, &decls); .extensions = try parseExtensions(allocator, root),
try parseApiConstants(allocator, root, api, &api_constants);
try parseTags(allocator, root, &tags);
try parseFeatures(allocator, root, api, &features);
try parseExtensions(allocator, root, api, &extensions);
if (maybe_video_root) |video_root| {
try parseDeclarations(allocator, video_root, api, &decls);
try parseApiConstants(allocator, video_root, api, &api_constants);
try parseTags(allocator, video_root, &tags);
try parseFeatures(allocator, video_root, api, &features);
try parseExtensions(allocator, video_root, api, &extensions);
}
const reg = registry.Registry{
.decls = decls.items,
.api_constants = api_constants.items,
.tags = tags.items,
.features = features.items,
.extensions = extensions.items,
}; };
return ParseResult{ return ParseResult{
@@ -62,36 +38,25 @@ pub fn parseXml(
}; };
} }
fn parseDeclarations( fn parseDeclarations(allocator: *Allocator, root: *xml.Element) ![]registry.Declaration {
allocator: Allocator, var types_elem = root.findChildByTag("types") orelse return error.InvalidRegistry;
root: *xml.Element, var commands_elem = root.findChildByTag("commands") orelse return error.InvalidRegistry;
api: registry.Api,
decls: *std.ArrayList(registry.Declaration),
) !void {
const types_elem = root.findChildByTag("types") orelse return error.InvalidRegistry;
try decls.ensureUnusedCapacity(allocator, types_elem.children.len);
try parseTypes(allocator, types_elem, api, decls); const decl_upper_bound = types_elem.children.items.len + commands_elem.children.items.len;
try parseEnums(allocator, root, api, decls); const decls = try allocator.alloc(registry.Declaration, decl_upper_bound);
if (root.findChildByTag("commands")) |commands_elem| { var count: usize = 0;
try decls.ensureUnusedCapacity(allocator, commands_elem.children.len); count += try parseTypes(allocator, decls, types_elem);
try parseCommands(allocator, commands_elem, api, decls); count += try parseEnums(allocator, decls[count..], root);
} count += try parseCommands(allocator, decls[count..], commands_elem);
return allocator.shrink(decls, count);
} }
fn parseTypes( fn parseTypes(allocator: *Allocator, out: []registry.Declaration, types_elem: *xml.Element) !usize {
allocator: Allocator, var i: usize = 0;
types_elem: *xml.Element,
api: registry.Api,
decls: *std.ArrayList(registry.Declaration),
) !void {
var it = types_elem.findChildrenByTag("type"); var it = types_elem.findChildrenByTag("type");
while (it.next()) |ty| { while (it.next()) |ty| {
try decls.append(allocator, blk: { out[i] = blk: {
if (!requiredByApi(ty, api))
continue;
const category = ty.getAttribute("category") orelse { const category = ty.getAttribute("category") orelse {
break :blk try parseForeigntype(ty); break :blk try parseForeigntype(ty);
}; };
@@ -103,9 +68,9 @@ fn parseTypes(
} else if (mem.eql(u8, category, "basetype")) { } else if (mem.eql(u8, category, "basetype")) {
break :blk try parseBaseType(allocator, ty); break :blk try parseBaseType(allocator, ty);
} else if (mem.eql(u8, category, "struct")) { } else if (mem.eql(u8, category, "struct")) {
break :blk try parseContainer(allocator, ty, false, api); break :blk try parseContainer(allocator, ty, false);
} else if (mem.eql(u8, category, "union")) { } else if (mem.eql(u8, category, "union")) {
break :blk try parseContainer(allocator, ty, true, api); break :blk try parseContainer(allocator, ty, true);
} else if (mem.eql(u8, category, "funcpointer")) { } else if (mem.eql(u8, category, "funcpointer")) {
break :blk try parseFuncPointer(allocator, ty); break :blk try parseFuncPointer(allocator, ty);
} else if (mem.eql(u8, category, "enum")) { } else if (mem.eql(u8, category, "enum")) {
@@ -113,8 +78,12 @@ fn parseTypes(
} }
continue; continue;
}); };
i += 1;
} }
return i;
} }
fn parseForeigntype(ty: *xml.Element) !registry.Declaration { fn parseForeigntype(ty: *xml.Element) !registry.Declaration {
@@ -190,22 +159,22 @@ fn parseHandleType(ty: *xml.Element) !registry.Declaration {
} }
} }
fn parseBaseType(allocator: Allocator, ty: *xml.Element) !registry.Declaration { fn parseBaseType(allocator: *Allocator, ty: *xml.Element) !registry.Declaration {
const name = ty.getCharData("name") orelse return error.InvalidRegistry; const name = ty.getCharData("name") orelse return error.InvalidRegistry;
if (ty.getCharData("type")) |_| { if (ty.getCharData("type")) |_| {
var tok = cparse.XmlCTokenizer.init(ty); var tok = cparse.XmlCTokenizer.init(ty);
return try cparse.parseTypedef(allocator, &tok, false); return try cparse.parseTypedef(allocator, &tok);
} else { } else {
// Either ANativeWindow, AHardwareBuffer or CAMetalLayer. The latter has a lot of // Either ANativeWindow, AHardwareBuffer or CAMetalLayer. The latter has a lot of
// macros, which is why this part is not built into the xml/c parser. // macros, which is why this part is not built into the xml/c parser.
return registry.Declaration{ return registry.Declaration{
.name = name, .name = name,
.decl_type = .{ .foreign = .{ .depends = &.{} } }, .decl_type = .{ .external = {} },
}; };
} }
} }
fn parseContainer(allocator: Allocator, ty: *xml.Element, is_union: bool, api: registry.Api) !registry.Declaration { fn parseContainer(allocator: *Allocator, ty: *xml.Element, is_union: bool) !registry.Declaration {
const name = ty.getAttribute("name") orelse return error.InvalidRegistry; const name = ty.getAttribute("name") orelse return error.InvalidRegistry;
if (ty.getAttribute("alias")) |alias| { if (ty.getAttribute("alias")) |alias| {
@@ -217,42 +186,30 @@ fn parseContainer(allocator: Allocator, ty: *xml.Element, is_union: bool, api: r
}; };
} }
var members = try allocator.alloc(registry.Container.Field, ty.children.len); var members = try allocator.alloc(registry.Container.Field, ty.children.items.len);
var i: usize = 0; var i: usize = 0;
var it = ty.findChildrenByTag("member"); var it = ty.findChildrenByTag("member");
var maybe_stype: ?[]const u8 = null; var maybe_stype: ?[]const u8 = null;
while (it.next()) |member| { while (it.next()) |member| {
if (!requiredByApi(member, api))
continue;
var xctok = cparse.XmlCTokenizer.init(member); var xctok = cparse.XmlCTokenizer.init(member);
members[i] = try cparse.parseMember(allocator, &xctok, false); members[i] = try cparse.parseMember(allocator, &xctok);
if (mem.eql(u8, members[i].name, "sType")) { if (mem.eql(u8, members[i].name, "sType")) {
if (member.getAttribute("values")) |stype| { if (member.getAttribute("values")) |stype| {
maybe_stype = stype; maybe_stype = stype;
} }
} }
if (member.getAttribute("optional")) |optionals| {
var optional_it = mem.splitScalar(u8, optionals, ',');
if (optional_it.next()) |first_optional| {
members[i].is_optional = mem.eql(u8, first_optional, "true");
} else {
// Optional is empty, probably incorrect.
return error.InvalidRegistry;
}
}
i += 1; i += 1;
} }
members = members[0..i]; members = allocator.shrink(members, i);
var maybe_extends: ?[][]const u8 = null; var maybe_extends: ?[][]const u8 = null;
if (ty.getAttribute("structextends")) |extends| { if (ty.getAttribute("structextends")) |extends| {
const n_structs = std.mem.count(u8, extends, ",") + 1; const n_structs = std.mem.count(u8, extends, ",") + 1;
maybe_extends = try allocator.alloc([]const u8, n_structs); maybe_extends = try allocator.alloc([]const u8, n_structs);
var struct_extends = std.mem.splitScalar(u8, extends, ','); var struct_extends = std.mem.split(extends, ",");
var j: usize = 0; var j: usize = 0;
while (struct_extends.next()) |struct_extend| { while (struct_extends.next()) |struct_extend| {
maybe_extends.?[j] = struct_extend; maybe_extends.?[j] = struct_extend;
@@ -262,16 +219,8 @@ fn parseContainer(allocator: Allocator, ty: *xml.Element, is_union: bool, api: r
it = ty.findChildrenByTag("member"); it = ty.findChildrenByTag("member");
for (members) |*member| { for (members) |*member| {
const member_elem = while (it.next()) |elem| { const member_elem = it.next().?;
if (requiredByApi(elem, api)) break elem;
} else unreachable;
try parsePointerMeta(.{ .container = members }, &member.field_type, member_elem); try parsePointerMeta(.{ .container = members }, &member.field_type, member_elem);
// pNext isn't always properly marked as optional, so just manually override it,
if (mem.eql(u8, member.name, "pNext")) {
member.field_type.pointer.is_optional = true;
}
} }
return registry.Declaration{ return registry.Declaration{
@@ -287,12 +236,12 @@ fn parseContainer(allocator: Allocator, ty: *xml.Element, is_union: bool, api: r
}; };
} }
fn parseFuncPointer(allocator: Allocator, ty: *xml.Element) !registry.Declaration { fn parseFuncPointer(allocator: *Allocator, ty: *xml.Element) !registry.Declaration {
var xctok = cparse.XmlCTokenizer.init(ty); var xctok = cparse.XmlCTokenizer.init(ty);
return try cparse.parseTypedef(allocator, &xctok, true); return try cparse.parseTypedef(allocator, &xctok);
} }
// For some reason, the DeclarationType cannot be passed to lenToPointer, as // For some reason, the DeclarationType cannot be passed to lenToPointerSize, as
// that causes the Zig compiler to generate invalid code for the function. Using a // that causes the Zig compiler to generate invalid code for the function. Using a
// dedicated enum fixes the issue... // dedicated enum fixes the issue...
const Fields = union(enum) { const Fields = union(enum) {
@@ -300,14 +249,13 @@ const Fields = union(enum) {
container: []registry.Container.Field, container: []registry.Container.Field,
}; };
// returns .{ size, nullable } fn lenToPointerSize(fields: Fields, len: []const u8) registry.Pointer.PointerSize {
fn lenToPointer(fields: Fields, len: []const u8) std.meta.Tuple(&.{ registry.Pointer.PointerSize, bool }) {
switch (fields) { switch (fields) {
.command => |params| { .command => |params| {
for (params) |*param| { for (params) |*param| {
if (mem.eql(u8, param.name, len)) { if (mem.eql(u8, param.name, len)) {
param.is_buffer_len = true; param.is_buffer_len = true;
return .{ .{ .other_field = param.name }, param.is_optional }; return .{ .other_field = param.name };
} }
} }
}, },
@@ -315,99 +263,50 @@ fn lenToPointer(fields: Fields, len: []const u8) std.meta.Tuple(&.{ registry.Poi
for (members) |*member| { for (members) |*member| {
if (mem.eql(u8, member.name, len)) { if (mem.eql(u8, member.name, len)) {
member.is_buffer_len = true; member.is_buffer_len = true;
return .{ .{ .other_field = member.name }, member.is_optional }; return .{ .other_field = member.name };
} }
} }
}, },
} }
if (mem.eql(u8, len, "null-terminated")) { if (mem.eql(u8, len, "null-terminated")) {
return .{ .zero_terminated, false }; return .zero_terminated;
} else { } else {
return .{ .many, false }; return .many;
} }
} }
fn parsePointerMeta(fields: Fields, type_info: *registry.TypeInfo, elem: *xml.Element) !void { fn parsePointerMeta(fields: Fields, type_info: *registry.TypeInfo, elem: *xml.Element) !void {
var len_attribute_depth: usize = 0;
if (elem.getAttribute("len")) |lens| { if (elem.getAttribute("len")) |lens| {
var it = mem.splitScalar(u8, lens, ','); var it = mem.split(lens, ",");
var current_type_info = type_info; var current_type_info = type_info;
while (current_type_info.* == .pointer) {
// TODO: Check altlen
const size = if (it.next()) |len_str| lenToPointerSize(fields, len_str) else .one;
current_type_info.pointer.size = size;
current_type_info = current_type_info.pointer.child;
}
while (true) switch (current_type_info.*) { if (it.next()) |_| {
.pointer => |*ptr| {
if (it.next()) |len_str| {
ptr.size, ptr.is_optional = lenToPointer(fields, len_str);
} else {
ptr.size = .many;
}
current_type_info = ptr.child;
len_attribute_depth += 1;
},
.array => |*arr| {
if (it.next()) |len_str| {
const size, _ = lenToPointer(fields, len_str);
arr.valid_size = switch (size) {
.one => .all,
.many => .many,
.other_field => |field| .{ .other_field = field },
.zero_terminated => .zero_terminated,
};
} else {
arr.valid_size = .all;
}
current_type_info = arr.child;
len_attribute_depth += 1;
},
else => break,
};
if (it.next()) |_| ignore: {
// There are more elements in the `len` attribute than there are pointers // There are more elements in the `len` attribute than there are pointers
// Something probably went wrong // Something probably went wrong
switch (current_type_info.*) {
.name => |name| if (std.mem.eql(u8, name, "StdVideoH265SubLayerHrdParameters")) {
// Known issue: https://github.com/KhronosGroup/Vulkan-Docs/issues/2557
break :ignore;
},
else => {},
}
std.log.err("excessive pointer lengths: {s}", .{lens});
return error.InvalidRegistry; return error.InvalidRegistry;
} }
} }
var current_depth: usize = 0;
if (elem.getAttribute("optional")) |optionals| { if (elem.getAttribute("optional")) |optionals| {
var it = mem.splitScalar(u8, optionals, ','); var it = mem.split(optionals, ",");
var current_type_info = type_info; var current_type_info = type_info;
while (true) switch (current_type_info.*) { while (current_type_info.* == .pointer) {
inline .pointer, .array => |*info| { if (it.next()) |current_optional| {
if (it.next()) |optional_str| { current_type_info.pointer.is_optional = mem.eql(u8, current_optional, "true");
} else {
// There is no information for this pointer, probably incorrect.
return error.InvalidRegistry;
}
// The pointer may have already been marked as optional due to its `len` attribute. current_type_info = current_type_info.pointer.child;
const is_already_optional = current_depth < len_attribute_depth and info.is_optional; }
info.is_optional = is_already_optional or mem.eql(u8, optional_str, "true");
} else {
// There is no information for this pointer, probably incorrect.
// Currently there is one definition where this is the case, VkCudaLaunchInfoNV.
// We work around these by assuming that they are optional, so that in the case
// that they are, we can assign null to them.
// See https://github.com/Snektron/vulkan-zig/issues/109
info.is_optional = true;
}
current_type_info = info.child;
current_depth += 1;
},
else => break,
};
} }
} }
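For context on the `len` and `optional` handling above: registry members annotate pointers roughly like `<member len="swapchainCount">const <type>VkSwapchainKHR</type>* <name>pSwapchains</name></member>` (illustrative, modelled on typical vk.xml entries), and parsePointerMeta walks one comma-separated `len`/`optional` entry per pointer level to decide each pointer's size and optionality.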
@@ -425,27 +324,26 @@ fn parseEnumAlias(elem: *xml.Element) !?registry.Declaration {
return null; return null;
} }
fn parseEnums( fn parseEnums(allocator: *Allocator, out: []registry.Declaration, root: *xml.Element) !usize {
allocator: Allocator, var i: usize = 0;
root: *xml.Element,
api: registry.Api,
decls: *std.ArrayList(registry.Declaration),
) !void {
var it = root.findChildrenByTag("enums"); var it = root.findChildrenByTag("enums");
while (it.next()) |enums| { while (it.next()) |enums| {
const name = enums.getAttribute("name") orelse return error.InvalidRegistry; const name = enums.getAttribute("name") orelse return error.InvalidRegistry;
if (mem.eql(u8, name, api_constants_name) or !requiredByApi(enums, api)) { if (mem.eql(u8, name, api_constants_name)) {
continue; continue;
} }
try decls.append(allocator, .{ out[i] = .{
.name = name, .name = name,
.decl_type = .{ .enumeration = try parseEnumFields(allocator, enums, api) }, .decl_type = .{ .enumeration = try parseEnumFields(allocator, enums) },
}); };
i += 1;
} }
return i;
} }
fn parseEnumFields(allocator: Allocator, elem: *xml.Element, api: registry.Api) !registry.Enum { fn parseEnumFields(allocator: *Allocator, elem: *xml.Element) !registry.Enum {
// TODO: `type` was added recently, fall back to checking endswith FlagBits for older versions? // TODO: `type` was added recently, fall back to checking endswith FlagBits for older versions?
const enum_type = elem.getAttribute("type") orelse return error.InvalidRegistry; const enum_type = elem.getAttribute("type") orelse return error.InvalidRegistry;
const is_bitmask = mem.eql(u8, enum_type, "bitmask"); const is_bitmask = mem.eql(u8, enum_type, "bitmask");
@@ -458,20 +356,17 @@ fn parseEnumFields(allocator: Allocator, elem: *xml.Element, api: registry.Api)
else else
32; 32;
const fields = try allocator.alloc(registry.Enum.Field, elem.children.len); const fields = try allocator.alloc(registry.Enum.Field, elem.children.items.len);
var i: usize = 0; var i: usize = 0;
var it = elem.findChildrenByTag("enum"); var it = elem.findChildrenByTag("enum");
while (it.next()) |field| { while (it.next()) |field| {
if (!requiredByApi(field, api))
continue;
fields[i] = try parseEnumField(field); fields[i] = try parseEnumField(field);
i += 1; i += 1;
} }
return registry.Enum{ return registry.Enum{
.fields = fields[0..i], .fields = allocator.shrink(fields, i),
.bitwidth = bitwidth, .bitwidth = bitwidth,
.is_bitmask = is_bitmask, .is_bitmask = is_bitmask,
}; };
@@ -515,29 +410,25 @@ fn parseEnumField(field: *xml.Element) !registry.Enum.Field {
}; };
} }
fn parseCommands( fn parseCommands(allocator: *Allocator, out: []registry.Declaration, commands_elem: *xml.Element) !usize {
allocator: Allocator, var i: usize = 0;
commands_elem: *xml.Element,
api: registry.Api,
decls: *std.ArrayList(registry.Declaration),
) !void {
var it = commands_elem.findChildrenByTag("command"); var it = commands_elem.findChildrenByTag("command");
while (it.next()) |elem| { while (it.next()) |elem| {
if (!requiredByApi(elem, api)) out[i] = try parseCommand(allocator, elem);
continue; i += 1;
try decls.append(allocator, try parseCommand(allocator, elem, api));
} }
return i;
} }
fn splitCommaAlloc(allocator: Allocator, text: []const u8) ![][]const u8 { fn splitCommaAlloc(allocator: *Allocator, text: []const u8) ![][]const u8 {
var n_codes: usize = 1; var n_codes: usize = 1;
for (text) |c| { for (text) |c| {
if (c == ',') n_codes += 1; if (c == ',') n_codes += 1;
} }
const codes = try allocator.alloc([]const u8, n_codes); const codes = try allocator.alloc([]const u8, n_codes);
var it = mem.splitScalar(u8, text, ','); var it = mem.split(text, ",");
for (codes) |*code| { for (codes) |*code| {
code.* = it.next().?; code.* = it.next().?;
} }
@@ -545,7 +436,7 @@ fn splitCommaAlloc(allocator: Allocator, text: []const u8) ![][]const u8 {
return codes; return codes;
} }
fn parseCommand(allocator: Allocator, elem: *xml.Element, api: registry.Api) !registry.Declaration { fn parseCommand(allocator: *Allocator, elem: *xml.Element) !registry.Declaration {
if (elem.getAttribute("alias")) |alias| { if (elem.getAttribute("alias")) |alias| {
const name = elem.getAttribute("name") orelse return error.InvalidRegistry; const name = elem.getAttribute("name") orelse return error.InvalidRegistry;
return registry.Declaration{ return registry.Declaration{
@@ -558,34 +449,20 @@ fn parseCommand(allocator: Allocator, elem: *xml.Element, api: registry.Api) !re
const proto = elem.findChildByTag("proto") orelse return error.InvalidRegistry; const proto = elem.findChildByTag("proto") orelse return error.InvalidRegistry;
var proto_xctok = cparse.XmlCTokenizer.init(proto); var proto_xctok = cparse.XmlCTokenizer.init(proto);
const command_decl = try cparse.parseParamOrProto(allocator, &proto_xctok, false); const command_decl = try cparse.parseParamOrProto(allocator, &proto_xctok);
var params = try allocator.alloc(registry.Command.Param, elem.children.len); var params = try allocator.alloc(registry.Command.Param, elem.children.items.len);
var i: usize = 0; var i: usize = 0;
var it = elem.findChildrenByTag("param"); var it = elem.findChildrenByTag("param");
while (it.next()) |param| { while (it.next()) |param| {
if (!requiredByApi(param, api))
continue;
var xctok = cparse.XmlCTokenizer.init(param); var xctok = cparse.XmlCTokenizer.init(param);
const decl = try cparse.parseParamOrProto(allocator, &xctok, false); const decl = try cparse.parseParamOrProto(allocator, &xctok);
params[i] = .{ params[i] = .{
.name = decl.name, .name = decl.name,
.param_type = decl.decl_type.typedef, .param_type = decl.decl_type.typedef,
.is_buffer_len = false, .is_buffer_len = false,
.is_optional = false,
}; };
if (param.getAttribute("optional")) |optionals| {
var optional_it = mem.splitScalar(u8, optionals, ',');
if (optional_it.next()) |first_optional| {
params[i].is_optional = mem.eql(u8, first_optional, "true");
} else {
// Optional is empty, probably incorrect.
return error.InvalidRegistry;
}
}
i += 1; i += 1;
} }
@@ -602,14 +479,11 @@ fn parseCommand(allocator: Allocator, elem: *xml.Element, api: registry.Api) !re
else else
&[_][]const u8{}; &[_][]const u8{};
params = params[0..i]; params = allocator.shrink(params, i);
it = elem.findChildrenByTag("param"); it = elem.findChildrenByTag("param");
for (params) |*param| { for (params) |*param| {
const param_elem = while (it.next()) |param_elem| { const param_elem = it.next().?;
if (requiredByApi(param_elem, api)) break param_elem;
} else unreachable;
try parsePointerMeta(.{ .command = params }, &param.param_type, param_elem); try parsePointerMeta(.{ .command = params }, &param.param_type, param_elem);
} }
@@ -626,13 +500,8 @@ fn parseCommand(allocator: Allocator, elem: *xml.Element, api: registry.Api) !re
}; };
} }
fn parseApiConstants( fn parseApiConstants(allocator: *Allocator, root: *xml.Element) ![]registry.ApiConstant {
allocator: Allocator, var enums = blk: {
root: *xml.Element,
api: registry.Api,
api_constants: *std.ArrayList(registry.ApiConstant),
) !void {
const maybe_enums = blk: {
var it = root.findChildrenByTag("enums"); var it = root.findChildrenByTag("enums");
while (it.next()) |child| { while (it.next()) |child| {
const name = child.getAttribute("name") orelse continue; const name = child.getAttribute("name") orelse continue;
@@ -641,137 +510,138 @@ fn parseApiConstants(
} }
} }
break :blk null; return error.InvalidRegistry;
}; };
if (maybe_enums) |enums| { var types = root.findChildByTag("types") orelse return error.InvalidRegistry;
var it = enums.findChildrenByTag("enum"); const n_defines = blk: {
while (it.next()) |constant| { var n_defines: usize = 0;
if (!requiredByApi(constant, api)) var it = types.findChildrenByTag("type");
continue; while (it.next()) |ty| {
if (ty.getAttribute("category")) |category| {
const expr = if (constant.getAttribute("value")) |expr| if (mem.eql(u8, category, "define")) {
expr n_defines += 1;
else if (constant.getAttribute("alias")) |alias| }
alias }
else
return error.InvalidRegistry;
try api_constants.append(allocator, .{
.name = constant.getAttribute("name") orelse return error.InvalidRegistry,
.value = .{ .expr = expr },
});
} }
break :blk n_defines;
};
const constants = try allocator.alloc(registry.ApiConstant, enums.children.items.len + n_defines);
var i: usize = 0;
var it = enums.findChildrenByTag("enum");
while (it.next()) |constant| {
const expr = if (constant.getAttribute("value")) |expr|
expr
else if (constant.getAttribute("alias")) |alias|
alias
else
return error.InvalidRegistry;
constants[i] = .{
.name = constant.getAttribute("name") orelse return error.InvalidRegistry,
.value = .{ .expr = expr },
};
i += 1;
} }
const types = root.findChildByTag("types") orelse return error.InvalidRegistry; i += try parseDefines(types, constants[i..]);
try parseDefines(allocator, types, api, api_constants); return allocator.shrink(constants, i);
} }
fn parseDefines( fn parseDefines(types: *xml.Element, out: []registry.ApiConstant) !usize {
allocator: Allocator, var i: usize = 0;
types: *xml.Element,
api: registry.Api,
api_constants: *std.ArrayList(registry.ApiConstant),
) !void {
var it = types.findChildrenByTag("type"); var it = types.findChildrenByTag("type");
while (it.next()) |ty| { while (it.next()) |ty| {
if (!requiredByApi(ty, api))
continue;
const category = ty.getAttribute("category") orelse continue; const category = ty.getAttribute("category") orelse continue;
if (!mem.eql(u8, category, "define")) { if (!mem.eql(u8, category, "define")) {
continue; continue;
} }
const name = ty.getCharData("name") orelse continue; const name = ty.getCharData("name") orelse continue;
if (mem.eql(u8, name, "VK_HEADER_VERSION") or mem.eql(u8, name, "VKSC_API_VARIANT")) { if (mem.eql(u8, name, "VK_HEADER_VERSION")) {
try api_constants.append(allocator, .{ out[i] = .{
.name = name, .name = name,
.value = .{ .expr = mem.trim(u8, ty.children[2].char_data, " ") }, .value = .{ .expr = mem.trim(u8, ty.children.items[2].CharData, " ") },
}); };
} else { } else {
var xctok = cparse.XmlCTokenizer.init(ty); var xctok = cparse.XmlCTokenizer.init(ty);
try api_constants.append(allocator, .{ out[i] = .{
.name = name, .name = name,
.value = cparse.parseVersion(&xctok) catch continue, .value = .{ .version = cparse.parseVersion(&xctok) catch continue },
}); };
} }
i += 1;
} }
return i;
} }
fn parseTags( fn parseTags(allocator: *Allocator, root: *xml.Element) ![]registry.Tag {
allocator: Allocator, var tags_elem = root.findChildByTag("tags") orelse return error.InvalidRegistry;
root: *xml.Element, const tags = try allocator.alloc(registry.Tag, tags_elem.children.items.len);
tags: *std.ArrayList(registry.Tag),
) !void {
var tags_elem = root.findChildByTag("tags") orelse return;
try tags.ensureUnusedCapacity(allocator, tags_elem.children.len);
var i: usize = 0;
var it = tags_elem.findChildrenByTag("tag"); var it = tags_elem.findChildrenByTag("tag");
while (it.next()) |tag| { while (it.next()) |tag| {
tags.appendAssumeCapacity(.{ tags[i] = .{
.name = tag.getAttribute("name") orelse return error.InvalidRegistry, .name = tag.getAttribute("name") orelse return error.InvalidRegistry,
.author = tag.getAttribute("author") orelse return error.InvalidRegistry, .author = tag.getAttribute("author") orelse return error.InvalidRegistry,
}); };
i += 1;
} }
return allocator.shrink(tags, i);
} }
fn parseFeatures(allocator: Allocator, root: *xml.Element, api: registry.Api, features: *std.ArrayList(registry.Feature)) !void { fn parseFeatures(allocator: *Allocator, root: *xml.Element) ![]registry.Feature {
var it = root.findChildrenByTag("feature"); var it = root.findChildrenByTag("feature");
while (it.next()) |feature| { var count: usize = 0;
if (!requiredByApi(feature, api)) while (it.next()) |_| count += 1;
continue;
try features.append(allocator, try parseFeature(allocator, feature, api)); const features = try allocator.alloc(registry.Feature, count);
var i: usize = 0;
it = root.findChildrenByTag("feature");
while (it.next()) |feature| {
features[i] = try parseFeature(allocator, feature);
i += 1;
} }
return features;
} }
fn parseFeature(allocator: Allocator, feature: *xml.Element, api: registry.Api) !registry.Feature { fn parseFeature(allocator: *Allocator, feature: *xml.Element) !registry.Feature {
const name = feature.getAttribute("name") orelse return error.InvalidRegistry; const name = feature.getAttribute("name") orelse return error.InvalidRegistry;
const feature_level = blk: { const feature_level = blk: {
const number = feature.getAttribute("number") orelse return error.InvalidRegistry; const number = feature.getAttribute("number") orelse return error.InvalidRegistry;
break :blk try splitFeatureLevel(number, "."); break :blk try splitFeatureLevel(number, ".");
}; };
var requires = try allocator.alloc(registry.Require, feature.children.len); var requires = try allocator.alloc(registry.Require, feature.children.items.len);
var i: usize = 0; var i: usize = 0;
var it = feature.findChildrenByTag("require"); var it = feature.findChildrenByTag("require");
while (it.next()) |require| { while (it.next()) |require| {
if (!requiredByApi(require, api)) requires[i] = try parseRequire(allocator, require, null);
continue;
requires[i] = try parseRequire(allocator, require, null, api);
i += 1; i += 1;
} }
return registry.Feature{ return registry.Feature{
.name = name, .name = name,
.level = feature_level, .level = feature_level,
.requires = requires[0..i], .requires = allocator.shrink(requires, i),
}; };
} }
fn parseEnumExtension(elem: *xml.Element, parent_extnumber: ?u31) !?registry.Require.EnumExtension { fn parseEnumExtension(elem: *xml.Element, parent_extnumber: ?u31) !?registry.Require.EnumExtension {
// check for either _SPEC_VERSION or _EXTENSION_NAME // check for either _SPEC_VERSION or _EXTENSION_NAME
const name = elem.getAttribute("name") orelse return error.InvalidRegistry; const extends = elem.getAttribute("extends") orelse return null;
if (std.mem.endsWith(u8, name, "_SPEC_VERSION") or std.mem.endsWith(u8, name, "_EXTENSION_NAME")) {
return null;
}
const extends = elem.getAttribute("extends") orelse {
const expr = elem.getAttribute("value") orelse return null;
// This adds a value to the 'API constants' set
return registry.Require.EnumExtension{
.extends = name,
.extnumber = null,
.value = .{ .new_api_constant_expr = expr },
};
};
if (elem.getAttribute("offset")) |offset_str| { if (elem.getAttribute("offset")) |offset_str| {
const offset = try std.fmt.parseInt(u31, offset_str, 10); const offset = try std.fmt.parseInt(u31, offset_str, 10);
const name = elem.getAttribute("name") orelse return error.InvalidRegistry;
const extnumber = if (elem.getAttribute("extnumber")) |num| const extnumber = if (elem.getAttribute("extnumber")) |num|
try std.fmt.parseInt(u31, num, 10) try std.fmt.parseInt(u31, num, 10)
else else
@@ -794,11 +664,9 @@ fn parseEnumExtension(elem: *xml.Element, parent_extnumber: ?u31) !?registry.Req
return registry.Require.EnumExtension{ return registry.Require.EnumExtension{
.extends = extends, .extends = extends,
.extnumber = actual_extnumber, .extnumber = actual_extnumber,
.value = .{ .field = .{
.field = .{ .name = name,
.name = name, .value = .{ .int = value },
.value = .{ .int = value },
},
}, },
}; };
} }
@@ -806,7 +674,7 @@ fn parseEnumExtension(elem: *xml.Element, parent_extnumber: ?u31) !?registry.Req
return registry.Require.EnumExtension{ return registry.Require.EnumExtension{
.extends = extends, .extends = extends,
.extnumber = parent_extnumber, .extnumber = parent_extnumber,
.value = .{ .field = try parseEnumField(elem) }, .field = try parseEnumField(elem),
}; };
} }
@@ -816,7 +684,7 @@ fn enumExtOffsetToValue(extnumber: u31, offset: u31) u31 {
return extension_value_base + (extnumber - 1) * extension_block + offset; return extension_value_base + (extnumber - 1) * extension_block + offset;
} }
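// Illustrative standalone sketch (not part of the generator): the base/block
// constants below follow the public Vulkan registry convention for extension
// enum values; the parser defines its own equivalents elsewhere, so treat the
// names here as assumptions.
const std = @import("std");

const sketch_value_base: u31 = 1000000000;
const sketch_block: u31 = 1000;

fn sketchOffsetToValue(extnumber: u31, offset: u31) u31 {
    return sketch_value_base + (extnumber - 1) * sketch_block + offset;
}

test "extension enum offset example" {
    // VK_KHR_swapchain is extension number 2, so its offset-0 enum lands at
    // 1000001000, the value of VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR.
    try std.testing.expectEqual(@as(u31, 1000001000), sketchOffsetToValue(2, 0));
}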
fn parseRequire(allocator: Allocator, require: *xml.Element, extnumber: ?u31, api: registry.Api) !registry.Require { fn parseRequire(allocator: *Allocator, require: *xml.Element, extnumber: ?u31) !registry.Require {
var n_extends: usize = 0; var n_extends: usize = 0;
var n_types: usize = 0; var n_types: usize = 0;
var n_commands: usize = 0; var n_commands: usize = 0;
@@ -842,9 +710,6 @@ fn parseRequire(allocator: Allocator, require: *xml.Element, extnumber: ?u31, ap
it = require.elements(); it = require.elements();
while (it.next()) |elem| { while (it.next()) |elem| {
if (!requiredByApi(elem, api))
continue;
if (mem.eql(u8, elem.tag, "enum")) { if (mem.eql(u8, elem.tag, "enum")) {
if (try parseEnumExtension(elem, extnumber)) |ext| { if (try parseEnumExtension(elem, extnumber)) |ext| {
extends[i_extends] = ext; extends[i_extends] = ext;
@@ -869,27 +734,21 @@ fn parseRequire(allocator: Allocator, require: *xml.Element, extnumber: ?u31, ap
}; };
return registry.Require{ return registry.Require{
.extends = extends[0..i_extends], .extends = allocator.shrink(extends, i_extends),
.types = types[0..i_types], .types = types,
.commands = commands[0..i_commands], .commands = commands,
.required_feature_level = required_feature_level, .required_feature_level = required_feature_level,
.required_extension = require.getAttribute("extension"), .required_extension = require.getAttribute("extension"),
}; };
} }
fn parseExtensions( fn parseExtensions(allocator: *Allocator, root: *xml.Element) ![]registry.Extension {
allocator: Allocator,
root: *xml.Element,
api: registry.Api,
extensions: *std.ArrayList(registry.Extension),
) !void {
const extensions_elem = root.findChildByTag("extensions") orelse return error.InvalidRegistry; const extensions_elem = root.findChildByTag("extensions") orelse return error.InvalidRegistry;
try extensions.ensureUnusedCapacity(allocator, extensions_elem.children.len);
const extensions = try allocator.alloc(registry.Extension, extensions_elem.children.items.len);
var i: usize = 0;
var it = extensions_elem.findChildrenByTag("extension"); var it = extensions_elem.findChildrenByTag("extension");
while (it.next()) |extension| { while (it.next()) |extension| {
if (!requiredByApi(extension, api))
continue;
// Some extensions (in particular 94) are disabled, so just skip them // Some extensions (in particular 94) are disabled, so just skip them
if (extension.getAttribute("supported")) |supported| { if (extension.getAttribute("supported")) |supported| {
if (mem.eql(u8, supported, "disabled")) { if (mem.eql(u8, supported, "disabled")) {
@@ -897,11 +756,14 @@ fn parseExtensions(
} }
} }
extensions.appendAssumeCapacity(try parseExtension(allocator, extension, api)); extensions[i] = try parseExtension(allocator, extension);
i += 1;
} }
return allocator.shrink(extensions, i);
} }
fn findExtVersion(extension: *xml.Element) !registry.Extension.Version { fn findExtVersion(extension: *xml.Element) !u32 {
var req_it = extension.findChildrenByTag("require"); var req_it = extension.findChildrenByTag("require");
while (req_it.next()) |req| { while (req_it.next()) |req| {
var enum_it = req.findChildrenByTag("enum"); var enum_it = req.findChildrenByTag("enum");
@@ -909,23 +771,17 @@ fn findExtVersion(extension: *xml.Element) !registry.Extension.Version {
const name = e.getAttribute("name") orelse continue; const name = e.getAttribute("name") orelse continue;
const value = e.getAttribute("value") orelse continue; const value = e.getAttribute("value") orelse continue;
if (mem.endsWith(u8, name, "_SPEC_VERSION")) { if (mem.endsWith(u8, name, "_SPEC_VERSION")) {
// Vulkan Video extensions are sometimes aliases. return try std.fmt.parseInt(u32, value, 10);
// If we fail to parse it as an integer, just assume that it's an alias and return that.
const version = std.fmt.parseInt(u32, value, 10) catch return .{ .alias = value };
return .{ .int = version };
} }
} }
} }
return .unknown; return error.InvalidRegistry;
} }
fn parseExtension(allocator: Allocator, extension: *xml.Element, api: registry.Api) !registry.Extension { fn parseExtension(allocator: *Allocator, extension: *xml.Element) !registry.Extension {
const name = extension.getAttribute("name") orelse return error.InvalidRegistry; const name = extension.getAttribute("name") orelse return error.InvalidRegistry;
const platform = extension.getAttribute("platform"); const platform = extension.getAttribute("platform");
const is_video = std.mem.startsWith(u8, name, "vulkan_video_");
const version = try findExtVersion(extension); const version = try findExtVersion(extension);
// For some reason there are two ways for an extension to state its required // For some reason there are two ways for an extension to state its required
@@ -947,14 +803,11 @@ fn parseExtension(allocator: Allocator, extension: *xml.Element, api: registry.A
}; };
const number = blk: { const number = blk: {
// Vulkan Video extensions do not have numbers.
if (is_video) break :blk 0;
const number_str = extension.getAttribute("number") orelse return error.InvalidRegistry; const number_str = extension.getAttribute("number") orelse return error.InvalidRegistry;
break :blk try std.fmt.parseInt(u31, number_str, 10); break :blk try std.fmt.parseInt(u31, number_str, 10);
}; };
const ext_type: ?registry.Extension.ExtensionType = blk: { const ext_type: ?registry.Extension.ExtensionType = blk: {
if (is_video) break :blk .video;
const ext_type_str = extension.getAttribute("type") orelse break :blk null; const ext_type_str = extension.getAttribute("type") orelse break :blk null;
if (mem.eql(u8, ext_type_str, "instance")) { if (mem.eql(u8, ext_type_str, "instance")) {
break :blk .instance; break :blk .instance;
@@ -970,13 +823,11 @@ fn parseExtension(allocator: Allocator, extension: *xml.Element, api: registry.A
break :blk try splitCommaAlloc(allocator, requires_str); break :blk try splitCommaAlloc(allocator, requires_str);
}; };
var requires = try allocator.alloc(registry.Require, extension.children.len); var requires = try allocator.alloc(registry.Require, extension.children.items.len);
var i: usize = 0; var i: usize = 0;
var it = extension.findChildrenByTag("require"); var it = extension.findChildrenByTag("require");
while (it.next()) |require| { while (it.next()) |require| {
if (!requiredByApi(require, api)) requires[i] = try parseRequire(allocator, require, number);
continue;
requires[i] = try parseRequire(allocator, require, number, api);
i += 1; i += 1;
} }
@@ -989,12 +840,12 @@ fn parseExtension(allocator: Allocator, extension: *xml.Element, api: registry.A
.promoted_to = promoted_to, .promoted_to = promoted_to,
.platform = platform, .platform = platform,
.required_feature_level = requires_core, .required_feature_level = requires_core,
.requires = requires[0..i], .requires = allocator.shrink(requires, i),
}; };
} }
fn splitFeatureLevel(ver: []const u8, split: []const u8) !registry.FeatureLevel { fn splitFeatureLevel(ver: []const u8, split: []const u8) !registry.FeatureLevel {
var it = mem.splitSequence(u8, ver, split); var it = mem.split(ver, split);
const major = it.next() orelse return error.InvalidFeatureLevel; const major = it.next() orelse return error.InvalidFeatureLevel;
const minor = it.next() orelse return error.InvalidFeatureLevel; const minor = it.next() orelse return error.InvalidFeatureLevel;
@@ -1007,14 +858,3 @@ fn splitFeatureLevel(ver: []const u8, split: []const u8) !registry.FeatureLevel
.minor = try std.fmt.parseInt(u32, minor, 10), .minor = try std.fmt.parseInt(u32, minor, 10),
}; };
} }
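// Standalone example of the split above (assumes the std.mem.splitSequence API
// used in the newer code in this hunk): the registry's number="1.2" attribute
// yields major 1, minor 2.
const std = @import("std");

test "feature level split example" {
    var it = std.mem.splitSequence(u8, "1.2", ".");
    try std.testing.expectEqualStrings("1", it.next().?);
    try std.testing.expectEqualStrings("2", it.next().?);
}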
fn requiredByApi(elem: *xml.Element, api: registry.Api) bool {
const apis = elem.getAttribute("api") orelse return true; // If the 'api' element is not present, assume required.
var it = mem.splitScalar(u8, apis, ',');
while (it.next()) |required_by_api| {
if (std.mem.eql(u8, @tagName(api), required_by_api)) return true;
}
return false;
}
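// Standalone sketch of the comma-separated matching performed by requiredByApi
// above; the xml.Element handling is omitted and the helper name is illustrative only.
const std = @import("std");

fn apiListContains(apis: []const u8, wanted: []const u8) bool {
    var it = std.mem.splitScalar(u8, apis, ',');
    while (it.next()) |name| {
        if (std.mem.eql(u8, name, wanted)) return true;
    }
    return false;
}

test "api attribute filtering example" {
    // api="vulkan,vulkansc" is kept for both targets, api="vulkansc" is skipped
    // when generating the plain Vulkan API, and a missing attribute always keeps
    // the element.
    try std.testing.expect(apiListContains("vulkan,vulkansc", "vulkan"));
    try std.testing.expect(!apiListContains("vulkansc", "vulkan"));
}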


@@ -1,9 +1,5 @@
pub const Api = enum {
vulkan,
vulkansc,
};
pub const Registry = struct { pub const Registry = struct {
copyright: []const u8,
decls: []Declaration, decls: []Declaration,
api_constants: []ApiConstant, api_constants: []ApiConstant,
tags: []Tag, tags: []Tag,
@@ -42,7 +38,6 @@ pub const ApiConstant = struct {
pub const Value = union(enum) { pub const Value = union(enum) {
expr: []const u8, expr: []const u8,
version: [4][]const u8, version: [4][]const u8,
video_std_version: [3][]const u8,
}; };
name: []const u8, name: []const u8,
@@ -67,7 +62,6 @@ pub const Container = struct {
field_type: TypeInfo, field_type: TypeInfo,
bits: ?usize, bits: ?usize,
is_buffer_len: bool, is_buffer_len: bool,
is_optional: bool,
}; };
stype: ?[]const u8, stype: ?[]const u8,
@@ -112,22 +106,19 @@ pub const Command = struct {
name: []const u8, name: []const u8,
param_type: TypeInfo, param_type: TypeInfo,
is_buffer_len: bool, is_buffer_len: bool,
is_optional: bool,
}; };
params: []Param, params: []Param,
return_type: *TypeInfo, return_type: *TypeInfo,
success_codes: []const []const u8, success_codes: [][]const u8,
error_codes: []const []const u8, error_codes: [][]const u8,
}; };
pub const Pointer = struct { pub const Pointer = struct {
pub const PointerSize = union(enum) { pub const PointerSize = union(enum) {
one, one,
/// The length is given by some complex expression, possibly involving another field many, // The length is given by some complex expression, possibly involving another field
many, other_field: []const u8, // The length is given by some other field or parameter
/// The length is given by some other field or parameter
other_field: []const u8,
zero_terminated, zero_terminated,
}; };
@@ -143,26 +134,7 @@ pub const Array = struct {
alias: []const u8, // Field size is given by an api constant alias: []const u8, // Field size is given by an api constant
}; };
pub const ArrayValidSize = union(enum) {
/// All elements are valid.
all,
/// The length is given by some complex expression, possibly involving another field
many,
/// The length is given by some other field or parameter
other_field: []const u8,
/// The valid elements are terminated by a 0, or by the bounds of the array.
zero_terminated,
};
/// This is the total size of the array
size: ArraySize, size: ArraySize,
/// The number of items that are actually filled with valid values
valid_size: ArrayValidSize,
/// Some members may indicate than an array is optional. This happens with
/// VkPhysicalDeviceHostImageCopyPropertiesEXT::optimalTilingLayoutUUID for example.
/// The spec is not entirely clear about what this means, but presumably it should
/// be filled with all zeroes.
is_optional: bool,
child: *TypeInfo, child: *TypeInfo,
}; };
@@ -180,7 +152,6 @@ pub const Extension = struct {
pub const ExtensionType = enum { pub const ExtensionType = enum {
instance, instance,
device, device,
video,
}; };
pub const Promotion = union(enum) { pub const Promotion = union(enum) {
@@ -189,15 +160,9 @@ pub const Extension = struct {
extension: []const u8, extension: []const u8,
}; };
pub const Version = union(enum) {
int: u32,
alias: []const u8,
unknown,
};
name: []const u8, name: []const u8,
number: u31, number: u31,
version: Version, version: u32,
extension_type: ?ExtensionType, extension_type: ?ExtensionType,
depends: []const []const u8, // Other extensions depends: []const []const u8, // Other extensions
promoted_to: Promotion, promoted_to: Promotion,
@@ -208,13 +173,9 @@ pub const Extension = struct {
pub const Require = struct { pub const Require = struct {
pub const EnumExtension = struct { pub const EnumExtension = struct {
pub const Value = union(enum) {
field: Enum.Field,
new_api_constant_expr: []const u8,
};
extends: []const u8, extends: []const u8,
extnumber: ?u31, extnumber: ?u31,
value: Value, field: Enum.Field,
}; };
extends: []EnumExtension, extends: []EnumExtension,

generator/vulkan/render.zig (new file, 1364 lines)

File diff suppressed because it is too large

generator/xml.zig (new file, 664 lines)

@@ -0,0 +1,664 @@
const std = @import("std");
const mem = std.mem;
const testing = std.testing;
const Allocator = mem.Allocator;
const ArenaAllocator = std.heap.ArenaAllocator;
const ArrayList = std.ArrayList;
pub const Attribute = struct {
name: []const u8,
value: []const u8,
};
pub const Content = union(enum) {
CharData: []const u8,
Comment: []const u8,
Element: *Element,
};
pub const Element = struct {
pub const AttributeList = ArrayList(*Attribute);
pub const ContentList = ArrayList(Content);
tag: []const u8,
attributes: AttributeList,
children: ContentList,
fn init(tag: []const u8, alloc: *Allocator) Element {
return .{
.tag = tag,
.attributes = AttributeList.init(alloc),
.children = ContentList.init(alloc),
};
}
pub fn getAttribute(self: *Element, attrib_name: []const u8) ?[]const u8 {
for (self.attributes.items) |child| {
if (mem.eql(u8, child.name, attrib_name)) {
return child.value;
}
}
return null;
}
pub fn getCharData(self: *Element, child_tag: []const u8) ?[]const u8 {
const child = self.findChildByTag(child_tag) orelse return null;
if (child.children.items.len != 1) {
return null;
}
return switch (child.children.items[0]) {
.CharData => |char_data| char_data,
else => null,
};
}
pub fn iterator(self: *Element) ChildIterator {
return .{
.items = self.children.items,
.i = 0,
};
}
pub fn elements(self: *Element) ChildElementIterator {
return .{
.inner = self.iterator(),
};
}
pub fn findChildByTag(self: *Element, tag: []const u8) ?*Element {
return self.findChildrenByTag(tag).next();
}
pub fn findChildrenByTag(self: *Element, tag: []const u8) FindChildrenByTagIterator {
return .{
.inner = self.elements(),
.tag = tag,
};
}
pub const ChildIterator = struct {
items: []Content,
i: usize,
pub fn next(self: *ChildIterator) ?*Content {
if (self.i < self.items.len) {
self.i += 1;
return &self.items[self.i - 1];
}
return null;
}
};
pub const ChildElementIterator = struct {
inner: ChildIterator,
pub fn next(self: *ChildElementIterator) ?*Element {
while (self.inner.next()) |child| {
if (child.* != .Element) {
continue;
}
return child.*.Element;
}
return null;
}
};
pub const FindChildrenByTagIterator = struct {
inner: ChildElementIterator,
tag: []const u8,
pub fn next(self: *FindChildrenByTagIterator) ?*Element {
while (self.inner.next()) |child| {
if (!mem.eql(u8, child.tag, self.tag)) {
continue;
}
return child;
}
return null;
}
};
};
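// Hypothetical usage sketch (not part of this file): walking a parsed element
// with the iterators above, mirroring how the generator counts "define" types.
fn countDefineTypes(types: *Element) usize {
    var n: usize = 0;
    var it = types.findChildrenByTag("type");
    while (it.next()) |ty| {
        const category = ty.getAttribute("category") orelse continue;
        if (mem.eql(u8, category, "define")) n += 1;
    }
    return n;
}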
pub const XmlDecl = struct {
version: []const u8,
encoding: ?[]const u8,
standalone: ?bool,
};
pub const Document = struct {
arena: ArenaAllocator,
xml_decl: ?*XmlDecl,
root: *Element,
pub fn deinit(self: Document) void {
var arena = self.arena; // Copy to stack so self can be taken by value.
arena.deinit();
}
};
const ParseContext = struct {
source: []const u8,
offset: usize,
line: usize,
column: usize,
fn init(source: []const u8) ParseContext {
return .{
.source = source,
.offset = 0,
.line = 0,
.column = 0,
};
}
fn peek(self: *ParseContext) ?u8 {
return if (self.offset < self.source.len) self.source[self.offset] else null;
}
fn consume(self: *ParseContext) !u8 {
if (self.offset < self.source.len) {
return self.consumeNoEof();
}
return error.UnexpectedEof;
}
fn consumeNoEof(self: *ParseContext) u8 {
std.debug.assert(self.offset < self.source.len);
const c = self.source[self.offset];
self.offset += 1;
if (c == '\n') {
self.line += 1;
self.column = 0;
} else {
self.column += 1;
}
return c;
}
fn eat(self: *ParseContext, char: u8) bool {
self.expect(char) catch return false;
return true;
}
fn expect(self: *ParseContext, expected: u8) !void {
if (self.peek()) |actual| {
if (expected != actual) {
return error.UnexpectedCharacter;
}
_ = self.consumeNoEof();
return;
}
return error.UnexpectedEof;
}
fn eatStr(self: *ParseContext, text: []const u8) bool {
self.expectStr(text) catch return false;
return true;
}
fn expectStr(self: *ParseContext, text: []const u8) !void {
if (self.source.len < self.offset + text.len) {
return error.UnexpectedEof;
} else if (std.mem.startsWith(u8, self.source[self.offset..], text)) {
var i: usize = 0;
while (i < text.len) : (i += 1) {
_ = self.consumeNoEof();
}
return;
}
return error.UnexpectedCharacter;
}
fn eatWs(self: *ParseContext) bool {
var ws = false;
while (self.peek()) |ch| {
switch (ch) {
' ', '\t', '\n', '\r' => {
ws = true;
_ = self.consumeNoEof();
},
else => break,
}
}
return ws;
}
fn expectWs(self: *ParseContext) !void {
if (!self.eatWs()) return error.UnexpectedCharacter;
}
fn currentLine(self: ParseContext) []const u8 {
var begin: usize = 0;
if (mem.lastIndexOfScalar(u8, self.source[0..self.offset], '\n')) |prev_nl| {
begin = prev_nl + 1;
}
var end = mem.indexOfScalarPos(u8, self.source, self.offset, '\n') orelse self.source.len;
return self.source[begin..end];
}
};
test "ParseContext" {
{
var ctx = ParseContext.init("I like pythons");
try testing.expectEqual(@as(?u8, 'I'), ctx.peek());
try testing.expectEqual(@as(u8, 'I'), ctx.consumeNoEof());
try testing.expectEqual(@as(?u8, ' '), ctx.peek());
try testing.expectEqual(@as(u8, ' '), try ctx.consume());
try testing.expect(ctx.eat('l'));
try testing.expectEqual(@as(?u8, 'i'), ctx.peek());
try testing.expectEqual(false, ctx.eat('a'));
try testing.expectEqual(@as(?u8, 'i'), ctx.peek());
try ctx.expect('i');
try testing.expectEqual(@as(?u8, 'k'), ctx.peek());
try testing.expectError(error.UnexpectedCharacter, ctx.expect('a'));
try testing.expectEqual(@as(?u8, 'k'), ctx.peek());
try testing.expect(ctx.eatStr("ke"));
try testing.expectEqual(@as(?u8, ' '), ctx.peek());
try testing.expect(ctx.eatWs());
try testing.expectEqual(@as(?u8, 'p'), ctx.peek());
try testing.expectEqual(false, ctx.eatWs());
try testing.expectEqual(@as(?u8, 'p'), ctx.peek());
try testing.expectEqual(false, ctx.eatStr("aaaaaaaaa"));
try testing.expectEqual(@as(?u8, 'p'), ctx.peek());
try testing.expectError(error.UnexpectedEof, ctx.expectStr("aaaaaaaaa"));
try testing.expectEqual(@as(?u8, 'p'), ctx.peek());
try testing.expectError(error.UnexpectedCharacter, ctx.expectStr("pytn"));
try testing.expectEqual(@as(?u8, 'p'), ctx.peek());
try ctx.expectStr("python");
try testing.expectEqual(@as(?u8, 's'), ctx.peek());
}
{
var ctx = ParseContext.init("");
try testing.expectEqual(ctx.peek(), null);
try testing.expectError(error.UnexpectedEof, ctx.consume());
try testing.expectEqual(ctx.eat('p'), false);
try testing.expectError(error.UnexpectedEof, ctx.expect('p'));
}
}
pub const ParseError = error{
IllegalCharacter,
UnexpectedEof,
UnexpectedCharacter,
UnclosedValue,
UnclosedComment,
InvalidName,
InvalidEntity,
InvalidStandaloneValue,
NonMatchingClosingTag,
InvalidDocument,
OutOfMemory,
};
pub fn parse(backing_allocator: *Allocator, source: []const u8) !Document {
var ctx = ParseContext.init(source);
return try parseDocument(&ctx, backing_allocator);
}
fn parseDocument(ctx: *ParseContext, backing_allocator: *Allocator) !Document {
var doc = Document{
.arena = ArenaAllocator.init(backing_allocator),
.xml_decl = null,
.root = undefined,
};
errdefer doc.deinit();
try trySkipComments(ctx, &doc.arena.allocator);
doc.xml_decl = try tryParseProlog(ctx, &doc.arena.allocator);
_ = ctx.eatWs();
try trySkipComments(ctx, &doc.arena.allocator);
doc.root = (try tryParseElement(ctx, &doc.arena.allocator)) orelse return error.InvalidDocument;
_ = ctx.eatWs();
try trySkipComments(ctx, &doc.arena.allocator);
if (ctx.peek() != null) return error.InvalidDocument;
return doc;
}
fn parseAttrValue(ctx: *ParseContext, alloc: *Allocator) ![]const u8 {
const quote = try ctx.consume();
if (quote != '"' and quote != '\'') return error.UnexpectedCharacter;
const begin = ctx.offset;
while (true) {
const c = ctx.consume() catch return error.UnclosedValue;
if (c == quote) break;
}
const end = ctx.offset - 1;
return try dupeAndUnescape(alloc, ctx.source[begin..end]);
}
fn parseEqAttrValue(ctx: *ParseContext, alloc: *Allocator) ![]const u8 {
_ = ctx.eatWs();
try ctx.expect('=');
_ = ctx.eatWs();
return try parseAttrValue(ctx, alloc);
}
fn parseNameNoDupe(ctx: *ParseContext) ![]const u8 {
// XML's spec on names is very long, so to make this easier
// we just take any character that is not special and not whitespace
const begin = ctx.offset;
while (ctx.peek()) |ch| {
switch (ch) {
' ', '\t', '\n', '\r' => break,
'&', '"', '\'', '<', '>', '?', '=', '/' => break,
else => _ = ctx.consumeNoEof(),
}
}
const end = ctx.offset;
if (begin == end) return error.InvalidName;
return ctx.source[begin..end];
}
fn tryParseCharData(ctx: *ParseContext, alloc: *Allocator) !?[]const u8 {
const begin = ctx.offset;
while (ctx.peek()) |ch| {
switch (ch) {
'<' => break,
else => _ = ctx.consumeNoEof(),
}
}
const end = ctx.offset;
if (begin == end) return null;
return try dupeAndUnescape(alloc, ctx.source[begin..end]);
}
fn parseContent(ctx: *ParseContext, alloc: *Allocator) ParseError!Content {
if (try tryParseCharData(ctx, alloc)) |cd| {
return Content{ .CharData = cd };
} else if (try tryParseComment(ctx, alloc)) |comment| {
return Content{ .Comment = comment };
} else if (try tryParseElement(ctx, alloc)) |elem| {
return Content{ .Element = elem };
} else {
return error.UnexpectedCharacter;
}
}
fn tryParseAttr(ctx: *ParseContext, alloc: *Allocator) !?*Attribute {
const name = parseNameNoDupe(ctx) catch return null;
_ = ctx.eatWs();
try ctx.expect('=');
_ = ctx.eatWs();
const value = try parseAttrValue(ctx, alloc);
const attr = try alloc.create(Attribute);
attr.name = try mem.dupe(alloc, u8, name);
attr.value = value;
return attr;
}
fn tryParseElement(ctx: *ParseContext, alloc: *Allocator) !?*Element {
const start = ctx.offset;
if (!ctx.eat('<')) return null;
const tag = parseNameNoDupe(ctx) catch {
ctx.offset = start;
return null;
};
const element = try alloc.create(Element);
element.* = Element.init(try std.mem.dupe(alloc, u8, tag), alloc);
while (ctx.eatWs()) {
const attr = (try tryParseAttr(ctx, alloc)) orelse break;
try element.attributes.append(attr);
}
if (ctx.eatStr("/>")) {
return element;
}
try ctx.expect('>');
while (true) {
if (ctx.peek() == null) {
return error.UnexpectedEof;
} else if (ctx.eatStr("</")) {
break;
}
const content = try parseContent(ctx, alloc);
try element.children.append(content);
}
const closing_tag = try parseNameNoDupe(ctx);
if (!std.mem.eql(u8, tag, closing_tag)) {
return error.NonMatchingClosingTag;
}
_ = ctx.eatWs();
try ctx.expect('>');
return element;
}
test "tryParseElement" {
var arena = std.heap.ArenaAllocator.init(testing.allocator);
defer arena.deinit();
var alloc = &arena.allocator;
{
var ctx = ParseContext.init("<= a='b'/>");
try testing.expectEqual(@as(?*Element, null), try tryParseElement(&ctx, alloc));
try testing.expectEqual(@as(?u8, '<'), ctx.peek());
}
{
var ctx = ParseContext.init("<python size='15' color = \"green\"/>");
const elem = try tryParseElement(&ctx, alloc);
try testing.expectEqualSlices(u8, elem.?.tag, "python");
const size_attr = elem.?.attributes.items[0];
try testing.expectEqualSlices(u8, size_attr.name, "size");
try testing.expectEqualSlices(u8, size_attr.value, "15");
const color_attr = elem.?.attributes.items[1];
try testing.expectEqualSlices(u8, color_attr.name, "color");
try testing.expectEqualSlices(u8, color_attr.value, "green");
}
{
var ctx = ParseContext.init("<python>test</python>");
const elem = try tryParseElement(&ctx, alloc);
try testing.expectEqualSlices(u8, elem.?.tag, "python");
try testing.expectEqualSlices(u8, elem.?.children.items[0].CharData, "test");
}
{
var ctx = ParseContext.init("<a>b<c/>d<e/>f<!--g--></a>");
const elem = try tryParseElement(&ctx, alloc);
try testing.expectEqualSlices(u8, elem.?.tag, "a");
try testing.expectEqualSlices(u8, elem.?.children.items[0].CharData, "b");
try testing.expectEqualSlices(u8, elem.?.children.items[1].Element.tag, "c");
try testing.expectEqualSlices(u8, elem.?.children.items[2].CharData, "d");
try testing.expectEqualSlices(u8, elem.?.children.items[3].Element.tag, "e");
try testing.expectEqualSlices(u8, elem.?.children.items[4].CharData, "f");
try testing.expectEqualSlices(u8, elem.?.children.items[5].Comment, "g");
}
}
fn tryParseProlog(ctx: *ParseContext, alloc: *Allocator) !?*XmlDecl {
const start = ctx.offset;
if (!ctx.eatStr("<?") or !mem.eql(u8, try parseNameNoDupe(ctx), "xml")) {
ctx.offset = start;
return null;
}
const decl = try alloc.create(XmlDecl);
decl.encoding = null;
decl.standalone = null;
// Version info is mandatory
try ctx.expectWs();
try ctx.expectStr("version");
decl.version = try parseEqAttrValue(ctx, alloc);
if (ctx.eatWs()) {
// Optional encoding and standalone info
var require_ws = false;
if (ctx.eatStr("encoding")) {
decl.encoding = try parseEqAttrValue(ctx, alloc);
require_ws = true;
}
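// "standalone" must be separated from a preceding "encoding" by whitespace;
// when no encoding was parsed, the enclosing eatWs() already consumed the
// separator, so the eatWs() below is expected to find none.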
if (require_ws == ctx.eatWs() and ctx.eatStr("standalone")) {
const standalone = try parseEqAttrValue(ctx, alloc);
if (std.mem.eql(u8, standalone, "yes")) {
decl.standalone = true;
} else if (std.mem.eql(u8, standalone, "no")) {
decl.standalone = false;
} else {
return error.InvalidStandaloneValue;
}
}
_ = ctx.eatWs();
}
try ctx.expectStr("?>");
return decl;
}
test "tryParseProlog" {
var arena = std.heap.ArenaAllocator.init(testing.allocator);
defer arena.deinit();
var alloc = &arena.allocator;
{
var ctx = ParseContext.init("<?xmla version='aa'?>");
try testing.expectEqual(@as(?*XmlDecl, null), try tryParseProlog(&ctx, alloc));
try testing.expectEqual(@as(?u8, '<'), ctx.peek());
}
{
var ctx = ParseContext.init("<?xml version='aa'?>");
const decl = try tryParseProlog(&ctx, alloc);
try testing.expectEqualSlices(u8, "aa", decl.?.version);
try testing.expectEqual(@as(?[]const u8, null), decl.?.encoding);
try testing.expectEqual(@as(?bool, null), decl.?.standalone);
}
{
var ctx = ParseContext.init("<?xml version=\"aa\" encoding = 'bbb' standalone \t = 'yes'?>");
const decl = try tryParseProlog(&ctx, alloc);
try testing.expectEqualSlices(u8, "aa", decl.?.version);
try testing.expectEqualSlices(u8, "bbb", decl.?.encoding.?);
try testing.expectEqual(@as(?bool, true), decl.?.standalone.?);
}
}
fn trySkipComments(ctx: *ParseContext, alloc: *Allocator) !void {
while (try tryParseComment(ctx, alloc)) |_| {
_ = ctx.eatWs();
}
}
fn tryParseComment(ctx: *ParseContext, alloc: *Allocator) !?[]const u8 {
if (!ctx.eatStr("<!--")) return null;
const begin = ctx.offset;
while (!ctx.eatStr("-->")) {
_ = ctx.consume() catch return error.UnclosedComment;
}
const end = ctx.offset - "-->".len;
return try mem.dupe(alloc, u8, ctx.source[begin..end]);
}
fn unescapeEntity(text: []const u8) !u8 {
const EntitySubstition = struct { text: []const u8, replacement: u8 };
const entities = [_]EntitySubstition{
.{ .text = "&lt;", .replacement = '<' },
.{ .text = "&gt;", .replacement = '>' },
.{ .text = "&amp;", .replacement = '&' },
.{ .text = "&apos;", .replacement = '\'' },
.{ .text = "&quot;", .replacement = '"' },
};
for (entities) |entity| {
if (std.mem.eql(u8, text, entity.text)) return entity.replacement;
}
return error.InvalidEntity;
}
fn dupeAndUnescape(alloc: *Allocator, text: []const u8) ![]const u8 {
const str = try alloc.alloc(u8, text.len);
var j: usize = 0;
var i: usize = 0;
while (i < text.len) : (j += 1) {
if (text[i] == '&') {
const entity_end = 1 + (mem.indexOfScalarPos(u8, text, i, ';') orelse return error.InvalidEntity);
str[j] = try unescapeEntity(text[i..entity_end]);
i = entity_end;
} else {
str[j] = text[i];
i += 1;
}
}
return alloc.shrink(str, j);
}
test "dupeAndUnescape" {
var arena = std.heap.ArenaAllocator.init(testing.allocator);
defer arena.deinit();
var alloc = &arena.allocator;
try testing.expectEqualSlices(u8, "test", try dupeAndUnescape(alloc, "test"));
try testing.expectEqualSlices(u8, "a<b&c>d\"e'f<", try dupeAndUnescape(alloc, "a&lt;b&amp;c&gt;d&quot;e&apos;f&lt;"));
try testing.expectError(error.InvalidEntity, dupeAndUnescape(alloc, "python&"));
try testing.expectError(error.InvalidEntity, dupeAndUnescape(alloc, "python&&"));
try testing.expectError(error.InvalidEntity, dupeAndUnescape(alloc, "python&test;"));
try testing.expectError(error.InvalidEntity, dupeAndUnescape(alloc, "python&boa"));
}
test "Top level comments" {
var arena = std.heap.ArenaAllocator.init(testing.allocator);
defer arena.deinit();
var alloc = &arena.allocator;
const doc = try parse(alloc, "<?xml version='aa'?><!--comment--><python color='green'/><!--another comment-->");
try testing.expectEqualSlices(u8, "python", doc.root.tag);
}


@@ -1,186 +0,0 @@
const std = @import("std");
const generator = @import("vulkan/generator.zig");
fn invalidUsage(prog_name: []const u8, comptime fmt: []const u8, args: anytype) noreturn {
std.log.err(fmt, args);
std.log.err("see {s} --help for usage", .{prog_name});
std.process.exit(1);
}
fn reportParseErrors(tree: std.zig.Ast) !void {
var buf: [1024]u8 = undefined;
var stderr = std.fs.File.stderr().writer(&buf);
const w = &stderr.interface;
for (tree.errors) |err| {
const loc = tree.tokenLocation(0, err.token);
try w.print("(vulkan-zig error):{}:{}: error: ", .{ loc.line + 1, loc.column + 1 });
try tree.renderError(err, w);
try w.print("\n{s}\n", .{tree.source[loc.line_start..loc.line_end]});
for (0..loc.column) |_| {
try w.writeAll(" ");
}
try w.writeAll("^\n");
}
}
fn oomPanic() noreturn {
@panic("Out of memory");
}
pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const allocator = arena.allocator();
var args = std.process.argsWithAllocator(allocator) catch |err| switch (err) {
error.OutOfMemory => oomPanic(),
};
const prog_name = args.next() orelse "vulkan-zig-generator";
var maybe_xml_path: ?[]const u8 = null;
var maybe_out_path: ?[]const u8 = null;
var maybe_video_xml_path: ?[]const u8 = null;
var debug: bool = false;
var api = generator.Api.vulkan;
while (args.next()) |arg| {
if (std.mem.eql(u8, arg, "--help") or std.mem.eql(u8, arg, "-h")) {
@setEvalBranchQuota(2000);
var buf: [1024]u8 = undefined;
var w = std.fs.File.stdout().writer(&buf);
w.interface.print(
\\Utility to generate a Zig binding from the Vulkan XML API registry.
\\
\\The most recent Vulkan XML API registry can be obtained from
\\https://github.com/KhronosGroup/Vulkan-Docs/blob/master/xml/vk.xml,
\\and the most recent LunarG Vulkan SDK version can be found at
\\$VULKAN_SDK/x86_64/share/vulkan/registry/vk.xml.
\\
\\Usage: {s} [options] <spec xml path> <output zig source>
\\Options:
\\-h --help show this message and exit.
\\-a --api <api> Generate API for 'vulkan' or 'vulkansc'. Defaults to 'vulkan'.
\\--debug Write out unformatted source if does not parse correctly.
\\--video <path> Also generate Vulkan Video API bindings from video.xml
\\ registry at <path>.
\\
,
.{prog_name},
) catch |err| {
std.process.fatal("failed to write to stdout: {s}", .{@errorName(err)});
};
return;
} else if (std.mem.eql(u8, arg, "-a") or std.mem.eql(u8, arg, "--api")) {
const api_str = args.next() orelse {
invalidUsage(prog_name, "{s} expects argument <api>", .{arg});
};
api = std.meta.stringToEnum(generator.Api, api_str) orelse {
invalidUsage(prog_name, "invalid api '{s}'", .{api_str});
};
} else if (std.mem.eql(u8, arg, "--debug")) {
debug = true;
} else if (std.mem.eql(u8, arg, "--video")) {
maybe_video_xml_path = args.next() orelse {
invalidUsage(prog_name, "{s} expects argument <path>", .{arg});
};
} else if (maybe_xml_path == null) {
maybe_xml_path = arg;
} else if (maybe_out_path == null) {
maybe_out_path = arg;
} else {
invalidUsage(prog_name, "superficial argument '{s}'", .{arg});
}
}
const xml_path = maybe_xml_path orelse {
invalidUsage(prog_name, "missing required argument <spec xml path>", .{});
};
const out_path = maybe_out_path orelse {
invalidUsage(prog_name, "missing required argument <output zig source>", .{});
};
const cwd = std.fs.cwd();
const xml_src = cwd.readFileAlloc(xml_path, allocator, .unlimited) catch |err| {
std.process.fatal("failed to open input file '{s}' ({s})", .{ xml_path, @errorName(err) });
};
const maybe_video_xml_src = if (maybe_video_xml_path) |video_xml_path|
cwd.readFileAlloc(video_xml_path, allocator, .unlimited) catch |err| {
std.process.fatal("failed to open input file '{s}' ({s})", .{ video_xml_path, @errorName(err) });
}
else
null;
var aw: std.Io.Writer.Allocating = .init(allocator);
generator.generate(allocator, api, xml_src, maybe_video_xml_src, &aw.writer) catch |err| {
if (debug) {
return err;
}
switch (err) {
error.InvalidXml => {
std.log.err("invalid vulkan registry - invalid xml", .{});
std.log.err("please check that the correct vk.xml file is passed", .{});
std.process.exit(1);
},
error.InvalidRegistry => {
std.log.err("invalid vulkan registry - registry is valid xml but contents are invalid", .{});
std.log.err("please check that the correct vk.xml file is passed", .{});
std.process.exit(1);
},
error.UnhandledBitfieldStruct => {
std.log.err("unhandled struct with bit fields detected in vk.xml", .{});
std.log.err("this is a bug in vulkan-zig", .{});
std.log.err("please make a bug report at https://github.com/Snektron/vulkan-zig/issues/", .{});
std.process.exit(1);
},
error.OutOfMemory, error.WriteFailed => oomPanic(),
}
};
aw.writer.writeByte(0) catch oomPanic();
const buffered = aw.writer.buffered();
const src = buffered[0 .. buffered.len - 1 :0];
const tree = std.zig.Ast.parse(allocator, src, .zig) catch |err| switch (err) {
error.OutOfMemory => oomPanic(),
};
const formatted = if (tree.errors.len > 0) blk: {
std.log.err("generated invalid zig code", .{});
std.log.err("this is a bug in vulkan-zig", .{});
std.log.err("please make a bug report at https://github.com/Snektron/vulkan-zig/issues/", .{});
std.log.err("or run with --debug to write out unformatted source", .{});
reportParseErrors(tree) catch |err| {
std.process.fatal("failed to dump ast errors: {s}", .{@errorName(err)});
};
if (debug) {
break :blk src;
}
std.process.exit(1);
} else tree.renderAlloc(allocator) catch |err| switch (err) {
error.OutOfMemory => oomPanic(),
};
if (std.fs.path.dirname(out_path)) |dir| {
cwd.makePath(dir) catch |err| {
std.process.fatal("failed to create output directory '{s}' ({s})", .{ dir, @errorName(err) });
};
}
cwd.writeFile(.{
.sub_path = out_path,
.data = formatted,
}) catch |err| {
std.process.fatal("failed to write to output file '{s}' ({s})", .{ out_path, @errorName(err) });
};
}
test "main" {
_ = @import("xml.zig");
_ = @import("vulkan/c_parse.zig");
}

File diff suppressed because it is too large


@@ -1,638 +0,0 @@
const std = @import("std");
const mem = std.mem;
const testing = std.testing;
const Allocator = mem.Allocator;
const ArenaAllocator = std.heap.ArenaAllocator;
pub const Attribute = struct {
name: []const u8,
value: []const u8,
};
pub const Content = union(enum) {
char_data: []const u8,
comment: []const u8,
element: *Element,
};
pub const Element = struct {
tag: []const u8,
attributes: []Attribute = &.{},
children: []Content = &.{},
pub fn getAttribute(self: Element, attrib_name: []const u8) ?[]const u8 {
for (self.attributes) |child| {
if (mem.eql(u8, child.name, attrib_name)) {
return child.value;
}
}
return null;
}
pub fn getCharData(self: Element, child_tag: []const u8) ?[]const u8 {
const child = self.findChildByTag(child_tag) orelse return null;
if (child.children.len != 1) {
return null;
}
return switch (child.children[0]) {
.char_data => |char_data| char_data,
else => null,
};
}
pub fn iterator(self: Element) ChildIterator {
return .{
.items = self.children,
.i = 0,
};
}
pub fn elements(self: Element) ChildElementIterator {
return .{
.inner = self.iterator(),
};
}
pub fn findChildByTag(self: Element, tag: []const u8) ?*Element {
var it = self.findChildrenByTag(tag);
return it.next();
}
pub fn findChildrenByTag(self: Element, tag: []const u8) FindChildrenByTagIterator {
return .{
.inner = self.elements(),
.tag = tag,
};
}
pub const ChildIterator = struct {
items: []Content,
i: usize,
pub fn next(self: *ChildIterator) ?*Content {
if (self.i < self.items.len) {
self.i += 1;
return &self.items[self.i - 1];
}
return null;
}
};
pub const ChildElementIterator = struct {
inner: ChildIterator,
pub fn next(self: *ChildElementIterator) ?*Element {
while (self.inner.next()) |child| {
if (child.* != .element) {
continue;
}
return child.*.element;
}
return null;
}
};
pub const FindChildrenByTagIterator = struct {
inner: ChildElementIterator,
tag: []const u8,
pub fn next(self: *FindChildrenByTagIterator) ?*Element {
while (self.inner.next()) |child| {
if (!mem.eql(u8, child.tag, self.tag)) {
continue;
}
return child;
}
return null;
}
};
};
pub const Document = struct {
arena: ArenaAllocator,
xml_decl: ?*Element,
root: *Element,
pub fn deinit(self: Document) void {
var arena = self.arena; // Copy to stack so self can be taken by value.
arena.deinit();
}
};
const Parser = struct {
source: []const u8,
offset: usize,
line: usize,
column: usize,
fn init(source: []const u8) Parser {
return .{
.source = source,
.offset = 0,
.line = 0,
.column = 0,
};
}
fn peek(self: *Parser) ?u8 {
return if (self.offset < self.source.len) self.source[self.offset] else null;
}
fn consume(self: *Parser) !u8 {
if (self.offset < self.source.len) {
return self.consumeNoEof();
}
return error.UnexpectedEof;
}
fn consumeNoEof(self: *Parser) u8 {
std.debug.assert(self.offset < self.source.len);
const c = self.source[self.offset];
self.offset += 1;
if (c == '\n') {
self.line += 1;
self.column = 0;
} else {
self.column += 1;
}
return c;
}
fn eat(self: *Parser, char: u8) bool {
self.expect(char) catch return false;
return true;
}
fn expect(self: *Parser, expected: u8) !void {
if (self.peek()) |actual| {
if (expected != actual) {
return error.UnexpectedCharacter;
}
_ = self.consumeNoEof();
return;
}
return error.UnexpectedEof;
}
fn eatStr(self: *Parser, text: []const u8) bool {
self.expectStr(text) catch return false;
return true;
}
fn expectStr(self: *Parser, text: []const u8) !void {
if (self.source.len < self.offset + text.len) {
return error.UnexpectedEof;
} else if (mem.startsWith(u8, self.source[self.offset..], text)) {
var i: usize = 0;
while (i < text.len) : (i += 1) {
_ = self.consumeNoEof();
}
return;
}
return error.UnexpectedCharacter;
}
fn eatWs(self: *Parser) bool {
var ws = false;
while (self.peek()) |ch| {
switch (ch) {
' ', '\t', '\n', '\r' => {
ws = true;
_ = self.consumeNoEof();
},
else => break,
}
}
return ws;
}
fn expectWs(self: *Parser) !void {
if (!self.eatWs()) return error.UnexpectedCharacter;
}
fn currentLine(self: Parser) []const u8 {
var begin: usize = 0;
if (mem.lastIndexOfScalar(u8, self.source[0..self.offset], '\n')) |prev_nl| {
begin = prev_nl + 1;
}
const end = mem.indexOfScalarPos(u8, self.source, self.offset, '\n') orelse self.source.len;
return self.source[begin..end];
}
};
test "xml: Parser" {
{
var parser = Parser.init("I like pythons");
try testing.expectEqual(@as(?u8, 'I'), parser.peek());
try testing.expectEqual(@as(u8, 'I'), parser.consumeNoEof());
try testing.expectEqual(@as(?u8, ' '), parser.peek());
try testing.expectEqual(@as(u8, ' '), try parser.consume());
try testing.expect(parser.eat('l'));
try testing.expectEqual(@as(?u8, 'i'), parser.peek());
try testing.expectEqual(false, parser.eat('a'));
try testing.expectEqual(@as(?u8, 'i'), parser.peek());
try parser.expect('i');
try testing.expectEqual(@as(?u8, 'k'), parser.peek());
try testing.expectError(error.UnexpectedCharacter, parser.expect('a'));
try testing.expectEqual(@as(?u8, 'k'), parser.peek());
try testing.expect(parser.eatStr("ke"));
try testing.expectEqual(@as(?u8, ' '), parser.peek());
try testing.expect(parser.eatWs());
try testing.expectEqual(@as(?u8, 'p'), parser.peek());
try testing.expectEqual(false, parser.eatWs());
try testing.expectEqual(@as(?u8, 'p'), parser.peek());
try testing.expectEqual(false, parser.eatStr("aaaaaaaaa"));
try testing.expectEqual(@as(?u8, 'p'), parser.peek());
try testing.expectError(error.UnexpectedEof, parser.expectStr("aaaaaaaaa"));
try testing.expectEqual(@as(?u8, 'p'), parser.peek());
try testing.expectError(error.UnexpectedCharacter, parser.expectStr("pytn"));
try testing.expectEqual(@as(?u8, 'p'), parser.peek());
try parser.expectStr("python");
try testing.expectEqual(@as(?u8, 's'), parser.peek());
}
{
var parser = Parser.init("");
try testing.expectEqual(parser.peek(), null);
try testing.expectError(error.UnexpectedEof, parser.consume());
try testing.expectEqual(parser.eat('p'), false);
try testing.expectError(error.UnexpectedEof, parser.expect('p'));
}
}
pub const ParseError = error{
IllegalCharacter,
UnexpectedEof,
UnexpectedCharacter,
UnclosedValue,
UnclosedComment,
InvalidName,
InvalidEntity,
InvalidStandaloneValue,
NonMatchingClosingTag,
InvalidDocument,
OutOfMemory,
};
pub fn parse(backing_allocator: Allocator, source: []const u8) !Document {
var parser = Parser.init(source);
return try parseDocument(&parser, backing_allocator);
}
fn parseDocument(parser: *Parser, backing_allocator: Allocator) !Document {
var doc = Document{
.arena = ArenaAllocator.init(backing_allocator),
.xml_decl = null,
.root = undefined,
};
errdefer doc.deinit();
const allocator = doc.arena.allocator();
try skipComments(parser, allocator);
doc.xml_decl = try parseElement(parser, allocator, .xml_decl);
_ = parser.eatWs();
try skipComments(parser, allocator);
doc.root = (try parseElement(parser, allocator, .element)) orelse return error.InvalidDocument;
_ = parser.eatWs();
try skipComments(parser, allocator);
if (parser.peek() != null) return error.InvalidDocument;
return doc;
}
fn parseAttrValue(parser: *Parser, alloc: Allocator) ![]const u8 {
const quote = try parser.consume();
if (quote != '"' and quote != '\'') return error.UnexpectedCharacter;
const begin = parser.offset;
while (true) {
const c = parser.consume() catch return error.UnclosedValue;
if (c == quote) break;
}
const end = parser.offset - 1;
return try unescape(alloc, parser.source[begin..end]);
}
fn parseEqAttrValue(parser: *Parser, alloc: Allocator) ![]const u8 {
_ = parser.eatWs();
try parser.expect('=');
_ = parser.eatWs();
return try parseAttrValue(parser, alloc);
}
fn parseNameNoDupe(parser: *Parser) ![]const u8 {
// XML's spec on names is very long, so to make this easier
// we just take any character that is not special and not whitespace
const begin = parser.offset;
while (parser.peek()) |ch| {
switch (ch) {
' ', '\t', '\n', '\r' => break,
'&', '"', '\'', '<', '>', '?', '=', '/' => break,
else => _ = parser.consumeNoEof(),
}
}
const end = parser.offset;
if (begin == end) return error.InvalidName;
return parser.source[begin..end];
}
fn parseCharData(parser: *Parser, alloc: Allocator) !?[]const u8 {
const begin = parser.offset;
while (parser.peek()) |ch| {
switch (ch) {
'<' => break,
else => _ = parser.consumeNoEof(),
}
}
const end = parser.offset;
if (begin == end) return null;
return try unescape(alloc, parser.source[begin..end]);
}
fn parseContent(parser: *Parser, alloc: Allocator) ParseError!Content {
if (try parseCharData(parser, alloc)) |cd| {
return Content{ .char_data = cd };
} else if (try parseComment(parser, alloc)) |comment| {
return Content{ .comment = comment };
} else if (try parseElement(parser, alloc, .element)) |elem| {
return Content{ .element = elem };
} else {
return error.UnexpectedCharacter;
}
}
fn parseAttr(parser: *Parser, alloc: Allocator) !?Attribute {
const name = parseNameNoDupe(parser) catch return null;
_ = parser.eatWs();
try parser.expect('=');
_ = parser.eatWs();
const value = try parseAttrValue(parser, alloc);
const attr = Attribute{
.name = try alloc.dupe(u8, name),
.value = value,
};
return attr;
}
const ElementKind = enum {
xml_decl,
element,
};
fn parseElement(parser: *Parser, alloc: Allocator, comptime kind: ElementKind) !?*Element {
const start = parser.offset;
const tag = switch (kind) {
.xml_decl => blk: {
if (!parser.eatStr("<?") or !mem.eql(u8, try parseNameNoDupe(parser), "xml")) {
parser.offset = start;
return null;
}
break :blk "xml";
},
.element => blk: {
if (!parser.eat('<')) return null;
const tag = parseNameNoDupe(parser) catch {
parser.offset = start;
return null;
};
break :blk tag;
},
};
var attributes: std.ArrayList(Attribute) = .empty;
defer attributes.deinit(alloc);
var children: std.ArrayList(Content) = .empty;
defer children.deinit(alloc);
while (parser.eatWs()) {
const attr = (try parseAttr(parser, alloc)) orelse break;
try attributes.append(alloc, attr);
}
switch (kind) {
.xml_decl => try parser.expectStr("?>"),
.element => {
if (!parser.eatStr("/>")) {
try parser.expect('>');
while (true) {
if (parser.peek() == null) {
return error.UnexpectedEof;
} else if (parser.eatStr("</")) {
break;
}
const content = try parseContent(parser, alloc);
try children.append(alloc, content);
}
const closing_tag = try parseNameNoDupe(parser);
if (!mem.eql(u8, tag, closing_tag)) {
return error.NonMatchingClosingTag;
}
_ = parser.eatWs();
try parser.expect('>');
}
},
}
const element = try alloc.create(Element);
element.* = .{
.tag = try alloc.dupe(u8, tag),
.attributes = try attributes.toOwnedSlice(alloc),
.children = try children.toOwnedSlice(alloc),
};
return element;
}
test "xml: parseElement" {
var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();
const alloc = arena.allocator();
{
var parser = Parser.init("<= a='b'/>");
try testing.expectEqual(@as(?*Element, null), try parseElement(&parser, alloc, .element));
try testing.expectEqual(@as(?u8, '<'), parser.peek());
}
{
var parser = Parser.init("<python size='15' color = \"green\"/>");
const elem = try parseElement(&parser, alloc, .element);
try testing.expectEqualSlices(u8, elem.?.tag, "python");
const size_attr = elem.?.attributes[0];
try testing.expectEqualSlices(u8, size_attr.name, "size");
try testing.expectEqualSlices(u8, size_attr.value, "15");
const color_attr = elem.?.attributes[1];
try testing.expectEqualSlices(u8, color_attr.name, "color");
try testing.expectEqualSlices(u8, color_attr.value, "green");
}
{
var parser = Parser.init("<python>test</python>");
const elem = try parseElement(&parser, alloc, .element);
try testing.expectEqualSlices(u8, elem.?.tag, "python");
try testing.expectEqualSlices(u8, elem.?.children[0].char_data, "test");
}
{
var parser = Parser.init("<a>b<c/>d<e/>f<!--g--></a>");
const elem = try parseElement(&parser, alloc, .element);
try testing.expectEqualSlices(u8, elem.?.tag, "a");
try testing.expectEqualSlices(u8, elem.?.children[0].char_data, "b");
try testing.expectEqualSlices(u8, elem.?.children[1].element.tag, "c");
try testing.expectEqualSlices(u8, elem.?.children[2].char_data, "d");
try testing.expectEqualSlices(u8, elem.?.children[3].element.tag, "e");
try testing.expectEqualSlices(u8, elem.?.children[4].char_data, "f");
try testing.expectEqualSlices(u8, elem.?.children[5].comment, "g");
}
}
test "xml: parse prolog" {
var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();
const a = arena.allocator();
{
var parser = Parser.init("<?xmla version='aa'?>");
try testing.expectEqual(@as(?*Element, null), try parseElement(&parser, a, .xml_decl));
try testing.expectEqual(@as(?u8, '<'), parser.peek());
}
{
var parser = Parser.init("<?xml version='aa'?>");
const decl = try parseElement(&parser, a, .xml_decl);
try testing.expectEqualSlices(u8, "aa", decl.?.getAttribute("version").?);
try testing.expectEqual(@as(?[]const u8, null), decl.?.getAttribute("encoding"));
try testing.expectEqual(@as(?[]const u8, null), decl.?.getAttribute("standalone"));
}
{
var parser = Parser.init("<?xml version=\"ccc\" encoding = 'bbb' standalone \t = 'yes'?>");
const decl = try parseElement(&parser, a, .xml_decl);
try testing.expectEqualSlices(u8, "ccc", decl.?.getAttribute("version").?);
try testing.expectEqualSlices(u8, "bbb", decl.?.getAttribute("encoding").?);
try testing.expectEqualSlices(u8, "yes", decl.?.getAttribute("standalone").?);
}
}
fn skipComments(parser: *Parser, alloc: Allocator) !void {
while ((try parseComment(parser, alloc)) != null) {
_ = parser.eatWs();
}
}
fn parseComment(parser: *Parser, alloc: Allocator) !?[]const u8 {
if (!parser.eatStr("<!--")) return null;
const begin = parser.offset;
while (!parser.eatStr("-->")) {
_ = parser.consume() catch return error.UnclosedComment;
}
const end = parser.offset - "-->".len;
return try alloc.dupe(u8, parser.source[begin..end]);
}
fn unescapeEntity(text: []const u8) !u8 {
const EntitySubstition = struct { text: []const u8, replacement: u8 };
const entities = [_]EntitySubstition{
.{ .text = "&lt;", .replacement = '<' },
.{ .text = "&gt;", .replacement = '>' },
.{ .text = "&amp;", .replacement = '&' },
.{ .text = "&apos;", .replacement = '\'' },
.{ .text = "&quot;", .replacement = '"' },
};
for (entities) |entity| {
if (mem.eql(u8, text, entity.text)) return entity.replacement;
}
return error.InvalidEntity;
}
fn unescape(arena: Allocator, text: []const u8) ![]const u8 {
const unescaped = try arena.alloc(u8, text.len);
var j: usize = 0;
var i: usize = 0;
while (i < text.len) : (j += 1) {
if (text[i] == '&') {
const entity_end = 1 + (mem.indexOfScalarPos(u8, text, i, ';') orelse return error.InvalidEntity);
unescaped[j] = try unescapeEntity(text[i..entity_end]);
i = entity_end;
} else {
unescaped[j] = text[i];
i += 1;
}
}
return unescaped[0..j];
}
test "xml: unescape" {
var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();
const a = arena.allocator();
try testing.expectEqualSlices(u8, "test", try unescape(a, "test"));
try testing.expectEqualSlices(u8, "a<b&c>d\"e'f<", try unescape(a, "a&lt;b&amp;c&gt;d&quot;e&apos;f&lt;"));
try testing.expectError(error.InvalidEntity, unescape(a, "python&"));
try testing.expectError(error.InvalidEntity, unescape(a, "python&&"));
try testing.expectError(error.InvalidEntity, unescape(a, "python&test;"));
try testing.expectError(error.InvalidEntity, unescape(a, "python&boa"));
}
test "xml: top level comments" {
var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();
const a = arena.allocator();
const doc = try parse(a, "<?xml version='aa'?><!--comment--><python color='green'/><!--another comment-->");
try testing.expectEqualSlices(u8, "python", doc.root.tag);
}


@@ -1,121 +0,0 @@
const std = @import("std");
const vk = @import("vulkan");
// Provide bogus defaults for unknown platform types
// The actual type does not really matter here...
pub const GgpStreamDescriptor = u32;
pub const GgpFrameToken = u32;
pub const _screen_buffer = u32;
pub const NvSciSyncAttrList = u32;
pub const NvSciSyncObj = u32;
pub const NvSciSyncFence = u32;
pub const NvSciBufAttrList = u32;
pub const NvSciBufObj = u32;
pub const ANativeWindow = u32;
pub const AHardwareBuffer = u32;
pub const CAMetalLayer = u32;
pub const MTLDevice_id = u32;
pub const MTLCommandQueue_id = u32;
pub const MTLBuffer_id = u32;
pub const MTLTexture_id = u32;
pub const MTLSharedEvent_id = u32;
pub const IOSurfaceRef = u32;
pub const StdVideoH264ProfileIdc = u32;
pub const StdVideoH264LevelIdc = u32;
pub const StdVideoH264ChromaFormatIdc = u32;
pub const StdVideoH264PocType = u32;
pub const StdVideoH264SpsFlags = u32;
pub const StdVideoH264ScalingLists = u32;
pub const StdVideoH264SequenceParameterSetVui = u32;
pub const StdVideoH264AspectRatioIdc = u32;
pub const StdVideoH264HrdParameters = u32;
pub const StdVideoH264SpsVuiFlags = u32;
pub const StdVideoH264WeightedBipredIdc = u32;
pub const StdVideoH264PpsFlags = u32;
pub const StdVideoH264SliceType = u32;
pub const StdVideoH264CabacInitIdc = u32;
pub const StdVideoH264DisableDeblockingFilterIdc = u32;
pub const StdVideoH264PictureType = u32;
pub const StdVideoH264ModificationOfPicNumsIdc = u32;
pub const StdVideoH264MemMgmtControlOp = u32;
pub const StdVideoDecodeH264PictureInfo = u32;
pub const StdVideoDecodeH264ReferenceInfo = u32;
pub const StdVideoDecodeH264PictureInfoFlags = u32;
pub const StdVideoDecodeH264ReferenceInfoFlags = u32;
pub const StdVideoH264SequenceParameterSet = u32;
pub const StdVideoH264PictureParameterSet = u32;
pub const StdVideoH265ProfileIdc = u32;
pub const StdVideoH265VideoParameterSet = u32;
pub const StdVideoH265SequenceParameterSet = u32;
pub const StdVideoH265PictureParameterSet = u32;
pub const StdVideoH265DecPicBufMgr = u32;
pub const StdVideoH265HrdParameters = u32;
pub const StdVideoH265VpsFlags = u32;
pub const StdVideoH265LevelIdc = u32;
pub const StdVideoH265SpsFlags = u32;
pub const StdVideoH265ScalingLists = u32;
pub const StdVideoH265SequenceParameterSetVui = u32;
pub const StdVideoH265PredictorPaletteEntries = u32;
pub const StdVideoH265PpsFlags = u32;
pub const StdVideoH265SubLayerHrdParameters = u32;
pub const StdVideoH265HrdFlags = u32;
pub const StdVideoH265SpsVuiFlags = u32;
pub const StdVideoH265SliceType = u32;
pub const StdVideoH265PictureType = u32;
pub const StdVideoDecodeH265PictureInfo = u32;
pub const StdVideoDecodeH265ReferenceInfo = u32;
pub const StdVideoDecodeH265PictureInfoFlags = u32;
pub const StdVideoDecodeH265ReferenceInfoFlags = u32;
pub const StdVideoAV1Profile = u32;
pub const StdVideoAV1Level = u32;
pub const StdVideoAV1SequenceHeader = u32;
pub const StdVideoDecodeAV1PictureInfo = u32;
pub const StdVideoDecodeAV1ReferenceInfo = u32;
pub const StdVideoEncodeH264SliceHeader = u32;
pub const StdVideoEncodeH264PictureInfo = u32;
pub const StdVideoEncodeH264ReferenceInfo = u32;
pub const StdVideoEncodeH264SliceHeaderFlags = u32;
pub const StdVideoEncodeH264ReferenceListsInfo = u32;
pub const StdVideoEncodeH264PictureInfoFlags = u32;
pub const StdVideoEncodeH264ReferenceInfoFlags = u32;
pub const StdVideoEncodeH264RefMgmtFlags = u32;
pub const StdVideoEncodeH264RefListModEntry = u32;
pub const StdVideoEncodeH264RefPicMarkingEntry = u32;
pub const StdVideoEncodeH265PictureInfoFlags = u32;
pub const StdVideoEncodeH265PictureInfo = u32;
pub const StdVideoEncodeH265SliceSegmentHeader = u32;
pub const StdVideoEncodeH265ReferenceInfo = u32;
pub const StdVideoEncodeH265ReferenceListsInfo = u32;
pub const StdVideoEncodeH265SliceSegmentHeaderFlags = u32;
pub const StdVideoEncodeH265ReferenceInfoFlags = u32;
pub const StdVideoEncodeH265ReferenceModificationFlags = u32;
pub const StdVideoEncodeAV1OperatingPointInfo = u32;
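// Force semantic analysis of every declaration generated into vk.zig. The eval branch
// quota is raised far above the default because the recursion below visits thousands
// of declarations.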
comptime {
@setEvalBranchQuota(1000000);
reallyRefAllDecls(vk);
}
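// Recursively reference all declarations of a type: structs and unions also have the
// type of every field visited, enums and opaques only their own declarations, and all
// other types are ignored.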
fn reallyRefAllDecls(comptime T: type) void {
switch (@typeInfo(T)) {
.@"struct", .@"union" => {
reallyRefAllContainerDecls(T);
inline for (std.meta.fields(T)) |field| {
reallyRefAllDecls(field.type);
}
},
.@"enum", .@"opaque" => {
reallyRefAllContainerDecls(T);
},
else => {},
}
}
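// Reference every declaration of a container type, recursing into any declaration that
// is itself a type so its own declarations are analyzed as well.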
fn reallyRefAllContainerDecls(comptime T: type) void {
inline for (comptime std.meta.declarations(T)) |decl| {
if (@TypeOf(@field(T, decl.name)) == type) {
reallyRefAllDecls(@field(T, decl.name));
}
}
}