Start adding VMA

This commit is contained in:
parent c77c00cfe1
commit 3cab3f7946

@@ -1,5 +1,17 @@
+#extension GL_EXT_buffer_reference : require
+
 #if VERTEX_SHADER
 
+layout(buffer_reference, std430) readonly buffer CameraMatrices {
+    mat4 view_projection;
+    mat4 projection;
+    mat4 view;
+};
+
+layout(push_constant) uniform constants {
+    CameraMatrices camera_matrices;
+} PushConstants;
+
 vec2 positions[3] = vec2[](
     vec2(-0.5, 0.5),
     vec2(0.5, 0.5),
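On the CPU side, the push-constant block above holds a 64-bit buffer device address; nothing in this commit pushes it yet. A sketch of the follow-up wiring (vulkan-zig style calls; `cmds`, `pipeline_layout` and `gc` are assumed names from the surrounding engine code, not part of this diff):

    // Sketch only. Assumes the buffer was created with
    // .shader_device_address_bit and bufferDeviceAddress is enabled.
    const addr: vk.DeviceAddress = gc.device.getBufferDeviceAddress(&.{
        .buffer = camera_matrices_buffer.handle,
    });
    // The pipeline layout's push-constant range must cover the 8-byte
    // address, i.e. .size = @sizeOf(vk.DeviceAddress).
    cmds.pushConstants(pipeline_layout, .{ .vertex_bit = true }, 0, @sizeOf(vk.DeviceAddress), &addr);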
22	build.zig

@@ -41,6 +41,21 @@ pub fn build(b: *Build) void {
     const vk = buildVulkanWrapper(b, target, optimize);
 
+    const vma_dep = b.dependency("vma", .{});
+    const vma = b.addStaticLibrary(.{
+        .name = "vma",
+        .target = target,
+        .optimize = optimize,
+    });
+    vma.linkLibC();
+    vma.linkLibCpp();
+    vma.addIncludePath(b.dependency("vulkan_headers", .{}).path("include"));
+    vma.addIncludePath(vma_dep.path("include"));
+    vma.installHeadersDirectory(vma_dep.path("include"), "", .{});
+    vma.addCSourceFile(.{
+        .file = b.path("libs/vma/vma.cpp"),
+    });
+
     const tracy = b.dependency("zig-tracy", .{
         .target = target,
         .optimize = optimize,
@@ -87,6 +102,7 @@ pub fn build(b: *Build) void {
         l.root_module.addImport("tracy", tracy.module("tracy"));
         l.root_module.addImport("vk", vk);
         l.linkLibrary(tracy.artifact("tracy"));
+        l.linkLibrary(vma);
     }
 
     const install_lib = b.addInstallArtifact(lib, .{ .dest_dir = .{ .override = .prefix } });
@@ -266,7 +282,12 @@ fn buildAssetCompiler(b: *Build, optimize: std.builtin.OptimizeMode, assets_mod:
         .skip_tests = true,
     });
     const zalgebra_dep = b.dependency("zalgebra", .{});
+    const spirv_cross_dep = b.dependency("spirv-cross", .{
+        .target = b.host,
+        .optimize = optimize,
+    });
     const assimp_lib = assimp_dep.artifact("assimp");
+    const spirv_cross_lib = spirv_cross_dep.artifact("spirv-cross");
 
     const assetc = b.addExecutable(.{
         .name = "assetc",

@@ -296,6 +317,7 @@ fn buildAssetCompiler(b: *Build, optimize: std.builtin.OptimizeMode, assets_mod:
     assetc.root_module.addImport("assets", assets_mod);
 
     assetc.linkLibrary(assimp_lib);
+    assetc.linkLibrary(spirv_cross_lib);
     assetc.linkLibC();
     assetc.linkLibCpp();
build.zig.zon

@@ -43,6 +43,14 @@
         .url = "https://github.com/sinnwrig/mach-dxcompiler/tarball/c3dfe92f3f04d4a3262dbc1a71f0016b9af92eb4",
         .hash = "12202f48e7cf06b1f2ecfd84f16effbd5bb9d644ea17e8a6144b4301a4dea198cf9c",
     },
+    .vma = .{
+        .url = "https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator/tarball/1c35ba99ce775f8342d87a83a3f0f696f99c2a39",
+        .hash = "1220521e256ea64cb942c37cfe08065073f2c71bfaa91d5c929fd382314c73ac0369",
+    },
+    .@"spirv-cross" = .{
+        .url = "https://github.com/hexops/spirv-cross/tarball/872bd405fece4bf6388abdea916356e26cb8fed9",
+        .hash = "12207bebf82eef06f4f80a7e54c91e4402c0055d04167fdbcf1f350846a350266976",
+    },
 },
 .paths = .{
     // This makes *all* files, recursively, included in this package. It is generally
4	libs/vma/vma.cpp (new file)

@@ -0,0 +1,4 @@
+#define VMA_IMPLEMENTATION
+#define VMA_STATIC_VULKAN_FUNCTIONS 0
+#define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
+#include "vk_mem_alloc.h"
src/AssetManager.zig

@@ -323,6 +323,16 @@ fn loadShaderProgramErr(self: *AssetManager, id: AssetId) !LoadedShaderProgram {
     var program: formats.ShaderProgram = undefined;
     try program.serialize(&serializer);
 
+    const pipeline_layout = try self.gc.device.createPipelineLayout(&.{
+        .p_push_constant_ranges = &.{
+            vk.PushConstantRange{
+                .stage_flags = .{ .vertex_bit = true },
+                .offset = 0,
+                .size = 1,
+            },
+        },
+    }, null);
+
     const pipeline = blk: {
         switch (program) {
             .graphics => |graphics_pipeline| {

@@ -342,9 +352,6 @@ fn loadShaderProgramErr(self: *AssetManager, id: AssetId) !LoadedShaderProgram {
                     .scissor_with_count,
                 };
 
-                const pipeline_layout = try self.gc.device.createPipelineLayout(&.{}, null);
-                defer self.gc.device.destroyPipelineLayout(pipeline_layout, null);
-
                 var pipelines = [1]vk.Pipeline{.null_handle};
                 _ = try self.gc.device.createGraphicsPipelines(self.gc.pipeline_cache, 1, &.{
                     vk.GraphicsPipelineCreateInfo{

@@ -443,6 +450,7 @@ fn loadShaderProgramErr(self: *AssetManager, id: AssetId) !LoadedShaderProgram {
 
     const loaded_shader_program = LoadedShaderProgram{
         .pipeline = pipeline,
+        .layout = pipeline_layout,
     };
 
     {

@@ -1571,8 +1579,11 @@ fn freeAsset(self: *AssetManager, asset: *LoadedAsset) void {
             self.allocator.free(shader.source);
         },
         .shaderProgram => |*program| {
-            // NOTE: We use maintenance4 extension, no need to wait for pipeline
-            // usage to end
+            self.gc.queues.graphics.mu.lock();
+            defer self.gc.queues.graphics.mu.unlock();
+
+            self.gc.device.queueWaitIdle(self.gc.queues.graphics.handle) catch @panic("Wait Idle failed");
+
             self.gc.device.destroyPipeline(program.pipeline, null);
         },
         .texture => |*texture| {
src/GraphicsContext.zig

@@ -1,6 +1,7 @@
 const std = @import("std");
 const vk = @import("vk");
 const c = @import("sdl.zig");
+const vma = @import("vma.zig");
 
 pub const GraphicsContext = @This();
 

@@ -27,6 +28,7 @@ const device_extensions = [_][:0]const u8{
 const vk_layers = [_][:0]const u8{"VK_LAYER_KHRONOS_validation"};
 
 allocator: std.mem.Allocator = undefined,
+vma_allocator: vma.Allocator = null,
 window: *c.SDL_Window = undefined,
 vkb: BaseDispatch = undefined,
 vki: InstanceDispatch = undefined,

@@ -59,6 +61,10 @@ pub const CommandPool = struct {
     pub fn freeCommandBuffer(self: *const CommandPool, command_buffer: CommandBuffer) void {
         self.device.freeCommandBuffers(self.handle, 1, &.{command_buffer.handle});
     }
+
+    pub fn deinit(self: *CommandPool) void {
+        self.device.destroyCommandPool(self.handle, null);
+    }
 };
 
 // Simple sync barrier tracking without a render graph
@@ -68,36 +74,27 @@ pub const SyncBarrierMasks = struct {
     stage_mask: vk.PipelineStageFlags2 = .{},
 };
 
-pub const Image = struct {
-    handle: vk.Image,
-    mip_count: u32,
-    layer_count: u32,
-    format: vk.Format,
+pub const SyncRequest = struct {
+    src_stage_mask: vk.PipelineStageFlags2,
+    src_access_mask: vk.AccessFlags2,
+    dst_stage_mask: vk.PipelineStageFlags2,
+    dst_access_mask: vk.AccessFlags2,
+};
+
+pub const ImageSyncRequest = struct {
+    req: SyncRequest,
+    layout: vk.ImageLayout,
+};
+
+pub const SyncState = struct {
     last_writer: SyncBarrierMasks = .{},
     per_stage_readers: vk.PipelineStageFlags2 = .{},
-    /// Current layout
-    layout: vk.ImageLayout = .undefined,
-
-    pub fn createView(self: *const Image, device: Device, aspect_mask: vk.ImageAspectFlags) !vk.ImageView {
-        return device.createImageView(&vk.ImageViewCreateInfo{
-            .format = self.format,
-            .components = .{ .r = .r, .g = .g, .b = .b, .a = .a },
-            .image = self.handle,
-            .view_type = .@"2d",
-            .subresource_range = .{
-                .aspect_mask = aspect_mask,
-                .base_array_layer = 0,
-                .layer_count = self.layer_count,
-                .base_mip_level = 0,
-                .level_count = self.mip_count,
-            },
-        }, null);
-    }
 
-    pub fn sync(self: *Image, cmds: CommandBuffer, masks: SyncBarrierMasks, layout: vk.ImageLayout) !void {
+    pub fn sync(self: *SyncState, masks: SyncBarrierMasks, force_write: bool) ?SyncRequest {
         const is_read = isRead(masks.access_mask);
-        const is_write = layout != self.layout or isWrite(masks.access_mask);
+        const is_write = force_write or isWrite(masks.access_mask);
+
+        var result: ?SyncRequest = null;
 
         // Read only
         if (is_read and !is_write) {

@@ -108,28 +105,12 @@ pub const Image = struct {
             const new_stages = masks.stage_mask.subtract(self.per_stage_readers);
             self.per_stage_readers = masks.stage_mask.merge(self.per_stage_readers);
 
-            const barrier = vk.ImageMemoryBarrier2{
-                .image = self.handle,
-                .old_layout = self.layout,
-                .new_layout = layout,
+            result = SyncRequest{
                 .src_stage_mask = self.last_writer.stage_mask,
                 .dst_stage_mask = new_stages,
                 .src_access_mask = self.last_writer.access_mask,
                 .dst_access_mask = masks.access_mask,
-                .src_queue_family_index = 0,
-                .dst_queue_family_index = 0,
-                .subresource_range = .{
-                    .base_mip_level = 0,
-                    .base_array_layer = 0,
-                    .layer_count = self.layer_count,
-                    .level_count = self.mip_count,
-                    .aspect_mask = .{ .color_bit = true },
-                },
             };
-            cmds.pipelineBarrier2(&vk.DependencyInfo{
-                .image_memory_barrier_count = 1,
-                .p_image_memory_barriers = &.{barrier},
-            });
         }
     } else {
         self.per_stage_readers = self.per_stage_readers.merge(masks.stage_mask);

@@ -138,36 +119,21 @@ pub const Image = struct {
         // If there are any reads
         const earlier_stages = self.last_writer.stage_mask.merge(self.per_stage_readers);
 
-        if (earlier_stages.toInt() != 0 or self.layout != layout) {
+        if (earlier_stages.toInt() != 0 or force_write) {
             // Emit barrier for earlier stages and last writer mask
 
-            const barrier = vk.ImageMemoryBarrier2{
-                .image = self.handle,
-                .old_layout = self.layout,
-                .new_layout = layout,
+            result = SyncRequest{
                 .src_stage_mask = earlier_stages,
                 .dst_stage_mask = masks.stage_mask,
                 .src_access_mask = self.last_writer.access_mask,
                 .dst_access_mask = masks.access_mask,
-                .src_queue_family_index = 0,
-                .dst_queue_family_index = 0,
-                .subresource_range = .{
-                    .base_mip_level = 0,
-                    .base_array_layer = 0,
-                    .layer_count = self.layer_count,
-                    .level_count = self.mip_count,
-                    .aspect_mask = .{ .color_bit = true },
-                },
             };
-            cmds.pipelineBarrier2(&vk.DependencyInfo{
-                .image_memory_barrier_count = 1,
-                .p_image_memory_barriers = &.{barrier},
-            });
         }
         self.last_writer = masks;
         self.per_stage_readers = .{};
-        self.layout = layout;
     }
+
+    return result;
 }
 
 const read_access_mask = vk.AccessFlags2{
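With barrier emission split out of the state machine, SyncState can be exercised in isolation. A test-style sketch (the exact elision behavior of the reader-tracking branch not shown in this hunk is assumed):

    test "SyncState: read-after-write produces a barrier request" {
        var state = SyncState{};

        // Transfer write: nothing came before, so no barrier is required yet.
        try std.testing.expect(state.sync(.{
            .access_mask = .{ .transfer_write_bit = true },
            .stage_mask = .{ .copy_bit = true },
        }, false) == null);

        // Vertex-shader read of the written data: must wait on the copy.
        const req = state.sync(.{
            .access_mask = .{ .shader_read_bit = true },
            .stage_mask = .{ .vertex_shader_bit = true },
        }, false).?;
        try std.testing.expect(req.src_stage_mask.copy_bit);
        try std.testing.expect(req.dst_stage_mask.vertex_shader_bit);
    }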
@@ -227,6 +193,123 @@ pub const Image = struct {
     }
 };
 
+pub const ImageSyncState = struct {
+    sync_state: SyncState = .{},
+
+    /// Current layout
+    layout: vk.ImageLayout = .undefined,
+
+    pub fn sync(self: *ImageSyncState, masks: SyncBarrierMasks, layout: vk.ImageLayout) ?ImageSyncRequest {
+        var result: ?ImageSyncRequest = null;
+        if (self.sync_state.sync(masks, layout != self.layout)) |req| {
+            result = .{
+                .req = req,
+                .layout = layout,
+            };
+        }
+        // Record the layout the image will be in once the request is executed,
+        // so the next sync computes the correct old_layout.
+        self.layout = layout;
+
+        return result;
+    }
+};
+
+pub const Image = struct {
+    handle: vk.Image,
+    mip_count: u32,
+    layer_count: u32,
+    format: vk.Format,
+
+    sync_state: ImageSyncState = .{},
+
+    pub fn sync(self: *Image, cmds: CommandBuffer, masks: SyncBarrierMasks, layout: vk.ImageLayout) !void {
+        const old_layout = self.sync_state.layout;
+        if (self.sync_state.sync(masks, layout)) |req| {
+            cmds.pipelineBarrier2(&vk.DependencyInfo{
+                .image_memory_barrier_count = 1,
+                .p_image_memory_barriers = &.{
+                    vk.ImageMemoryBarrier2{
+                        .image = self.handle,
+                        .old_layout = old_layout,
+                        .new_layout = layout,
+                        .src_stage_mask = req.req.src_stage_mask,
+                        .src_access_mask = req.req.src_access_mask,
+                        .dst_stage_mask = req.req.dst_stage_mask,
+                        .dst_access_mask = req.req.dst_access_mask,
+                        .src_queue_family_index = vk.QUEUE_FAMILY_IGNORED,
+                        .dst_queue_family_index = vk.QUEUE_FAMILY_IGNORED,
+                        .subresource_range = .{
+                            .aspect_mask = .{ .color_bit = true },
+                            .base_array_layer = 0,
+                            .base_mip_level = 0,
+                            .layer_count = self.layer_count,
+                            .level_count = self.mip_count,
+                        },
+                    },
+                },
+            });
+        }
+    }
+
+    pub fn createView(self: *const Image, device: Device, aspect_mask: vk.ImageAspectFlags) !vk.ImageView {
+        return device.createImageView(&vk.ImageViewCreateInfo{
+            .format = self.format,
+            .components = .{ .r = .r, .g = .g, .b = .b, .a = .a },
+            .image = self.handle,
+            .view_type = .@"2d",
+            .subresource_range = .{
+                .aspect_mask = aspect_mask,
+                .base_array_layer = 0,
+                .layer_count = self.layer_count,
+                .base_mip_level = 0,
+                .level_count = self.mip_count,
+            },
+        }, null);
+    }
+};
+
+pub const Buffer = struct {
+    gc: *GraphicsContext,
+    handle: vk.Buffer,
+    allocation: vma.Allocation,
+    allocation_info: vma.c.VmaAllocationInfo,
+
+    sync_state: SyncState,
+
+    pub fn sync(self: *Buffer, cmds: CommandBuffer, masks: SyncBarrierMasks) !void {
+        if (self.sync_state.sync(masks, false)) |req| {
+            cmds.pipelineBarrier2(&vk.DependencyInfo{
+                .buffer_memory_barrier_count = 1,
+                .p_buffer_memory_barriers = &.{
+                    vk.BufferMemoryBarrier2{
+                        .buffer = self.handle,
+                        .src_stage_mask = req.src_stage_mask,
+                        .src_access_mask = req.src_access_mask,
+                        .dst_stage_mask = req.dst_stage_mask,
+                        .dst_access_mask = req.dst_access_mask,
+                        .offset = 0,
+                        .size = self.allocation_info.size,
+                        .src_queue_family_index = vk.QUEUE_FAMILY_IGNORED,
+                        .dst_queue_family_index = vk.QUEUE_FAMILY_IGNORED,
+                    },
+                },
+            });
+        }
+    }
+
+    pub fn flush(self: *Buffer, offset: vk.DeviceSize, size: vk.DeviceSize) !void {
+        try vma.flushAllocation(self.gc.vma_allocator, self.allocation, offset, size);
+    }
+
+    pub fn getAllocationMemoryProperties(self: *Buffer) vk.MemoryPropertyFlags {
+        var mem_prop_flags = vk.MemoryPropertyFlags{};
+        vma.getAllocationMemoryProperties(self.gc.vma_allocator, self.allocation, &mem_prop_flags);
+        return mem_prop_flags;
+    }
+
+    pub fn deinit(self: *const Buffer, gc: *GraphicsContext) void {
+        vma.destroyBuffer(gc.vma_allocator, self.handle, self.allocation);
+    }
+};
 
 pub fn init(self: *GraphicsContext, allocator: std.mem.Allocator, window: *c.SDL_Window) !void {
     self.allocator = allocator;
     self.window = window;
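A typical frame brackets rendering with two Image.sync calls; a sketch of the pattern (mirroring how draw() uses it later in this diff, layouts illustrative):

    // Before rendering: transition to attachment layout, wait for prior use.
    try current_image.sync(cmds, .{
        .stage_mask = .{ .color_attachment_output_bit = true },
        .access_mask = .{ .color_attachment_write_bit = true },
    }, .attachment_optimal);

    // ... record rendering ...

    // Before present: the layout change alone forces the barrier; present
    // itself needs no destination access mask.
    try current_image.sync(cmds, .{
        .stage_mask = .{ .color_attachment_output_bit = true },
        .access_mask = .{},
    }, .present_src_khr);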
@@ -300,6 +383,18 @@ pub fn init(self: *GraphicsContext, allocator: std.mem.Allocator, window: *c.SDL
     errdefer self.vkd.destroyDevice(device_handle, null);
     self.device = Device.init(device_handle, &self.vkd);
 
+    self.vma_allocator = vma.createAllocator(&.{
+        .instance = instance_handle,
+        .physical_device = self.device_info.physical_device,
+        .device = device_handle,
+        .vulkanApiVersion = vk.API_VERSION_1_3,
+        .pVulkanFunctions = &vma.VulkanFunctions{
+            .vkGetInstanceProcAddr = @ptrCast(vkGetInstanceProcAddr),
+            .vkGetDeviceProcAddr = @ptrCast(self.instance.wrapper.dispatch.vkGetDeviceProcAddr),
+        },
+    });
+    std.debug.assert(self.vma_allocator != null);
+
     try self.maybeResizeSwapchain();
     errdefer self.device.destroySwapchainKHR(self.swapchain, null);
 
@@ -335,6 +430,12 @@ pub fn init(self: *GraphicsContext, allocator: std.mem.Allocator, window: *c.SDL
     self.pipeline_cache = try self.device.createPipelineCache(&.{}, null);
 }
 
+pub fn createBuffer(self: *GraphicsContext, create_info: *const vk.BufferCreateInfo, allocation_create_info: *const vma.AllocationCreateInfo) !Buffer {
+    var result: Buffer = undefined;
+    result.gc = self;
+    result.sync_state = .{};
+    result.handle = try vma.createBuffer(self.vma_allocator, create_info, allocation_create_info, &result.allocation, &result.allocation_info);
+    return result;
+}
+
 pub fn acquireSwapchainImage(self: *GraphicsContext, acquire_semaphore: vk.Semaphore) !u32 {
     var found = false;
     var swapchain_img: u32 = 0;
src/Render2.zig

@@ -3,6 +3,8 @@ const AssetManager = @import("AssetManager.zig");
 const GraphicsContext = @import("GraphicsContext.zig");
 const vk = @import("vk");
 const a = @import("asset_manifest");
+const za = @import("zalgebra");
+const Mat4 = za.Mat4;
 
 const Render2 = @This();
 

@@ -13,6 +15,7 @@ gc: *GraphicsContext,
 command_pool: GraphicsContext.CommandPool,
 
 // NOTE: TEST
+camera_matrices_buffer: GraphicsContext.Buffer,
 frame: u32 = 0,
 frame_data: [MAX_FRAME_LAG]FrameData = undefined,
 

@@ -22,11 +25,31 @@ pub fn init(assetman: *AssetManager, gc: *GraphicsContext) !Render2 {
         .gc = gc,
         .command_pool = try gc.queues.graphics.createCommandPool(.{ .reset_command_buffer_bit = true }),
     };
     errdefer self.command_pool.deinit();
 
     // NOTE: TEST
     for (0..MAX_FRAME_LAG) |i| {
         self.frame_data[i] = try FrameData.init(gc, self.command_pool);
     }
 
+    self.camera_matrices_buffer = try self.gc.createBuffer(&.{
+        .usage = .{ .storage_buffer_bit = true, .transfer_dst_bit = true, .shader_device_address_bit = true },
+        .size = @sizeOf(CameraMatrices) * MAX_FRAME_LAG,
+        .sharing_mode = .exclusive,
+    }, &.{
+        .usage = .auto,
+        .flags = .{
+            .host_access_sequential_write_bit = true,
+            .host_access_allow_transfer_instead_bit = true,
+            .mapped_bit = true,
+        },
+    });
+    errdefer self.camera_matrices_buffer.deinit(self.gc);
+    const mem_props = self.camera_matrices_buffer.getAllocationMemoryProperties();
+
+    // TODO: Assuming unified memory or resizable bar right now, should not assume that
+    std.debug.assert(mem_props.contains(.{ .host_visible_bit = true }));
+
     return self;
 }
 
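The TODO above matters on discrete GPUs without ReBAR: with host_access_allow_transfer_instead_bit set, VMA is free to pick device-local, non-host-visible memory, and the assert would fire. A sketch of the fallback the comment implies (writeCameraMatrices is a hypothetical helper, not in this commit; the staging path assumes the bytes were already written at upload_buffer_cursor):

    // Sketch: choose between direct mapped writes and a staging copy,
    // based on what memory VMA actually returned.
    fn writeCameraMatrices(self: *Render2, cmds: GraphicsContext.CommandBuffer, data: *const CameraMatrices) !void {
        const mem_props = self.camera_matrices_buffer.getAllocationMemoryProperties();
        const offset = self.frame * @sizeOf(CameraMatrices);

        if (mem_props.contains(.{ .host_visible_bit = true })) {
            // Unified memory / ReBAR: write through the persistent mapping.
            const mapped: [*]u8 = @ptrCast(self.camera_matrices_buffer.allocation_info.pMappedData.?);
            @memcpy(mapped[offset..][0..@sizeOf(CameraMatrices)], std.mem.asBytes(data));
            try self.camera_matrices_buffer.flush(offset, @sizeOf(CameraMatrices));
        } else {
            // Discrete GPU: record a copy out of the upload buffer instead.
            try self.uploadData(cmds, self.camera_matrices_buffer, offset, @sizeOf(CameraMatrices));
        }
    }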
@@ -46,6 +69,25 @@ pub fn draw(self: *Render2) !void {
 
     try cmds.beginCommandBuffer(&.{});
     {
+        // Camera matrices
+        {
+            try self.camera_matrices_buffer.sync(cmds, .{ .stage_mask = .{ .host_bit = true }, .access_mask = .{ .host_write_bit = true } });
+            const c_matrices: [*c]CameraMatrices = @alignCast(@ptrCast(self.camera_matrices_buffer.allocation_info.pMappedData.?));
+            const matrices: *CameraMatrices = &c_matrices[self.frame];
+
+            const view = Mat4.identity();
+            const projection = Mat4.perspective(60, 1.0 / 16.0, 0.1, 1000);
+            const view_projection = projection.mul(view);
+            matrices.* = .{
+                .view = view,
+                .projection = projection,
+                .view_projection = view_projection,
+            };
+
+            try self.camera_matrices_buffer.flush(self.frame * @sizeOf(CameraMatrices), @sizeOf(CameraMatrices));
+            try self.camera_matrices_buffer.sync(cmds, .{ .stage_mask = .{ .vertex_shader_bit = true }, .access_mask = .{ .shader_read_bit = true } });
+        }
+
         try current_image.sync(cmds, .{ .stage_mask = .{ .color_attachment_output_bit = true }, .access_mask = .{ .color_attachment_write_bit = true } }, .attachment_optimal);
         {
             cmds.beginRendering(&.{

@@ -82,6 +124,7 @@ pub fn draw(self: *Render2) !void {
                 .extent = self.gc.swapchain_extent,
             }});
 
+            try self.camera_matrices_buffer.sync(cmds, .{ .stage_mask = .{ .vertex_shader_bit = true }, .access_mask = .{ .shader_read_bit = true } });
             cmds.draw(3, 1, 0, 0);
         }
 
@@ -110,6 +153,21 @@ pub fn draw(self: *Render2) !void {
     self.frame = (self.frame + 1) % MAX_FRAME_LAG;
 }
 
+fn uploadData(self: *Render2, cmds: GraphicsContext.CommandBuffer, dst: GraphicsContext.Buffer, dst_offset: usize, len: usize) !void {
+    cmds.copyBuffer2(&.{
+        .src_buffer = self.upload_buffer.handle,
+        .dst_buffer = dst.handle,
+        .region_count = 1,
+        .p_regions = &.{
+            vk.BufferCopy2{
+                .src_offset = self.upload_buffer_cursor,
+                .dst_offset = dst_offset,
+                .size = len,
+            },
+        },
+    });
+    self.upload_buffer_cursor += len;
+}
+
 // Per frame stuff
 const FrameData = struct {
     // Sync

@@ -136,3 +194,9 @@ const FrameData = struct {
         try self.command_buffer.resetCommandBuffer(.{ .release_resources_bit = true });
     }
 };
+
+const CameraMatrices = extern struct {
+    view_projection: Mat4,
+    projection: Mat4,
+    view: Mat4,
+};
220	src/vma.zig (new file)

@@ -0,0 +1,220 @@
+const std = @import("std");
+const vk = @import("vk");
+pub const c = @cImport({
+    @cDefine("VMA_STATIC_VULKAN_FUNCTIONS", "0");
+    @cDefine("VMA_DYNAMIC_VULKAN_FUNCTIONS", "1");
+    @cInclude("vk_mem_alloc.h");
+});
+
+pub const Allocator = c.VmaAllocator;
+pub const Allocation = c.VmaAllocation;
+pub const Pool = c.VmaPool;
+
+pub const AllocatorCreateFlags = packed struct {
+    externally_synchronized_bit: bool = false,
+    khr_dedicated_allocation_bit: bool = false,
+    khr_bind_memory2_bit: bool = false,
+    ext_memory_budget_bit: bool = false,
+    amd_device_coherent_memory_bit: bool = false,
+    buffer_device_address_bit: bool = false,
+    ext_memory_priority_bit: bool = false,
+    khr_maintenance4_bit: bool = false,
+    khr_maintenance5_bit: bool = false,
+    khr_external_memory_win32_bit: bool = false,
+    _reserved_bits: u22 = 0,
+};
+
+pub const VulkanFunctions = c.VmaVulkanFunctions;
+
+pub const AllocatorCreateInfo = extern struct {
+    flags: AllocatorCreateFlags = .{},
+    physical_device: vk.PhysicalDevice,
+    device: vk.Device,
+    preferredLargeHeapBlockSize: vk.DeviceSize = 0,
+    pAllocationCallbacks: ?*const vk.AllocationCallbacks = null,
+    pDeviceMemoryCallbacks: ?*const c.VmaDeviceMemoryCallbacks = null,
+    pHeapSizeLimit: vk.DeviceSize = 0,
+    pVulkanFunctions: *const VulkanFunctions,
+    instance: vk.Instance,
+    vulkanApiVersion: u32,
+    pTypeExternalMemoryHandleTypes: ?*opaque {} = null,
+};
+
+comptime {
+    if (@sizeOf(AllocatorCreateInfo) != @sizeOf(c.VmaAllocatorCreateInfo)) {
+        @compileError("update vma bindings");
+    }
+}
+
+pub fn createAllocator(p_create_info: *const AllocatorCreateInfo) Allocator {
+    var allocator: Allocator = null;
+    // Judging by the code, there is no possible error code
+    const result = c.vmaCreateAllocator(@ptrCast(p_create_info), &allocator);
+    std.debug.assert(result == @intFromEnum(vk.Result.success));
+    return allocator;
+}
+
+pub fn destroyAllocator(allocator: Allocator) void {
+    c.vmaDestroyAllocator(allocator);
+}
+
+pub const AllocationCreateFlags = packed struct {
+    dedicated_memory_bit: bool = false,
+    never_allocate_bit: bool = false,
+    mapped_bit: bool = false,
+    user_data_copy_string_bit: bool = false,
+    upper_address_bit: bool = false,
+    dont_bind_bit: bool = false,
+    within_budget_bit: bool = false,
+    can_alias_bit: bool = false,
+    host_access_sequential_write_bit: bool = false,
+    host_access_random_bit: bool = false,
+    host_access_allow_transfer_instead_bit: bool = false,
+    strategy_min_memory_bit: bool = false,
+    strategy_min_time_bit: bool = false,
+    strategy_min_offset_bit: bool = false,
+    strategy_best_fit_bit: bool = false,
+    strategy_first_fit_bit: bool = false,
+    strategy_mask: bool = false,
+    _reserved_bits: u15 = 0,
+};
+
+pub const MemoryUsage = enum(c_uint) {
+    unknown = 0,
+    gpu_only = 1,
+    cpu_only = 2,
+    cpu_to_gpu = 3,
+    gpu_to_cpu = 4,
+    cpu_copy = 5,
+    gpu_lazily_allocated = 6,
+    auto = 7,
+    auto_prefer_device = 8,
+    auto_prefer_host = 9,
+};
+
+pub const AllocationCreateInfo = extern struct {
+    flags: AllocationCreateFlags = .{},
+    usage: MemoryUsage,
+    required_flags: vk.MemoryPropertyFlags = .{},
+    preferred_flags: vk.MemoryPropertyFlags = .{},
+    memory_type_bits: u32 = 0,
+    pool: Pool = null,
+    p_user_data: ?*anyopaque = null,
+    priority: f32 = 0,
+};
+
+comptime {
+    if (@sizeOf(AllocationCreateInfo) != @sizeOf(c.VmaAllocationCreateInfo)) {
+        @compileError("update vma bindings");
+    }
+}
+
+const Error = error{
+    OutOfHostMemory,
+    OutOfDeviceMemory,
+    DeviceLost,
+    MemoryMapFailed,
+    TooManyObjects,
+    FormatNotSupported,
+    Unknown,
+    InvalidExternalHandle,
+    InvalidOpaqueCaptureAddress,
+    ImageUsageNotSupportedKHR,
+    OperationDeferredKHR,
+    OperationNotDeferredKHR,
+    CompressionExhaustedEXT,
+};
+
+fn checkError(result: c.VkResult) Error!void {
+    switch (@as(vk.Result, @enumFromInt(result))) {
+        .success => {},
+        .error_out_of_host_memory => return error.OutOfHostMemory,
+        .error_out_of_device_memory => return error.OutOfDeviceMemory,
+        .error_device_lost => return error.DeviceLost,
+        .error_memory_map_failed => return error.MemoryMapFailed,
+        .error_too_many_objects => return error.TooManyObjects,
+        .error_format_not_supported => return error.FormatNotSupported,
+        .error_unknown => return error.Unknown,
+        .error_invalid_external_handle => return error.InvalidExternalHandle,
+        .error_invalid_opaque_capture_address => return error.InvalidOpaqueCaptureAddress,
+        .error_image_usage_not_supported_khr => return error.ImageUsageNotSupportedKHR,
+        .operation_deferred_khr => return error.OperationDeferredKHR,
+        .operation_not_deferred_khr => return error.OperationNotDeferredKHR,
+        .error_compression_exhausted_ext => return error.CompressionExhaustedEXT,
+        else => unreachable,
+    }
+}
+
+pub fn allocateMemory(allocator: Allocator, p_memory_requirements: *const vk.MemoryRequirements, p_create_info: *const AllocationCreateInfo, p_allocation_info: ?*c.VmaAllocationInfo) Error!Allocation {
+    var allocation: Allocation = null;
+    try checkError(c.vmaAllocateMemory(allocator, @ptrCast(p_memory_requirements), @ptrCast(p_create_info), &allocation, p_allocation_info));
+
+    return allocation;
+}
+
+pub fn allocateMemoryForBuffer(allocator: Allocator, buffer: vk.Buffer, p_create_info: *const AllocationCreateInfo, p_allocation_info: ?*c.VmaAllocationInfo) Error!Allocation {
+    var allocation: Allocation = null;
+    try checkError(c.vmaAllocateMemoryForBuffer(allocator, @as(*const c.VkBuffer, @ptrCast(&buffer)).*, @ptrCast(p_create_info), &allocation, p_allocation_info));
+
+    return allocation;
+}
+
+pub fn allocateMemoryForImage(allocator: Allocator, image: vk.Image, p_create_info: *const AllocationCreateInfo, p_allocation_info: ?*c.VmaAllocationInfo) Error!Allocation {
+    var allocation: Allocation = null;
+    try checkError(c.vmaAllocateMemoryForImage(allocator, @as(*const c.VkImage, @ptrCast(&image)).*, @ptrCast(p_create_info), &allocation, p_allocation_info));
+
+    return allocation;
+}
+
+pub fn allocateMemoryPages(allocator: Allocator, p_memory_requirements: *const vk.MemoryRequirements, p_create_info: *const AllocationCreateInfo, allocations: []Allocation, p_allocation_info: ?*c.VmaAllocationInfo) Error!void {
+    try checkError(c.vmaAllocateMemoryPages(allocator, @ptrCast(p_memory_requirements), @ptrCast(p_create_info), allocations.len, allocations.ptr, @ptrCast(p_allocation_info)));
+}
+
+pub fn freeMemory(allocator: Allocator, allocation: Allocation) void {
+    c.vmaFreeMemory(allocator, allocation);
+}
+
+pub fn freeMemoryPages(allocator: Allocator, allocations: []Allocation) void {
+    c.vmaFreeMemoryPages(allocator, allocations.len, allocations.ptr);
+}
+
+pub fn createBuffer(allocator: Allocator, p_buffer_create_info: *const vk.BufferCreateInfo, p_allocation_create_info: *const AllocationCreateInfo, p_allocation: *Allocation, p_allocation_info: ?*c.VmaAllocationInfo) Error!vk.Buffer {
+    var buffer: vk.Buffer = .null_handle;
+    try checkError(c.vmaCreateBuffer(allocator, @ptrCast(p_buffer_create_info), @ptrCast(p_allocation_create_info), @ptrCast(&buffer), p_allocation, p_allocation_info));
+
+    return buffer;
+}
+
+pub fn createBufferWithAlignment(allocator: Allocator, p_buffer_create_info: *const vk.BufferCreateInfo, p_allocation_create_info: *const AllocationCreateInfo, min_alignment: vk.DeviceSize, p_allocation: *Allocation, p_allocation_info: ?*c.VmaAllocationInfo) Error!vk.Buffer {
+    var buffer: vk.Buffer = .null_handle;
+    try checkError(c.vmaCreateBufferWithAlignment(allocator, @ptrCast(p_buffer_create_info), @ptrCast(p_allocation_create_info), min_alignment, @ptrCast(&buffer), p_allocation, p_allocation_info));
+
+    return buffer;
+}
+
+pub fn createImage(allocator: Allocator, p_image_create_info: *const vk.ImageCreateInfo, p_allocation_create_info: *const AllocationCreateInfo, p_allocation: *Allocation, p_allocation_info: ?*c.VmaAllocationInfo) Error!vk.Image {
+    var image: vk.Image = .null_handle;
+    try checkError(c.vmaCreateImage(allocator, @ptrCast(p_image_create_info), @ptrCast(p_allocation_create_info), @ptrCast(&image), p_allocation, p_allocation_info));
+    return image;
+}
+
+pub fn destroyBuffer(allocator: Allocator, buffer: vk.Buffer, allocation: Allocation) void {
+    c.vmaDestroyBuffer(allocator, @as(*const c.VkBuffer, @ptrCast(&buffer)).*, allocation);
+}
+
+pub fn destroyImage(allocator: Allocator, image: vk.Image, allocation: Allocation) void {
+    c.vmaDestroyImage(allocator, @as(*const c.VkImage, @ptrCast(&image)).*, allocation);
+}
+
+pub fn flushAllocation(allocator: Allocator, allocation: Allocation, offset: vk.DeviceSize, size: vk.DeviceSize) Error!void {
+    try checkError(c.vmaFlushAllocation(allocator, allocation, offset, size));
+}
+
+pub fn flushAllocations(allocator: Allocator, allocations: []const Allocation, offsets: []const vk.DeviceSize, sizes: []const vk.DeviceSize) Error!void {
+    std.debug.assert(allocations.len == offsets.len and offsets.len == sizes.len);
+    try checkError(c.vmaFlushAllocations(allocator, @intCast(allocations.len), allocations.ptr, offsets.ptr, sizes.ptr));
+}
+
+// vmaGetAllocationMemoryProperties returns void in C, so there is no
+// result to check here.
+pub fn getAllocationMemoryProperties(allocator: Allocator, allocation: Allocation, mem_prop_flags: *vk.MemoryPropertyFlags) void {
+    c.vmaGetAllocationMemoryProperties(allocator, allocation, @ptrCast(mem_prop_flags));
+}
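Taken together, the wrapper is used roughly like this (a sketch mirroring GraphicsContext.createBuffer; `gc.vma_allocator` is assumed to be initialized as shown earlier):

    var allocation: vma.Allocation = null;
    var allocation_info: vma.c.VmaAllocationInfo = undefined;

    // Create a 64 KiB staging buffer, host-visible and persistently mapped.
    const staging = try vma.createBuffer(gc.vma_allocator, &.{
        .size = 64 * 1024,
        .usage = .{ .transfer_src_bit = true },
        .sharing_mode = .exclusive,
    }, &.{
        .usage = .auto,
        .flags = .{ .host_access_sequential_write_bit = true, .mapped_bit = true },
    }, &allocation, &allocation_info);
    defer vma.destroyBuffer(gc.vma_allocator, staging, allocation);

    // Write through the mapping, then flush in case the memory is not coherent.
    const bytes: [*]u8 = @ptrCast(allocation_info.pMappedData.?);
    bytes[0] = 0xff;
    try vma.flushAllocation(gc.vma_allocator, allocation, 0, 1);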
@@ -21,6 +21,8 @@ const c = @cImport({
     @cInclude("stb_image.h");
 
     @cInclude("ispc_texcomp.h");
+
+    @cInclude("spirv-cross/spirv_cross_c.h");
 });
 
 const ASSET_MAX_BYTES = 1024 * 1024 * 1024;
@@ -53,6 +55,83 @@ const Args = struct {
     output_dir: []const u8 = "",
 };
 
+// Single producer, multiple consumers
+fn JobQueue(comptime JobPayload: type) type {
+    return struct {
+        const Self = @This();
+
+        pub const JobFn = fn (payload: *JobPayload) void;
+        pub const Job = struct {
+            payload: JobPayload,
+            func: *const JobFn,
+        };
+
+        running: std.atomic.Value(bool) = std.atomic.Value(bool).init(true),
+
+        read: std.atomic.Value(u32) = std.atomic.Value(u32).init(0),
+        write: std.atomic.Value(u32) = std.atomic.Value(u32).init(0),
+        buffer: []Job,
+
+        pub fn init(buffer: []Job) Self {
+            return Self{
+                .buffer = buffer,
+            };
+        }
+
+        pub fn pushJobs(self: *Self, jobs: []const Job) void {
+            var left_to_write: usize = jobs.len;
+            while (left_to_write > 0) {
+                const write = self.write.load(.unordered);
+                if (write >= (self.read.load(.unordered) + self.buffer.len)) {
+                    continue;
+                }
+
+                const read = self.read.load(.acquire);
+
+                const to_write = @min(self.buffer.len - (write - read), left_to_write);
+                // Continue from where the previous pass stopped.
+                const base = jobs.len - left_to_write;
+                for (0..to_write) |i| {
+                    self.buffer[(write + i) % self.buffer.len] = jobs[base + i];
+                }
+
+                _ = self.write.fetchAdd(@intCast(to_write), .release);
+
+                left_to_write -= to_write;
+            }
+        }
+
+        pub fn takeJob(self: *Self) Job {
+            while (true) {
+                const read = self.read.load(.acquire);
+                if (self.write.load(.acquire) - read > 0) {
+                    // cmpxchgStrong returns null on success.
+                    if (self.read.cmpxchgStrong(read, read + 1, .release, .acquire) == null) {
+                        return self.buffer[read % self.buffer.len];
+                    }
+                }
+            }
+        }
+
+        pub fn workerEntry(userdata: *anyopaque) void {
+            const self: *Self = @ptrCast(@alignCast(userdata));
+
+            while (self.running.load(.acquire)) {
+                var job = self.takeJob();
+
+                job.func(&job.payload);
+            }
+        }
+    };
+}
+
+const ProcessAssetJobPayload = struct {
+    allocator: std.mem.Allocator,
+    asset_type: AssetType,
+    rel_input: []const u8,
+    output_dir: std.fs.Dir,
+    dep_file: ?[]const u8,
+};
+
+const ProcessAssetJobQueue = JobQueue(ProcessAssetJobPayload);
+
 fn parseArgs(allocator: std.mem.Allocator) !Args {
     var args = try std.process.argsWithAllocator(allocator);
     defer args.deinit();
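A minimal usage sketch of the JobQueue above (payload type and job body are illustrative; note that takeJob spins while the queue is empty, so `running` is only rechecked between jobs and the worker is detached here rather than joined):

    fn printJob(payload: *u32) void {
        std.debug.print("processed {}\n", .{payload.*});
    }

    test "JobQueue smoke test" {
        const Queue = JobQueue(u32);

        var storage: [16]Queue.Job = undefined;
        var queue = Queue.init(&storage);

        const worker = try std.Thread.spawn(.{}, Queue.workerEntry, .{&queue});
        worker.detach(); // takeJob never returns on an empty queue, so no join

        queue.pushJobs(&[_]Queue.Job{.{ .payload = 42, .func = &printJob }});
    }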
@@ -130,14 +209,27 @@ pub fn main() !void {
     var buf_asset_list_writer = std.io.bufferedWriter(std.io.getStdOut().writer());
     const asset_list_writer = buf_asset_list_writer.writer();
 
-    std.log.debug("type: {s}, rel_input: {s}, output_dir: {s}", .{ @tagName(asset_type), rel_input, rel_output });
+    var queue = ProcessAssetJobQueue.init(try allocator.alloc(ProcessAssetJobQueue.Job, 1024));
 
-    switch (asset_type) {
-        .Scene => try processScene(allocator, rel_input, output_dir, asset_list_writer),
-        .ShaderProgram => try processShaderProgram(allocator, rel_input, output_dir, args.dep_file, asset_list_writer),
-        .Texture => try processTextureFromFile(allocator, rel_input, output_dir, asset_list_writer),
-        else => unreachable,
-    }
+    const num_workers = (try std.Thread.getCpuCount()) - 1;
+    const worker_threads = try allocator.alloc(std.Thread, num_workers);
+
+    for (0..num_workers) |i| {
+        worker_threads[i] = try std.Thread.spawn(.{}, ProcessAssetJobQueue.workerEntry, .{&queue});
+    }
+
+    std.log.debug("type: {s}, rel_input: {s}, output_dir: {s}", .{ @tagName(asset_type), rel_input, rel_output });
+    queue.pushJobs(&[_]ProcessAssetJobQueue.Job{.{
+        .payload = .{
+            .allocator = allocator,
+            .asset_type = asset_type,
+            .rel_input = rel_input,
+            .output_dir = output_dir,
+            .dep_file = args.dep_file,
+        },
+        .func = processAsset,
+    }});
 
     try buf_asset_list_writer.flush();
 
     if (args.dep_file) |dep_file_path| {

@@ -146,6 +238,15 @@ pub fn main() !void {
     }
 }
 
+fn processAsset(payload: *ProcessAssetJobPayload) void {
+    // JobFn cannot return an error union, so failures currently panic.
+    const result = switch (payload.asset_type) {
+        .Scene => processScene(payload.allocator, payload.rel_input, payload.output_dir, payload.asset_list_writer),
+        .ShaderProgram => processShaderProgram(payload.allocator, payload.rel_input, payload.output_dir, payload.dep_file, payload.asset_list_writer),
+        .Texture => processTextureFromFile(payload.allocator, payload.rel_input, payload.output_dir, payload.asset_list_writer),
+        else => unreachable,
+    };
+    result catch @panic("asset processing failed");
+}
 
 fn copyFile(_type: AssetType, input: []const u8, output_dir: std.fs.Dir, asset_list_writer: anytype) !void {
     const asset_path = AssetPath{ .simple = input };
 

@@ -622,6 +723,21 @@ fn processShader(allocator: std.mem.Allocator, flags: []const []const u8, input:
         try file.writeAll(old_depfile_contents);
     }
 
+    // {
+    //     var spvc_context: c.spvc_context = null;
+    //     _ = c.spvc_context_create(&spvc_context);
+    //     defer c.spvc_context_destroy(spvc_context);
+
+    //     var ir: c.spvc_parsed_ir = null;
+    //     // c.spvc_context_parse_spirv(spvc_context, spirv: [*c]const c.SpvId, word_count: usize, &ir);
+
+    //     var compiler_glsl: c.spvc_compiler = null;
+    //     c.spvc_context_create_compiler(spvc_context, c.SPVC_BACKEND_GLSL, ir, c.SPVC_CAPTURE_MODE_TAKE_OWNERSHIP, &compiler_glsl);
+
+    //     var resources: c.spvc_resources = null;
+    //     c.spvc_compiler_create_shader_resources(compiler_glsl, &resources);
+    // }
+
     return result.stdout;
 }