Compare commits

...

2 Commits

Author SHA1 Message Date
52cdbef876 Hardcore Vulkan migration began 2024-09-21 21:09:17 +04:00
d708144dca Add compute shader support 2024-09-10 13:05:27 +04:00
21 changed files with 895 additions and 190 deletions

16
.vscode/launch.json vendored Normal file
View File

@ -0,0 +1,16 @@
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"name": "Debug",
"type": "gdb",
"request": "launch",
"target": "./zig-out/learnopengl",
"cwd": "${workspaceRoot}",
"valuesFormatting": "parseText"
}
]
}

View File

@ -2,5 +2,6 @@
{
"shader": "bloom_downsample.glsl",
"vertex": true,
"fragment": true
"fragment": true,
"compute": false
}

View File

@ -1,6 +1,6 @@
{
"shader": "bloom_upsample.glsl",
"vertex": true,
"fragment": true
"fragment": true,
"compute": false
}

View File

@ -2,5 +2,6 @@
{
"shader": "cube_shadow.glsl",
"vertex": true,
"fragment": true
"fragment": true,
"compute": false
}

View File

@ -1,5 +1,6 @@
{
"shader": "debug.glsl",
"vertex": true,
"fragment": true
"fragment": true,
"compute": false
}

View File

@ -1,5 +1,6 @@
{
"shader": "mesh.glsl",
"vertex": true,
"fragment": true
"fragment": true,
"compute": false
}

View File

@ -2,5 +2,6 @@
{
"shader": "post_process.glsl",
"vertex": true,
"fragment": true
"fragment": true,
"compute": false
}

View File

@ -2,5 +2,6 @@
{
"shader": "shadow.glsl",
"vertex": true,
"fragment": true
"fragment": true,
"compute": false
}

View File

@ -1,5 +1,6 @@
{
"shader": "unlit.glsl",
"vertex": true,
"fragment": true
"fragment": true,
"compute": false
}

View File

@ -2,5 +2,6 @@
{
"shader": "z_prepass.glsl",
"vertex": true,
"fragment": true
"fragment": true,
"compute": false
}

View File

@ -3,6 +3,20 @@ const Build = std.Build;
const Step = Build.Step;
const GenerateAssetManifest = @import("tools/GenerateAssetManifest.zig");
/// Generates a Zig Vulkan binding from the Khronos registry and exposes it
/// as an importable module named "vk".
///
/// Runs the `vulkan-zig-generator` tool (from the `vulkan_zig` dependency)
/// over `registry/vk.xml` (from the `vulkan_headers` dependency) at build
/// time; the generated `vk.zig` becomes the module's root source file.
fn buildVulkanWrapper(b: *Build, target: Build.ResolvedTarget, optimize: std.builtin.OptimizeMode) *Build.Module {
    const registry_xml = b.dependency("vulkan_headers", .{}).path("registry/vk.xml");
    const generator = b.dependency("vulkan_zig", .{}).artifact("vulkan-zig-generator");

    const run_generator = b.addRunArtifact(generator);
    run_generator.addFileArg(registry_xml);
    const generated_vk_zig = run_generator.addOutputFileArg("vk.zig");

    return b.addModule("vk", .{
        .root_source_file = generated_vk_zig,
        .optimize = optimize,
        .target = target,
    });
}
// Although this function looks imperative, note that its job is to
// declaratively construct a build graph that will be executed by an external
// runner.
@ -23,6 +37,8 @@ pub fn build(b: *Build) void {
"Prioritize performance, safety, or binary size for build time tools",
) orelse .Debug;
const vk = buildVulkanWrapper(b, target, optimize);
const tracy = b.dependency("zig-tracy", .{
.target = target,
.optimize = optimize,
@ -67,6 +83,7 @@ pub fn build(b: *Build) void {
l.root_module.addImport("assets", assets_mod);
l.root_module.addImport("asset_manifest", asset_manifest_mod);
l.root_module.addImport("tracy", tracy.module("tracy"));
l.root_module.addImport("vk", vk);
l.linkLibrary(tracy.artifact("tracy"));
}
@ -153,7 +170,6 @@ pub fn build(b: *Build) void {
const asset_extensions = [_][]const u8{
"obj",
"glsl",
"prog",
"png",
"dds",
@ -226,8 +242,14 @@ fn buildAssetCompiler(b: *Build, optimize: std.builtin.OptimizeMode, assets_mod:
//.formats = @as([]const u8, "3DS,3MF,AC,AMF,ASE,Assbin,Assjson,Assxml,B3D,Blender,BVH,C4D,COB,Collada,CSM,DXF,FBX,glTF,glTF2,HMP,IFC,Irr,LWO,LWS,M3D,MD2,MD3,MD5,MDC,MDL,MMD,MS3D,NDO,NFF,Obj,OFF,Ogre,OpenGEX,Ply,Q3BSP,Q3D,Raw,SIB,SMD,Step,STEPParser,STL,Terragen,Unreal,X,X3D,XGL"),
.formats = @as([]const u8, "Obj,FBX,glTF,glTF2,Blend"),
});
const mach_dxcompiler_dep = b.dependency("mach_dxcompiler", .{
.target = b.graph.host,
.optimize = optimize,
.from_source = true,
.spirv = true,
.skip_tests = true,
});
const zalgebra_dep = b.dependency("zalgebra", .{});
const assimp_lib = assimp_dep.artifact("assimp");
const assetc = b.addExecutable(.{
@ -247,8 +269,11 @@ fn buildAssetCompiler(b: *Build, optimize: std.builtin.OptimizeMode, assets_mod:
assetc.linkSystemLibrary("ispc_texcomp");
const zalgebra_mod = zalgebra_dep.module("zalgebra");
const mach_dxcompiler_mod = mach_dxcompiler_dep.module("mach-dxcompiler");
const formats_mod = b.addModule("formats", .{ .root_source_file = b.path("src/formats.zig") });
formats_mod.addImport("zalgebra", zalgebra_mod);
formats_mod.addImport("mach-dxcompiler", mach_dxcompiler_mod);
formats_mod.addImport("assets", assets_mod);
assetc.root_module.addImport("formats", formats_mod);
assetc.root_module.addImport("zalgebra", zalgebra_mod);

View File

@ -31,6 +31,18 @@
.url = "https://github.com/sergeypdev/zig-tracy/tarball/2b818574810a66deacc424298c1a7679ca6e4375",
.hash = "1220638bc94d67225a620e1abd71d85b299c8b764490fd51233ed73d76ee44cc5835",
},
.vulkan_headers = .{
.url = "https://github.com/KhronosGroup/Vulkan-Headers/archive/v1.3.283.tar.gz",
.hash = "1220a7e73d72a0d56bc2a65f9d8999a7c019e42260a0744c408d1cded111bc205e10",
},
.vulkan_zig = .{
.url = "https://github.com/Snektron/vulkan-zig/tarball/66b7b773bb61e2102025f2d5ff0ae8c5f53e19cc",
.hash = "12208958f173b8b81bfac797955f0416ab38b21d1f69d4ebf6c7ca460a828a41cd45",
},
.mach_dxcompiler = .{
.url = "https://github.com/sinnwrig/mach-dxcompiler/tarball/c3dfe92f3f04d4a3262dbc1a71f0016b9af92eb4",
.hash = "12202f48e7cf06b1f2ecfd84f16effbd5bb9d644ea17e8a6144b4301a4dea198cf9c",
},
},
.paths = .{
// This makes *all* files, recursively, included in this package. It is generally

View File

@ -304,6 +304,7 @@ fn didUpdate(self: *AssetManager, path: []const u8, last_modified: i128) bool {
/// Declares which shader stages make up a shader program asset.
/// Each field holds the source path/name for that stage.
/// NOTE(review): all three fields are non-optional slices, so a compute-only
/// program presumably still carries (unused) vertex/fragment entries — confirm
/// against the .prog asset format.
pub const ShaderProgramDefinition = struct {
    vertex: []const u8,
    fragment: []const u8,
    compute: []const u8,
};
pub fn loadShaderProgram(self: *AssetManager, handle: Handle.ShaderProgram, permuted_id: AssetId, defines: []const DefinePair) LoadedShaderProgram {
@ -318,8 +319,11 @@ fn loadShaderProgramErr(self: *AssetManager, id: AssetId, permuted_id: AssetId,
const data = try self.loadFile(self.frame_arena, asset_manifest.getPath(id), SHADER_MAX_BYTES);
const program = formats.ShaderProgram.fromBuffer(data.bytes);
if (!program.flags.vertex or !program.flags.fragment) {
std.log.err("Can't compile shader program {s} without vertex AND fragment shaders\n", .{asset_manifest.getPath(id)});
const graphics_pipeline = program.flags.vertex and program.flags.fragment;
const compute_pipeline = program.flags.compute;
if (!graphics_pipeline and !compute_pipeline) {
std.log.err("Can't compile shader program {s} without vertex AND fragment shaders or a compute shader\n", .{asset_manifest.getPath(id)});
return error.UnsupportedShader;
}
@ -330,6 +334,7 @@ fn loadShaderProgramErr(self: *AssetManager, id: AssetId, permuted_id: AssetId,
const prog = gl.createProgram();
errdefer gl.deleteProgram(prog);
if (program.flags.vertex and program.flags.fragment) {
const vertex_shader = try self.compileShader(shader.source, .vertex);
defer gl.deleteShader(vertex_shader);
const fragment_shader = try self.compileShader(shader.source, .fragment);
@ -341,6 +346,15 @@ fn loadShaderProgramErr(self: *AssetManager, id: AssetId, permuted_id: AssetId,
defer gl.detachShader(prog, fragment_shader);
gl.linkProgram(prog);
} else {
const compute_shader = try self.compileShader(shader.source, .compute);
defer gl.deleteShader(compute_shader);
gl.attachShader(prog, compute_shader);
defer gl.detachShader(prog, compute_shader);
gl.linkProgram(prog);
}
var success: c_int = 0;
gl.getProgramiv(prog, gl.LINK_STATUS, &success);
@ -774,20 +788,24 @@ pub const IndexSlice = struct {
/// Shader stages supported by the asset pipeline, mapped to their GL enums
/// and per-stage preprocessor prologues.
pub const ShaderType = enum {
    vertex,
    fragment,
    compute,

    // NOTE(review): "goGLType" looks like a typo for "toGLType"; left as-is
    // because renaming would break callers outside this view.
    pub fn goGLType(self: ShaderType) gl.GLenum {
        return switch (self) {
            .vertex => gl.VERTEX_SHADER,
            .fragment => gl.FRAGMENT_SHADER,
            .compute => gl.COMPUTE_SHADER,
        };
    }

    // Prologue injected before the shared shader source so one .glsl file can
    // serve multiple stages: VERTEX_EXPORT flips between `out` (vertex) and
    // `in` (fragment); compute shaders get only their stage define.
    const VERTEX_DEFINES = "#version 460 core\n#define VERTEX_SHADER 1\n#define VERTEX_EXPORT out\n";
    const FRAGMENT_DEFINES = "#version 460 core\n#define FRAGMENT_SHADER 1\n#define VERTEX_EXPORT in\n";
    const COMPUTE_DEFINES = "#version 460 core\n#define COMPUTE_SHADER 1\n";

    /// Returns the preprocessor prologue for this stage.
    pub fn getDefines(self: ShaderType) []const u8 {
        return switch (self) {
            .vertex => VERTEX_DEFINES,
            .fragment => FRAGMENT_DEFINES,
            .compute => COMPUTE_DEFINES,
        };
    }
};
@ -1578,18 +1596,18 @@ const VertexBufferHeap = struct {
var index_buddy = try BuddyAllocator.init(allocator, 4096, 13);
errdefer index_buddy.deinit();
const vertex_buf_size = vertex_buddy.getSize();
const index_buf_size = index_buddy.getSize();
// const vertex_buf_size = vertex_buddy.getSize();
// const index_buf_size = index_buddy.getSize();
var bufs = [_]gl.GLuint{ 0, 0, 0, 0, 0 };
gl.createBuffers(bufs.len, &bufs);
errdefer gl.deleteBuffers(bufs.len, &bufs);
const bufs = [_]gl.GLuint{ 0, 0, 0, 0, 0 };
// gl.createBuffers(bufs.len, &bufs);
// errdefer gl.deleteBuffers(bufs.len, &bufs);
for (bufs) |buf| {
if (buf == 0) {
return error.BufferAllocationFailed;
}
}
// for (bufs) |buf| {
// if (buf == 0) {
// return error.BufferAllocationFailed;
// }
// }
const vertices = Buffer.init(bufs[0], @sizeOf(formats.Vector3));
const normals = Buffer.init(bufs[1], @sizeOf(formats.Vector3));
@ -1597,36 +1615,36 @@ const VertexBufferHeap = struct {
const uvs = Buffer.init(bufs[3], @sizeOf(formats.Vector2));
const indices = Buffer.init(bufs[4], @sizeOf(formats.Index));
gl.namedBufferStorage(
vertices.buffer,
@intCast(vertex_buf_size * @sizeOf(formats.Vector3)),
null,
gl.DYNAMIC_STORAGE_BIT,
);
gl.namedBufferStorage(
normals.buffer,
@intCast(vertex_buf_size * @sizeOf(formats.Vector3)),
null,
gl.DYNAMIC_STORAGE_BIT,
);
gl.namedBufferStorage(
tangents.buffer,
@intCast(vertex_buf_size * @sizeOf(formats.Vector3)),
null,
gl.DYNAMIC_STORAGE_BIT,
);
gl.namedBufferStorage(
uvs.buffer,
@intCast(vertex_buf_size * @sizeOf(formats.Vector2)),
null,
gl.DYNAMIC_STORAGE_BIT,
);
gl.namedBufferStorage(
indices.buffer,
@intCast(index_buf_size * @sizeOf(formats.Index)),
null,
gl.DYNAMIC_STORAGE_BIT,
);
// gl.namedBufferStorage(
// vertices.buffer,
// @intCast(vertex_buf_size * @sizeOf(formats.Vector3)),
// null,
// gl.DYNAMIC_STORAGE_BIT,
// );
// gl.namedBufferStorage(
// normals.buffer,
// @intCast(vertex_buf_size * @sizeOf(formats.Vector3)),
// null,
// gl.DYNAMIC_STORAGE_BIT,
// );
// gl.namedBufferStorage(
// tangents.buffer,
// @intCast(vertex_buf_size * @sizeOf(formats.Vector3)),
// null,
// gl.DYNAMIC_STORAGE_BIT,
// );
// gl.namedBufferStorage(
// uvs.buffer,
// @intCast(vertex_buf_size * @sizeOf(formats.Vector2)),
// null,
// gl.DYNAMIC_STORAGE_BIT,
// );
// gl.namedBufferStorage(
// indices.buffer,
// @intCast(index_buf_size * @sizeOf(formats.Index)),
// null,
// gl.DYNAMIC_STORAGE_BIT,
// );
return .{
.vertex_buddy = vertex_buddy,

534
src/GraphicsContext.zig Normal file
View File

@ -0,0 +1,534 @@
const std = @import("std");
const vk = @import("vk");
const c = @import("sdl.zig");
// This file is a struct: GraphicsContext owns the Vulkan instance, device,
// surface, swapchain and per-frame sync state for the SDL window.
pub const GraphicsContext = @This();

// Core feature levels + surface/swapchain extensions the dispatch tables load.
const apis: []const vk.ApiInfo = &.{
    vk.features.version_1_0,
    vk.features.version_1_1,
    vk.features.version_1_2,
    vk.features.version_1_3,
    vk.extensions.khr_surface,
    vk.extensions.khr_swapchain,
};

// vulkan-zig dispatch-table and proxy types specialized for `apis`.
const BaseDispatch = vk.BaseWrapper(apis);
const InstanceDispatch = vk.InstanceWrapper(apis);
const Instance = vk.InstanceProxy(apis);
const Device = vk.DeviceProxy(apis);
const DeviceDispatch = Device.Wrapper;
const CommandBuffer = vk.CommandBufferProxy(apis);

const device_extensions = [_][:0]const u8{
    vk.extensions.khr_swapchain.name,
};

// Validation layer is enabled unconditionally here (also passed at device
// level, which modern Vulkan ignores but accepts).
const vk_layers = [_][:0]const u8{"VK_LAYER_KHRONOS_validation"};

// Maximum number of frames in flight (size of frame_syncs ring).
const MAX_FRAME_LAG = 3;

allocator: std.mem.Allocator = undefined,
window: *c.SDL_Window = undefined,
vkb: BaseDispatch = undefined,
vki: InstanceDispatch = undefined,
vkd: DeviceDispatch = undefined,
device_info: SelectedPhysicalDevice = undefined,
instance: Instance = undefined,
device: Device = undefined,
queues: DeviceQueues = undefined,
surface: vk.SurfaceKHR = .null_handle,
swapchain: vk.SwapchainKHR = .null_handle,
swapchain_extent: vk.Extent2D = .{ .width = 0, .height = 0 },
// Owned by `allocator`; reallocated on swapchain resize.
swapchain_images: []vk.Image = &.{},
// NOTE: TEST
// Index into frame_syncs, advanced modulo MAX_FRAME_LAG each draw().
frame: u32 = 0,
frame_syncs: [MAX_FRAME_LAG]Sync = [1]Sync{.{}} ** MAX_FRAME_LAG,
command_pool: vk.CommandPool = .null_handle,
// Per-frame synchronization objects; one instance per in-flight frame.
const Sync = struct {
    // Signaled by vkAcquireNextImageKHR when the swapchain image is usable.
    acquire_swapchain_image: vk.Semaphore = .null_handle,
    // Signaled by the graphics submit; waited on by present.
    draw_sema: vk.Semaphore = .null_handle,
    // CPU fence for frame pacing; created signaled so the first wait passes.
    draw_fence: vk.Fence = .null_handle,

    /// Blocks until the GPU work guarded by `draw_fence` completes,
    /// then resets the fence for reuse this frame.
    pub fn waitForDrawAndReset(self: *Sync, gc: *GraphicsContext) !void {
        _ = try gc.device.waitForFences(1, &.{self.draw_fence}, vk.TRUE, std.math.maxInt(u64));
        try gc.device.resetFences(1, &.{self.draw_fence});
    }
};
/// Initializes the Vulkan instance, surface, physical/logical device, queues,
/// swapchain, per-frame sync objects and the graphics command pool.
///
/// `allocator` is retained for swapchain-image arrays; `window` must outlive
/// this context. Errors from SDL are reported via stderr before returning.
///
/// Bug fix: the compute / host_to_device / device_to_host queues were fetched
/// with `queue_config.graphics.family` instead of their own selected families,
/// which retrieves the wrong queue (or an invalid one) whenever selectQueues
/// places them in a different family.
pub fn init(self: *GraphicsContext, allocator: std.mem.Allocator, window: *c.SDL_Window) !void {
    self.allocator = allocator;
    self.window = window;

    // Scratch space for short-lived queries (extension names, device lists).
    var scratch: [4096]u8 = undefined;
    var fba = std.heap.FixedBufferAllocator.init(&scratch);

    const vkGetInstanceProcAddr: vk.PfnGetInstanceProcAddr = @ptrCast(c.SDL_Vulkan_GetVkGetInstanceProcAddr());

    // Two-call pattern: first get the count, then the names.
    var sdl_instance_ext_count: c_uint = 0;
    if (c.SDL_Vulkan_GetInstanceExtensions(window, &sdl_instance_ext_count, null) == c.SDL_FALSE) {
        std.debug.print("SDL_Vulkan_GetInstanceExtensions: get count {s}\n", .{c.SDL_GetError()});
        return error.GetSDLExtensions;
    }
    const sdl_instance_ext_names = try fba.allocator().alloc([*:0]const u8, sdl_instance_ext_count);
    if (c.SDL_Vulkan_GetInstanceExtensions(window, &sdl_instance_ext_count, @ptrCast(sdl_instance_ext_names.ptr)) == c.SDL_FALSE) {
        std.debug.print("SDL_Vulkan_GetInstanceExtensions: get names {s}\n", .{c.SDL_GetError()});
        return error.GetSDLExtensions;
    }
    std.debug.print("SDL Extensions: {s}\n", .{sdl_instance_ext_names});

    self.vkb = try BaseDispatch.load(vkGetInstanceProcAddr);
    const instance_handle = try self.vkb.createInstance(&vk.InstanceCreateInfo{
        .p_application_info = &vk.ApplicationInfo{
            .api_version = vk.API_VERSION_1_3,
            .application_version = 0,
            .engine_version = 0,
        },
        .pp_enabled_layer_names = @ptrCast((&vk_layers).ptr),
        .enabled_layer_count = @intCast(vk_layers.len),
        .enabled_extension_count = @intCast(sdl_instance_ext_names.len),
        .pp_enabled_extension_names = sdl_instance_ext_names.ptr,
    }, null);
    self.vki = try InstanceDispatch.load(instance_handle, vkGetInstanceProcAddr);
    errdefer self.vki.destroyInstance(instance_handle, null);
    self.instance = Instance.init(instance_handle, &self.vki);

    // SDL creates the surface with its own C handle type; reinterpret it into
    // the vulkan-zig handle (same underlying 64-bit value).
    var sdl_vksurface: c.VkSurfaceKHR = null;
    if (c.SDL_Vulkan_CreateSurface(window, @as(*c.VkInstance, @ptrCast(&self.instance.handle)).*, &sdl_vksurface) == c.SDL_FALSE) {
        std.debug.print("SDL_Vulkan_CreateSurface: {s}\n", .{c.SDL_GetError()});
        return error.SDLVulkanCreateSurface;
    }
    std.debug.assert(sdl_vksurface != null);
    self.surface = @as(*vk.SurfaceKHR, @ptrCast(&sdl_vksurface)).*;

    const physical_devices = try self.instance.enumeratePhysicalDevicesAlloc(fba.allocator());
    self.device_info = try selectPhysicalDevice(self.instance, self.surface, physical_devices);

    const queue_config = try selectQueues(self.instance, self.device_info.physical_device);
    const device_create_config = vk.DeviceCreateInfo{
        // synchronization2 is required for pipelineBarrier2 in draw().
        .p_next = @ptrCast(&vk.PhysicalDeviceSynchronization2Features{ .synchronization_2 = vk.TRUE }),
        .p_queue_create_infos = &queue_config.queue_create_info,
        .queue_create_info_count = queue_config.queue_count,
        .p_enabled_features = &self.device_info.features,
        .pp_enabled_layer_names = @ptrCast((&vk_layers).ptr),
        .enabled_layer_count = @intCast(vk_layers.len),
        .pp_enabled_extension_names = @ptrCast((&device_extensions).ptr),
        .enabled_extension_count = @intCast(device_extensions.len),
    };
    const device_handle = try self.instance.createDevice(self.device_info.physical_device, &device_create_config, null);
    self.vkd = try DeviceDispatch.load(device_handle, self.instance.wrapper.dispatch.vkGetDeviceProcAddr);
    errdefer self.vkd.destroyDevice(device_handle, null);
    self.device = Device.init(device_handle, &self.vkd);

    try self.maybeResizeSwapchain();
    errdefer self.device.destroySwapchainKHR(self.swapchain, null);

    // TODO: handle the case when different queue instance map to the same queue
    // Each queue is fetched from the family selectQueues assigned to it.
    const graphics_queue = QueueInstance{
        .device = self.device,
        .family = queue_config.graphics.family,
        .handle = self.device.getDeviceQueue(queue_config.graphics.family, queue_config.graphics.index),
    };
    const compute_queue = QueueInstance{
        .device = self.device,
        .family = queue_config.compute.family,
        .handle = self.device.getDeviceQueue(queue_config.compute.family, queue_config.compute.index),
    };
    const host_to_device_queue = QueueInstance{
        .device = self.device,
        .family = queue_config.host_to_device.family,
        .handle = self.device.getDeviceQueue(queue_config.host_to_device.family, queue_config.host_to_device.index),
    };
    const device_to_host_queue = QueueInstance{
        .device = self.device,
        .family = queue_config.device_to_host.family,
        .handle = self.device.getDeviceQueue(queue_config.device_to_host.family, queue_config.device_to_host.index),
    };
    self.queues = DeviceQueues{
        .graphics = graphics_queue,
        .compute = compute_queue,
        .host_to_device = host_to_device_queue,
        .device_to_host = device_to_host_queue,
    };

    for (0..MAX_FRAME_LAG) |i| {
        self.frame_syncs[i].acquire_swapchain_image = try self.device.createSemaphore(&.{}, null);
        self.frame_syncs[i].draw_sema = try self.device.createSemaphore(&.{}, null);
        // Created signaled so the first waitForDrawAndReset doesn't deadlock.
        self.frame_syncs[i].draw_fence = try self.device.createFence(&.{ .flags = .{ .signaled_bit = true } }, null);
    }

    self.command_pool = try self.queues.graphics.createCommandPool(.{});
}
/// Records and submits a test frame: acquire a swapchain image, transition it,
/// clear it to a solid color, transition to present, then present it.
/// Advances the in-flight frame index at the end.
///
/// NOTE(review): a primary command buffer is allocated from `command_pool`
/// every frame and never freed or reset — the pool is created without a reset
/// flag and nothing resets it in this view, so this presumably grows without
/// bound. TODO confirm and free/reset per frame.
pub fn draw(self: *GraphicsContext) !void {
    var cmd_bufs = [_]vk.CommandBuffer{.null_handle};
    try self.device.allocateCommandBuffers(&.{
        .command_pool = self.command_pool,
        .level = .primary,
        .command_buffer_count = cmd_bufs.len,
    }, &cmd_bufs);
    const test_cmd_buf = CommandBuffer.init(cmd_bufs[0], &self.vkd);

    // Wait for this frame slot's previous GPU work before reusing its sync objects.
    const sync = &self.frame_syncs[self.frame];
    try sync.waitForDrawAndReset(self);

    // Move this out into a separate func
    // Acquire a swapchain image, recreating the swapchain when it is out of date.
    const swapchain_image_index: u32 = blk: {
        var found = false;
        var swapchain_img: u32 = 0;
        try self.maybeResizeSwapchain();
        while (!found) {
            const acquire_result = try self.device.acquireNextImageKHR(self.swapchain, std.math.maxInt(u64), sync.acquire_swapchain_image, .null_handle);
            switch (acquire_result.result) {
                // suboptimal still yields a usable image; use it this frame.
                .success, .suboptimal_khr => {
                    swapchain_img = acquire_result.image_index;
                    found = true;
                },
                .error_out_of_date_khr => {
                    // TODO: resize swapchain
                    std.debug.print("Out of date swapchain\n", .{});
                    try self.maybeResizeSwapchain();
                },
                .error_surface_lost_khr => {
                    // TODO: recreate surface
                    return error.SurfaceLost;
                },
                .not_ready => return error.SwapchainImageNotReady,
                .timeout => return error.SwapchainImageTimeout,
                else => {
                    std.debug.print("Unexpected value: {}\n", .{acquire_result.result});
                    @panic("Unexpected");
                },
            }
        }
        break :blk swapchain_img;
    };
    const current_image = self.swapchain_images[swapchain_image_index];

    try test_cmd_buf.beginCommandBuffer(&.{});
    {
        {
            // undefined -> transfer_dst so the image can be cleared.
            // NOTE(review): empty src/dst access masks and no stage masks rely
            // on semaphore ordering alone — confirm against sync2 validation.
            const img_barrier = vk.ImageMemoryBarrier2{
                .image = current_image,
                .old_layout = .undefined,
                .new_layout = .transfer_dst_optimal,
                .src_access_mask = .{},
                .dst_access_mask = .{},
                .src_queue_family_index = self.queues.graphics.family,
                .dst_queue_family_index = self.queues.graphics.family,
                .subresource_range = .{
                    .aspect_mask = .{ .color_bit = true },
                    .base_array_layer = 0,
                    .base_mip_level = 0,
                    .layer_count = 1,
                    .level_count = 1,
                },
            };
            test_cmd_buf.pipelineBarrier2(&.{
                .p_image_memory_barriers = &.{img_barrier},
                .image_memory_barrier_count = 1,
            });
        }
        // Clear to a fixed test color.
        test_cmd_buf.clearColorImage(current_image, .transfer_dst_optimal, &.{ .float_32 = .{ 0.8, 0.7, 0.6, 1.0 } }, 1, &.{.{
            .aspect_mask = .{ .color_bit = true },
            .base_array_layer = 0,
            .base_mip_level = 0,
            .layer_count = 1,
            .level_count = 1,
        }});
        {
            // transfer_dst -> present_src for queuePresentKHR.
            const img_barrier = vk.ImageMemoryBarrier2{
                .image = current_image,
                .old_layout = .transfer_dst_optimal,
                .new_layout = .present_src_khr,
                .src_access_mask = .{},
                .dst_access_mask = .{},
                .src_queue_family_index = self.queues.graphics.family,
                .dst_queue_family_index = self.queues.graphics.family,
                .subresource_range = .{
                    .aspect_mask = .{ .color_bit = true },
                    .base_array_layer = 0,
                    .base_mip_level = 0,
                    .layer_count = 1,
                    .level_count = 1,
                },
            };
            test_cmd_buf.pipelineBarrier2(&.{
                .p_image_memory_barriers = &.{img_barrier},
                .image_memory_barrier_count = 1,
            });
        }
    }
    try test_cmd_buf.endCommandBuffer();

    // Submit: wait on image acquisition, signal draw_sema for present,
    // and fence draw_fence for CPU pacing.
    try self.queues.graphics.submit(
        &SubmitInfo{
            .wait_semaphores = &.{sync.acquire_swapchain_image},
            .wait_dst_stage_mask = &.{.{ .transfer_bit = true }},
            .command_buffers = &.{test_cmd_buf.handle},
            .signal_semaphores = &.{sync.draw_sema},
        },
        sync.draw_fence,
    );
    _ = try self.device.queuePresentKHR(self.queues.graphics.handle, &.{
        .swapchain_count = 1,
        .wait_semaphore_count = 1,
        .p_wait_semaphores = &.{sync.draw_sema},
        .p_swapchains = &.{self.swapchain},
        .p_image_indices = &.{swapchain_image_index},
    });

    self.frame = (self.frame + 1) % MAX_FRAME_LAG;
}
/// (Re)creates the swapchain when the drawable size changed; no-op otherwise.
/// Also used for initial creation (swapchain starts as .null_handle, extent 0x0).
///
/// Bug fix: the previous swapchain handle was passed as `old_swapchain` (which
/// retires it) but was never destroyed, leaking one swapchain per resize. We
/// now wait for the device to go idle and destroy the retired handle. The
/// idle-wait is conservative — resizes are rare, and it guarantees no retired
/// image is still in flight when we destroy it.
fn maybeResizeSwapchain(self: *GraphicsContext) !void {
    var width: c_int = 0;
    var height: c_int = 0;
    c.SDL_Vulkan_GetDrawableSize(self.window, &width, &height);
    const new_extent = vk.Extent2D{ .width = @intCast(width), .height = @intCast(height) };
    if (self.swapchain_extent.width == new_extent.width and self.swapchain_extent.height == new_extent.height) {
        return;
    }

    // The image array belongs to the old swapchain; drop it before recreating.
    if (self.swapchain_images.len > 0) {
        self.allocator.free(self.swapchain_images);
        self.swapchain_images = &.{};
    }

    self.swapchain_extent = new_extent;
    const surface_caps = self.device_info.surface_capabilities;
    const old_swapchain = self.swapchain;
    self.swapchain = try self.device.createSwapchainKHR(&.{
        .surface = self.surface,
        // Prefer triple buffering, clamped to what the surface supports
        // (max_image_count == 0 means "no upper limit").
        .min_image_count = std.math.clamp(3, surface_caps.min_image_count, if (surface_caps.max_image_count == 0) std.math.maxInt(u32) else surface_caps.max_image_count),
        .image_format = .r8g8b8a8_unorm, // tonemapping handles srgb
        .image_color_space = .srgb_nonlinear_khr,
        .image_extent = self.swapchain_extent,
        .image_array_layers = 1,
        .image_usage = .{
            .color_attachment_bit = true,
            .transfer_dst_bit = true, // draw() clears the image via transfer
        },
        .image_sharing_mode = .exclusive,
        .present_mode = .fifo_khr, // required to be supported
        .pre_transform = surface_caps.current_transform,
        .composite_alpha = .{ .opaque_bit_khr = true },
        .clipped = vk.TRUE,
        .old_swapchain = old_swapchain,
    }, null);

    // old_swapchain is retired by the create call but must still be destroyed.
    if (old_swapchain != .null_handle) {
        try self.device.deviceWaitIdle();
        self.device.destroySwapchainKHR(old_swapchain, null);
    }

    self.swapchain_images = try self.device.getSwapchainImagesAllocKHR(self.swapchain, self.allocator);
}
/// The four logical roles this renderer assigns to device queues. Depending on
/// the hardware these may be distinct queues or aliases of the same one
/// (see selectQueues).
pub const DeviceQueues = struct {
    graphics: QueueInstance,
    compute: QueueInstance,
    host_to_device: QueueInstance, // upload/transfer to GPU
    device_to_host: QueueInstance, // readback/transfer from GPU
};
/// Slice-based convenience form of vk.SubmitInfo used by QueueInstance.submit.
/// `wait_semaphores` and `wait_dst_stage_mask` must have equal length.
pub const SubmitInfo = struct {
    wait_semaphores: []const vk.Semaphore = &.{},
    wait_dst_stage_mask: []const vk.PipelineStageFlags = &.{},
    command_buffers: []const vk.CommandBuffer = &.{},
    signal_semaphores: []const vk.Semaphore = &.{},
};
/// A device queue plus the mutex that serializes submissions to it
/// (vkQueueSubmit on the same VkQueue must not be called concurrently).
pub const QueueInstance = struct {
    const Self = @This();

    mu: std.Thread.Mutex = .{},
    device: Device,
    handle: vk.Queue,
    family: u32, // queue family index this queue was created from

    /// Creates a command pool bound to this queue's family.
    /// Caller owns the returned pool.
    pub fn createCommandPool(self: *Self, flags: vk.CommandPoolCreateFlags) !vk.CommandPool {
        return self.device.createCommandPool(&.{
            .flags = flags,
            .queue_family_index = self.family,
        }, null);
    }

    /// Builds a vk.SubmitInfo from the slice-based SubmitInfo and submits it.
    /// Pointer/count pairs are only populated for non-empty slices so empty
    /// ones keep the wrapper's defaults.
    pub fn submit(self: *Self, info: *const SubmitInfo, fence: vk.Fence) Device.QueueSubmitError!void {
        std.debug.assert(info.wait_semaphores.len == info.wait_dst_stage_mask.len);
        var vk_submit_info = vk.SubmitInfo{};
        if (info.wait_semaphores.len > 0) {
            vk_submit_info.p_wait_semaphores = info.wait_semaphores.ptr;
            vk_submit_info.p_wait_dst_stage_mask = info.wait_dst_stage_mask.ptr;
            vk_submit_info.wait_semaphore_count = @intCast(info.wait_semaphores.len);
        }
        if (info.command_buffers.len > 0) {
            vk_submit_info.p_command_buffers = info.command_buffers.ptr;
            vk_submit_info.command_buffer_count = @intCast(info.command_buffers.len);
        }
        if (info.signal_semaphores.len > 0) {
            vk_submit_info.p_signal_semaphores = info.signal_semaphores.ptr;
            vk_submit_info.signal_semaphore_count = @intCast(info.signal_semaphores.len);
        }
        try self.submitVK(&.{vk_submit_info}, fence);
    }

    /// Raw, thread-safe submit: locks the queue mutex around vkQueueSubmit.
    pub fn submitVK(self: *Self, infos: []const vk.SubmitInfo, fence: vk.Fence) Device.QueueSubmitError!void {
        self.mu.lock();
        defer self.mu.unlock();
        try self.device.queueSubmit(self.handle, @intCast(infos.len), infos.ptr, fence);
    }
};
/// A chosen physical device together with the properties queried at
/// selection time (cached so later code doesn't re-query).
const SelectedPhysicalDevice = struct {
    physical_device: vk.PhysicalDevice,
    properties: vk.PhysicalDeviceProperties,
    features: vk.PhysicalDeviceFeatures,
    surface_capabilities: vk.SurfaceCapabilitiesKHR,
};
/// Picks a physical device and caches its properties, features and surface
/// capabilities. Currently just takes the first enumerated device.
/// Returns error.NoDeviceFound when `devices` is empty.
fn selectPhysicalDevice(vki: Instance, surface: vk.SurfaceKHR, devices: []vk.PhysicalDevice) !SelectedPhysicalDevice {
    // TODO: select suitable physical device, allow overriding using some user config
    if (devices.len == 0) return error.NoDeviceFound;

    const chosen = devices[0];
    return SelectedPhysicalDevice{
        .physical_device = chosen,
        .properties = vki.getPhysicalDeviceProperties(chosen),
        .features = vki.getPhysicalDeviceFeatures(chosen),
        .surface_capabilities = try vki.getPhysicalDeviceSurfaceCapabilitiesKHR(chosen, surface),
    };
}
/// Result of selectQueues: the VkDeviceQueueCreateInfo array to pass to
/// device creation, plus (family, index) coordinates for each queue role.
const DeviceQueueConfig = struct {
    /// Coordinates of one queue: which family, and which index within it.
    const Config = struct {
        family: u32,
        index: u32,
    };

    // Up to 4 distinct families; only the first `queue_count` entries are valid.
    queue_create_info: [4]vk.DeviceQueueCreateInfo = undefined,
    queue_count: u32 = 0,
    graphics: Config,
    compute: Config,
    host_to_device: Config,
    device_to_host: Config,
};
// Equal priority for every queue. Sized to cover the largest per-family
// queue count requested by selectQueues (at most 4), with headroom.
const queue_priorities = [_]f32{ 1.0, 1.0, 1.0, 1.0, 1.0 };
/// Assigns the four queue roles (graphics, compute, host_to_device,
/// device_to_host) to queue families of `device` and builds the matching
/// VkDeviceQueueCreateInfo array.
///
/// Strategy:
///   - single family: everything aliases queue (0, 0);
///   - a family with graphics+compute+transfer and >= 4 queues: one queue
///     per role from that family;
///   - otherwise: best-effort per-role family selection (AMD-style split).
///
/// Bug fixes vs. previous version:
///   - the dedicated-family path hardcoded `.queue_family_index = 0` instead
///     of the actual family index that was found;
///   - the fallback path never populated `queue_create_info` (queue_count
///     stayed 0), so the device would be created with no queues; the create
///     infos are now synthesized from the selected (family, index) pairs.
fn selectQueues(instance: Instance, device: vk.PhysicalDevice) !DeviceQueueConfig {
    var scratch: [1024]u8 = undefined;
    var fba = std.heap.FixedBufferAllocator.init(&scratch);
    const queue_family_props = try instance.getPhysicalDeviceQueueFamilyPropertiesAlloc(device, fba.allocator());
    if (queue_family_props.len == 0) {
        return error.NoQueues;
    }

    var queue_create_info: [4]vk.DeviceQueueCreateInfo = undefined;
    var queue_count: u32 = 0;
    var graphics: ?DeviceQueueConfig.Config = null;
    var compute: ?DeviceQueueConfig.Config = null;
    var host_to_device: ?DeviceQueueConfig.Config = null;
    var device_to_host: ?DeviceQueueConfig.Config = null;

    // We're on Intel most likely, just a single queue for everything :(
    if (queue_family_props.len == 1) {
        if (!queue_family_props[0].queue_flags.contains(.{ .graphics_bit = true, .compute_bit = true, .transfer_bit = true })) {
            return error.InvalidQueue;
        }
        graphics = .{ .family = 0, .index = 0 };
        compute = graphics;
        device_to_host = graphics;
        host_to_device = graphics;
        queue_create_info[0] = .{
            .queue_family_index = 0,
            .queue_count = 1,
            .p_queue_priorities = &queue_priorities,
        };
        queue_count = 1;
    } else {
        for (queue_family_props, 0..) |props, family_idx| {
            // Jackpot, generous Jensen provided us with an all powerful queue family, use it for everything
            if (props.queue_flags.contains(.{ .graphics_bit = true, .compute_bit = true, .transfer_bit = true }) and props.queue_count >= 4) {
                graphics = .{
                    .family = @intCast(family_idx),
                    .index = 0,
                };
                compute = .{
                    .family = @intCast(family_idx),
                    .index = 1,
                };
                host_to_device = .{
                    .family = @intCast(family_idx),
                    .index = 2,
                };
                device_to_host = .{
                    .family = @intCast(family_idx),
                    .index = 3,
                };
                queue_create_info[0] = .{
                    // Use the family we actually found, not family 0.
                    .queue_family_index = @intCast(family_idx),
                    .queue_count = 4,
                    .p_queue_priorities = &queue_priorities,
                };
                queue_count = 1;
                break;
            }
            // Probably AMD, one graphics+compute queue, 2 separate compute queues, one pure transfer queue
            if (props.queue_flags.graphics_bit) {
                graphics = .{
                    .family = @intCast(family_idx),
                    .index = 0,
                };
            }
            // Prefer a compute-only family over a graphics+compute one.
            if (props.queue_flags.compute_bit and (compute == null or !props.queue_flags.graphics_bit)) {
                compute = .{
                    .family = @intCast(family_idx),
                    .index = 0,
                };
            }
            // Prefer a dedicated transfer family over graphics/compute ones.
            if (props.queue_flags.transfer_bit and (host_to_device == null or !props.queue_flags.graphics_bit or !props.queue_flags.compute_bit)) {
                device_to_host = .{
                    .family = @intCast(family_idx),
                    .index = 0,
                };
                host_to_device = .{
                    .family = @intCast(family_idx),
                    .index = 0,
                };
            }
        }
    }

    if (graphics == null or compute == null or device_to_host == null or host_to_device == null) {
        return error.MissingQueueFeatures;
    }

    // Fallback path above only records (family, index) pairs; build the
    // create infos here, merging roles that share a family.
    if (queue_count == 0) {
        const selections = [_]DeviceQueueConfig.Config{ graphics.?, compute.?, host_to_device.?, device_to_host.? };
        for (selections) |sel| {
            var merged = false;
            for (queue_create_info[0..queue_count]) |*create_info| {
                if (create_info.queue_family_index == sel.family) {
                    // Request enough queues in this family to cover the highest index used.
                    create_info.queue_count = @max(create_info.queue_count, sel.index + 1);
                    merged = true;
                    break;
                }
            }
            if (!merged) {
                queue_create_info[queue_count] = .{
                    .queue_family_index = sel.family,
                    .queue_count = sel.index + 1,
                    .p_queue_priorities = &queue_priorities,
                };
                queue_count += 1;
            }
        }
    }

    return .{
        .queue_create_info = queue_create_info,
        .queue_count = queue_count,
        .graphics = graphics.?,
        .compute = compute.?,
        .host_to_device = host_to_device.?,
        .device_to_host = device_to_host.?,
    };
}

View File

@ -9,6 +9,9 @@ const math = @import("math.zig");
const formats = @import("formats.zig");
const AABB = AssetManager.AABB; // TODO: move AABB out of formats pls
const tracy = @import("tracy");
const GraphicsContext = @import("GraphicsContext.zig");
const gc = GraphicsContext.init();
const za = @import("zalgebra");
const Vec2 = za.Vec2;
@ -294,15 +297,16 @@ pub fn init(allocator: std.mem.Allocator, frame_arena: std.mem.Allocator, assetm
gl.createFramebuffers(1, &render.shadow_framebuffer);
checkGLError();
std.debug.assert(render.shadow_framebuffer != 0);
gl.namedFramebufferDrawBuffer(render.shadow_framebuffer, gl.FRONT_LEFT);
gl.namedFramebufferReadBuffer(render.shadow_framebuffer, gl.NONE);
}
// Verify directional shadow framebuffer setup
{
gl.namedFramebufferTextureLayer(render.shadow_framebuffer, gl.COLOR_ATTACHMENT0, render.shadow_texture_array, 0, 0);
checkGLError();
gl.namedFramebufferTexture(render.shadow_framebuffer, gl.DEPTH_ATTACHMENT, render.direct_shadow_depth_buffer_texture, 0);
checkGLError();
const check_fbo_status = gl.checkNamedFramebufferStatus(render.shadow_framebuffer, gl.DRAW_FRAMEBUFFER);
checkGLError();
if (check_fbo_status != gl.FRAMEBUFFER_COMPLETE) {
std.log.debug("Shadow Framebuffer Incomplete: {}\n", .{check_fbo_status});
}
@ -311,14 +315,18 @@ pub fn init(allocator: std.mem.Allocator, frame_arena: std.mem.Allocator, assetm
// Verify cube shadow framebuffer setup
{
gl.namedFramebufferTextureLayer(render.shadow_framebuffer, gl.COLOR_ATTACHMENT0, render.cube_shadow_texture_array, 0, 0);
checkGLError();
gl.namedFramebufferTexture(render.shadow_framebuffer, gl.DEPTH_ATTACHMENT, render.direct_shadow_depth_buffer_texture, 0);
checkGLError();
const check_fbo_status = gl.checkNamedFramebufferStatus(render.shadow_framebuffer, gl.DRAW_FRAMEBUFFER);
checkGLError();
if (check_fbo_status != gl.FRAMEBUFFER_COMPLETE) {
std.log.debug("Shadow Framebuffer Incomplete: {}\n", .{check_fbo_status});
}
}
gl.createBuffers(1, &render.shadow_matrices_buffer);
checkGLError();
gl.namedBufferStorage(
render.shadow_matrices_buffer,
@ -326,6 +334,7 @@ pub fn init(allocator: std.mem.Allocator, frame_arena: std.mem.Allocator, assetm
null,
gl.DYNAMIC_STORAGE_BIT,
);
checkGLError();
// SHADOW VAO
var vao: gl.GLuint = 0;
@ -338,35 +347,19 @@ pub fn init(allocator: std.mem.Allocator, frame_arena: std.mem.Allocator, assetm
gl.enableVertexArrayAttrib(vao, Attrib.Position.value());
gl.vertexArrayAttribBinding(vao, Attrib.Position.value(), 0);
gl.vertexArrayAttribFormat(vao, Attrib.Position.value(), 3, gl.FLOAT, gl.FALSE, 0);
checkGLError();
}
// Screen HDR FBO
{
gl.createFramebuffers(1, &render.screen_fbo);
checkGLError();
std.debug.assert(render.screen_fbo != 0);
var width: c_int = 0;
var height: c_int = 0;
c.SDL_GL_GetDrawableSize(globals.g_init.window, &width, &height);
var textures = [2]gl.GLuint{ 0, 0 };
gl.createTextures(gl.TEXTURE_2D, textures.len, &textures);
render.screen_color_texture = textures[0];
render.screen_depth_texture = textures[1];
std.debug.assert(render.screen_color_texture != 0);
std.debug.assert(render.screen_depth_texture != 0);
gl.textureParameteri(render.screen_color_texture, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.textureParameteri(render.screen_color_texture, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.textureParameteri(render.screen_color_texture, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.textureParameteri(render.screen_color_texture, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
gl.textureParameteri(render.screen_depth_texture, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.textureParameteri(render.screen_depth_texture, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.textureParameteri(render.screen_depth_texture, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.textureParameteri(render.screen_depth_texture, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
render.updateScreenBufferSize(width, height);
}
@ -425,21 +418,44 @@ fn calculateMipCount(width: c_int, height: c_int) usize {
}
fn updateScreenBufferSize(self: *Render, width: c_int, height: c_int) void {
const mip_count = calculateMipCount(width, height);
gl.bindTexture(gl.TEXTURE_2D, self.screen_color_texture);
for (0..mip_count) |mip_level| {
const size = getMipSize(width, height, mip_level);
std.log.debug("screen_color mip {} size {}x{}\n", .{ mip_level, size.x(), size.y() });
gl.texImage2D(gl.TEXTURE_2D, @intCast(mip_level), gl.RGB16F, size.x(), size.y(), 0, gl.RGB, gl.HALF_FLOAT, null);
checkGLError();
if (self.screen_tex_size.eql(Vec2_i32.new(width, height)) and self.screen_color_texture != 0) {
return;
}
// Depth doesn't need any mips cause it's not filterable anyway
gl.bindTexture(gl.TEXTURE_2D, self.screen_depth_texture);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.DEPTH_COMPONENT32F, width, height, 0, gl.DEPTH_COMPONENT, gl.FLOAT, null);
if (self.screen_color_texture != 0) {
const old_textures = [_]gl.GLuint{ self.screen_color_texture, self.screen_depth_texture };
gl.deleteTextures(old_textures.len, &old_textures);
checkGLError();
self.screen_color_texture = 0;
self.screen_depth_texture = 0;
}
var textures = [2]gl.GLuint{ 0, 0 };
gl.createTextures(gl.TEXTURE_2D, textures.len, &textures);
checkGLError();
self.screen_color_texture = textures[0];
self.screen_depth_texture = textures[1];
std.debug.assert(self.screen_color_texture != 0);
std.debug.assert(self.screen_depth_texture != 0);
const mip_count = calculateMipCount(width, height);
std.debug.print("screen_color_tex {}, depth {}, screen size {}x{}, mip count: {}\n", .{ self.screen_color_texture, self.screen_depth_texture, width, height, mip_count });
gl.textureStorage2D(self.screen_color_texture, @intCast(mip_count), gl.RGBA16F, width, height);
checkGLError();
gl.textureStorage2D(self.screen_depth_texture, 1, gl.DEPTH_COMPONENT24, width, height);
checkGLError();
gl.textureParameteri(self.screen_color_texture, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.textureParameteri(self.screen_color_texture, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.textureParameteri(self.screen_color_texture, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.textureParameteri(self.screen_color_texture, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
gl.textureParameteri(self.screen_depth_texture, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.textureParameteri(self.screen_depth_texture, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.textureParameteri(self.screen_depth_texture, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.textureParameteri(self.screen_depth_texture, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
self.screen_tex_size = Vec2_i32.new(width, height);
self.screen_mip_count = mip_count;
@ -1489,6 +1505,7 @@ pub fn checkGLError() void {
std.log.scoped(.OpenGL).err("OpenGL Failure: {s}\n", .{name});
}
@panic("GL Error");
}
pub const DrawCommand = struct {

View File

@ -124,7 +124,8 @@ pub const ShaderProgram = extern struct {
pub const Flags = packed struct {
vertex: bool,
fragment: bool,
_pad: u6 = 0,
compute: bool,
_pad: u5 = 0,
};
comptime {
if (@bitSizeOf(Flags) != 8) {
@ -142,24 +143,24 @@ pub const ShaderProgram = extern struct {
test "ShaderProgram serialization" {
const source = ShaderProgram{
.flags = .{ .vertex = true, .fragment = true },
.flags = .{ .vertex = true, .fragment = true, .compute = true },
.shader = .{ .id = 123 },
};
var buf: [@sizeOf(ShaderProgram)]u8 = undefined;
var stream = std.io.fixedBufferStream(&buf);
try writeShaderProgram(stream.writer(), source.shader.id, source.flags.vertex, source.flags.fragment, native_endian);
try writeShaderProgram(stream.writer(), source.shader.id, source.flags.vertex, source.flags.fragment, source.flags.compute, native_endian);
const result: *align(1) ShaderProgram = @ptrCast(&buf);
try std.testing.expectEqual(source, result.*);
}
pub fn writeShaderProgram(writer: anytype, shader: u64, vertex: bool, fragment: bool, endian: std.builtin.Endian) !void {
pub fn writeShaderProgram(writer: anytype, shader: u64, vertex: bool, fragment: bool, compute: bool, endian: std.builtin.Endian) !void {
try writer.writeInt(u64, shader, endian);
try writer.writeInt(
u8,
@bitCast(ShaderProgram.Flags{ .vertex = vertex, .fragment = fragment }),
@bitCast(ShaderProgram.Flags{ .vertex = vertex, .fragment = fragment, .compute = compute }),
endian,
);
}

View File

@ -3,7 +3,7 @@ const globals = @import("globals.zig");
const InitMemory = globals.InitMemory;
const GameMemory = globals.GameMemory;
const c = @import("sdl.zig");
const gl = @import("gl.zig");
// const gl = @import("gl.zig");
const AssetManager = @import("AssetManager.zig");
const Render = @import("Render.zig");
const formats = @import("formats.zig");
@ -16,6 +16,7 @@ const Quat = za.Quat;
const a = @import("asset_manifest");
const windows = std.os.windows;
const tracy = @import("tracy");
const GraphicsContext = @import("GraphicsContext.zig");
pub extern "dwmapi" fn DwmEnableMMCSS(fEnableMMCSS: windows.BOOL) callconv(windows.WINAPI) windows.HRESULT;
pub extern "dwmapi" fn DwmFlush() callconv(windows.WINAPI) void;
@ -42,19 +43,19 @@ fn game_init_window_err(global_allocator: std.mem.Allocator) !void {
// _ = DwmEnableMMCSS(1);
try sdl_try(c.SDL_Init(c.SDL_INIT_EVERYTHING));
try sdl_try(c.SDL_GL_SetAttribute(c.SDL_GL_DOUBLEBUFFER, 1));
try sdl_try(c.SDL_GL_SetAttribute(c.SDL_GL_CONTEXT_MAJOR_VERSION, 4));
try sdl_try(c.SDL_GL_SetAttribute(c.SDL_GL_CONTEXT_MINOR_VERSION, 5));
try sdl_try(c.SDL_GL_SetAttribute(c.SDL_GL_CONTEXT_PROFILE_MASK, c.SDL_GL_CONTEXT_PROFILE_CORE));
try sdl_try(c.SDL_GL_SetAttribute(c.SDL_GL_FRAMEBUFFER_SRGB_CAPABLE, 0));
// try sdl_try(c.SDL_GL_SetAttribute(c.SDL_GL_DOUBLEBUFFER, 1));
// try sdl_try(c.SDL_GL_SetAttribute(c.SDL_GL_CONTEXT_MAJOR_VERSION, 4));
// try sdl_try(c.SDL_GL_SetAttribute(c.SDL_GL_CONTEXT_MINOR_VERSION, 5));
// try sdl_try(c.SDL_GL_SetAttribute(c.SDL_GL_CONTEXT_PROFILE_MASK, c.SDL_GL_CONTEXT_PROFILE_CORE));
// try sdl_try(c.SDL_GL_SetAttribute(c.SDL_GL_FRAMEBUFFER_SRGB_CAPABLE, 0));
const maybe_window = c.SDL_CreateWindow(
"Learn OpenGL with Zig!",
"Vulkan Engine",
c.SDL_WINDOWPOS_UNDEFINED,
c.SDL_WINDOWPOS_UNDEFINED,
globals.DEFAULT_WIDTH,
globals.DEFAULT_HEIGHT,
c.SDL_WINDOW_SHOWN | c.SDL_WINDOW_OPENGL | c.SDL_WINDOW_ALLOW_HIGHDPI | c.SDL_WINDOW_RESIZABLE,
c.SDL_WINDOW_SHOWN | c.SDL_WINDOW_ALLOW_HIGHDPI | c.SDL_WINDOW_RESIZABLE | c.SDL_WINDOW_VULKAN,
);
if (maybe_window == null) {
std.log.err("SDL Error: {s}", .{c.SDL_GetError()});
@ -62,26 +63,28 @@ fn game_init_window_err(global_allocator: std.mem.Allocator) !void {
}
const window = maybe_window.?;
const context = c.SDL_GL_CreateContext(window);
// const context = c.SDL_GL_CreateContext(window);
try sdl_try(c.SDL_GL_SetSwapInterval(0));
// try sdl_try(c.SDL_GL_SetSwapInterval(0));
globals.g_init = try global_allocator.create(InitMemory);
globals.g_init_exists = true;
globals.g_init.* = .{
.global_allocator = global_allocator,
.window = window,
.context = context,
.context = null,
.width = globals.DEFAULT_WIDTH,
.height = globals.DEFAULT_HEIGHT,
};
try globals.g_init.gc.init(global_allocator, window);
const version = &globals.g_init.syswm_info.version;
version.major = c.SDL_MAJOR_VERSION;
version.minor = c.SDL_MINOR_VERSION;
version.patch = c.SDL_PATCHLEVEL;
c.SDL_GL_GetDrawableSize(window, &globals.g_init.width, &globals.g_init.height);
// c.SDL_GL_GetDrawableSize(window, &globals.g_init.width, &globals.g_init.height);
if (c.SDL_GetWindowWMInfo(window, &globals.g_init.syswm_info) == 0) {
const err = c.SDL_GetError();
@ -98,70 +101,70 @@ export fn game_init_window(global_allocator: *std.mem.Allocator) void {
};
}
/// Loads every OpenGL function pointer (core profile plus the
/// GL_ARB_bindless_texture extension) through SDL's GL loader and installs
/// the debug message callback. Must run after the GL context is current.
/// Panics if any loader step fails — the renderer cannot work without it.
fn loadGL() void {
    const getProcAddress = struct {
        fn getProcAddress(ctx: @TypeOf(null), proc: [:0]const u8) ?gl.FunctionPointer {
            _ = ctx;
            return @ptrCast(c.SDL_GL_GetProcAddress(proc));
        }
    }.getProcAddress;
    gl.load(null, getProcAddress) catch |err| {
        // BUG FIX: these failures are fatal (we panic right after), so log at
        // error level — debug-level logs are compiled out / hidden in release
        // builds, leaving the panic with no visible cause.
        std.log.err("Failed to load gl funcs {}\n", .{err});
        @panic("gl.load");
    };
    gl.GL_ARB_bindless_texture.load(null, getProcAddress) catch |err| {
        std.log.err("Failed to load gl funcs GL_ARB_bindless_texture {}\n", .{err});
        @panic("gl.load");
    };
    gl.debugMessageCallback(glDebugCallback, null);
    // gl.enable(gl.DEBUG_OUTPUT);
    // gl.enable(gl.DEBUG_OUTPUT_SYNCHRONOUS);
}
/// OpenGL KHR_debug message callback: routes driver debug messages to
/// std.log, mapping the reported severity to a log level (HIGH -> err,
/// MEDIUM -> warn, LOW -> debug, NOTIFICATION -> info).
fn glDebugCallback(source: gl.GLenum, _type: gl.GLenum, id: gl.GLuint, severity: gl.GLenum, length: gl.GLsizei, message: [*:0]const u8, userParam: ?*anyopaque) callconv(.C) void {
    _ = userParam; // autofix
    // BUG FIX: drivers may report enum values outside the spec'd sets; the
    // previous `else => unreachable` arms were undefined behavior in release
    // builds on such input. Fall back to a generic label / log level instead.
    const source_str = switch (source) {
        gl.DEBUG_SOURCE_API => "API",
        gl.DEBUG_SOURCE_WINDOW_SYSTEM => "WindowSystem",
        gl.DEBUG_SOURCE_APPLICATION => "App",
        gl.DEBUG_SOURCE_SHADER_COMPILER => "ShaderCompiler",
        gl.DEBUG_SOURCE_THIRD_PARTY => "ThirdParty",
        gl.DEBUG_SOURCE_OTHER => "Other",
        else => "Unknown",
    };
    const type_str = switch (_type) {
        gl.DEBUG_TYPE_ERROR => "Error",
        gl.DEBUG_TYPE_DEPRECATED_BEHAVIOR => "Deprecated Behaviour",
        gl.DEBUG_TYPE_UNDEFINED_BEHAVIOR => "Undefined Behaviour",
        gl.DEBUG_TYPE_PORTABILITY => "Portability",
        gl.DEBUG_TYPE_PERFORMANCE => "Performance",
        gl.DEBUG_TYPE_MARKER => "Marker",
        gl.DEBUG_TYPE_PUSH_GROUP => "Push Group",
        gl.DEBUG_TYPE_POP_GROUP => "Pop Group",
        gl.DEBUG_TYPE_OTHER => "Other",
        else => "Unknown",
    };
    const msg = message[0..@intCast(length)];
    switch (severity) {
        gl.DEBUG_SEVERITY_HIGH => {
            std.log.scoped(.OpenGL).err("{s}:{}:{s}: {s}", .{ source_str, id, type_str, msg });
        },
        gl.DEBUG_SEVERITY_MEDIUM => {
            std.log.scoped(.OpenGL).warn("{s}:{}:{s}: {s}", .{ source_str, id, type_str, msg });
        },
        gl.DEBUG_SEVERITY_LOW => {
            std.log.scoped(.OpenGL).debug("{s}:{}:{s}: {s}", .{ source_str, id, type_str, msg });
        },
        gl.DEBUG_SEVERITY_NOTIFICATION => {
            std.log.scoped(.OpenGL).info("{s}:{}:{s}: {s}", .{ source_str, id, type_str, msg });
        },
        // Unknown severity: surface it rather than invoking UB.
        else => {
            std.log.scoped(.OpenGL).warn("{s}:{}:{s} (severity 0x{x}): {s}", .{ source_str, id, type_str, severity, msg });
        },
    }
}
// fn loadGL() void {
// const getProcAddress = struct {
// fn getProcAddress(ctx: @TypeOf(null), proc: [:0]const u8) ?gl.FunctionPointer {
// _ = ctx;
// return @ptrCast(c.SDL_GL_GetProcAddress(proc));
// }
// }.getProcAddress;
// gl.load(null, getProcAddress) catch |err| {
// std.log.debug("Failed to load gl funcs {}\n", .{err});
// @panic("gl.load");
// };
// gl.GL_ARB_bindless_texture.load(null, getProcAddress) catch |err| {
// std.log.debug("Failed to load gl funcs GL_ARB_bindless_texture {}\n", .{err});
// @panic("gl.load");
// };
// gl.debugMessageCallback(glDebugCallback, null);
// // gl.enable(gl.DEBUG_OUTPUT);
// // gl.enable(gl.DEBUG_OUTPUT_SYNCHRONOUS);
// }
//
// fn glDebugCallback(source: gl.GLenum, _type: gl.GLenum, id: gl.GLuint, severity: gl.GLenum, length: gl.GLsizei, message: [*:0]const u8, userParam: ?*anyopaque) callconv(.C) void {
// _ = userParam; // autofix
// const source_str = switch (source) {
// gl.DEBUG_SOURCE_API => "API",
// gl.DEBUG_SOURCE_WINDOW_SYSTEM => "WindowSystem",
// gl.DEBUG_SOURCE_APPLICATION => "App",
// gl.DEBUG_SOURCE_SHADER_COMPILER => "ShaderCompiler",
// gl.DEBUG_SOURCE_THIRD_PARTY => "ThirdParty",
// gl.DEBUG_SOURCE_OTHER => "Other",
// else => unreachable,
// };
// const type_str = switch (_type) {
// gl.DEBUG_TYPE_ERROR => "Error",
// gl.DEBUG_TYPE_DEPRECATED_BEHAVIOR => "Deprecated Behaviour",
// gl.DEBUG_TYPE_UNDEFINED_BEHAVIOR => "Undefined Behaviour",
// gl.DEBUG_TYPE_PORTABILITY => "Portability",
// gl.DEBUG_TYPE_PERFORMANCE => "Performance",
// gl.DEBUG_TYPE_MARKER => "Marker",
// gl.DEBUG_TYPE_PUSH_GROUP => "Push Group",
// gl.DEBUG_TYPE_POP_GROUP => "Pop Group",
// gl.DEBUG_TYPE_OTHER => "Other",
// else => unreachable,
// };
// switch (severity) {
// gl.DEBUG_SEVERITY_HIGH => {
// std.log.scoped(.OpenGL).err("{s}:{}:{s}: {s}", .{ source_str, id, type_str, message[0..@intCast(length)] });
// },
// gl.DEBUG_SEVERITY_MEDIUM => {
// std.log.scoped(.OpenGL).warn("{s}:{}:{s}: {s}", .{ source_str, id, type_str, message[0..@intCast(length)] });
// },
// gl.DEBUG_SEVERITY_LOW => {
// std.log.scoped(.OpenGL).debug("{s}:{}:{s}: {s}", .{ source_str, id, type_str, message[0..@intCast(length)] });
// },
// gl.DEBUG_SEVERITY_NOTIFICATION => {
// std.log.scoped(.OpenGL).info("{s}:{}:{s}: {s}", .{ source_str, id, type_str, message[0..@intCast(length)] });
// },
// else => unreachable,
// }
// }
const mesh_program = a.ShaderPrograms.mesh;
export fn game_init(global_allocator: *std.mem.Allocator) void {
loadGL();
// loadGL();
tracy.startupProfiler();
std.log.debug("game_init\n", .{});
@ -171,7 +174,7 @@ export fn game_init(global_allocator: *std.mem.Allocator) void {
.global_allocator = global_allocator.*,
.frame_fba = std.heap.FixedBufferAllocator.init(frame_arena_buffer),
.assetman = AssetManager.init(global_allocator.*, globals.g_mem.frame_fba.allocator()),
.render = Render.init(global_allocator.*, globals.g_mem.frame_fba.allocator(), &globals.g_mem.assetman),
// .render = Render.init(global_allocator.*, globals.g_mem.frame_fba.allocator(), &globals.g_mem.assetman),
.world = .{ .frame_arena = globals.g_mem.frame_fba.allocator() },
};
globals.g_mem.render.camera = &globals.g_mem.free_cam.camera;
@ -181,13 +184,13 @@ export fn game_init(global_allocator: *std.mem.Allocator) void {
globals.g_mem.performance_frequency = c.SDL_GetPerformanceFrequency();
globals.g_mem.last_frame_time = c.SDL_GetPerformanceCounter();
var majorVer: gl.GLint = 0;
var minorVer: gl.GLint = 0;
gl.getIntegerv(gl.MAJOR_VERSION, &majorVer);
gl.getIntegerv(gl.MINOR_VERSION, &minorVer);
std.log.debug("OpenGL Version: {}.{}", .{ majorVer, minorVer });
// var majorVer: gl.GLint = 0;
// var minorVer: gl.GLint = 0;
// gl.getIntegerv(gl.MAJOR_VERSION, &majorVer);
// gl.getIntegerv(gl.MINOR_VERSION, &minorVer);
// std.log.debug("OpenGL Version: {}.{}", .{ majorVer, minorVer });
gl.viewport(0, 0, globals.g_init.width, globals.g_init.height);
// gl.viewport(0, 0, globals.g_init.width, globals.g_init.height);
_ = globals.g_mem.world.addEntity(.{
.flags = .{ .dir_light = true, .rotate = true },
@ -402,7 +405,7 @@ export fn game_update() bool {
c.SDL_GL_GetDrawableSize(ginit.window, &ginit.width, &ginit.height);
std.log.debug("w: {}, h: {}\n", .{ ginit.width, ginit.height });
gl.viewport(0, 0, ginit.width, ginit.height);
// gl.viewport(0, 0, ginit.width, ginit.height);
},
else => {},
}
@ -458,8 +461,10 @@ export fn game_update() bool {
}
}
ginit.gc.draw() catch @panic("draw error");
// Render
{
if (false) {
const zone = tracy.initZone(@src(), .{ .name = "game.render()" });
defer zone.deinit();
gmem.render.begin();
@ -516,11 +521,11 @@ export fn game_update() bool {
}
}
{
const zone = tracy.initZone(@src(), .{ .name = "SDL_GL_SwapWindow" });
defer zone.deinit();
c.SDL_GL_SwapWindow(ginit.window);
}
// {
// const zone = tracy.initZone(@src(), .{ .name = "SDL_GL_SwapWindow" });
// defer zone.deinit();
// c.SDL_GL_SwapWindow(ginit.window);
// }
tracy.frameMark();
//c.SDL_Delay(1);
@ -535,7 +540,7 @@ export fn game_shutdown() void {
gmem.assetman.deinit();
gmem.global_allocator.free(gmem.frame_fba.buffer);
gmem.global_allocator.destroy(gmem);
gl.disable(gl.DEBUG_OUTPUT);
// gl.disable(gl.DEBUG_OUTPUT);
tracy.shutdownProfiler();
}
@ -553,7 +558,7 @@ export fn game_hot_reload(init_memory: ?*anyopaque, gmemory: ?*anyopaque) void {
if (init_memory) |init_mem| {
globals.g_init = @alignCast(@ptrCast(init_mem));
globals.g_init_exists = true;
loadGL();
// loadGL();
}
if (gmemory) |gmem| {
globals.g_mem = @alignCast(@ptrCast(gmem));

View File

@ -3,6 +3,7 @@ const c = @import("sdl.zig");
const Render = @import("Render.zig");
const AssetManager = @import("AssetManager.zig");
const World = @import("entity.zig").World;
const GraphicsContext = @import("GraphicsContext.zig");
const za = @import("zalgebra");
const Vec2 = za.Vec2;
@ -28,12 +29,13 @@ pub const InitMemory = struct {
fullscreen: bool = false,
vsync: bool = false,
syswm_info: c.SDL_SysWMinfo = .{},
gc: GraphicsContext = .{},
};
pub const GameMemory = struct {
global_allocator: std.mem.Allocator,
frame_fba: std.heap.FixedBufferAllocator,
assetman: AssetManager,
render: Render,
render: Render = undefined,
performance_frequency: u64 = 0,
last_frame_time: u64 = 0,
delta_time: f32 = 0.0000001,

65
src/mem/Arena.zig Normal file
View File

@ -0,0 +1,65 @@
const std = @import("std");

//! Virtual-memory bump arena: reserves a large address range up front with
//! PROT_NONE and commits pages on demand as allocations grow.

const Arena = @This();

// Reserved address range; only the first `commited_pages` pages are RW,
// the rest stays PROT_NONE until committed.
memory: []align(std.mem.page_size) u8,
// Bump offset of the next allocation, in bytes from the start of `memory`.
ptr: usize = 0,
// Number of pages at the start of `memory` that have been mprotect'd RW.
commited_pages: usize = 0,

/// Reserve `reserve_mem` bytes of virtual address space without committing
/// any physical memory. Pages are committed lazily by the allocator.
pub fn init(reserve_mem: usize) !Arena {
    const slice = try std.posix.mmap(null, reserve_mem, std.posix.PROT.NONE, .{ .TYPE = .PRIVATE, .ANONYMOUS = true }, -1, 0);
    return Arena{
        .memory = slice,
    };
}

/// Release the entire reservation. The arena must not be used afterwards.
pub fn deinit(self: *Arena) void {
    std.posix.munmap(self.memory);
    self.* = undefined;
}

/// Returns an `std.mem.Allocator` backed by this arena. Individual frees
/// are no-ops; all memory is reclaimed by `deinit`.
pub fn allocator(self: *Arena) std.mem.Allocator {
    return std.mem.Allocator{
        .ptr = @ptrCast(self),
        .vtable = &vtable,
    };
}

// BUG FIX: `Allocator.VTable` requires all three function pointers; the old
// code supplied only `.alloc`, which is a compile error (missing fields).
const vtable = std.mem.Allocator.VTable{
    .alloc = rawAlloc,
    .resize = rawResize,
    .free = rawFree,
};

// BUG FIX: the vtable callback must take `ctx: *anyopaque`, not `self: *Arena`,
// to match the `Allocator.VTable.alloc` signature.
fn rawAlloc(ctx: *anyopaque, len: usize, ptr_align: u8, ret_addr: usize) ?[*]u8 {
    _ = ret_addr; // autofix
    const self: *Arena = @alignCast(@ptrCast(ctx));
    // `ptr_align` is log2 of the requested alignment per the Allocator contract.
    const start = std.mem.alignForwardLog2(self.ptr, ptr_align);
    const end = start + len;
    if (end > self.memory.len) {
        // Reserved address space exhausted.
        return null;
    }
    if (end > self.committedEnd()) {
        self.commitUpTo(end) catch {
            return null;
        };
    }
    self.ptr = end;
    return self.memory.ptr + start;
}

fn rawResize(ctx: *anyopaque, buf: []u8, buf_align: u8, new_len: usize, ret_addr: usize) bool {
    _ = ctx; // autofix
    _ = buf_align; // autofix
    _ = ret_addr; // autofix
    // Shrinking in place is always fine; growing in place would require
    // knowing this is the most recent allocation, so decline.
    return new_len <= buf.len;
}

fn rawFree(ctx: *anyopaque, buf: []u8, buf_align: u8, ret_addr: usize) void {
    _ = ctx; // autofix
    _ = buf; // autofix
    _ = buf_align; // autofix
    _ = ret_addr; // autofix
    // Arena: individual frees are no-ops.
}

// End (exclusive, bytes from start of `memory`) of the committed prefix.
// BUG FIX: previously `page_size * (commited_pages + 1)`, which claimed one
// *uncommitted* page as usable — the very first allocation would then write
// to PROT_NONE memory and fault.
inline fn committedEnd(self: *const Arena) usize {
    return std.mem.page_size * self.commited_pages;
}

// Commit pages until at least `addr` bytes from the start are readable/writable.
// BUG FIX: mprotect must be given an address inside the mapping; the old code
// passed the raw byte offset (e.g. 4096) as the address.
fn commitUpTo(self: *Arena, addr: usize) !void {
    while (addr > self.committedEnd()) {
        const page: []align(std.mem.page_size) u8 = @alignCast(self.memory[self.committedEnd()..][0..std.mem.page_size]);
        std.posix.mprotect(page, std.posix.PROT.READ | std.posix.PROT.WRITE) catch {
            return error.OutOfMemory;
        };
        self.commited_pages += 1;
    }
}

View File

@ -1,6 +1,7 @@
// Single translation unit for all SDL2 C headers, re-exported as this module.
// SDL_syswm.h provides SDL_SysWMinfo / SDL_GetWindowWMInfo for native window
// handles; SDL_vulkan.h provides SDL's Vulkan helpers (instance extensions,
// surface creation).
pub usingnamespace @cImport({
    @cInclude("SDL2/SDL.h");
    @cInclude("SDL2/SDL_syswm.h");
    @cInclude("SDL2/SDL_vulkan.h");
});
// When using system sdl2 library this might not be defined

View File

@ -532,6 +532,7 @@ fn processShaderProgram(allocator: std.mem.Allocator, input: []const u8, output_
shader: []const u8,
vertex: bool,
fragment: bool,
compute: bool,
};
const program = try std.json.parseFromSlice(ShaderProgram, allocator, file_contents, .{});
defer program.deinit();
@ -549,7 +550,7 @@ fn processShaderProgram(allocator: std.mem.Allocator, input: []const u8, output_
defer output.file.close();
var buf_writer = std.io.bufferedWriter(output.file.writer());
try formats.writeShaderProgram(buf_writer.writer(), shader_asset_id, program.value.vertex, program.value.fragment, formats.native_endian);
try formats.writeShaderProgram(buf_writer.writer(), shader_asset_id, program.value.vertex, program.value.fragment, program.value.compute, formats.native_endian);
try buf_writer.flush();
}
const MipLevel = struct {