Compare commits


No commits in common. "3fdd0bfea8314d450540b64e7cd9b97b87ba9032" and "48ef1fb4b7ec8e2156ec8b9f36398841d75be124" have entirely different histories.

2 changed files with 35 additions and 187 deletions


@@ -2,20 +2,16 @@ package xarr
 import "base:builtin"
 import "base:intrinsics"
-import "common:relptr"
 import "core:mem"
 BASE_CHUNK_SIZE :: uint(64)
 BASE_CHUNK_SIZE_LOG2 :: intrinsics.constant_log2(BASE_CHUNK_SIZE)
 BASE_CHUNK_SHIFT :: BASE_CHUNK_SIZE_LOG2 - 1
-NUM_CHUNKS :: 30 when (size_of(uint) == 8) else 26 // on 32 bit systems max size is 0x80000000 which is about half the addressable space
+NUM_CHUNKS :: 30
 Xarr :: struct($T: typeid, $SOA := false) {
 	len: int,
 	allocated_chunks_mask: u32,
-	chunks: ([NUM_CHUNKS]relptr.SOA_Slice(T) when SOA else [NUM_CHUNKS]relptr.Ptr(
-		T,
-	)),
+	chunks: ([NUM_CHUNKS]#soa[]T when SOA else [NUM_CHUNKS][^]T),
 }
 UINT_BITS :: size_of(uint) * 8
@@ -32,20 +28,18 @@ chunk_size :: #force_inline proc "contextless" (chunk_idx: i32) -> uint {
 	return BASE_CHUNK_SIZE << intrinsics.saturating_sub(u32(chunk_idx), 1)
 }
-get_chunk_slice_scalar :: #force_inline proc(
+get_chunk_slice_scalar :: #force_inline proc "contextless" (
 	a: $T/Xarr($E, false),
 	chunk_idx: i32,
-	base := context.user_ptr,
 ) -> []E {
-	return relptr.deref_multi_ptr(a.chunks[chunk_idx], base)[:chunk_size(chunk_idx)]
+	return a.chunks[chunk_idx][:chunk_size(chunk_idx)]
 }
-get_chunk_slice_soa :: #force_inline proc(
+get_chunk_slice_soa :: #force_inline proc "contextless" (
 	a: $T/Xarr($E, true),
 	chunk_idx: i32,
-	base := context.user_ptr,
 ) -> #soa[]E {
-	return relptr.deref_soa_slice(a.chunks[chunk_idx], base)
+	return a.chunks[chunk_idx]
 }
 get_chunk_slice :: proc {
@@ -65,16 +59,7 @@ capacity :: #force_inline proc "contextless" (a: $T/Xarr($E, $SOA)) -> uint {
 	return capacity_from_allocated_mask(allocated_mask)
 }
-len :: #force_inline proc "contextless" (a: $T/Xarr($E, $SOA)) -> int {
-	return a.len
-}
-reserve :: proc(
-	a: $T/^Xarr($E, $SOA),
-	cap: int,
-	allocator := context.allocator,
-	base := context.user_ptr,
-) #no_bounds_check {
+reserve :: proc(a: $T/^Xarr($E, $SOA), cap: int, allocator := context.allocator) #no_bounds_check {
 	allocated_mask := a.allocated_chunks_mask
 	current_chunk := msb(allocated_mask)
@@ -85,61 +70,36 @@ reserve :: proc(
 	for i := current_chunk + 1; i < required_chunks; i += 1 {
 		when SOA {
 			chunk_slice := make_soa_slice(#soa[]E, chunk_size(i), allocator)
-			a.chunks[i] = relptr.from_soa_slice(chunk_slice, base)
+			a.chunks[i] = chunk_slice
 		} else {
 			chunk_slice := make([]E, chunk_size(i), allocator)
-			a.chunks[i] = relptr.from_multi_ptr(raw_data(chunk_slice), base)
+			a.chunks[i] = raw_data(chunk_slice)
 		}
 		a.allocated_chunks_mask |= u32(1) << u8(i)
 	}
 }
-resize :: proc(
-	a: $T/^Xarr($E, $SOA),
-	new_len: int,
-	allocator := context.allocator,
-	base := context.user_ptr,
-) {
-	reserve(a, new_len, allocator, base)
-	a.len = new_len
-}
-append_elem :: proc(
-	a: $T/^Xarr($E, $SOA),
-	elem: E,
-	allocator := context.allocator,
-	base := context.user_ptr,
-) {
-	if capacity(a^) < uint(a.len + 1) {
-		reserve(a, a.len + 1, allocator, base)
+append_elem :: proc(a: $T/^Xarr($E, $SOA), elem: E, allocator := context.allocator) {
+	if capacity(a^) <= uint(a.len + 1) {
+		reserve(a, a.len + 1)
 	}
 	#no_bounds_check {
 		chunk_idx, idx_within_chunk := translate_index(a.len)
-		when SOA {
-			slice := relptr.deref_soa_slice(a.chunks[chunk_idx], base)
-			slice[idx_within_chunk] = elem
-		} else {
-			relptr.deref_multi_ptr(a.chunks[chunk_idx], base)[idx_within_chunk] = elem
-		}
+		a.chunks[chunk_idx][idx_within_chunk] = elem
 	}
 	a.len += 1
 }
-append_elems :: proc(
-	a: $T/^Xarr($E, $SOA),
-	elems: ..E,
-	allocator := context.allocator,
-	base := context.user_ptr,
-) {
-	if builtin.len(elems) == 0 {
+append_elems :: proc(a: $T/^Xarr($E, $SOA), elems: ..E, allocator := context.allocator) {
+	if len(elems) == 0 {
 		return
 	}
-	if capacity(a^) < uint(a.len + builtin.len(elems)) {
-		reserve(a, a.len + builtin.len(elems), allocator, base)
+	if capacity(a^) < uint(a.len + len(elems)) {
+		reserve(a, a.len + len(elems))
 	}
-	set_elems_assume_allocated(a, elems, base)
-	a.len += builtin.len(elems)
+	set_elems_assume_allocated(a, elems)
+	a.len += len(elems)
 }
 append :: proc {
@@ -160,20 +120,22 @@ translate_index :: #force_inline proc "contextless" (
 }
 @(private = "file")
-set_elems_assume_allocated :: proc(
+set_elems_assume_allocated :: proc "contextless" (
 	a: $T/^Xarr($E, $SOA),
 	elems: []E,
-	base: rawptr,
 ) #no_bounds_check {
 	for &e, i in elems {
 		idx := a.len + i
 		chunk_idx, idx_within_chunk := translate_index(idx)
 		when SOA {
-			slice := relptr.deref_soa_slice(a.chunks[chunk_idx], base)
-			slice[idx_within_chunk] = e
+			a.chunks[chunk_idx][idx_within_chunk] = e
 		} else {
-			relptr.deref_multi_ptr(a.chunks[chunk_idx], base)[idx_within_chunk] = e
+			intrinsics.mem_copy_non_overlapping(
+				&a.chunks[chunk_idx][idx_within_chunk],
+				&e,
+				size_of(E),
+			)
 		}
 	}
 }
@@ -191,29 +153,17 @@ get :: proc(a: $T/Xarr($E, $SOA), #any_int idx: int) -> E {
 	return get_chunk_slice(a, chunk_idx)[idx_within_chunk]
 }
-get_ptr_scalar :: proc(a: $T/^Xarr($E, false), #any_int idx: int) -> ^E {
+get_ptr :: proc(a: $T/Xarr($E, $SOA), #any_int idx: int) -> ^E {
 	assert(idx >= 0 && idx < a.len)
 	chunk_idx, idx_within_chunk := translate_index(idx)
-	return &get_chunk_slice_scalar(a, chunk_idx)[idx_within_chunk]
-}
-get_ptr_soa :: proc(a: $T/^Xarr($E, true), #any_int idx: int) -> #soa^#soa[]E {
-	assert(idx >= 0 && idx < a.len)
-	chunk_idx, idx_within_chunk := translate_index(idx)
-	return &get_chunk_slice_soa(a, chunk_idx)[idx_within_chunk]
-}
-get_ptr :: proc {
-	get_ptr_scalar,
-	get_ptr_soa,
+	return &get_chunk_slice(a, chunk_idx)[idx_within_chunk]
 }
 unordered_remove :: proc(a: $T/^Xarr($E, $SOA), #any_int idx: int) {
 	assert(idx >= 0 && idx < a.len)
-	get_ptr(a, idx)^ = get(a^, a.len - 1)
+	get_ptr(a^, idx)^ = get(a^, a.len - 1)
 	a.len -= 1
 }
@@ -222,7 +172,7 @@ clear :: proc "contextless" (a: $T/^Xarr($E, $SOA)) {
 }
 delete :: proc(a: $T/^Xarr($E, $SOA), allocator := context.allocator) {
-	for i in 0 ..< builtin.len(a.chunks) {
+	for i in 0 ..< len(a.chunks) {
 		builtin.delete(get_chunk_slice(a^, i32(i)), allocator)
 	}
@@ -243,7 +193,7 @@ iterator_next :: proc(it: ^Iterator($E, $SOA)) -> (e: ^E, idx: int, ok: bool) {
 		return nil, it.idx, false
 	}
-	e = get_ptr(it.xarr, it.idx)
+	e = get_ptr(it.xarr^, it.idx)
 	idx = it.idx
 	ok = true
@@ -275,7 +225,7 @@ chunk_iterator_next_scalar :: proc(
 	chunk = get_chunk_slice_scalar(it.xarr^, it.chunk_idx)
 	// Limit the chunk to the length so user code doesn't have to worry about this
 	base_element_idx = it.base_element_idx
-	chunk = chunk[:min(builtin.len(chunk), it.xarr.len - base_element_idx)]
+	chunk = chunk[:min(len(chunk), it.xarr.len - base_element_idx)]
 	ok = true
 	base_element_idx += int(chunk_size(it.chunk_idx))
@@ -290,17 +240,17 @@ chunk_iterator_next_soa :: proc(
 	base_element_idx: int,
 	ok: bool,
 ) {
-	if (it.xarr.allocated_chunks_mask & (u32(1) << u32(it.chunk_idx))) == 0 {
+	if (it.xarr.allocated_chunks_mask & (u32(1) << it.chunk_idx)) == 0 {
 		return nil, 0, false
 	}
 	chunk = get_chunk_slice_soa(it.xarr^, it.chunk_idx)
 	// Limit the chunk to the length so user code doesn't have to worry about this
 	base_element_idx = it.base_element_idx
-	chunk = chunk[:min(builtin.len(chunk), it.xarr.len - base_element_idx)]
+	chunk = chunk[:min(len(chunk), it.xarr.len - base_element_idx)]
 	ok = true
-	base_element_idx += int(chunk_size(it.chunk_idx))
+	base_element_idx += chunk_size(it.chunk_idx)
 	it.chunk_idx += 1
 	return
 }
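
Net effect of the hunks above: each chunk handle becomes a plain multi-pointer ([^]E) or #soa[]E slice, so callers no longer thread a base pointer or context.user_ptr through reserve/append/get. A minimal usage sketch of the post-change API follows; the common:xarr import path, the example package name, and relying on the zero value as an empty array are assumptions for illustration, not taken from this diff.

package xarr_usage_example

import "common:xarr" // assumed import path, mirroring the old "common:relptr" style

main :: proc() {
	arr: xarr.Xarr(int) // $SOA defaults to false; assumed usable from its zero value
	defer xarr.delete(&arr)

	// Growth allocates whole chunks of 64, 64, 128, 256, ... elements
	// and never moves the chunks that already exist.
	for i in 0 ..< 200 {
		xarr.append(&arr, i)
	}

	assert(xarr.get(arr, 70) == 70)
	xarr.get_ptr(arr, 70)^ = -1     // get_ptr now takes the array by value
	xarr.unordered_remove(&arr, 70) // swaps the last element into slot 70
}

Because reserve only ever adds new chunks, element addresses handed out by get_ptr stay stable across later appends.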


@@ -1,102 +0,0 @@
-// Relative pointer
-package relptr
-import "base:intrinsics"
-Ptr :: struct($T: typeid) {
-	offset: uintptr,
-}
-Slice :: struct($T: typeid) {
-	offset: uintptr,
-	len: int,
-}
-SOA_Slice :: struct($T: typeid) {
-	// Offset for each field of SOA struct
-	offsets: [len(
-		T,
-	) when intrinsics.type_is_array(T) else intrinsics.type_struct_field_count(T)]uintptr,
-	len: int,
-}
-from_rawptr :: #force_inline proc($T: typeid, addr: rawptr, base := context.user_ptr) -> Ptr(T) {
-	offset := uintptr(addr) - uintptr(base)
-	assert(offset >= 0, "ptr does not belong to this base")
-	return Ptr(T){offset = offset}
-}
-from_ptr :: #force_inline proc(addr: ^$T, base := context.user_ptr) -> Ptr(T) {
-	offset := uintptr(rawptr(addr)) - uintptr(base)
-	assert(offset >= 0, "ptr does not belong to this base")
-	return Ptr(T){offset = offset}
-}
-from_multi_ptr :: #force_inline proc(addr: [^]$T, base := context.user_ptr) -> Ptr(T) {
-	offset := uintptr(rawptr(addr)) - uintptr(base)
-	assert(offset >= 0, "ptr does not belong to this base")
-	return Ptr(T){offset = offset}
-}
-from_slice :: #force_inline proc(slice: []$T, base := context.user_ptr) -> Slice(T) {
-	offset := uintptr(rawptr(raw_data(slice))) - uintptr(base)
-	assert(offset >= 0, "ptr does not belong to this base")
-	return Slice(T){offset = offset, len = len(slice)}
-}
-from_soa_slice :: #force_inline proc(slice: #soa[]$T, base := context.user_ptr) -> SOA_Slice(T) {
-	slice := slice
-	result: SOA_Slice(T)
-	FIELD_COUNT ::
-		(len(T) when intrinsics.type_is_array(T) else intrinsics.type_struct_field_count(T))
-	// SOA slice is just an array of pointers to each member + a footer
-	src_ptrs := (transmute([^]uintptr)(&slice))[:FIELD_COUNT]
-	for i in 0 ..< len(result.offsets) {
-		result.offsets[i] = src_ptrs[i] - uintptr(base)
-	}
-	result.len = len(slice)
-	return result
-}
-deref_ptr :: #force_inline proc(ptr: Ptr($T), base := context.user_ptr) -> ^T {
-	return transmute(^T)(uintptr(base) + ptr.offset)
-}
-deref_multi_ptr :: #force_inline proc(ptr: Ptr($T), base := context.user_ptr) -> [^]T {
-	return transmute([^]T)(uintptr(base) + ptr.offset)
-}
-deref_slice :: #force_inline proc(slice: Slice($T), base := context.user_ptr) -> []T {
-	return (transmute([^]T)(uintptr(base) + slice.offset))[:slice.len]
-}
-deref_soa_slice :: #force_inline proc(slice: SOA_Slice($T), base := context.user_ptr) -> #soa[]T {
-	result: #soa[]T
-	footer := raw_soa_footer_slice(&result)
-	FIELD_COUNT ::
-		(len(T) when intrinsics.type_is_array(T) else intrinsics.type_struct_field_count(T))
-	// Just in case SOA layout changes
-	#assert(size_of(result) == (FIELD_COUNT * size_of(rawptr)) + size_of(footer))
-	// SOA slice is just an array of pointers to each member + a footer
-	result_ptrs := (transmute([^]uintptr)(&result))[:FIELD_COUNT]
-	for i in 0 ..< len(slice.offsets) {
-		result_ptrs[i] = uintptr(base) + slice.offsets[i]
-	}
-	footer.len = slice.len
-	return result
-}
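
The deleted file above is the other half of the change: relptr stored every pointer as an offset from a caller-supplied base (defaulting to context.user_ptr) rather than as an absolute address, so a block of memory can be remapped at a different address and its internal references still resolve, as long as the same base is supplied when dereferencing. A minimal round-trip sketch of the removed API; the arena setup and package name are illustrative only.

package relptr_usage_example

import "common:relptr"

main :: proc() {
	// Pretend this array is a relocatable arena; its start is the base pointer.
	arena: [16]int
	base := rawptr(&arena[0])
	arena[3] = 42

	// Store only the offset of &arena[3] relative to base ...
	rel := relptr.from_ptr(&arena[3], base)

	// ... and turn it back into an absolute pointer on demand.
	assert(relptr.deref_ptr(rel, base)^ == 42)
}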