Compare commits
48ef1fb4b7 ... 3fdd0bfea8

2 commits: 3fdd0bfea8, c1c722e1f7

@@ -2,16 +2,20 @@ package xarr

 import "base:builtin"
 import "base:intrinsics"
+import "common:relptr"
+import "core:mem"

 BASE_CHUNK_SIZE :: uint(64)
 BASE_CHUNK_SIZE_LOG2 :: intrinsics.constant_log2(BASE_CHUNK_SIZE)
 BASE_CHUNK_SHIFT :: BASE_CHUNK_SIZE_LOG2 - 1
-NUM_CHUNKS :: 30
+NUM_CHUNKS :: 30 when (size_of(uint) == 8) else 26 // on 32 bit systems max size is 0x80000000 which is about half the addressable space

 Xarr :: struct($T: typeid, $SOA := false) {
     len: int,
     allocated_chunks_mask: u32,
-    chunks: ([NUM_CHUNKS]#soa[]T when SOA else [NUM_CHUNKS][^]T),
+    chunks: ([NUM_CHUNKS]relptr.SOA_Slice(T) when SOA else [NUM_CHUNKS]relptr.Ptr(
+        T,
+    )),
 }

 UINT_BITS :: size_of(uint) * 8
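
With this layout, chunks 0 and 1 each hold BASE_CHUNK_SIZE elements and every later chunk doubles the previous one, so n fully allocated chunks hold 64 << (n - 1) elements in total. That is why NUM_CHUNKS drops to 26 on 32-bit targets: 64 << 25 = 0x8000_0000 elements, the "about half the addressable space" limit the comment mentions, while 30 chunks on 64-bit targets allow 64 << 29 elements. A small illustrative sketch, not part of the commit, that derives the same numbers from chunk_size:

// Illustration only: element capacity of the first n chunks, using the
// chunk_size rule shown below (64, 64, 128, 256, ...).
total_capacity_of_first_chunks :: proc "contextless" (n: i32) -> uint {
    total: uint
    for i in 0 ..< n {
        total += chunk_size(i)
    }
    return total // n = 26 -> 0x8000_0000, n = 30 -> 64 << 29
}
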
@@ -28,18 +32,20 @@ chunk_size :: #force_inline proc "contextless" (chunk_idx: i32) -> uint {
     return BASE_CHUNK_SIZE << intrinsics.saturating_sub(u32(chunk_idx), 1)
 }

-get_chunk_slice_scalar :: #force_inline proc "contextless" (
+get_chunk_slice_scalar :: #force_inline proc(
     a: $T/Xarr($E, false),
     chunk_idx: i32,
+    base := context.user_ptr,
 ) -> []E {
-    return a.chunks[chunk_idx][:chunk_size(chunk_idx)]
+    return relptr.deref_multi_ptr(a.chunks[chunk_idx], base)[:chunk_size(chunk_idx)]
 }

-get_chunk_slice_soa :: #force_inline proc "contextless" (
+get_chunk_slice_soa :: #force_inline proc(
     a: $T/Xarr($E, true),
     chunk_idx: i32,
+    base := context.user_ptr,
 ) -> #soa[]E {
-    return a.chunks[chunk_idx]
+    return relptr.deref_soa_slice(a.chunks[chunk_idx], base)
 }

 get_chunk_slice :: proc {
@@ -59,7 +65,16 @@ capacity :: #force_inline proc "contextless" (a: $T/Xarr($E, $SOA)) -> uint {
     return capacity_from_allocated_mask(allocated_mask)
 }

-reserve :: proc(a: $T/^Xarr($E, $SOA), cap: int, allocator := context.allocator) #no_bounds_check {
+len :: #force_inline proc "contextless" (a: $T/Xarr($E, $SOA)) -> int {
+    return a.len
+}
+
+reserve :: proc(
+    a: $T/^Xarr($E, $SOA),
+    cap: int,
+    allocator := context.allocator,
+    base := context.user_ptr,
+) #no_bounds_check {
     allocated_mask := a.allocated_chunks_mask

     current_chunk := msb(allocated_mask)
@@ -70,36 +85,61 @@ reserve :: proc(a: $T/^Xarr($E, $SOA), cap: int, allocator := context.allocator)
     for i := current_chunk + 1; i < required_chunks; i += 1 {
         when SOA {
             chunk_slice := make_soa_slice(#soa[]E, chunk_size(i), allocator)
-            a.chunks[i] = chunk_slice
+            a.chunks[i] = relptr.from_soa_slice(chunk_slice, base)
         } else {
             chunk_slice := make([]E, chunk_size(i), allocator)
-            a.chunks[i] = raw_data(chunk_slice)
+            a.chunks[i] = relptr.from_multi_ptr(raw_data(chunk_slice), base)
         }
         a.allocated_chunks_mask |= u32(1) << u8(i)
     }
 }

-append_elem :: proc(a: $T/^Xarr($E, $SOA), elem: E, allocator := context.allocator) {
-    if capacity(a^) <= uint(a.len + 1) {
-        reserve(a, a.len + 1)
+resize :: proc(
+    a: $T/^Xarr($E, $SOA),
+    new_len: int,
+    allocator := context.allocator,
+    base := context.user_ptr,
+) {
+    reserve(a, new_len, allocator, base)
+    a.len = new_len
+}
+
+append_elem :: proc(
+    a: $T/^Xarr($E, $SOA),
+    elem: E,
+    allocator := context.allocator,
+    base := context.user_ptr,
+) {
+    if capacity(a^) < uint(a.len + 1) {
+        reserve(a, a.len + 1, allocator, base)
     }
     #no_bounds_check {
         chunk_idx, idx_within_chunk := translate_index(a.len)
-        a.chunks[chunk_idx][idx_within_chunk] = elem
+        when SOA {
+            slice := relptr.deref_soa_slice(a.chunks[chunk_idx], base)
+            slice[idx_within_chunk] = elem
+        } else {
+            relptr.deref_multi_ptr(a.chunks[chunk_idx], base)[idx_within_chunk] = elem
+        }
     }
     a.len += 1
 }

-append_elems :: proc(a: $T/^Xarr($E, $SOA), elems: ..E, allocator := context.allocator) {
-    if len(elems) == 0 {
+append_elems :: proc(
+    a: $T/^Xarr($E, $SOA),
+    elems: ..E,
+    allocator := context.allocator,
+    base := context.user_ptr,
+) {
+    if builtin.len(elems) == 0 {
         return
     }

-    if capacity(a^) < uint(a.len + len(elems)) {
-        reserve(a, a.len + len(elems))
+    if capacity(a^) < uint(a.len + builtin.len(elems)) {
+        reserve(a, a.len + builtin.len(elems), allocator, base)
     }
-    set_elems_assume_allocated(a, elems)
-    a.len += len(elems)
+    set_elems_assume_allocated(a, elems, base)
+    a.len += builtin.len(elems)
 }

 append :: proc {
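
Every chunk pointer is now stored as an offset from a caller-supplied base, which is why the accessors dropped "contextless" and the mutating procedures grew a base := context.user_ptr parameter alongside the allocator. A usage sketch, not part of the commit: the arena setup and names are assumptions about how a caller wires this up, written as if it sat inside package xarr (e.g. a test), with the chunks carved out of the same block that base points into.

// Usage sketch (illustrative only): back the Xarr with one block, allocate
// chunks out of it, and measure all relative pointers from its start.
example_usage :: proc() {
    heap := context.allocator
    backing := make([]byte, 4 * mem.Megabyte, heap)
    defer builtin.delete(backing, heap)

    arena: mem.Arena
    mem.arena_init(&arena, backing)

    context.allocator = mem.arena_allocator(&arena)
    context.user_ptr = rawptr(raw_data(backing))

    xa: Xarr(int)
    append(&xa, 1)
    append(&xa, 2, 3)
    assert(get(xa, 2) == 3 && len(xa) == 3)
}
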
@@ -120,22 +160,20 @@ translate_index :: #force_inline proc "contextless" (
 }

 @(private = "file")
-set_elems_assume_allocated :: proc "contextless" (
+set_elems_assume_allocated :: proc(
     a: $T/^Xarr($E, $SOA),
     elems: []E,
+    base: rawptr,
 ) #no_bounds_check {
     for &e, i in elems {
         idx := a.len + i
         chunk_idx, idx_within_chunk := translate_index(idx)

         when SOA {
-            a.chunks[chunk_idx][idx_within_chunk] = e
+            slice := relptr.deref_soa_slice(a.chunks[chunk_idx], base)
+            slice[idx_within_chunk] = e
         } else {
-            intrinsics.mem_copy_non_overlapping(
-                &a.chunks[chunk_idx][idx_within_chunk],
-                &e,
-                size_of(E),
-            )
+            relptr.deref_multi_ptr(a.chunks[chunk_idx], base)[idx_within_chunk] = e
         }
     }
 }
@@ -153,17 +191,29 @@ get :: proc(a: $T/Xarr($E, $SOA), #any_int idx: int) -> E {
     return get_chunk_slice(a, chunk_idx)[idx_within_chunk]
 }

-get_ptr :: proc(a: $T/Xarr($E, $SOA), #any_int idx: int) -> ^E {
+get_ptr_scalar :: proc(a: $T/^Xarr($E, false), #any_int idx: int) -> ^E {
     assert(idx >= 0 && idx < a.len)

     chunk_idx, idx_within_chunk := translate_index(idx)
-    return &get_chunk_slice(a, chunk_idx)[idx_within_chunk]
+    return &get_chunk_slice_scalar(a, chunk_idx)[idx_within_chunk]
+}
+
+get_ptr_soa :: proc(a: $T/^Xarr($E, true), #any_int idx: int) -> #soa^#soa[]E {
+    assert(idx >= 0 && idx < a.len)
+
+    chunk_idx, idx_within_chunk := translate_index(idx)
+    return &get_chunk_slice_soa(a, chunk_idx)[idx_within_chunk]
+}
+
+get_ptr :: proc {
+    get_ptr_scalar,
+    get_ptr_soa,
 }

 unordered_remove :: proc(a: $T/^Xarr($E, $SOA), #any_int idx: int) {
     assert(idx >= 0 && idx < a.len)

-    get_ptr(a^, idx)^ = get(a^, a.len - 1)
+    get_ptr(a, idx)^ = get(a^, a.len - 1)
     a.len -= 1
 }

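
get_ptr had to split into two overloads because an element of SOA storage has no single contiguous address: the scalar variant still returns ^E, while the SOA variant returns an #soa pointer through which fields are read and written individually. A short sketch, where Particle and nudge_first are hypothetical names, not from the commit:

Particle :: struct {
    pos: [2]f32,
    vel: [2]f32,
}

nudge_first :: proc(xa: ^Xarr(Particle, true)) {
    p := get_ptr(xa, 0) // resolves to get_ptr_soa: an #soa pointer
    p.pos = {1, 2}      // the write lands in the chunk's separate pos array
}
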
@@ -172,7 +222,7 @@ clear :: proc "contextless" (a: $T/^Xarr($E, $SOA)) {
 }

 delete :: proc(a: $T/^Xarr($E, $SOA), allocator := context.allocator) {
-    for i in 0 ..< len(a.chunks) {
+    for i in 0 ..< builtin.len(a.chunks) {
         builtin.delete(get_chunk_slice(a^, i32(i)), allocator)
     }

@@ -193,7 +243,7 @@ iterator_next :: proc(it: ^Iterator($E, $SOA)) -> (e: ^E, idx: int, ok: bool) {
         return nil, it.idx, false
     }

-    e = get_ptr(it.xarr^, it.idx)
+    e = get_ptr(it.xarr, it.idx)
     idx = it.idx
     ok = true

@@ -225,7 +275,7 @@ chunk_iterator_next_scalar :: proc(
     chunk = get_chunk_slice_scalar(it.xarr^, it.chunk_idx)
     // Limit the chunk to the length so user code doesn't have to worry about this
     base_element_idx = it.base_element_idx
-    chunk = chunk[:min(len(chunk), it.xarr.len - base_element_idx)]
+    chunk = chunk[:min(builtin.len(chunk), it.xarr.len - base_element_idx)]
     ok = true

     base_element_idx += int(chunk_size(it.chunk_idx))
@@ -240,17 +290,17 @@ chunk_iterator_next_soa :: proc(
     base_element_idx: int,
     ok: bool,
 ) {
-    if (it.xarr.allocated_chunks_mask & (u32(1) << it.chunk_idx)) == 0 {
+    if (it.xarr.allocated_chunks_mask & (u32(1) << u32(it.chunk_idx))) == 0 {
         return nil, 0, false
     }

     chunk = get_chunk_slice_soa(it.xarr^, it.chunk_idx)
     // Limit the chunk to the length so user code doesn't have to worry about this
     base_element_idx = it.base_element_idx
-    chunk = chunk[:min(len(chunk), it.xarr.len - base_element_idx)]
+    chunk = chunk[:min(builtin.len(chunk), it.xarr.len - base_element_idx)]
     ok = true

-    base_element_idx += chunk_size(it.chunk_idx)
+    base_element_idx += int(chunk_size(it.chunk_idx))
     it.chunk_idx += 1
     return
 }

common/relptr/relptr.odin (new file, 102 lines)
@@ -0,0 +1,102 @@
+// Relative pointer
+package relptr
+
+import "base:intrinsics"
+
+Ptr :: struct($T: typeid) {
+    offset: uintptr,
+}
+
+Slice :: struct($T: typeid) {
+    offset: uintptr,
+    len: int,
+}
+
+SOA_Slice :: struct($T: typeid) {
+    // Offset for each field of SOA struct
+    offsets: [len(
+        T,
+    ) when intrinsics.type_is_array(T) else intrinsics.type_struct_field_count(T)]uintptr,
+    len: int,
+}
+
+from_rawptr :: #force_inline proc($T: typeid, addr: rawptr, base := context.user_ptr) -> Ptr(T) {
+    offset := uintptr(addr) - uintptr(base)
+    assert(offset >= 0, "ptr does not belong to this base")
+
+    return Ptr(T){offset = offset}
+}
+
+from_ptr :: #force_inline proc(addr: ^$T, base := context.user_ptr) -> Ptr(T) {
+    offset := uintptr(rawptr(addr)) - uintptr(base)
+    assert(offset >= 0, "ptr does not belong to this base")
+
+    return Ptr(T){offset = offset}
+}
+
+from_multi_ptr :: #force_inline proc(addr: [^]$T, base := context.user_ptr) -> Ptr(T) {
+    offset := uintptr(rawptr(addr)) - uintptr(base)
+    assert(offset >= 0, "ptr does not belong to this base")
+
+    return Ptr(T){offset = offset}
+}
+
+from_slice :: #force_inline proc(slice: []$T, base := context.user_ptr) -> Slice(T) {
+    offset := uintptr(rawptr(raw_data(slice))) - uintptr(base)
+    assert(offset >= 0, "ptr does not belong to this base")
+
+    return Slice(T){offset = offset, len = len(slice)}
+}
+
+from_soa_slice :: #force_inline proc(slice: #soa[]$T, base := context.user_ptr) -> SOA_Slice(T) {
+    slice := slice
+
+    result: SOA_Slice(T)
+
+    FIELD_COUNT ::
+        (len(T) when intrinsics.type_is_array(T) else intrinsics.type_struct_field_count(T))
+
+    // SOA slice is just an array of pointers to each member + a footer
+    src_ptrs := (transmute([^]uintptr)(&slice))[:FIELD_COUNT]
+
+    for i in 0 ..< len(result.offsets) {
+        result.offsets[i] = src_ptrs[i] - uintptr(base)
+    }
+    result.len = len(slice)
+
+    return result
+}
+
+deref_ptr :: #force_inline proc(ptr: Ptr($T), base := context.user_ptr) -> ^T {
+    return transmute(^T)(uintptr(base) + ptr.offset)
+}
+
+deref_multi_ptr :: #force_inline proc(ptr: Ptr($T), base := context.user_ptr) -> [^]T {
+    return transmute([^]T)(uintptr(base) + ptr.offset)
+}
+
+deref_slice :: #force_inline proc(slice: Slice($T), base := context.user_ptr) -> []T {
+    return (transmute([^]T)(uintptr(base) + slice.offset))[:slice.len]
+}
+
+deref_soa_slice :: #force_inline proc(slice: SOA_Slice($T), base := context.user_ptr) -> #soa[]T {
+    result: #soa[]T
+
+    footer := raw_soa_footer_slice(&result)
+
+    FIELD_COUNT ::
+        (len(T) when intrinsics.type_is_array(T) else intrinsics.type_struct_field_count(T))
+
+    // Just in case SOA layout changes
+    #assert(size_of(result) == (FIELD_COUNT * size_of(rawptr)) + size_of(footer))
+
+    // SOA slice is just an array of pointers to each member + a footer
+    result_ptrs := (transmute([^]uintptr)(&result))[:FIELD_COUNT]
+    for i in 0 ..< len(slice.offsets) {
+        result_ptrs[i] = uintptr(base) + slice.offsets[i]
+    }
+
+    footer.len = slice.len
+
+    return result
+}
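
The point of storing offsets rather than absolute addresses is that the whole block a base points into can be serialized, remapped, or handed to another process, and every Ptr, Slice, or SOA_Slice stored inside it stays meaningful as long as the matching block start is supplied at deref time. A minimal round-trip sketch; the package name, buffer, and values are illustrative and not from the commit:

package relptr_example

import "common:relptr"

round_trip :: proc() {
    block := make([]byte, 1024)
    defer delete(block)

    base := rawptr(raw_data(block))

    // Pretend a value lives at the start of the block.
    value := (^int)(base)
    value^ = 42

    rel := relptr.from_ptr(value, base)        // only the offset is stored
    assert(relptr.deref_ptr(rel, base)^ == 42) // resolved against the same base

    // If the same bytes were mapped at a different address, passing that
    // address as `base` would resolve the stored offset to the moved value.
}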