Parsing mostly complete; ordering, resolving, C++ tests

This commit is contained in:
Krzosa Karol
2022-05-12 17:10:24 +02:00
parent feae74b0b9
commit 9d54ed8195
37 changed files with 2748 additions and 5341 deletions

400
main.cpp Normal file
View File

@@ -0,0 +1,400 @@
#define NOMINMAX // keep windows.h from defining min()/max() macros
#include <windows.h>
#include <stdint.h>
// Fixed-width primitive aliases: S* signed, U* unsigned, B* boolean-ish, F* float.
typedef int8_t S8;
typedef int16_t S16;
typedef int32_t S32;
typedef int64_t S64;
typedef uint8_t U8;
typedef uint16_t U16;
typedef uint32_t U32;
typedef uint64_t U64;
typedef S8 B8;
typedef S16 B16;
typedef S32 B32;
typedef S64 B64;
typedef U64 SizeU; // unsigned byte-count / size type
typedef S64 SizeI; // signed size type
typedef float F32;
typedef double F64;
#define function static // marks file-local functions
#define global static   // marks file-scope variables
#define force_inline __forceinline
// Debug-break assert. NOTE(review): shadows the CRT assert and fires in ALL
// build configurations, not just debug.
#define assert(x) do{if(!(x))__debugbreak();}while(0)
#define assert_msg(x,...) assert(x) // the message text is currently discarded
#define invalid_codepath assert_msg(0, "Invalid codepath")
#define not_implemented assert_msg(0, "Not implemented")
#define buff_cap(x) (sizeof(x)/sizeof((x)[0])) // element count of a C array
// Byte-size helpers; 1024llu keeps the math in 64-bit on 32-bit targets.
#define kib(x) ((x)*1024llu)
#define mib(x) (kib(x)*1024llu)
#define gib(x) (mib(x)*1024llu)
// Pointer + length string view (not zero-terminated).
struct String{U8 *str;S64 len;};
function SizeU
get_align_offset(SizeU size, SizeU align){
    // Number of padding bytes needed to round `size` up to the next multiple
    // of `align`. Returns 0 when already aligned. The mask trick assumes
    // `align` is a power of two.
    SizeU remainder = size & (align - 1);
    return remainder ? (align - remainder) : 0;
}
function SizeU
align_up(SizeU size, SizeU align){
    // Round `size` up to the next multiple of `align` (power-of-two align).
    return size + get_align_offset(size, align);
}
function SizeU
align_down(SizeU size, SizeU align){
    // Round `size` down to a multiple of `align`. The +1 bump makes an
    // already-aligned size map to itself (align_down(8, 8) == 8, not 0).
    SizeU bumped = size + 1;
    SizeU pad = get_align_offset(bumped, align);
    return bumped - (align - pad);
}
function void
memory_copy(void *dst, void *src, SizeU size){
    // Forward byte-by-byte copy; assumes the ranges do not overlap
    // (memcpy-style, not memmove).
    U8 *d = (U8*)dst;
    U8 *s = (U8*)src;
    while(size){
        *d++ = *s++;
        size--;
    }
}
function void
memory_zero(void *p, SizeU size){
    // Clear `size` bytes starting at `p`, one byte at a time.
    U8 *cursor = (U8 *)p;
    U8 *end = cursor + size;
    while(cursor < end){
        *cursor++ = 0;
    }
}
template<class T>
T clamp_top(T val, T max){
    // Upper clamp: never let `val` exceed `max`.
    return (val > max) ? max : val;
}
template<class T>
T clamp_bot(T bot, T val){
    // Lower clamp: never let `val` drop below `bot`.
    return (val < bot) ? bot : val;
}
template<class T>
T clamp(T min, T val, T max){
    // Clamp `val` into [min, max]. The lower bound wins when min > max,
    // matching the original's check order (max applied first, then min).
    return clamp_bot(min, clamp_top(val, max));
}
// Assumed commit granularity for all reserve/commit math.
// NOTE(review): hard-coded; the real value comes from GetSystemInfo —
// confirm 4096 holds on every target.
constexpr SizeU os_page_size = 4096;
// A reserved virtual address range of `reserve` bytes, of which the first
// `commit` bytes are backed by committed pages.
struct OS_Memory{
SizeU commit, reserve;
U8 *data;
};
function OS_Memory
os_reserve(SizeU size){
    // Reserve (but do not commit) a page-rounded span of address space.
    SizeU rounded = align_up(size, os_page_size);
    U8 *base = (U8*)VirtualAlloc(0, rounded, MEM_RESERVE, PAGE_READWRITE);
    assert_msg(base, "Failed to reserve virtual memory");
    OS_Memory result = {};
    result.data = base;
    result.reserve = rounded;
    return result;
}
// Commits up to `size` more bytes (page-rounded) at the end of the already
// committed region. The total is clamped to the reservation, so a request
// past the end commits only what remains. Returns true if any new pages
// were actually committed, false when already fully committed.
function B32
os_commit(OS_Memory *m, SizeU size){
SizeU commit = align_up(size, os_page_size);
SizeU total_commit = m->commit + commit;
// Never commit beyond the reserved range.
total_commit = clamp_top(total_commit, m->reserve);
SizeU adjusted_commit = total_commit - m->commit;
if(adjusted_commit != 0){
// Extend the committed region in place, right after the current high mark.
void *result = VirtualAlloc((U8*)m->data + m->commit, adjusted_commit, MEM_COMMIT, PAGE_READWRITE);
assert_msg(result, "Failed to commit more memory");
m->commit += adjusted_commit;
return true;
}
return false;
}
function void
os_release(OS_Memory *m){
    // Return the entire reservation to the OS and reset the descriptor so
    // stale pointers/sizes cannot be reused by accident.
    BOOL freed = VirtualFree(m->data, 0, MEM_RELEASE);
    assert_msg(freed != 0, "Failed to release OS_Memory");
    if(freed){
        *m = {};
    }
}
// Decommits every committed page from `pos` (rounded down to a page
// boundary, so a position inside a page releases that page too) up to the
// current commit mark. Returns true only if pages were actually decommitted.
function B32
os_decommit_pos(OS_Memory *m, SizeU pos){
SizeU aligned = align_down(pos, os_page_size);
// Can't decommit past what is committed.
SizeU adjusted_pos = clamp_top(aligned, m->commit);
SizeU size_to_decommit = m->commit - adjusted_pos;
if(size_to_decommit){
U8 *imp_address = m->data + adjusted_pos;
BOOL result = VirtualFree(imp_address, size_to_decommit, MEM_DECOMMIT);
if(result){
m->commit -= size_to_decommit;
return true;
}
}
return false;
}
// Exercises reserve/commit/decommit/release round-trips plus the align_down
// edge cases (aligned input preserved; just-below rounds down to zero).
function void
test_os_memory(){
assert(align_down(4096, 4096) == 4096);
assert(align_down(4095, 4096) == 0);
// 9000 bytes rounds up to three 4 KiB pages, none committed yet.
OS_Memory memory = os_reserve(9000);
assert(memory.reserve == 4096*3 && memory.data && memory.commit == 0);
os_commit(&memory, 100);
assert(memory.commit == 4096);
os_commit(&memory, 100);
assert(memory.commit == 4096*2);
// Commit requests are clamped to the reservation: tops out at three pages.
os_commit(&memory, 9000);
assert(memory.commit == 4096*3);
os_commit(&memory, 9000);
assert(memory.commit == 4096*3);
os_decommit_pos(&memory, 4096);
assert(memory.commit == 4096);
// Decommitting at the current commit mark is a no-op.
os_decommit_pos(&memory, 4096);
assert(memory.commit == 4096);
os_decommit_pos(&memory, 0);
assert(memory.commit == 0);
os_release(&memory);
assert(memory.data == 0);
}
// Allocator interface: a single proc dispatches alloc/resize/free/free-all,
// so an allocator is just a struct whose first member is that proc pointer.
enum Allocation_Kind{Allocation_Alloc,Allocation_Resize,Allocation_FreeAll,Allocation_Free};
struct Allocator;
typedef void *Allocator_Proc(Allocator*, Allocation_Kind, void *, SizeU);
struct Allocator{Allocator_Proc *proc;};
global const SizeU default_reserve_size = gib(4);
global const SizeU default_alignment = 8;
// Extra slack committed on each arena growth to amortize VirtualAlloc calls.
global const SizeU additional_commit_size = mib(1);
// Linear (bump) allocator layered on a reserved-then-committed OS_Memory
// range. Inherits Allocator so an Arena* passes wherever Allocator* is used.
struct Arena:Allocator{
OS_Memory memory;
SizeU alignment; // applied to len before every push
SizeU len; // bump cursor: bytes handed out so far
};
// Lazily invoked on first push; forward-declared because it installs
// arena_allocator_proc, which is defined below.
function void arena_init(Arena *arena);
function void
arena_pop_pos(Arena *arena, SizeU pos){
    // Rewind the bump cursor to `pos`; positions past the current length
    // are clamped (popping forward is a no-op).
    arena->len = clamp_top(pos, arena->len);
}
function void
arena_clear(Arena *arena){
    // Reset the arena to empty; committed pages stay committed for reuse.
    arena->len = 0;
}
function void *
arena_push_size(Arena *a, SizeU size){
    // Hands out `size` bytes from the arena, aligned to a->alignment.
    // Lazily initializes the arena on first use and commits more pages only
    // when the aligned cursor would run past the committed region.
    if(a->memory.reserve == 0){
        arena_init(a);
    }
    SizeU aligned_len = align_up(a->len, a->alignment);
    // BUG FIX: the old condition compared commit+size against commit, which
    // is true for any non-zero size, so EVERY push committed another
    // additional_commit_size of pages no matter how much was still unused.
    if(aligned_len + size > a->memory.commit){
        SizeU needed = (aligned_len + size) - a->memory.commit;
        B32 committed = os_commit(&a->memory, needed + additional_commit_size);
        assert(committed);
    }
    a->len = aligned_len;
    // BUG FIX: the old assert checked reserve > len + commit, which is not
    // the allocation invariant and eventually fires spuriously. What must
    // hold is that the allocation fits inside the committed region.
    assert(a->memory.commit >= a->len + size);
    void *result = a->memory.data + a->len;
    a->len += size;
    return result;
}
force_inline void *
arena_allocator_proc(Allocator *a, Allocation_Kind kind, void *old_pointer, SizeU size){
    // Allocator-interface adapter for Arena. Resize degenerates to a fresh
    // push — NOTE(review): old bytes are NOT copied (arenas don't track
    // block sizes) — and individual frees are unsupported.
    Arena *arena = (Arena *)a;
    switch(kind){
        case Allocation_Alloc:   return arena_push_size(arena, size);
        case Allocation_Resize:  return arena_push_size(arena, size);
        case Allocation_Free:    invalid_codepath; return 0;
        case Allocation_FreeAll: arena_clear(arena); return 0;
    }
    // BUG FIX: control could previously fall off the end of this non-void
    // function (undefined behavior) if `kind` held an out-of-range value.
    invalid_codepath;
    return 0;
}
// Arena proc variant that forces byte-granular (alignment == 1) packing,
// intended for big blob-style arenas.
// NOTE(review): it rewrites arena->alignment on EVERY call, so once an arena
// uses this proc it stays 1-aligned for all later pushes — confirm intended.
force_inline void *
big_personal_arena_allocator_proc(Allocator *a, Allocation_Kind kind, void *old_pointer, SizeU size){
Arena *arena = (Arena *)a;
arena->alignment = 1;
return arena_allocator_proc(a, kind, old_pointer, size);
}
function void
arena_init(Arena *a){
    // Reserve the default address range and install the default arena proc,
    // unless a caller already wired in a custom one.
    a->alignment = default_alignment;
    a->memory = os_reserve(default_reserve_size);
    if(a->proc == 0){
        a->proc = arena_allocator_proc;
    }
}
// Allocator backed by a Win32 heap (see os_heap_allocator_proc).
struct OS_Heap:Allocator{
HANDLE handle;
};
function void *
os_heap_allocator_proc(Allocator *a, Allocation_Kind kind, void *old_pointer, SizeU size){
    // Allocator-interface adapter over a Win32 heap. FreeAll destroys the
    // whole heap, leaving the OS_Heap handle dangling afterwards.
    OS_Heap *heap = (OS_Heap *)a;
    switch(kind){
        case Allocation_Alloc:{
            void *result = HeapAlloc(heap->handle, 0, size);
            assert(result);
            return result;
        }
        case Allocation_Resize:{
            void *result = HeapReAlloc(heap->handle, 0, old_pointer, size);
            assert(result);
            return result;
        }
        case Allocation_Free:{
            BOOL result = HeapFree(heap->handle, 0, old_pointer);
            assert(result != 0);
            return 0;
        }
        case Allocation_FreeAll:{
            BOOL result = HeapDestroy(heap->handle);
            assert(result != 0);
            return 0;
        }
    }
    // BUG FIX: control could previously fall off the end of this non-void
    // function (undefined behavior) if `kind` held an out-of-range value.
    invalid_codepath;
    return 0;
}
function OS_Heap // max_size == 0 == growing heap
win32_os_heap_create(B32 multithreaded, SizeU initial_size, SizeU max_size){
    // Wrap a Win32 heap in the Allocator interface. A single-threaded heap
    // skips the OS's internal locking via HEAP_NO_SERIALIZE.
    DWORD options = multithreaded ? 0 : HEAP_NO_SERIALIZE;
    OS_Heap result = {};
    result.handle = HeapCreate(options, initial_size, max_size);
    result.proc = os_heap_allocator_proc;
    assert(result.handle);
    return result;
}
// Per-thread context: two scratch arenas (the second is a fallback for
// results that must outlive the first scratch's scope) plus the implicit
// allocator that all imp_* routines route through.
struct Thread_Ctx{
Arena scratch[2];
Allocator *implicit_allocator;
};
thread_local Thread_Ctx thread_ctx;
// NOTE(review): "pernament" is a typo for "permanent"; identifier kept
// as-is because other code in the file refers to it by this name.
global Arena pernament_arena;
global OS_Heap os_process_heap;
// Token-paste helper: a second expansion level is required so __LINE__ is
// expanded to its number before ## glues the tokens together.
#ifndef glue
#define glue_(a,b) a##b
#define glue(a,b) glue_(a,b)
#endif
// BUG FIX: `scratch_##__LINE__` pasted the literal token `scratch___LINE__`
// (## suppresses macro expansion of its operands), so two Set_Scratch()
// uses in one scope collided on the same identifier. glue() makes the name
// unique per line as intended.
#define Set_Scratch() Scoped_Scratch glue(scratch_, __LINE__)
#define Set_Backup_Scratch() Scoped_Scratch glue(scratch_, __LINE__)(true)
// RAII guard: installs a scratch arena as the thread's implicit allocator
// and rewinds the arena (and restores the allocator) when the scope ends.
struct Scoped_Scratch{
    SizeU saved_pos;
    Allocator *saved_allocator;
    Arena *arena;
    Scoped_Scratch(B32 backup_scratch=false){
        // scratch[0] by default, scratch[1] when the caller needs a backup
        // whose results survive the primary scratch's scope.
        arena = thread_ctx.scratch + (backup_scratch ? 1 : 0);
        saved_pos = arena->len;
        saved_allocator = thread_ctx.implicit_allocator;
        thread_ctx.implicit_allocator = arena;
    }
    ~Scoped_Scratch(){
        thread_ctx.implicit_allocator = saved_allocator;
        arena_pop_pos(arena, saved_pos);
    }
};
// Token-paste helper (guarded so multiple definitions don't collide): a
// second expansion level makes __LINE__ expand before ## pastes.
#ifndef glue
#define glue_(a,b) a##b
#define glue(a,b) glue_(a,b)
#endif
// BUG FIX: `scoped_##__LINE__` pasted the literal token `scoped___LINE__`
// (## suppresses expansion of __LINE__), so two Set_Allocator() uses in the
// same scope collided. glue() yields a unique name per line.
#define Set_Allocator(a) Scoped_Allocator glue(scoped_, __LINE__)(a)
// RAII guard: swaps `a` in as the thread's implicit allocator for the
// current scope and restores the previous one on exit.
struct Scoped_Allocator{
    Allocator *allocator; // previous implicit allocator, restored in dtor
    Scoped_Allocator(Allocator *a){
        Allocator *previous = thread_ctx.implicit_allocator;
        thread_ctx.implicit_allocator = a;
        allocator = previous;
    }
    ~Scoped_Allocator(){
        thread_ctx.implicit_allocator = allocator;
    }
};
// Typed convenience wrappers over the explicit-allocator entry points.
#define exp_alloc_array(a, T, size) (T *)exp_alloc(a, sizeof(T)*(size))
#define exp_alloc_type(a, T) exp_alloc_array(a, T, 1)
// BUG FIX: this previously called the nonexistent `expr_resize` (typo for
// exp_resize) and, unlike exp_alloc_array, forgot the (T *) cast on the
// returned void*.
#define exp_resize_array(a, p, T, size) (T *)exp_resize(a, p, sizeof(T)*(size))
force_inline void *
exp_alloc(Allocator *a, SizeU size){
    // Request a fresh allocation of `size` bytes from allocator `a`.
    Allocator_Proc *proc = a->proc;
    return proc(a, Allocation_Alloc, 0, size);
}
force_inline void *
exp_resize(Allocator *a, void *pointer, SizeU size){
    // Resize `pointer` to `size` bytes through the allocator's proc.
    Allocator_Proc *proc = a->proc;
    return proc(a, Allocation_Resize, pointer, size);
}
force_inline void
exp_free(Allocator *a, void *pointer){
    // Release one allocation (arenas treat this as an invalid codepath).
    Allocator_Proc *proc = a->proc;
    proc(a, Allocation_Free, pointer, 0);
}
force_inline void
exp_free_all(Allocator *a){
    // Release everything the allocator owns in one call.
    Allocator_Proc *proc = a->proc;
    proc(a, Allocation_FreeAll, 0, 0);
}
// Typed convenience wrappers over the implicit (thread-local) allocator.
#define imp_alloc_array(T,size) (T *)imp_alloc(sizeof(T) * (size))
// BUG FIX: a space before (T) made this an OBJECT-like macro whose body
// started with the tokens `(T)`, so `imp_alloc_type(U8)` expanded to the
// nonsense `(T) imp_alloc_array(T,1)(U8)`. Function-like form restored.
#define imp_alloc_type(T) imp_alloc_array(T,1)
#define imp_resize_array(p, T,size) (T *)imp_resize(p, sizeof(T) * (size))
force_inline void *
imp_alloc(SizeU size){
    // Allocate via whatever allocator is currently installed on the thread.
    Allocator *a = thread_ctx.implicit_allocator;
    return exp_alloc(a, size);
}
force_inline void *
imp_resize(void *pointer, SizeU size){
    // Resize via the thread's implicit allocator.
    Allocator *a = thread_ctx.implicit_allocator;
    return exp_resize(a, pointer, size);
}
force_inline void
imp_free(void *pointer){
    // Free via the thread's implicit allocator.
    Allocator *a = thread_ctx.implicit_allocator;
    exp_free(a, pointer);
}
force_inline void
imp_free_all(){
    // Free-all via the thread's implicit allocator.
    Allocator *a = thread_ctx.implicit_allocator;
    exp_free_all(a);
}
// Sets up the calling thread's context: backs both scratch arenas and the
// permanent arena with reserved memory, and wraps the process heap in the
// Allocator interface. Must run before any imp_* allocation on this thread.
function void
thread_ctx_init(){
arena_init(thread_ctx.scratch);
arena_init(thread_ctx.scratch+1);
// NOTE(review): pernament_arena ("permanent", identifier typo kept) is a
// process-wide global but this runs per thread — calling from multiple
// threads would re-reserve it; confirm single-threaded init is intended.
arena_init(&pernament_arena);
os_process_heap.proc = os_heap_allocator_proc;
os_process_heap.handle = GetProcessHeap();
}
// Exercises the heap allocator through both explicit (exp_*) and implicit
// (imp_*) paths, then checks Scoped_Scratch swaps the implicit allocator
// in and back out.
function void
test_heap_allocator(){
OS_Heap heap = win32_os_heap_create(false, mib(1), 0);
Set_Allocator(&heap);
assert(thread_ctx.implicit_allocator == &heap);
U8 *result = imp_alloc_array(U8,1024);
result[1023] = 1;
result = exp_alloc_type(&heap, U8);
*result = 0;
// NOTE(review): for OS_Heap this maps to HeapDestroy, so heap.handle is
// dangling from here on — only pointer comparisons follow, no more allocs.
imp_free_all();
assert(thread_ctx.implicit_allocator == &heap);
{
// Inside the scope the scratch arena becomes the implicit allocator.
Set_Scratch();
assert(thread_ctx.implicit_allocator != &heap);
assert(thread_ctx.implicit_allocator == thread_ctx.scratch);
}
// Leaving the scope restores the previous implicit allocator.
assert(thread_ctx.implicit_allocator == &heap);
}
int main(){
    // BUG FIX: thread_ctx_init() must run before any test so the scratch
    // arenas, permanent arena, and process heap are set up; previously the
    // test executed against an uninitialized thread context (it only worked
    // because Set_Allocator overrode the null implicit allocator).
    thread_ctx_init();
    test_heap_allocator();
}