#if defined(USE_ADDRESS_SANITIZER)
#include <sanitizer/asan_interface.h>
#endif

#if !defined(ASAN_POISON_MEMORY_REGION)
#define MA_ASAN_POISON_MEMORY_REGION(addr, size) ((void)(addr), (void)(size))
#define MA_ASAN_UNPOISON_MEMORY_REGION(addr, size) ((void)(addr), (void)(size))
#else
#define MA_ASAN_POISON_MEMORY_REGION(addr, size) ASAN_POISON_MEMORY_REGION(addr, size)
#define MA_ASAN_UNPOISON_MEMORY_REGION(addr, size) ASAN_UNPOISON_MEMORY_REGION(addr, size)
#endif
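
// ASan note: when USE_ADDRESS_SANITIZER is defined, the virtual arena keeps
// freshly committed pages poisoned, unpoisons only the bytes handed out by
// PushSize, and re-poisons everything released by PopToPos, so stale pointers
// into the arena trip AddressSanitizer instead of silently reading old data.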

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#if OS_WINDOWS

#ifndef NOMINMAX
#define NOMINMAX
#endif
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h>

API void *VReserve(size_t size) {
    // Reserve address space only; pages become usable after VCommit.
    void *result = VirtualAlloc(0, size, MEM_RESERVE, PAGE_READWRITE);
    return result;
}

API bool VCommit(void *p, size_t size) {
    void *result = VirtualAlloc(p, size, MEM_COMMIT, PAGE_READWRITE);
    return result ? true : false;
}

API bool VRelease(void *p, size_t size) {
    // MEM_RELEASE requires a size of 0 and the base address returned by
    // VReserve; `size` is kept only for API symmetry with the POSIX path.
    BOOL result = VirtualFree(p, 0, MEM_RELEASE);
    return result ? true : false;
}

API bool VDecommit(void *p, size_t size) {
    BOOL result = VirtualFree(p, size, MEM_DECOMMIT);
    return result ? true : false;
}

#elif OS_LINUX || OS_MAC

#include <sys/mman.h>

API void *VReserve(size_t size) {
    // Reserve address space with no access rights; VCommit makes it usable.
    void *result = mmap(0, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, (off_t)0);
    return result == MAP_FAILED ? 0 : result;
}

API bool VCommit(void *p, size_t size) {
    int result = mprotect(p, size, PROT_READ | PROT_WRITE);
    return result == 0;
}

API bool VRelease(void *p, size_t size) {
    int result = munmap(p, size);
    return result == 0;
}

API bool VDecommit(void *p, size_t size) {
    // Drop access and let the kernel reclaim the physical pages; the range
    // stays reserved so it can be committed again later.
    mprotect(p, size, PROT_NONE);
    madvise(p, size, MADV_DONTNEED);
    return true;
}

#else

// Fallback for unsupported platforms: all virtual memory operations trap.

API void *VReserve(size_t size) {
    InvalidCodepath();
    return NULL;
}

API bool VCommit(void *p, size_t size) {
    InvalidCodepath();
    return false;
}

API bool VRelease(void *p, size_t size) {
    InvalidCodepath();
    return false;
}

API bool VDecommit(void *p, size_t size) {
    InvalidCodepath();
    return false;
}

#endif

API void *SystemAllocatorProc(void *object, int kind, void *p, size_t size) {
    // Allocator procedure over malloc/free. `object` is unused; `p` is the
    // pointer being freed on AllocatorKind_Deallocate.
    void *result = NULL;
    if (kind == AllocatorKind_Allocate) {
        result = malloc(size);
        Assert(result);
    } else if (kind == AllocatorKind_Deallocate) {
        free(p);
    } else {
        InvalidCodepath();
    }
    return result;
}

API Allocator GetSystemAllocator() {
    Allocator result = {SystemAllocatorProc};
    return result;
}

API void *_AllocSize_(Allocator alo, size_t size) {
    void *result = alo.proc(alo.object, AllocatorKind_Allocate, NULL, size);
    // Allocations are handed out zero-initialized; skip the memset if the
    // underlying allocator (e.g. an exhausted arena) returned NULL.
    if (result) memset(result, 0, size);
    return result;
}
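
// Usage sketch (`Foo` is only illustrative):
//   Allocator alo = GetSystemAllocator();
//   Foo *f = (Foo *)AllocSize(alo, sizeof(Foo));   // zeroed on allocation
//   Dealloc(alo, f);                               // issues AllocatorKind_Deallocate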

// One record per live allocation made through the tracking allocator, together
// with the call site that requested it.
struct MemoryRecord {
    size_t size;
    void *addr;
    char *file;
    int line;
};
thread_local Array<MemoryRecord> MemoryTrackingRecord;

API void *TrackingAllocatorProc(void *object, int kind, void *p, size_t size) {
    void *result = NULL;

    if (kind == AllocatorKind_Allocate) {
        result = malloc(size);
        Assert(result);
        // Remember the allocation together with the recorded call site.
        Add(&MemoryTrackingRecord, {size, result, LocationTraceO.file, LocationTraceO.line});
    } else if (kind == AllocatorKind_Deallocate) {
        // Deallocating NULL is a no-op, mirroring free(NULL).
        if (p != NULL) {
            free(p);

            // Drop the matching record; assert if this pointer was never
            // tracked (or was already freed).
            bool found = false;
            For(IterateInReverse(&MemoryTrackingRecord)) {
                if (it.addr == p) {
                    found = true;
                    UnorderedRemove(&MemoryTrackingRecord, it);
                    break;
                }
            }
            Assert(found);
        }
    } else {
        InvalidCodepath();
    }
    return result;
}

API void TrackingAllocatorCheck() {
    // For (MemoryTrackingRecord) {
    //     ReportConsolef("%s(%d): error: memory leak", it.file, it.line);
    // }
    Assert(MemoryTrackingRecord.len == 0);
}

API Allocator GetTrackingAllocator() {
    Allocator result = {TrackingAllocatorProc};
    return result;
}
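
// Usage sketch: route allocations through the tracking allocator, then verify
// at shutdown that everything allocated on this thread was freed.
//   Allocator alo = GetTrackingAllocator();
//   void *p = AllocSize(alo, 64);
//   Dealloc(alo, p);
//   TrackingAllocatorCheck();   // asserts that no records are left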

///////////////////////////////
// Virtual Arena

API void InitArena(VirtualArena *arena, size_t reserve) {
    // Reserve-only setup: nothing is committed until the first PushSize.
    reserve = AlignUp(reserve, PAGE_SIZE);
    arena->align = DEFAULT_ALIGNMENT;
    arena->data = (uint8_t *)VReserve(reserve);
    if (arena->data) {
        arena->reserve = reserve;
    }
}

API VirtualArena *AllocArena(Allocator allocator, size_t size) {
    // Arena backed by a plain allocation instead of virtual memory; the whole
    // capacity counts as committed up front, so PushSize never grows it.
    VirtualArena *result = AllocType(allocator, VirtualArena);
    result->data = (uint8_t *)AllocSize(allocator, size);
    result->reserve = size;
    result->commit = size;
    result->align = DEFAULT_ALIGNMENT;
    return result;
}

API VirtualArena *AllocArena(size_t reserve) {
    // Bootstrapped arena: the VirtualArena header lives at the start of its
    // own reservation, so len/base_len start past sizeof(VirtualArena).
    VirtualArena *result = NULL;

    void *data = VReserve(reserve);
    if (!data) return result;

    bool success = VCommit(data, PAGE_SIZE);
    if (!success) {
        VRelease(data, reserve);
        return result;
    }

    result = (VirtualArena *)data;
    result->data = (uint8_t *)data;
    result->reserve = reserve;
    result->commit = PAGE_SIZE;
    result->len = result->base_len = sizeof(VirtualArena);
    result->align = DEFAULT_ALIGNMENT;
    return result;
}
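
// Usage sketch (`Thing` is only illustrative):
//   VirtualArena *arena = AllocArena(MiB(64));            // header sits inside the reservation
//   Thing *t = (Thing *)PushSize(arena, sizeof(Thing));
//   ...
//   Release(arena);                                       // unmaps header and data together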

API void *PushSize(VirtualArena *arena, size_t size) {
    // base_len is used for bootstrapping arenas; it denotes the space occupied
    // by the arena header itself. If len ever drops below base_len we would
    // start to overwrite the arena itself - pure barbarism.
    Assert(arena->len >= arena->base_len);

    size_t align_offset = 0;
    if (arena->align) {
        align_offset = GetAlignOffset((uintptr_t)arena->data + arena->len, arena->align);
    }
    size_t size_with_alignment = size + align_offset;
    size_t new_len = arena->len + size_with_alignment;
    if (new_len > arena->commit) {
        // Commit more pages, clamped to what is left of the reservation.
        size_t new_len_aligned_to_page_size = AlignUp(new_len, PAGE_SIZE);
        size_t to_commit = new_len_aligned_to_page_size - arena->commit;
        size_t to_commit_clamped = ClampTop(to_commit, arena->reserve - arena->commit);
        if (to_commit_clamped > 0) {
            bool success = VCommit(arena->data + arena->commit, to_commit_clamped);
            if (success) {
                // Newly committed memory stays poisoned until it is handed out.
                MA_ASAN_POISON_MEMORY_REGION(arena->data + arena->commit, to_commit_clamped);
                arena->commit += to_commit_clamped;
            }
        }
        if (new_len > arena->commit) {
            return NULL;
        }
    }
    uint8_t *result = arena->data + arena->len + align_offset;
    arena->len = new_len;
    MA_ASAN_UNPOISON_MEMORY_REGION(result, size);
    return (void *)result;
}
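
// Growth policy: commits advance in whole pages. For example, assuming
// PAGE_SIZE == 4096, a push that raises len to 8104 bytes on an arena with
// 4096 bytes committed commits AlignUp(8104, 4096) - 4096 = 8192 more bytes,
// leaving 12288 bytes committed.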

API void Release(VirtualArena *arena) {
    if (arena == NULL || arena->data == NULL) return;
    // A bootstrapped arena's header lives inside the mapping being released,
    // so it must not be touched afterwards; only zero separately stored headers.
    bool zero_memory = (uint8_t *)arena != arena->data;
    VRelease(arena->data, arena->reserve);
    if (zero_memory) MemoryZero(arena, sizeof(VirtualArena));
}

API void PopToPos(VirtualArena *arena, size_t pos) {
    // base_len is used for bootstrapping arenas; it denotes the space occupied
    // by the arena header itself. If len ever drops below base_len we would
    // start to overwrite the arena itself - pure barbarism.
    Assert(arena->len >= arena->base_len);

    pos = Clamp(pos, arena->base_len, arena->len);
    size_t size = arena->len - pos;
    arena->len = pos;
    MA_ASAN_POISON_MEMORY_REGION(arena->data + arena->len, size);
}
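
// Usage sketch: temporary/scratch scope.
//   size_t mark = arena->len;
//   char *tmp = (char *)PushSize(arena, 4096);
//   // ... use tmp ...
//   PopToPos(arena, mark);   // frees (and re-poisons) everything pushed after mark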

API void *ArenaAllocatorProc(void *object, int kind, void *p, size_t size) {
    if (kind == AllocatorKind_Allocate) {
        return PushSize((VirtualArena *)object, size);
    } else if (kind == AllocatorKind_Deallocate) {
        // Individual deallocation is a no-op; memory comes back via PopToPos/Release.
    } else {
        Assert(!"invalid codepath");
    }
    return NULL;
}
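
// Sketch: handing an arena to code that expects an Allocator. This assumes the
// Allocator's second member is the opaque pointer passed back as `object`.
//   VirtualArena *arena = AllocArena(MiB(16));
//   Allocator alo = {ArenaAllocatorProc, arena};
//   void *p = AllocSize(alo, 256);   // served by PushSize; Dealloc on it is a no-op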

///////////////////////////////
// Block Arena
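
// A BlockArena is a singly linked stack of heap blocks obtained from
// arena->allocator (1 MiB each by default, larger when a single push needs more).
// PushSize bump-allocates out of the newest block and chains a fresh one when it
// runs out; Unwind pops and frees blocks until it reaches the one containing the
// given position.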

API void *PushSize(BlockArena *arena, size_t size) {
    if (size > (size_t)(arena->end - arena->start)) {
        // The current block is too small: chain a new one. Keep the block size
        // aligned so the trailing AlignUp of `start` can never step past `end`.
        size_t block_size = MiB(1);
        if (size > block_size) {
            block_size = AlignUp(size, DEFAULT_ALIGNMENT);
        }
        if (arena->allocator.proc == NULL) {
            arena->allocator = GetSystemAllocator();
        }
        BlockArenaNode *new_block = (BlockArenaNode *)AllocSize(arena->allocator, block_size + sizeof(BlockArenaNode));
        Assert(GetAlignOffset((size_t)new_block->start, DEFAULT_ALIGNMENT) == 0);
        arena->start = new_block->start;
        new_block->end = arena->end = new_block->start + block_size;
        SLL_STACK_ADD(arena->blocks, new_block);
    }
    U8 *result = arena->start;
    Assert(GetAlignOffset((size_t)result, DEFAULT_ALIGNMENT) == 0);
    arena->start = (U8 *)AlignUp((size_t)(arena->start + size), DEFAULT_ALIGNMENT);
    return result;
}

API void Release(BlockArena *arena) {
    // Free every block, then reset the arena to its zero state.
    for (BlockArenaNode *it = arena->blocks, *next = NULL; it; it = next) {
        next = it->next;
        Dealloc(arena->allocator, it);
    }
    MemoryZero(arena, sizeof(BlockArena));
}

API void Unwind(BlockArena *arena, U8 *pos) {
    // Pop and free blocks from the top of the stack until the block containing
    // `pos` is reached; pos == NULL unwinds everything.
    bool contains = false;
    for (BlockArenaNode *it = arena->blocks, *next = NULL; it; it = next) {
        next = it->next;
        if ((pos >= it->start) && (pos < it->end)) {
            contains = true;
            arena->end = it->end; // resync end with the surviving block
            break;
        } else {
            arena->blocks = arena->blocks->next;
            Dealloc(arena->allocator, it);
        }
    }
    Assert(contains || pos == NULL);
    arena->start = pos;
    if (!contains) arena->end = NULL; // fully unwound, no current block
}

API void *BlockArenaAllocatorProc(void *object, int kind, void *p, size_t size) {
    BlockArena *arena = (BlockArena *)object;
    if (kind == AllocatorKind_Allocate) {
        return PushSize(arena, size);
    } else if (kind == AllocatorKind_Deallocate) {
        // Individual deallocation is a no-op; memory comes back via Unwind/Release.
    } else {
        Assert(!"invalid codepath");
    }
    return NULL;
}

void RunArenaTest() {
    Allocator memory_tracking_allocator = GetTrackingAllocator();

    // Many growing pushes spread across several blocks.
    {
        BlockArena arena = {};
        arena.allocator = memory_tracking_allocator;
        for (int i = 0; i < 10000; i += 1) {
            int *vals = (int *)PushSize(&arena, sizeof(int) * i);
            for (int j = 0; j < i; j += 1) {
                vals[j] = j;
            }
        }
        Release(&arena);
        TrackingAllocatorCheck();
    }

    // Unwind within a single block, then unwind everything.
    {
        BlockArena arena = {};
        U8 *start = arena.start;
        arena.allocator = memory_tracking_allocator;
        int *vals = (int *)PushSize(&arena, sizeof(int) * 32);
        for (int i = 0; i < 32; i += 1) vals[i] = i;
        Unwind(&arena, (U8 *)vals);
        Assert(arena.blocks);
        Assert(arena.blocks->next == NULL);
        Assert(arena.start == (U8 *)vals);
        Assert(arena.blocks[0].start == (U8 *)vals);
        Unwind(&arena, NULL);
        Dealloc(arena.allocator, arena.blocks);
        TrackingAllocatorCheck();
    }

    // Unwind across block boundaries back into the first block.
    {
        BlockArena arena = {};
        arena.allocator = memory_tracking_allocator;
        int *vals = (int *)PushSize(&arena, sizeof(int) * 32);
        for (int i = 0; i < 32; i += 1) vals[i] = i;

        U8 *p = arena.start;
        U8 *a = (U8 *)PushSize(&arena, KiB(32));
        U8 *b = (U8 *)PushSize(&arena, KiB(1000));
        Assert(arena.blocks);
        Assert(arena.blocks->next);
        Assert(arena.blocks->next->next == NULL);
        Unwind(&arena, a);
        Assert(arena.blocks);
        Assert(arena.blocks->next == NULL);
        Assert(arena.start == p);

        Release(&arena);
        TrackingAllocatorCheck();
    }

    // An oversized push gets its own dedicated block.
    {
        BlockArena arena = {};
        arena.allocator = memory_tracking_allocator;
        U8 *a = (U8 *)PushSize(&arena, KiB(2000));
        Assert((size_t)(arena.blocks[0].end - arena.blocks[0].start) == KiB(2000));
        Release(&arena);
        TrackingAllocatorCheck();
    }
}