Arithmetic ops and pushes for all types

This commit is contained in:
Krzosa Karol
2022-06-20 20:14:34 +02:00
parent 5a8f36b16a
commit fd66781afb
3 changed files with 1084 additions and 361 deletions

View File

@@ -2,22 +2,77 @@
// Instructions
//
enum{
INS_PUSH_S64,
INS_PUSH_S32,
INS_PUSH_S16,
INS_PUSH_S8 ,
INS_PUSH_U64,
INS_PUSH_U32,
INS_PUSH_U16,
INS_PUSH_U8 ,
INS_PUSH_F64 ,
INS_PUSH_F32 ,
INS_END,
INS_POP,
INS_ADD,
INS_SUB,
INS_MUL,
INS_DIV,
//
// Generated using code_generating_script.py
//
INS_ADD_S64,
INS_SUB_S64,
INS_DIV_S64,
INS_MUL_S64,
INS_MOD_S64,
INS_PUSH_S64,
INS_ADD_S32,
INS_SUB_S32,
INS_DIV_S32,
INS_MUL_S32,
INS_MOD_S32,
INS_PUSH_S32,
INS_ADD_S16,
INS_SUB_S16,
INS_DIV_S16,
INS_MUL_S16,
INS_MOD_S16,
INS_PUSH_S16,
INS_ADD_S8,
INS_SUB_S8,
INS_DIV_S8,
INS_MUL_S8,
INS_MOD_S8,
INS_PUSH_S8,
INS_ADD_U64,
INS_SUB_U64,
INS_DIV_U64,
INS_MUL_U64,
INS_MOD_U64,
INS_PUSH_U64,
INS_ADD_U32,
INS_SUB_U32,
INS_DIV_U32,
INS_MUL_U32,
INS_MOD_U32,
INS_PUSH_U32,
INS_ADD_U16,
INS_SUB_U16,
INS_DIV_U16,
INS_MUL_U16,
INS_MOD_U16,
INS_PUSH_U16,
INS_ADD_U8,
INS_SUB_U8,
INS_DIV_U8,
INS_MUL_U8,
INS_MOD_U8,
INS_PUSH_U8,
INS_ADD_F32,
INS_SUB_F32,
INS_DIV_F32,
INS_MUL_F32,
INS_MOD_F32,
INS_PUSH_F32,
INS_ADD_F64,
INS_SUB_F64,
INS_DIV_F64,
INS_MUL_F64,
INS_MOD_F64,
INS_PUSH_F64,
//
// **End** of generated using code_generating_script.py
//
};
//
@@ -50,9 +105,8 @@ create_bytecode_interp(){
arena_init(&b.stack, "Bytecode stack"_s);
b.stack.alignment = 8;
// Commit
arena_push_size(&b.stack, 16);
arena_clear(&b.stack);
// Setup a 4 kilobyte stack
arena_push_size(&b.stack, kib(4));
b.stack_pointer = b.stack_bottom = b.stack.memory.data;
}
@@ -65,12 +119,20 @@ emit_pop(Bc *bc){
*instruction = INS_POP;
}
force_inline void *
stack_pop(Bc *b){
// Append an INS_END opcode to the instruction stream; the interpreter
// loop terminates when it decodes this instruction.
force_inline void
emit_end(Bc *bc){
U8 *op = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
op[0] = INS_END;
}
// Pop one stack slot -- laid out as an 8-byte value followed by an
// 8-byte type tag -- and return a pointer to the value portion.
// ins_pop_t(b, T) reads that value as type T.
#define ins_pop_t(b, T) (*((T *)ins_pop(b)))
static void *
ins_pop(Bc *b){
// Underflow guard. NOTE(review): this only checks that the pointer is
// not already at the bottom before the full 16-byte pop; it assumes
// every push advanced the pointer by exactly 2*sizeof(U64) -- confirm
// against the ins_push_* implementations.
assert_msg(b->stack_pointer != b->stack_bottom, "Reached bottom of bytecode interpreter stack");
// Step back over the type tag (stored above the value).
b->stack_pointer -= sizeof(U64);
// @warning we don't do anything with type for now
Ast_Type_Kind *type = (Ast_Type_Kind *)b->stack_pointer;
unused(type);
// Step back over the value itself; stack_pointer now addresses it.
b->stack_pointer -= sizeof(U64);
return b->stack_pointer;
}
@@ -82,127 +144,472 @@ run_bytecode_interp(Bc *b){
switch(instruction){
case INS_POP:{
void *value = stack_pop(b);
void *value = ins_pop(b);
S64 *s64 = (S64 *)value;
F64 *f64 = (F64 *)value;
U64 *u64 = (U64 *)value;
unused(s64);
unused(f64);
unused(u64);
} break;
//
// Generated using code_generating_script.py
//
case INS_END:{
goto interp_loop_breakout;
} break;
case INS_PUSH_S64:{
//
// Generated using code_generating_script.py
//
case INS_ADD_S64:{
S64 l = ins_pop_t(b, S64);
S64 r = ins_pop_t(b, S64);
S64 result = l + r;
ins_push_s64(b, result);
}break;
case INS_SUB_S64:{
S64 l = ins_pop_t(b, S64);
S64 r = ins_pop_t(b, S64);
S64 result = l - r;
ins_push_s64(b, result);
}break;
case INS_DIV_S64:{
S64 l = ins_pop_t(b, S64);
S64 r = ins_pop_t(b, S64);
S64 result = l / r;
ins_push_s64(b, result);
}break;
case INS_MUL_S64:{
S64 l = ins_pop_t(b, S64);
S64 r = ins_pop_t(b, S64);
S64 result = l * r;
ins_push_s64(b, result);
}break;
case INS_MOD_S64:{
S64 l = ins_pop_t(b, S64);
S64 r = ins_pop_t(b, S64);
S64 result = l % r;
ins_push_s64(b, result);
}break;
case INS_PUSH_S64:{
// Fetch value from instruction
// instructions are tightly packed so we
// move pointer by the type size
auto value = (S64 *)b->ins_pointer;
b->ins_pointer += sizeof(S64);
stack_push_s64(b, *value);
} break;
ins_push_s64(b, *value);
} break;
case INS_PUSH_S32:{
case INS_ADD_S32:{
S32 l = ins_pop_t(b, S32);
S32 r = ins_pop_t(b, S32);
S32 result = l + r;
ins_push_s32(b, result);
}break;
case INS_SUB_S32:{
S32 l = ins_pop_t(b, S32);
S32 r = ins_pop_t(b, S32);
S32 result = l - r;
ins_push_s32(b, result);
}break;
case INS_DIV_S32:{
S32 l = ins_pop_t(b, S32);
S32 r = ins_pop_t(b, S32);
S32 result = l / r;
ins_push_s32(b, result);
}break;
case INS_MUL_S32:{
S32 l = ins_pop_t(b, S32);
S32 r = ins_pop_t(b, S32);
S32 result = l * r;
ins_push_s32(b, result);
}break;
case INS_MOD_S32:{
S32 l = ins_pop_t(b, S32);
S32 r = ins_pop_t(b, S32);
S32 result = l % r;
ins_push_s32(b, result);
}break;
case INS_PUSH_S32:{
// Fetch value from instruction
// instructions are tightly packed so we
// move pointer by the type size
auto value = (S32 *)b->ins_pointer;
b->ins_pointer += sizeof(S32);
stack_push_s32(b, *value);
} break;
ins_push_s32(b, *value);
} break;
case INS_PUSH_S16:{
case INS_ADD_S16:{
S16 l = ins_pop_t(b, S16);
S16 r = ins_pop_t(b, S16);
S16 result = l + r;
ins_push_s16(b, result);
}break;
case INS_SUB_S16:{
S16 l = ins_pop_t(b, S16);
S16 r = ins_pop_t(b, S16);
S16 result = l - r;
ins_push_s16(b, result);
}break;
case INS_DIV_S16:{
S16 l = ins_pop_t(b, S16);
S16 r = ins_pop_t(b, S16);
S16 result = l / r;
ins_push_s16(b, result);
}break;
case INS_MUL_S16:{
S16 l = ins_pop_t(b, S16);
S16 r = ins_pop_t(b, S16);
S16 result = l * r;
ins_push_s16(b, result);
}break;
case INS_MOD_S16:{
S16 l = ins_pop_t(b, S16);
S16 r = ins_pop_t(b, S16);
S16 result = l % r;
ins_push_s16(b, result);
}break;
case INS_PUSH_S16:{
// Fetch value from instruction
// instructions are tightly packed so we
// move pointer by the type size
auto value = (S16 *)b->ins_pointer;
b->ins_pointer += sizeof(S16);
stack_push_s16(b, *value);
} break;
ins_push_s16(b, *value);
} break;
case INS_PUSH_S8:{
case INS_ADD_S8:{
S8 l = ins_pop_t(b, S8);
S8 r = ins_pop_t(b, S8);
S8 result = l + r;
ins_push_s8(b, result);
}break;
case INS_SUB_S8:{
S8 l = ins_pop_t(b, S8);
S8 r = ins_pop_t(b, S8);
S8 result = l - r;
ins_push_s8(b, result);
}break;
case INS_DIV_S8:{
S8 l = ins_pop_t(b, S8);
S8 r = ins_pop_t(b, S8);
S8 result = l / r;
ins_push_s8(b, result);
}break;
case INS_MUL_S8:{
S8 l = ins_pop_t(b, S8);
S8 r = ins_pop_t(b, S8);
S8 result = l * r;
ins_push_s8(b, result);
}break;
case INS_MOD_S8:{
S8 l = ins_pop_t(b, S8);
S8 r = ins_pop_t(b, S8);
S8 result = l % r;
ins_push_s8(b, result);
}break;
case INS_PUSH_S8:{
// Fetch value from instruction
// instructions are tightly packed so we
// move pointer by the type size
auto value = (S8 *)b->ins_pointer;
b->ins_pointer += sizeof(S8);
stack_push_s8(b, *value);
} break;
ins_push_s8(b, *value);
} break;
case INS_PUSH_U64:{
case INS_ADD_U64:{
U64 l = ins_pop_t(b, U64);
U64 r = ins_pop_t(b, U64);
U64 result = l + r;
ins_push_u64(b, result);
}break;
case INS_SUB_U64:{
U64 l = ins_pop_t(b, U64);
U64 r = ins_pop_t(b, U64);
U64 result = l - r;
ins_push_u64(b, result);
}break;
case INS_DIV_U64:{
U64 l = ins_pop_t(b, U64);
U64 r = ins_pop_t(b, U64);
U64 result = l / r;
ins_push_u64(b, result);
}break;
case INS_MUL_U64:{
U64 l = ins_pop_t(b, U64);
U64 r = ins_pop_t(b, U64);
U64 result = l * r;
ins_push_u64(b, result);
}break;
case INS_MOD_U64:{
U64 l = ins_pop_t(b, U64);
U64 r = ins_pop_t(b, U64);
U64 result = l % r;
ins_push_u64(b, result);
}break;
case INS_PUSH_U64:{
// Fetch value from instruction
// instructions are tightly packed so we
// move pointer by the type size
auto value = (U64 *)b->ins_pointer;
b->ins_pointer += sizeof(U64);
stack_push_u64(b, *value);
} break;
ins_push_u64(b, *value);
} break;
case INS_PUSH_U32:{
case INS_ADD_U32:{
U32 l = ins_pop_t(b, U32);
U32 r = ins_pop_t(b, U32);
U32 result = l + r;
ins_push_u32(b, result);
}break;
case INS_SUB_U32:{
U32 l = ins_pop_t(b, U32);
U32 r = ins_pop_t(b, U32);
U32 result = l - r;
ins_push_u32(b, result);
}break;
case INS_DIV_U32:{
U32 l = ins_pop_t(b, U32);
U32 r = ins_pop_t(b, U32);
U32 result = l / r;
ins_push_u32(b, result);
}break;
case INS_MUL_U32:{
U32 l = ins_pop_t(b, U32);
U32 r = ins_pop_t(b, U32);
U32 result = l * r;
ins_push_u32(b, result);
}break;
case INS_MOD_U32:{
U32 l = ins_pop_t(b, U32);
U32 r = ins_pop_t(b, U32);
U32 result = l % r;
ins_push_u32(b, result);
}break;
case INS_PUSH_U32:{
// Fetch value from instruction
// instructions are tightly packed so we
// move pointer by the type size
auto value = (U32 *)b->ins_pointer;
b->ins_pointer += sizeof(U32);
stack_push_u32(b, *value);
} break;
ins_push_u32(b, *value);
} break;
case INS_PUSH_U16:{
case INS_ADD_U16:{
U16 l = ins_pop_t(b, U16);
U16 r = ins_pop_t(b, U16);
U16 result = l + r;
ins_push_u16(b, result);
}break;
case INS_SUB_U16:{
U16 l = ins_pop_t(b, U16);
U16 r = ins_pop_t(b, U16);
U16 result = l - r;
ins_push_u16(b, result);
}break;
case INS_DIV_U16:{
U16 l = ins_pop_t(b, U16);
U16 r = ins_pop_t(b, U16);
U16 result = l / r;
ins_push_u16(b, result);
}break;
case INS_MUL_U16:{
U16 l = ins_pop_t(b, U16);
U16 r = ins_pop_t(b, U16);
U16 result = l * r;
ins_push_u16(b, result);
}break;
case INS_MOD_U16:{
U16 l = ins_pop_t(b, U16);
U16 r = ins_pop_t(b, U16);
U16 result = l % r;
ins_push_u16(b, result);
}break;
case INS_PUSH_U16:{
// Fetch value from instruction
// instructions are tightly packed so we
// move pointer by the type size
auto value = (U16 *)b->ins_pointer;
b->ins_pointer += sizeof(U16);
stack_push_u16(b, *value);
} break;
ins_push_u16(b, *value);
} break;
case INS_PUSH_U8:{
case INS_ADD_U8:{
U8 l = ins_pop_t(b, U8);
U8 r = ins_pop_t(b, U8);
U8 result = l + r;
ins_push_u8(b, result);
}break;
case INS_SUB_U8:{
U8 l = ins_pop_t(b, U8);
U8 r = ins_pop_t(b, U8);
U8 result = l - r;
ins_push_u8(b, result);
}break;
case INS_DIV_U8:{
U8 l = ins_pop_t(b, U8);
U8 r = ins_pop_t(b, U8);
U8 result = l / r;
ins_push_u8(b, result);
}break;
case INS_MUL_U8:{
U8 l = ins_pop_t(b, U8);
U8 r = ins_pop_t(b, U8);
U8 result = l * r;
ins_push_u8(b, result);
}break;
case INS_MOD_U8:{
U8 l = ins_pop_t(b, U8);
U8 r = ins_pop_t(b, U8);
U8 result = l % r;
ins_push_u8(b, result);
}break;
case INS_PUSH_U8:{
// Fetch value from instruction
// instructions are tightly packed so we
// move pointer by the type size
auto value = (U8 *)b->ins_pointer;
b->ins_pointer += sizeof(U8);
stack_push_u8(b, *value);
} break;
ins_push_u8(b, *value);
} break;
case INS_PUSH_F32:{
case INS_ADD_F32:{
F32 l = ins_pop_t(b, F32);
F32 r = ins_pop_t(b, F32);
F32 result = l + r;
ins_push_f32(b, result);
}break;
case INS_SUB_F32:{
F32 l = ins_pop_t(b, F32);
F32 r = ins_pop_t(b, F32);
F32 result = l - r;
ins_push_f32(b, result);
}break;
case INS_DIV_F32:{
F32 l = ins_pop_t(b, F32);
F32 r = ins_pop_t(b, F32);
F32 result = l / r;
ins_push_f32(b, result);
}break;
case INS_MUL_F32:{
F32 l = ins_pop_t(b, F32);
F32 r = ins_pop_t(b, F32);
F32 result = l * r;
ins_push_f32(b, result);
}break;
case INS_PUSH_F32:{
// Fetch value from instruction
// instructions are tightly packed so we
// move pointer by the type size
auto value = (F32 *)b->ins_pointer;
b->ins_pointer += sizeof(F32);
stack_push_f32(b, *value);
} break;
ins_push_f32(b, *value);
} break;
case INS_PUSH_F64:{
case INS_ADD_F64:{
F64 l = ins_pop_t(b, F64);
F64 r = ins_pop_t(b, F64);
F64 result = l + r;
ins_push_f64(b, result);
}break;
case INS_SUB_F64:{
F64 l = ins_pop_t(b, F64);
F64 r = ins_pop_t(b, F64);
F64 result = l - r;
ins_push_f64(b, result);
}break;
case INS_DIV_F64:{
F64 l = ins_pop_t(b, F64);
F64 r = ins_pop_t(b, F64);
F64 result = l / r;
ins_push_f64(b, result);
}break;
case INS_MUL_F64:{
F64 l = ins_pop_t(b, F64);
F64 r = ins_pop_t(b, F64);
F64 result = l * r;
ins_push_f64(b, result);
}break;
case INS_PUSH_F64:{
// Fetch value from instruction
// instructions are tightly packed so we
// move pointer by the type size
auto value = (F64 *)b->ins_pointer;
b->ins_pointer += sizeof(F64);
stack_push_f64(b, *value);
} break;
ins_push_f64(b, *value);
} break;
//
// **End** of generated using code_generating_script.py
//
//
// **End** of generated using code_generating_script.py
//
default: invalid_codepath;
}
}
interp_loop_breakout:;
}
function void
test_interpreter(){
Bc b = create_bytecode_interp();
emit_push_s64(&b, 1);
emit_push_f64(&b, 2.42f);
emit_push_s8(&b, 3);
emit_push_u16(&b, 4);
emit_pop(&b);
emit_pop(&b);
emit_pop(&b);
emit_add_s64(&b);
emit_pop(&b);
// emit_push_f64(&b, 2.42f);
// emit_push_u16(&b, 4);
// emit_pop(&b);
// emit_pop(&b);
// emit_pop(&b);
// emit_pop(&b);
emit_end(&b);
run_bytecode_interp(&b);
}

View File

@@ -4,231 +4,515 @@
//
force_inline void
stack_push_s64(Bc *b, S64 value){
U64 allocation_size = 2*sizeof(U64);
auto data = (U8 *)arena_push_size(&b->stack, allocation_size);
force_inline void
ins_push_s64(Bc *b, S64 value){
auto data = b->stack_pointer;
b->stack_pointer += 2*sizeof(U64);
C(S64, data) = value;
data += sizeof(U64);
C(U64, data) = TYPE_S64;
}
b->stack_pointer += allocation_size;
}
force_inline void
emit_push_s64(Bc *bc, S64 emit_value){
force_inline void
emit_push_s64(Bc *bc, S64 emit_value){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8)+sizeof(S64));
*instruction = INS_PUSH_S64;
S64 *value = (S64 *)(instruction + 1);
*value = emit_value;
}
}
//
// S64 arithmetic emitters (generated by code_generating_script.py --
// edit the generator, not these bodies). Each appends one opcode byte;
// operands are taken from the interpreter stack at run time.
//
force_inline void
emit_add_s64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_ADD_S64;
}
force_inline void
emit_sub_s64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_SUB_S64;
}
force_inline void
emit_div_s64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_DIV_S64;
}
force_inline void
emit_mul_s64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_MUL_S64;
}
force_inline void
emit_mod_s64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_MOD_S64;
}
force_inline void
stack_push_s32(Bc *b, S32 value){
U64 allocation_size = 2*sizeof(U64);
auto data = (U8 *)arena_push_size(&b->stack, allocation_size);
force_inline void
ins_push_s32(Bc *b, S32 value){
auto data = b->stack_pointer;
b->stack_pointer += 2*sizeof(U64);
C(S32, data) = value;
data += sizeof(U64);
C(U64, data) = TYPE_S32;
}
b->stack_pointer += allocation_size;
}
force_inline void
emit_push_s32(Bc *bc, S32 emit_value){
force_inline void
emit_push_s32(Bc *bc, S32 emit_value){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8)+sizeof(S32));
*instruction = INS_PUSH_S32;
S32 *value = (S32 *)(instruction + 1);
*value = emit_value;
}
}
//
// S32 arithmetic emitters (generated by code_generating_script.py --
// edit the generator, not these bodies). Each appends one opcode byte.
//
force_inline void
emit_add_s32(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_ADD_S32;
}
force_inline void
emit_sub_s32(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_SUB_S32;
}
force_inline void
emit_div_s32(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_DIV_S32;
}
force_inline void
emit_mul_s32(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_MUL_S32;
}
force_inline void
emit_mod_s32(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_MOD_S32;
}
force_inline void
stack_push_s16(Bc *b, S16 value){
U64 allocation_size = 2*sizeof(U64);
auto data = (U8 *)arena_push_size(&b->stack, allocation_size);
force_inline void
ins_push_s16(Bc *b, S16 value){
auto data = b->stack_pointer;
b->stack_pointer += 2*sizeof(U64);
C(S16, data) = value;
data += sizeof(U64);
C(U64, data) = TYPE_S16;
}
b->stack_pointer += allocation_size;
}
force_inline void
emit_push_s16(Bc *bc, S16 emit_value){
force_inline void
emit_push_s16(Bc *bc, S16 emit_value){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8)+sizeof(S16));
*instruction = INS_PUSH_S16;
S16 *value = (S16 *)(instruction + 1);
*value = emit_value;
}
}
//
// S16 arithmetic emitters (generated by code_generating_script.py --
// edit the generator, not these bodies). Each appends one opcode byte.
//
force_inline void
emit_add_s16(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_ADD_S16;
}
force_inline void
emit_sub_s16(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_SUB_S16;
}
force_inline void
emit_div_s16(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_DIV_S16;
}
force_inline void
emit_mul_s16(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_MUL_S16;
}
force_inline void
emit_mod_s16(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_MOD_S16;
}
force_inline void
stack_push_s8(Bc *b, S8 value){
U64 allocation_size = 2*sizeof(U64);
auto data = (U8 *)arena_push_size(&b->stack, allocation_size);
force_inline void
ins_push_s8(Bc *b, S8 value){
auto data = b->stack_pointer;
b->stack_pointer += 2*sizeof(U64);
C(S8, data) = value;
data += sizeof(U64);
C(U64, data) = TYPE_S8;
}
b->stack_pointer += allocation_size;
}
force_inline void
emit_push_s8(Bc *bc, S8 emit_value){
force_inline void
emit_push_s8(Bc *bc, S8 emit_value){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8)+sizeof(S8));
*instruction = INS_PUSH_S8;
S8 *value = (S8 *)(instruction + 1);
*value = emit_value;
}
}
//
// S8 arithmetic emitters (generated by code_generating_script.py --
// edit the generator, not these bodies). Each appends one opcode byte.
//
force_inline void
emit_add_s8(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_ADD_S8;
}
force_inline void
emit_sub_s8(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_SUB_S8;
}
force_inline void
emit_div_s8(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_DIV_S8;
}
force_inline void
emit_mul_s8(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_MUL_S8;
}
force_inline void
emit_mod_s8(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_MOD_S8;
}
force_inline void
stack_push_u64(Bc *b, U64 value){
U64 allocation_size = 2*sizeof(U64);
auto data = (U8 *)arena_push_size(&b->stack, allocation_size);
force_inline void
ins_push_u64(Bc *b, U64 value){
auto data = b->stack_pointer;
b->stack_pointer += 2*sizeof(U64);
C(U64, data) = value;
data += sizeof(U64);
C(U64, data) = TYPE_U64;
}
b->stack_pointer += allocation_size;
}
force_inline void
emit_push_u64(Bc *bc, U64 emit_value){
force_inline void
emit_push_u64(Bc *bc, U64 emit_value){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8)+sizeof(U64));
*instruction = INS_PUSH_U64;
U64 *value = (U64 *)(instruction + 1);
*value = emit_value;
}
}
//
// U64 arithmetic emitters (generated by code_generating_script.py --
// edit the generator, not these bodies). Each appends one opcode byte.
//
force_inline void
emit_add_u64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_ADD_U64;
}
force_inline void
emit_sub_u64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_SUB_U64;
}
force_inline void
emit_div_u64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_DIV_U64;
}
force_inline void
emit_mul_u64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_MUL_U64;
}
force_inline void
emit_mod_u64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_MOD_U64;
}
force_inline void
stack_push_u32(Bc *b, U32 value){
U64 allocation_size = 2*sizeof(U64);
auto data = (U8 *)arena_push_size(&b->stack, allocation_size);
force_inline void
ins_push_u32(Bc *b, U32 value){
auto data = b->stack_pointer;
b->stack_pointer += 2*sizeof(U64);
C(U32, data) = value;
data += sizeof(U64);
C(U64, data) = TYPE_U32;
}
b->stack_pointer += allocation_size;
}
force_inline void
emit_push_u32(Bc *bc, U32 emit_value){
force_inline void
emit_push_u32(Bc *bc, U32 emit_value){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8)+sizeof(U32));
*instruction = INS_PUSH_U32;
U32 *value = (U32 *)(instruction + 1);
*value = emit_value;
}
}
//
// U32 arithmetic emitters (generated by code_generating_script.py --
// edit the generator, not these bodies). Each appends one opcode byte.
//
force_inline void
emit_add_u32(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_ADD_U32;
}
force_inline void
emit_sub_u32(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_SUB_U32;
}
force_inline void
emit_div_u32(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_DIV_U32;
}
force_inline void
emit_mul_u32(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_MUL_U32;
}
force_inline void
emit_mod_u32(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_MOD_U32;
}
force_inline void
stack_push_u16(Bc *b, U16 value){
U64 allocation_size = 2*sizeof(U64);
auto data = (U8 *)arena_push_size(&b->stack, allocation_size);
force_inline void
ins_push_u16(Bc *b, U16 value){
auto data = b->stack_pointer;
b->stack_pointer += 2*sizeof(U64);
C(U16, data) = value;
data += sizeof(U64);
C(U64, data) = TYPE_U16;
}
b->stack_pointer += allocation_size;
}
force_inline void
emit_push_u16(Bc *bc, U16 emit_value){
force_inline void
emit_push_u16(Bc *bc, U16 emit_value){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8)+sizeof(U16));
*instruction = INS_PUSH_U16;
U16 *value = (U16 *)(instruction + 1);
*value = emit_value;
}
}
//
// U16 arithmetic emitters (generated by code_generating_script.py --
// edit the generator, not these bodies). Each appends one opcode byte.
//
force_inline void
emit_add_u16(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_ADD_U16;
}
force_inline void
emit_sub_u16(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_SUB_U16;
}
force_inline void
emit_div_u16(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_DIV_U16;
}
force_inline void
emit_mul_u16(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_MUL_U16;
}
force_inline void
emit_mod_u16(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_MOD_U16;
}
force_inline void
stack_push_u8(Bc *b, U8 value){
U64 allocation_size = 2*sizeof(U64);
auto data = (U8 *)arena_push_size(&b->stack, allocation_size);
force_inline void
ins_push_u8(Bc *b, U8 value){
auto data = b->stack_pointer;
b->stack_pointer += 2*sizeof(U64);
C(U8, data) = value;
data += sizeof(U64);
C(U64, data) = TYPE_U8;
}
b->stack_pointer += allocation_size;
}
force_inline void
emit_push_u8(Bc *bc, U8 emit_value){
force_inline void
emit_push_u8(Bc *bc, U8 emit_value){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8)+sizeof(U8));
*instruction = INS_PUSH_U8;
U8 *value = (U8 *)(instruction + 1);
*value = emit_value;
}
}
//
// U8 arithmetic emitters (generated by code_generating_script.py --
// edit the generator, not these bodies). Each appends one opcode byte.
//
force_inline void
emit_add_u8(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_ADD_U8;
}
force_inline void
emit_sub_u8(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_SUB_U8;
}
force_inline void
emit_div_u8(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_DIV_U8;
}
force_inline void
emit_mul_u8(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_MUL_U8;
}
force_inline void
emit_mod_u8(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_MOD_U8;
}
force_inline void
stack_push_f32(Bc *b, F32 value){
U64 allocation_size = 2*sizeof(U64);
auto data = (U8 *)arena_push_size(&b->stack, allocation_size);
force_inline void
ins_push_f32(Bc *b, F32 value){
auto data = b->stack_pointer;
b->stack_pointer += 2*sizeof(U64);
C(F32, data) = value;
data += sizeof(U64);
C(U64, data) = TYPE_F32;
}
b->stack_pointer += allocation_size;
}
force_inline void
emit_push_f32(Bc *bc, F32 emit_value){
force_inline void
emit_push_f32(Bc *bc, F32 emit_value){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8)+sizeof(F32));
*instruction = INS_PUSH_F32;
F32 *value = (F32 *)(instruction + 1);
*value = emit_value;
}
}
//
// F32 arithmetic emitters (generated by code_generating_script.py --
// edit the generator, not these bodies). Each appends one opcode byte.
//
force_inline void
emit_add_f32(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_ADD_F32;
}
force_inline void
emit_sub_f32(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_SUB_F32;
}
force_inline void
emit_div_f32(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_DIV_F32;
}
force_inline void
emit_mul_f32(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_MUL_F32;
}
// NOTE(review): the generator's `excludes` list skips F32 MOD when
// generating interpreter switch cases, but not here -- so this emitter
// produces an INS_MOD_F32 opcode that the interpreter has no case for
// (it would hit the `default: invalid_codepath` path). Apply `excludes`
// in the emitter-generation loop of the script as well.
force_inline void
emit_mod_f32(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_MOD_F32;
}
force_inline void
stack_push_f64(Bc *b, F64 value){
U64 allocation_size = 2*sizeof(U64);
auto data = (U8 *)arena_push_size(&b->stack, allocation_size);
force_inline void
ins_push_f64(Bc *b, F64 value){
auto data = b->stack_pointer;
b->stack_pointer += 2*sizeof(U64);
C(F64, data) = value;
data += sizeof(U64);
C(U64, data) = TYPE_F64;
}
b->stack_pointer += allocation_size;
}
force_inline void
emit_push_f64(Bc *bc, F64 emit_value){
force_inline void
emit_push_f64(Bc *bc, F64 emit_value){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8)+sizeof(F64));
*instruction = INS_PUSH_F64;
F64 *value = (F64 *)(instruction + 1);
*value = emit_value;
}
}
//
// F64 arithmetic emitters (generated by code_generating_script.py --
// edit the generator, not these bodies). Each appends one opcode byte.
//
force_inline void
emit_add_f64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_ADD_F64;
}
force_inline void
emit_sub_f64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_SUB_F64;
}
force_inline void
emit_div_f64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_DIV_F64;
}
force_inline void
emit_mul_f64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_MUL_F64;
}
// NOTE(review): the generator's `excludes` list skips F64 MOD when
// generating interpreter switch cases, but not here -- so this emitter
// produces an INS_MOD_F64 opcode that the interpreter has no case for
// (it would hit the `default: invalid_codepath` path). Apply `excludes`
// in the emitter-generation loop of the script as well.
force_inline void
emit_mod_f64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_MOD_F64;
}
//
// **End** of generated using code_generating_script.py
//

View File

@@ -5,44 +5,75 @@ result = """
"""
types = ["S64", "S32", "S16", "S8", "U64", "U32", "U16", "U8", "F32", "F64"]
operations = [["+", "ADD"], ["-", "SUB"], ["/", "DIV"], ["*", "MUL"], ["%", "MOD"]]
enum = ["ADD", "SUB", "DIV", "MUL", "MOD", "PUSH"]
excludes = [["F64", "MOD"], ["F32", "MOD"]]
#
# Generate enum
#
if False:
for T in types:
for op in enum:
result += f" INS_{op}_{T},\n"
#
# Generate utility functions
#
if False:
if True:
for T in types:
t = T.lower()
result += f"""
force_inline void
stack_push_{t}(Bc *b, {T} value){{
U64 allocation_size = 2*sizeof(U64);
auto data = (U8 *)arena_push_size(&b->stack, allocation_size);
force_inline void
ins_push_{t}(Bc *b, {T} value){{
auto data = b->stack_pointer;
b->stack_pointer += 2*sizeof(U64);
C({T}, data) = value;
data += sizeof(U64);
C(U64, data) = TYPE_{T};
}}
b->stack_pointer += allocation_size;
}}
force_inline void
emit_push_{t}(Bc *bc, {T} emit_value){{
force_inline void
emit_push_{t}(Bc *bc, {T} emit_value){{
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8)+sizeof({T}));
*instruction = INS_PUSH_{T};
{T} *value = ({T} *)(instruction + 1);
*value = emit_value;
}}
}}
"""
for symbol, OP in operations:
op = OP.lower()
result += f"""
force_inline void
emit_{op}_{t}(Bc *bc){{
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_{OP}_{T};
}}
"""
#
# Generate switch cases
#
if True:
if False:
for T in types:
t = T.lower()
for symbol, op_name in operations:
result += f"""
case INS_{op_name}_{T}:{{
{T} l = ins_pop_t(b, {T});
{T} r = ins_pop_t(b, {T});
{T} result = l {symbol} r;
ins_push_{t}(b, result);
}}break;
"""
result += f"""
case INS_PUSH_{T}:{{
// Fetch value from instruction
@@ -50,10 +81,11 @@ case INS_PUSH_{T}:{{
// move pointer by the type size
auto value = ({T} *)b->ins_pointer;
b->ins_pointer += sizeof({T});
stack_push_{t}(b, *value);
ins_push_{t}(b, *value);
}} break;
"""
result += """
//
// **End** of generated using code_generating_script.py