Rewrite VM; it is now only capable of working on 64-bit arithmetic

This commit is contained in:
Krzosa Karol
2022-06-20 22:14:46 +02:00
parent fd66781afb
commit 101970f62e
4 changed files with 436 additions and 651 deletions

View File

@@ -8,66 +8,58 @@ enum{
//
// Generated using code_generating_script.py
//
INS_PUSH_S64,
INS_ADD_S64,
INS_SUB_S64,
INS_DIV_S64,
INS_MUL_S64,
INS_MOD_S64,
INS_PUSH_S64,
INS_ADD_S32,
INS_SUB_S32,
INS_DIV_S32,
INS_MUL_S32,
INS_MOD_S32,
INS_PUSH_S32,
INS_ADD_S16,
INS_SUB_S16,
INS_DIV_S16,
INS_MUL_S16,
INS_MOD_S16,
INS_PUSH_S16,
INS_ADD_S8,
INS_SUB_S8,
INS_DIV_S8,
INS_MUL_S8,
INS_MOD_S8,
INS_PUSH_S8,
INS_SHR_S64,
INS_SHL_S64,
INS_BITAND_S64,
INS_BITOR_S64,
INS_BITXOR_S64,
INS_BITNOT_S64,
INS_EQ_S64,
INS_NEQ_S64,
INS_GT_S64,
INS_LT_S64,
INS_OR_S64,
INS_GTE_S64,
INS_LTE_S64,
INS_PUSH_U64,
INS_ADD_U64,
INS_SUB_U64,
INS_DIV_U64,
INS_MUL_U64,
INS_MOD_U64,
INS_PUSH_U64,
INS_ADD_U32,
INS_SUB_U32,
INS_DIV_U32,
INS_MUL_U32,
INS_MOD_U32,
INS_PUSH_U32,
INS_ADD_U16,
INS_SUB_U16,
INS_DIV_U16,
INS_MUL_U16,
INS_MOD_U16,
INS_PUSH_U16,
INS_ADD_U8,
INS_SUB_U8,
INS_DIV_U8,
INS_MUL_U8,
INS_MOD_U8,
INS_PUSH_U8,
INS_ADD_F32,
INS_SUB_F32,
INS_DIV_F32,
INS_MUL_F32,
INS_MOD_F32,
INS_PUSH_F32,
INS_SHR_U64,
INS_SHL_U64,
INS_BITAND_U64,
INS_BITOR_U64,
INS_BITXOR_U64,
INS_BITNOT_U64,
INS_EQ_U64,
INS_NEQ_U64,
INS_GT_U64,
INS_LT_U64,
INS_OR_U64,
INS_GTE_U64,
INS_LTE_U64,
INS_PUSH_F64,
INS_ADD_F64,
INS_SUB_F64,
INS_DIV_F64,
INS_MUL_F64,
INS_MOD_F64,
INS_PUSH_F64,
INS_EQ_F64,
INS_NEQ_F64,
INS_GT_F64,
INS_LT_F64,
INS_GTE_F64,
INS_LTE_F64,
//
// **End** of generated using code_generating_script.py
@@ -126,7 +118,7 @@ emit_end(Bc *bc){
}
#define ins_pop_t(b, T) (*((T *)ins_pop(b)))
static void *
force_inline void *
ins_pop(Bc *b){
assert_msg(b->stack_pointer != b->stack_bottom, "Reached bottom of bytecode interpreter stack");
b->stack_pointer -= sizeof(U64);
@@ -157,10 +149,20 @@ run_bytecode_interp(Bc *b){
goto interp_loop_breakout;
} break;
//
// Generated using code_generating_script.py
//
case INS_PUSH_S64:{
// Fetch value from the instruction.
// instructions are tightly packed so we
// move pointer by the type size
auto value = (S64 *)b->ins_pointer;
b->ins_pointer += sizeof(S64);
ins_push_s64(b, *value);
} break;
case INS_ADD_S64:{
S64 l = ins_pop_t(b, S64);
S64 r = ins_pop_t(b, S64);
@@ -196,145 +198,103 @@ case INS_MOD_S64:{
ins_push_s64(b, result);
}break;
case INS_PUSH_S64:{
// Fetch value from instruction
// Generated S64 shift/bitwise ops.
// NOTE(review): `l` pops the TOP of the stack and `r` the value beneath it,
// so non-commutative ops compute (last pushed) OP (first pushed) — confirm
// this matches the operand order the compiler emits.
case INS_SHR_S64:{
S64 l = ins_pop_t(b, S64);
S64 r = ins_pop_t(b, S64);
// Right-shifting a negative S64 is implementation-defined, and a shift
// count that is negative or >= 64 is UB — assumes operands are validated
// upstream (TODO confirm).
S64 result = l >> r;
ins_push_s64(b, result);
}break;
case INS_SHL_S64:{
S64 l = ins_pop_t(b, S64);
S64 r = ins_pop_t(b, S64);
// Left-shifting a negative value, or shifting by >= 64, is UB — TODO confirm inputs.
S64 result = l << r;
ins_push_s64(b, result);
}break;
case INS_BITAND_S64:{
S64 l = ins_pop_t(b, S64);
S64 r = ins_pop_t(b, S64);
S64 result = l & r;
ins_push_s64(b, result);
}break;
case INS_BITOR_S64:{
S64 l = ins_pop_t(b, S64);
S64 r = ins_pop_t(b, S64);
S64 result = l | r;
ins_push_s64(b, result);
}break;
case INS_BITXOR_S64:{
S64 l = ins_pop_t(b, S64);
S64 r = ins_pop_t(b, S64);
// Fix: XOR must use '^'; the previous code used '|', which made
// INS_BITXOR_S64 behave identically to INS_BITOR_S64.
S64 result = l ^ r;
ins_push_s64(b, result);
}break;
case INS_BITNOT_S64:{
S64 l = ins_pop_t(b, S64);
S64 result = ~l;
ins_push_s64(b, result);
}break;
case INS_EQ_S64:{
S64 l = ins_pop_t(b, S64);
S64 r = ins_pop_t(b, S64);
S64 result = l == r;
ins_push_s64(b, result);
}break;
case INS_NEQ_S64:{
S64 l = ins_pop_t(b, S64);
S64 r = ins_pop_t(b, S64);
S64 result = l != r;
ins_push_s64(b, result);
}break;
case INS_GT_S64:{
S64 l = ins_pop_t(b, S64);
S64 r = ins_pop_t(b, S64);
S64 result = l > r;
ins_push_s64(b, result);
}break;
case INS_LT_S64:{
S64 l = ins_pop_t(b, S64);
S64 r = ins_pop_t(b, S64);
S64 result = l < r;
ins_push_s64(b, result);
}break;
case INS_OR_S64:{
S64 l = ins_pop_t(b, S64);
S64 r = ins_pop_t(b, S64);
S64 result = l || r;
ins_push_s64(b, result);
}break;
case INS_GTE_S64:{
S64 l = ins_pop_t(b, S64);
S64 r = ins_pop_t(b, S64);
S64 result = l >= r;
ins_push_s64(b, result);
}break;
case INS_LTE_S64:{
S64 l = ins_pop_t(b, S64);
S64 r = ins_pop_t(b, S64);
S64 result = l <= r;
ins_push_s64(b, result);
}break;
case INS_PUSH_U64:{
// Fetch value from the instruction.
// instructions are tightly packed so we
// move pointer by the type size
auto value = (S64 *)b->ins_pointer;
b->ins_pointer += sizeof(S64);
ins_push_s64(b, *value);
} break;
case INS_ADD_S32:{
S32 l = ins_pop_t(b, S32);
S32 r = ins_pop_t(b, S32);
S32 result = l + r;
ins_push_s32(b, result);
}break;
case INS_SUB_S32:{
S32 l = ins_pop_t(b, S32);
S32 r = ins_pop_t(b, S32);
S32 result = l - r;
ins_push_s32(b, result);
}break;
case INS_DIV_S32:{
S32 l = ins_pop_t(b, S32);
S32 r = ins_pop_t(b, S32);
S32 result = l / r;
ins_push_s32(b, result);
}break;
case INS_MUL_S32:{
S32 l = ins_pop_t(b, S32);
S32 r = ins_pop_t(b, S32);
S32 result = l * r;
ins_push_s32(b, result);
}break;
case INS_MOD_S32:{
S32 l = ins_pop_t(b, S32);
S32 r = ins_pop_t(b, S32);
S32 result = l % r;
ins_push_s32(b, result);
}break;
case INS_PUSH_S32:{
// Fetch value from instruction
// instructions are tightly packed so we
// move pointer by the type size
auto value = (S32 *)b->ins_pointer;
b->ins_pointer += sizeof(S32);
ins_push_s32(b, *value);
} break;
case INS_ADD_S16:{
S16 l = ins_pop_t(b, S16);
S16 r = ins_pop_t(b, S16);
S16 result = l + r;
ins_push_s16(b, result);
}break;
case INS_SUB_S16:{
S16 l = ins_pop_t(b, S16);
S16 r = ins_pop_t(b, S16);
S16 result = l - r;
ins_push_s16(b, result);
}break;
case INS_DIV_S16:{
S16 l = ins_pop_t(b, S16);
S16 r = ins_pop_t(b, S16);
S16 result = l / r;
ins_push_s16(b, result);
}break;
case INS_MUL_S16:{
S16 l = ins_pop_t(b, S16);
S16 r = ins_pop_t(b, S16);
S16 result = l * r;
ins_push_s16(b, result);
}break;
case INS_MOD_S16:{
S16 l = ins_pop_t(b, S16);
S16 r = ins_pop_t(b, S16);
S16 result = l % r;
ins_push_s16(b, result);
}break;
case INS_PUSH_S16:{
// Fetch value from instruction
// instructions are tightly packed so we
// move pointer by the type size
auto value = (S16 *)b->ins_pointer;
b->ins_pointer += sizeof(S16);
ins_push_s16(b, *value);
} break;
case INS_ADD_S8:{
S8 l = ins_pop_t(b, S8);
S8 r = ins_pop_t(b, S8);
S8 result = l + r;
ins_push_s8(b, result);
}break;
case INS_SUB_S8:{
S8 l = ins_pop_t(b, S8);
S8 r = ins_pop_t(b, S8);
S8 result = l - r;
ins_push_s8(b, result);
}break;
case INS_DIV_S8:{
S8 l = ins_pop_t(b, S8);
S8 r = ins_pop_t(b, S8);
S8 result = l / r;
ins_push_s8(b, result);
}break;
case INS_MUL_S8:{
S8 l = ins_pop_t(b, S8);
S8 r = ins_pop_t(b, S8);
S8 result = l * r;
ins_push_s8(b, result);
}break;
case INS_MOD_S8:{
S8 l = ins_pop_t(b, S8);
S8 r = ins_pop_t(b, S8);
S8 result = l % r;
ins_push_s8(b, result);
}break;
case INS_PUSH_S8:{
// Fetch value from instruction
// instructions are tightly packed so we
// move pointer by the type size
auto value = (S8 *)b->ins_pointer;
b->ins_pointer += sizeof(S8);
ins_push_s8(b, *value);
auto value = (U64 *)b->ins_pointer;
b->ins_pointer += sizeof(U64);
ins_push_u64(b, *value);
} break;
case INS_ADD_U64:{
@@ -372,182 +332,103 @@ case INS_MOD_U64:{
ins_push_u64(b, result);
}break;
case INS_PUSH_U64:{
// Fetch value from instruction
case INS_SHR_U64:{
U64 l = ins_pop_t(b, U64);
U64 r = ins_pop_t(b, U64);
U64 result = l >> r;
ins_push_u64(b, result);
}break;
case INS_SHL_U64:{
U64 l = ins_pop_t(b, U64);
U64 r = ins_pop_t(b, U64);
U64 result = l << r;
ins_push_u64(b, result);
}break;
case INS_BITAND_U64:{
U64 l = ins_pop_t(b, U64);
U64 r = ins_pop_t(b, U64);
U64 result = l & r;
ins_push_u64(b, result);
}break;
case INS_BITOR_U64:{
U64 l = ins_pop_t(b, U64);
U64 r = ins_pop_t(b, U64);
U64 result = l | r;
ins_push_u64(b, result);
}break;
case INS_BITXOR_U64:{
U64 l = ins_pop_t(b, U64);
U64 r = ins_pop_t(b, U64);
// Fix: XOR must use '^'; the previous code used '|', which made
// INS_BITXOR_U64 behave identically to INS_BITOR_U64.
U64 result = l ^ r;
ins_push_u64(b, result);
}break;
case INS_BITNOT_U64:{
U64 l = ins_pop_t(b, U64);
U64 result = ~l;
ins_push_u64(b, result);
}break;
case INS_EQ_U64:{
U64 l = ins_pop_t(b, U64);
U64 r = ins_pop_t(b, U64);
U64 result = l == r;
ins_push_u64(b, result);
}break;
case INS_NEQ_U64:{
U64 l = ins_pop_t(b, U64);
U64 r = ins_pop_t(b, U64);
U64 result = l != r;
ins_push_u64(b, result);
}break;
case INS_GT_U64:{
U64 l = ins_pop_t(b, U64);
U64 r = ins_pop_t(b, U64);
U64 result = l > r;
ins_push_u64(b, result);
}break;
case INS_LT_U64:{
U64 l = ins_pop_t(b, U64);
U64 r = ins_pop_t(b, U64);
U64 result = l < r;
ins_push_u64(b, result);
}break;
case INS_OR_U64:{
U64 l = ins_pop_t(b, U64);
U64 r = ins_pop_t(b, U64);
U64 result = l || r;
ins_push_u64(b, result);
}break;
case INS_GTE_U64:{
U64 l = ins_pop_t(b, U64);
U64 r = ins_pop_t(b, U64);
U64 result = l >= r;
ins_push_u64(b, result);
}break;
case INS_LTE_U64:{
U64 l = ins_pop_t(b, U64);
U64 r = ins_pop_t(b, U64);
U64 result = l <= r;
ins_push_u64(b, result);
}break;
case INS_PUSH_F64:{
// Fetch value from the instruction.
// instructions are tightly packed so we
// move pointer by the type size
auto value = (U64 *)b->ins_pointer;
b->ins_pointer += sizeof(U64);
ins_push_u64(b, *value);
} break;
case INS_ADD_U32:{
U32 l = ins_pop_t(b, U32);
U32 r = ins_pop_t(b, U32);
U32 result = l + r;
ins_push_u32(b, result);
}break;
case INS_SUB_U32:{
U32 l = ins_pop_t(b, U32);
U32 r = ins_pop_t(b, U32);
U32 result = l - r;
ins_push_u32(b, result);
}break;
case INS_DIV_U32:{
U32 l = ins_pop_t(b, U32);
U32 r = ins_pop_t(b, U32);
U32 result = l / r;
ins_push_u32(b, result);
}break;
case INS_MUL_U32:{
U32 l = ins_pop_t(b, U32);
U32 r = ins_pop_t(b, U32);
U32 result = l * r;
ins_push_u32(b, result);
}break;
case INS_MOD_U32:{
U32 l = ins_pop_t(b, U32);
U32 r = ins_pop_t(b, U32);
U32 result = l % r;
ins_push_u32(b, result);
}break;
case INS_PUSH_U32:{
// Fetch value from instruction
// instructions are tightly packed so we
// move pointer by the type size
auto value = (U32 *)b->ins_pointer;
b->ins_pointer += sizeof(U32);
ins_push_u32(b, *value);
} break;
case INS_ADD_U16:{
U16 l = ins_pop_t(b, U16);
U16 r = ins_pop_t(b, U16);
U16 result = l + r;
ins_push_u16(b, result);
}break;
case INS_SUB_U16:{
U16 l = ins_pop_t(b, U16);
U16 r = ins_pop_t(b, U16);
U16 result = l - r;
ins_push_u16(b, result);
}break;
case INS_DIV_U16:{
U16 l = ins_pop_t(b, U16);
U16 r = ins_pop_t(b, U16);
U16 result = l / r;
ins_push_u16(b, result);
}break;
case INS_MUL_U16:{
U16 l = ins_pop_t(b, U16);
U16 r = ins_pop_t(b, U16);
U16 result = l * r;
ins_push_u16(b, result);
}break;
case INS_MOD_U16:{
U16 l = ins_pop_t(b, U16);
U16 r = ins_pop_t(b, U16);
U16 result = l % r;
ins_push_u16(b, result);
}break;
case INS_PUSH_U16:{
// Fetch value from instruction
// instructions are tightly packed so we
// move pointer by the type size
auto value = (U16 *)b->ins_pointer;
b->ins_pointer += sizeof(U16);
ins_push_u16(b, *value);
} break;
case INS_ADD_U8:{
U8 l = ins_pop_t(b, U8);
U8 r = ins_pop_t(b, U8);
U8 result = l + r;
ins_push_u8(b, result);
}break;
case INS_SUB_U8:{
U8 l = ins_pop_t(b, U8);
U8 r = ins_pop_t(b, U8);
U8 result = l - r;
ins_push_u8(b, result);
}break;
case INS_DIV_U8:{
U8 l = ins_pop_t(b, U8);
U8 r = ins_pop_t(b, U8);
U8 result = l / r;
ins_push_u8(b, result);
}break;
case INS_MUL_U8:{
U8 l = ins_pop_t(b, U8);
U8 r = ins_pop_t(b, U8);
U8 result = l * r;
ins_push_u8(b, result);
}break;
case INS_MOD_U8:{
U8 l = ins_pop_t(b, U8);
U8 r = ins_pop_t(b, U8);
U8 result = l % r;
ins_push_u8(b, result);
}break;
case INS_PUSH_U8:{
// Fetch value from instruction
// instructions are tightly packed so we
// move pointer by the type size
auto value = (U8 *)b->ins_pointer;
b->ins_pointer += sizeof(U8);
ins_push_u8(b, *value);
} break;
case INS_ADD_F32:{
F32 l = ins_pop_t(b, F32);
F32 r = ins_pop_t(b, F32);
F32 result = l + r;
ins_push_f32(b, result);
}break;
case INS_SUB_F32:{
F32 l = ins_pop_t(b, F32);
F32 r = ins_pop_t(b, F32);
F32 result = l - r;
ins_push_f32(b, result);
}break;
case INS_DIV_F32:{
F32 l = ins_pop_t(b, F32);
F32 r = ins_pop_t(b, F32);
F32 result = l / r;
ins_push_f32(b, result);
}break;
case INS_MUL_F32:{
F32 l = ins_pop_t(b, F32);
F32 r = ins_pop_t(b, F32);
F32 result = l * r;
ins_push_f32(b, result);
}break;
case INS_PUSH_F32:{
// Fetch value from instruction
// instructions are tightly packed so we
// move pointer by the type size
auto value = (F32 *)b->ins_pointer;
b->ins_pointer += sizeof(F32);
ins_push_f32(b, *value);
auto value = (F64 *)b->ins_pointer;
b->ins_pointer += sizeof(F64);
ins_push_f64(b, *value);
} break;
case INS_ADD_F64:{
@@ -578,19 +459,53 @@ case INS_MUL_F64:{
ins_push_f64(b, result);
}break;
case INS_PUSH_F64:{
// Fetch value from instruction
// instructions are tightly packed so we
// move pointer by the type size
auto value = (F64 *)b->ins_pointer;
b->ins_pointer += sizeof(F64);
ins_push_f64(b, *value);
// Generated F64 comparison ops.
// NOTE(review): the boolean result of each comparison is widened to F64
// (1.0 / 0.0) and pushed with the f64 type tag — confirm downstream code
// expects a float-typed boolean rather than an S64/U64 flag.
case INS_EQ_F64:{
F64 l = ins_pop_t(b, F64);
F64 r = ins_pop_t(b, F64);
F64 result = l == r;
ins_push_f64(b, result);
}break;
case INS_NEQ_F64:{
F64 l = ins_pop_t(b, F64);
F64 r = ins_pop_t(b, F64);
F64 result = l != r;
ins_push_f64(b, result);
}break;
case INS_GT_F64:{
F64 l = ins_pop_t(b, F64);
F64 r = ins_pop_t(b, F64);
F64 result = l > r;
ins_push_f64(b, result);
}break;
case INS_LT_F64:{
F64 l = ins_pop_t(b, F64);
F64 r = ins_pop_t(b, F64);
F64 result = l < r;
ins_push_f64(b, result);
}break;
case INS_GTE_F64:{
F64 l = ins_pop_t(b, F64);
F64 r = ins_pop_t(b, F64);
F64 result = l >= r;
ins_push_f64(b, result);
}break;
case INS_LTE_F64:{
F64 l = ins_pop_t(b, F64);
F64 r = ins_pop_t(b, F64);
F64 result = l <= r;
ins_push_f64(b, result);
}break;
//
// **End** of generated using code_generating_script.py
//
default: invalid_codepath;
}
}
@@ -600,16 +515,18 @@ case INS_PUSH_F64:{
function void
test_interpreter(){
Bc b = create_bytecode_interp();
emit_push_s64(&b, 1);
emit_push_s8(&b, 3);
emit_add_s64(&b);
emit_push_f64(&b, 64);
emit_push_f64(&b, 32);
emit_neq_f64(&b);
emit_pop(&b);
// emit_push_f64(&b, 2.42f);
// emit_push_u16(&b, 4);
// emit_pop(&b);
// emit_pop(&b);
// emit_pop(&b);
// emit_pop(&b);
emit_push_f64(&b, 64);
emit_push_f64(&b, 32);
emit_add_f64(&b);
emit_pop(&b);
emit_end(&b);
run_bytecode_interp(&b);
}

View File

@@ -54,157 +54,82 @@ emit_mod_s64(Bc *bc){
*instruction = INS_MOD_S64;
}
force_inline void
ins_push_s32(Bc *b, S32 value){
auto data = b->stack_pointer;
b->stack_pointer += 2*sizeof(U64);
C(S32, data) = value;
data += sizeof(U64);
C(U64, data) = TYPE_S32;
}
force_inline void
emit_push_s32(Bc *bc, S32 emit_value){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8)+sizeof(S32));
*instruction = INS_PUSH_S32;
S32 *value = (S32 *)(instruction + 1);
*value = emit_value;
}
force_inline void
emit_add_s32(Bc *bc){
emit_shr_s64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_ADD_S32;
*instruction = INS_SHR_S64;
}
force_inline void
emit_sub_s32(Bc *bc){
emit_shl_s64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_SUB_S32;
*instruction = INS_SHL_S64;
}
force_inline void
emit_div_s32(Bc *bc){
emit_bitand_s64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_DIV_S32;
*instruction = INS_BITAND_S64;
}
force_inline void
emit_mul_s32(Bc *bc){
emit_bitor_s64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_MUL_S32;
*instruction = INS_BITOR_S64;
}
force_inline void
emit_mod_s32(Bc *bc){
emit_bitxor_s64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_MOD_S32;
}
force_inline void
ins_push_s16(Bc *b, S16 value){
auto data = b->stack_pointer;
b->stack_pointer += 2*sizeof(U64);
C(S16, data) = value;
data += sizeof(U64);
C(U64, data) = TYPE_S16;
*instruction = INS_BITXOR_S64;
}
force_inline void
emit_push_s16(Bc *bc, S16 emit_value){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8)+sizeof(S16));
*instruction = INS_PUSH_S16;
S16 *value = (S16 *)(instruction + 1);
*value = emit_value;
}
force_inline void
emit_add_s16(Bc *bc){
emit_bitnot_s64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_ADD_S16;
*instruction = INS_BITNOT_S64;
}
force_inline void
emit_sub_s16(Bc *bc){
emit_eq_s64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_SUB_S16;
*instruction = INS_EQ_S64;
}
force_inline void
emit_div_s16(Bc *bc){
emit_neq_s64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_DIV_S16;
*instruction = INS_NEQ_S64;
}
force_inline void
emit_mul_s16(Bc *bc){
emit_gt_s64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_MUL_S16;
*instruction = INS_GT_S64;
}
force_inline void
emit_mod_s16(Bc *bc){
emit_lt_s64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_MOD_S16;
}
force_inline void
ins_push_s8(Bc *b, S8 value){
auto data = b->stack_pointer;
b->stack_pointer += 2*sizeof(U64);
C(S8, data) = value;
data += sizeof(U64);
C(U64, data) = TYPE_S8;
*instruction = INS_LT_S64;
}
force_inline void
emit_push_s8(Bc *bc, S8 emit_value){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8)+sizeof(S8));
*instruction = INS_PUSH_S8;
S8 *value = (S8 *)(instruction + 1);
*value = emit_value;
}
force_inline void
emit_add_s8(Bc *bc){
emit_or_s64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_ADD_S8;
*instruction = INS_OR_S64;
}
force_inline void
emit_sub_s8(Bc *bc){
emit_gte_s64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_SUB_S8;
*instruction = INS_GTE_S64;
}
force_inline void
emit_div_s8(Bc *bc){
emit_lte_s64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_DIV_S8;
}
force_inline void
emit_mul_s8(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_MUL_S8;
}
force_inline void
emit_mod_s8(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_MOD_S8;
*instruction = INS_LTE_S64;
}
@@ -258,208 +183,82 @@ emit_mod_u64(Bc *bc){
*instruction = INS_MOD_U64;
}
force_inline void
ins_push_u32(Bc *b, U32 value){
auto data = b->stack_pointer;
b->stack_pointer += 2*sizeof(U64);
C(U32, data) = value;
data += sizeof(U64);
C(U64, data) = TYPE_U32;
}
force_inline void
emit_push_u32(Bc *bc, U32 emit_value){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8)+sizeof(U32));
*instruction = INS_PUSH_U32;
U32 *value = (U32 *)(instruction + 1);
*value = emit_value;
}
force_inline void
emit_add_u32(Bc *bc){
emit_shr_u64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_ADD_U32;
*instruction = INS_SHR_U64;
}
force_inline void
emit_sub_u32(Bc *bc){
emit_shl_u64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_SUB_U32;
*instruction = INS_SHL_U64;
}
force_inline void
emit_div_u32(Bc *bc){
emit_bitand_u64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_DIV_U32;
*instruction = INS_BITAND_U64;
}
force_inline void
emit_mul_u32(Bc *bc){
emit_bitor_u64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_MUL_U32;
*instruction = INS_BITOR_U64;
}
force_inline void
emit_mod_u32(Bc *bc){
emit_bitxor_u64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_MOD_U32;
}
force_inline void
ins_push_u16(Bc *b, U16 value){
auto data = b->stack_pointer;
b->stack_pointer += 2*sizeof(U64);
C(U16, data) = value;
data += sizeof(U64);
C(U64, data) = TYPE_U16;
*instruction = INS_BITXOR_U64;
}
force_inline void
emit_push_u16(Bc *bc, U16 emit_value){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8)+sizeof(U16));
*instruction = INS_PUSH_U16;
U16 *value = (U16 *)(instruction + 1);
*value = emit_value;
}
force_inline void
emit_add_u16(Bc *bc){
emit_bitnot_u64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_ADD_U16;
*instruction = INS_BITNOT_U64;
}
force_inline void
emit_sub_u16(Bc *bc){
emit_eq_u64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_SUB_U16;
*instruction = INS_EQ_U64;
}
force_inline void
emit_div_u16(Bc *bc){
emit_neq_u64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_DIV_U16;
*instruction = INS_NEQ_U64;
}
force_inline void
emit_mul_u16(Bc *bc){
emit_gt_u64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_MUL_U16;
*instruction = INS_GT_U64;
}
force_inline void
emit_mod_u16(Bc *bc){
emit_lt_u64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_MOD_U16;
}
force_inline void
ins_push_u8(Bc *b, U8 value){
auto data = b->stack_pointer;
b->stack_pointer += 2*sizeof(U64);
C(U8, data) = value;
data += sizeof(U64);
C(U64, data) = TYPE_U8;
*instruction = INS_LT_U64;
}
force_inline void
emit_push_u8(Bc *bc, U8 emit_value){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8)+sizeof(U8));
*instruction = INS_PUSH_U8;
U8 *value = (U8 *)(instruction + 1);
*value = emit_value;
}
force_inline void
emit_add_u8(Bc *bc){
emit_or_u64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_ADD_U8;
*instruction = INS_OR_U64;
}
force_inline void
emit_sub_u8(Bc *bc){
emit_gte_u64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_SUB_U8;
*instruction = INS_GTE_U64;
}
force_inline void
emit_div_u8(Bc *bc){
emit_lte_u64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_DIV_U8;
}
force_inline void
emit_mul_u8(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_MUL_U8;
}
force_inline void
emit_mod_u8(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_MOD_U8;
}
force_inline void
ins_push_f32(Bc *b, F32 value){
auto data = b->stack_pointer;
b->stack_pointer += 2*sizeof(U64);
C(F32, data) = value;
data += sizeof(U64);
C(U64, data) = TYPE_F32;
}
force_inline void
emit_push_f32(Bc *bc, F32 emit_value){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8)+sizeof(F32));
*instruction = INS_PUSH_F32;
F32 *value = (F32 *)(instruction + 1);
*value = emit_value;
}
force_inline void
emit_add_f32(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_ADD_F32;
}
force_inline void
emit_sub_f32(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_SUB_F32;
}
force_inline void
emit_div_f32(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_DIV_F32;
}
force_inline void
emit_mul_f32(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_MUL_F32;
}
force_inline void
emit_mod_f32(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_MOD_F32;
*instruction = INS_LTE_U64;
}
@@ -508,9 +307,39 @@ emit_mul_f64(Bc *bc){
}
force_inline void
emit_mod_f64(Bc *bc){
emit_eq_f64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_MOD_F64;
*instruction = INS_EQ_F64;
}
// Append a 1-byte INS_NEQ_F64 opcode to the instruction stream.
force_inline void
emit_neq_f64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_NEQ_F64;
}
// Append a 1-byte INS_GT_F64 opcode to the instruction stream.
force_inline void
emit_gt_f64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_GT_F64;
}
// Append a 1-byte INS_LT_F64 opcode to the instruction stream.
force_inline void
emit_lt_f64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_LT_F64;
}
// Append a 1-byte INS_GTE_F64 opcode to the instruction stream.
force_inline void
emit_gte_f64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_GTE_F64;
}
// Append a 1-byte INS_LTE_F64 opcode to the instruction stream.
force_inline void
emit_lte_f64(Bc *bc){
U8 *instruction = (U8 *)arena_push_size(&bc->instructions, sizeof(U8));
*instruction = INS_LTE_F64;
}
//

View File

@@ -4,23 +4,38 @@ result = """
//
"""
types = ["S64", "S32", "S16", "S8", "U64", "U32", "U16", "U8", "F32", "F64"]
operations = [["+", "ADD"], ["-", "SUB"], ["/", "DIV"], ["*", "MUL"], ["%", "MOD"]]
enum = ["ADD", "SUB", "DIV", "MUL", "MOD", "PUSH"]
excludes = [["F64", "MOD"], ["F32", "MOD"]]
# Only 64-bit types remain after the VM rewrite.
types = ["S64", "U64", "F64"]
# [C operator symbol, opcode mnemonic] pairs used to generate the enum,
# the emit_* helpers and the interpreter switch cases.
# Fix: BITXOR's symbol was "|" (same as BITOR), which generated
# `l | r` for the XOR opcodes; it must be "^".
operations = [
    ["+", "ADD"], ["-", "SUB"], ["/", "DIV"], ["*", "MUL"], ["%", "MOD"],
    [">>", "SHR"], ["<<", "SHL"], ["&", "BITAND"], ["|", "BITOR"], ["^", "BITXOR"],
    ["~", "BITNOT"], ["==", "EQ"], ["!=", "NEQ"], [">", "GT"], ["<", "LT"], ["||", "OR"],
    [">=", "GTE"], ["<=", "LTE"]
]
# Per-type opcodes that are not simple operators.
enum = ["PUSH"]
def should_skip(T, op):
    """Return True when opcode `op` must not be generated for type `T`.

    F64 supports only arithmetic (except MOD) and comparisons; shift,
    bitwise, modulo and logical-or opcodes are integer-only.
    Fix: the original fell off the end returning None for integer types;
    this returns an explicit bool and replaces the long `and`-chain with
    a readable membership test (same truth table).
    """
    if T == "F64":
        return op not in ("ADD", "SUB", "MUL", "DIV",
                          "EQ", "NEQ", "GT", "LT", "GTE", "LTE")
    # Integer types support every generated opcode.
    return False
#
# Generate enum
#
if False:
if True:
for T in types:
for op in enum:
result += f" INS_{op}_{T},\n"
for _, op in operations:
if should_skip(T, op):
continue
result += f" INS_{op}_{T},\n"
result += "\n"
#
# Generate utility functions
#
if True:
if False:
for T in types:
t = T.lower()
result += f"""
@@ -47,6 +62,8 @@ emit_push_{t}(Bc *bc, {T} emit_value){{
"""
for symbol, OP in operations:
if should_skip(T, OP):
continue
op = OP.lower()
result += f"""
force_inline void
@@ -63,8 +80,42 @@ emit_{op}_{t}(Bc *bc){{
if False:
for T in types:
t = T.lower()
for symbol, op_name in operations:
#
# Push operation for type
#
result += f"""
case INS_PUSH_{T}:{{
// Fetch value from the instruction.
// instructions are tightly packed so we
// move pointer by the type size
auto value = ({T} *)b->ins_pointer;
b->ins_pointer += sizeof({T});
ins_push_{t}(b, *value);
}} break;
"""
for symbol, op_name in operations:
if should_skip(T, op_name):
continue
#
# Unary operator special case
#
if symbol == "~":
result += f"""
case INS_{op_name}_{T}:{{
{T} l = ins_pop_t(b, {T});
{T} result = {symbol}l;
ins_push_{t}(b, result);
}}break;
"""
continue
#
# Binary operation
#
result += f"""
case INS_{op_name}_{T}:{{
{T} l = ins_pop_t(b, {T});
@@ -74,17 +125,6 @@ case INS_{op_name}_{T}:{{
}}break;
"""
result += f"""
case INS_PUSH_{T}:{{
// Fetch value from instruction
// instructions are tightly packed so we
// move pointer by the type size
auto value = ({T} *)b->ins_pointer;
b->ins_pointer += sizeof({T});
ins_push_{t}(b, *value);
}} break;
"""
result += """
//
@@ -92,6 +132,7 @@ result += """
//
"""
#
# Copy to **WINDOWS** clipboard
#

View File

@@ -210,6 +210,7 @@ eval_binary(Token *pos, Token_Kind op, Value a, Value b, bool is_const){
make_sure_types_are_compatible_for_constant_evaluation(pos, &a, &b);
// @todo: Add compiler error to the invalid switch cases !!!
Value result = {};
result.type = a.type;
if(is_const){
@@ -233,9 +234,6 @@ eval_binary(Token *pos, Token_Kind op, Value a, Value b, bool is_const){
switch(op){
case TK_And: result.bool_val = a.bool_val && b.bool_val; break;
case TK_Or: result.bool_val = a.bool_val || b.bool_val; break;
case TK_BitAnd: result.bool_val = a.bool_val & b.bool_val; break;
case TK_BitOr: result.bool_val = a.bool_val | b.bool_val; break;
case TK_BitXor: result.bool_val = a.bool_val ^ b.bool_val; break;
invalid_default_case;
}
}break;