318 lines
7.5 KiB
C++
318 lines
7.5 KiB
C++
/*
|
|
We use two's complement
|
|
|
|
- Adding the value, sub is similar
|
|
Adding a positive number to a negative number is actually subtracting the value.
|
|
The top (sign) bit represents the biggest possible negative number for that bit range.
|
|
0111 1111 => 127
|
|
1111 1111 => -128 + 127 = -1
|
|
|
|
1000 0000 => -128
|
|
if we add 4 we get
|
|
1000 0100 => -124
|
|
|
|
- Negating the value
|
|
1000 0001 => -127
|
|
flip bits
|
|
0111 1110 => 126
|
|
we need to add 1
|
|
0111 1111 -> 127
|
|
|
|
- Division
|
|
We don't need full 128 bit divide
|
|
we just need to make sure we always divide
|
|
smaller than U64MAX and -U64MAX
|
|
10 / -1 = -10
|
|
10 / 1 = 10
|
|
-10 / -1 = 10
|
|
-10 / 1 = -10
|
|
*/
|
|
|
|
// 128-bit signed integer in two's complement, stored as two 64-bit halves.
// hi carries the upper 64 bits (including the sign bit at bit 127),
// lo carries the lower 64 bits.
struct S128{
    U64 hi, lo;
};
|
|
|
|
function S128
s128(U64 hi, U64 lo){
    // Assemble an S128 from its two raw 64-bit halves.
    S128 result;
    result.hi = hi;
    result.lo = lo;
    return result;
}
|
|
|
|
function S64
is_negative(S128 v){
    // Extract the sign bit (bit 127): 1 when the value is negative, 0 otherwise.
    U64 sign_bit = v.hi >> 63;
    return (S64)sign_bit;
}
|
|
|
|
function S64
sign(S128 val){
    // -1 for negative values, +1 otherwise (zero counts as positive).
    if(is_negative(val)){
        return -1;
    }
    return 1;
}
|
|
|
|
function B32
has_value_bigger_then_64bit(S128 v){
    // True when any magnitude bit above the sign bit is set, i.e. the
    // value does not fit in 64 bits.
    // NOTE(review): a sign-extended negative value (hi == all ones) also
    // reports true here — confirm callers only query non-negative values.
    U64 high_magnitude = v.hi << 1; // drop the sign bit
    B32 result = (high_magnitude != 0);
    return result;
}
|
|
|
|
function S128
s128_from_s64(S64 value){
    // Sign-extend a 64-bit signed value into 128 bits: the high half is
    // all ones for negatives, all zeros otherwise.
    U64 extension = (value < 0) ? ~0ull : 0ull;
    S128 result = {extension, (U64)value};
    return result;
}
|
|
|
|
function S128
s128_from_u64(U64 value){
    // Zero-extend a 64-bit unsigned value into 128 bits.
    S128 result;
    result.hi = 0;
    result.lo = value;
    return result;
}
|
|
|
|
function S128
operator+(S128 a, S128 b){
    // 128-bit add: sum the low halves first, then propagate the carry
    // into the high halves. A carry happened iff the low sum wrapped
    // around below a.lo.
    U64 low_sum = a.lo + b.lo;
    U64 carry = (low_sum < a.lo) ? 1 : 0;
    U64 high_sum = a.hi + b.hi + carry;
    return {high_sum, low_sum};
}
|
|
|
|
function S128
operator-(S128 a, S128 b){
    // 128-bit subtract: difference of the low halves first, then
    // propagate the borrow. A borrow happened iff the low difference
    // wrapped around above a.lo.
    U64 low_diff = a.lo - b.lo;
    U64 borrow = (a.lo < low_diff) ? 1 : 0;
    U64 high_diff = a.hi - b.hi - borrow;
    return {high_diff, low_diff};
}
|
|
|
|
// 32-bit half extraction, used by the schoolbook long multiplication below.
force_inline U64 lo32(U64 a){return a & 0xffffffff;}
force_inline U64 hi32(U64 a){return a >> 32;}

// Bitwise operators applied independently to each 64-bit half.
force_inline S128 operator~(S128 a){return {~a.hi, ~a.lo};}
force_inline S128 operator^(S128 a, S128 b){return {a.hi ^ b.hi, a.lo ^ b.lo};}
force_inline S128 operator&(S128 a, S128 b){return {a.hi & b.hi, a.lo & b.lo};}
force_inline S128 operator|(S128 a, S128 b){return {a.hi | b.hi, a.lo | b.lo};}
|
|
|
|
function S128
operator-(S128 a){
    // Two's-complement negation: flip every bit, then add one.
    S128 flipped = ~a;
    S128 result = flipped + s128_from_u64(1);
    return result;
}
|
|
|
|
/*
|
|
2022.05.06 - Mod and Div
|
|
@hack: only handles values in the range -U64MAX to U64MAX
|
|
|
|
modulo/division can only overflow in one case for ints: S64MIN % -1 or S64MIN / -1
|
|
because the sign flips and MAX is smaller in magnitude than MIN
|
|
so we do the arithmetic in UINT and convert back
|
|
*/
|
|
|
|
function S128
s128_div_hack(S128 a, S128 b){
    // Signed divide reduced to a single unsigned 64-bit division:
    // strip the signs, divide the magnitudes, then re-apply the
    // combined sign (negative iff exactly one operand was negative).
    // @hack: both magnitudes must fit in 64 bits (asserted below).
    B32 negate = 0;
    if(is_negative(a)){
        negate = !negate;
        a = -a;
    }
    if(is_negative(b)){
        negate = !negate;
        b = -b;
    }
    assert(a.hi == 0 && b.hi == 0);
    U64 quotient = a.lo / b.lo;
    S128 result = {0, quotient};
    if(negate) result = -result;
    return result;
}
|
|
|
|
function S128
s128_mod_hack(S128 a, S128 b){
    // Signed modulo reduced to a single unsigned 64-bit remainder.
    // C-style truncated modulo: the result takes the sign of the
    // DIVIDEND only — the divisor's sign does not affect it
    // (e.g. -10 % 3 == -1, but 10 % -3 == 1, matching C's % for ints).
    // BUGFIX: previously a negative divisor also flipped the result's
    // sign, giving 10 % -3 == -1.
    // @hack: both magnitudes must fit in 64 bits (asserted below).
    S64 sign = 1;
    if(is_negative(a)){
        sign = -1; // remainder inherits the dividend's sign
        a = -a;
    }
    if(is_negative(b)){
        b = -b;    // divisor's sign is irrelevant to the remainder
    }
    assert(a.hi == 0 && b.hi == 0);
    U64 remainder = a.lo % b.lo;
    S128 result = {0, remainder};
    if(sign == -1) result = -result;
    return result;
}
|
|
|
|
// Full 64x64 -> 128-bit unsigned multiply, built from four 32x32 -> 64-bit
// partial products (schoolbook long multiplication; cf. Hacker's Delight).
function S128
u64_mul(U64 u, U64 v){
    U64 u_lo = lo32(u);
    U64 v_lo = lo32(v);
    U64 u_hi = hi32(u);
    U64 v_hi = hi32(v);

    U64 t1 = u_lo * v_lo;              // low x low partial product
    U64 t2 = (u_hi * v_lo) + hi32(t1); // high x low, plus carry out of t1
    U64 t3 = (u_lo * v_hi) + lo32(t2); // low x high, plus low half of t2

    U64 lo = (t3 << 32) + lo32(t1);                // assemble the low 64 bits
    U64 hi = (u_hi * v_hi) + hi32(t2) + hi32(t3);  // high product plus both carries
    return {hi,lo};
}
|
|
|
|
function S128
operator*(S128 a, S128 b){
    // 128-bit multiply modulo 2^128: take the full product of the low
    // halves, then fold the two cross terms into the high half (their
    // upper bits fall outside 128 bits and are discarded).
    S128 product = u64_mul(a.lo, b.lo);
    U64 cross_terms = (a.hi * b.lo) + (a.lo * b.hi);
    product.hi += cross_terms;
    return product;
}
|
|
|
|
function B32
operator==(S128 a, S128 b){
    // Equal iff both halves match.
    if(a.hi != b.hi){
        return 0;
    }
    return a.lo == b.lo;
}
|
|
|
|
function B32
operator!=(S128 a, S128 b){
    // Logical inverse of operator==.
    B32 equal = (a == b);
    return equal ? 0 : 1;
}
|
|
|
|
function B32
operator<(S128 a, S128 b){
    // Signed 128-bit compare: the high halves decide, compared as
    // signed 64-bit values so the sign bit orders correctly; equal
    // highs fall through to an unsigned compare of the low halves.
    if(a.hi == b.hi){
        return a.lo < b.lo;
    }
    return (S64)a.hi < (S64)b.hi;
}
|
|
|
|
function B32
operator>(S128 a, S128 b){
    // Mirror of operator<: signed compare of the high halves decides;
    // equal highs fall through to an unsigned compare of the low halves.
    if(a.hi == b.hi){
        return a.lo > b.lo;
    }
    return (S64)a.hi > (S64)b.hi;
}
|
|
|
|
// Compile-time min/max bounds of every integer width, widened to S128.
// Consumed by is_out_of_bounds below to range-check values.
constexpr S128 const_s128_from_s64(S64 value){return {value < 0 ? ~0 : 0ull, (U64)value};} // compile-time sign extension
constexpr S128 S128_max(){return {S64MAX, U64MAX};}
constexpr S128 S128_min(){return {(U64)S64MIN, 0};}
constexpr S128 S64_max() {return const_s128_from_s64(S64MAX);}
constexpr S128 S64_min() {return const_s128_from_s64(S64MIN);}
constexpr S128 S32_max() {return const_s128_from_s64(S32MAX);}
constexpr S128 S32_min() {return const_s128_from_s64(S32MIN);}
constexpr S128 S16_max() {return const_s128_from_s64(S16MAX);}
constexpr S128 S16_min() {return const_s128_from_s64(S16MIN);}
constexpr S128 S8_max()  {return const_s128_from_s64(S8MAX);}
constexpr S128 S8_min()  {return const_s128_from_s64(S8MIN);}
constexpr S128 U64_max() {return {0, U64MAX};}
constexpr S128 U64_min() {return {};} // unsigned minimum is always zero
constexpr S128 U32_max() {return {0, U32MAX};}
constexpr S128 U32_min() {return {};}
constexpr S128 U16_max() {return {0, U16MAX};}
constexpr S128 U16_min() {return {};}
constexpr S128 U8_max()  {return {0, U8MAX};}
constexpr S128 U8_min()  {return {};}
|
|
|
|
function B32
is_out_of_bounds(S128 value, Ast_Type *type){
    // Returns true when 'value' cannot be represented by the integer
    // type described by 'type'; asserts on non-integer type kinds.
    #define BOUNDS_CASE(T) case TYPE_##T: return (value < T##_min()) || (value > T##_max())
    switch(type->kind){
        BOUNDS_CASE(S8);
        BOUNDS_CASE(S16);
        BOUNDS_CASE(S32);
        BOUNDS_CASE(S64);
        BOUNDS_CASE(U8);
        BOUNDS_CASE(U16);
        BOUNDS_CASE(U32);
        BOUNDS_CASE(U64);
        invalid_default_case;
    }
    #undef BOUNDS_CASE // don't leak the helper macro past this function
    return 0;
}
|
|
|
|
// Smoke tests for the S128 arithmetic above; asserts on any failure.
function void
test_big_int(){
    // Sign extension and sign queries.
    S128 v1 = s128_from_s64(-1);
    assert(v1.hi == U64MAX && v1.lo == U64MAX);
    assert(is_negative(v1));
    assert(sign(v1) == -1);

    // Carry propagation in addition.
    S128 v2 = U64_max() + s128_from_u64(1);
    assert(v2.lo == 0 && v2.hi == 1);

    S128 v3 = U64_max() + s128_from_u64(100);
    assert(v3.lo == 99 && v3.hi == 1);

    // S64 s64_max = S64MAX;
    S128 v4 = S64_min() + s128_from_u64(100);
    assert((v4.lo - S64MAX) == 101); // 101 because |S64MIN| is larger than S64MAX by 1

    // Borrow propagation in subtraction.
    S128 v5 = {1, 0};
    assert(has_value_bigger_then_64bit(v5));
    S128 v6 = v5 - s128_from_u64(1);
    assert(v6.lo == U64MAX && v6.hi == 0);

    // Full 64x64 multiplies with known 128-bit results.
    {
        S128 v7 = u64_mul(S64MAX, S64MAX);
        assert(v7.hi == 0x3fffffffffffffff && v7.lo == 0x0000000000000001);
    }

    {
        S128 v7 = u64_mul(S64MIN, S64MIN);
        assert(v7.hi == 0x4000000000000000 && v7.lo == 0);
        assert(has_value_bigger_then_64bit(v7));
    }

    {
        S128 v7 = u64_mul(U64MAX, U64MAX);
        assert(v7.hi == 0xfffffffffffffffe && v7.lo == 0x0000000000000001);
        assert(has_value_bigger_then_64bit(v7));
    }

    {
        S128 v7 = u64_mul(52242, 2);
        assert(v7.lo == 52242*2);
        assert(!has_value_bigger_then_64bit(v7));
    }

    {
        S128 v7 = u64_mul(0, 0);
        assert(v7 == s128_from_u64(0));
        assert(!has_value_bigger_then_64bit(v7));
    }

    // Signed division via the 64-bit hack.
    S128 v8 = s128_div_hack(-U64_max(), s128_from_s64(-1));
    assert(v8.lo == U64MAX && v8.hi == 0);

    S128 v9 = s128_div_hack(S64_max(), S64_max());
    assert(v9 == s128_from_s64(1));

    S128 v10 = s128_div_hack(-S64_max(), S64_max());
    assert(v10 == -s128_from_s64(1));

    S128 v11 = s128_div_hack(S64_min(), S64_max());
    assert(v11 == -s128_from_u64(1));

    {
        S128 v12 = s128_div_hack(s128_from_s64(-100), s128_from_s64(10));
        assert(v12 == -s128_from_u64(10));
    }

    // Signed ordering, including wrap-around near S64MIN.
    {
        assert(s128_from_s64(32) > s128_from_s64(16));
        assert(s128_from_s64(-32) < s128_from_s64(-16));
        assert(s128_from_s64(S64MIN) < (s128_from_s64(S64MIN)+s128_from_s64(10)));
        assert(s128_from_s64(S64MIN) > (s128_from_s64(S64MIN)-s128_from_s64(10)));
        assert(S64_min()*s128_from_u64(4) < S64_min());
    }
    // Range checks against AST-described integer types.
    {
        assert(!is_out_of_bounds(S64_max(), type_s64));
        assert(is_out_of_bounds(s128_from_s64(-19), type_u64));
        assert(!is_out_of_bounds(s128_from_s64(-19), type_s64));
        assert(is_out_of_bounds(U64_max(), type_s64));
    }
}