Parsing mostly complete, ordering, resolving, C++ tests

Krzosa Karol
2022-05-12 17:10:24 +02:00
parent feae74b0b9
commit 9d54ed8195
37 changed files with 2748 additions and 5341 deletions

lex.c

@@ -1,34 +1,192 @@
global Token token_end_of_stream = {0};
global Intern_String keyword_if;
global Intern_String keyword_for;
global Intern_String keyword_cast;
global Intern_String keyword_else;
global Intern_String keyword_defer;
global Intern_String keyword_do;
global Intern_String keyword_size_type;
global Intern_String keyword_size_expr;
global Intern_String keyword_const;
global Intern_String keyword_typedef;
global Intern_String keyword_return;
global Intern_String keyword_typeof;
global Intern_String keyword_while;
global Intern_String keyword_switch;
global Intern_String keyword_case;
global Intern_String keyword_struct;
global Intern_String keyword_enum;
global Intern_String keyword_union;
global U8 *first_keyword;
global U8 *last_keyword;
function Token *
token_alloc(Tokens *t){
if(t->cap == 0){
t->cap = 1024;
t->tokens = malloc(sizeof(Token)*t->cap);
}
else if(t->len+1 > t->cap){
t->cap *= 2;
t->tokens = realloc(t->tokens, sizeof(Token)*t->cap);
}
Token *result = t->tokens + t->len++;
memory_zero(result, sizeof(*result));
return result;
}
global Intern_String intern_char;
global Intern_String intern_void;
global Intern_String intern_int;
function void
init_default_keywords(Intern_Table *t){
keyword_if = intern_string(t, lit("if"));
first_keyword = keyword_if.s.str;
keyword_cast = intern_string(t, lit("cast"));
keyword_for = intern_string(t, lit("for"));
keyword_else = intern_string(t, lit("else"));
keyword_defer = intern_string(t, lit("defer"));
keyword_do = intern_string(t, lit("do"));
keyword_size_type = intern_string(t, lit("size_type"));
keyword_size_expr = intern_string(t, lit("size_expr"));
keyword_typeof = intern_string(t, lit("typeof"));
keyword_const = intern_string(t, lit("const"));
keyword_while = intern_string(t, lit("while"));
keyword_return = intern_string(t, lit("return"));
keyword_switch = intern_string(t, lit("switch"));
keyword_typedef = intern_string(t, lit("typedef"));
keyword_case = intern_string(t, lit("case"));
keyword_struct = intern_string(t, lit("struct"));
keyword_enum = intern_string(t, lit("enum"));
keyword_union = intern_string(t, lit("union"));
last_keyword = keyword_union.s.str;
intern_char = intern_string(t, lit("char"));
intern_void = intern_string(t, lit("void"));
intern_int = intern_string(t, lit("int"));
}
function B32
lex_is_keyword(Intern_String str){
B32 result = str.s.str >= first_keyword && str.s.str <= last_keyword;
return result;
}
function void
lex_advance(Lex_Stream *s){
if(*s->stream == '\n'){
s->stream++;
s->line++;
s->line_begin = s->stream;
}
else if(*s->stream == 0){
// Don't advance, end of stream
}
else{
s->stream++;
}
}
typedef enum Token_Kind{
TK_End,
TK_Mul,
TK_Div,
TK_Mod,
TK_LeftShift,
TK_RightShift,
TK_FirstMul = TK_Mul,
TK_LastMul = TK_RightShift,
TK_Add,
TK_Sub,
TK_FirstAdd = TK_Add,
TK_LastAdd = TK_Sub,
TK_Equals,
TK_LesserThenOrEqual,
TK_GreaterThenOrEqual,
TK_LesserThen,
TK_GreaterThen,
TK_NotEquals,
TK_FirstCompare = TK_Equals,
TK_LastCompare = TK_NotEquals,
TK_BitAnd,
TK_BitOr,
TK_BitXor,
TK_And,
TK_Or,
TK_FirstLogical = TK_BitAnd,
TK_LastLogical = TK_Or,
TK_Neg,
TK_Not,
TK_OpenParen,
TK_CloseParen,
TK_OpenBrace,
TK_CloseBrace,
TK_OpenBracket,
TK_CloseBracket,
TK_Comma,
TK_Pound,
TK_Question,
TK_ThreeDots,
TK_Semicolon,
TK_Dot,
TK_Colon,
TK_Assign,
TK_ColonAssign,
TK_DivAssign,
TK_MulAssign,
TK_ModAssign,
TK_SubAssign,
TK_AddAssign,
TK_AndAssign,
TK_OrAssign,
TK_XorAssign,
TK_LeftShiftAssign,
TK_RightShiftAssign,
TK_FirstAssign = TK_Assign,
TK_LastAssign = TK_RightShiftAssign,
TK_DoubleColon,
TK_At,
TK_Decrement,
TK_Increment,
TK_PostDecrement,
TK_PostIncrement,
TK_Arrow,
TK_ExprSizeof,
TK_DocComment,
TK_Comment,
TK_Identifier,
TK_StringLit,
TK_Character,
TK_Error,
TK_Float,
TK_Int,
TK_Keyword,
}Token_Kind;
typedef struct Token{
Token_Kind kind;
union{
String string;
struct{U8 *str; S64 len;};
};
union {
U64 int_val;
F64 float_val;
String error_val;
Intern_String intern_val;
};
String file;
S32 line;
U8 *line_begin;
}Token;
#include "token_array.c"
typedef struct Lex_Stream{
String stream;
S64 iter;
U8 *line_begin;
String file;
S32 line;
}Lex_Stream;
function U8
lexc(Lex_Stream *s){
return s->stream.str[s->iter];
}
function U8
lexci(Lex_Stream *s, S32 i){
return s->stream.str[s->iter+i];
}
function U8 *
lexcp(Lex_Stream *s){
return s->stream.str + s->iter;
}
function B32
@@ -57,13 +215,8 @@ lex_is_alphanumeric(U8 c){
function void
lex_set_len(Lex_Stream *s, Token *token){
assert(lexcp(s) >= token->str);
token->len = lexcp(s) - token->str;
}
function void
@@ -72,6 +225,44 @@ token_error(Token *t, String error_val){
t->error_val = error_val;
}
function void
lex_parse_u64(Token *t){
U64 result = 0;
U64 m = 1;
for(S64 i = t->len - 1; i >= 0; --i){
U64 val = t->str[i] - '0';
U64 new_val = val * m;
if((result + new_val) < result){
token_error(t, lit("Integer overflow"));
return;
}
result+=new_val;
m *= 10;
}
t->int_val = result;
}
function void
lex_advance(Lex_Stream *s){
if(s->iter >= s->stream.len){
return;
}
else if(lexc(s) == '\n'){
s->iter++;
s->line++;
s->line_begin = lexcp(s);
}
else{
s->iter++;
}
}
function Token
token_int(U64 val){
Token result = {.kind = TK_Int, .int_val=val};
return result;
}
function void
lex_parse_string(Lex_Stream *s, Token *t, U8 c){
for(;;){
@@ -89,73 +280,55 @@ lex_parse_string(Lex_Stream *s, Token *t, U8 c){
}
}
function void
lex_token_seed(Lex_Stream *s, Token *t){
t->str = s->stream;
t->file = s->filename;
t->line = s->line;
t->line_begin = s->line_begin;
}
function U64
parse_u64(U8 *str, S64 len){
U64 result = 0;
U64 m = 1;
for(S64 i = len - 1; i >= 0; --i){
U64 val = str[i] - '0';
U64 new_val = val * m;
assert_msg(result+new_val >= result, "Integer overflow!");
result+=new_val;
m *= 10;
}
return result;
}
#define CASE2(op, OpName, Assign) \
case op: \
if (lexc(s) == '=') { \
lex_advance(s); \
t.kind = Assign; \
} else { \
t.kind = OpName; \
} \
break
#define CASE3(op, OpName, Assign, Incr) \
case op: \
if (lexc(s) == '=') { \
lex_advance(s); \
t.kind = Assign; \
} else if (lexc(s) == op) { \
lex_advance(s); \
t.kind = Incr; \
} else { \
t.kind = OpName; \
} \
break
function void
lex__stream(Token_Array *array, Lex_Stream *s){
while(lexc(s)){
while(lex_is_whitespace(lexc(s)))
lex_advance(s);
Token t = {0};
t.str = lexcp(s);
t.file = s->file;
t.line = s->line;
t.line_begin = s->line_begin;
lex_advance(s);
switch(*t.str){
case 0: break;
case '@': t.kind = TK_At; break;
case '(': t.kind = TK_OpenParen; break;
case ')': t.kind = TK_CloseParen; break;
case '{': t.kind = TK_OpenBrace; break;
case '}': t.kind = TK_CloseBrace; break;
case '[': t.kind = TK_OpenBracket; break;
case ']': t.kind = TK_CloseBracket; break;
case ',': t.kind = TK_Comma; break;
case '~': t.kind = TK_Neg; break;
case '?': t.kind = TK_Question; break;
case ';': t.kind = TK_Semicolon; break;
case '#': t.kind = TK_Pound; break;
CASE2('!', TK_Not, TK_NotEquals);
CASE2('^', TK_BitXor, TK_XorAssign);
CASE2('=', TK_Assign, TK_Equals);
@@ -166,40 +339,34 @@ break
CASE3('|', TK_BitOr, TK_OrAssign, TK_Or);
#undef CASE2
#undef CASE3
case '.': {
if(lexc(s) == '.' && lexci(s,1) == '.') {
lex_advance(s); lex_advance(s);
t.kind = TK_ThreeDots;
}
else {
t.kind = TK_Dot;
}
} break;
case '<': {
if (lexc(s) == '<') {
lex_advance(s);
if (lexc(s) == '=') {
lex_advance(s);
t.kind = TK_LeftShiftAssign;
}
else {
t.kind = TK_LeftShift;
}
}
else if (lexc(s) == '=') {
lex_advance(s);
t.kind = TK_LesserThenOrEqual;
}
else {
t.kind = TK_LesserThen;
}
} break;
@@ -208,129 +375,111 @@ break
lex_advance(s);
if (lexc(s) == '=') {
lex_advance(s);
t.kind = TK_RightShiftAssign;
}
else {
t.kind = TK_RightShift;
}
}
else if (lexc(s) == '=') {
lex_advance(s);
t.kind = TK_GreaterThenOrEqual;
}
else {
t.kind = TK_GreaterThen;
}
} break;
case ':': {
if (lexc(s) == ':') {
lex_advance(s);
t.kind = TK_DoubleColon;
}
else if(lexc(s) == '='){
lex_advance(s);
t.kind = TK_ColonAssign;
}
else {
t.kind = TK_Colon;
}
} break;
case '-':{
if (lexc(s) == '=') {
lex_advance(s);
t.kind = TK_SubAssign;
}
else if (lexc(s) == '-') {
lex_advance(s);
t.kind = TK_Decrement;
}
else if (lexc(s) == '>') {
lex_advance(s);
t.kind = TK_Arrow;
}
else {
t.kind = TK_Sub;
}
} break;
case '\'':{not_implemented;} break;
case '"': {
t->kind = TK_U8Lit;
lex_parse_string(s,t,'"');
if(t->kind != TK_Error){
t->str += 1;
t->len -= 2;
t.kind = TK_StringLit;
lex_parse_string(s,&t,'"');
if(t.kind != TK_Error){
t.str += 1;
t.len -= 2;
}
t.intern_val = intern_string(&array->interns, t.string);
} break;
case '/': {
if(lexc(s) == '='){
t.kind = TK_DivAssign;
lex_advance(s);
}
else if(lexc(s) == '/'){
lex_advance(s);
t.kind = TK_Comment;
for(;;){
if(lexc(s) == '\n' || lexc(s) == 0) break;
lex_advance(s);
}
continue;
}
else if(lexc(s) == '*'){
lex_advance(s);
t.kind = TK_Comment;
for(;;){
if(lexc(s) == '*' && lexci(s,1) == '/'){
lex_advance(s);
lex_advance(s);
break;
}
else if(lexc(s) == 0){
token_error(&t, lit("Unterminated block comment"));
goto skip_continue;
}
lex_advance(s);
}
continue;
skip_continue:;
}
else {
t.kind = TK_Div;
}
} break;
case '0':case '1':case '2':case '3':case '4':
case '5':case '6':case '7':case '8':case '9':{
t.kind = TK_Int;
while(lex_is_numeric(lexc(s)))
lex_advance(s);
lex_set_len(s, &t);
lex_parse_u64(&t);
} break;
case 'A':case 'a':case 'M':case 'm':case 'B':
case 'b':case 'N':case 'n':case 'C':case 'c':case 'O':
case 'o':case 'D':case 'd':case 'P':case 'p':case 'E':
@@ -338,192 +487,162 @@ break
case 'r':case 'G':case 'g':case 'S':case 's':case 'H':
case 'h':case 'T':case 't':case 'I':case 'i':case 'U':
case 'u':case 'J':case 'j':case 'V':case 'v':case 'K':
case 'k':case 'W':case 'w':case 'L':case 'X':case 'l':
case 'x':case 'Z':case 'z':case 'Y':case 'y':case '_': {
t.kind = TK_Identifier;
while(lex_is_alphanumeric(lexc(s)) || lexc(s) == '_')
lex_advance(s);
lex_set_len(s,&t);
t.intern_val = intern_string(&array->interns, t.string);
if(lex_is_keyword(t.intern_val)){
t.kind = TK_Keyword;
}
} break;
default: {
token_error(&t, lit("Unknown token"));
}
}
if(t.len==0)
lex_set_len(s,&t);
token_array_push(array, &t);
}
}
function void
lex_add_stream(Token_Array *array, String stream, String file){
Lex_Stream s = {stream, 0, stream.str, file, 0};
lex__stream(array, &s);
}
//-----------------------------------------------------------------------------
//
//-----------------------------------------------------------------------------
function B32
token_compare(Token *t, String str){
B32 result = string_compare(t->string, str);
return result;
}
function Token_Array
lex_make_token_array(Arena *arena){
Token_Array array = token_array_make(arena);
init_default_keywords(&array.interns);
return array;
}
function B32
token_is_comment(Token *token){
B32 result = token->kind == TK_Comment || token->kind == TK_DocComment;
return result;
}
function Token *
token_get(Parser *p){
Token *token = p->tokens.tokens + p->tokens.iter;
return token;
}
function B32
intern_compare(Intern_String a, Intern_String b){
B32 result = a.s.str == b.s.str;
return result;
}
function Token *
token_is_keyword(Parser *p, Intern_String keyword){
assert(intern_is_keyword(p, keyword));
Token *t = token_get(p);
if(t->kind == TK_Keyword && intern_compare(t->intern_val, keyword)){
return t;
}
return 0;
}
function Token_Array
lex_stream(Arena *arena, String stream, String file){
Token_Array array = lex_make_token_array(arena);
lex_add_stream(&array, stream, file);
return array;
}
function void
token_advance(Parser *p){
p->tokens.iter = clamp_top_s64(p->tokens.iter + 1, p->tokens.len);
}
function Token *
token_next(Parser *p){
Token *token = token_get(p);
token_advance(p);
return token;
}
function Token *
token_match(Parser *p, Token_Kind kind){
Token *token = token_get(p);
if(token->kind == kind){
return token_next(p);
}
return 0;
}
function Token *
token_match_keyword(Parser *p, Intern_String keyword){
assert(intern_is_keyword(p, keyword));
Token *token = token_get(p);
if(token->kind == TK_Keyword && intern_compare(keyword, token->intern_val)){
return token_next(p);
}
return 0;
}
function Token *
token_expect(Parser *p, Token_Kind kind){
Token *token = token_get(p);
if(token->kind == kind){
return token_next(p);
}
parser_push_error(p, token,
"Expected token of kind: %s, got instead token of kind: %s",
token_kind_string[kind].str, token_kind_string[token->kind].str);
return 0;
}
function B32
token_is(Parser *p, Token_Kind kind){
B32 result = token_get(p)->kind == kind;
return result;
}
function Token *
token_is_assignment(Parser *p){
Token *t = token_get(p);
if(t->kind >= TK_Assign && t->kind <= TK_RightShiftAssign)
return t;
return 0;
}
function Token *
token_peek(Parser *p, S64 count){
S64 index = clamp_top_s64(p->tokens.iter + count, p->tokens.len);
Token *result = p->tokens.tokens + index;
return result;
}
function Token *
token_peek_is(Parser *p, S64 count, Token_Kind kind){
Token *token = token_peek(p, count);
if(token->kind == kind)
return token;
return 0;
}
function Token *
token_peek_is_keyword(Parser *p, S64 count, Intern_String keyword){
Token *token = token_peek(p, count);
if(token->kind == TK_Keyword){
if(intern_compare(keyword, token->intern_val)){
return token;
}
}
return 0;
}
function void
lex_restream(Token_Array *array, String stream, String file){
token_array_reset(array);
lex_add_stream(array, stream, file);
}
function void
lex_test(){
Arena *scratch = arena_begin_scratch();
String test = lit("18446744073709551616{})(@?&+-;....->,:::/**/\"Thing\"//R\n Thingy"
"\"Test_Meme\"+=-===42524 4294967295 18446744073709551615"
"for if while switch :=");
Token_Array array = lex_stream(scratch, test, lit("Test1"));
t = lex_stream(lit("_identifier Thing Thing2 lit(\"String_Test\")"), lit("test"));
//tokens_print(t);
assert(t.tokens[0].kind == TK_Identifier);
assert(t.tokens[1].kind == TK_Identifier);
assert(t.tokens[2].kind == TK_Identifier);
assert(t.tokens[3].kind == TK_StringLit);
assert(token_compare(t.tokens, lit("_identifier")));
assert(token_compare(t.tokens+1, lit("Thing")));
assert(token_compare(t.tokens+2, lit("Thing2")));
assert(token_compare(t.tokens+3, lit("String_Test")));
Token_Kind kind[] = {
TK_Error,TK_OpenBrace,TK_CloseBrace,TK_CloseParen,TK_OpenParen,
TK_At,TK_Question,TK_BitAnd,TK_Add,TK_Sub,TK_Semicolon,
TK_ThreeDots, TK_Dot, TK_Arrow, TK_Comma, TK_DoubleColon, TK_Colon,
TK_StringLit, TK_Identifier, TK_StringLit, TK_AddAssign, TK_SubAssign,
TK_Equals, TK_Int, TK_Int, TK_Int, TK_Keyword, TK_Keyword,
TK_Keyword, TK_Keyword, TK_ColonAssign, TK_End
};
String strs[] = {
lit("18446744073709551616"),lit("{"),lit("}"),lit(")"),lit("("),
lit("@"),lit("?"),lit("&"),lit("+"),lit("-"),lit(";"),
lit("..."),lit("."),lit("->"),lit(","),lit("::"),lit(":"),
lit("Thing"),lit("Thingy"),lit("Test_Meme"), lit("+="),lit("-="),
lit("=="),lit("42524"),lit("4294967295"),lit("18446744073709551615"),
lit("for"), lit("if"), lit("while"), lit("switch"), lit(":="), lit(""),
};
U64 vals[] = {
42524, 4294967295, 18446744073709551615llu
};
int i = 0;
int ui = 0;
for(Token *t = token_array_iter_begin(&array); t->kind != TK_End; t = token_array_iter_next(&array)){
assert(t->kind == kind[i]);
assert(string_compare(t->string, strs[i++]));
if(t->kind == TK_Int){
assert(t->int_val == vals[ui++]);
}
}
arena_end_scratch();
t = lex_stream(lit("lit(\"String_Test\"{})(324*=+=-/ *% // Comment \n"
"Thing /*Thing*/ += -= =- +/%^&*&&|| |>> << <<= >>=/*Error"),
lit("test"));
assert(t.tokens[0].kind == TK_Error);
//tokens_print(t);
}
//-----------------------------------------------------------------------------
// Token metadata
//-----------------------------------------------------------------------------
global const char *token_kind_string[] = {
[TK_End] = "End of stream",
[TK_Mul] = "*",
[TK_Div] = "/",
[TK_Add] = "+",
[TK_Sub] = "-",
[TK_Mod] = "%",
[TK_BitAnd] = "&",
[TK_BitOr] = "|",
[TK_BitXor] = "^",
[TK_Neg] = "~",
[TK_Not] = "!",
[TK_OpenParen] = "(",
[TK_CloseParen] = ")",
[TK_OpenBrace] = "{",
[TK_CloseBrace] = "}",
[TK_OpenBracket] = "[",
[TK_CloseBracket] = "]",
[TK_Comma] = ",",
[TK_Pound] = "#",
[TK_Question] = "?",
[TK_ThreeDots] = "...",
[TK_Semicolon] = ";",
[TK_Dot] = ".",
[TK_LesserThen] = "<",
[TK_GreaterThen] = ">",
[TK_Colon] = ":",
[TK_Assign] = "=",
[TK_ColonAssign] = ":=",
[TK_DivAssign] = "/=",
[TK_MulAssign] = "*=",
[TK_ModAssign] = "%=",
[TK_SubAssign] = "-=",
[TK_AddAssign] = "+=",
[TK_AndAssign] = "&=",
[TK_OrAssign] = "|=",
[TK_XorAssign] = "^=",
[TK_LeftShiftAssign] = "<<=",
[TK_RightShiftAssign] = ">>=",
[TK_DoubleColon] = "::",
[TK_At] = "@",
[TK_Decrement] = "--",
[TK_Increment] = "++",
[TK_PostDecrement] = "--",
[TK_PostIncrement] = "++",
[TK_LesserThenOrEqual] = "<=",
[TK_GreaterThenOrEqual] = ">=",
[TK_Equals] = "==",
[TK_And] = "&&",
[TK_Or] = "||",
[TK_NotEquals] = "!=",
[TK_LeftShift] = "<<",
[TK_RightShift] = ">>",
[TK_Arrow] = "->",
[TK_ExprSizeof] = "sizeof",
[TK_DocComment] = "DocComment",
[TK_Comment] = "Comment",
[TK_Identifier] = "Identifier",
[TK_StringLit] = "StringLit",
[TK_Character] = "Character",
[TK_Error] = "Error",
[TK_Float] = "Float",
[TK_Int] = "Int",
[TK_Keyword] = "Keyword",
};