-rwxr-xr-x  bxgen.c  366
1 files changed, 284 insertions, 82 deletions
@@ -273,6 +273,7 @@ enum {
   ARCH_X86_64,
   ARCH_ARM32,
   ARCH_ARM64,
+  ARCH_WASM,
 
   // Relocations
   //
@@ -385,9 +386,15 @@ typedef struct {
 typedef struct {
   b8  emit_done;
 
-  i64 offset;
+  i64 inst_begin;
+  i64 inst_end;
+  i64 proc_offset;
   i64 reg;
   u64 occupied_reg;
+  i32 phi_index;
+  i64 phi_offset;
+  i64 jmp_offset;
+  i64 branch;
 } Codegen_Entity;
 
 typedef struct {
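The new Codegen_Entity fields are consumed in several places further down, so an annotated sketch may help while reading; the comments summarize how the rest of this diff appears to use each field and are not part of the change itself.

    #include <stdint.h>

    typedef int8_t   b8;   /* mirrors bxgen.c's fixed-width aliases */
    typedef int32_t  i32;
    typedef int64_t  i64;
    typedef uint64_t u64;

    typedef struct {
      b8  emit_done;

      i64 inst_begin;   /* first byte of this node's code in buffer_code (set by emit_proc)     */
      i64 inst_end;     /* one past the last byte; used with inst_begin by dump_binary           */
      i64 proc_offset;  /* for proc entities: start of the procedure's code (was `offset`)       */

      i64 reg;
      u64 occupied_reg;

      i32 phi_index;    /* 1-based position among a phi's operands, 0 when not a phi operand     */
      i64 phi_offset;   /* start of the 7-byte mov+jmp slot reserved after a phi operand         */
      i64 jmp_offset;   /* for OP_IF: start of the 2-byte slot later patched into a je           */
      i64 branch;       /* for phi operands: the OP_IF node whose je targets this operand        */
    } Codegen_Entity;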
@@ -476,19 +483,18 @@ void pool_remove(Pool *pool, i64 entity, u16 type);
 i64 node_init (Pool *pool, Node data);
 void node_destroy(Pool *pool, i64 node);
 
-i64 node_data_reference(Pool *pool, i64 node);
-i64 node_data_array_c8 (Pool *pool, i64 size, c8 *data);
-i64 node_data_ptr (Pool *pool, u64 address);
-i64 node_data_i32 (Pool *pool, i32 value);
-i64 node_data_i64 (Pool *pool, i64 value);
-i64 node_data_add (Pool *pool, Var x, Var y);
-i64 node_data_sub (Pool *pool, Var x, Var y);
-
-i64 node_ctrl_call (Pool *pool, i64 target_proc, i64 num_args, Var *args);
-i64 node_ctrl_call_by_name(Pool *pool, i64 name_size, c8 *name, i64 num_args, Var *args);
-i64 node_ctrl_if (Pool *pool, Var condition);
-i64 node_ctrl_ret (Pool *pool, i64 num_values, Var *values);
-i64 node_ctrl_phi (Pool *pool, i64 branch, i64 num_vals, Var *vals);
+i64 node_address (Pool *pool, i64 node);
+i64 node_array_c8 (Pool *pool, i64 size, c8 *data);
+i64 node_ptr (Pool *pool, u64 address);
+i64 node_i32 (Pool *pool, i32 value);
+i64 node_i64 (Pool *pool, i64 value);
+i64 node_add (Pool *pool, Var x, Var y);
+i64 node_sub (Pool *pool, Var x, Var y);
+i64 node_call (Pool *pool, i64 target_proc, i64 num_args, Var *args);
+i64 node_call_by_name(Pool *pool, i64 name_size, c8 *name, i64 num_args, Var *args);
+i64 node_if (Pool *pool, Var condition);
+i64 node_ret (Pool *pool, i64 num_values, Var *values);
+i64 node_phi (Pool *pool, i64 branch, i64 num_vals, Var *vals);
 
 i64 proc_init (Pool *pool);
 void proc_destroy (Pool *pool, i64 proc);
@@ -525,7 +531,7 @@ void io_chmod_exe (i64 f, void *user_data);
 
 #ifndef DISABLE_HELPERS
 
-i64 n_ref(i64 proc, i64 node);
+i64 n_address(i64 proc, i64 node);
 i64 n_ptr(i64 proc, u64 address);
 i64 n_str(i64 proc, c8 *value);
 i64 n_i32(i64 proc, i32 value);
@@ -875,8 +881,9 @@ u8 *chunk(Pool *pool, Chunk_Handle h) {
 #define CHUNK(pool_, handle_, type_) ((type_ *) chunk((pool_), (handle_)))
 
 b8 entity_enabled(Pool *pool, i64 id) {
+  if (id == UNDEFINED)
+    return 0;
   CHECK(pool != NULL, "Sanity", 0);
-  CHECK(id != UNDEFINED, "Undefined", 0);
   CHECK(id > 0 && id < pool->entities.size / (i64) sizeof(Entity), "Invalid id", 0);
   Entity *entities = CHUNK(pool, pool->entities, Entity);
   CHECK(entities != NULL, "Internal", 0);
@@ -933,7 +940,7 @@ Node *node_by_id(Pool *pool, i64 id) {
 
   CHECK(id != UNDEFINED, "Undefined", &entities->node);
   CHECK(id > UNDEFINED && id < num_entities, "Buffer overflow", &entities->node);
-  CHECK(entities[id].is_enabled, "No node", &entities->node);
+  CHECK(entities[id].is_enabled, "Sanity", &entities->node);
   CHECK(entities[id].type == ENTITY_NODE, "Invalid entity type", &entities->node);
 
   return &entities[id].node;
@@ -948,7 +955,7 @@ Proc *proc_by_id(Pool *pool, i64 id) {
 
   CHECK(id != UNDEFINED, "Undefined", &entities->proc);
   CHECK(id > UNDEFINED && id < num_entities, "Buffer overflow", &entities->proc);
-  CHECK(entities[id].is_enabled, "No proc", &entities->proc);
+  CHECK(entities[id].is_enabled, "Sanity", &entities->proc);
   CHECK(entities[id].type == ENTITY_PROC, "Invalid entity type", &entities->proc);
 
   return &entities[id].proc;
@@ -963,7 +970,7 @@ Unit *unit_by_id(Pool *pool, i64 id) {
 
   CHECK(id != UNDEFINED, "Undefined", &entities->unit);
   CHECK(id > UNDEFINED && id < num_entities, "Buffer overflow", &entities->unit);
-  CHECK(entities[id].is_enabled, "No unit", &entities->unit);
+  CHECK(entities[id].is_enabled, "Sanity", &entities->unit);
   CHECK(entities[id].type == ENTITY_UNIT, "Invalid entity type", &entities->unit);
 
   return &entities[id].unit;
@@ -983,42 +990,42 @@ void node_destroy(Pool *pool, i64 node) {
   pool_remove(pool, node, ENTITY_NODE);
 }
 
-i64 node_data_reference(Pool *pool, i64 node) {
+i64 node_address(Pool *pool, i64 node) {
   return node_init(pool, (Node) {
     .op = OP_ADDRESS,
     .ref = node,
   });
 }
 
-i64 node_data_array_c8(Pool *pool, i64 size, c8 *data) {
+i64 node_array_c8(Pool *pool, i64 size, c8 *data) {
   return node_init(pool, (Node) {
     .op = OP_I8,
    .lit = chunk_add(pool, size, data),
   });
 }
 
-i64 node_data_ptr(Pool *pool, u64 address) {
+i64 node_ptr(Pool *pool, u64 address) {
   return node_init(pool, (Node) {
     .op = OP_PTR,
     .lit = chunk_add(pool, sizeof address, &address),
   });
 }
 
-i64 node_data_i32(Pool *pool, i32 value) {
+i64 node_i32(Pool *pool, i32 value) {
   return node_init(pool, (Node) {
     .op = OP_I32,
     .lit = chunk_add(pool, sizeof value, &value),
   });
 }
 
-i64 node_data_i64(Pool *pool, i64 value) {
+i64 node_i64(Pool *pool, i64 value) {
   return node_init(pool, (Node) {
     .op = OP_I64,
     .lit = chunk_add(pool, sizeof value, &value),
   });
 }
 
-i64 node_data_add(Pool *pool, Var x, Var y) {
+i64 node_add(Pool *pool, Var x, Var y) {
   return node_init(pool, (Node) {
     .op = OP_ADD,
     .bin_op = {
@@ -1028,7 +1035,7 @@ i64 node_data_add(Pool *pool, Var x, Var y) {
   });
 }
 
-i64 node_data_sub(Pool *pool, Var x, Var y) {
+i64 node_sub(Pool *pool, Var x, Var y) {
   return node_init(pool, (Node) {
     .op = OP_SUB,
     .bin_op = {
@@ -1048,7 +1055,7 @@ u16 resolve_calling_convention(Pool *pool, i64 proc) {
   return p->convention;
 }
 
-i64 node_ctrl_call(Pool *pool, i64 target_proc, i64 num_args, Var *args) {
+i64 node_call(Pool *pool, i64 target_proc, i64 num_args, Var *args) {
   return node_init(pool, (Node) {
     .op = OP_CALL,
     .call = {
@@ -1059,7 +1066,7 @@ i64 node_ctrl_call(Pool *pool, i64 target_proc, i64 num_args, Var *args) {
   });
 }
 
-i64 node_ctrl_call_by_name(Pool *pool, i64 name_size, c8 *name, i64 num_args, Var *args) {
+i64 node_call_by_name(Pool *pool, i64 name_size, c8 *name, i64 num_args, Var *args) {
   return node_init(pool, (Node) {
     .op = OP_CALL,
     .call = {
@@ -1071,7 +1078,7 @@ i64 node_ctrl_call_by_name(Pool *pool, i64 name_size, c8 *name, i64 num_args, Va
-i64 node_ctrl_if(Pool *pool, Var condition) {
+i64 node_if(Pool *pool, Var condition) {
   return node_init(pool, (Node) {
     .op = OP_IF,
     .if_ = {
@@ -1080,7 +1087,7 @@ i64 node_ctrl_if(Pool *pool, Var condition) {
   });
 }
 
-i64 node_ctrl_ret(Pool *pool, i64 num_values, Var *values) {
+i64 node_ret(Pool *pool, i64 num_values, Var *values) {
   return node_init(pool, (Node) {
     .op = OP_RET,
     .ret = {
@@ -1089,7 +1096,7 @@ i64 node_ctrl_ret(Pool *pool, i64 num_values, Var *values) {
   });
 }
 
-i64 node_ctrl_phi(Pool *pool, i64 branch, i64 num_vals, Var *vals) {
+i64 node_phi(Pool *pool, i64 branch, i64 num_vals, Var *vals) {
   return node_init(pool, (Node) {
     .op = OP_PHI,
     .phi = {
@@ -1134,7 +1141,7 @@ void proc_node_add(Pool *pool, i64 proc, i64 node) {
   Node *n = node_by_id(pool, node);
 
   CHECK(p != NULL, "No proc",);
-  CHECK(n != NULL, "No node",);
+  CHECK(n != NULL, "Sanity", );
 
   CHECK(n->index_in_proc == UNDEFINED, "Internal",);
@@ -1166,7 +1173,7 @@ void proc_node_remove(Pool *pool, i64 proc, i64 node) {
   Node *n = node_by_id(pool, node);
 
   CHECK(p != NULL, "No proc",);
-  CHECK(n != NULL, "No node",);
+  CHECK(n != NULL, "Sanity", );
 
   i64 *nodes = CHUNK(pool, p->nodes, i64);
@@ -2083,6 +2090,32 @@ b8 x86_64_emit_i32_sub_reg_val(
   return 1;
 }
 
+b8 x86_64_emit_cmp_reg_zero(
+  Pool * pool,
+  Codegen_Context *codegen,
+  i64 dst
+) {
+  CHECK(pool != NULL, "Sanity", 0);
+  CHECK(codegen != NULL, "Sanity", 0);
+
+  u8 *begin = CHUNK(pool, codegen->buffer_code, u8) + codegen->offset_code;
+  u8 *end = begin + codegen->buffer_code.size;
+
+  write_u8(LE, 0x83, begin, end);
+  switch (dst) {
+    case EAX: write_u8(LE, 0xf8, begin + 1, end); break;
+    case EBX: write_u8(LE, 0xfb, begin + 1, end); break;
+    case ECX: write_u8(LE, 0xf9, begin + 1, end); break;
+    case EDX: write_u8(LE, 0xfa, begin + 1, end); break;
+    case ESI: write_u8(LE, 0xfe, begin + 1, end); break;
+    case EDI: write_u8(LE, 0xff, begin + 1, end); break;
+    default: FAIL("Not implemented", 0);
+  }
+  write_u8(LE, 0, begin + 2, end);
+  codegen->offset_code += 3;
+  return 1;
+}
+
 b8 x86_64_emit_i32_op_reg_args(
   Pool * pool,
   Codegen_Context *codegen,
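A side note on the new x86_64_emit_cmp_reg_zero: the three bytes it writes are `83 /7 ib`, i.e. `cmp r/m32, imm8` with an immediate of 0, and the per-register byte in the switch is just the ModRM byte (mod = 11, reg field = /7, rm = hardware register number). If the register were identified by its hardware number, the switch could collapse into one expression; a sketch under that assumption — bxgen's EAX..EDI constants look like an internal enum, so this is not a drop-in replacement:

    #include <stdint.h>
    #include <stdio.h>

    /* ModRM for "cmp r32, imm8": mod = 11 (register direct), reg field = /7, rm = register. */
    static uint8_t cmp_imm8_modrm(unsigned rm_hw) {
      return (uint8_t) (0xc0 | (7u << 3) | (rm_hw & 7u));
    }

    int main(void) {
      /* Hardware numbers: eax=0, ecx=1, edx=2, ebx=3, esp=4, ebp=5, esi=6, edi=7. */
      printf("cmp eax, 0 -> 83 %02x 00\n", cmp_imm8_modrm(0)); /* 83 f8 00 */
      printf("cmp ebx, 0 -> 83 %02x 00\n", cmp_imm8_modrm(3)); /* 83 fb 00 */
      printf("cmp esi, 0 -> 83 %02x 00\n", cmp_imm8_modrm(6)); /* 83 fe 00 */
      return 0;
    }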
@@ -2182,9 +2215,9 @@ b8 x86_64_emit_i32_op_reg_val(
   u32 src
 ) {
   switch (op) {
-    case MOV: LOG(VERBOSE, "+ mov [%lld], %u", dst, src); return x86_64_emit_i32_mov_reg_val(pool, codegen, dst, src);
-    case ADD: LOG(VERBOSE, "+ add [%lld], %u", dst, src); return x86_64_emit_i32_add_reg_val(pool, codegen, dst, src);
-    case SUB: LOG(VERBOSE, "+ sub [%lld], %u", dst, src); return x86_64_emit_i32_sub_reg_val(pool, codegen, dst, src);
+    case MOV: return x86_64_emit_i32_mov_reg_val(pool, codegen, dst, src);
+    case ADD: return x86_64_emit_i32_add_reg_val(pool, codegen, dst, src);
+    case SUB: return x86_64_emit_i32_sub_reg_val(pool, codegen, dst, src);
     default:;
   }
 
@@ -2208,9 +2241,9 @@ b8 x86_64_emit_i32_op_reg_reg(
     return 1;
 
   switch (op) {
-    case MOV: LOG(VERBOSE, "+ mov [%lld], [%lld]", dst, src); write_u8(LE, 0x89, begin, end); break;
-    case ADD: LOG(VERBOSE, "+ add [%lld], [%lld]", dst, src); write_u8(LE, 0x01, begin, end); break;
-    case SUB: LOG(VERBOSE, "+ sub [%lld], [%lld]", dst, src); write_u8(LE, 0x29, begin, end); break;
+    case MOV: write_u8(LE, 0x89, begin, end); break;
+    case ADD: write_u8(LE, 0x01, begin, end); break;
+    case SUB: write_u8(LE, 0x29, begin, end); break;
     default: FAIL("Not implemented", 0);
   }
 
   ++codegen->offset_code;
@@ -2226,7 +2259,7 @@ b8 x86_64_emit_node(
   u32 context
 ) {
   Node *n = node_by_id(pool, node);
-  CHECK(n != NULL, "No node", 0);
+  CHECK(n != NULL, "Sanity", 0);
 
   codegen->buffer_code = chunk_resize(pool, codegen->buffer_code, codegen->offset_code + 256);
   CHECK(codegen->buffer_code.size == codegen->offset_code + 256, "Buffer overflow", 0);
@@ -2240,6 +2273,8 @@ b8 x86_64_emit_node(
   Codegen_Entity *entities = CHUNK(pool, codegen->entities, Codegen_Entity);
   CHECK(entities != NULL, "Internal", 0);
 
+  Codegen_Entity *dst = entities + node;
+
   switch (n->op) {
     case OP_PTR:
     case OP_I8:
@@ -2257,7 +2292,6 @@ b8 x86_64_emit_node(
       Node * y_n = node_by_id(pool, n->bin_op[1]);
       Codegen_Entity *x = entities + n->bin_op[0];
      Codegen_Entity *y = entities + n->bin_op[1];
-      Codegen_Entity *dst = entities + node;
 
      CHECK(x_n != NULL, "Sanity", 0);
      CHECK(y_n != NULL, "Sanity", 0);
@@ -2326,7 +2360,7 @@ b8 x86_64_emit_node(
      switch (num_args) {
        case 1: {
          Node *arg = node_by_id(pool, args[0]);
-          CHECK(arg != NULL, "No node", 0);
+          CHECK(arg != NULL, "Sanity", 0);
 
          if (arg->op == OP_ADDRESS) {
            // Write data
@@ -2463,7 +2497,7 @@ b8 x86_64_emit_node(
          //
 
          Node *dat_2 = node_by_id(pool, arg_2->ref);
-          CHECK(dat_2 != NULL, "No node", 0);
+          CHECK(dat_2 != NULL, "Sanity", 0);
          CHECK(dat_2->op == OP_PTR, "Not implemented", 0);
 
          i64 arg_2_offset = codegen->offset_ro_data;
@@ -2557,7 +2591,35 @@ b8 x86_64_emit_node(
    } break;
 
    case OP_IF: {
-      FAIL("Not implemented", 0);
+      Node *arg_n = node_by_id(pool, n->if_.condition);
+      CHECK(arg_n != NULL, "Sanity", 0);
+
+      CHECK(arg_n->op == OP_I32, "Not implemented", 0);
+
+      // Load value into a register.
+
+      dst->reg = 1;
+      while (dst->reg <= 6 && ((1 << (dst->reg - 1)) & proc->occupied_reg) != 0)
+        ++dst->reg;
+
+      u32 val = CHUNK(pool, arg_n->lit, u32)[0];
+      if (!x86_64_emit_i32_op_reg_val(pool, codegen, MOV, dst->reg, val))
+        return 0;
+
+      proc->occupied_reg |= 1 << (dst->reg - 1);
+
+      // Compare the register with zero.
+
+      if (!x86_64_emit_cmp_reg_zero(pool, codegen, dst->reg))
+        return 0;
+
+      // Leave space for the je instruction.
+
+      CHECK(codegen->buffer_code.size >= codegen->offset_code + 6, "Buffer overflow", 0);
+      mem_set(CHUNK(pool, codegen->buffer_code, u8) + codegen->offset_code, 0x90, 2); // nop
+
+      dst->jmp_offset = codegen->offset_code;
+      codegen->offset_code += 2;
    } break;
 
    case OP_RET: {
@@ -2585,7 +2647,7 @@ b8 x86_64_emit_node(
        LOG(WARNING, "Some return values are ignored for node %lld", node);
 
      Node *val = node_by_id(pool, vals[0]);
-      CHECK(val != NULL, "No node", 0);
+      CHECK(val != NULL, "Sanity", 0);
 
      switch (val->op) {
        case OP_I64: {
@@ -2606,7 +2668,8 @@ b8 x86_64_emit_node(
        } break;
 
        case OP_ADD:
-        case OP_SUB: {
+        case OP_SUB:
+        case OP_PHI: {
          switch (entities[vals[0]].reg) {
            case EAX:
              break;
@@ -2688,18 +2751,121 @@ b8 x86_64_emit_node(
    } break;
 
    case OP_PHI: {
-      FAIL("Not implemented", 0);
+      i64 num_vals = n->phi.vals.size / sizeof(i64);
+      i64 *vals = CHUNK(pool, n->phi.vals, i64);
+      CHECK(vals != NULL, "Sanity", 0);
+
+      // Allocate a register.
+
+      dst->reg = 1;
+      while (dst->reg <= 6 && ((1 << (dst->reg - 1)) & proc->occupied_reg) != 0)
+        ++dst->reg;
+
+      proc->occupied_reg |= 1 << (dst->reg - 1);
+
+      // Emit mov and jmp instructions.
+
+      i64 offset_back = codegen->offset_code;
+
+      for (i64 j = 0; j < num_vals; ++j) {
+        i64 src_id = vals[j];
+        if (src_id == UNDEFINED) continue;
+        CHECK(src_id >= 0 && src_id < num_entities, "Sanity", 0);
+
+        Node *src = node_by_id(pool, src_id);
+        CHECK(src != NULL, "Sanity", 0);
+
+        Codegen_Entity *src_info = entities + src_id;
+
+        codegen->offset_code = entities[src_id].phi_offset;
+
+        switch (src->op) {
+          case OP_I32: {
+            u32 val = CHUNK(pool, src->lit, u32)[0];
+            if (!x86_64_emit_i32_mov_reg_val(pool, codegen, dst->reg, val))
+              return 0;
+          } break;
+
+          case OP_ADD:
+          case OP_SUB:
+          case OP_PHI:
+            FAIL("Not implemented", 0);
+            if (!x86_64_emit_i32_op_reg_reg(pool, codegen, MOV, dst->reg, src_info->reg))
+              return 0;
+            break;
+
+          default:
+            FAIL("Not implemented", 0);
+        }
+
+        u8 *begin = CHUNK(pool, codegen->buffer_code, u8) + codegen->offset_code;
+        u8 *end = begin + codegen->buffer_code.size;
+
+        i64 relative_offset_64 = offset_back - (codegen->offset_code + 2);
+        i8 relative_offset = (i8) relative_offset_64;
+        CHECK(relative_offset_64 == relative_offset, "Not implemented", 0);
+
+        write_u8(LE, 0xeb, begin, end); // jmp
+        write_i8(LE, relative_offset, begin + 1, end);
+      }
+
+      codegen->offset_code = offset_back;
    } break;
 
    default:
      FAIL("Unknown operation", 0);
  }
 
+  if (dst->phi_index != 0) {
+    // We have to jump to the phi node from here.
+    // Leave space for the mov and jmp instructions.
+
+    CHECK(codegen->buffer_code.size >= codegen->offset_code + 8, "Buffer overflow", 0);
+    mem_set(CHUNK(pool, codegen->buffer_code, u8) + codegen->offset_code, 0x90, 7); // nop
+    dst->phi_offset = codegen->offset_code;
+    codegen->offset_code += 7;
+
+    if (dst->phi_index == 1) {
+      // We have to jump here from the if node.
+
+      CHECK(dst->branch != UNDEFINED, "Sanity", 0);
+      CHECK(dst->branch >= 0 && dst->branch < num_entities, "Sanity", 0);
+
+      i64 jmp_offset = entities[dst->branch].jmp_offset;
+
+      u8 *begin = CHUNK(pool, codegen->buffer_code, u8) + jmp_offset;
+      u8 *end = begin + codegen->buffer_code.size;
+
+      i64 relative_offset_64 = codegen->offset_code - (jmp_offset + 2);
+      i8 relative_offset = (i8) relative_offset_64;
+      CHECK(relative_offset_64 == relative_offset, "Not implemented", 0);
+
+      write_u8(LE, 0x74, begin, end); // je
+      write_i8(LE, relative_offset, begin + 1, end);
+    }
+  }
+
  return 1;
 #undef CHECK_NODE_
 }
 
+void dump_binary(Pool *pool, Codegen_Context *codegen, i64 begin, i64 end) {
+  if (begin >= end)
+    return;
+  c8 dump[1024] = {0};
+  u8 *bytes = CHUNK(pool, codegen->buffer_code, u8);
+  i64 j = 0;
+  for (i64 i = begin; i < end && j + 4 < (i64) sizeof(dump); ++i) {
+    dump[j++] = ((bytes[i] >> 4) & 0xf)["0123456789abcdef"];
+    dump[j++] = ( bytes[i] & 0xf)["0123456789abcdef"];
+    dump[j++] = ' ';
  }
+  if (j > 0)
+    dump[j - 1] = '\0';
+  LOG(VERBOSE, "DUMP: %s", dump);
+}
+
 b8 emit_proc(
   Pool * pool,
   Codegen_Context *codegen,
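Two details of the back-patching above are easy to miss. First, the 7 nop bytes reserved for every phi operand are sized for exactly what the OP_PHI loop writes into them in the only case implemented so far: a 5-byte `mov r32, imm32` (B8+r id) followed by a 2-byte `jmp rel8` (EB cb) to the join point. Second, both short jumps encode their displacement relative to the first byte after the 2-byte instruction, which is where the `... + 2` terms come from. A minimal, self-contained sketch of the je patch with hypothetical offsets (the real code goes through write_u8/write_i8 rather than raw stores):

    #include <assert.h>
    #include <stdint.h>

    /* Patch a 2-byte "je rel8" placeholder so that it jumps to `target`.
       rel8 is measured from the first byte after the je instruction, hence
       the `jmp_offset + 2` term used in the diff. */
    static void patch_je_rel8(uint8_t *code, int64_t jmp_offset, int64_t target) {
      int64_t rel64 = target - (jmp_offset + 2);
      int8_t  rel8  = (int8_t) rel64;
      assert(rel64 == rel8);            /* bxgen FAILs ("Not implemented") beyond +-127 */
      code[jmp_offset]     = 0x74;      /* je */
      code[jmp_offset + 1] = (uint8_t) rel8;
    }

    int main(void) {
      uint8_t code[32] = {0};
      patch_je_rel8(code, 8, 17);       /* the cond_1 layout sketched below: writes 74 07 */
      return code[9] == 0x07 ? 0 : 1;
    }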
@@ -2722,7 +2888,7 @@ b8 emit_proc(
   CHECK(proc < codegen->entities.size / (i64) sizeof(Codegen_Entity), "Buffer overflow", 0);
   CHECK(entities[proc].emit_done == 0, "Emit already done", 0);
 
-  entities[proc].offset = codegen->offset_code;
+  entities[proc].proc_offset = codegen->offset_code;
 
   // TODO Sort nodes in the sequential execution order.
   // NOTE Now we assume that nodes are already sorted.
@@ -2731,10 +2897,41 @@ b8 emit_proc(
   i64 *nodes = CHUNK(pool, p->nodes, i64);
   CHECK(nodes != NULL, "Internal", 0);
 
+  // Mark dependants for phi nodes.
+  for (i64 i = 0; i < num_nodes; ++i)
+    if (entity_enabled(pool, nodes[i])) {
+      Node *n = node_by_id(pool, nodes[i]);
+      CHECK(n != NULL, "Sanity", 0);
+      if (n->op != OP_PHI)
+        continue;
+
+      i64 num_vals = n->phi.vals.size / sizeof(i64);
+      i64 *vals = CHUNK(pool, n->phi.vals, i64);
+      CHECK(vals != NULL, "Sanity", 0);
+
+      i64 phi_index = 1;
+
+      for (i64 j = 0; j < num_vals; ++j) {
+        i64 k = vals[j];
+        CHECK(k != UNDEFINED, "Sanity", 0);
+        CHECK(k >= 0 && k < num_entities, "Sanity", 0);
+        entities[k].phi_index = phi_index;
+        entities[k].branch = n->phi.branch;
+        ++phi_index;
+      }
+    }
+
   for (i64 i = 1; i < num_nodes; ++i)
-    if (entity_enabled(pool, nodes[i]))
+    if (entity_enabled(pool, nodes[i])) {
+      entities[i].inst_begin = codegen->offset_code;
       if (!x86_64_emit_node(pool, codegen, entities + proc, nodes[i], context))
         return 0;
+      entities[i].inst_end = codegen->offset_code;
+    }
+
+  for (i64 i = 1; i < num_nodes; ++i)
+    if (entity_enabled(pool, nodes[i]))
+      dump_binary(pool, codegen, entities[i].inst_begin, entities[i].inst_end);
 
   entities[proc].emit_done = 1;
   return 1;
@@ -3667,7 +3864,7 @@ i64 unit_write_in_memory(
       CHECK(codegen_entities[i].emit_done, "No proc code", 0);
 
       link_add_symbol(pool, linker, (Link_Sym_Entry) {
-        .address = rx_code_address + codegen_entities[i].offset,
+        .address = rx_code_address + codegen_entities[i].proc_offset,
         .size = relx.symbol.value.size,
       });
 
@@ -4019,7 +4216,7 @@ i64 unit_write_in_memory(
       CHECK(entities[rel.proc].type == ENTITY_PROC, "No proc", 0);
       CHECK(codegen_entities[rel.proc].emit_done, "No proc address", 0);
 
-      i64 value = rel.value + rx_code_address + codegen_entities[rel.proc].offset;
+      i64 value = rel.value + rx_code_address + codegen_entities[rel.proc].proc_offset;
 
       write_i64(LE, value, begin, end);
       found = 1;
@@ -4103,7 +4300,7 @@ i64 unit_write_in_memory(
   // 4 bytes - flags                 o + 48
   write_u16(LE, ELF_HEADER_SIZE, o + 52, o_end);
   write_u16(LE, ELF_PROGRAM_HEADER_SIZE, o + 54, o_end);
-  // write_u16(LE, num_program_headers, o + 56, o_end);
+  // 2 bytes - num program headers   o + 56
   // 2 bytes - section header size   o + 58
   // 2 bytes - num section headers   o + 60
   // 2 bytes - string table section  o + 62
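The restored comment above annotates the tail of the ELF64 file header: from offset 52 on, Elf64_Ehdr consists of six 16-bit fields, which is where the o + 52 ... o + 62 numbers come from. For reference, the standard offsets (the enum names here are descriptive only, not identifiers from bxgen.c):

    /* Byte offsets of the trailing Elf64_Ehdr fields (ELF64 spec). */
    enum {
      ELF64_E_EHSIZE    = 52,  /* u16  ELF header size                  */
      ELF64_E_PHENTSIZE = 54,  /* u16  program header entry size        */
      ELF64_E_PHNUM     = 56,  /* u16  number of program headers        */
      ELF64_E_SHENTSIZE = 58,  /* u16  section header entry size        */
      ELF64_E_SHNUM     = 60,  /* u16  number of section headers        */
      ELF64_E_SHSTRNDX  = 62   /* u16  section name string table index  */
    };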
@@ -4722,77 +4919,77 @@ Pool g_pool = {
 // Handy procedures
 //
 
-i64 n_ref(i64 proc, i64 node) {
-  i64 n_ref = node_data_reference(&g_pool, node);
-  p_add(proc, n_ref);
-  return n_ref;
+i64 n_address(i64 proc, i64 node) {
+  i64 n = node_address(&g_pool, node);
+  p_add(proc, n);
+  return n;
 }
 
 i64 n_ptr(i64 proc, u64 address) {
-  i64 n_data = node_data_ptr(&g_pool, address);
-  p_add(proc, n_data);
-  return n_data;
+  i64 n = node_ptr(&g_pool, address);
+  p_add(proc, n);
+  return n;
 }
 
 i64 n_str(i64 proc, c8 *value) {
   i64 len = str_len(value, value + MAX_STRING_SIZE - 1);
-  i64 n_data = node_data_array_c8(&g_pool, len + 1, value);
-  i64 n_ref = node_data_reference(&g_pool, n_data);
+  i64 n_data = node_array_c8(&g_pool, len + 1, value);
+  i64 n_ref = node_address(&g_pool, n_data);
   p_add(proc, n_data);
   p_add(proc, n_ref);
   return n_ref;
 }
 
 i64 n_i32(i64 proc, i32 value) {
-  i64 n = node_data_i32(&g_pool, value);
+  i64 n = node_i32(&g_pool, value);
   p_add(proc, n);
   return n;
 }
 
 i64 n_i64(i64 proc, i64 value) {
-  i64 n = node_data_i64(&g_pool, value);
+  i64 n = node_i64(&g_pool, value);
   p_add(proc, n);
   return n;
 }
 
 i64 n_add(i64 proc, Var x, Var y) {
-  i64 n = node_data_add(&g_pool, x, y);
+  i64 n = node_add(&g_pool, x, y);
   p_add(proc, n);
   return n;
 }
 
 i64 n_sub(i64 proc, Var x, Var y) {
-  i64 n = node_data_sub(&g_pool, x, y);
+  i64 n = node_sub(&g_pool, x, y);
   p_add(proc, n);
   return n;
 }
 
 i64 n_call(i64 proc, i64 target_proc, i64 num_args, Var *args) {
-  i64 n = node_ctrl_call(&g_pool, target_proc, num_args, args);
+  i64 n = node_call(&g_pool, target_proc, num_args, args);
   p_add(proc, n);
   return n;
 }
 
 i64 n_call_by_name(i64 proc, c8 *name, i64 num_args, Var *args) {
-  i64 n = node_ctrl_call_by_name(&g_pool, str_len(name, name + MAX_STRING_SIZE), name, num_args, args);
+  i64 n = node_call_by_name(&g_pool, str_len(name, name + MAX_STRING_SIZE), name, num_args, args);
   p_add(proc, n);
   return n;
 }
 
 i64 n_if(i64 proc, Var condition) {
-  i64 n = node_ctrl_if(&g_pool, condition);
+  i64 n = node_if(&g_pool, condition);
   p_add(proc, n);
   return n;
 }
 
 i64 n_ret(i64 proc, i64 num_vals, Var *vals) {
-  i64 n = node_ctrl_ret(&g_pool, num_vals, vals);
+  i64 n = node_ret(&g_pool, num_vals, vals);
   p_add(proc, n);
   return n;
 }
 
 i64 n_phi(i64 proc, i64 branch, i64 num_vals, Var *vals) {
-  i64 n = node_ctrl_phi(&g_pool, branch, num_vals, vals);
+  i64 n = node_phi(&g_pool, branch, num_vals, vals);
   p_add(proc, n);
   return n;
 }
@@ -5215,24 +5412,29 @@ ADD_TEST_(math_reg_reuse, (1+2+3+4+5+6+7+8+9+10),
   N_RET(p, j);
 )
 
-ADD_TEST_(cond_1, 1,
+ADD_TEST_(cond_1, 42,
   i64 p = p_new_entry(u);
-  i64 a = n_i32(p, 1);
-  i64 b = n_if(p, a);
-  i64 c = n_i32(p, 1);
-  i64 d = n_i32(p, 2);
-  i64 e = N_PHI(p, b, c, d);
-  N_RET(p, e);
+  i64 a = n_i32(p, 1);        //      mov eax, 1
+  i64 b = n_if(p, a);         //      cmp eax, 0
+                              //      je  _1
+  i64 c = n_i32(p, 42);       //      mov ebx, 42
+                              //      jmp _2
+                              //  _1:
+  i64 d = n_i32(p, 43);       //      mov ebx, 43
+                              //      jmp _2
+  i64 e = N_PHI(p, b, c, d);  //  _2:
+                              //      mov eax, ebx
+  N_RET(p, e);                //      ret
 )
 
-ADD_TEST_(cond_2, 2,
+ADD_TEST_(cond_2, 43,
   i64 p = p_new_entry(u);
   i64 a = n_i32(p, 0);
   i64 b = n_if(p, a);
-  i64 c = n_i32(p, 1);
-  i64 d = n_i32(p, 2);
+  i64 c = n_i32(p, 42);
+  i64 d = n_i32(p, 43);
   i64 e = N_PHI(p, b, c, d);
   N_RET(p, e);
 )
@@ -5321,8 +5523,8 @@ i32 main(i32 argc, c8 **argv) {
   RUN_TEST_(math_sub_7);
   RUN_TEST_(math_sub_8);
   RUN_TEST_(math_reg_reuse);
-  // RUN_TEST_(cond_1);
-  // RUN_TEST_(cond_2);
+  RUN_TEST_(cond_1);
+  RUN_TEST_(cond_2);
 #endif
 
 #undef RUN_TEST_
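Tying the new OP_IF and OP_PHI paths together, the body that cond_1 should now produce looks roughly as follows. The bytes are hand-assembled from the logic in this diff and the register choices annotated in the test (eax for the condition, ebx for the phi result); actual output from dump_binary may differ in details such as the final return sequence.

    /* Hand-assembled sketch of the cond_1 body (condition is 1, so the result is 42). */
    static const unsigned char cond_1_code[] = {
      0xb8, 0x01, 0x00, 0x00, 0x00, /*  0: mov eax, 1    OP_IF loads the condition        */
      0x83, 0xf8, 0x00,             /*  5: cmp eax, 0    x86_64_emit_cmp_reg_zero         */
      0x74, 0x07,                   /*  8: je  0x11      patched when node d gets a slot  */
      0xbb, 0x2a, 0x00, 0x00, 0x00, /*  a: mov ebx, 42   slot of node c, filled by OP_PHI */
      0xeb, 0x07,                   /*  f: jmp 0x18                                       */
      0xbb, 0x2b, 0x00, 0x00, 0x00, /* 11: mov ebx, 43   slot of node d                   */
      0xeb, 0x00,                   /* 16: jmp 0x18                                       */
      0x89, 0xd8,                   /* 18: mov eax, ebx  OP_RET copies the phi register   */
      0xc3                          /* 1a: ret                                            */
    };

With the condition flipped to 0, as in cond_2, the je at offset 8 is taken, the false branch writes 43, and the updated expected value of that test is met.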