#if 0 /*
#/ ================================================================
#/
#/  bxgen.c
#/
#/  Binary executable code generation and linking.
#/  Compiler backend.
#/
#/ ----------------------------------------------------------------
#/
#/  Qualities
#/
#/    - Compiler backend in the form of a library
#/    - Single source file
#/    - Simple and flexible API
#/    - No external dependencies
#/    - No configuration required
#/    - No dynamic memory management
#/    - Easy cross-compilation
#/    - Platform-independent host
#/
#/  Inspirations
#/
#/    - tinycc https://repo.or.cz/w/tinycc.git
#/    - Cuik   https://github.com/RealNeGate/Cuik
#/    - QBE    https://c9x.me/compile/
#/
#/  To-Do list
#/
#/    - Simplify
#/      - Factor out checks
#/      - Allow formatting the message in CHECK and FAIL macros
#/    - Library
#/      - Terminal color option
#/      - Proper prefixes for identifiers
#/      - Effective entity allocation
#/      - Improve error handling
#/      - Memory reallocation when necessary
#/      - Multithreading
#/    - Codegen
#/      - Architectures
#/        - x86_64
#/        - i386
#/        - RISC-V
#/        - ARM
#/        - WebAssembly
#/      - Dump generated asm and binary
#/      - Implicit procedure prototypes
#/      - Evaluation
#/      - Static single-assignment
#/      - Sea of Nodes
#/      - Optimization layers
#/      - JIT
#/      - Debug info
#/      - Inlined assembly
#/      - C compiler and self-compilation
#/      - Meta codegen >:3
#/    - Linker
#/      - Formats
#/        - ELF
#/        - COFF
#/        - PE
#/        - OMF
#/        - Mach-O
#/      - Linking with libc
#/      - Statically-linked executable
#/      - Object file
#/      - Static library
#/      - Dynamic-link library
#/      - Dynamically-linked executable
#/      - GNU ld script
#/      - GOT and PLT
#/      - Thread-local storage
#/      - Static libraries
#/      - Dynamic libraries
#/      - Unused dependencies elimination
#/    - Comprehensive test suite
#/    - Built-in standard library
#/      - Terminal
#/      - Graphics >:3
#/      - Input devices >:3
#/      - Math
#/      - Soft floating-point arithmetic
#/      - File I/O
#/      - Threads
#/      - Networking >:3
#/      - Audio >:3
#/
#/  Bugs
#/
#/    - Linking with libc doesn't work
#/
#/  Done features
#/
#/    - Simplify
#/      - Use 0 for UNDEFINED. Make the zero value useful
#/      - Chunk table for names and arrays
#/    - Library
#/      - IO static dispatch
#/      - Correct serialization for endianness
#/      - Proper error handling
#/    - Codegen
#/      - Implicit exit after ret from entry point
#/
#/ ----------------------------------------------------------------
#/
#/  (C) 2024 Mitya Selivanov
#/
#/ ================================================================
#/
#/  Self-compilation shell script
#/

SRC=${0##*./}
BIN=${SRC%.*}

gcc \
  -Wall -Wextra -Werror -pedantic \
  -Wno-old-style-declaration \
  -Wno-missing-braces \
  -Wno-unused-variable \
  -Wno-unused-but-set-variable \
  -Wno-unused-parameter \
  -Wno-enum-compare \
  -O0 \
  -fsanitize=undefined,address,leak \
  -o $BIN $SRC && \
./$BIN $@ && \
rm $BIN

exit $?
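#/
#/ ----------------------------------------------------------------
#/
#/  Usage sketch
#/
#/  NOTE This example is illustrative only. It is built from the core
#/  API declared below; exact setup details (how the pool is
#/  initialized, which nodes must be added to a procedure, how the
#/  IO id is obtained) may differ in practice.
#/
#/    static u8 memory[POOL_CAPACITY];
#/
#/    Pool pool  = { .capacity = POOL_CAPACITY, .data = memory };
#/    i64  unit  = unit_init(&pool, UNIT_CODE);
#/    i64  entry = proc_init(&pool);
#/
#/    unit_proc_add       (&pool, unit, entry);
#/    unit_set_entry_point(&pool, unit, entry);
#/
#/    i64 val = node_i64(&pool, 42);
#/    i64 ret = node_ret(&pool, 1, (Var[]) { val });
#/
#/    proc_node_add(&pool, entry, val);
#/    proc_node_add(&pool, entry, ret);
#/
#/    i64 f = io_open_write(5, "hello", NULL);
#/    unit_write(&pool, unit, FORMAT_ELF, ARCH_X86_64, f, NULL);
#/    io_close(f, NULL);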
# */
#endif

// ================================================================
//
//  Types
//
// ================================================================

#ifndef TYPES_HEADER_GUARD_
#define TYPES_HEADER_GUARD_

typedef signed   char      i8;
typedef signed   short     i16;
typedef signed             i32;
typedef signed   long long i64;
typedef unsigned char      u8;
typedef unsigned short     u16;
typedef unsigned           u32;
typedef unsigned long long u64;
typedef char               c8;
typedef int                c32;
typedef signed   char      b8;
typedef float              f32;
typedef double             f64;

#endif // TYPES_HEADER_GUARD_

// ================================================================
//
//  IR data declarations
//
// ================================================================

#ifndef BXGEN_HEADER_GUARD_
#define BXGEN_HEADER_GUARD_

enum {
  // Log level
  ERROR = 1,
  WARNING,
  INFO,
  VERBOSE,
  TRACE,

  // Limits
  //
  CHUNK_TABLE_ALIGNMENT = 16,
  POOL_CAPACITY         = 10 * 1024 * 1024,
  MAX_STRING_SIZE       = 1024 * 1024,

  // For indices
  UNDEFINED = 0,

  // Sea of Nodes flow type
  //
  FLOW_DATA = 0,
  FLOW_CONTROL,

  // Semantic node operations
  //
  OP_NONE = 0,
  OP_PTR, OP_I8, OP_I16, OP_I32, OP_I64, OP_F32, OP_F64,
  OP_ADD, OP_SUB, OP_NEG,
  OP_UMUL, OP_IMUL, OP_UDIV, OP_IDIV,
  OP_AND, OP_OR, OP_XOR, OP_NOT,
  OP_ADDRESS, OP_CALL, OP_IF, OP_RET, OP_PHI,

  // Calling conventions
  CONV_UNKNOWN = 0,
  CONV_CDECL,
  CONV_STDCALL,
  CONV_FASTCALL,
  CONV_THISCALL,

  // Unit types
  //
  UNIT_CODE = 0,
  UNIT_LIBRARY_OBJECT,
  UNIT_LIBRARY_STATIC,
  UNIT_LIBRARY_DYNAMIC,

  // Entity types
  //
  ENTITY_NODE = 0,
  ENTITY_PROC,
  ENTITY_UNIT,

  // IO dispatch operations
  //
  IO_OPEN_READ = 0,
  IO_OPEN_WRITE,
  IO_CLOSE,
  IO_SEEK,
  IO_TELL,
  IO_READ,
  IO_WRITE,
  IO_CHMOD_EXE,

  IO_SEEK_CURSOR = 0,
  IO_SEEK_BEGIN,
  IO_SEEK_END,

  // Formats
  //
  FORMAT_UNKNOWN = 0,
  FORMAT_ELF,
  FORMAT_COFF,
  FORMAT_PE,
  FORMAT_OMF,
  FORMAT_MACH_O,

  // Architecture
  //
  ARCH_UNKNOWN = 0,
  ARCH_RISC_V,
  ARCH_I386,
  ARCH_X86_64,
  ARCH_ARM32,
  ARCH_ARM64,
  ARCH_WASM,

  // Relocations
  //
  REL_ADD_INSTRUCTION_ADDRESS,
  REL_ADD_RO_OP_ADDRESS,
  REL_ADD_PROC_ADDRESS,
};

typedef struct {
  i64 size;
  i64 offset;
} Chunk_Handle;

typedef i64 Var;

typedef struct {
  Var condition;
} If;

typedef struct {
  Chunk_Handle vals;
} Ret;

typedef struct {
  i64          branch;
  Chunk_Handle vals;
} Phi;

typedef struct {
  // NOTE
  // We may call a local procedure by its id,
  // or a global procedure by name.

  u16          convention; // can be implicitly retrieved from the procedure
  i64          target_proc;
  Chunk_Handle target_name;
  Chunk_Handle args;
} Call;

// A semantic node is an operation with optional data
// and possible references to other nodes.
typedef struct {
  u16 op;
  i64 index_in_proc;
  union {
    Chunk_Handle lit;
    Var          bin_op[2];
    Var          ref;
    Call         call;
    If           if_;
    Ret          ret;
    Phi          phi;
  };
} Node;

// A procedure is a collection of semantic nodes
// and has a string name.
typedef struct {
  u16          convention;
  Chunk_Handle name;
  Chunk_Handle nodes;
  i64          ret_index;
  i64          unit;
  i64          index_in_unit;
} Proc;

// A compilation unit is a collection of procedures.
//
typedef struct {
  u16          type;
  i64          entry_point_index;
  Chunk_Handle name;
  Chunk_Handle procs;
  Chunk_Handle links;
} Unit;

// An entity can be any of:
//   - Node
//   - Proc
//   - Unit
//
// Every entity can be referenced by its unique index
// in the entity pool.
typedef struct {
  b8  is_enabled;
  u16 type;
  union {
    Node node;
    Proc proc;
    Unit unit;
  };
} Entity;

// Pool, a collection of all entities.
//
// NOTE
// We use one single large memory block for *everything*.
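//
// For example (illustrative), a name can be stored and read back through
// the chunk table defined further down in this file:
//
//   Chunk_Handle name = chunk_add(&pool, 5, "entry");
//   c8          *text = CHUNK(&pool, name, c8); // points at pool.data + name.offset
//
// A Chunk_Handle holds an offset into the block rather than a pointer, so
// no pointer fix-up is needed as the pool grows.
//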
typedef struct { i64 size; i64 capacity; u8 * data; Chunk_Handle entities; } Pool; typedef struct { b8 emit_done; i64 inst_begin; i64 inst_end; i64 proc_offset; i64 reg; u64 occupied_reg; i32 phi_index; i64 phi_offset; i64 jmp_offset; i64 branch; } Codegen_Entity; typedef struct { u16 type; i64 offset; i64 size; i64 value; i64 name_size; c8 *name; i64 proc; } Codegen_Rel_Entry; typedef struct { i64 offset; i64 address; } Link_Sec_Entry; typedef struct { i64 name_size; c8 *name; i64 address; i64 size; i64 got_offset; i64 obj_index; i64 sec_index; } Link_Sym_Entry; typedef struct { i64 symbol; } Link_Rel_Entry; typedef struct { b8 has_entry; i64 entry_point; i64 offset_code; i64 offset_ro_data; Chunk_Handle entities; Chunk_Handle rels; Chunk_Handle buffer_code; Chunk_Handle buffer_ro_data; } Codegen_Context; typedef struct { i64 num_obj_files; Chunk_Handle obj_file_buffer; Chunk_Handle dependencies_buffer; Chunk_Handle obj_file_offsets; Chunk_Handle sections; Chunk_Handle symbols; Chunk_Handle rels; Chunk_Handle output_buffer; } Linker_Context; // ================================================================ // // API declarations // // ================================================================ // // Hooks // // NOTE // Shoud be implemented on the user side. // See: `* Helper procedures` // // ================================================================ #ifdef __cplusplus extern "C" { #endif void dispatch_assert(b8 condition, c8 *message, u32 line, c8 *file); void dispatch_log (i32 log_level, u32 line, c8 *file, c8 *format, ...); void dispatch_io (u16 op, i64 *id, i64 *size, void *data, void *user_data); // ================================================================ // // Main API // // ================================================================ i64 pool_add (Pool *pool, Entity data); void pool_remove(Pool *pool, i64 entity, u16 type); i64 node_init (Pool *pool, Node data); void node_destroy(Pool *pool, i64 node); i64 node_address (Pool *pool, i64 node); i64 node_array_c8 (Pool *pool, i64 size, c8 *data); i64 node_ptr (Pool *pool, u64 address); i64 node_i32 (Pool *pool, i32 value); i64 node_i64 (Pool *pool, i64 value); i64 node_add (Pool *pool, Var x, Var y); i64 node_sub (Pool *pool, Var x, Var y); i64 node_call (Pool *pool, i64 target_proc, i64 num_args, Var *args); i64 node_call_by_name(Pool *pool, i64 name_size, c8 *name, i64 num_args, Var *args); i64 node_if (Pool *pool, Var condition); i64 node_ret (Pool *pool, i64 num_values, Var *values); i64 node_phi (Pool *pool, i64 branch, i64 num_vals, Var *vals); i64 proc_init (Pool *pool); void proc_destroy (Pool *pool, i64 proc); void proc_set_convention(Pool *pool, i64 proc, u16 convention); void proc_set_name (Pool *pool, i64 proc, i64 name_size, c8 *name); void proc_node_add (Pool *pool, i64 proc, i64 node); void proc_node_remove (Pool *pool, i64 proc, i64 node); i64 unit_init (Pool *pool, u16 type); void unit_destroy (Pool *pool, i64 unit); void unit_proc_add (Pool *pool, i64 unit, i64 proc); void unit_proc_remove (Pool *pool, i64 unit, i64 proc); void unit_link_add (Pool *pool, i64 unit, i64 link_unit); void unit_link_remove (Pool *pool, i64 unit, i64 link_unit); void unit_set_name (Pool *pool, i64 unit, i64 name_size, c8 *name); void unit_set_entry_point(Pool *pool, i64 unit, i64 entry_point_proc); i64 unit_write_in_memory(Pool *pool, Codegen_Context *codegen, Linker_Context *linker, i64 unit, u16 format, u16 arch); b8 unit_write (Pool *pool, i64 unit, u16 format, u16 arch, i64 io_id, void *io_user_data); i64 
io_open_read (i64 name_size, c8 *name, void *user_data); i64 io_open_write(i64 name_size, c8 *name, void *user_data); void io_close (i64 f, void *user_data); b8 io_seek (i64 f, i64 offset, u16 origin, void *user_data); i64 io_tell (i64 f, void *user_data); i64 io_read (i64 f, i64 size, void *data, void *user_data); i64 io_write (i64 f, i64 size, void *data, void *user_data); void io_chmod_exe (i64 f, void *user_data); // ================================================================ // // Helpers API // // ================================================================ #ifndef DISABLE_HELPERS i64 n_address(i64 proc, i64 node); i64 n_ptr(i64 proc, u64 address); i64 n_str(i64 proc, c8 *value); i64 n_i32(i64 proc, i32 value); i64 n_i64(i64 proc, i64 value); i64 n_add(i64 proc, Var x, Var y); i64 n_sub(i64 proc, Var x, Var y); i64 n_call (i64 proc, i64 target_proc, i64 num_args, Var *args); i64 n_call_by_name(i64 proc, c8 *name, i64 num_args, Var *args); i64 n_if (i64 proc, Var condition); i64 n_ret (i64 proc, i64 num_vals, Var *vals); i64 n_phi (i64 proc, i64 branch, i64 num_vals, Var *vals); i64 p_new(i64 unit, c8 *name); i64 p_new_entry(i64 unit); void p_add(i64 proc, i64 node); i64 u_new(void); void u_add(i64 unit, i64 proc); void u_entry_point(i64 unit, i64 proc); void u_elf_x86_64(i64 unit, c8 *output_file_name); void l_code(i64 unit, i64 link_unit); void l_object(i64 unit, c8 *object_library); void l_static(i64 unit, c8 *static_library); c8 * l_find(c8 *name, b8 silent); #define ARGS(...) \ (sizeof((Var[]) { __VA_ARGS__ }) / sizeof(Var)), \ (Var[]) { __VA_ARGS__ } #define N_CALL(proc, target_proc, ...) n_call ((proc), (target_proc), ARGS(__VA_ARGS__)) #define N_CALL_BY_NAME(proc, name, ...) n_call_by_name((proc), (name), ARGS(__VA_ARGS__)) #define N_RET(proc, ...) n_ret ((proc), ARGS(__VA_ARGS__)) #define N_PHI(proc, branch, ...) n_phi ((proc), (branch), ARGS(__VA_ARGS__)) #endif #ifdef __cplusplus } #endif #endif // BXGEN_HEADER_GUARD_ // ================================================================ // // IMPLEMENTATION // // ================================================================ // // Compilation options // // ================================================================ #ifndef BXGEN_HEADER #ifndef BXGEN_IMPL_GUARD_ #define BXGEN_IMPL_GUARD_ #ifdef __cplusplus #error Implementation code should be compiled with a C compiler! 
#endif #ifndef HELPERS #define HELPERS 1 #endif #ifndef TESTING #define TESTING 1 #endif #ifndef LOG_LEVEL #define LOG_LEVEL 5 #endif #ifndef LOG_BLOCKING #define LOG_BLOCKING 0 #endif #ifndef TRACE_BLOCKING #define TRACE_BLOCKING 1 #endif #define VERSION "dev" // ================================================================ // // * Basic utilities // // ================================================================ #ifndef NULL #define NULL ((void *) 0) #endif #ifdef NDEBUG # define CHECK(condition, error_string, fail_result) \ do { \ b8 ok_ = (condition); \ if (!ok_) { \ dispatch_log(ERROR, __LINE__, __FILE__, error_string); \ return fail_result; \ } \ } while (0) #else # define CHECK(condition, error_string, fail_result) \ do { \ b8 ok_ = (condition); \ dispatch_assert(ok_, error_string, __LINE__, __FILE__); \ if (!ok_) \ return fail_result; \ } while (0) #endif #ifdef NDEBUG # define LAX(condition, error_string) \ do { \ if (!(condition)) \ dispatch_log(WARNING, __LINE__, __FILE__, error_string); \ } while (0) #else # define LAX(condition, error_string) \ dispatch_assert((condition), error_string, __LINE__, __FILE__) #endif #ifdef NDEBUG # define FAIL(error_string, fail_result) \ dispatch_log(ERROR, __LINE__, __FILE__, error_string); \ return fail_result #else # define FAIL(error_string, fail_result) \ dispatch_assert(0, error_string, __LINE__, __FILE__); \ return fail_result #endif #define LOG(log_level, ...) \ do { \ if (log_level <= LOG_LEVEL) \ dispatch_log(log_level, __LINE__, __FILE__, __VA_ARGS__); \ } while (0) i64 align(i64 x, i64 a) { CHECK(a > 0, "Sanity", 0); return x + ((a - (x % a)) % a); } void mem_set(void *dst, u8 val, i64 size) { CHECK(dst != NULL, "Sanity",); CHECK(size > 0, "Invalid size",); for (i64 i = 0; i < size; ++i) ((u8 *)dst)[i] = val; } void mem_cpy(void *dst, void *__restrict src, i64 size) { CHECK(dst != NULL, "Sanity",); CHECK(src != NULL, "Sanity",); CHECK(size >= 0, "Invalid size",); for (i64 i = 0; i < size; ++i) ((u8 *)dst)[i] = ((u8 *)src)[i]; } b8 mem_eq(void *a, void *b, i64 size) { CHECK(a != NULL, "Sanity", 0); CHECK(b != NULL, "Sanity", 0); CHECK(size > 0, "Invalid size", 0); u8 *x = (u8 *) a; u8 *y = (u8 *) b; for (i64 i = 0; i < size; ++i) if (x[i] != y[i]) return 0; return 1; } b8 str_eq(i64 a_len, c8 *a, i64 b_len, c8 *b) { return a_len == b_len && mem_eq(a, b, a_len); } #define STR_EQ(z, a, b) \ (z == sizeof(b) - 1 && mem_eq((a), (b), sizeof(b) - 1)) i64 str_len(c8 *s, c8 *s_end) { CHECK(s < s_end, "Buffer overflow", 0); for (i64 len = 0; s + len < s_end; ++len) if (s[len] == '\0') return len; FAIL("Buffer overflow", 0); } i64 str_len_or(c8 *s, c8 *s_max, i64 or_val) { for (i64 len = 0; s + len < s_max; ++len) if (s[len] == '\0') return len; return or_val; } c8 *find_char(c8 *s, c8 *s_end, c8 c) { CHECK(s != NULL, "Sanity", NULL); CHECK(s_end != NULL, "Sanity", NULL); while (s < s_end && *s != c) ++s; return *s == c ? 
s : NULL; } c8 *find_str(c8 *s, c8 *s_end, c8 *sub, c8 *sub_end) { CHECK(s != NULL, "Sanity", NULL); CHECK(s_end != NULL, "Sanity", NULL); CHECK(sub != NULL, "Sanity", NULL); CHECK(sub_end != NULL, "Sanity", NULL); while (sub_end - sub <= s_end - s && s < s_end) { c8 *q = s; c8 *p = sub; for (; q < s_end && p < sub_end; ++q, ++p) if (*q != *p) break; if (p == sub_end) return s; ++s; } return NULL; } c8 *find_str_in_table(c8 *buf, c8 *buf_end, c8 *sub, c8 *sub_end) { CHECK(buf != NULL, "Sanity", NULL); CHECK(buf_end != NULL, "Sanity", NULL); CHECK(sub != NULL, "Sanity", NULL); CHECK(sub_end != NULL, "Sanity", NULL); while (buf < buf_end) { i64 len = str_len(buf, buf_end); if (str_eq(len, buf, sub_end - sub, sub)) return buf; buf += len + 1; } return NULL; } u64 u64_from_dec_str(c8 *s, c8 *s_end) { CHECK(s != NULL && s_end != NULL, "Sanity", 0); CHECK(s < s_end, "Buffer overflow", 0); CHECK(*s >= '0' && *s <= '9', "Parsing failed", 0); u64 x = 0; for (; s < s_end && *s >= '0' && *s <= '9'; ++s) x = (x * 10) + (*s - '0'); LAX(s == s_end || *s == ' ' || *s == '\0', "Parsing failed"); return x; } // ================================================================ // // * Chunk table and entity pool // // ================================================================ Chunk_Handle chunk_add(Pool *pool, i64 size, void *data) { CHECK(pool != NULL, "Sanity", (Chunk_Handle) {0}); CHECK(size > 0, "Sanity", (Chunk_Handle) {0}); CHECK(data != NULL, "Sanity", (Chunk_Handle) {0}); i64 chunk_size = align(size, CHUNK_TABLE_ALIGNMENT); CHECK(pool->size + chunk_size <= pool->capacity, "Out of memory", (Chunk_Handle) {0}); Chunk_Handle h = { .size = size, .offset = pool->size, }; mem_cpy(pool->data + h.offset, data, size); pool->size += chunk_size; return h; } void chunk_remove(Pool *pool, Chunk_Handle h) { (void) pool; (void) h; } #ifdef __GNUC__ __attribute__ ((warn_unused_result)) #endif Chunk_Handle chunk_resize(Pool *pool, Chunk_Handle h, i64 size) { CHECK(pool != NULL, "Sanity", (Chunk_Handle) {0}); CHECK(size > 0, "Sanity", (Chunk_Handle) {0}); CHECK(h.offset >= 0 && h.size >= 0, "Sanity", (Chunk_Handle) {0}); CHECK(h.offset + h.size <= pool->size, "Invalid handle", (Chunk_Handle) {0}); i64 chunk_size = align(size, CHUNK_TABLE_ALIGNMENT); i64 prev_size = align(h.size, CHUNK_TABLE_ALIGNMENT); if (chunk_size <= prev_size) return (Chunk_Handle) { .size = size, .offset = h.offset, }; if (h.offset + h.size == pool->size) { CHECK(pool->size + chunk_size - h.size <= pool->capacity, "Out of memory", (Chunk_Handle) {0}); pool->size += chunk_size - prev_size; return (Chunk_Handle) { .size = size, .offset = h.offset, }; } CHECK(pool->size + chunk_size <= pool->capacity, "Out of memory", (Chunk_Handle) {0}); Chunk_Handle dst = { .size = size, .offset = pool->size, }; mem_cpy(pool->data + dst.offset, pool->data + h.offset, h.size); pool->size += chunk_size; return dst; } u8 *chunk(Pool *pool, Chunk_Handle h) { CHECK(pool != NULL, "Sanity", NULL); CHECK(h.offset >= 0 && h.size >= 0, "Sanity", pool->data); CHECK(h.offset + h.size <= pool->size, "Invalid handle", pool->data); if (h.size == 0) return pool->data; return pool->data + h.offset; } #define CHUNK(pool_, handle_, type_) ((type_ *) chunk((pool_), (handle_))) b8 entity_enabled(Pool *pool, i64 id) { if (id == UNDEFINED) return 0; CHECK(pool != NULL, "Sanity", 0); CHECK(id > 0 && id < pool->entities.size / (i64) sizeof(Entity), "Invalid id", 0); Entity *entities = CHUNK(pool, pool->entities, Entity); CHECK(entities != NULL, "Internal", 0); return 
entities[id].is_enabled; } i64 pool_add(Pool *pool, Entity data) { CHECK(pool != NULL && pool->data != NULL, "Sanity", UNDEFINED); i64 num_entities = pool->entities.size / sizeof(Entity); if (num_entities == UNDEFINED) ++num_entities; i64 id = num_entities++; data.is_enabled = 1; pool->entities = chunk_resize(pool, pool->entities, num_entities * sizeof(Entity)); CHECK(id != UNDEFINED, "Undefined", UNDEFINED); CHECK(id < pool->entities.size / (i64) sizeof(Entity), "Buffer overflow", UNDEFINED); CHUNK(pool, pool->entities, Entity)[id] = data; return id; } void pool_remove(Pool *pool, i64 entity, u16 type) { CHECK(pool != NULL && pool->data != NULL, "Sanity",); i64 num_entities = pool->entities.size / sizeof(Entity); Entity *entities = CHUNK(pool, pool->entities, Entity); CHECK(entity > UNDEFINED, "Undefined",); CHECK(entity < num_entities, "Buffer overflow",); CHECK(entities[entity].is_enabled, "Entity already removed",); CHECK(entities[entity].type == type, "Invalid entity type",); entities[entity] = (Entity) {0}; } // ================================================================ // // * Semantic graph // // ================================================================ Node *node_by_id(Pool *pool, i64 id) { CHECK(pool != NULL && pool->data != NULL, "Sanity", NULL); i64 num_entities = pool->entities.size / sizeof(Entity); Entity *entities = CHUNK(pool, pool->entities, Entity); CHECK(id != UNDEFINED, "Undefined", &entities->node); CHECK(id > UNDEFINED && id < num_entities, "Buffer overflow", &entities->node); CHECK(entities[id].is_enabled, "Sanity", &entities->node); CHECK(entities[id].type == ENTITY_NODE, "Invalid entity type", &entities->node); return &entities[id].node; } Proc *proc_by_id(Pool *pool, i64 id) { CHECK(pool != NULL && pool->data != NULL, "Sanity", NULL); i64 num_entities = pool->entities.size / sizeof(Entity); Entity *entities = CHUNK(pool, pool->entities, Entity); CHECK(id != UNDEFINED, "Undefined", &entities->proc); CHECK(id > UNDEFINED && id < num_entities, "Buffer overflow", &entities->proc); CHECK(entities[id].is_enabled, "Sanity", &entities->proc); CHECK(entities[id].type == ENTITY_PROC, "Invalid entity type", &entities->proc); return &entities[id].proc; } Unit *unit_by_id(Pool *pool, i64 id) { CHECK(pool != NULL && pool->data != NULL, "Sanity", NULL); i64 num_entities = pool->entities.size / sizeof(Entity); Entity *entities = CHUNK(pool, pool->entities, Entity); CHECK(id != UNDEFINED, "Undefined", &entities->unit); CHECK(id > UNDEFINED && id < num_entities, "Buffer overflow", &entities->unit); CHECK(entities[id].is_enabled, "Sanity", &entities->unit); CHECK(entities[id].type == ENTITY_UNIT, "Invalid entity type", &entities->unit); return &entities[id].unit; } i64 node_init(Pool *pool, Node data) { data.index_in_proc = UNDEFINED; return pool_add(pool, (Entity) { .type = ENTITY_NODE, .node = data, }); } void node_destroy(Pool *pool, i64 node) { // TODO pool_remove(pool, node, ENTITY_NODE); } i64 node_address(Pool *pool, i64 node) { return node_init(pool, (Node) { .op = OP_ADDRESS, .ref = node, }); } i64 node_array_c8(Pool *pool, i64 size, c8 *data) { return node_init(pool, (Node) { .op = OP_I8, .lit = chunk_add(pool, size, data), }); } i64 node_ptr(Pool *pool, u64 address) { return node_init(pool, (Node) { .op = OP_PTR, .lit = chunk_add(pool, sizeof address, &address), }); } i64 node_i32(Pool *pool, i32 value) { return node_init(pool, (Node) { .op = OP_I32, .lit = chunk_add(pool, sizeof value, &value), }); } i64 node_i64(Pool *pool, i64 value) { return 
node_init(pool, (Node) { .op = OP_I64, .lit = chunk_add(pool, sizeof value, &value), }); } i64 node_add(Pool *pool, Var x, Var y) { return node_init(pool, (Node) { .op = OP_ADD, .bin_op = { x, y, }, }); } i64 node_sub(Pool *pool, Var x, Var y) { return node_init(pool, (Node) { .op = OP_SUB, .bin_op = { x, y, }, }); } u16 resolve_calling_convention(Pool *pool, i64 proc) { Proc *p = proc_by_id(pool, proc); CHECK(p != NULL, "No proc", CONV_UNKNOWN); if (p->convention == CONV_UNKNOWN) p->convention = CONV_CDECL; return p->convention; } i64 node_call(Pool *pool, i64 target_proc, i64 num_args, Var *args) { return node_init(pool, (Node) { .op = OP_CALL, .call = { .convention = resolve_calling_convention(pool, target_proc), .target_proc = target_proc, .args = chunk_add(pool, num_args * sizeof *args, args), }, }); } i64 node_call_by_name(Pool *pool, i64 name_size, c8 *name, i64 num_args, Var *args) { return node_init(pool, (Node) { .op = OP_CALL, .call = { .convention = CONV_CDECL, .target_proc = UNDEFINED, .target_name = chunk_add(pool, name_size, name), .args = chunk_add(pool, num_args * sizeof *args, args), }, }); } i64 node_if(Pool *pool, Var condition) { return node_init(pool, (Node) { .op = OP_IF, .if_ = { .condition = condition, }, }); } i64 node_ret(Pool *pool, i64 num_values, Var *values) { return node_init(pool, (Node) { .op = OP_RET, .ret = { .vals = chunk_add(pool, num_values * sizeof *values, values), }, }); } i64 node_phi(Pool *pool, i64 branch, i64 num_vals, Var *vals) { return node_init(pool, (Node) { .op = OP_PHI, .phi = { .branch = branch, .vals = chunk_add(pool, num_vals *sizeof *vals, vals), }, }); } i64 proc_init(Pool *pool) { return pool_add(pool, (Entity) { .type = ENTITY_PROC, .proc = (Proc) { .ret_index = UNDEFINED, .index_in_unit = UNDEFINED, }, }); } void proc_destroy(Pool *pool, i64 proc) { // TODO pool_remove(pool, proc, ENTITY_PROC); } void proc_set_convention(Pool *pool, i64 proc, u16 convention) { Proc *p = proc_by_id(pool, proc); CHECK(p != NULL, "No proc",); p->convention = convention; } void proc_set_name(Pool *pool, i64 proc, i64 name_size, c8 *name) { Proc *p = proc_by_id(pool, proc); CHECK(p != NULL, "No proc",); p->name = chunk_resize(pool, p->name, name_size); mem_cpy(chunk(pool, p->name), name, p->name.size); } void proc_node_add(Pool *pool, i64 proc, i64 node) { Proc *p = proc_by_id(pool, proc); Node *n = node_by_id(pool, node); CHECK(p != NULL, "No proc",); CHECK(n != NULL, "Sanity", ); CHECK(n->index_in_proc == UNDEFINED, "Internal",); i64 num_nodes = p->nodes.size / sizeof(i64); if (num_nodes == UNDEFINED) ++num_nodes; i64 index = num_nodes++; if (n->op == OP_RET) { // Only one return node is allowed. 
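// For example (illustrative): if `sum = node_add(pool, a, b)` and
// `ret = node_ret(pool, 1, &sum)` are added to an empty procedure in that
// order, they land at node list indices 1 and 2 (index 0 is skipped so that
// 0 keeps meaning UNDEFINED), and ret_index is set to 2 here; adding a
// second OP_RET node to the same procedure would trip the check below.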
CHECK(p->ret_index == UNDEFINED, "Internal",); p->ret_index = index; } n->index_in_proc = index; p->nodes = chunk_resize(pool, p->nodes, num_nodes * sizeof(i64)); CHECK(index != UNDEFINED, "Undefined",); CHECK(index < p->nodes.size / (i64) sizeof(i64), "Buffer overflow",); CHUNK(pool, p->nodes, i64)[index] = node; } void proc_node_remove(Pool *pool, i64 proc, i64 node) { Proc *p = proc_by_id(pool, proc); Node *n = node_by_id(pool, node); CHECK(p != NULL, "No proc",); CHECK(n != NULL, "Sanity", ); i64 *nodes = CHUNK(pool, p->nodes, i64); CHECK(n->index_in_proc != UNDEFINED, "Undefined",); CHECK(n->index_in_proc < p->nodes.size / (i64) sizeof(i64), "Buffer overflow",); CHECK(nodes[n->index_in_proc] == node, "Internal",); if (n->op == OP_RET) { CHECK(p->ret_index != UNDEFINED, "Internal",); p->ret_index = UNDEFINED; } nodes[n->index_in_proc] = UNDEFINED; n->index_in_proc = UNDEFINED; } i64 unit_init(Pool *pool, u16 type) { return pool_add(pool, (Entity) { .type = ENTITY_UNIT, .unit = (Unit) { .type = type, .entry_point_index = UNDEFINED, } }); } void unit_destroy(Pool *pool, i64 unit) { pool_remove(pool, unit, ENTITY_UNIT); } void unit_proc_add(Pool *pool, i64 unit, i64 proc) { Unit *u = unit_by_id(pool, unit); Proc *p = proc_by_id(pool, proc); CHECK(u != NULL, "No unit",); CHECK(p != NULL, "No proc",); CHECK(p->index_in_unit == UNDEFINED, "Internal",); i64 num_procs = u->procs.size / sizeof(i64); i64 *procs = CHUNK(pool, u->procs, i64); CHECK(procs != NULL, "Internal",); for (i64 i = 1; i < num_procs; ++i) if (procs[i] == proc) return; if (num_procs == UNDEFINED) ++num_procs; i64 index = num_procs++; p->index_in_unit = index; u->procs = chunk_resize(pool, u->procs, num_procs * sizeof(i64)); CHECK(index != UNDEFINED, "Undefined",); CHECK(index < u->procs.size / (i64) sizeof(i64), "Buffer overflow",); CHUNK(pool, u->procs, i64)[index] = proc; } void unit_proc_remove(Pool *pool, i64 unit, i64 proc) { Unit *u = unit_by_id(pool, unit); Proc *p = proc_by_id(pool, proc); CHECK(u != NULL, "No unit",); CHECK(p != NULL, "No proc",); i64 *procs = CHUNK(pool, u->procs, i64); CHECK(p->index_in_unit != UNDEFINED, "Undefined",); CHECK(p->index_in_unit < u->procs.size / (i64) sizeof(i64), "Buffer overflow",); CHECK(procs[p->index_in_unit] == proc, "Internal",); if (u->entry_point_index == p->index_in_unit) u->entry_point_index = UNDEFINED; procs[p->index_in_unit] = UNDEFINED; p->index_in_unit = UNDEFINED; } void unit_link_add(Pool *pool, i64 unit, i64 link_unit) { Unit *u = unit_by_id(pool, unit); CHECK(u != NULL, "No unit",); i64 num_links = u->links.size / sizeof(i64); i64 *links = CHUNK(pool, u->links, i64); CHECK(links != NULL, "Internal",); for (i64 i = 1; i < num_links; ++i) if (links[i] == link_unit) return; if (num_links == UNDEFINED) ++num_links; i64 index = num_links++; u->links = chunk_resize(pool, u->links, num_links * sizeof(i64)); CHECK(index != UNDEFINED, "Undefined",); CHECK(index < u->links.size / (i64) sizeof(i64), "Buffer overflow",); CHUNK(pool, u->links, i64)[index] = link_unit; } void unit_link_remove(Pool *pool, i64 unit, i64 link_unit) { Unit *u = unit_by_id(pool, unit); CHECK(u != NULL, "No unit",); i64 num_links = u->links.size / sizeof(i64); i64 *links = CHUNK(pool, u->links, i64); for (i64 i = 1; i < num_links; ++i) if (links[i] == link_unit) { links[i] = UNDEFINED; return; } FAIL("Link not found",); } void unit_set_name(Pool *pool, i64 unit, i64 name_size, c8 *name) { Unit *u = unit_by_id(pool, unit); CHECK(u != NULL, "No unit",); u->name = chunk_resize(pool, u->name, 
name_size); CHECK(u->name.size == name_size, "Internal",); mem_cpy(CHUNK(pool, u->name, c8), name, u->name.size); } void unit_set_entry_point(Pool *pool, i64 unit, i64 entry_point_proc) { Unit *u = unit_by_id(pool, unit); CHECK(u != NULL, "No unit",); if (entry_point_proc == UNDEFINED) { u->entry_point_index = UNDEFINED; return; } Proc *p = proc_by_id(pool, entry_point_proc); CHECK(p != NULL, "No proc",); CHECK(p->index_in_unit != UNDEFINED, "Internal",); CHECK(p->index_in_unit < u->procs.size / (i64) sizeof(i64), "Buffer overflow",); CHECK(CHUNK(pool, u->procs, i64)[p->index_in_unit] == entry_point_proc, "Internal",); u->entry_point_index = p->index_in_unit; } // ================================================================ // // * Serialization // // ---------------------------------------------------------------- // // Terms // // LE = little endian // BE = big endian // HO = host ordering // // byte = 8 bits // word = 2 bytes // dword = 4 bytes // qword = 8 bytes // // ================================================================ enum { BIT_LE = 0, BIT_BE = 1, BIT_ORDER_MASK = 1, BYTE_LE = 0, BYTE_BE = 2, BYTE_ORDER_MASK = 2, WORD_LE = 0, WORD_BE = 4, WORD_ORDER_MASK = 4, DWORD_LE = 0, DWORD_BE = 8, DWORD_ORDER_MASK = 8, F64_DWORD_LE = 0, F64_DWORD_BE = 16, F64_DWORD_ORDER_MASK = 16, LE = BIT_LE | BYTE_LE | WORD_LE | DWORD_LE | F64_DWORD_LE, BE = BIT_BE | BYTE_BE | WORD_BE | DWORD_BE | F64_DWORD_BE, }; typedef struct { unsigned first:1; } Bits; u32 host_bit_order(void) { if ((*(Bits *) &(u8) { 1 }).first == 1) return BIT_LE; return BIT_BE; } u32 host_byte_order(void) { if (((u8 *) &(u32) { 1 })[0] == 1) return BYTE_LE; return BYTE_BE; } u32 host_word_order(void) { if (((u16 *) &(u32) { 0x100 })[0] == 0x100) return WORD_LE; return WORD_BE; } u32 host_dword_order(void) { if (((u32 *) &(u64) { 0x10000 })[0] == 0x10000) return DWORD_LE; return DWORD_BE; } void check_f32_format(void) { // FIXME if ((*(u64 *) &(f64) { -1.4575323640233e-306 } & 0xffffffffull) == 0x40301fcbull) return; if ((*(u64 *) &(f64) { -1.4575323640233e-306 } & 0xffffffff00000000ull) == 0x40301fcb00000000ull) return; FAIL("Unknown host floating-point number format",); } u32 host_f64_dword_order(void) { if ((*(u64 *) &(f64) { -1.4575323640233e-306 } & 0xffffffffull) == 0x40301fcbull) return host_dword_order() == DWORD_LE ? F64_DWORD_LE : F64_DWORD_BE; if ((*(u64 *) &(f64) { -1.4575323640233e-306 } & 0xffffffff00000000ull) == 0x40301fcb00000000ull) return host_dword_order() == DWORD_LE ? 
F64_DWORD_BE : F64_DWORD_LE; FAIL("Unknown host floating-point number format", 0); } u32 host_data_ordering(void) { return host_bit_order() | host_byte_order() | host_word_order() | host_dword_order() | host_f64_dword_order(); } u8 read_u8(u32 ordering, u8 *v, u8 *v_end) { CHECK(v != NULL, "Sanity", 0); CHECK(v < v_end, "Buffer overflow", 0); if ((ordering & BIT_ORDER_MASK) == host_bit_order()) return *v; return ((*v >> 7) & 1) | (((*v >> 6) & 1) << 1) | (((*v >> 5) & 1) << 2) | (((*v >> 4) & 1) << 3) | (((*v >> 3) & 1) << 4) | (((*v >> 2) & 1) << 5) | (((*v >> 1) & 1) << 6) | (((*v) & 1) << 7); } u16 read_u16(u32 ordering, u8 *v, u8 *v_end) { CHECK(v != NULL, "Sanity", 0); CHECK(v + 2 <= v_end, "Buffer overflow", 0); u16 x; if ((ordering & BIT_ORDER_MASK) == host_bit_order() && (ordering & BYTE_ORDER_MASK) == host_byte_order()) mem_cpy(&x, v, 2); else if ((ordering & BYTE_ORDER_MASK) == host_byte_order()) x = ((u16) read_u8(ordering, v, v_end)) | (((u16) read_u8(ordering, v + 1, v_end)) << 8); else x = ((u16) read_u8(ordering, v + 1, v_end)) | (((u16) read_u8(ordering, v, v_end)) << 8); return x; } u32 read_u32(u32 ordering, u8 *v, u8 *v_end) { CHECK(v != NULL, "Sanity", 0); CHECK(v + 4 <= v_end, "Buffer overflow", 0); u32 x; if ((ordering & BIT_ORDER_MASK) == host_bit_order() && (ordering & BYTE_ORDER_MASK) == host_byte_order() && (ordering & WORD_ORDER_MASK) == host_word_order()) mem_cpy(&x, v, 4); else if ((ordering & WORD_ORDER_MASK) == host_word_order()) x = ((u32) read_u16(ordering, v, v_end)) | (((u32) read_u16(ordering, v + 2, v_end)) << 16); else x = ((u32) read_u16(ordering, v + 2, v_end)) | (((u32) read_u16(ordering, v, v_end)) << 16); return x; } u64 read_u64(u32 ordering, u8 *v, u8 *v_end) { CHECK(v != NULL, "Sanity", 0); CHECK(v + 8 <= v_end, "Buffer overflow", 0); u64 x; if ((ordering & BIT_ORDER_MASK) == host_bit_order() && (ordering & BYTE_ORDER_MASK) == host_byte_order() && (ordering & WORD_ORDER_MASK) == host_word_order() && (ordering & DWORD_ORDER_MASK) == host_dword_order()) mem_cpy(&x, v, 8); else if ((ordering & DWORD_ORDER_MASK) == host_dword_order()) x = ((u64) read_u32(ordering, v, v_end)) | (((u64) read_u32(ordering, v + 4, v_end)) << 32); else x = ((u64) read_u32(ordering, v + 4, v_end)) | (((u64) read_u32(ordering, v, v_end)) << 32); return x; } void write_u8(u8 ordering, u8 x, u8 *v, u8 *v_end) { CHECK(v != NULL, "Sanity",); CHECK(v < v_end, "Buffer overflow",); if ((ordering & BIT_ORDER_MASK) == host_bit_order()) *v = x; else *v = ((x >> 7) & 1) | (((x >> 6) & 1) << 1) | (((x >> 5) & 1) << 2) | (((x >> 4) & 1) << 3) | (((x >> 3) & 1) << 4) | (((x >> 2) & 1) << 5) | (((x >> 1) & 1) << 6) | (((x) & 1) << 7); } void write_u16(u32 ordering, u16 x, u8 *v, u8 *v_end) { CHECK(v != NULL, "Sanity",); CHECK(v + 2 <= v_end, "Buffer overflow",); if ((ordering & BIT_ORDER_MASK) == host_bit_order() && (ordering & BYTE_ORDER_MASK) == host_byte_order()) mem_cpy(v, &x, 2); else if ((ordering & BYTE_ORDER_MASK) == host_byte_order()) { write_u8(ordering, (u8) ( x & 0xff), v, v_end); write_u8(ordering, (u8) ((x >> 8) & 0xff), v + 1, v_end); } else { write_u8(ordering, (u8) ( x & 0xff), v + 1, v_end); write_u8(ordering, (u8) ((x >> 8) & 0xff), v, v_end); } } void write_u32(u32 ordering, u32 x, u8 *v, u8 *v_end) { CHECK(v != NULL, "Sanity",); CHECK(v + 4 <= v_end, "Buffer overflow",); if ((ordering & BIT_ORDER_MASK) == host_bit_order() && (ordering & BYTE_ORDER_MASK) == host_byte_order() && (ordering & WORD_ORDER_MASK) == host_word_order()) mem_cpy(v, &x, 4); else if ((ordering & 
WORD_ORDER_MASK) == host_word_order()) {
    write_u16(ordering, (u16) ( x        & 0xffff), v,     v_end);
    write_u16(ordering, (u16) ((x >> 16) & 0xffff), v + 2, v_end);
  } else {
    write_u16(ordering, (u16) ( x        & 0xffff), v + 2, v_end);
    write_u16(ordering, (u16) ((x >> 16) & 0xffff), v,     v_end);
  }
}

void write_u64(u32 ordering, u64 x, u8 *v, u8 *v_end) {
  CHECK(v != NULL, "Sanity",);
  CHECK(v + 8 <= v_end, "Buffer overflow",);

  if ((ordering & BIT_ORDER_MASK)   == host_bit_order()  &&
      (ordering & BYTE_ORDER_MASK)  == host_byte_order() &&
      (ordering & WORD_ORDER_MASK)  == host_word_order() &&
      (ordering & DWORD_ORDER_MASK) == host_dword_order())
    mem_cpy(v, &x, 8);
  else if ((ordering & DWORD_ORDER_MASK) == host_dword_order()) {
    write_u32(ordering, (u32) ( x        & 0xffffffffull), v,     v_end);
    write_u32(ordering, (u32) ((x >> 32) & 0xffffffffull), v + 4, v_end);
  } else {
    write_u32(ordering, (u32) ( x        & 0xffffffffull), v + 4, v_end);
    write_u32(ordering, (u32) ((x >> 32) & 0xffffffffull), v,     v_end);
  }
}

i16 read_i8 (u32 ordering, void *v, void *v_end) { return (i8)  read_u8 (ordering, v, v_end); }
i16 read_i16(u32 ordering, void *v, void *v_end) { return (i16) read_u16(ordering, v, v_end); }
i32 read_i32(u32 ordering, void *v, void *v_end) { return (i32) read_u32(ordering, v, v_end); }
i64 read_i64(u32 ordering, void *v, void *v_end) { return (i64) read_u64(ordering, v, v_end); }

f32 read_f32(u32 ordering, void *v, void *v_end) {
  check_f32_format();
  return *(f32 *) &(u32) { read_u32(ordering, v, v_end) };
}

f64 read_f64(u32 ordering, void *v, void *v_end) {
  u64 x = read_u64(ordering, v, v_end);
  if ((ordering & F64_DWORD_ORDER_MASK) != host_f64_dword_order())
    x = ((x & 0xffffffffull) << 32) | ((x >> 32) & 0xffffffffull);
  void *p = &x;
  return *(f64 *) p;
}

void write_i8 (u32 ordering, i8  x, void *v, void *v_end) { write_u8 (ordering, (u8)  x, v, v_end); }
void write_i16(u32 ordering, i16 x, void *v, void *v_end) { write_u16(ordering, (u16) x, v, v_end); }
void write_i32(u32 ordering, i32 x, void *v, void *v_end) { write_u32(ordering, (u32) x, v, v_end); }
void write_i64(u32 ordering, i64 x, void *v, void *v_end) { write_u64(ordering, (u64) x, v, v_end); }

void write_f32(u32 ordering, f32 x, void *v, void *v_end) {
  check_f32_format();
  void *p = &x;
  write_u32(ordering, *(u32 *) p, v, v_end);
}

void write_f64(u32 ordering, f64 x, void *v, void *v_end) {
  void *p = &x;
  if ((ordering & F64_DWORD_ORDER_MASK) == host_f64_dword_order())
    write_u64(ordering, *(u64 *) p, v, v_end);
  else {
    write_u32(ordering, *(((u32 *) p) + 1), (u8 *) v,       v_end);
    write_u32(ordering, * (u32 *)  p,       ((u8 *) v) + 4, v_end);
  }
}

// Shortcuts

#define HO host_data_ordering()

// ================================================================
//
//  * Code generation and linking
//
// ----------------------------------------------------------------
//
//  Docs and helpful materials
//
//  AR  https://man.freebsd.org/cgi/man.cgi?query=ar&sektion=5
//  ELF https://man7.org/linux/man-pages/man5/elf.5.html
//
//  https://fasterthanli.me/series/making-our-own-executable-packer
//
//  Relocation types
//  https://intezer.com/blog/malware-analysis/executable-and-linkable-format-101-part-3-relocations/
//  https://docs.oracle.com/cd/E19120-01/open.solaris/819-0690/chapter7-2/index.html
//
//  Thread-local storage
//  https://maskray.me/blog/2021-02-14-all-about-thread-local-storage
//
//  tinycc impl
//  https://repo.or.cz/tinycc.git/blob/HEAD:/x86_64-link.c
//  https://repo.or.cz/tinycc.git/blob/HEAD:/tccelf.c
//
//  Online assembler
//  https://defuse.ca/online-x86-assembler.htm
//
https://shell-storm.org/online/Online-Assembler-and-Disassembler/ // // Linux syscall // https://man7.org/linux/man-pages/man2/intro.2.html // https://man7.org/linux/man-pages/man2/syscalls.2.html // https://man7.org/linux/man-pages/man2/syscall.2.html // // Linker scripts // https://home.cs.colorado.edu/~main/cs1300/doc/gnu/ld_3.html // // ---------------------------------------------------------------- // // TODO Experiment with mapping several p_vaddr into one p_paddr. // // ================================================================ enum { OS_Unknown = 0, OS_Unix, OS_Linux, OS_Windows, OS_macOS, OS_Cygwin, #if defined(__CYGWIN__) HOST_OS = OS_Cygwin, #elif defined(_WIN32) HOST_OS = OS_Windows, #elif defined(__linux__) HOST_OS = OS_Linux, #elif defined(__APPLE__) HOST_OS = OS_macOS, #elif defined(__unix__) HOST_OS = OS_Unix, #else HOST_OS = OS_Unknown, #endif #if defined(__x86_64) || defined(__amd_64) || defined(_M_AMD64) HOST_ARCH = ARCH_X86_64, #elif defined(__i386) || defined(_M_I86) || defined(_X86_) HOST_ARCH = ARCH_I386, #elif defined(__aarch64__) HOST_ARCH = ARCH_ARM64, #elif defined(__arm__) || defined(__thumb__) || defined(__M_ARM) || defined(__M_ARMT) HOST_ARCH = ARCH_ARM32, #else HOST_ARCH = ARCH_UNKNOWN, #endif // x86_64 constants // X86_64_BASE_ADDRESS = 0x400000, X86_64_ALIGNMENT = 8, // NOTE For compatibility with Box64 on ARM X86_64_PAGE_SIZE = 16 * 1024, // 4 * 1024 // ELF format constants // ELF_64 = 2, ELF_2_LE = 1, ELF_VERSION = 1, ELF_SYS_V = 0, ELF_LINUX = 3, ELF_ABI_VERSION = 0, ELF_RELOCATABLE = 1, ELF_EXECUTABLE = 2, ELF_AARCH64 = 183, ELF_X86_64 = 62, ELF_HEADER_SIZE = 64, ELF_PROGRAM_HEADER_SIZE = 56, ELF_SECTION_HEADER_SIZE = 64, ELF_SYMBOL_ENTRY_SIZE = 24, ELF_REL_ENTRY_SIZE = 16, ELF_RELA_ENTRY_SIZE = 24, ELF_GOT_ENTRY_SIZE = 8, ELF_DYNAMIC_ENTRY_SIZE = 16, SEC_NONE = 0, SEC_PROGBITS, SEC_SYMTAB, SEC_STRTAB, SEC_RELA, SEC_HASH, SEC_DYNAMIC, SEC_NOTE, SEC_NOBITS, SEC_REL, SEC_SHLIB, SEC_DYNSYM, SEC_INIT_ARRAY = 14, SEC_FINI_ARRAY, SEC_PREINIT_ARRAY, SEC_GROUP, SEC_SYMTAB_SHNDX, SYM_NONE = 0, SYM_PROC, SYM_DATA, SYM_COMMON, SYM_TLS, SYM_SECTION, SYM_SPECIFIC, BIND_LOCAL = 0, BIND_GLOBAL, BIND_WEAK, // Relocation types // R_X86_64_NONE = 0, R_X86_64_64, R_X86_64_PC32, R_X86_64_GOT32, R_X86_64_PLT32, R_X86_64_COPY, R_X86_64_GLOB_DAT, R_X86_64_JUMP_SLOT, R_X86_64_RELATIVE, R_X86_64_GOTPCREL, R_X86_64_32, R_X86_64_32S, R_X86_64_16, R_X86_64_PC16, R_X86_64_8, R_X86_64_PC8, R_X86_64_DTPMOD64, R_X86_64_DTPOFF64, R_X86_64_TPOFF64, R_X86_64_TLSGD, R_X86_64_TLSLD, R_X86_64_DTPOFF32, R_X86_64_GOTTPOFF, R_X86_64_TPOFF32, R_X86_64_PC64, R_X86_64_GOTOFF64, R_X86_64_GOTPC32, R_X86_64_GOT64, R_X86_64_GOTPCREL64, R_X86_64_GOTPC64, R_X86_64_GOTPLT64, R_X86_64_PLTOFF64, R_X86_64_SIZE32, R_X86_64_SIZE64, R_X86_64_GOTPC32_TLSDESC, R_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC, R_X86_64_IRELATIVE, R_X86_64_RELATIVE64, R_X86_64_GOTPCRELX = 41, R_X86_64_REX_GOTPCRELX, // Codegen context EMIT_ENTRY_PROC = 1, }; c8 *SEC_TYPE_NAMES[] = { [SEC_NONE] = "none", [SEC_PROGBITS] = "progbits", [SEC_SYMTAB] = "symtab", [SEC_STRTAB] = "strtab", [SEC_RELA] = "rela", [SEC_HASH] = "hash", [SEC_DYNAMIC] = "dynamic", [SEC_NOTE] = "note", [SEC_NOBITS] = "nobits", [SEC_REL] = "rel", [SEC_SHLIB] = "shlib", [SEC_DYNSYM] = "dynsym", [12] = "", [13] = "", [SEC_INIT_ARRAY] = "init array", [SEC_FINI_ARRAY] = "fini array", [SEC_PREINIT_ARRAY] = "preinit array", [SEC_GROUP] = "group", [SEC_SYMTAB_SHNDX] = "symtab shndx", }; c8 *SYM_TYPE_NAMES[] = { [SYM_NONE] = "none", [SYM_PROC] = "proc", [SYM_DATA] = "data", 
[SYM_COMMON]   = "common",
  [SYM_TLS]      = "tls",
  [SYM_SECTION]  = "section",
  [SYM_SPECIFIC] = "spec",
};

c8 *BIND_TYPE_NAMES[] = {
  [BIND_LOCAL]  = "local",
  [BIND_GLOBAL] = "global",
  [BIND_WEAK]   = "weak",
};

c8 *REL_NAMES[] = {
  [R_X86_64_NONE]            = "none",
  [R_X86_64_64]              = "64",
  [R_X86_64_PC32]            = "pc32",
  [R_X86_64_GOT32]           = "got32",
  [R_X86_64_PLT32]           = "plt32",
  [R_X86_64_COPY]            = "copy",
  [R_X86_64_GLOB_DAT]        = "glob dat",
  [R_X86_64_JUMP_SLOT]       = "jump slot",
  [R_X86_64_RELATIVE]        = "relative",
  [R_X86_64_GOTPCREL]        = "gotpcrel",
  [R_X86_64_32]              = "32",
  [R_X86_64_32S]             = "32s",
  [R_X86_64_16]              = "16",
  [R_X86_64_PC16]            = "pc16",
  [R_X86_64_8]               = "8",
  [R_X86_64_PC8]             = "pc8",
  [R_X86_64_DTPMOD64]        = "dtpmod64",
  [R_X86_64_DTPOFF64]        = "dtpoff64",
  [R_X86_64_TPOFF64]         = "tpoff64",
  [R_X86_64_TLSGD]           = "tlsgd",
  [R_X86_64_TLSLD]           = "tlsld",
  [R_X86_64_DTPOFF32]        = "dtpoff32",
  [R_X86_64_GOTTPOFF]        = "gottpoff",
  [R_X86_64_TPOFF32]         = "tpoff32",
  [R_X86_64_PC64]            = "pc64",
  [R_X86_64_GOTOFF64]        = "gotoff64",
  [R_X86_64_GOTPC32]         = "gotpc32",
  [R_X86_64_GOT64]           = "got64",
  [R_X86_64_GOTPCREL64]      = "gotpcrel64",
  [R_X86_64_GOTPC64]         = "gotpc64",
  [R_X86_64_GOTPLT64]        = "gotplt64",
  [R_X86_64_PLTOFF64]        = "pltoff64",
  [R_X86_64_SIZE32]          = "size32",
  [R_X86_64_SIZE64]          = "size64",
  [R_X86_64_GOTPC32_TLSDESC] = "gotpc32 tlsdesc",
  [R_X86_64_TLSDESC_CALL]    = "tlsdesc call",
  [R_X86_64_TLSDESC]         = "tlsdesc",
  [R_X86_64_IRELATIVE]       = "irelative",
  [R_X86_64_RELATIVE64]      = "relative64",
  [R_X86_64_GOTPCRELX]       = "gotpcrelx",
  [R_X86_64_REX_GOTPCRELX]   = "gotpcrelx",
};

c8 ELF_MAGIC[4] = "\x7f" "ELF";
c8 AR_MAGIC[8]  = "!<arch>\n";

c8 AR_SYMBOL_TABLE[] = "/ ";
c8 AR_STRING_TABLE[] = "// ";

c8 SECTION_SYMTAB[] = ".symtab";
c8 SECTION_STRTAB[] = ".strtab";
c8 SECTION_GOT[]    = ".got";
c8 SECTION_PLT[]    = ".plt";
c8 SECTION_INIT[]   = ".init";
c8 SECTION_FINI[]   = ".fini";

typedef struct {
  i64 offset;
  i64 size;
} Offset_Size;

typedef struct {
  u8 *        begin;
  u8 *        end;
  Offset_Size elf;
  i64         obj_index;
} Buffer_Context;

typedef struct {
  i64 offset;
  i64 num;
} Offset_Num;

typedef struct {
  Offset_Size name;
  u32         type;
  b8          alloc;
  b8          write;
  b8          exec;
  b8          tls;
  i64         alignment;
  i64         entry_size;
  i64         num_entries;
  Offset_Size data;
} Elf_Section_Header;

typedef struct {
  Offset_Size name;
  u8          type;
  u8          bind;
  i64         section;
  Offset_Size value;
  i64         obj_index;
} Elf_Symbol_Entry;

typedef struct {
  Elf_Symbol_Entry symbol;
  i64              offset;
  u32              type;
  i64              addent;
} Elf_Relx_Entry;

// ================================================================
//
//  Codegen
//
// ================================================================

enum {
  EAX = 1, EBX, ECX, EDX, ESI, EDI,

  MOV = 1, ADD, SUB,
};

void codegen_add_rel(Pool *pool, Codegen_Context *codegen, Codegen_Rel_Entry rel) {
  CHECK(pool != NULL, "Sanity",);
  CHECK(codegen != NULL, "Sanity",);

  i64 n = codegen->rels.size / sizeof(Codegen_Rel_Entry);
  codegen->rels = chunk_resize(pool, codegen->rels, (n + 1) * sizeof(Codegen_Rel_Entry));
  CHECK(n < codegen->rels.size / (i64) sizeof(Codegen_Rel_Entry), "Buffer overflow",);

  Codegen_Rel_Entry *rels = CHUNK(pool, codegen->rels, Codegen_Rel_Entry);
  CHECK(rels != NULL, "Internal",);

  rels[n] = rel;
}

b8 x86_64_emit_i32_mov_reg_val(
  Pool *           pool,
  Codegen_Context *codegen,
  i64              dst,
  u32              src
) {
  CHECK(pool != NULL, "Sanity", 0);
  CHECK(codegen != NULL, "Sanity", 0);

  u8 *begin = CHUNK(pool, codegen->buffer_code, u8) + codegen->offset_code;
  u8 *end   = begin + codegen->buffer_code.size;

  switch (dst) {
    case EAX: write_u8 (LE, 0xb8, begin, end); break;
    case EBX: write_u8 (LE, 0xbb, begin, end); break;
    case ECX: write_u8 (LE, 0xb9, begin, end); break;
    case EDX:
write_u8 (LE, 0xba, begin, end); break; case ESI: write_u8 (LE, 0xbe, begin, end); break; case EDI: write_u8 (LE, 0xbf, begin, end); break; default: FAIL("Not implemented", 0); } write_u32(LE, src, begin + 1, end); codegen->offset_code += 5; return 1; } b8 x86_64_emit_i32_add_reg_val( Pool * pool, Codegen_Context *codegen, i64 dst, u32 src ) { CHECK(pool != NULL, "Sanity", 0); CHECK(codegen != NULL, "Sanity", 0); u8 *begin = CHUNK(pool, codegen->buffer_code, u8) + codegen->offset_code; u8 *end = begin + codegen->buffer_code.size; if (dst == EAX) { write_u8 (LE, 0x05, begin, end); write_u32(LE, src, begin + 1, end); codegen->offset_code += 5; return 1; } write_u8(LE, 0x81, begin, end); switch (dst) { case EBX: write_u8 (LE, 0xc3, begin + 1, end); break; case ECX: write_u8 (LE, 0xc1, begin + 1, end); break; case EDX: write_u8 (LE, 0xc2, begin + 1, end); break; case ESI: write_u8 (LE, 0xc6, begin + 1, end); break; case EDI: write_u8 (LE, 0xc7, begin + 1, end); break; default: FAIL("Not implemented", 0); } write_u32(LE, src, begin + 2, end); codegen->offset_code += 6; return 1; } b8 x86_64_emit_i32_sub_reg_val( Pool * pool, Codegen_Context *codegen, i64 dst, u32 src ) { CHECK(pool != NULL, "Sanity", 0); CHECK(codegen != NULL, "Sanity", 0); u8 *begin = CHUNK(pool, codegen->buffer_code, u8) + codegen->offset_code; u8 *end = begin + codegen->buffer_code.size; if (dst == EAX) { write_u8 (LE, 0x2d, begin, end); write_u32(LE, src, begin + 1, end); codegen->offset_code += 5; return 1; } write_u8(LE, 0x81, begin, end); switch (dst) { case EBX: write_u8(LE, 0xeb, begin + 1, end); break; case ECX: write_u8(LE, 0xe9, begin + 1, end); break; case EDX: write_u8(LE, 0xea, begin + 1, end); break; case ESI: write_u8(LE, 0xee, begin + 1, end); break; case EDI: write_u8(LE, 0xef, begin + 1, end); break; default: FAIL("Not implemented", 0); } write_u32(LE, src, begin + 2, end); codegen->offset_code += 6; return 1; } b8 x86_64_emit_cmp_reg_zero( Pool * pool, Codegen_Context *codegen, i64 dst ) { CHECK(pool != NULL, "Sanity", 0); CHECK(codegen != NULL, "Sanity", 0); u8 *begin = CHUNK(pool, codegen->buffer_code, u8) + codegen->offset_code; u8 *end = begin + codegen->buffer_code.size; write_u8(LE, 0x83, begin, end); switch (dst) { case EAX: write_u8(LE, 0xf8, begin + 1, end); break; case EBX: write_u8(LE, 0xfb, begin + 1, end); break; case ECX: write_u8(LE, 0xf9, begin + 1, end); break; case EDX: write_u8(LE, 0xfa, begin + 1, end); break; case ESI: write_u8(LE, 0xfe, begin + 1, end); break; case EDI: write_u8(LE, 0xff, begin + 1, end); break; default: FAIL("Not implemented", 0); } write_u8(LE, 0, begin + 2, end); codegen->offset_code += 3; return 1; } b8 x86_64_emit_i32_op_reg_args( Pool * pool, Codegen_Context *codegen, i64 dst, i64 src ) { CHECK(pool != NULL, "Sanity", 0); CHECK(codegen != NULL, "Sanity", 0); u8 *begin = CHUNK(pool, codegen->buffer_code, u8) + codegen->offset_code; u8 *end = begin + codegen->buffer_code.size; switch (dst) { case EAX: switch (src) { case EAX: write_u8(LE, 0xc0, begin, end); break; case EBX: write_u8(LE, 0xd8, begin, end); break; case ECX: write_u8(LE, 0xc8, begin, end); break; case EDX: write_u8(LE, 0xd0, begin, end); break; case ESI: write_u8(LE, 0xf0, begin, end); break; case EDI: write_u8(LE, 0xf8, begin, end); break; default: FAIL("Not implemented", 0); } break; case EBX: switch (src) { case EAX: write_u8(LE, 0xc3, begin, end); break; case EBX: write_u8(LE, 0xd3, begin, end); break; case ECX: write_u8(LE, 0xcb, begin, end); break; case EDX: write_u8(LE, 0xd3, begin, end); 
break; case ESI: write_u8(LE, 0xf3, begin, end); break; case EDI: write_u8(LE, 0xfb, begin, end); break; default: FAIL("Not implemented", 0); } break; case ECX: switch (src) { case EAX: write_u8(LE, 0xc1, begin, end); break; case EBX: write_u8(LE, 0xd9, begin, end); break; case ECX: write_u8(LE, 0xc9, begin, end); break; case EDX: write_u8(LE, 0xd1, begin, end); break; case ESI: write_u8(LE, 0xf1, begin, end); break; case EDI: write_u8(LE, 0xf9, begin, end); break; default: FAIL("Not implemented", 0); } break; case EDX: switch (src) { case EAX: write_u8(LE, 0xc2, begin, end); break; case EBX: write_u8(LE, 0xda, begin, end); break; case ECX: write_u8(LE, 0xca, begin, end); break; case EDX: write_u8(LE, 0xd2, begin, end); break; case ESI: write_u8(LE, 0xf2, begin, end); break; case EDI: write_u8(LE, 0xfa, begin, end); break; default: FAIL("Not implemented", 0); } break; case ESI: switch (src) { case EAX: write_u8(LE, 0xc6, begin, end); break; case EBX: write_u8(LE, 0xde, begin, end); break; case ECX: write_u8(LE, 0xce, begin, end); break; case EDX: write_u8(LE, 0xd6, begin, end); break; case ESI: write_u8(LE, 0xf6, begin, end); break; case EDI: write_u8(LE, 0xfe, begin, end); break; default: FAIL("Not implemented", 0); } break; case EDI: switch (src) { case EAX: write_u8(LE, 0xc7, begin, end); break; case EBX: write_u8(LE, 0xdf, begin, end); break; case ECX: write_u8(LE, 0xcf, begin, end); break; case EDX: write_u8(LE, 0xd7, begin, end); break; case ESI: write_u8(LE, 0xf7, begin, end); break; case EDI: write_u8(LE, 0xff, begin, end); break; default: FAIL("Not implemented", 0); } break; default: FAIL("Not implemented", 0); } ++codegen->offset_code; return 1; } b8 x86_64_emit_i32_op_reg_val( Pool * pool, Codegen_Context *codegen, i64 op, i64 dst, u32 src ) { switch (op) { case MOV: return x86_64_emit_i32_mov_reg_val(pool, codegen, dst, src); case ADD: return x86_64_emit_i32_add_reg_val(pool, codegen, dst, src); case SUB: return x86_64_emit_i32_sub_reg_val(pool, codegen, dst, src); default:; } FAIL("Sanity", 0); } b8 x86_64_emit_i32_op_reg_reg( Pool * pool, Codegen_Context *codegen, i64 op, i64 dst, i64 src ) { CHECK(pool != NULL, "Sanity", 0); CHECK(codegen != NULL, "Sanity", 0); u8 *begin = CHUNK(pool, codegen->buffer_code, u8) + codegen->offset_code; u8 *end = begin + codegen->buffer_code.size; if (dst == src) return 1; switch (op) { case MOV: write_u8(LE, 0x89, begin, end); break; case ADD: write_u8(LE, 0x01, begin, end); break; case SUB: write_u8(LE, 0x29, begin, end); break; default: FAIL("Not implemented", 0); } ++codegen->offset_code; return x86_64_emit_i32_op_reg_args(pool, codegen, dst, src); } b8 x86_64_emit_node( Pool * pool, Codegen_Context *codegen, Codegen_Entity *proc, i64 node, u32 context ) { Node *n = node_by_id(pool, node); CHECK(n != NULL, "Sanity", 0); codegen->buffer_code = chunk_resize(pool, codegen->buffer_code, codegen->offset_code + 256); CHECK(codegen->buffer_code.size == codegen->offset_code + 256, "Buffer overflow", 0); i64 num_entities = pool->entities.size / (i64) sizeof(Entity); if (codegen->entities.size / (i64) sizeof(Codegen_Entity) != num_entities) codegen->entities = chunk_resize(pool, codegen->entities, num_entities * sizeof(Codegen_Entity)); CHECK(num_entities == codegen->entities.size / (i64) sizeof(Codegen_Entity), "Buffer overflow", 0); Codegen_Entity *entities = CHUNK(pool, codegen->entities, Codegen_Entity); CHECK(entities != NULL, "Internal", 0); Codegen_Entity *dst = entities + node; switch (n->op) { case OP_PTR: case OP_I8: case OP_I32: case 
OP_I64: case OP_ADDRESS: // Do nothing break; case OP_ADD: case OP_SUB: { // NOTE Assuming static single-assignment form. Node * x_n = node_by_id(pool, n->bin_op[0]); Node * y_n = node_by_id(pool, n->bin_op[1]); Codegen_Entity *x = entities + n->bin_op[0]; Codegen_Entity *y = entities + n->bin_op[1]; CHECK(x_n != NULL, "Sanity", 0); CHECK(y_n != NULL, "Sanity", 0); CHECK(x != NULL, "Sanity", 0); CHECK(y != NULL, "Sanity", 0); dst->reg = 1; while (dst->reg <= 6 && ((1 << (dst->reg - 1)) & proc->occupied_reg) != 0) ++dst->reg; i64 op = n->op == OP_ADD ? ADD : SUB; switch (x_n->op) { case OP_I32: { u32 val = CHUNK(pool, x_n->lit, u32)[0]; if (!x86_64_emit_i32_op_reg_val(pool, codegen, MOV, dst->reg, val)) return 0; } break; case OP_ADD: case OP_SUB: if (!x86_64_emit_i32_op_reg_reg(pool, codegen, MOV, dst->reg, x->reg)) return 0; proc->occupied_reg &= ~(1 << (x->reg - 1)); x->reg = 0; break; default: FAIL("Not implemented", 0); } switch (y_n->op) { case OP_I32: { u32 val = CHUNK(pool, y_n->lit, u32)[0]; if (!x86_64_emit_i32_op_reg_val(pool, codegen, op, dst->reg, val)) return 0; } break; case OP_ADD: case OP_SUB: if (!x86_64_emit_i32_op_reg_reg(pool, codegen, op, dst->reg, y->reg)) return 0; proc->occupied_reg &= ~(1 << (y->reg - 1)); y->reg = 0; break; default: FAIL("Not implemented", 0); } proc->occupied_reg |= 1 << (dst->reg - 1); } break; case OP_CALL: { CHECK(n->call.convention == CONV_CDECL, "Not implemented", 0); CHECK(n->call.target_proc == UNDEFINED, "Not implemented", 0); CHECK(n->call.target_name.size > 0, "No proc name", 0); u8 *begin = CHUNK(pool, codegen->buffer_code, u8) + codegen->offset_code; u8 *end = begin + codegen->buffer_code.size; i64 num_args = n->call.args.size / sizeof(i64); i64 *args = CHUNK(pool, n->call.args, i64); CHECK(args != NULL, "Internal", 0); switch (num_args) { case 1: { Node *arg = node_by_id(pool, args[0]); CHECK(arg != NULL, "Sanity", 0); if (arg->op == OP_ADDRESS) { // Write data // Node *data = node_by_id(pool, arg->ref); CHECK(data->op == OP_I8, "Not implemented", 0); i64 arg_offset = codegen->offset_ro_data; codegen->buffer_ro_data = chunk_resize(pool, codegen->buffer_ro_data, arg_offset + data->lit.size); u8 *buf = CHUNK(pool, codegen->buffer_ro_data, u8); CHECK(buf != NULL, "Internal", 0); CHECK(arg_offset + data->lit.size <= codegen->buffer_ro_data.size, "Buffer overflow", 0); mem_cpy(buf + arg_offset, CHUNK(pool, data->lit, u8), data->lit.size); // Write code and relocations // write_u8(LE, 0x48, begin, end); // movabs write_u8(LE, 0xbf, begin + 1, end); // rdi codegen_add_rel(pool, codegen, (Codegen_Rel_Entry) { .type = REL_ADD_RO_OP_ADDRESS, .offset = codegen->offset_code + 2, .size = 8, .value = arg_offset, }); write_u8(LE, 0x48, begin + 10, end); // movabs write_u8(LE, 0xb8, begin + 11, end); // rax codegen_add_rel(pool, codegen, (Codegen_Rel_Entry) { .type = REL_ADD_PROC_ADDRESS, .offset = codegen->offset_code + 12, .size = 8, .name_size = n->call.target_name.size, .name = CHUNK(pool, n->call.target_name, c8), }); write_u8(LE, 0xff, begin + 20, end); // call write_u8(LE, 0xd0, begin + 21, end); // rax codegen->offset_code += 22; codegen->offset_ro_data += data->lit.size; } else if (arg->op == OP_I32) { CHECK(arg->lit.size == 4, "Not implemented", 0); i32 val = CHUNK(pool, arg->lit, i32)[0]; write_u8 (LE, 0xbf, begin, end); // mov edi write_i32(LE, val, begin + 1, end); write_u8 (LE, 0x48, begin + 5, end); // movabs write_u8 (LE, 0xb8, begin + 6, end); // rax codegen_add_rel(pool, codegen, (Codegen_Rel_Entry) { .type = REL_ADD_PROC_ADDRESS, 
.offset = codegen->offset_code + 7, .size = 8, .name_size = n->call.target_name.size, .name = CHUNK(pool, n->call.target_name, c8), }); write_u8(LE, 0xff, begin + 11, end); // call write_u8(LE, 0xd0, begin + 12, end); // rax codegen->offset_code += 13; } else { FAIL("Not implemented", 0); } } break; case 3: { Node *arg_0 = node_by_id(pool, args[0]); Node *arg_1 = node_by_id(pool, args[1]); Node *arg_2 = node_by_id(pool, args[2]); CHECK(arg_0->op == OP_PTR, "Not implemented", 0); CHECK(arg_1->op == OP_PTR, "Not implemented", 0); CHECK(arg_2->op == OP_PTR, "Not implemented", 0); u64 val_0 = CHUNK(pool, arg_0->lit, u64)[0]; u64 val_1 = CHUNK(pool, arg_1->lit, u64)[0]; u64 val_2 = CHUNK(pool, arg_2->lit, u64)[0]; CHECK(val_0 == 0, "Not implemented", 0); CHECK(val_1 == 0, "Not implemented", 0); CHECK(val_2 == 0, "Not implemented", 0); write_u8(LE, 0x31, begin, end); // xor edx write_u8(LE, 0xd2, begin + 1, end); // edx write_u8(LE, 0x31, begin + 2, end); // xor esi write_u8(LE, 0xf6, begin + 3, end); // esi write_u8(LE, 0x31, begin + 4, end); // xor edi write_u8(LE, 0xff, begin + 5, end); // edi write_u8(LE, 0x48, begin + 6, end); // movabs write_u8(LE, 0xb8, begin + 7, end); // rax codegen_add_rel(pool, codegen, (Codegen_Rel_Entry) { .type = REL_ADD_PROC_ADDRESS, .offset = codegen->offset_code + 8, .size = 8, .name_size = n->call.target_name.size, .name = CHUNK(pool, n->call.target_name, c8), }); write_u8(LE, 0xff, begin + 16, end); // call write_u8(LE, 0xd0, begin + 17, end); // rax codegen->offset_code += 18; } break; case 7: { Node *arg_0 = node_by_id(pool, args[0]); Node *arg_1 = node_by_id(pool, args[1]); Node *arg_2 = node_by_id(pool, args[2]); Node *arg_3 = node_by_id(pool, args[3]); Node *arg_4 = node_by_id(pool, args[4]); Node *arg_5 = node_by_id(pool, args[5]); Node *arg_6 = node_by_id(pool, args[6]); CHECK(arg_0->op == OP_ADDRESS, "Not implemented", 0); CHECK(arg_1->op == OP_I32, "Not implemented", 0); CHECK(arg_2->op == OP_ADDRESS, "Not implemented", 0); CHECK(arg_3->op == OP_PTR, "Not implemented", 0); CHECK(arg_4->op == OP_PTR, "Not implemented", 0); CHECK(arg_5->op == OP_PTR, "Not implemented", 0); CHECK(arg_6->op == OP_PTR, "Not implemented", 0); Proc *dat_0 = proc_by_id(pool, arg_0->ref); CHECK(dat_0 != NULL, "No proc", 0); // Write data // Node *dat_2 = node_by_id(pool, arg_2->ref); CHECK(dat_2 != NULL, "Sanity", 0); CHECK(dat_2->op == OP_PTR, "Not implemented", 0); i64 arg_2_offset = codegen->offset_ro_data; codegen->buffer_ro_data = chunk_resize(pool, codegen->buffer_ro_data, arg_2_offset + dat_2->lit.size); u8 *buf = CHUNK(pool, codegen->buffer_ro_data, u8); CHECK(buf != NULL, "Internal", 0); CHECK(arg_2_offset + dat_2->lit.size <= codegen->buffer_ro_data.size, "Buffer overflow", 0); mem_cpy(buf + arg_2_offset, CHUNK(pool, dat_2->lit, u8), dat_2->lit.size); // Write code and relocations // write_u8 (LE, 0x48, begin, end); // movabs write_u8 (LE, 0xbf, begin + 1, end); // rdi codegen_add_rel(pool, codegen, (Codegen_Rel_Entry) { .type = REL_ADD_PROC_ADDRESS, .offset = codegen->offset_code + 2, .size = 8, .proc = arg_0->ref, }); u32 val_1 = CHUNK(pool, arg_1->lit, u32)[0]; write_u8 (LE, 0xbe, begin + 10, end); // mov esi write_u32(LE, val_1, begin + 11, end); write_u8 (LE, 0x48, begin + 15, end); // movabs write_u8 (LE, 0xba, begin + 16, end); // rdx codegen_add_rel(pool, codegen, (Codegen_Rel_Entry) { .type = REL_ADD_RO_OP_ADDRESS, .offset = codegen->offset_code + 17, .size = 8, .value = arg_2_offset, }); u64 val_3 = CHUNK(pool, arg_3->lit, u64)[0]; u64 val_4 = CHUNK(pool, 
arg_4->lit, u64)[0];
u64 val_5 = CHUNK(pool, arg_5->lit, u64)[0];
write_u8 (LE, 0x48, begin + 25, end); // movabs
write_u8 (LE, 0xb9, begin + 26, end); // rcx
write_u64(LE, val_3, begin + 27, end);
write_u8 (LE, 0x49, begin + 35, end); // movabs
write_u8 (LE, 0xb8, begin + 36, end); // r8
write_u64(LE, val_4, begin + 37, end);
write_u8 (LE, 0x49, begin + 45, end); // movabs
write_u8 (LE, 0xb9, begin + 46, end); // r9
write_u64(LE, val_5, begin + 47, end);
// rsp alignment
write_u8(LE, 0x48, begin + 55, end); // sub rsp
write_u8(LE, 0x83, begin + 56, end); //
write_u8(LE, 0xec, begin + 57, end); //
write_u8(LE, 8, begin + 58, end); // 8
u64 val_6 = CHUNK(pool, arg_6->lit, u64)[0];
write_u8 (LE, 0x48, begin + 59, end); // movabs
write_u8 (LE, 0xb8, begin + 60, end); // rax
write_u64(LE, val_6, begin + 61, end);
write_u8 (LE, 0x50, begin + 69, end); // push rax
write_u8 (LE, 0x48, begin + 70, end); // movabs
write_u8 (LE, 0xb8, begin + 71, end); // rax
codegen_add_rel(pool, codegen, (Codegen_Rel_Entry) {
  .type      = REL_ADD_PROC_ADDRESS,
  .offset    = codegen->offset_code + 72,
  .size      = 8,
  .name_size = n->call.target_name.size,
  .name      = CHUNK(pool, n->call.target_name, c8),
});
write_u8(LE, 0xff, begin + 80, end); // call
write_u8(LE, 0xd0, begin + 81, end); // rax
write_u8(LE, 0x48, begin + 82, end); // add rsp
write_u8(LE, 0x83, begin + 83, end); //
write_u8(LE, 0xc4, begin + 84, end); //
write_u8(LE, 16, begin + 85, end); // 16
codegen->offset_code += 86;
codegen->offset_ro_data += dat_2->lit.size;
} break;
default: FAIL("Not implemented", 0);
}
} break;
case OP_IF: {
Node *arg_n = node_by_id(pool, n->if_.condition);
CHECK(arg_n != NULL, "Sanity", 0);
CHECK(arg_n->op == OP_I32, "Not implemented", 0);
// Load value into a register.
dst->reg = 1;
while (dst->reg <= 6 && ((1 << (dst->reg - 1)) & proc->occupied_reg) != 0)
  ++dst->reg;
u32 val = CHUNK(pool, arg_n->lit, u32)[0];
if (!x86_64_emit_i32_op_reg_val(pool, codegen, MOV, dst->reg, val))
  return 0;
proc->occupied_reg |= 1 << (dst->reg - 1);
// Compare the register with zero.
if (!x86_64_emit_cmp_reg_zero(pool, codegen, dst->reg))
  return 0;
// Leave space for the je instruction.
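// NOTE
// The two nop bytes reserved below are patched in place later, once
// the join point is known (see the phi fixup at the end of this
// function), e.g. into
//
//   74 xx    je rel8
//
// rel8 is measured from the end of the 2-byte instruction, so the
// join point has to stay within roughly +/-127 bytes.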
CHECK(codegen->buffer_code.size >= codegen->offset_code + 6, "Buffer overflow", 0); mem_set(CHUNK(pool, codegen->buffer_code, u8) + codegen->offset_code, 0x90, 2); // nop dst->jmp_offset = codegen->offset_code; codegen->offset_code += 2; } break; case OP_RET: { u8 *begin = CHUNK(pool, codegen->buffer_code, u8) + codegen->offset_code; u8 *end = begin + codegen->buffer_code.size; i64 num_vals = n->ret.vals.size / sizeof(i64); i64 *vals = CHUNK(pool, n->ret.vals, i64); CHECK(vals != NULL, "Internal", 0); if ((context & EMIT_ENTRY_PROC) != 0) { if (num_vals == 0) { write_u8 (LE, 0xb8, begin, end); // mov eax write_u32(LE, 60, begin + 1, end); // 60 write_u8 (LE, 0xbf, begin + 5, end); // mov edi write_u32(LE, 0, begin + 6, end); // 0 write_u8 (LE, 0x0f, begin + 10, end); // syscall write_u8 (LE, 0x05, begin + 11, end); codegen->offset_code += 12; } else { if (num_vals > 1) LOG(WARNING, "Some return values are ignored for node %lld", node); Node *val = node_by_id(pool, vals[0]); CHECK(val != NULL, "Sanity", 0); switch (val->op) { case OP_I64: { CHECK(val->lit.size == 8, "Not implemented", 0); u32 a = CHUNK(pool, val->lit, u32)[0]; write_u8 (LE, 0xb8, begin, end); // mov eax write_u32(LE, 60, begin + 1, end); // 60 write_u8 (LE, 0xbf, begin + 5, end); // mov edi write_u32(LE, a, begin + 6, end); write_u8 (LE, 0x0f, begin + 10, end); // syscall write_u8 (LE, 0x05, begin + 11, end); codegen->offset_code += 12; } break; case OP_ADD: case OP_SUB: case OP_PHI: { switch (entities[vals[0]].reg) { case EAX: break; case EBX: write_u8 (LE, 0x89, begin, end); // mov eax write_u8 (LE, 0xd8, begin + 1, end); // ebx begin += 2; codegen->offset_code += 2; break; case ECX: write_u8 (LE, 0x89, begin, end); // mov eax write_u8 (LE, 0xc8, begin + 1, end); // ecx begin += 2; codegen->offset_code += 2; break; case EDX: write_u8 (LE, 0x89, begin, end); // mov eax write_u8 (LE, 0xd0, begin + 1, end); // edx begin += 2; codegen->offset_code += 2; break; default: FAIL("Not implemented", 0); } write_u8 (LE, 0x89, begin, end); // mov edi write_u8 (LE, 0xc7, begin + 1, end); // eax write_u8 (LE, 0xb8, begin + 2, end); // mov eax write_u32(LE, 60, begin + 3, end); // 60 write_u8 (LE, 0x0f, begin + 7, end); // syscall write_u8 (LE, 0x05, begin + 8, end); codegen->offset_code += 9; } break; default: FAIL("Not implemented", 0); } } } else { CHECK(num_vals == 1, "Not implemented", 0); Node *val = node_by_id(pool, vals[0]); switch (val->op) { case OP_I32: { CHECK(val->lit.size == 4, "Not implemented", 0); u32 a = CHUNK(pool, val->lit, u32)[0]; write_u8 (LE, 0xb8, begin, end); // mov eax write_u32(LE, a, begin + 1, end); write_u8 (LE, 0xc3, begin + 5, end); // ret codegen->offset_code += 6; } break; case OP_ADD: case OP_SUB: { CHECK(entities[vals[0]].reg == EAX, "Not implemented", 0); write_u8(LE, 0xc3, begin, end); // ret codegen->offset_code += 1; } break; default: FAIL("Not implemented", 0); } } } break; case OP_PHI: { i64 num_vals = n->phi.vals.size / sizeof(i64); i64 *vals = CHUNK(pool, n->phi.vals, i64); CHECK(vals != NULL, "Sanity", 0); // Allocate a register dst->reg = 1; while (dst->reg <= 6 && ((1 << (dst->reg - 1)) & proc->occupied_reg) != 0) ++dst->reg; proc->occupied_reg |= 1 << (dst->reg - 1); // Emit mov and jmp instructions. 
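// NOTE
// Every phi input reserved a 7-byte nop pad right after its own code
// (see the phi_index fixup at the end of this function). For each
// input we rewind offset_code to that pad and fill it with a move
// into the phi's register plus a short jump back to offset_back, e.g.
//
//   b8+r imm32    mov <reg>, <input value>   ; 5 bytes
//   eb rel8       jmp <offset_back>          ; 2 bytes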
i64 offset_back = codegen->offset_code; for (i64 j = 0; j < num_vals; ++j) { i64 src_id = vals[j]; if (src_id == UNDEFINED) continue; CHECK(src_id >= 0 && src_id < num_entities, "Sanity", 0); Node *src = node_by_id(pool, src_id); CHECK(src != NULL, "Sanity", 0); Codegen_Entity *src_info = entities + src_id; codegen->offset_code = entities[src_id].phi_offset; switch (src->op) { case OP_I32: { u32 val = CHUNK(pool, src->lit, u32)[0]; if (!x86_64_emit_i32_mov_reg_val(pool, codegen, dst->reg, val)) return 0; } break; case OP_ADD: case OP_SUB: case OP_PHI: FAIL("Not implemented", 0); if (!x86_64_emit_i32_op_reg_reg(pool, codegen, MOV, dst->reg, src_info->reg)) return 0; break; default: FAIL("Not implemented", 0); } u8 *begin = CHUNK(pool, codegen->buffer_code, u8) + codegen->offset_code; u8 *end = begin + codegen->buffer_code.size; i64 relative_offset_64 = offset_back - (codegen->offset_code + 2); i8 relative_offset = (i8) relative_offset_64; CHECK(relative_offset_64 == relative_offset, "Not implemented", 0); write_u8(LE, 0xeb, begin, end); // jmp write_i8(LE, relative_offset, begin + 1, end); } codegen->offset_code = offset_back; } break; default: FAIL("Unknown operation", 0); } if (dst->phi_index != 0) { // We have to jump to phi node from here. // Left a space for mov and jmp intructions. CHECK(codegen->buffer_code.size >= codegen->offset_code + 8, "Buffer overflow", 0); mem_set(CHUNK(pool, codegen->buffer_code, u8) + codegen->offset_code, 0x90, 7); // nop dst->phi_offset = codegen->offset_code; codegen->offset_code += 7; if (dst->phi_index == 1) { // We have to jump here from the if node. CHECK(dst->branch != UNDEFINED, "Sanity", 0); CHECK(dst->branch >= 0 && dst->branch < num_entities, "Sanity", 0); i64 jmp_offset = entities[dst->branch].jmp_offset; u8 *begin = CHUNK(pool, codegen->buffer_code, u8) + jmp_offset; u8 *end = begin + codegen->buffer_code.size; i64 relative_offset_64 = codegen->offset_code - (jmp_offset + 2); i8 relative_offset = (i8) relative_offset_64; CHECK(relative_offset_64 == relative_offset, "Not implemented", 0); write_u8(LE, 0x74, begin, end); // je write_i8(LE, relative_offset, begin + 1, end); } } return 1; #undef CHECK_NODE_ } void dump_binary(Pool *pool, Codegen_Context *codegen, i64 begin, i64 end) { if (begin >= end) return; c8 dump[1024] = {0}; u8 *bytes = CHUNK(pool, codegen->buffer_code, u8); i64 j = 0; for (i64 i = begin; i < end && j + 4 < (i64) sizeof(dump); ++i) { dump[j++] = ((bytes[i] >> 4) & 0xf)["0123456789abcdef"]; dump[j++] = ( bytes[i] & 0xf)["0123456789abcdef"]; dump[j++] = ' '; } if (j > 0) dump[j - 1] = '\0'; LOG(VERBOSE, "DUMP: %s", dump); } b8 emit_proc( Pool * pool, Codegen_Context *codegen, i64 proc, u16 arch, u32 context ) { CHECK(arch == ARCH_X86_64, "Target not supported", 0); Proc *p = proc_by_id(pool, proc); CHECK(p != NULL, "No proc", 0); i64 num_entities = pool->entities.size / (i64) sizeof(Entity); if (codegen->entities.size / (i64) sizeof(Codegen_Entity) < num_entities) codegen->entities = chunk_resize(pool, codegen->entities, num_entities * sizeof(Codegen_Entity)); CHECK(codegen->entities.size / (i64) sizeof(Codegen_Entity) >= num_entities, "Buffer overflow", 0); Codegen_Entity *entities = CHUNK(pool, codegen->entities, Codegen_Entity); CHECK(entities != NULL, "Internal", 0); CHECK(proc < codegen->entities.size / (i64) sizeof(Codegen_Entity), "Buffer overflow", 0); CHECK(entities[proc].emit_done == 0, "Emit already done", 0); entities[proc].proc_offset = codegen->offset_code; // TODO Sort nodes in the sequential execution order. 
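// (The emitter relies on a node's operands being emitted before the
// node itself, so their registers are already assigned; a reverse
// post-order walk over the control edges would be one way to
// guarantee that. See the NOTE below for the current assumption.)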
// NOTE Now we assume that nodes are already sorted. i64 num_nodes = p->nodes.size / sizeof(i64); i64 *nodes = CHUNK(pool, p->nodes, i64); CHECK(nodes != NULL, "Internal", 0); // Mark dependants for phi nodes. for (i64 i = 0; i < num_nodes; ++i) if (entity_enabled(pool, nodes[i])) { Node *n = node_by_id(pool, nodes[i]); CHECK(n != NULL, "Sanity", 0); if (n->op != OP_PHI) continue; i64 num_vals = n->phi.vals.size / sizeof(i64); i64 *vals = CHUNK(pool, n->phi.vals, i64); CHECK(vals != NULL, "Sanity", 0); i64 phi_index = 1; for (i64 j = 0; j < num_vals; ++j) { i64 k = vals[j]; CHECK(k != UNDEFINED, "Sanity", 0); CHECK(k >= 0 && k < num_entities, "Sanity", 0); entities[k].phi_index = phi_index; entities[k].branch = n->phi.branch; ++phi_index; } } for (i64 i = 1; i < num_nodes; ++i) if (entity_enabled(pool, nodes[i])) { entities[i].inst_begin = codegen->offset_code; if (!x86_64_emit_node(pool, codegen, entities + proc, nodes[i], context)) return 0; entities[i].inst_end = codegen->offset_code; } for (i64 i = 1; i < num_nodes; ++i) if (entity_enabled(pool, nodes[i])) dump_binary(pool, codegen, entities[i].inst_begin, entities[i].inst_end); entities[proc].emit_done = 1; return 1; } b8 emit_unit(Pool *pool, Codegen_Context *codegen, i64 unit, u16 arch) { CHECK(pool != NULL, "Sanity", 0); CHECK(codegen != NULL, "Sanity", 0); Unit *u = unit_by_id(pool, unit); CHECK(u != NULL, "No unit", 0); i64 num_procs = u->procs.size / sizeof(i64); i64 *procs = CHUNK(pool, u->procs, i64); CHECK(procs != NULL, "Internal", 0); for (i64 i = 1; i < num_procs; ++i) { if (!entity_enabled(pool, procs[i])) continue; u32 context = 0; if (i == u->entry_point_index) { CHECK(!codegen->has_entry, "Multiple entry points", 0); codegen->entry_point = codegen->offset_code; codegen->has_entry = 1; context |= EMIT_ENTRY_PROC; } if (!emit_proc(pool, codegen, procs[i], arch, context)) return 0; } return 1; } // ================================================================ // // Linking // // ================================================================ void link_add_symbol(Pool *pool, Linker_Context *linker, Link_Sym_Entry sym) { CHECK(pool != NULL, "Sanity",); CHECK(linker != NULL, "Sanity",); i64 n = linker->symbols.size / sizeof(Link_Sym_Entry); linker->symbols = chunk_resize(pool, linker->symbols, (n + 1) * sizeof(Link_Sym_Entry)); Link_Sym_Entry *syms = CHUNK(pool, linker->symbols, Link_Sym_Entry); CHECK(syms != NULL, "Internal",); CHECK(n < linker->symbols.size / (i64) sizeof(Link_Sym_Entry), "Buffer overflow",); syms[n] = sym; } i64 ar_find_symbol_offset_by_name( u8 *ar_symbol_table, u8 *ar_end, c8 *name, c8 *name_end ) { CHECK(ar_symbol_table != NULL, "Sanity", -1); CHECK(name != NULL, "Sanity", -1); CHECK(name_end > name, "Sanity", -1); i64 num = (i64) read_u32((LE & ~BYTE_ORDER_MASK) | BYTE_BE, ar_symbol_table, ar_end); i64 len = name_end - name; c8 *s = (c8 *) (ar_symbol_table + 4 * (num + 1)); i64 index = 0; for (; index < num; ++index) { CHECK(s + len <= (c8 *) ar_end, "Buffer overflow", -1); if (s[len] == '\0' && mem_eq(s, name, len)) return (i64) read_u32((LE & ~BYTE_ORDER_MASK) | BYTE_BE, ar_symbol_table + 4 * (index + 1), ar_end); while (*s != '\0' && s < (c8 *) ar_end) ++s; CHECK(s < (c8 *) ar_end, "Buffer overflow", -1); CHECK(*s == '\0', "Buffer overflow", -1); ++s; } FAIL("Symbol not found", 0); } Buffer_Context elf_buffer_context( Pool * pool, Linker_Context *linker, i64 num_obj_files, i64 elf_index ) { u8 * dependencies_buffer = CHUNK(pool, linker->dependencies_buffer, u8); i64 *obj_file_offsets = 
CHUNK(pool, linker->obj_file_offsets, i64); CHECK(num_obj_files < linker->obj_file_offsets.size / (i64) sizeof(i64), "Buffer overflow", (Buffer_Context) {0}); CHECK(obj_file_offsets[num_obj_files] < linker->dependencies_buffer.size, "Buffer overflow", (Buffer_Context) {0}); return (Buffer_Context) { .begin = dependencies_buffer, .end = dependencies_buffer + obj_file_offsets[num_obj_files], .elf = { .offset = obj_file_offsets[elf_index], .size = obj_file_offsets[elf_index + 1] - obj_file_offsets[elf_index], }, .obj_index = elf_index, }; } Offset_Num elf_section_headers( Buffer_Context b ) { u8 *begin = b.begin + b.elf.offset; u8 *end = begin + b.elf.size; CHECK(end <= b.end, "Buffer overflow", (Offset_Num) {0}); return (Offset_Num) { .offset = b.elf.offset + read_i64(LE, begin + 40, end), .num = (i64) read_u16(LE, begin + 60, end), }; } i64 elf_section_header_offset( Buffer_Context b, i64 index ) { return elf_section_headers(b).offset + ELF_SECTION_HEADER_SIZE * index; } Offset_Size elf_section_names_data( Buffer_Context b ) { u8 *elf_begin = b.begin + b.elf.offset; u8 *elf_end = elf_begin + b.elf.size; CHECK(elf_end <= b.end, "Buffer overflow", (Offset_Size) {0}); i64 string_table_index = (i64) read_u16(LE, elf_begin + 62, elf_end); u8 *begin = b.begin + elf_section_header_offset(b, string_table_index); return (Offset_Size) { .offset = b.elf.offset + read_i64(LE, begin + 24, elf_end), .size = read_i64(LE, begin + 32, elf_end), }; } Offset_Size elf_name_in_string_table( Buffer_Context b, Offset_Size string_table, i64 name_offset ) { if (name_offset == 0) return (Offset_Size) { .offset = 0, .size = 0, }; c8 *begin = (c8 *) b.begin + string_table.offset + name_offset; c8 *end = (c8 *) b.begin + string_table.offset + string_table.size; return (Offset_Size) { .offset = string_table.offset + name_offset, .size = str_len(begin, end), }; } i64 elf_find_section_index_by_name( Buffer_Context b, c8 * name, i64 name_size ) { CHECK(name != NULL, "Sanity", 0); if (name_size == 0) return 0; Offset_Num headers = elf_section_headers(b); Offset_Size names = elf_section_names_data(b); for (i64 i = 0; i < headers.num; ++i) { u8 *begin = b.begin + headers.offset + i * ELF_SECTION_HEADER_SIZE; i64 name_index = (i64) read_u32(LE, begin, b.end); Offset_Size s = elf_name_in_string_table(b, names, name_index); if (str_eq(s.size, (c8 *) b.begin + s.offset, name_size, name)) return i; } return 0; } Elf_Section_Header elf_section( Buffer_Context b, i64 index ) { Offset_Size names = elf_section_names_data(b); u8 * begin = b.begin + elf_section_header_offset(b, index); u8 * end = b.begin + b.elf.offset + b.elf.size; CHECK(end <= b.end, "Buffer overflow", (Elf_Section_Header) {0}); i64 name_index = (i64) read_u32(LE, begin, end); i64 size = read_i64(LE, begin + 32, end); i64 entry_size = read_i64(LE, begin + 56, end); i64 num_entries = entry_size > 0 ? 
(size / entry_size) : 0; u32 type = read_u32(LE, begin + 4, end); u64 flags = read_u64(LE, begin + 8, end); if (type > SEC_SYMTAB_SHNDX || type == 12 || type == 13) { LOG(ERROR, "Section type: %d", type); FAIL("Unknown section type", (Elf_Section_Header) {0}); } return (Elf_Section_Header) { .name = elf_name_in_string_table(b, names, name_index), .type = type, .alloc = (flags & 2) == 2, .write = (flags & 1) == 1, .exec = (flags & 4) == 4, .tls = (flags & 0x400) == 0x400, .alignment = read_i64(LE, begin + 48, end), .entry_size = entry_size, .num_entries = num_entries, .data = { .offset = b.elf.offset + read_i64(LE, begin + 24, end), .size = size, }, }; } Elf_Section_Header elf_find_section_by_name( Buffer_Context b, c8 * name, i64 name_size ) { i64 index = elf_find_section_index_by_name(b, name, name_size); return index == 0 ? (Elf_Section_Header) {0} : elf_section(b, index); } c8 *elf_name_from_offset( Buffer_Context b, Offset_Size name ) { if (name.size == 0) return ""; c8 *begin = (c8 *) (b.begin + name.offset); i64 len = str_len(begin, (c8 *) b.end); CHECK((i64) name.size == len, "Buffer overflow", ""); return begin; } i64 elf_find_related_section_index( Buffer_Context b, i64 section_index ) { Offset_Size src_name = elf_section(b, section_index).name; Elf_Section_Header dst = elf_section(b, section_index - 1); if (src_name.size > dst.name.size && mem_eq( elf_name_from_offset(b, src_name) + (src_name.size - dst.name.size), elf_name_from_offset(b, dst.name), dst.name.size)) return section_index - 1; i64 num_sections = elf_section_headers(b).num; for (i64 i = 0; i < num_sections; ++i) { if (i == section_index || i + 1 == section_index) continue; dst = elf_section(b, i); if (src_name.size > dst.name.size && mem_eq( elf_name_from_offset(b, src_name) + (src_name.size - dst.name.size), elf_name_from_offset(b, dst.name), dst.name.size)) { LOG(WARNING, "Unexpected section order"); return i; } } FAIL("Not found", 0); } Offset_Size elf_find_related_data( Buffer_Context b, i64 section_index ) { return elf_section(b, elf_find_related_section_index(b, section_index)).data; } Elf_Symbol_Entry elf_symbol( Buffer_Context b, Offset_Size symbol_table, Offset_Size string_table, i64 symbol_index ) { u8 *begin = b.begin + symbol_table.offset + symbol_index * ELF_SYMBOL_ENTRY_SIZE; u8 *end = b.begin + symbol_table.offset + symbol_table.size; CHECK(end <= b.end, "Buffer overflow", (Elf_Symbol_Entry) {0}); CHECK(end <= b.begin + b.elf.offset + b.elf.size, "Buffer overflow", (Elf_Symbol_Entry) {0}); i64 sym_name = (i64) read_u32(LE, begin, end); u8 sym_info = read_u8 (LE, begin + 4, end); i64 sym_shndx = (i64) read_u16(LE, begin + 6, end); i64 sym_value = read_i64(LE, begin + 8, end); i64 sym_size = read_i64(LE, begin + 16, end); u8 type = (sym_info & 0xf) == 0 ? SYM_NONE : (sym_info & 0xf) == 1 ? SYM_DATA : (sym_info & 0xf) == 2 ? SYM_PROC : (sym_info & 0xf) == 3 ? SYM_SECTION : (sym_info & 0xf) == 5 ? SYM_COMMON : (sym_info & 0xf) == 6 ? SYM_TLS : SYM_SPECIFIC; u8 bind = (sym_info >> 4) == 1 ? BIND_GLOBAL : (sym_info >> 4) == 2 ? BIND_WEAK : BIND_LOCAL; return (Elf_Symbol_Entry) { .name = elf_name_in_string_table(b, string_table, sym_name), .type = type, .bind = bind, .section = sym_shndx, .value = { .offset = sym_value, .size = sym_size, }, .obj_index = b.obj_index, }; } Elf_Relx_Entry elf_relx( Buffer_Context b, Offset_Size symbol_table, Offset_Size string_table, Offset_Size relocations, i64 relx_index, b8 is_rela ) { u8 *begin = b.begin + relocations.offset + relx_index * (is_rela ? 
ELF_RELA_ENTRY_SIZE : ELF_REL_ENTRY_SIZE); u8 *end = begin + ELF_RELA_ENTRY_SIZE; CHECK(end <= b.end, "Buffer overflow", (Elf_Relx_Entry) {0}); CHECK(end <= b.begin + b.elf.offset + b.elf.size, "Buffer overflow", (Elf_Relx_Entry) {0}); CHECK(end <= b.begin + relocations.offset + relocations.size, "Buffer overflow", (Elf_Relx_Entry) {0}); i64 relx_offset = read_i64(LE, begin, end); u32 relx_type = read_u32(LE, begin + 8, end); i64 relx_sym = (i64) read_u32(LE, begin + 12, end); i64 relx_addent = is_rela ? read_i64(LE, begin + 16, end) : 0; return (Elf_Relx_Entry) { .symbol = elf_symbol(b, symbol_table, string_table, relx_sym), .offset = relx_offset, .type = relx_type, .addent = relx_addent, }; } Elf_Symbol_Entry elf_find_symbol_by_name( Buffer_Context b, i64 symbol_table_index, Offset_Size string_table, c8 * name, i64 name_size ) { Elf_Section_Header symbol_table = elf_section(b, symbol_table_index); for (i64 i = 0; i < symbol_table.num_entries; ++i) { Elf_Symbol_Entry sym = elf_symbol(b, symbol_table.data, string_table, i); CHECK(b.begin + sym.name.offset + name_size <= b.end, "Buffer overflow", (Elf_Symbol_Entry) {0}); CHECK(sym.name.offset + name_size <= b.elf.size, "Buffer overflow", (Elf_Symbol_Entry) {0}); if (str_eq(name_size, name, sym.name.size, (c8 *) b.begin + sym.name.offset)) return sym; } FAIL("Not found", (Elf_Symbol_Entry) {0}); } void elf_checks(Buffer_Context b) { u8 *begin = b.begin + b.elf.offset; u8 *end = begin + b.elf.size; // TODO CHECK(read_u16(LE, begin + 18, end) != ELF_AARCH64, "ARM64 not implemented",); u8 osabi = read_u8(LE, begin + 7, end); CHECK( read_u8 (LE, begin, end) == ELF_MAGIC[0], "Invalid ELF file",); CHECK( read_u8 (LE, begin + 1, end) == ELF_MAGIC[1], "Invalid ELF file",); CHECK( read_u8 (LE, begin + 2, end) == ELF_MAGIC[2], "Invalid ELF file",); CHECK( read_u8 (LE, begin + 3, end) == ELF_MAGIC[3], "Invalid ELF file",); CHECK( read_u8 (LE, begin + 4, end) == ELF_64, "Unsupported ELF file",); CHECK( read_u8 (LE, begin + 5, end) == ELF_2_LE, "Unsupported ELF file",); CHECK( read_u8 (LE, begin + 6, end) == ELF_VERSION, "Unsupported ELF file",); CHECK( osabi == ELF_SYS_V || osabi == ELF_LINUX, "Unsupported ELF file",); CHECK( read_u8 (LE, begin + 8, end) == ELF_ABI_VERSION, "Unsupported ELF file",); CHECK( read_u16(LE, begin + 16, end) == ELF_RELOCATABLE, "Unsupported ELF file",); CHECK( read_u16(LE, begin + 18, end) == ELF_X86_64, "Unsupported ELF file",); CHECK( read_u32(LE, begin + 20, end) == ELF_VERSION, "Unsupported ELF file",); LAX(read_u64(LE, begin + 24, end) == 0, "Invalid entry point"); LAX(read_u64(LE, begin + 32, end) == 0, "Invalid program header offset"); LAX(read_u32(LE, begin + 48, end) == 0, "Invalid flags"); LAX(read_u16(LE, begin + 52, end) == ELF_HEADER_SIZE, "Invalid ELF header size"); LAX(read_u16(LE, begin + 54, end) == 0, "Invalid program header size"); LAX(read_u16(LE, begin + 56, end) == 0, "Invalid num program headers"); LAX(read_u16(LE, begin + 58, end) == ELF_SECTION_HEADER_SIZE, "Invalid section header size"); } i64 unit_write_in_memory( Pool * pool, Codegen_Context *codegen, Linker_Context * linker, i64 unit, u16 format, u16 arch ) { CHECK(format == FORMAT_ELF && arch == ARCH_X86_64, "Target not supported", 0); if (!emit_unit(pool, codegen, unit, arch)) return 0; u16 num_program_headers = 10; i64 program_offset = align(ELF_HEADER_SIZE + ELF_PROGRAM_HEADER_SIZE * num_program_headers, X86_64_PAGE_SIZE); i64 base_address = X86_64_BASE_ADDRESS; i64 rx_code_address = base_address + program_offset; i64 rx_code_size = 
codegen->offset_code; i64 ro_data_size = codegen->offset_ro_data; i64 rw_data_size = 0; i64 rw_zval_size = 0; i64 rw_got_size = ELF_GOT_ENTRY_SIZE; i64 rw_dynamic_size = ELF_DYNAMIC_ENTRY_SIZE; i64 rw_tls_data_size = 0; i64 rw_tls_zval_size = 0; i64 num_sections_total = 0; i64 num_symbols = 0; // ========================================================== // // Calculate section offsets for (i64 elf_index = 0; elf_index < linker->num_obj_files; ++elf_index) { Buffer_Context buf = elf_buffer_context(pool, linker, linker->num_obj_files, elf_index); elf_checks(buf); Offset_Num headers = elf_section_headers(buf); for (i64 sec_index = 1; sec_index < headers.num; ++sec_index, ++num_sections_total) { Elf_Section_Header section = elf_section(buf, sec_index); c8 * name = elf_name_from_offset(buf, section.name); if (section.type == SEC_PROGBITS && STR_EQ(section.name.size, name, SECTION_GOT)) { FAIL("Not implemented", 0); } if (section.type == SEC_PROGBITS && STR_EQ(section.name.size, name, SECTION_PLT)) { FAIL("Not implemented", 0); } if (!section.alloc || section.data.size == 0) continue; linker->sections = chunk_resize(pool, linker->sections, (num_sections_total + 1) * sizeof(Link_Sec_Entry)); Link_Sec_Entry *sections = CHUNK(pool, linker->sections, Link_Sec_Entry); CHECK(sections != NULL, "Internal", 0); CHECK(num_sections_total < linker->sections.size / (i64) sizeof(Link_Sec_Entry), "Buffer overflow", 0); if (section.tls) { if (section.type == SEC_PROGBITS) { sections[num_sections_total].offset = rw_tls_data_size; sections[num_sections_total].address = rw_tls_data_size; rw_tls_data_size += align(section.data.size, X86_64_ALIGNMENT); continue; } if (section.type == SEC_NOBITS) { sections[num_sections_total].address = rw_tls_zval_size; rw_tls_zval_size += align(section.data.size, X86_64_ALIGNMENT); continue; } FAIL("Not implemented", 0); } if (section.type == SEC_DYNAMIC) { FAIL("Not implemented", 0); } if (section.exec) { CHECK(!section.write, "Not implemented", 0); sections[num_sections_total].offset = rx_code_size; sections[num_sections_total].address = rx_code_size; rx_code_size += align(section.data.size, X86_64_ALIGNMENT); continue; } if (section.write && section.type == SEC_NOBITS) { sections[num_sections_total].address = rw_zval_size; rw_zval_size += align(section.data.size, X86_64_ALIGNMENT); continue; } if (section.write) { sections[num_sections_total].offset = rw_data_size; sections[num_sections_total].address = rw_data_size; rw_data_size += align(section.data.size, X86_64_ALIGNMENT); continue; } sections[num_sections_total].offset = ro_data_size; sections[num_sections_total].address = ro_data_size; ro_data_size += align(section.data.size, X86_64_ALIGNMENT); } } // ========================================================== // // Calculate global offset table size { i64 prev_num_symbols = num_symbols; for (i64 elf_index = 0; elf_index < linker->num_obj_files; ++elf_index) { Buffer_Context buf = elf_buffer_context(pool, linker, linker->num_obj_files, elf_index); i64 num_sections = elf_section_headers(buf).num; Offset_Size strtab = elf_find_section_by_name(buf, SECTION_STRTAB, sizeof SECTION_STRTAB - 1).data; Offset_Size symtab = elf_find_section_by_name(buf, SECTION_SYMTAB, sizeof SECTION_SYMTAB - 1).data; for (i64 sec_index = 1; sec_index < num_sections; ++sec_index) { Elf_Section_Header src_sec = elf_section(buf, sec_index); if (src_sec.type != SEC_REL && src_sec.type != SEC_RELA) continue; for (i64 entry_index = 0; entry_index < src_sec.num_entries; ++entry_index) { Elf_Relx_Entry relx 
= elf_relx(buf, symtab, strtab, src_sec.data, entry_index, src_sec.type == SEC_RELA); c8 * sym_name = elf_name_from_offset(buf, relx.symbol.name); CHECK(relx.symbol.section != 65521, "Sanity", 0); CHECK(relx.symbol.section != 65522, "Sanity", 0); switch (relx.type) { case R_X86_64_GOT32: case R_X86_64_GOTPCREL: case R_X86_64_GOTPCRELX: case R_X86_64_REX_GOTPCRELX: if (!STR_EQ(relx.symbol.name.size, sym_name, "_DYNAMIC")) { b8 found = 0; Link_Sym_Entry *symbols = CHUNK(pool, linker->symbols, Link_Sym_Entry); CHECK(symbols != NULL, "Internal", 0); CHECK(num_symbols <= linker->symbols.size / (i64) sizeof(Link_Sym_Entry), "Buffer overflow", 0); for (i64 i = 0; i < num_symbols; ++i) if (relx.symbol.name.size > 0) { if (str_eq(symbols[i].name_size, symbols[i].name, relx.symbol.name.size, sym_name)) { found = 1; break; } } else { if (symbols[i].obj_index == relx.symbol.obj_index && symbols[i].sec_index == relx.symbol.section && symbols[i].address == relx.symbol.value.offset) { found = 1; break; } } if (found) break; link_add_symbol(pool, linker, (Link_Sym_Entry) { .name_size = relx.symbol.name.size, .name = sym_name, .address = relx.symbol.value.offset, .obj_index = relx.symbol.obj_index, .sec_index = relx.symbol.section, }); rw_got_size += ELF_GOT_ENTRY_SIZE; } break; default:; } } } } num_symbols = prev_num_symbols; } // ========================================================== // // Adjust section offsets i64 ro_data_address = align(rx_code_address + rx_code_size, X86_64_PAGE_SIZE); i64 rw_data_address = align(ro_data_address + ro_data_size, X86_64_PAGE_SIZE); i64 rw_zval_address = align(rw_data_address + rw_data_size, X86_64_PAGE_SIZE); i64 rw_got_address = align(rw_zval_address + rw_zval_size, X86_64_PAGE_SIZE); i64 rw_dynamic_address = align(rw_got_address + rw_got_size, X86_64_PAGE_SIZE); i64 rw_tls_data_address = align(rw_dynamic_address + rw_dynamic_size, X86_64_PAGE_SIZE); i64 rw_tls_zval_address = align(rw_tls_data_address + rw_tls_data_size, X86_64_PAGE_SIZE); i64 rx_code_offset = program_offset; i64 ro_data_offset = align(rx_code_offset + rx_code_size, X86_64_PAGE_SIZE); i64 rw_data_offset = align(ro_data_offset + ro_data_size, X86_64_PAGE_SIZE); i64 rw_got_offset = align(rw_data_offset + rw_data_size, X86_64_PAGE_SIZE); i64 rw_dynamic_offset = align(rw_got_offset + rw_got_size, X86_64_PAGE_SIZE); i64 rw_tls_data_offset = align(rw_dynamic_offset + rw_dynamic_size, X86_64_PAGE_SIZE); // ---------------------------------------------------------- for (i64 elf_index = 0, sec_index_global = 0; elf_index < linker->num_obj_files; ++elf_index) { Buffer_Context buf = elf_buffer_context(pool, linker, linker->num_obj_files, elf_index); Offset_Num headers = elf_section_headers(buf); for (i64 sec_index = 1; sec_index < headers.num; ++sec_index, ++sec_index_global) { CHECK(sec_index_global < num_sections_total, "Buffer overflow", 0); Elf_Section_Header section = elf_section(buf, sec_index); c8 * name = elf_name_from_offset(buf, section.name); if (section.type == SEC_PROGBITS && STR_EQ(section.name.size, name, SECTION_GOT)) { FAIL("Not implemented", 0); } if (section.type == SEC_PROGBITS && STR_EQ(section.name.size, name, SECTION_PLT)) { FAIL("Not implemented", 0); } if (!section.alloc || section.data.size == 0) continue; Link_Sec_Entry *sections = CHUNK(pool, linker->sections, Link_Sec_Entry); CHECK(sections != NULL, "Internal", 0); CHECK(sec_index_global < linker->sections.size / (i64) sizeof(Link_Sec_Entry), "Buffer overflow", 0); if (section.tls) { if (section.type == SEC_PROGBITS) { 
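// NOTE
// The first pass ("Calculate section offsets" above) stored offsets
// relative to the start of each segment; this pass only adds the
// segment base, e.g. for initialized TLS data:
//
//   address == rw_tls_data_address + <offset within the TLS image>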
sections[sec_index_global].offset += rw_tls_data_offset; sections[sec_index_global].address += rw_tls_data_address; continue; } if (section.type == SEC_NOBITS) { sections[sec_index_global].address += rw_tls_zval_address; continue; } FAIL("Not implemented", 0); } if (section.type == SEC_DYNAMIC) { FAIL("Not implemented", 0); } if (section.exec) { CHECK(!section.write, "Not implemented", 0); sections[sec_index_global].offset += rx_code_offset + codegen->offset_code; sections[sec_index_global].address += rx_code_address + codegen->offset_code; continue; } if (section.write && section.type == SEC_NOBITS) { sections[sec_index_global].address += rw_zval_address; continue; } if (section.write) { sections[sec_index_global].offset += rw_data_offset; sections[sec_index_global].address += rw_data_address; continue; } sections[sec_index_global].offset += ro_data_offset + codegen->offset_ro_data; sections[sec_index_global].address += ro_data_address + codegen->offset_ro_data; } } // ========================================================== // // Relocate defined symbols for (i64 elf_index = 0, sec_index_global = 0; elf_index < linker->num_obj_files; ++elf_index) { Buffer_Context buf = elf_buffer_context(pool, linker, linker->num_obj_files, elf_index); Offset_Num headers = elf_section_headers(buf); Offset_Size strtab = elf_find_section_by_name(buf, SECTION_STRTAB, sizeof SECTION_STRTAB - 1).data; for (i64 sec_index = 1; sec_index < headers.num; ++sec_index) { Elf_Section_Header tab = elf_section(buf, sec_index); if (tab.type != SEC_SYMTAB) continue; for (i64 sym_index = 1; sym_index < tab.num_entries; ++sym_index) { Elf_Symbol_Entry sym = elf_symbol(buf, tab.data, strtab, sym_index); c8 * sym_name = elf_name_from_offset(buf, sym.name); if (sym.type == SYM_TLS && sym.section != 0) CHECK(elf_section(buf, sym.section).tls, "Sanity", 0); if (sym.section == 0) // undefined symbol continue; if (sym.section == 65522) { // common LOG(INFO, "Common symbol: %s", sym_name); continue; } i64 sym_address = sym.value.offset; Link_Sec_Entry *sections = CHUNK(pool, linker->sections, Link_Sec_Entry); CHECK(sections != NULL, "Internal", 0); if (sym.section != 65521 && elf_section(buf, sym.section).alloc) { i64 sym_section = sec_index_global + sym.section - 1; CHECK(sym_section < linker->sections.size / (i64) sizeof(Link_Sec_Entry), "Buffer overflow", 0); CHECK(sym_section < num_sections_total, "Buffer overflow", 0); CHECK(sections[sym_section].address != 0, "Sanity", 0); sym_address += sections[sym_section].address; } link_add_symbol(pool, linker, (Link_Sym_Entry) { .name_size = sym.name.size, .name = sym_name, .address = sym_address, .size = sym.value.size, }); u8 *begin = buf.begin + tab.data.offset + sym_index * ELF_SYMBOL_ENTRY_SIZE; u8 *end = begin + tab.data.size; if (end > buf.end) end = buf.end; } } sec_index_global += elf_section_headers(buf).num - 1; } // ========================================================== // // TODO Add internal symbols // ========================================================== // // Add runtime library symbols #define ADD_UNIQUE_(i_, name_, ...) 
\ do { \ b8 found_ = 0; \ Link_Sym_Entry *symbols_ = \ CHUNK(pool, linker->symbols, Link_Sym_Entry); \ CHECK(symbols_ != NULL, "Internal", 0); \ CHECK(num_symbols <= \ linker->symbols.size / \ (i64) sizeof(Link_Sym_Entry), \ "Buffer overflow", 0); \ for (i_ = 0; i_ < num_symbols; ++i_) \ if (STR_EQ(symbols_[i_].name_size, \ symbols_[i_].name, \ name_)) { \ found_ = 1; \ break; \ } \ CHECK(!found_, "Forbidden symbol: " name_, 0); \ link_add_symbol(pool, linker, (Link_Sym_Entry) { \ .name_size = sizeof name_ - 1, \ .name = name_, \ __VA_ARGS__ \ }); \ } while (0) #define ADD_IF_MISSING_(name_, ...) \ do { \ b8 found_ = 0; \ Link_Sym_Entry *symbols_ = \ CHUNK(pool, linker->symbols, Link_Sym_Entry); \ CHECK(symbols_ != NULL, "Internal", 0); \ CHECK(num_symbols <= \ linker->symbols.size / \ (i64) sizeof(Link_Sym_Entry), \ "Buffer overflow", 0); \ for (i_ = 0; i_ < num_symbols; ++i_) \ if (STR_EQ(symbols_[i_].name_size, \ symbols_[i_].name, \ name_)) { \ found_ = 1; \ break; \ } \ if (!found_) { \ link_add_symbol(pool, linker, (Link_Sym_Entry) { \ .name_size = sizeof name_ - 1, \ .name = name_, \ __VA_ARGS__ \ }); \ } \ } while (0) // ---------------------------------------------------------- i64 sym_index_got; i64 sym_index_dynamic; ADD_UNIQUE_(sym_index_got, "_GLOBAL_OFFSET_TABLE_", .address = rw_got_address, .size = rw_got_size); ADD_UNIQUE_(sym_index_dynamic, "_DYNAMIC", .address = rw_dynamic_address, .size = rw_dynamic_size, .got_offset = 0); { i64 i_; ADD_IF_MISSING_("__ehdr_start", .address = base_address, .size = ELF_HEADER_SIZE); ADD_IF_MISSING_("__pthread_initialize_minimal",); ADD_IF_MISSING_("__preinit_array_start",); ADD_IF_MISSING_("__preinit_array_end",); ADD_IF_MISSING_("__init_array_start",); ADD_IF_MISSING_("__init_array_end",); ADD_IF_MISSING_("__fini_array_start",); ADD_IF_MISSING_("__fini_array_end",); ADD_IF_MISSING_("__rela_iplt_start",); ADD_IF_MISSING_("__rela_iplt_end",); ADD_IF_MISSING_("__start___libc_atexit",); ADD_IF_MISSING_("__stop___libc_atexit",); ADD_IF_MISSING_("__start___libc_IO_vtables",); ADD_IF_MISSING_("__stop___libc_IO_vtables",); ADD_IF_MISSING_("__start___libc_subfreeres",); ADD_IF_MISSING_("__stop___libc_subfreeres",); ADD_IF_MISSING_("__start___libc_freeres_ptrs",); ADD_IF_MISSING_("__stop___libc_freeres_ptrs",); ADD_IF_MISSING_("_init",); ADD_IF_MISSING_("_fini",); ADD_IF_MISSING_("_end",); ADD_IF_MISSING_("_dl_rtld_map",); ADD_IF_MISSING_("__gmon_start__",); ADD_IF_MISSING_("__gcc_personality_v0",); ADD_IF_MISSING_("_Unwind_Resume",); ADD_IF_MISSING_("_Unwind_ForcedUnwind",); ADD_IF_MISSING_("_Unwind_Backtrace",); ADD_IF_MISSING_("_Unwind_GetCFA",); ADD_IF_MISSING_("_Unwind_GetIP",); ADD_IF_MISSING_("__addtf3",); ADD_IF_MISSING_("__subtf3",); ADD_IF_MISSING_("__multf3",); ADD_IF_MISSING_("__divtf3",); ADD_IF_MISSING_("__eqtf2",); ADD_IF_MISSING_("__letf2",); ADD_IF_MISSING_("__lttf2",); ADD_IF_MISSING_("__getf2",); ADD_IF_MISSING_("__gttf2",); ADD_IF_MISSING_("__unordtf2",); } #undef ADD_UNIQUE_ #undef ADD_IF_MISSING_ // ========================================================== // // TODO Resolve internal symbols // ========================================================== // // Process relocations and build global offset table for (i64 elf_index = 0, sec_index_global = 0, rel_index_global = 0, got_offset = 0; elf_index < linker->num_obj_files; ++elf_index) { Buffer_Context buf = elf_buffer_context(pool, linker, linker->num_obj_files, elf_index); i64 num_sections = elf_section_headers(buf).num; Offset_Size strtab = elf_find_section_by_name(buf, 
SECTION_STRTAB, sizeof SECTION_STRTAB - 1).data; Offset_Size symtab = elf_find_section_by_name(buf, SECTION_SYMTAB, sizeof SECTION_SYMTAB - 1).data; for (i64 sec_index = 1; sec_index < num_sections; ++sec_index) { Elf_Section_Header src_sec = elf_section(buf, sec_index); if (src_sec.type != SEC_REL && src_sec.type != SEC_RELA) continue; // i64 dst_index = elf_find_related_section_index(buf, sec_index); // i64 dst_index_global = sec_index_global + dst_index - 1; for (i64 entry_index = 0; entry_index < src_sec.num_entries; ++entry_index, ++rel_index_global) { Elf_Relx_Entry relx = elf_relx(buf, symtab, strtab, src_sec.data, entry_index, src_sec.type == SEC_RELA); c8 * sym_name = elf_name_from_offset(buf, relx.symbol.name); i64 sym_index_global = num_symbols; CHECK(relx.symbol.section != 65521, "Sanity", 0); CHECK(relx.symbol.section != 65522, "Sanity", 0); if (relx.symbol.section == 0) { b8 found = 0; i64 num_entities = pool->entities.size / sizeof(Entity); Entity *entities = CHUNK(pool, pool->entities, Entity); CHECK(entities != NULL, "Internal", 0); Codegen_Entity *codegen_entities = CHUNK(pool, codegen->entities, Codegen_Entity); CHECK(codegen_entities != NULL, "Internal", 0); CHECK(codegen->entities.size / (i64) sizeof(Codegen_Entity) >= num_entities, "Buffer overflow", 0); for (i64 i = 0; i < num_entities; ++i) if (entities[i].is_enabled && entities[i].type == ENTITY_PROC && str_eq(entities[i].proc.name.size, CHUNK(pool, entities[i].proc.name, c8), relx.symbol.name.size, sym_name)) { CHECK(codegen_entities[i].emit_done, "No proc code", 0); link_add_symbol(pool, linker, (Link_Sym_Entry) { .address = rx_code_address + codegen_entities[i].proc_offset, .size = relx.symbol.value.size, }); found = 1; break; } Link_Sym_Entry *symbols = CHUNK(pool, linker->symbols, Link_Sym_Entry); CHECK(symbols != NULL, "Internal", 0); CHECK(num_symbols <= linker->symbols.size / (i64) sizeof(Link_Sym_Entry), "Buffer overflow", 0); if (!found) for (i64 i = 0; i < num_symbols; ++i) if (str_eq(symbols[i].name_size, symbols[i].name, relx.symbol.name.size, sym_name)) { sym_index_global = i; found = 1; break; } if (!found) { LOG(ERROR, "Undefined symbol: %s", sym_name); FAIL("Link failed", 0); } } else { i64 const SEARCH_RANGE = 1024; Link_Sec_Entry *sections = CHUNK(pool, linker->sections, Link_Sec_Entry); CHECK(sections != NULL, "Internal", 0); CHECK(sec_index_global < linker->sections.size / (i64) sizeof(Link_Sec_Entry), "Buffer overflow", 0); Link_Sym_Entry *symbols = CHUNK(pool, linker->symbols, Link_Sym_Entry); CHECK(symbols != NULL, "Internal", 0); CHECK(num_symbols <= linker->symbols.size / (i64) sizeof(Link_Sym_Entry), "Buffer overflow", 0); i64 src_index_global = sec_index_global + relx.symbol.section - 1; i64 address = relx.symbol.value.offset + sections[src_index_global].address; b8 found = 0; for (i64 k = 1; k <= num_symbols && k <= SEARCH_RANGE; ++k) if (symbols[num_symbols - k].address == address) { sym_index_global = num_symbols - k; found = 1; break; } if (!found) link_add_symbol(pool, linker, (Link_Sym_Entry) { .address = address, .size = relx.symbol.value.size, }); } if (sym_index_global >= num_symbols) LOG(ERROR, "Symbol: %s", sym_name); CHECK(sym_index_global < num_symbols, "Symbol not found", 0); i64 num_rels = linker->rels.size / sizeof(Link_Rel_Entry); if (rel_index_global >= num_rels) { num_rels = rel_index_global + 1; linker->rels = chunk_resize(pool, linker->rels, num_rels * sizeof(Link_Rel_Entry)); } Link_Rel_Entry *rels = CHUNK(pool, linker->rels, Link_Rel_Entry); CHECK(rels != NULL, 
"Internal", 0); CHECK(rel_index_global < linker->rels.size / (i64) sizeof(Link_Rel_Entry), "Buffer overflow", 0); rels[rel_index_global].symbol = sym_index_global; switch (relx.type) { case R_X86_64_GOT32: case R_X86_64_GOTPCREL: case R_X86_64_GOTPCRELX: case R_X86_64_REX_GOTPCRELX: { Link_Sym_Entry *symbols = CHUNK(pool, linker->symbols, Link_Sym_Entry); CHECK(symbols != NULL, "Internal", 0); if (!STR_EQ(relx.symbol.name.size, sym_name, "_DYNAMIC") && symbols[sym_index_global].got_offset == 0) { got_offset += ELF_GOT_ENTRY_SIZE; CHECK(got_offset < rw_got_size, "Sanity", 0); symbols[sym_index_global].got_offset = got_offset; } } break; default:; } } } sec_index_global += num_sections - 1; } // ============================================================== // // Apply relocations for (i64 elf_index = 0, sec_index_global = 0, rel_index_global = 0; elf_index < linker->num_obj_files; ++elf_index) { Buffer_Context buf = elf_buffer_context(pool, linker, linker->num_obj_files, elf_index); i64 num_sections = elf_section_headers(buf).num; Offset_Size strtab = elf_find_section_by_name(buf, SECTION_STRTAB, sizeof SECTION_STRTAB - 1).data; Offset_Size symtab = elf_find_section_by_name(buf, SECTION_SYMTAB, sizeof SECTION_SYMTAB - 1).data; for (i64 sec_index = 1; sec_index < num_sections; ++sec_index) { Elf_Section_Header src_sec = elf_section(buf, sec_index); if (src_sec.type != SEC_REL && src_sec.type != SEC_RELA) continue; i64 dst_index = elf_find_related_section_index(buf, sec_index); i64 dst_index_global = sec_index_global + dst_index - 1; for (i64 entry_index = 0; entry_index < src_sec.num_entries; ++entry_index, ++rel_index_global) { Link_Rel_Entry *rels = CHUNK(pool, linker->rels, Link_Rel_Entry); Link_Sym_Entry *symbols = CHUNK(pool, linker->symbols, Link_Sym_Entry); Link_Sec_Entry *sections = CHUNK(pool, linker->sections, Link_Sec_Entry); CHECK(rels != NULL, "Internal", 0); CHECK(symbols != NULL, "Internal", 0); CHECK(sections != NULL, "Internal", 0); Elf_Relx_Entry relx = elf_relx(buf, symtab, strtab, src_sec.data, entry_index, src_sec.type == SEC_RELA); c8 * sym_name = elf_name_from_offset(buf, relx.symbol.name); Link_Sym_Entry symbol = symbols[rels[rel_index_global].symbol]; u8 *dst = buf.begin + elf_section(buf, dst_index).data.offset + relx.offset; // TODO Implement GOT and PLT. // Represents the addend used to compute the value of the relocatable field. i64 A = relx.addent; // Represents the base address at which a shared object has been loaded into memory during execution. Generally, a shared object is built with a 0 base virtual address, but the execution address will be different. i64 B = sections[dst_index_global].address; // Represents the place (section offset or address) of the storage unit being relocated (computed using r_offset). i64 P = sections[dst_index_global].address + relx.offset; // Represents the value of the symbol whose index resides in the relocation entry. i64 S = symbol.address; // The size of the symbol whose index resides in the relocation entry. i64 Z = symbol.size; // Represents the address of the global offset table. i64 GOT = rw_got_address; // Represents the offset into the global offset table at which the relocation entry's symbol will reside during execution. i64 G = symbol.got_offset; // Represents the place (section offset or address) of the Procedure Linkage Table entry for a symbol. 
i64 L = S; // TODO switch (relx.type) { #define SKIP_(x) \ if (str_eq(relx.symbol.name.size, sym_name, sizeof(#x) - 1, #x)) \ break; case R_X86_64_64: case R_X86_64_PC32: case R_X86_64_PLT32: case R_X86_64_GOTTPOFF: case R_X86_64_TPOFF32: SKIP_(__preinit_array_start); SKIP_(__preinit_array_end); SKIP_(__init_array_start); SKIP_(__init_array_end); SKIP_(__fini_array_start); SKIP_(__fini_array_end); SKIP_(__gcc_personality_v0); SKIP_(__pthread_initialize_minimal); SKIP_(_init); SKIP_(_fini); SKIP_(_end); SKIP_(_Unwind_Resume); SKIP_(_Unwind_Backtrace); SKIP_(_Unwind_ForcedUnwind); SKIP_(_Unwind_GetIP); SKIP_(_Unwind_GetCFA); SKIP_(__addtf3); SKIP_(__subtf3); SKIP_(__multf3); SKIP_(__divtf3); SKIP_(__eqtf2); SKIP_(__lttf2); SKIP_(__letf2); SKIP_(__gttf2); SKIP_(__getf2); SKIP_(__unordtf2); if (S == 0) LOG(ERROR, "Check symbol: %s", sym_name); CHECK(S != 0, "Symbol address is 0", 0); break; #undef SKIP_ default:; } switch (relx.type) { #define ADD_(BITS, OP) \ do { \ i64 x_ = read_i##BITS(LE, dst, buf.end) + (OP); \ write_i##BITS(LE, (i##BITS) x_, dst, buf.end); \ } while (0) #define TODO_ FAIL("Not implemented", 0) case R_X86_64_NONE: /* Do nothing */ break; case R_X86_64_64: ADD_(64, S + A); break; case R_X86_64_PC32: ADD_(32, S + A - P); break; case R_X86_64_GOT32: TODO_; break; case R_X86_64_PLT32: ADD_(32, L + A - P); break; case R_X86_64_COPY: /* Do nothing */ break; case R_X86_64_GLOB_DAT: ADD_(64, S); break; case R_X86_64_JUMP_SLOT: ADD_(64, S); break; case R_X86_64_RELATIVE: ADD_(64, B + A); break; case R_X86_64_GOTPCREL: ADD_(32, GOT + G + A - P); break; case R_X86_64_32: ADD_(32, S + A); break; case R_X86_64_32S: ADD_(32, S + A); break; case R_X86_64_16: ADD_(16, S + A); break; case R_X86_64_PC16: ADD_(16, S + A - P); break; case R_X86_64_8: ADD_(8, S + A); break; case R_X86_64_PC8: ADD_(8, S + A - P); break; case R_X86_64_DTPMOD64: TODO_; break; case R_X86_64_DTPOFF64: TODO_; break; case R_X86_64_TPOFF64: TODO_; break; case R_X86_64_TLSGD: TODO_; break; case R_X86_64_TLSLD: TODO_; break; case R_X86_64_DTPOFF32: TODO_; break; case R_X86_64_GOTTPOFF: ADD_(32, S - GOT); break; case R_X86_64_TPOFF32: ADD_(32, S + A - B); break; case R_X86_64_PC64: ADD_(64, S + A - P); break; case R_X86_64_GOTOFF64: TODO_; break; case R_X86_64_GOTPC32: ADD_(32, GOT + A - P); break; case R_X86_64_GOT64: TODO_; break; case R_X86_64_GOTPCREL64: TODO_; break; case R_X86_64_GOTPC64: ADD_(64, GOT + A - P); break; case R_X86_64_GOTPLT64: TODO_; break; case R_X86_64_PLTOFF64: TODO_; break; case R_X86_64_SIZE32: ADD_(32, Z + A); break; case R_X86_64_SIZE64: ADD_(64, Z + A); break; case R_X86_64_GOTPC32_TLSDESC: TODO_; break; case R_X86_64_TLSDESC_CALL: TODO_; break; case R_X86_64_TLSDESC: TODO_; break; case R_X86_64_IRELATIVE: TODO_; break; case R_X86_64_RELATIVE64: TODO_; break; case R_X86_64_GOTPCRELX: ADD_(32, GOT + G + A - P); break; case R_X86_64_REX_GOTPCRELX: ADD_(32, GOT + G + A - P); break; default: FAIL("Unknown relocation type", 0); #undef ADD_ #undef TODO_ } } } sec_index_global += num_sections - 1; } // ============================================================== // // Apply internal relocations #define FIND_(x) \ do { \ Link_Sym_Entry *symbols_ = CHUNK(pool, linker->symbols, Link_Sym_Entry); \ CHECK(symbols_ != NULL, "Internal", 0); \ for (i64 i = 0; i < num_symbols; ++i) \ if (STR_EQ(symbols_[i].name_size, symbols_[i].name, #x)) { \ LOG(INFO, "Found " #x ": 0x%llx", symbols_[i].address); \ break; \ } \ } while (0) FIND_(_start); FIND_(__libc_start_main); FIND_(__tunables_init); 
FIND_(__libc_early_init); FIND_(__ctype_init); #undef FIND_ i64 num_rels = codegen->rels.size / sizeof(Codegen_Rel_Entry); Codegen_Rel_Entry *rels = CHUNK(pool, codegen->rels, Codegen_Rel_Entry); CHECK(rels != NULL, "Internal", 0); for (i64 rel_index = 0; rel_index < num_rels; ++rel_index) { Codegen_Rel_Entry rel = rels[rel_index]; u8 *buffer_code = CHUNK(pool, codegen->buffer_code, u8); CHECK(buffer_code != NULL, "Internal", 0); u8 *begin = buffer_code + rel.offset; u8 *end = buffer_code + codegen->offset_code; switch (rel.type) { case REL_ADD_INSTRUCTION_ADDRESS: { CHECK(rel.size == 8, "Not implemented", 0); i64 value = rel.value + rx_code_address + rel.offset; write_i64(LE, value, begin, end); } break; case REL_ADD_RO_OP_ADDRESS: { CHECK(rel.size == 8, "Not implemented", 0); i64 value = rel.value + ro_data_address; write_i64(LE, value, begin, end); } break; case REL_ADD_PROC_ADDRESS: { CHECK(rel.size == 8, "Not implemented", 0); b8 found = 0; if (rel.proc == UNDEFINED) { CHECK(rel.name_size > 0 && rel.name != NULL, "No proc name", 0); Link_Sym_Entry *symbols = CHUNK(pool, linker->symbols, Link_Sym_Entry); CHECK(symbols != NULL, "Internal", 0); for (i64 i = 0; i < num_symbols; ++i) if (symbols[i].address != 0 && str_eq(symbols[i].name_size, symbols[i].name, rel.name_size, rel.name)) { i64 value = rel.value + symbols[i].address; write_i64(LE, value, begin, end); found = 1; LOG(VERBOSE, "Found %.*s: 0x%llx", rel.name_size, rel.name, value); break; } } else { i64 num_entities = pool->entities.size / sizeof(Entity); Entity *entities = CHUNK(pool, pool->entities, Entity); Codegen_Entity *codegen_entities = CHUNK(pool, codegen->entities, Codegen_Entity); CHECK(codegen_entities != NULL, "Internal", 0); CHECK(rel.proc != UNDEFINED, "Undefined", 0); CHECK(rel.proc > 0 && rel.proc < num_entities, "Buffer overflow", 0); CHECK(entities != NULL, "Internal", 0); CHECK(entities[rel.proc].is_enabled, "No entity", 0); CHECK(entities[rel.proc].type == ENTITY_PROC, "No proc", 0); CHECK(codegen_entities[rel.proc].emit_done, "No proc address", 0); i64 value = rel.value + rx_code_address + codegen_entities[rel.proc].proc_offset; write_i64(LE, value, begin, end); found = 1; LOG(VERBOSE, "Found anonymous proc: 0x%llx", value); } if (!found) { LOG(ERROR, "Undefined symbol: %.*s", rel.name_size, rel.name); FAIL("Link failed", 0); } } break; } } // ============================================================== // // Writing the ELF executable // i64 output_size = align(rw_tls_data_offset + rw_tls_data_size, X86_64_PAGE_SIZE); i64 entry = rx_code_address + codegen->entry_point; if (!codegen->has_entry) { // TODO Explicitly declare _start proc b8 found = 0; Link_Sym_Entry *symbols = CHUNK(pool, linker->symbols, Link_Sym_Entry); CHECK(symbols != NULL, "Internal", 0); for (i64 sym_index = 0; sym_index < num_symbols; ++sym_index) if (STR_EQ(symbols[sym_index].name_size, symbols[sym_index].name, "_start")) { entry = symbols[sym_index].address; found = 1; break; } CHECK(found, "Undefined symbol: _start", 0); } LOG(VERBOSE, "Entry point: 0x%llx", entry); LOG(VERBOSE, "Total %lld sections", num_sections_total); LOG(VERBOSE, "Total %lld symbols", num_symbols); LOG(VERBOSE, "Total size"); LOG(VERBOSE, "r/x code - %7lld bytes", rx_code_size); LOG(VERBOSE, "r/o data - %7lld bytes", ro_data_size); LOG(VERBOSE, "r/w data - %7lld bytes", rw_data_size); LOG(VERBOSE, "r/w zval - %7lld bytes", rw_zval_size); LOG(VERBOSE, "r/w TLS data - %7lld bytes", rw_tls_data_size); LOG(VERBOSE, "r/w TLS zval - %7lld bytes", rw_tls_zval_size); 
LOG(VERBOSE, "r/w GOT - %7lld bytes", rw_got_size); LOG(VERBOSE, "r/w dynamic - %7lld bytes", rw_dynamic_size); LOG(VERBOSE, "Writing ELF x86_64 executable"); linker->output_buffer = chunk_resize(pool, linker->output_buffer, output_size); u8 *o = CHUNK(pool, linker->output_buffer, u8); u8 *o_end = o + linker->output_buffer.size; // ELF header // mem_cpy(o, ELF_MAGIC, 4); write_u8 (LE, ELF_64, o + 4, o_end); write_u8 (LE, ELF_2_LE, o + 5, o_end); write_u8 (LE, ELF_VERSION, o + 6, o_end); write_u8 (LE, ELF_SYS_V, o + 7, o_end); write_u8 (LE, ELF_ABI_VERSION, o + 8, o_end); // 7 bytes - padding write_u16(LE, ELF_EXECUTABLE, o + 16, o_end); write_u16(LE, ELF_X86_64, o + 18, o_end); write_u32(LE, ELF_VERSION, o + 20, o_end); write_i64(LE, entry, o + 24, o_end); write_u64(LE, ELF_HEADER_SIZE, o + 32, o_end); // program header offset // 8 bytes - section header offset o + 40 // 4 bytes - flags o + 48 write_u16(LE, ELF_HEADER_SIZE, o + 52, o_end); write_u16(LE, ELF_PROGRAM_HEADER_SIZE, o + 54, o_end); // 2 bytes - num program headers o + 56 // 2 bytes - section header size o + 58 // 2 bytes - num section headers o + 60 // 2 bytes - string table section o + 62 // header index // Program headers // CHECK(rx_code_offset % X86_64_PAGE_SIZE == rx_code_address % X86_64_PAGE_SIZE, "Invalid alignment", 0); CHECK(rw_data_offset % X86_64_PAGE_SIZE == rw_data_address % X86_64_PAGE_SIZE, "Invalid alignment", 0); CHECK(ro_data_offset % X86_64_PAGE_SIZE == ro_data_address % X86_64_PAGE_SIZE, "Invalid alignment", 0); CHECK(rw_got_offset % X86_64_PAGE_SIZE == rw_got_address % X86_64_PAGE_SIZE, "Invalid alignemtn", 0); CHECK(rw_dynamic_offset % X86_64_PAGE_SIZE == rw_dynamic_address % X86_64_PAGE_SIZE, "Invalid alignemtn", 0); CHECK(rw_tls_data_offset % X86_64_PAGE_SIZE == rw_tls_data_address % X86_64_PAGE_SIZE, "Invalid alignment", 0); u8 *h = o + ELF_HEADER_SIZE; num_program_headers = 0; // r/x code if (rx_code_size > 0) { write_u32(LE, 1, h, o_end); // type (PT_LOAD) write_u32(LE, 5, h + 4, o_end); // flags (PF_X | PF_R) write_i64(LE, rx_code_offset, h + 8, o_end); write_i64(LE, rx_code_address, h + 16, o_end); // virtual address write_i64(LE, rx_code_address, h + 24, o_end); // phisical address write_i64(LE, rx_code_size, h + 32, o_end); // size in file write_i64(LE, rx_code_size, h + 40, o_end); // size in memory write_i64(LE, X86_64_ALIGNMENT, h + 48, o_end); h += ELF_PROGRAM_HEADER_SIZE; ++num_program_headers; } // r/o data if (ro_data_size > 0) { write_u32(LE, 1, h, o_end); // type (PT_LOAD) write_u32(LE, 4, h + 4, o_end); // flags (PF_R) write_i64(LE, ro_data_offset, h + 8, o_end); write_i64(LE, ro_data_address, h + 16, o_end); // virtual address write_i64(LE, ro_data_address, h + 24, o_end); // phisical address write_i64(LE, ro_data_size, h + 32, o_end); // size in file write_i64(LE, ro_data_size, h + 40, o_end); // size in memory write_i64(LE, X86_64_ALIGNMENT, h + 48, o_end); h += ELF_PROGRAM_HEADER_SIZE; ++num_program_headers; } // r/w data if (rw_data_size > 0) { write_u32(LE, 1, h, o_end); // type (PT_LOAD) write_u32(LE, 6, h + 4, o_end); // flags (PF_R | PF_W) write_i64(LE, rw_data_offset, h + 8, o_end); write_i64(LE, rw_data_address, h + 16, o_end); // virtual address write_i64(LE, rw_data_address, h + 24, o_end); // phisical address write_i64(LE, rw_data_size, h + 32, o_end); // size in file write_i64(LE, rw_data_size, h + 40, o_end); // size in memory write_i64(LE, X86_64_ALIGNMENT, h + 48, o_end); h += ELF_PROGRAM_HEADER_SIZE; ++num_program_headers; } // r/w zero values if (rw_zval_size > 0) { 
  write_u32(LE, 1, h, o_end); // type (PT_LOAD)
  write_u32(LE, 6, h + 4, o_end); // flags (PF_R | PF_W)
  write_i64(LE, rw_got_offset, h + 8, o_end);
  write_i64(LE, rw_zval_address, h + 16, o_end); // virtual address
  write_i64(LE, rw_zval_address, h + 24, o_end); // physical address
  write_i64(LE, 0, h + 32, o_end); // size in file
  write_i64(LE, rw_zval_size, h + 40, o_end); // size in memory
  write_i64(LE, X86_64_ALIGNMENT, h + 48, o_end);
  h += ELF_PROGRAM_HEADER_SIZE;
  ++num_program_headers;
}

// r/w GOT
if (rw_got_size > 0) {
  write_u32(LE, 1, h, o_end); // type (PT_LOAD)
  write_u32(LE, 6, h + 4, o_end); // flags (PF_R | PF_W)
  write_i64(LE, rw_got_offset, h + 8, o_end);
  write_i64(LE, rw_got_address, h + 16, o_end); // virtual address
  write_i64(LE, rw_got_address, h + 24, o_end); // physical address
  write_i64(LE, rw_got_size, h + 32, o_end); // size in file
  write_i64(LE, rw_got_size, h + 40, o_end); // size in memory
  write_i64(LE, X86_64_ALIGNMENT, h + 48, o_end);
  h += ELF_PROGRAM_HEADER_SIZE;
  ++num_program_headers;
}

// r/w dynamic
if (rw_dynamic_size) {
  write_u32(LE, 2, h, o_end); // type (PT_DYNAMIC)
  write_u32(LE, 6, h + 4, o_end); // flags (PF_R | PF_W)
  write_i64(LE, rw_dynamic_offset, h + 8, o_end);
  write_i64(LE, rw_dynamic_address, h + 16, o_end); // virtual address
  write_i64(LE, rw_dynamic_address, h + 24, o_end); // physical address
  write_i64(LE, rw_dynamic_size, h + 32, o_end); // size in file
  write_i64(LE, rw_dynamic_size, h + 40, o_end); // size in memory
  write_i64(LE, X86_64_ALIGNMENT, h + 48, o_end);
  h += ELF_PROGRAM_HEADER_SIZE;
  ++num_program_headers;
}

// r/w TLS data
if (rw_tls_data_size > 0) {
  write_u32(LE, 7, h, o_end); // type (PT_TLS)
  write_u32(LE, 6, h + 4, o_end); // flags (PF_R | PF_W)
  write_i64(LE, rw_tls_data_offset, h + 8, o_end);
  write_i64(LE, rw_tls_data_address, h + 16, o_end); // virtual address
  write_i64(LE, rw_tls_data_address, h + 24, o_end); // physical address
  write_i64(LE, rw_tls_data_size, h + 32, o_end); // size in file
  write_i64(LE, rw_tls_data_size, h + 40, o_end); // size in memory
  write_i64(LE, X86_64_ALIGNMENT, h + 48, o_end);
  h += ELF_PROGRAM_HEADER_SIZE;
  ++num_program_headers;
}

// r/w TLS zero values
if (rw_tls_zval_size > 0) {
  write_u32(LE, 7, h, o_end); // type (PT_TLS)
  write_u32(LE, 6, h + 4, o_end); // flags (PF_R | PF_W)
  write_i64(LE, output_size, h + 8, o_end);
  write_i64(LE, rw_tls_zval_address, h + 16, o_end); // virtual address
  write_i64(LE, rw_tls_zval_address, h + 24, o_end); // physical address
  write_i64(LE, 0, h + 32, o_end); // size in file
  write_i64(LE, rw_tls_zval_size, h + 40, o_end); // size in memory
  write_i64(LE, X86_64_ALIGNMENT, h + 48, o_end);
  h += ELF_PROGRAM_HEADER_SIZE;
  ++num_program_headers;
}

write_u16(LE, num_program_headers, o + 56, o_end);

CHECK(h == o + ELF_HEADER_SIZE + num_program_headers * ELF_PROGRAM_HEADER_SIZE, "Invalid num program headers", 0);
CHECK(rx_code_offset >= h - o, "Sanity", 0);

// Code
//
u8 *buffer_code = CHUNK(pool, codegen->buffer_code, u8);
u8 *buffer_ro_data = CHUNK(pool, codegen->buffer_ro_data, u8);
CHECK(buffer_code != NULL, "Internal", 0);
CHECK(buffer_ro_data != NULL, "Internal", 0);

mem_cpy(o + rx_code_offset, buffer_code, codegen->offset_code);
mem_cpy(o + ro_data_offset, buffer_ro_data, codegen->offset_ro_data);

// ==============================================================
//
// Write sections into the output buffer

Link_Sec_Entry *sections = CHUNK(pool, linker->sections, Link_Sec_Entry);
CHECK(sections != NULL, "Internal", 0);

for (i64 elf_index = 0, sec_index_global = 0; elf_index <
linker->num_obj_files; ++elf_index) { Buffer_Context buf = elf_buffer_context(pool, linker, linker->num_obj_files, elf_index); Offset_Num headers = elf_section_headers(buf); for (i64 sec_index = 1; sec_index < headers.num; ++sec_index, ++sec_index_global) { Elf_Section_Header section = elf_section(buf, sec_index); i64 offset = sections[sec_index_global].offset; if (offset == 0 || !section.alloc || section.data.size == 0) continue; u8 *p = o + offset; CHECK(p >= o + program_offset + codegen->offset_code, "Buffer overflow", 0); CHECK(p + section.data.size <= o + output_size, "Buffer overflow", 0); mem_cpy(p, buf.begin + section.data.offset, section.data.size); } } // GOT write_u64(LE, rw_dynamic_address, o + rw_got_offset, o_end); Link_Sym_Entry *symbols = CHUNK(pool, linker->symbols, Link_Sym_Entry); CHECK(symbols != NULL, "Internal", 0); for (i64 i = 0, offset = 0; i < num_symbols; ++i) { Link_Sym_Entry *sym = symbols + i; if (sym->got_offset == 0) continue; offset += ELF_GOT_ENTRY_SIZE; CHECK(offset < rw_got_size, "Sanity", 0); write_u64(LE, sym->address, o + rw_got_offset + offset, o_end); } // ============================================================== return output_size; } b8 unit_write_with_context( Pool * pool, Codegen_Context *codegen, Linker_Context * linker, i64 unit, u16 format, u16 arch, i64 io_out, void * io_user_data ) { CHECK(unit != UNDEFINED, "Invalid unit", 0); // ============================================================== // // Reading dependencies i64 obj_files_size = 0; Unit *u = unit_by_id(pool, unit); i64 num_links = u->links.size / sizeof(i64); i64 *links = CHUNK(pool, u->links, i64); CHECK(links != NULL, "Internal", 0); for (i64 link_index = 1; link_index < num_links; ++link_index) { i64 id = links[link_index]; if (id == UNDEFINED) continue; Unit *l = unit_by_id(pool, id); CHECK(entity_enabled(pool, id), "Internal", 0); CHECK(l->name.size > 0, "No link name", 0); switch (l->type) { case UNIT_LIBRARY_OBJECT: { i64 f = io_open_read(l->name.size, CHUNK(pool, l->name, c8), io_user_data); io_seek(f, 0, IO_SEEK_END, io_user_data); i64 in_size = io_tell(f, io_user_data); linker->dependencies_buffer = chunk_resize(pool, linker->dependencies_buffer, obj_files_size + in_size); u8 *dependencies_buffer = CHUNK(pool, linker->dependencies_buffer, u8); CHECK(dependencies_buffer != NULL, "Internal", 0); io_seek(f, 0, IO_SEEK_BEGIN, io_user_data); i64 n = io_read(f, in_size, dependencies_buffer + obj_files_size, io_user_data); CHECK(n == in_size, "Read failed", 0); io_close(f, io_user_data); linker->obj_file_offsets = chunk_resize(pool, linker->obj_file_offsets, (linker->num_obj_files + 2) * sizeof(i64)); i64 *obj_file_offsets = CHUNK(pool, linker->obj_file_offsets, i64); CHECK(obj_file_offsets != NULL, "Internal", 0); obj_file_offsets[linker->num_obj_files] = obj_files_size; obj_files_size += align(in_size, X86_64_ALIGNMENT); obj_file_offsets[++linker->num_obj_files] = obj_files_size; } break; case UNIT_LIBRARY_STATIC: { i64 f = io_open_read(l->name.size, CHUNK(pool, l->name, c8), io_user_data); io_seek(f, 0, IO_SEEK_END, io_user_data); i64 in_size = io_tell(f, io_user_data); linker->obj_file_buffer = chunk_resize(pool, linker->obj_file_buffer, in_size); u8 *obj_file_buffer = CHUNK(pool, linker->obj_file_buffer, u8); CHECK(obj_file_buffer != NULL, "Internal", 0); io_seek(f, 0, IO_SEEK_BEGIN, io_user_data); i64 n = io_read(f, in_size, obj_file_buffer, io_user_data); CHECK(n == in_size, "Read failed", 0); io_close(f, io_user_data); // 
======================================================== // // Read AR library u8 *ar_begin = obj_file_buffer; u8 *ar_end = obj_file_buffer + in_size; CHECK(mem_eq(ar_begin, AR_MAGIC, 8), "Invalid AR file", 0); u8 *f_begin = ar_begin + 8; while (f_begin + 60 < ar_end) { u8 *f_id = f_begin; u8 *f_size = f_begin + 48; u8 *f_end = f_begin + 58; u8 *f_data = f_begin + 60; i64 size = (i64) u64_from_dec_str((c8 *) f_size, (c8 *) f_size + 10); size = align(size, 2); CHECK(mem_eq(f_end, "\x60\x0a", 2), "Invalid AR file", 0); CHECK(f_begin + size <= ar_end, "Buffer overflow", 0); if (!mem_eq(f_id, AR_SYMBOL_TABLE, sizeof AR_SYMBOL_TABLE - 1) && !mem_eq(f_id, AR_STRING_TABLE, sizeof AR_STRING_TABLE - 1)) { // Read ELF object file i64 delta_size = align(size, X86_64_ALIGNMENT); u8 *dependencies_buffer = CHUNK(pool, linker->dependencies_buffer, u8); CHECK(dependencies_buffer != NULL, "Internal", 0); mem_cpy(dependencies_buffer + obj_files_size, f_data, size); i64 *obj_file_offsets = CHUNK(pool, linker->obj_file_offsets, i64); CHECK(obj_file_offsets != NULL, "Internal", 0); obj_file_offsets[linker->num_obj_files] = obj_files_size; obj_files_size += delta_size; obj_file_offsets[++linker->num_obj_files] = obj_files_size; } f_begin = f_data + size; } } break; default: FAIL("Not implemented", 0); } } // ============================================================== i64 output_size = unit_write_in_memory(pool, codegen, linker, unit, format, arch); if (output_size <= 0) return 0; // ============================================================== // // Write the output buffer into the file u8 *output_buffer = CHUNK(pool, linker->output_buffer, u8); CHECK(output_buffer != NULL, "Internal", 0); io_write(io_out, output_size, output_buffer, io_user_data); return 1; } b8 unit_write( Pool * pool, i64 unit, u16 format, u16 arch, i64 io_out, void * io_user_data ) { Codegen_Context codegen = {0}; Linker_Context linker = {0}; b8 status = unit_write_with_context(pool, &codegen, &linker, unit, format, arch, io_out, io_user_data); chunk_remove(pool, codegen.entities); chunk_remove(pool, codegen.rels); chunk_remove(pool, codegen.buffer_code); chunk_remove(pool, codegen.buffer_ro_data); chunk_remove(pool, linker.obj_file_buffer); chunk_remove(pool, linker.dependencies_buffer); chunk_remove(pool, linker.obj_file_offsets); chunk_remove(pool, linker.sections); chunk_remove(pool, linker.symbols); chunk_remove(pool, linker.rels); chunk_remove(pool, linker.output_buffer); return status; } i64 io_open_read(i64 name_size, c8 *name, void *user_data) { i64 f; dispatch_io(IO_OPEN_READ, &f, &name_size, name, user_data); return f; } i64 io_open_write(i64 name_size, c8 *name, void *user_data) { i64 f; dispatch_io(IO_OPEN_WRITE, &f, &name_size, name, user_data); return f; } void io_close(i64 f, void *user_data) { dispatch_io(IO_CLOSE, &f, NULL, NULL, user_data); } b8 io_seek(i64 f, i64 offset, u16 origin, void *user_data) { dispatch_io(IO_SEEK, &f, &offset, &origin, user_data); return 1; } i64 io_tell(i64 f, void *user_data) { i64 offset; dispatch_io(IO_TELL, &f, &offset, NULL, user_data); return offset; } i64 io_read(i64 f, i64 size, void *data, void *user_data) { dispatch_io(IO_READ, &f, &size, data, user_data); return size; } i64 io_write(i64 f, i64 size, void *data, void *user_data) { dispatch_io(IO_WRITE, &f, &size, data, user_data); return size; } void io_chmod_exe(i64 f, void *user_data) { dispatch_io(IO_CHMOD_EXE, &f, NULL, NULL, user_data); } // ================================================================ // // * Helper 
procedures
//
// ================================================================

#if HELPERS

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

#ifdef __unix__
#include <sys/stat.h>
#include <sys/wait.h>
#endif

void wait_any_input(void) {
  while (getc(stdin) != '\n');
  fflush(stdin);
}

void dispatch_assert(b8 condition, c8 *message, u32 line, c8 *file) {
  if (condition)
    return;
  dispatch_log(ERROR, line, file, message);
  exit(-1);
}

void dispatch_log(i32 log_level, u32 line, c8 *file, c8 *format, ...) {
  if (file == NULL || format == NULL)
    return;

  if (*format == '\0' && log_level != TRACE) {
    fprintf(log_level == ERROR || log_level == WARNING ? stderr : stdout, "\n");
    return;
  }

  c8 message[256] = {0};

  va_list ap;
  va_start(ap, format);
  vsnprintf(message, sizeof message, format, ap);
  va_end(ap);

  fflush(stdout);

  i32 len = 56 - (i32) str_len_or(message, message + 56, 56);

  switch (log_level) {
    case ERROR:
      fprintf(stderr,
              "\r\x1b[41;1m\x1b[30m Error \x1b[40;0m\x1b[37m %s "
              "%.*s \x1b[36m%s\x1b[34m:%d\x1b[37m\n",
              message, len,
              "................................................................",
              file, line);
      if (LOG_BLOCKING) wait_any_input();
      break;

    case WARNING:
      fprintf(stderr,
              "\r\x1b[43;1m\x1b[30m Warning \x1b[40;0m\x1b[37m %s "
              "%.*s \x1b[36m%s\x1b[34m:%d\x1b[37m\n",
              message, len,
              "................................................................",
              file, line);
      if (LOG_BLOCKING) wait_any_input();
      break;

    case INFO:
      fprintf(stdout, "\r\x1b[42;1m\x1b[30m Info \x1b[40;0m\x1b[37m %s\n", message);
      break;

    case VERBOSE:
      fprintf(stdout, "\r\x1b[47;1m\x1b[30m Verbose \x1b[40;0m\x1b[37m %s\n", message);
      break;

    case TRACE:
      fprintf(stdout,
              "\r\x1b[45;1m\x1b[30m Trace \x1b[40;0m\x1b[37m %s "
              "%.*s \x1b[36m%s\x1b[34m:%d\x1b[37m\n",
              message, len,
              "................................................................",
              file, line);
      if (TRACE_BLOCKING) wait_any_input();
      break;

    default:;
  }
}

// IO dispatch procedure
//
void dispatch_io(u16 op, i64 *id, i64 *size, void *data, void *user_data) {
  CHECK(id != NULL, "Sanity",);
  (void) user_data;

  FILE **f = (FILE **) id;
  c8 buf[1024 * 4] = { 0 };

  switch (op) {
    case IO_OPEN_READ:
    case IO_OPEN_WRITE:
      CHECK(size != NULL, "Sanity",);
      CHECK(*size > 0 && *size < (i64) sizeof buf, "Sanity",);
      CHECK(data != NULL, "Sanity",);
      mem_cpy(buf, data, *size);
      *f = fopen(buf, op == IO_OPEN_READ ? "rb" : "wb");
      if (*f == NULL)
        LOG(ERROR, "Open: %s", buf);
      CHECK(*f != NULL, "File open failed",);
      break;

    case IO_CLOSE:
      CHECK(*f != NULL, "Sanity",);
      CHECK(size == NULL, "Sanity",);
      CHECK(data == NULL, "Sanity",);
      fclose(*f);
      break;

    case IO_SEEK: {
      CHECK(*f != NULL, "Sanity",);
      CHECK(size != NULL, "Sanity",);
      CHECK(data != NULL, "Sanity",);
      u16 *origin = (u16 *) data;
      if (!(*origin == IO_SEEK_CURSOR && *size == 0)) {
        CHECK(*origin == IO_SEEK_CURSOR || *origin == IO_SEEK_BEGIN || *origin == IO_SEEK_END, "Sanity",);
        i32 s = fseek(*f, *size, *origin == IO_SEEK_CURSOR ? SEEK_CUR : *origin == IO_SEEK_BEGIN ?
SEEK_SET : SEEK_END); CHECK(s == 0, "File seek failed",); } } break; case IO_TELL: { CHECK(*f != NULL, "Sanity",); CHECK(size != NULL, "Sanity",); CHECK(data == NULL, "Sanity",); i64 n = (i64) ftell(*f); CHECK(n >= 0, "File tell failed",); *size = n; } break; case IO_READ: CHECK(*f != NULL, "Sanity",); CHECK(size != NULL, "Sanity",); CHECK(data != NULL, "Sanity",); CHECK(*size > 0, "Sanity",); *size = fread(data, 1, *size, *f); break; case IO_WRITE: CHECK(*f != NULL, "Sanity",); CHECK(size != NULL, "Sanity",); CHECK(data != NULL, "Sanity",); CHECK(*size > 0, "Sanity",); *size = fwrite(data, 1, *size, *f); break; case IO_CHMOD_EXE: CHECK(*f != NULL, "Sanity",); CHECK(size == NULL, "Sanity",); #ifdef __unix__ fchmod(fileno(*f), 0775); #endif break; default: FAIL("Sanity",); } } // Global state // Pool g_pool = { // Statically allocate a large memory block. // TODO Reallocate the memory block when necessary. .capacity = POOL_CAPACITY, .data = (u8[POOL_CAPACITY]) {0}, }; // Handy procedures // i64 n_address(i64 proc, i64 node) { i64 n = node_address(&g_pool, node); p_add(proc, n); return n; } i64 n_ptr(i64 proc, u64 address) { i64 n = node_ptr(&g_pool, address); p_add(proc, n); return n; } i64 n_str(i64 proc, c8 *value) { i64 len = str_len(value, value + MAX_STRING_SIZE - 1); i64 n_data = node_array_c8(&g_pool, len + 1, value); i64 n_ref = node_address(&g_pool, n_data); p_add(proc, n_data); p_add(proc, n_ref); return n_ref; } i64 n_i32(i64 proc, i32 value) { i64 n = node_i32(&g_pool, value); p_add(proc, n); return n; } i64 n_i64(i64 proc, i64 value) { i64 n = node_i64(&g_pool, value); p_add(proc, n); return n; } i64 n_add(i64 proc, Var x, Var y) { i64 n = node_add(&g_pool, x, y); p_add(proc, n); return n; } i64 n_sub(i64 proc, Var x, Var y) { i64 n = node_sub(&g_pool, x, y); p_add(proc, n); return n; } i64 n_call(i64 proc, i64 target_proc, i64 num_args, Var *args) { i64 n = node_call(&g_pool, target_proc, num_args, args); p_add(proc, n); return n; } i64 n_call_by_name(i64 proc, c8 *name, i64 num_args, Var *args) { i64 n = node_call_by_name(&g_pool, str_len(name, name + MAX_STRING_SIZE), name, num_args, args); p_add(proc, n); return n; } i64 n_if(i64 proc, Var condition) { i64 n = node_if(&g_pool, condition); p_add(proc, n); return n; } i64 n_ret(i64 proc, i64 num_vals, Var *vals) { i64 n = node_ret(&g_pool, num_vals, vals); p_add(proc, n); return n; } i64 n_phi(i64 proc, i64 branch, i64 num_vals, Var *vals) { i64 n = node_phi(&g_pool, branch, num_vals, vals); p_add(proc, n); return n; } i64 p_new(i64 unit, c8 *name) { i64 p = proc_init(&g_pool); i64 len = str_len(name, name + MAX_STRING_SIZE); if (len > 0) proc_set_name(&g_pool, p, len, name); u_add(unit, p); return p; } i64 p_new_entry(i64 unit) { i64 p = p_new(unit, ""); u_entry_point(unit, p); return p; } void p_add(i64 proc, i64 node) { proc_node_add(&g_pool, proc, node); } i64 u_new(void) { return unit_init(&g_pool, UNIT_CODE); } void u_add(i64 unit, i64 proc) { unit_proc_add(&g_pool, unit, proc); } void u_entry_point(i64 unit, i64 proc) { unit_set_entry_point(&g_pool, unit, proc); } void u_elf_x86_64(i64 unit, c8 *output_file_name) { i64 name_len = str_len(output_file_name, output_file_name + MAX_STRING_SIZE); i64 out = io_open_write(name_len, output_file_name, NULL); b8 ok = unit_write(&g_pool, unit, FORMAT_ELF, ARCH_X86_64, out, NULL); io_chmod_exe(out, NULL); io_close(out, NULL); CHECK(ok, "Failed",); } void l_code(i64 unit, i64 link_unit) { unit_link_add(&g_pool, unit, link_unit); } void l_object(i64 unit, c8 *object_library) { i64 l 
= unit_init(&g_pool, UNIT_LIBRARY_OBJECT); c8 *path = l_find(object_library, 0); i64 len = str_len(path, path + MAX_STRING_SIZE); unit_set_name(&g_pool, l, len, path); unit_link_add(&g_pool, unit, l); } void l_static(i64 unit, c8 *static_library) { i64 l = unit_init(&g_pool, UNIT_LIBRARY_STATIC); c8 *path = l_find(static_library, 0); i64 len = str_len(path, path + MAX_STRING_SIZE); unit_set_name(&g_pool, l, len, path); unit_link_add(&g_pool, unit, l); } c8 *l_find(c8 *name, b8 silent) { // Find the full path to a library // FIXME: This does not take into account cross-compilation. CHECK(name != NULL, "Invalid argument", ""); i64 len = str_len(name, name + MAX_STRING_SIZE); CHECK(len < MAX_STRING_SIZE, "Invalid argument", ""); static c8 buf[MAX_STRING_SIZE]; // FIXME #define TRY_(template) \ do { \ snprintf(buf, sizeof buf, (template), name); \ FILE *f = fopen(buf, "rb"); \ if (f == NULL) break; \ fclose(f); \ if (!silent) \ LOG(VERBOSE, "Found library: %s", buf); \ return buf; \ } while (0) TRY_("%s"); if (HOST_OS == OS_Linux) { TRY_("/lib/%s"); if (HOST_ARCH == ARCH_X86_64 || HOST_ARCH == ARCH_ARM64) TRY_("/lib64/%s"); if (HOST_ARCH == ARCH_X86_64) TRY_("/lib/x86_64-linux-gnu/%s"); } TRY_("lib%s.a"); TRY_("%s.o"); if (HOST_OS == OS_Linux) { TRY_("/lib/lib%s.a"); TRY_("/lib/%s.o"); if (HOST_ARCH == ARCH_X86_64 || HOST_ARCH == ARCH_ARM64) { TRY_("/lib64/lib%s.a"); TRY_("/lib64/%s.o"); } if (HOST_ARCH == ARCH_X86_64) { TRY_("/lib/x86_64-linux-gnu/lib%s.a"); TRY_("/lib/x86_64-linux-gnu/%s.o"); } } #undef TRY_ FAIL("Library not found", ""); } #endif // ================================================================ // // TEST SUITE // // ================================================================ #if TESTING #if HELPERS #define ADD_TEST_(name_, res_, ...) \ b8 name_(void) { \ i64 u = u_new(); \ CHECK(u != UNDEFINED, "Sanity", 0); \ \ __VA_ARGS__ \ \ u_elf_x86_64(u, "test_" #name_); \ \ b8 success = 1; \ \ if (HOST_OS != OS_Linux) { \ LOG(INFO, "Skip running the executable. " \ "Host system is not compatible."); \ } else if (HO != LE) { \ LOG(INFO, "Skip running the executable. " \ "Host data ordering is not compatible."); \ } else { \ LOG(VERBOSE, "Running the executable"); \ \ i32 ret; \ \ if (LOG_LEVEL >= VERBOSE) \ ret = system("./test_" #name_); \ else \ ret = system("./test_" #name_ " >/dev/null"); \ \ if (WEXITSTATUS(ret) != res_) \ success = 0; \ } \ \ if (success) \ system("rm test_" #name_); \ \ return success; \ } ADD_TEST_(link_with_libc, 42, // Add the main proc. 
i64 p = p_new(u, "main"); { // Call puts N_CALL_BY_NAME( p, "puts", // proc name n_str(p, "hello sailor") // the first argument ); // Return 42 N_RET( p, n_i32(p, 42) // the return value ); } // Add dependencies if (l_find("c_nonshared", 1)[0] != '\0') l_static(u, "c_nonshared"); else l_static(u, "c"); l_object(u, "crt1"); ) ADD_TEST_(math_two_plus_two, 4, i64 p = p_new_entry(u); N_RET( p, n_add(p, n_i32(p, 2), n_i32(p, 2)) ); ) ADD_TEST_(math_sixty_nine, 69, i64 p = p_new_entry(u); N_RET( p, n_add(p, n_i32(p, 27), n_i32(p, 42)) ); ) ADD_TEST_(math_add_3, 1+2+3, i64 p = p_new_entry(u); i64 a = n_add(p, n_i32(p, 1), n_i32(p, 2)); i64 b = n_i32(p, 3); N_RET( p, n_add(p, a, b) ); ) ADD_TEST_(math_add_4a, 1+2+3+4, i64 p = p_new_entry(u); i64 a = n_add(p, n_i32(p, 1), n_i32(p, 2)); i64 b = n_add(p, n_i32(p, 3), n_i32(p, 4)); N_RET( p, n_add(p, a, b) ); ) ADD_TEST_(math_add_5, 1+2+3+4+5, i64 p = p_new_entry(u); i64 a = n_add(p, n_i32(p, 1), n_i32(p, 2)); i64 b = n_add(p, n_i32(p, 3), n_i32(p, 4)); i64 c = n_i32(p, 5); i64 a_b = n_add(p, a, b); N_RET( p, n_add(p, a_b, c) ); ) ADD_TEST_(math_add_6, 1+2+3+4+5+6, i64 p = p_new_entry(u); i64 a = n_add(p, n_i32(p, 1), n_i32(p, 2)); i64 b = n_add(p, n_i32(p, 3), n_i32(p, 4)); i64 c = n_add(p, n_i32(p, 5), n_i32(p, 6)); i64 a_b = n_add(p, a, b); N_RET( p, n_add(p, a_b, c) ); ) ADD_TEST_(math_add_7, 1+2+3+4+5+6+7, i64 p = p_new_entry(u); i64 a = n_add(p, n_i32(p, 1), n_i32(p, 2)); i64 b = n_add(p, n_i32(p, 3), n_i32(p, 4)); i64 c = n_add(p, n_i32(p, 5), n_i32(p, 6)); i64 d = n_i32(p, 7); i64 a_b = n_add(p, a, b); i64 c_d = n_add(p, c, d); N_RET( p, n_add(p, a_b, c_d) ); ) ADD_TEST_(math_add_4b, 5+6+7+8, i64 p = p_new_entry(u); i64 a = n_add(p, n_i32(p, 1), n_i32(p, 2)); i64 b = n_add(p, n_i32(p, 3), n_i32(p, 4)); i64 c = n_add(p, n_i32(p, 5), n_i32(p, 6)); i64 d = n_add(p, n_i32(p, 7), n_i32(p, 8)); i64 a_b = n_add(p, a, b); i64 c_d = n_add(p, c, d); (void) a_b; N_RET( p, c_d ); ) ADD_TEST_(math_add_8, 1+2+3+4+5+6+7+8, i64 p = p_new_entry(u); i64 a = n_add(p, n_i32(p, 1), n_i32(p, 2)); i64 b = n_add(p, n_i32(p, 3), n_i32(p, 4)); i64 c = n_add(p, n_i32(p, 5), n_i32(p, 6)); i64 d = n_add(p, n_i32(p, 7), n_i32(p, 8)); i64 a_b = n_add(p, a, b); i64 c_d = n_add(p, c, d); N_RET( p, n_add(p, a_b, c_d) ); ) ADD_TEST_(math_sub_3, 3-2-1, i64 p = p_new_entry(u); i64 a = n_sub(p, n_i32(p, 3), n_i32(p, 2)); i64 b = n_i32(p, 1); N_RET( p, n_sub(p, a, b) ); ) ADD_TEST_(math_sub_4, (4-3)-(2-1), i64 p = p_new_entry(u); i64 a = n_sub(p, n_i32(p, 4), n_i32(p, 3)); i64 b = n_sub(p, n_i32(p, 2), n_i32(p, 1)); N_RET( p, n_sub(p, a, b) ); ) ADD_TEST_(math_sub_5, ((5-3)-(1-4))-2, i64 p = p_new_entry(u); i64 a = n_sub(p, n_i32(p, 5), n_i32(p, 3)); i64 b = n_sub(p, n_i32(p, 1), n_i32(p, 4)); i64 c = n_i32(p, 2); i64 a_b = n_sub(p, a, b); N_RET( p, n_sub(p, a_b, c) ); ) ADD_TEST_(math_sub_6, ((6-1)-(5-4))-(3-2), i64 p = p_new_entry(u); i64 a = n_sub(p, n_i32(p, 6), n_i32(p, 1)); i64 b = n_sub(p, n_i32(p, 5), n_i32(p, 4)); i64 c = n_sub(p, n_i32(p, 3), n_i32(p, 2)); i64 a_b = n_sub(p, a, b); N_RET( p, n_sub(p, a_b, c) ); ) ADD_TEST_(math_sub_7, ((7-2)-(2-3))-((4-5)-1), i64 p = p_new_entry(u); i64 a = n_sub(p, n_i32(p, 7), n_i32(p, 2)); i64 b = n_sub(p, n_i32(p, 2), n_i32(p, 3)); i64 c = n_sub(p, n_i32(p, 4), n_i32(p, 5)); i64 d = n_i32(p, 1); i64 a_b = n_sub(p, a, b); i64 c_d = n_sub(p, c, d); N_RET( p, n_sub(p, a_b, c_d) ); ) ADD_TEST_(math_sub_8, ((8-1)-(2-3))-((4-5)-(7-6)), i64 p = p_new_entry(u); i64 a = n_sub(p, n_i32(p, 8), n_i32(p, 1)); i64 b = n_sub(p, n_i32(p, 2), n_i32(p, 
3)); i64 c = n_sub(p, n_i32(p, 4), n_i32(p, 5)); i64 d = n_sub(p, n_i32(p, 7), n_i32(p, 6)); i64 a_b = n_sub(p, a, b); i64 c_d = n_sub(p, c, d); N_RET( p, n_sub(p, a_b, c_d) ); ) ADD_TEST_(math_reg_reuse, (1+2+3+4+5+6+7+8+9+10), i64 p = p_new_entry(u); i64 a = n_i32(p, 1); i64 b = n_add(p, a, n_i32(p, 2)); i64 c = n_add(p, b, n_i32(p, 3)); i64 d = n_add(p, c, n_i32(p, 4)); i64 e = n_add(p, d, n_i32(p, 5)); i64 f = n_add(p, e, n_i32(p, 6)); i64 g = n_add(p, f, n_i32(p, 7)); i64 h = n_add(p, g, n_i32(p, 8)); i64 i = n_add(p, h, n_i32(p, 9)); i64 j = n_add(p, i, n_i32(p, 10)); N_RET(p, j); ) ADD_TEST_(cond_1, 42, i64 p = p_new_entry(u); i64 a = n_i32(p, 1); // mov eax, 1 i64 b = n_if(p, a); // cmp eax, 0 // je _1 i64 c = n_i32(p, 42); // mov ebx, 42 // jmp _2 // _1: i64 d = n_i32(p, 43); // mov ebx, 43 // jmp _2 i64 e = N_PHI(p, b, c, d); // _2: // mov eax, ebx N_RET(p, e); // ret ) ADD_TEST_(cond_2, 43, i64 p = p_new_entry(u); i64 a = n_i32(p, 0); i64 b = n_if(p, a); i64 c = n_i32(p, 42); i64 d = n_i32(p, 43); i64 e = N_PHI(p, b, c, d); N_RET(p, e); ) #endif // HELPERS i32 main(i32 argc, c8 **argv) { b8 is_ok = 1; b8 print_version = 0; b8 print_help = 0; for (i32 i = 1; i < argc; ++i) { if (argv[i][0] == '?' && argv[i][1] == '\0') print_help = 1; else if (argv[i][0] == '-') { if (argv[i][1] == '-') { i64 len = str_len(argv[i] + 2, argv[i] + 128); if (str_eq(len, argv[i] + 2, 7, "version")) print_version = 1; else if (str_eq(len, argv[i] + 2, 4, "help")) print_help = 1; else { LOG(ERROR, "Unknown argument %s", argv[i]); is_ok = 0; } } else { for (i32 j = 1; argv[i][j] != '\0'; ++j) switch (argv[i][j]) { case 'V': print_version = 1; break; case 'h': print_help = 1; break; default: { LOG(ERROR, "Unknown option %c", argv[i][j]); is_ok = 0; } } } } else { LOG(ERROR, "Unknown argument %s", argv[i]); is_ok = 0; } } if (!is_ok) return -1; LOG(INFO, "bxgen " VERSION); if (print_help) { // TODO LOG(ERROR, "Not implemented"); return 0; } if (print_version) return 0; i32 num_tests = 0; i32 num_passed = 0; #define RUN_TEST_(t) \ do { \ ++num_tests; \ if (t()) { \ ++num_passed; \ LOG(INFO, #t " - OK"); \ } else \ LOG(ERROR, #t " - FAIL"); \ } while (0) #if HELPERS // RUN_TEST_(link_with_libc); RUN_TEST_(math_two_plus_two); RUN_TEST_(math_sixty_nine); RUN_TEST_(math_add_3); RUN_TEST_(math_add_4a); RUN_TEST_(math_add_5); RUN_TEST_(math_add_6); RUN_TEST_(math_add_7); RUN_TEST_(math_add_4b); RUN_TEST_(math_add_8); RUN_TEST_(math_sub_3); RUN_TEST_(math_sub_4); RUN_TEST_(math_sub_5); RUN_TEST_(math_sub_6); RUN_TEST_(math_sub_7); RUN_TEST_(math_sub_8); RUN_TEST_(math_reg_reuse); RUN_TEST_(cond_1); RUN_TEST_(cond_2); #endif #undef RUN_TEST_ LOG(INFO, "%d of %d tests passed.", num_passed, num_tests); return num_passed == num_tests ? 0 : -1; } #endif // TESTING #endif // BXGEN_IMPL_GUARD_ #endif // BXGEN_HEADER
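// ================================================================
//
// Usage sketch
//
// A minimal, illustrative example of driving the helper API above
// (only meaningful when HELPERS is enabled): build a unit whose
// entry point returns 2 + 2 and emit it as an x86_64 ELF
// executable. Kept under `#if 0` so it never collides with the
// test-suite main(); the output file name "demo_two_plus_two" is
// an arbitrary placeholder.
//
// ================================================================

#if 0

i32 main(void) {
  i64 u = u_new();
  i64 p = p_new_entry(u);

  // 2 + 2 becomes the process exit code.
  N_RET(p, n_add(p, n_i32(p, 2), n_i32(p, 2)));

  // Write ./demo_two_plus_two and mark it executable.
  u_elf_x86_64(u, "demo_two_plus_two");

  return 0;
}

#endif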