From 21c240a8ae4500a9c121cbabd301da6e5235fc01 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Fri, 1 Jan 2016 19:33:06 +0000
Subject: [PATCH] Explicit Everything!

---
 .gitignore  |   1 +
 gb.h        | 170 +++++-----
 gb.hpp      | 398 ++++++++++++++++++++++------------------------
 gb_string.h |   2 +-
 4 files changed, 253 insertions(+), 318 deletions(-)

diff --git a/.gitignore b/.gitignore
index 3e8c18b..44a40d9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -225,3 +225,4 @@ test.*
 src/
 *.sln
 run.bat
+external/
diff --git a/gb.h b/gb.h
index 143c895..9101012 100644
--- a/gb.h
+++ b/gb.h
@@ -15,6 +15,7 @@
 /*
 Version History:
+	0.06  - Explicit Everything
 	0.05  - Fix Macros
 	0.04a - Change conventions to be in keeping with `gb.hpp`
 	0.04  - Allow for no
@@ -31,31 +32,32 @@ Version History:
 extern "C" {
 #endif

-/* NOTE(bill): Because static means three different things in C/C++
- * Great Design(!)
- */
-#ifndef global_variable
-#define global_variable static
-#define internal_linkage static
-#define local_persist static
+#ifndef GB_DO_NOT_USE_CUSTOM_STATIC
+	/* NOTE(bill): Because static means three different things in C/C++
+	 * Great Design(!)
+	 */
+	#ifndef global_variable
+	#define global_variable static
+	#define internal_linkage static
+	#define local_persist static
+	#endif
+
+	/* Example for static defines
+
+	global_variable f32 const TAU = 6.283185f;
+	global_variable void* g_memory;
+
+	internal_linkage void
+	some_function(...)
+	{
+		local_persist u32 count = 0;
+		...
+		count++;
+		...
+	}
+	*/
 #endif

-/* Example for static defines
-
-	global_variable f32 const TAU = 6.283185f;
-	global_variable void* g_memory;
-
-	internal_linkage void
-	some_function(...)
-	{
-		local_persist u32 count = 0;
-		...
-		count++;
-		...
-	}
-*/
-
-
 #if defined(_MSC_VER)
 	#define _ALLOW_KEYWORD_MACROS
@@ -177,21 +179,11 @@ extern "C" {
 	#undef VC_EXTRALEAN
 	#undef WIN32_EXTRA_LEAN
 	#undef WIN32_LEAN_AND_MEAN
-
 #else
 	#include
 	#include
 #endif

-#ifndef true
-#define true (0==0)
-#define false (0!=0)
-#endif
-
-#ifndef NULL
-#define NULL ((void*)0)
-#endif
-
 #if defined(NDEBUG)
 #define GB_ASSERT(cond) ((void)(0))
 #else
@@ -264,8 +256,6 @@ GB_COMPILE_TIME_ASSERT(sizeof(s64) == 8);
 typedef float  f32;
 typedef double f64;

-typedef s32 bool32;
-
 #if defined(GB_ARCH_32_BIT)
 	typedef u32 usize;
 	typedef s32 ssize;
@@ -275,9 +265,23 @@ typedef s32 bool32;
 #else
 #error Unknown architecture bit size
 #endif
-
 GB_COMPILE_TIME_ASSERT(sizeof(usize) == sizeof(size_t));
+
+
+typedef enum bool32
+{
+	false = 0 != 0,
+	true  = !false,
+} bool32;
+
+#ifndef NULL
+#define NULL ((void*)0)
+#endif
+
+GB_COMPILE_TIME_ASSERT(sizeof(bool32) == sizeof(s32));
+
+
 typedef uintptr_t uintptr;
 typedef intptr_t  intptr;
 typedef ptrdiff_t ptrdiff;
@@ -329,25 +333,24 @@ typedef ptrdiff_t ptrdiff;
 #define cast(Type, src) ((Type)(src))
 #endif

-#if defined(GB_COMPILER_GNU_GCC)
-	#ifndef bit_cast
-	#define bit_cast(Type, src) ({ GB_ASSERT(sizeof(Type) <= sizeof(src)); Type dst; memcpy(&dst, &(src), sizeof(Type)); dst; })
-	#endif
-#endif

 #ifndef pseudo_cast
 #define pseudo_cast(Type, src) (*cast(Type*, &(src)))
 #endif

-#ifndef GB_UNUSED
-#define GB_UNUSED(x) cast(void, sizeof(x))
+#if defined(GB_COMPILER_GNU_GCC)
+	#ifndef bit_cast
+	#define bit_cast(Type, src) ({ GB_ASSERT(sizeof(Type) <= sizeof(src)); Type dst; memcpy(&dst, &(src), sizeof(Type)); dst; })
+	#else
+	// TODO(bill): Figure out a version for `bit_cast` that is not `pseudo_cast`
+	#define bit_cast(Type, src) (pseudo_cast(Type, src))
+	#endif
 #endif
-
-
-
-
+#ifndef GB_UNUSED
+#define GB_UNUSED(x) cast(void, sizeof(x))
+#endif
@@ -360,7 +363,7 @@ typedef ptrdiff_t ptrdiff;

 /* NOTE(bill): 0[x] is used to prevent C++ style arrays with operator overloading */
 #ifndef GB_ARRAY_COUNT
-#define GB_ARRAY_COUNT(x) ((sizeof(x)/sizeof(0[x])) / (cast(size_t, !(sizeof(x) % sizeof(0[x])))))
+#define GB_ARRAY_COUNT(x) ((sizeof(x)/sizeof(0[x])) / (cast(usize, !(sizeof(x) % sizeof(0[x])))))
 #endif

 #ifndef GB_KILOBYTES
@@ -498,17 +501,15 @@ typedef struct gb_Allocator
 	s64 (*total_allocated)(struct gb_Allocator* a);
 } gb_Allocator;

-typedef void* gb_Allocator_Ptr;

 void*
-gb_alloc_align(gb_Allocator_Ptr allocator, usize size, usize align)
+gb_alloc_align(gb_Allocator* allocator, usize size, usize align)
 {
 	GB_ASSERT(allocator != NULL);
-	gb_Allocator* a = cast(gb_Allocator*, allocator);
-	return a->alloc(a, size, align);
+	return allocator->alloc(allocator, size, align);
 }

 void*
-gb_alloc(gb_Allocator_Ptr allocator, usize size)
+gb_alloc(gb_Allocator* allocator, usize size)
 {
 	GB_ASSERT(allocator != NULL);
 	return gb_alloc_align(allocator, size, GB_DEFAULT_ALIGNMENT);
@@ -520,27 +521,24 @@ gb_alloc(gb_Allocator_Ptr allocator, usize size)
 #endif

 void
-gb_free(gb_Allocator_Ptr allocator, void* ptr)
+gb_free(gb_Allocator* allocator, void* ptr)
 {
 	GB_ASSERT(allocator != NULL);
-	gb_Allocator* a = cast(gb_Allocator*, allocator);
-	if (ptr) a->free(a, ptr);
+	if (ptr) allocator->free(allocator, ptr);
 }

 s64
-gb_allocated_size(gb_Allocator_Ptr allocator, void const* ptr)
+gb_allocated_size(gb_Allocator* allocator, void const* ptr)
 {
 	GB_ASSERT(allocator != NULL);
-	gb_Allocator* a = cast(gb_Allocator*, allocator);
-	return a->allocated_size(a, ptr);
+	return allocator->allocated_size(allocator, ptr);
 }

 s64
-gb_total_allocated(gb_Allocator_Ptr allocator)
+gb_total_allocated(gb_Allocator* allocator)
 {
 	GB_ASSERT(allocator != NULL);
-	gb_Allocator* a = cast(gb_Allocator*, allocator);
-	return a->total_allocated(a);
+	return allocator->total_allocated(allocator);
 }
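The allocation calls above are now typed against gb_Allocator* instead of a void* handle. A minimal usage sketch of the resulting call style (editorial illustration only, not part of the patch; the function name and `my_allocator` are hypothetical):

/* Editorial sketch, not part of the patch: `my_allocator` is any initialised
 * gb_Allocator, e.g. a gb_Heap or gb_Arena set up elsewhere. */
void
example_explicit_allocation(gb_Allocator* my_allocator)
{
	f32*  samples = cast(f32*, gb_alloc(my_allocator, 64 * sizeof(f32)));
	void* block   = gb_alloc_align(my_allocator, 4096, 16); /* explicit alignment instead of GB_DEFAULT_ALIGNMENT */

	/* every call names the allocator it uses; nothing is implicit or global */
	gb_free(my_allocator, block);
	gb_free(my_allocator, samples);

	GB_ASSERT(gb_total_allocated(my_allocator) >= 0);
}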
@@ -1198,7 +1196,7 @@ gb_thread_destroy(gb_Thread* t)
 	gb_semaphore_destroy(&t->semaphore);
 }

-internal_linkage void
+static void
 gb__thread_run(gb_Thread* t)
 {
 	gb_semaphore_post(&t->semaphore);
@@ -1206,7 +1204,7 @@ gb__thread_run(gb_Thread* t)
 }

 #if defined(GB_SYSTEM_WINDOWS)
-internal_linkage DWORD WINAPI
+static DWORD WINAPI
 gb__thread_proc(void* arg)
 {
 	gb__thread_run(cast(gb_Thread* , arg));
@@ -1214,7 +1212,7 @@ gb__thread_proc(void* arg)
 }

 #else
-internal_linkage void*
+static void*
 gb__thread_proc(void* arg)
 {
 	gb__thread_run(cast(gb_Thread* , arg));
@@ -1297,7 +1295,7 @@ gb_thread_current_id(void)

 #if defined(GB_SYSTEM_WINDOWS)
 	u8* thread_local_storage = cast(u8*, __readgsqword(0x30));
-	thread_id = *cast(u32 *, thread_local_storage + 0x48);
+	thread_id = *cast(u32*, thread_local_storage + 0x48);
 #elif defined(GB_SYSTEM_OSX) && defined(GB_ARCH_64_BIT)
 	asm("mov %%gs:0x00,%0" : "=r"(thread_id));
@@ -1327,7 +1325,7 @@ typedef struct gb__Heap_Header
 } gb__Heap_Header;

-internal_linkage void*
+static void*
 gb__heap_alloc(gb_Allocator* a, usize size, usize align)
 {
 	gb_Heap* heap = cast(gb_Heap*, a);
@@ -1360,7 +1358,7 @@ gb__heap_alloc(gb_Allocator* a, usize size, usize align)



-internal_linkage void
+static void
 gb__heap_free(gb_Allocator* a, void* ptr)
 {
 	if (!ptr) return;
@@ -1369,7 +1367,7 @@ gb__heap_free(gb_Allocator* a, void* ptr)

 	if (heap->use_mutex) gb_mutex_lock(&heap->mutex);

-	heap->total_allocated_count -= gb_allocated_size(heap, ptr);
+	heap->total_allocated_count -= gb_allocated_size(a, ptr);
 	heap->allocation_count--;

 #if defined(GB_SYSTEM_WINDOWS)
@@ -1382,7 +1380,7 @@ gb__heap_free(gb_Allocator* a, void* ptr)
 	if (heap->use_mutex) gb_mutex_unlock(&heap->mutex);
 }

-internal_linkage s64
+static s64
 gb__heap_allocated_size(gb_Allocator* a, void const* ptr)
 {
 #if defined(GB_SYSTEM_WINDOWS)
@@ -1408,7 +1406,7 @@ gb__heap_allocated_size(gb_Allocator* a, void const* ptr)
 #endif
 }

-internal_linkage s64
+static s64
 gb__heap_total_allocated(gb_Allocator* a)
 {
 	gb_Heap* heap = cast(gb_Heap*, a);
@@ -1459,7 +1457,7 @@ gb_heap_destroy(gb_Heap* heap)



-internal_linkage void*
+static void*
 gb__arena_alloc(gb_Allocator* a, usize size, usize align)
 {
 	gb_Arena* arena = cast(gb_Arena*, a);
@@ -1479,14 +1477,14 @@ gb__arena_alloc(gb_Allocator* a, usize size, usize align)
 	return ptr;
 }

-internal_linkage void
+static void
 gb__arena_free(gb_Allocator* a, void* ptr) /* NOTE(bill): Arenas free all at once */
 {
 	GB_UNUSED(a);
 	GB_UNUSED(ptr);
 }

-internal_linkage s64
+static s64
 gb__arena_allocated_size(gb_Allocator* a, void const* ptr)
 {
 	GB_UNUSED(a);
@@ -1494,7 +1492,7 @@ gb__arena_allocated_size(gb_Allocator* a, void const* ptr)
 	return -1;
 }

-internal_linkage s64
+static s64
 gb__arena_total_allocated(gb_Allocator* a)
 {
 	return cast(gb_Arena*, a)->total_allocated_count;
@@ -1574,7 +1572,7 @@ gb_make_temporary_arena_memory(gb_Arena* arena)
 void
 gb_temporary_arena_memory_free(gb_Temporary_Arena_Memory tmp)
 {
-	GB_ASSERT(gb_total_allocated(tmp.arena) >= tmp.original_count);
+	GB_ASSERT(gb_total_allocated(cast(gb_Allocator*, tmp.arena)) >= tmp.original_count);
 	tmp.arena->total_allocated_count = tmp.original_count;
 	GB_ASSERT(tmp.arena->temp_count > 0);
 	tmp.arena->temp_count--;
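The temporary-arena helpers touched above follow the same explicit pattern: the arena is always named and is used through its embedded gb_Allocator. A short sketch of the intended call sequence (editorial illustration, not part of the patch; `scratch` and the function name are hypothetical):

/* Editorial sketch, not part of the patch. */
void
example_temporary_arena(gb_Arena* scratch)
{
	gb_Temporary_Arena_Memory tmp = gb_make_temporary_arena_memory(scratch);
	gb_Allocator* a = cast(gb_Allocator*, scratch); /* arenas are driven through their gb_Allocator interface */
	void* scratch_buffer = gb_alloc(a, 256);        /* arena memory is released in bulk; gb_free() is a no-op here */

	GB_UNUSED(scratch_buffer);

	gb_temporary_arena_memory_free(tmp); /* rolls total_allocated_count back to where it was */
}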
@@ -1587,7 +1585,7 @@ gb_temporary_arena_memory_free(gb_Temporary_Arena_Memory tmp)



-internal_linkage void*
+static void*
 gb__pool_alloc(gb_Allocator* a, usize size, usize align)
 {
 	gb_Pool* pool = cast(gb_Pool*, a);
@@ -1605,7 +1603,7 @@ gb__pool_alloc(gb_Allocator* a, usize size, usize align)
 	return ptr;
 }

-internal_linkage void
+static void
 gb__pool_free(gb_Allocator* a, void* ptr)
 {
 	if (!ptr) return;
@@ -1620,7 +1618,7 @@ gb__pool_free(gb_Allocator* a, void* ptr)
 	pool->total_size -= pool->block_size;
 }

-internal_linkage s64
+static s64
 gb__pool_allocated_size(gb_Allocator* a, void const* ptr)
 {
 	GB_UNUSED(a);
@@ -1628,7 +1626,7 @@ gb__pool_allocated_size(gb_Allocator* a, void const* ptr)
 	return -1;
 }

-internal_linkage s64
+static s64
 gb__pool_total_allocated(gb_Allocator* a)
 {
 	gb_Pool* pool = cast(gb_Pool*, a);
@@ -1719,14 +1717,14 @@ void* gb_zero_size(void* ptr, usize bytes) { return memset(ptr, 0, bytes); }

 /**********************************/

-internal_linkage void
+static void
 gb__string_set_length(gb_String str, gb_String_Size len)
 {
 	GB_STRING_HEADER(str)->length = len;
 }

-internal_linkage void
+static void
 gb__string_set_capacity(gb_String str, gb_String_Size cap)
 {
 	GB_STRING_HEADER(str)->capacity = cap;
@@ -1856,7 +1854,7 @@ gb_string_set(gb_String str, char const* cstr)
 }


-internal_linkage void*
+static void*
 gb__string_realloc(gb_Allocator* a, void* ptr, gb_String_Size old_size, gb_String_Size new_size)
 {
 	if (!ptr) return gb_alloc(a, new_size);
@@ -1979,7 +1977,7 @@ gb_hash_adler32(void const* key, u32 num_bytes)
 	return (b << 16) | a;
 }

-global_variable const u32 GB_CRC32_TABLE[256] = {
+static const u32 GB_CRC32_TABLE[256] = {
 	0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,
 	0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
 	0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
@@ -2046,7 +2044,7 @@ global_variable const u32 GB_CRC32_TABLE[256] = {
 	0xb40bbe37, 0xc30c8ea1,
0x5a05df1b, 0x2d02ef8d, }; -global_variable const u64 GB_CRC64_TABLE[256] = { +static const u64 GB_CRC64_TABLE[256] = { 0x0000000000000000ull, 0x42F0E1EBA9EA3693ull, 0x85E1C3D753D46D26ull, 0xC711223CFA3E5BB5ull, 0x493366450E42ECDFull, 0x0BC387AEA7A8DA4Cull, 0xCCD2A5925D9681F9ull, 0x8E224479F47CB76Aull, 0x9266CC8A1C85D9BEull, 0xD0962D61B56FEF2Dull, 0x17870F5D4F51B498ull, 0x5577EEB6E6BB820Bull, @@ -2377,7 +2375,7 @@ gb_hash_murmur32(void const* key, u32 num_bytes, u32 seed) /* Get the frequency of the performance counter */ /* It is constant across the program's lifetime */ - local_persist LARGE_INTEGER s_frequency; + static LARGE_INTEGER s_frequency; QueryPerformanceFrequency(&s_frequency); /* TODO(bill): Is this fast enough? */ /* Get the current time */ diff --git a/gb.hpp b/gb.hpp index 0a1054e..24f2cdf 100644 --- a/gb.hpp +++ b/gb.hpp @@ -39,6 +39,7 @@ CONTENTS: /* Version History: + 0.33 - Explicit Everything! No ctor/dtor on Array and Hash_Table 0.32 - Change const position convention 0.31a - Minor fixes 0.31 - Remove `_Allocator` suffix for allocator types @@ -584,33 +585,29 @@ template struct Remove_Extent_Def { using Type = T; // TODO(bill): Do I "need" all of these template traits? - - //////////////////////////////// // // // C++11 Move Semantics // // // //////////////////////////////// -// TODO(bill): Are these decent names? Are `forward` and `move` clear enough? - template inline T&& -forward(Remove_Reference& t) +forward_ownership(Remove_Reference& t) { return static_cast(t); } template inline T&& -forward(Remove_Reference&& t) +forward_ownership(Remove_Reference&& t) { return static_cast(t); } template inline Remove_Reference&& -move(T&& t) +move_ownership(T&& t) { return static_cast&&>(t); } @@ -636,13 +633,13 @@ __GB_NAMESPACE_END { Func f; - Defer(Func&& f) : f{forward(f)} {} + Defer(Func&& f) : f{forward_ownership(f)} {} ~Defer() { f(); }; }; template inline Defer - defer_func(Func&& f) { return Defer(forward(f)); } + defer_func(Func&& f) { return Defer(forward_ownership(f)); } } // namespace impl __GB_NAMESPACE_END @@ -650,7 +647,7 @@ __GB_NAMESPACE_END #define GB_DEFER_1(x, y) x##y #define GB_DEFER_2(x, y) GB_DEFER_1(x, y) #define GB_DEFER_3(x) GB_DEFER_2(GB_DEFER_2(GB_DEFER_2(x, __COUNTER__), _), __LINE__) - #define defer(code) auto GB_DEFER_3(_defer_) = __GB_NAMESPACE_PREFIX::impl::defer_func([&](){code;}) + #define defer(code) auto GB_DEFER_3(_defer_) = ::impl::defer_func([&](){code;}) /* EXAMPLES @@ -1026,29 +1023,35 @@ void destroy(Pool* pool); namespace memory { void* align_forward(void* ptr, usize align); -void* pointer_add(void* ptr, usize bytes); -void* pointer_sub(void* ptr, usize bytes); + +void* pointer_add(void* ptr, usize bytes); +void* pointer_sub(void* ptr, usize bytes); void const* pointer_add(void const* ptr, usize bytes); void const* pointer_sub(void const* ptr, usize bytes); -void* set(void* ptr, usize bytes, u8 value); +template +void fill(T* ptr, usize count, T const& value); -void* zero(void* ptr, usize bytes); -void* copy(void const* src, usize bytes, void* dest); -void* move(void const* src, usize bytes, void* dest); +template +void fill(T* ptr, usize count, T&& value); + +void zero(void* ptr, usize bytes); +void copy(void const* src, usize bytes, void* dest); +void move(void const* src, usize bytes, void* dest); bool equals(void const* a, void const* b, usize bytes); -// TODO(bill): Should this be just zero(T*) ??? 
template -T* zero_struct(T* ptr); +void zero_struct(T* ptr); template -T* zero_array(T* ptr, usize count); +void zero_array(T* ptr, usize count); template -T* copy_array(T const* src_array, usize count, T* dest_array); +void copy_array(T const* src_array, usize count, T* dest_array); + +template +void copy_struct(T const* src_array, T* dest_array); -// TODO(bill): Should I implement something like std::copy, std::fill, std::fill_n ??? template void swap(T* a, T* b); @@ -1153,42 +1156,36 @@ struct Array s64 capacity; T* data; - Array() = default; - explicit Array(Allocator* a, usize count = 0); - - ~Array(); - - Array(Array const& array); - Array(Array&& array); - - Array& operator=(Array const& array); - Array& operator=(Array&& array); - T const& operator[](usize index) const; T& operator[](usize index); }; - -// TODO(bill): Should I even have ctor, dtor, copy/move overloads for Array? -// Should these be explicit functions e.g. +// NOTE(bill): There are not ctor/dtor for Array. +// These are explicit functions e.g. /* -auto old_array = array::make(...); +auto old_array = array::make(...); auto new_array = array::copy(old_array); array::free(&old_array); array::free(&new_array); */ +// This allows functions to be passed by value at a low cost namespace array { -// Helper functions to make and free an array +// Helper functions to make, free, and copy an array template Array make(Allocator* allocator, usize count = 0); template void free(Array* array); +// TODO(bill): Is passing by value okay here or is pass by const& ? +// (sizeof(Array) = 16 + sizeof(void*)) (24 bytes on x86, 32 bytes on x64) +template Array copy(Array array, Allocator* allocator = nullptr); // Appends `item` to the end of the array template void append(Array* a, T const& item); template void append(Array* a, T&& item); // Appends `items[count]` to the end of the array template void append(Array* a, T const* items, usize count); +// Append the contents of another array of the same type +template void append(Array* a, Array other); // Pops the last item form the array. The array cannot be empty. 
template void pop(Array* a); @@ -1208,10 +1205,10 @@ template void grow(Array* a, usize min_capacity = 0); // Used to iterate over the array with a C++11 for loop template inline T* begin(Array& a) { return a.data; } template inline T const* begin(Array const& a) { return a.data; } -template inline T* begin(Array&& a) { return a.data; } +template inline T* begin(Array&& a) { return a.data; } template inline T* end(Array& a) { return a.data + a.count; } template inline T const* end(Array const& a) { return a.data + a.count; } -template inline T* end(Array&& a) { return a.data + a.count; } +template inline T* end(Array&& a) { return a.data + a.count; } @@ -1238,22 +1235,14 @@ struct Hash_Table Array hashes; Array entries; - - Hash_Table(); - explicit Hash_Table(Allocator* a); - Hash_Table(Hash_Table const& other); - Hash_Table(Hash_Table&& other); - - ~Hash_Table() = default; - - Hash_Table& operator=(Hash_Table const& other); - Hash_Table& operator=(Hash_Table&& other); }; namespace hash_table { -// Helper function to make a hash table +// Helper function to make, free, and copy a hash table template Hash_Table make(Allocator* a); +template void free(Hash_Table* h); +template Hash_Table copy(Hash_Table const& h, Allocator* a = nullptr); // Return `true` if the specified key exist in the hash table template bool has(Hash_Table const& h, u64 key); @@ -1442,96 +1431,6 @@ u64 rdtsc(); // // //////////////////////////////// -template -inline -Array::Array(Allocator* a, usize count_) -: allocator(a) -, count(0) -, capacity(0) -, data(nullptr) -{ - if (count_ > 0) - { - data = alloc_array(a, count_); - if (data) - count = capacity = count_; - } -} - - -template -inline -Array::Array(Array const& other) -: allocator(other.allocator) -, count(0) -, capacity(0) -, data(nullptr) -{ - auto new_count = other.count; - array::set_capacity(this, new_count); - memory::copy_array(other.data, new_count, data); - this->count = new_count; -} - -template -inline -Array::Array(Array&& other) -: allocator(nullptr) -, count(0) -, capacity(0) -, data(nullptr) -{ - *this = move(other); -} - - -template -inline -Array::~Array() -{ - if (allocator && capacity > 0) - free(allocator, data); -} - - -template -Array& -Array::operator=(Array const& other) -{ - if (allocator == nullptr) - allocator = other.allocator; - auto new_count = other.count; - array::resize(this, new_count); - memory::copy_array(other.data, new_count, data); - return *this; -} - - -template -Array& -Array::operator=(Array&& other) -{ - if (this != &other) - { - if (allocator && capacity > 0) - free(allocator, data); - - allocator = other.allocator; - count = other.count; - capacity = other.capacity; - data = other.data; - - other.allocator = nullptr; - other.count = 0; - other.capacity = 0; - other.data = nullptr; - } - - return *this; -} - - - template inline T const& Array::operator[](usize index) const @@ -1559,16 +1458,17 @@ template inline Array make(Allocator* allocator, usize count) { - Array array{allocator}; + Array result = {}; + result.allocator = allocator; if (count > 0) { - array.data = alloc_array(allocator, count); - if (array.data) - array.count = array.capacity = count; + result.data = alloc_array(allocator, count); + if (result.data) + result.count = result.capacity = count; } - return array; + return result; } template @@ -1582,6 +1482,28 @@ free(Array* a) a->data = nullptr; } +template +inline Array +copy(Array other, Allocator* allocator) +{ + Array result = {}; + + if (allocator) + result.allocator = allocator; + else + 
result.allocator = other.allocator; + + auto new_count = other.count; + + array::resize(&result, new_count); + memory::copy_array(other.data, new_count, data); + + return result; +} + + + + template inline void append(Array* a, T const& item) @@ -1597,7 +1519,7 @@ append(Array* a, T&& item) { if (a->capacity < a->count + 1) array::grow(a); - a->data[a->count++] = move(item); + a->data[a->count++] = move_ownership(item); } template @@ -1611,6 +1533,14 @@ append(Array* a, T const* items, usize count) a->count += count; } +template +inline void +append(Array* a, Array other) +{ + array::append(a, other.data, other.count); +} + + template inline void pop(Array* a) @@ -1662,7 +1592,9 @@ set_capacity(Array* a, usize capacity) } free(a->allocator, a->data); a->data = data; - a->capacity = capacity; + a + + ->capacity = capacity; } template @@ -1677,72 +1609,58 @@ grow(Array* a, usize min_capacity) } } // namespace array + + + + + + //////////////////////////////// // // // Hash Table // // // //////////////////////////////// -template -inline -Hash_Table::Hash_Table() -: hashes() -, entries() -{ -} - -template -inline -Hash_Table::Hash_Table(Allocator* a) -: hashes(a) -, entries(a) -{ -} - -template -inline -Hash_Table::Hash_Table(Hash_Table const& other) -: hashes(other.hashes) -, entries(other.entries) -{ -} - -template -inline -Hash_Table::Hash_Table(Hash_Table&& other) -: hashes(move(other.hashes)) -, entries(move(other.entries)) -{ -} - -template -inline Hash_Table& -Hash_Table::operator=(Hash_Table const& other) -{ - hashes = other.hashes; - entries = other.entries; - return *this; -} - -template -inline Hash_Table& -Hash_Table::operator=(Hash_Table&& other) -{ - hashes = move(other.hashes); - entries = move(other.entries); - return *this; -} - - namespace hash_table { template inline Hash_Table make(Allocator* a) { - return Hash_Table{a}; + Hash_Table result = {}; + + result.hashes = array::make(a); + result.entries = array::make::Entry>(a); + + return result; } +template +inline void +free(Hash_Table* h) +{ + if (h->hashes.allocator) + array::free(&h->hashes); + + if (h->entries.allocator) + array::free(&h->entries); +} + +template +inline Hash_Table +copy(Hash_Table const& other, Allocator* allocator) +{ + Allocator* a = other.hashes.allocator; + if (allocator) a = allocator; + + Hash_Table result = {}; + result.hashes = array::copy(other.hashes, a); + result.entries = array::copy(other.entries, a); + + return result; +} + + namespace impl { struct Find_Result @@ -1913,21 +1831,21 @@ rehash(Hash_Table* h, usize new_capacity) for (usize i = 0; i < new_capacity; i++) nh.hashes[i] = -1; - for (u32 i = 0; i < h->entries.count; i++) + for (s64 i = 0; i < h->entries.count; i++) { auto const* e = &h->entries[i]; multi_hash_table::insert(&nh, e->key, e->value); } Hash_Table empty_ht{h->hashes.allocator}; - h->~Hash_Table(); + hash_table::free(h); - memory::copy_array(&nh, 1, h); - memory::copy_array(&empty_ht, 1, &nh); + memory::copy_struct(&nh, h); + memory::copy_struct(&empty_ht, &nh); } template -void +inline void grow(Hash_Table* h) { const usize new_capacity = 2 * h->entries.count + 8; @@ -1935,11 +1853,11 @@ grow(Hash_Table* h) } template -bool +inline bool is_full(Hash_Table* h) { // Make sure that there is enough space - f32 const maximum_load_coefficient = 0.75f; + f64 const maximum_load_coefficient = 0.75; return h->entries.count >= maximum_load_coefficient * h->hashes.count; } } // namespace impl @@ -1983,7 +1901,7 @@ set(Hash_Table* h, u64 key, T&& value) impl::grow(h); s64 const 
index = impl::find_or_make_entry(h, key); - h->entries[index].value = move(value); + h->entries[index].value = move_ownership(value); if (impl::is_full(h)) impl::grow(h); } @@ -2107,7 +2025,7 @@ insert(Hash_Table* h, u64 key, T&& value) hash_table::impl::grow(h); auto next = hash_table::impl::make_entry(h, key); - h->entries[next].value = move(value); + h->entries[next].value = move_ownership(value); if (hash_table::impl::is_full(h)) hash_table::impl::grow(h); @@ -2135,34 +2053,57 @@ remove_all(Hash_Table* h, u64 key) namespace memory { template -inline T* +inline void +fill(T* ptr, usize count, T const& value) +{ + for (usize i = 0; i < count; i++) + ptr[i] = value; +} + +template +inline void +fill(T* ptr, usize count, T&& value) +{ + for (usize i = 0; i < count; i++) + ptr[i] = move_ownership(value); +} + +template +inline void zero_struct(T* ptr) { - return static_cast(memory::zero(ptr, sizeof(T))); + memory::zero(ptr, sizeof(T)); } - template -inline T* +inline void zero_array(T* ptr, usize count) { - return static_cast(memory::zero(ptr, count * sizeof(T))); + memory::zero(ptr, count * sizeof(T)); } template -inline T* +inline void copy_array(T const* src_array, usize count, T* dest_array) { - return static_cast(memory::copy(src_array, count * sizeof(T), dest_array)); + memory::copy(src_array, count * sizeof(T), dest_array); } +template +inline void +copy_struct(T const* src_array, T* dest_array) +{ + memory::copy(src_array, sizeof(T), dest_array); +} + + template inline void swap(T* a, T* b) { - T c = __GB_NAMESPACE_PREFIX::move(*a); - *a = __GB_NAMESPACE_PREFIX::move(*b); - *b = __GB_NAMESPACE_PREFIX::move(c); + T c = move_ownership(*a); + *a = move_ownership(*b); + *b = move_ownership(c); } template @@ -2208,8 +2149,8 @@ quick(T* array, usize count, Comparison_Function compare) j--; } - quick(array, i, compare); - quick(array+i, count-i, compare); + sort::quick(array, i, compare); + sort::quick(array+i, count-i, compare); } } // namespace sort @@ -3271,28 +3212,23 @@ pointer_sub(void const* ptr, usize bytes) return static_cast(static_cast(ptr) - bytes); } -GB_FORCE_INLINE void* -set(void* ptr, usize bytes, u8 value) -{ - return memset(ptr, value, bytes); -} -GB_FORCE_INLINE void* +GB_FORCE_INLINE void zero(void* ptr, usize bytes) { - return memory::set(ptr, bytes, 0); + memset(ptr, 0, bytes); } -GB_FORCE_INLINE void* +GB_FORCE_INLINE void copy(void const* src, usize bytes, void* dest) { - return memcpy(dest, src, bytes); + memcpy(dest, src, bytes); } -GB_FORCE_INLINE void* +GB_FORCE_INLINE void move(void const* src, usize bytes, void* dest) { - return memmove(dest, src, bytes); + memmove(dest, src, bytes); } GB_FORCE_INLINE bool diff --git a/gb_string.h b/gb_string.h index 0a244a8..90e24b0 100644 --- a/gb_string.h +++ b/gb_string.h @@ -91,7 +91,7 @@ // // printf("%c %c\n", str[0], str[13]); // -// * gb strings are singularlly allocated. The meta-data is next to the character +// * gb strings are singularly allocated. The meta-data is next to the character // array which is better for the cache. // // Disadvantages: