Explicit Everything!

This commit is contained in:
gingerBill 2016-01-01 19:33:06 +00:00
parent 8c934b2edc
commit 21c240a8ae
4 changed files with 253 additions and 318 deletions

1
.gitignore vendored

@ -225,3 +225,4 @@ test.*
src/
*.sln
run.bat
external/

170
gb.h

@ -15,6 +15,7 @@
/*
Version History:
0.06 - Explicit Everything
0.05 - Fix Macros
0.04a - Change conventions to be in keeping with `gb.hpp`
0.04 - Allow for no <stdio.h>
@ -31,31 +32,32 @@ Version History:
extern "C" {
#endif
/* NOTE(bill): Because static means three different things in C/C++
* Great Design(!)
*/
#ifndef global_variable
#define global_variable static
#define internal_linkage static
#define local_persist static
#ifndef GB_DO_NOT_USE_CUSTOM_STATIC
/* NOTE(bill): Because static means three different things in C/C++
* Great Design(!)
*/
#ifndef global_variable
#define global_variable static
#define internal_linkage static
#define local_persist static
#endif
/* Example for static defines
global_variable f32 const TAU = 6.283185f;
global_variable void* g_memory;
internal_linkage void
some_function(...)
{
local_persist u32 count = 0;
...
count++;
...
}
*/
#endif
/* Example for static defines
global_variable f32 const TAU = 6.283185f;
global_variable void* g_memory;
internal_linkage void
some_function(...)
{
local_persist u32 count = 0;
...
count++;
...
}
*/
#if defined(_MSC_VER)
#define _ALLOW_KEYWORD_MACROS
@ -177,21 +179,11 @@ extern "C" {
#undef VC_EXTRALEAN
#undef WIN32_EXTRA_LEAN
#undef WIN32_LEAN_AND_MEAN
#else
#include <pthread.h>
#include <sys/time.h>
#endif
#ifndef true
#define true (0==0)
#define false (0!=0)
#endif
#ifndef NULL
#define NULL ((void*)0)
#endif
#if defined(NDEBUG)
#define GB_ASSERT(cond) ((void)(0))
#else
@ -264,8 +256,6 @@ GB_COMPILE_TIME_ASSERT(sizeof(s64) == 8);
typedef float f32;
typedef double f64;
typedef s32 bool32;
#if defined(GB_ARCH_32_BIT)
typedef u32 usize;
typedef s32 ssize;
@ -275,9 +265,23 @@ typedef s32 bool32;
#else
#error Unknown architecture bit size
#endif
GB_COMPILE_TIME_ASSERT(sizeof(usize) == sizeof(size_t));
typedef enum bool32
{
false = 0 != 0,
true = !false,
} bool32;
#ifndef NULL
#define NULL ((void*)0)
#endif
GB_COMPILE_TIME_ASSERT(sizeof(bool32) == sizeof(s32));
typedef uintptr_t uintptr;
typedef intptr_t intptr;
typedef ptrdiff_t ptrdiff;
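The hunk above replaces the old `true`/`false` macros with a typed `bool32` enum. A brief hypothetical usage sketch (the function name is made up for illustration):

static bool32
example_toggle(bool32 value)
{
	if (value == true) return false;
	return true;
}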
@ -329,25 +333,24 @@ typedef ptrdiff_t ptrdiff;
#define cast(Type, src) ((Type)(src))
#endif
#if defined(GB_COMPILER_GNU_GCC)
#ifndef bit_cast
#define bit_cast(Type, src) ({ GB_ASSERT(sizeof(Type) <= sizeof(src)); Type dst; memcpy(&dst, &(src), sizeof(Type)); dst; })
#endif
#endif
#ifndef pseudo_cast
#define pseudo_cast(Type, src) (*cast(Type*, &(src)))
#endif
#ifndef GB_UNUSED
#define GB_UNUSED(x) cast(void, sizeof(x))
#if defined(GB_COMPILER_GNU_GCC)
#ifndef bit_cast
#define bit_cast(Type, src) ({ GB_ASSERT(sizeof(Type) <= sizeof(src)); Type dst; memcpy(&dst, &(src), sizeof(Type)); dst; })
#else
// TODO(bill): Figure out a version for `bit_cast` that is not `pseudo_cast`
#define bit_cast(Type, src) (pseudo_cast(Type, src))
#endif
#endif
#ifndef GB_UNUSED
#define GB_UNUSED(x) cast(void, sizeof(x))
#endif
@ -360,7 +363,7 @@ typedef ptrdiff_t ptrdiff;
/* NOTE(bill): 0[x] is used so the macro rejects C++ types that overload operator[] */
#ifndef GB_ARRAY_COUNT
#define GB_ARRAY_COUNT(x) ((sizeof(x)/sizeof(0[x])) / (cast(size_t, !(sizeof(x) % sizeof(0[x])))))
#define GB_ARRAY_COUNT(x) ((sizeof(x)/sizeof(0[x])) / (cast(usize, !(sizeof(x) % sizeof(0[x])))))
#endif
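For illustration, a minimal usage sketch of the macro above (the function is hypothetical; GB_ARRAY_COUNT only works on true, non-decayed arrays):

static usize
example_array_count(void)
{
	int values[16] = {0};
	return GB_ARRAY_COUNT(values); /* 16 */
}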
#ifndef GB_KILOBYTES
@ -498,17 +501,15 @@ typedef struct gb_Allocator
s64 (*total_allocated)(struct gb_Allocator* a);
} gb_Allocator;
typedef void* gb_Allocator_Ptr;
void*
gb_alloc_align(gb_Allocator_Ptr allocator, usize size, usize align)
gb_alloc_align(gb_Allocator* allocator, usize size, usize align)
{
GB_ASSERT(allocator != NULL);
gb_Allocator* a = cast(gb_Allocator*, allocator);
return a->alloc(a, size, align);
return allocator->alloc(allocator, size, align);
}
void*
gb_alloc(gb_Allocator_Ptr allocator, usize size)
gb_alloc(gb_Allocator* allocator, usize size)
{
GB_ASSERT(allocator != NULL);
return gb_alloc_align(allocator, size, GB_DEFAULT_ALIGNMENT);
@ -520,27 +521,24 @@ gb_alloc(gb_Allocator_Ptr allocator, usize size)
#endif
void
gb_free(gb_Allocator_Ptr allocator, void* ptr)
gb_free(gb_Allocator* allocator, void* ptr)
{
GB_ASSERT(allocator != NULL);
gb_Allocator* a = cast(gb_Allocator*, allocator);
if (ptr) a->free(a, ptr);
if (ptr) allocator->free(allocator, ptr);
}
s64
gb_allocated_size(gb_Allocator_Ptr allocator, void const* ptr)
gb_allocated_size(gb_Allocator* allocator, void const* ptr)
{
GB_ASSERT(allocator != NULL);
gb_Allocator* a = cast(gb_Allocator*, allocator);
return a->allocated_size(a, ptr);
return allocator->allocated_size(allocator, ptr);
}
s64
gb_total_allocated(gb_Allocator_Ptr allocator)
gb_total_allocated(gb_Allocator* allocator)
{
GB_ASSERT(allocator != NULL);
gb_Allocator* a = cast(gb_Allocator*, allocator);
return a->total_allocated(a);
return allocator->total_allocated(allocator);
}
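The hunk above drops the untyped `gb_Allocator_Ptr` handle in favor of `gb_Allocator*`. Below is a hypothetical sketch of a pass-through allocator built on `malloc`/`free` that satisfies this interface; the member names come from the calls shown above, while embedding `gb_Allocator` as the first struct member (so the pointer can be cast back) and the availability of `<stdlib.h>` are assumptions.

typedef struct Example_Malloc_Allocator
{
	gb_Allocator base; /* assumed: gb_Allocator as first member so casting back works */
	s64 total;
} Example_Malloc_Allocator;

static void*
example_malloc_alloc(gb_Allocator* a, usize size, usize align)
{
	Example_Malloc_Allocator* m = cast(Example_Malloc_Allocator*, a);
	GB_UNUSED(align); /* a real allocator should honor align */
	m->total += cast(s64, size);
	return malloc(size); /* assumes <stdlib.h> */
}

static void
example_malloc_free(gb_Allocator* a, void* ptr)
{
	GB_UNUSED(a);
	free(ptr);
}

static s64
example_malloc_allocated_size(gb_Allocator* a, void const* ptr)
{
	GB_UNUSED(a);
	GB_UNUSED(ptr);
	return -1; /* unknown for plain malloc */
}

static s64
example_malloc_total_allocated(gb_Allocator* a)
{
	return cast(Example_Malloc_Allocator*, a)->total;
}

static void
example_use_allocator(void)
{
	Example_Malloc_Allocator m = {0};
	m.base.alloc           = example_malloc_alloc;
	m.base.free            = example_malloc_free;
	m.base.allocated_size  = example_malloc_allocated_size;
	m.base.total_allocated = example_malloc_total_allocated;

	void* p = gb_alloc(cast(gb_Allocator*, &m), 128);
	gb_free(cast(gb_Allocator*, &m), p);
}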
@ -1198,7 +1196,7 @@ gb_thread_destroy(gb_Thread* t)
gb_semaphore_destroy(&t->semaphore);
}
internal_linkage void
static void
gb__thread_run(gb_Thread* t)
{
gb_semaphore_post(&t->semaphore);
@ -1206,7 +1204,7 @@ gb__thread_run(gb_Thread* t)
}
#if defined(GB_SYSTEM_WINDOWS)
internal_linkage DWORD WINAPI
static DWORD WINAPI
gb__thread_proc(void* arg)
{
gb__thread_run(cast(gb_Thread* , arg));
@ -1214,7 +1212,7 @@ gb__thread_proc(void* arg)
}
#else
internal_linkage void*
static void*
gb__thread_proc(void* arg)
{
gb__thread_run(cast(gb_Thread* , arg));
@ -1297,7 +1295,7 @@ gb_thread_current_id(void)
#if defined(GB_SYSTEM_WINDOWS)
u8* thread_local_storage = cast(u8*, __readgsqword(0x30));
thread_id = *cast(u32 *, thread_local_storage + 0x48);
thread_id = *cast(u32*, thread_local_storage + 0x48);
#elif defined(GB_SYSTEM_OSX) && defined(GB_ARCH_64_BIT)
asm("mov %%gs:0x00,%0" : "=r"(thread_id));
@ -1327,7 +1325,7 @@ typedef struct gb__Heap_Header
} gb__Heap_Header;
internal_linkage void*
static void*
gb__heap_alloc(gb_Allocator* a, usize size, usize align)
{
gb_Heap* heap = cast(gb_Heap*, a);
@ -1360,7 +1358,7 @@ gb__heap_alloc(gb_Allocator* a, usize size, usize align)
internal_linkage void
static void
gb__heap_free(gb_Allocator* a, void* ptr)
{
if (!ptr) return;
@ -1369,7 +1367,7 @@ gb__heap_free(gb_Allocator* a, void* ptr)
if (heap->use_mutex) gb_mutex_lock(&heap->mutex);
heap->total_allocated_count -= gb_allocated_size(heap, ptr);
heap->total_allocated_count -= gb_allocated_size(a, ptr);
heap->allocation_count--;
#if defined (GB_SYSTEM_WINDOWS)
@ -1382,7 +1380,7 @@ gb__heap_free(gb_Allocator* a, void* ptr)
if (heap->use_mutex) gb_mutex_unlock(&heap->mutex);
}
internal_linkage s64
static s64
gb__heap_allocated_size(gb_Allocator* a, void const* ptr)
{
#if defined(GB_SYSTEM_WINDOWS)
@ -1408,7 +1406,7 @@ gb__heap_allocated_size(gb_Allocator* a, void const* ptr)
#endif
}
internal_linkage s64
static s64
gb__heap_total_allocated(gb_Allocator* a)
{
gb_Heap* heap = cast(gb_Heap*, a);
@ -1459,7 +1457,7 @@ gb_heap_destroy(gb_Heap* heap)
internal_linkage void*
static void*
gb__arena_alloc(gb_Allocator* a, usize size, usize align)
{
gb_Arena* arena = cast(gb_Arena*, a);
@ -1479,14 +1477,14 @@ gb__arena_alloc(gb_Allocator* a, usize size, usize align)
return ptr;
}
internal_linkage void
static void
gb__arena_free(gb_Allocator* a, void* ptr) /* NOTE(bill): Arenas free all at once */
{
GB_UNUSED(a);
GB_UNUSED(ptr);
}
internal_linkage s64
static s64
gb__arena_allocated_size(gb_Allocator* a, void const* ptr)
{
GB_UNUSED(a);
@ -1494,7 +1492,7 @@ gb__arena_allocated_size(gb_Allocator* a, void const* ptr)
return -1;
}
internal_linkage s64
static s64
gb__arena_total_allocated(gb_Allocator* a)
{
return cast(gb_Arena*, a)->total_allocated_count;
@ -1574,7 +1572,7 @@ gb_make_temporary_arena_memory(gb_Arena* arena)
void
gb_temporary_arena_memory_free(gb_Temporary_Arena_Memory tmp)
{
GB_ASSERT(gb_total_allocated(tmp.arena) >= tmp.original_count);
GB_ASSERT(gb_total_allocated(cast(gb_Allocator*, tmp.arena)) >= tmp.original_count);
tmp.arena->total_allocated_count = tmp.original_count;
GB_ASSERT(tmp.arena->temp_count > 0);
tmp.arena->temp_count--;
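A hypothetical usage sketch of the temporary-arena pattern touched above, assuming `arena` points to an already-initialized `gb_Arena`; everything allocated after the marker is released in one go:

static void
example_scratch_work(gb_Arena* arena)
{
	gb_Temporary_Arena_Memory tmp = gb_make_temporary_arena_memory(arena);
	void* scratch = gb_alloc(cast(gb_Allocator*, arena), 256);
	/* ... use scratch for throwaway work ... */
	GB_UNUSED(scratch);
	gb_temporary_arena_memory_free(tmp); /* rolls the arena back to the marker */
}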
@ -1587,7 +1585,7 @@ gb_temporary_arena_memory_free(gb_Temporary_Arena_Memory tmp)
internal_linkage void*
static void*
gb__pool_alloc(gb_Allocator* a, usize size, usize align)
{
gb_Pool* pool = cast(gb_Pool*, a);
@ -1605,7 +1603,7 @@ gb__pool_alloc(gb_Allocator* a, usize size, usize align)
return ptr;
}
internal_linkage void
static void
gb__pool_free(gb_Allocator* a, void* ptr)
{
if (!ptr) return;
@ -1620,7 +1618,7 @@ gb__pool_free(gb_Allocator* a, void* ptr)
pool->total_size -= pool->block_size;
}
internal_linkage s64
static s64
gb__pool_allocated_size(gb_Allocator* a, void const* ptr)
{
GB_UNUSED(a);
@ -1628,7 +1626,7 @@ gb__pool_allocated_size(gb_Allocator* a, void const* ptr)
return -1;
}
internal_linkage s64
static s64
gb__pool_total_allocated(gb_Allocator* a)
{
gb_Pool* pool = cast(gb_Pool*, a);
@ -1719,14 +1717,14 @@ void* gb_zero_size(void* ptr, usize bytes) { return memset(ptr, 0, bytes); }
/**********************************/
internal_linkage void
static void
gb__string_set_length(gb_String str, gb_String_Size len)
{
GB_STRING_HEADER(str)->length = len;
}
internal_linkage void
static void
gb__string_set_capacity(gb_String str, gb_String_Size cap)
{
GB_STRING_HEADER(str)->capacity = cap;
@ -1856,7 +1854,7 @@ gb_string_set(gb_String str, char const* cstr)
}
internal_linkage void*
static void*
gb__string_realloc(gb_Allocator* a, void* ptr, gb_String_Size old_size, gb_String_Size new_size)
{
if (!ptr) return gb_alloc(a, new_size);
@ -1979,7 +1977,7 @@ gb_hash_adler32(void const* key, u32 num_bytes)
return (b << 16) | a;
}
global_variable const u32 GB_CRC32_TABLE[256] = {
static const u32 GB_CRC32_TABLE[256] = {
0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,
0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
@ -2046,7 +2044,7 @@ global_variable const u32 GB_CRC32_TABLE[256] = {
0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d,
};
global_variable const u64 GB_CRC64_TABLE[256] = {
static const u64 GB_CRC64_TABLE[256] = {
0x0000000000000000ull, 0x42F0E1EBA9EA3693ull, 0x85E1C3D753D46D26ull, 0xC711223CFA3E5BB5ull,
0x493366450E42ECDFull, 0x0BC387AEA7A8DA4Cull, 0xCCD2A5925D9681F9ull, 0x8E224479F47CB76Aull,
0x9266CC8A1C85D9BEull, 0xD0962D61B56FEF2Dull, 0x17870F5D4F51B498ull, 0x5577EEB6E6BB820Bull,
@ -2377,7 +2375,7 @@ gb_hash_murmur32(void const* key, u32 num_bytes, u32 seed)
/* Get the frequency of the performance counter */
/* It is constant across the program's lifetime */
local_persist LARGE_INTEGER s_frequency;
static LARGE_INTEGER s_frequency;
QueryPerformanceFrequency(&s_frequency); /* TODO(bill): Is this fast enough? */
/* Get the current time */

398
gb.hpp

@ -39,6 +39,7 @@ CONTENTS:
/*
Version History:
0.33 - Explicit Everything! No ctor/dtor on Array<T> and Hash_Table<T>
0.32 - Change const position convention
0.31a - Minor fixes
0.31 - Remove `_Allocator` suffix for allocator types
@ -584,33 +585,29 @@ template <typename T, usize N> struct Remove_Extent_Def<T[N]> { using Type = T;
// TODO(bill): Do I "need" all of these template traits?
////////////////////////////////
// //
// C++11 Move Semantics //
// //
////////////////////////////////
// TODO(bill): Are these decent names? Are `forward` and `move` clear enough?
template <typename T>
inline T&&
forward(Remove_Reference<T>& t)
forward_ownership(Remove_Reference<T>& t)
{
return static_cast<T&&>(t);
}
template <typename T>
inline T&&
forward(Remove_Reference<T>&& t)
forward_ownership(Remove_Reference<T>&& t)
{
return static_cast<T&&>(t);
}
template <typename T>
inline Remove_Reference<T>&&
move(T&& t)
move_ownership(T&& t)
{
return static_cast<Remove_Reference<T>&&>(t);
}
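For illustration, a minimal sketch of the renamed helpers (assuming the default `gb` namespace prefix): `move_ownership` is this library's spelling of `std::move`, and `forward_ownership` of `std::forward`.

template <typename T>
inline void
example_swap(T* a, T* b)
{
	T tmp = gb::move_ownership(*a); // *a is left in a moved-from state
	*a    = gb::move_ownership(*b);
	*b    = gb::move_ownership(tmp);
}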
@ -636,13 +633,13 @@ __GB_NAMESPACE_END
{
Func f;
Defer(Func&& f) : f{forward<Func>(f)} {}
Defer(Func&& f) : f{forward_ownership<Func>(f)} {}
~Defer() { f(); };
};
template <typename Func>
inline Defer<Func>
defer_func(Func&& f) { return Defer<Func>(forward<Func>(f)); }
defer_func(Func&& f) { return Defer<Func>(forward_ownership<Func>(f)); }
} // namespace impl
__GB_NAMESPACE_END
@ -650,7 +647,7 @@ __GB_NAMESPACE_END
#define GB_DEFER_1(x, y) x##y
#define GB_DEFER_2(x, y) GB_DEFER_1(x, y)
#define GB_DEFER_3(x) GB_DEFER_2(GB_DEFER_2(GB_DEFER_2(x, __COUNTER__), _), __LINE__)
#define defer(code) auto GB_DEFER_3(_defer_) = __GB_NAMESPACE_PREFIX::impl::defer_func([&](){code;})
#define defer(code) auto GB_DEFER_3(_defer_) = ::impl::defer_func([&](){code;})
/* EXAMPLES
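A hypothetical usage sketch of the `defer` macro defined above; the deferred statement runs when the enclosing scope exits, whatever the exit path:

inline void
example_defer()
{
	int* block = new int[16];
	defer (delete[] block); // executes at the end of this scope

	block[0] = 1;
	// ... more work; no manual cleanup needed on early returns ...
}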
@ -1026,29 +1023,35 @@ void destroy(Pool* pool);
namespace memory
{
void* align_forward(void* ptr, usize align);
void* pointer_add(void* ptr, usize bytes);
void* pointer_sub(void* ptr, usize bytes);
void* pointer_add(void* ptr, usize bytes);
void* pointer_sub(void* ptr, usize bytes);
void const* pointer_add(void const* ptr, usize bytes);
void const* pointer_sub(void const* ptr, usize bytes);
void* set(void* ptr, usize bytes, u8 value);
template <typename T>
void fill(T* ptr, usize count, T const& value);
void* zero(void* ptr, usize bytes);
void* copy(void const* src, usize bytes, void* dest);
void* move(void const* src, usize bytes, void* dest);
template <typename T>
void fill(T* ptr, usize count, T&& value);
void zero(void* ptr, usize bytes);
void copy(void const* src, usize bytes, void* dest);
void move(void const* src, usize bytes, void* dest);
bool equals(void const* a, void const* b, usize bytes);
// TODO(bill): Should this be just zero(T*) ???
template <typename T>
T* zero_struct(T* ptr);
void zero_struct(T* ptr);
template <typename T>
T* zero_array(T* ptr, usize count);
void zero_array(T* ptr, usize count);
template <typename T>
T* copy_array(T const* src_array, usize count, T* dest_array);
void copy_array(T const* src_array, usize count, T* dest_array);
template <typename T>
void copy_struct(T const* src_array, T* dest_array);
// TODO(bill): Should I implement something like std::copy, std::fill, std::fill_n ???
template <typename T>
void swap(T* a, T* b);
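The declarations above now return `void` instead of echoing the destination pointer. A hypothetical sketch of the typed helpers (the struct and function are made up; the default `gb` namespace prefix is assumed):

struct Example_Foo { int x; float y; };

inline void
example_memory_helpers()
{
	Example_Foo a = {1, 2.0f};
	Example_Foo b;
	gb::memory::zero_struct(&b);     // no longer returns the pointer
	gb::memory::copy_struct(&a, &b); // new in this commit
	Example_Foo arr[4];
	gb::memory::fill(arr, 4, a);     // copies `a` into each slot
}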
@ -1153,42 +1156,36 @@ struct Array
s64 capacity;
T* data;
Array() = default;
explicit Array(Allocator* a, usize count = 0);
~Array();
Array(Array const& array);
Array(Array&& array);
Array& operator=(Array const& array);
Array& operator=(Array&& array);
T const& operator[](usize index) const;
T& operator[](usize index);
};
// TODO(bill): Should I even have ctor, dtor, copy/move overloads for Array<T>?
// Should these be explicit functions e.g.
// NOTE(bill): There are no ctor/dtor for Array<T>.
// These are explicit functions e.g.
/*
auto old_array = array::make(...);
auto old_array = array::make<T>(...);
auto new_array = array::copy(old_array);
array::free(&old_array);
array::free(&new_array);
*/
// This allows arrays to be passed by value at a low cost
namespace array
{
// Helper functions to make and free an array
// Helper functions to make, free, and copy an array
template <typename T> Array<T> make(Allocator* allocator, usize count = 0);
template <typename T> void free(Array<T>* array);
// TODO(bill): Is passing by value okay here, or should it be pass by const&?
// (sizeof(Array<T>) == 16 + 2*sizeof(void*)) (24 bytes on x86, 32 bytes on x64)
template <typename T> Array<T> copy(Array<T> array, Allocator* allocator = nullptr);
// Appends `item` to the end of the array
template <typename T> void append(Array<T>* a, T const& item);
template <typename T> void append(Array<T>* a, T&& item);
// Appends `items[count]` to the end of the array
template <typename T> void append(Array<T>* a, T const* items, usize count);
// Append the contents of another array of the same type
template <typename T> void append(Array<T>* a, Array<T> other);
// Pops the last item from the array. The array cannot be empty.
template <typename T> void pop(Array<T>* a);
@ -1208,10 +1205,10 @@ template <typename T> void grow(Array<T>* a, usize min_capacity = 0);
// Used to iterate over the array with a C++11 for loop
template <typename T> inline T* begin(Array<T>& a) { return a.data; }
template <typename T> inline T const* begin(Array<T> const& a) { return a.data; }
template <typename T> inline T* begin(Array<T>&& a) { return a.data; }
template <typename T> inline T* begin(Array<T>&& a) { return a.data; }
template <typename T> inline T* end(Array<T>& a) { return a.data + a.count; }
template <typename T> inline T const* end(Array<T> const& a) { return a.data + a.count; }
template <typename T> inline T* end(Array<T>&& a) { return a.data + a.count; }
template <typename T> inline T* end(Array<T>&& a) { return a.data + a.count; }
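A hypothetical end-to-end sketch of the explicit Array API plus range-for iteration (assuming the default `gb` namespace prefix and that the `begin`/`end` overloads above are visible to the range-for):

inline int
example_array(gb::Allocator* a)
{
	auto nums = gb::array::make<int>(a);
	gb::array::append(&nums, 1);
	gb::array::append(&nums, 2);

	int sum = 0;
	for (int n : nums) // uses the begin/end overloads above
		sum += n;

	gb::array::free(&nums);
	return sum; // 3
}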
@ -1238,22 +1235,14 @@ struct Hash_Table
Array<s64> hashes;
Array<Entry> entries;
Hash_Table();
explicit Hash_Table(Allocator* a);
Hash_Table(Hash_Table<T> const& other);
Hash_Table(Hash_Table<T>&& other);
~Hash_Table() = default;
Hash_Table<T>& operator=(Hash_Table<T> const& other);
Hash_Table<T>& operator=(Hash_Table<T>&& other);
};
namespace hash_table
{
// Helper function to make a hash table
// Helper functions to make, free, and copy a hash table
template <typename T> Hash_Table<T> make(Allocator* a);
template <typename T> void free(Hash_Table<T>* h);
template <typename T> Hash_Table<T> copy(Hash_Table<T> const& h, Allocator* a = nullptr);
// Return `true` if the specified key exist in the hash table
template <typename T> bool has(Hash_Table<T> const& h, u64 key);
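A hypothetical sketch of the explicit Hash_Table API (default `gb` namespace prefix assumed; `hash_table::set` appears further down in this diff):

inline void
example_hash_table(gb::Allocator* a)
{
	auto table = gb::hash_table::make<float>(a);
	gb::hash_table::set(&table, 42, 1.5f);
	if (gb::hash_table::has(table, 42))
	{
		// ... entry 42 is present ...
	}
	gb::hash_table::free(&table);
}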
@ -1442,96 +1431,6 @@ u64 rdtsc();
// //
////////////////////////////////
template <typename T>
inline
Array<T>::Array(Allocator* a, usize count_)
: allocator(a)
, count(0)
, capacity(0)
, data(nullptr)
{
if (count_ > 0)
{
data = alloc_array<T>(a, count_);
if (data)
count = capacity = count_;
}
}
template <typename T>
inline
Array<T>::Array(Array<T> const& other)
: allocator(other.allocator)
, count(0)
, capacity(0)
, data(nullptr)
{
auto new_count = other.count;
array::set_capacity(this, new_count);
memory::copy_array(other.data, new_count, data);
this->count = new_count;
}
template <typename T>
inline
Array<T>::Array(Array<T>&& other)
: allocator(nullptr)
, count(0)
, capacity(0)
, data(nullptr)
{
*this = move(other);
}
template <typename T>
inline
Array<T>::~Array()
{
if (allocator && capacity > 0)
free(allocator, data);
}
template <typename T>
Array<T>&
Array<T>::operator=(Array<T> const& other)
{
if (allocator == nullptr)
allocator = other.allocator;
auto new_count = other.count;
array::resize(this, new_count);
memory::copy_array(other.data, new_count, data);
return *this;
}
template <typename T>
Array<T>&
Array<T>::operator=(Array<T>&& other)
{
if (this != &other)
{
if (allocator && capacity > 0)
free(allocator, data);
allocator = other.allocator;
count = other.count;
capacity = other.capacity;
data = other.data;
other.allocator = nullptr;
other.count = 0;
other.capacity = 0;
other.data = nullptr;
}
return *this;
}
template <typename T>
inline T const&
Array<T>::operator[](usize index) const
@ -1559,16 +1458,17 @@ template <typename T>
inline Array<T>
make(Allocator* allocator, usize count)
{
Array<T> array{allocator};
Array<T> result = {};
result.allocator = allocator;
if (count > 0)
{
array.data = alloc_array<T>(allocator, count);
if (array.data)
array.count = array.capacity = count;
result.data = alloc_array<T>(allocator, count);
if (result.data)
result.count = result.capacity = count;
}
return array;
return result;
}
template <typename T>
@ -1582,6 +1482,28 @@ free(Array<T>* a)
a->data = nullptr;
}
template <typename T>
inline Array<T>
copy(Array<T> other, Allocator* allocator)
{
Array<T> result = {};
if (allocator)
result.allocator = allocator;
else
result.allocator = other.allocator;
auto new_count = other.count;
array::resize(&result, new_count);
memory::copy_array(other.data, new_count, result.data);
return result;
}
template <typename T>
inline void
append(Array<T>* a, T const& item)
@ -1597,7 +1519,7 @@ append(Array<T>* a, T&& item)
{
if (a->capacity < a->count + 1)
array::grow(a);
a->data[a->count++] = move(item);
a->data[a->count++] = move_ownership(item);
}
template <typename T>
@ -1611,6 +1533,14 @@ append(Array<T>* a, T const* items, usize count)
a->count += count;
}
template <typename T>
inline void
append(Array<T>* a, Array<T> other)
{
array::append(a, other.data, other.count);
}
template <typename T>
inline void
pop(Array<T>* a)
@ -1662,7 +1592,9 @@ set_capacity(Array<T>* a, usize capacity)
}
free(a->allocator, a->data);
a->data = data;
a->capacity = capacity;
}
template <typename T>
@ -1677,72 +1609,58 @@ grow(Array<T>* a, usize min_capacity)
}
} // namespace array
////////////////////////////////
// //
// Hash Table //
// //
////////////////////////////////
template <typename T>
inline
Hash_Table<T>::Hash_Table()
: hashes()
, entries()
{
}
template <typename T>
inline
Hash_Table<T>::Hash_Table(Allocator* a)
: hashes(a)
, entries(a)
{
}
template <typename T>
inline
Hash_Table<T>::Hash_Table(Hash_Table<T> const& other)
: hashes(other.hashes)
, entries(other.entries)
{
}
template <typename T>
inline
Hash_Table<T>::Hash_Table(Hash_Table<T>&& other)
: hashes(move(other.hashes))
, entries(move(other.entries))
{
}
template <typename T>
inline Hash_Table<T>&
Hash_Table<T>::operator=(Hash_Table<T> const& other)
{
hashes = other.hashes;
entries = other.entries;
return *this;
}
template <typename T>
inline Hash_Table<T>&
Hash_Table<T>::operator=(Hash_Table<T>&& other)
{
hashes = move(other.hashes);
entries = move(other.entries);
return *this;
}
namespace hash_table
{
template <typename T>
inline Hash_Table<T>
make(Allocator* a)
{
return Hash_Table<T>{a};
Hash_Table<T> result = {};
result.hashes = array::make<s64>(a);
result.entries = array::make<typename Hash_Table<T>::Entry>(a);
return result;
}
template <typename T>
inline void
free(Hash_Table<T>* h)
{
if (h->hashes.allocator)
array::free(&h->hashes);
if (h->entries.allocator)
array::free(&h->entries);
}
template <typename T>
inline Hash_Table<T>
copy(Hash_Table<T> const& other, Allocator* allocator)
{
Allocator* a = other.hashes.allocator;
if (allocator) a = allocator;
Hash_Table<T> result = {};
result.hashes = array::copy(other.hashes, a);
result.entries = array::copy(other.entries, a);
return result;
}
namespace impl
{
struct Find_Result
@ -1913,21 +1831,21 @@ rehash(Hash_Table<T>* h, usize new_capacity)
for (usize i = 0; i < new_capacity; i++)
nh.hashes[i] = -1;
for (u32 i = 0; i < h->entries.count; i++)
for (s64 i = 0; i < h->entries.count; i++)
{
auto const* e = &h->entries[i];
multi_hash_table::insert(&nh, e->key, e->value);
}
Hash_Table<T> empty_ht{h->hashes.allocator};
h->~Hash_Table<T>();
hash_table::free(h);
memory::copy_array(&nh, 1, h);
memory::copy_array(&empty_ht, 1, &nh);
memory::copy_struct(&nh, h);
memory::copy_struct(&empty_ht, &nh);
}
template <typename T>
void
inline void
grow(Hash_Table<T>* h)
{
const usize new_capacity = 2 * h->entries.count + 8;
@ -1935,11 +1853,11 @@ grow(Hash_Table<T>* h)
}
template <typename T>
bool
inline bool
is_full(Hash_Table<T>* h)
{
// Make sure that there is enough space
f32 const maximum_load_coefficient = 0.75f;
f64 const maximum_load_coefficient = 0.75;
return h->entries.count >= maximum_load_coefficient * h->hashes.count;
}
} // namespace impl
@ -1983,7 +1901,7 @@ set(Hash_Table<T>* h, u64 key, T&& value)
impl::grow(h);
s64 const index = impl::find_or_make_entry(h, key);
h->entries[index].value = move(value);
h->entries[index].value = move_ownership(value);
if (impl::is_full(h))
impl::grow(h);
}
@ -2107,7 +2025,7 @@ insert(Hash_Table<T>* h, u64 key, T&& value)
hash_table::impl::grow(h);
auto next = hash_table::impl::make_entry(h, key);
h->entries[next].value = move(value);
h->entries[next].value = move_ownership(value);
if (hash_table::impl::is_full(h))
hash_table::impl::grow(h);
@ -2135,34 +2053,57 @@ remove_all(Hash_Table<T>* h, u64 key)
namespace memory
{
template <typename T>
inline T*
inline void
fill(T* ptr, usize count, T const& value)
{
for (usize i = 0; i < count; i++)
ptr[i] = value;
}
template <typename T>
inline void
fill(T* ptr, usize count, T&& value)
{
for (usize i = 0; i < count; i++)
ptr[i] = move_ownership(value);
}
template <typename T>
inline void
zero_struct(T* ptr)
{
return static_cast<T*>(memory::zero(ptr, sizeof(T)));
memory::zero(ptr, sizeof(T));
}
template <typename T>
inline T*
inline void
zero_array(T* ptr, usize count)
{
return static_cast<T*>(memory::zero(ptr, count * sizeof(T)));
memory::zero(ptr, count * sizeof(T));
}
template <typename T>
inline T*
inline void
copy_array(T const* src_array, usize count, T* dest_array)
{
return static_cast<T*>(memory::copy(src_array, count * sizeof(T), dest_array));
memory::copy(src_array, count * sizeof(T), dest_array);
}
template <typename T>
inline void
copy_struct(T const* src_array, T* dest_array)
{
memory::copy(src_array, sizeof(T), dest_array);
}
template <typename T>
inline void
swap(T* a, T* b)
{
T c = __GB_NAMESPACE_PREFIX::move(*a);
*a = __GB_NAMESPACE_PREFIX::move(*b);
*b = __GB_NAMESPACE_PREFIX::move(c);
T c = move_ownership(*a);
*a = move_ownership(*b);
*b = move_ownership(c);
}
template <typename T, usize N>
@ -2208,8 +2149,8 @@ quick(T* array, usize count, Comparison_Function compare)
j--;
}
quick(array, i, compare);
quick(array+i, count-i, compare);
sort::quick(array, i, compare);
sort::quick(array+i, count-i, compare);
}
} // namespace sort
@ -3271,28 +3212,23 @@ pointer_sub(void const* ptr, usize bytes)
return static_cast<void const*>(static_cast<u8 const*>(ptr) - bytes);
}
GB_FORCE_INLINE void*
set(void* ptr, usize bytes, u8 value)
{
return memset(ptr, value, bytes);
}
GB_FORCE_INLINE void*
GB_FORCE_INLINE void
zero(void* ptr, usize bytes)
{
return memory::set(ptr, bytes, 0);
memset(ptr, 0, bytes);
}
GB_FORCE_INLINE void*
GB_FORCE_INLINE void
copy(void const* src, usize bytes, void* dest)
{
return memcpy(dest, src, bytes);
memcpy(dest, src, bytes);
}
GB_FORCE_INLINE void*
GB_FORCE_INLINE void
move(void const* src, usize bytes, void* dest)
{
return memmove(dest, src, bytes);
memmove(dest, src, bytes);
}
GB_FORCE_INLINE bool

View File

@ -91,7 +91,7 @@
//
// printf("%c %c\n", str[0], str[13]);
//
// * gb strings are singularlly allocated. The meta-data is next to the character
// * gb strings are singularly allocated. The meta-data is next to the character
// array which is better for the cache.
//
// Disadvantages: