2024-06-11 23:40:34 -04:00
|
|
|
/* Copyright (C) 2024 Peter McGoron
|
|
|
|
*
|
|
|
|
* This program is free software: you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU Lesser General Public License as
|
|
|
|
* published by the Free Software Foundation, either version 3 of the
|
|
|
|
* License, or (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
* GNU General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU Lesser General Public
|
|
|
|
* License along with this program. If not, see
|
|
|
|
* <https://www.gnu.org/licenses/>.
|
|
|
|
*/
|
2024-06-20 23:14:25 -04:00
|
|
|
#include <stdio.h>
|
2024-06-11 23:40:34 -04:00
|
|
|
|
2024-07-14 01:43:56 -04:00
|
|
|
/* XXX: Currently valgrind marks the header region as valid for all
 * functions, when it should only be valid inside allocator calls.
 */
#ifdef UNS_VALGRIND
# include <valgrind/valgrind.h>
# include <valgrind/memcheck.h>
/* Bytes of inaccessible padding placed after each allocation so that
 * memcheck can detect overruns into the next region. */
# define REDZONE 16
#else
/* Valgrind disabled: no redzones, and every client request becomes a
 * no-op.  The addressability check expands to 0, i.e. "everything is
 * addressable", so the abort() guards in hdr_read/hdr_write_direct
 * never fire. */
# define REDZONE 0
# define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed) (void)0
# define VALGRIND_DESTROY_MEMPOOL(pool) (void)0
# define VALGRIND_MEMPOOL_ALLOC(pool, ptr, siz) (void)0
# define VALGRIND_CHECK_MEM_IS_ADDRESSABLE(p, len) 0
# define VALGRIND_MAKE_MEM_DEFINED(p, len) (void)0
# define VALGRIND_MAKE_MEM_NOACCESS(p, len) (void)0
#endif
|
|
|
|
|
2024-06-11 23:40:34 -04:00
|
|
|
#include <stdlib.h>
|
|
|
|
#include <limits.h>
|
|
|
|
#include <assert.h>
|
|
|
|
#include <string.h>
|
|
|
|
#include "uns.h"
|
|
|
|
#include "cheney_c89.h"
|
|
|
|
|
|
|
|
/** Per-collector context for the Cheney copying GC. */
struct ctx {
	/** Pointer to the beginning of the heap. */
	unsigned char *tospace;

	/** Pointer to one past the end of the heap. */
	unsigned char *tospace_end;

	/** Pointer to the next place to alloc data. This may be one
	 * past the end of the heap, meaning there is no space left
	 * on the heap.
	 */
	unsigned char *tospace_alloc;

	/** A value set by the user to control the next heap size after
	 * a collection.
	 */
	size_t new_heap_size;

	/* Optional user callback invoked at the end of every
	 * collection; NULL means no callback. */
	uns_cheney_c89_collect_callback cb;
	/* Usage/collection statistics handed to the callback. */
	struct uns_cheney_c89_statistics stats;
};
|
|
|
|
|
2024-07-08 19:58:22 -04:00
|
|
|
/** Tear down the collector: release the heap and the context owned by
 * `gc`.
 */
void uns_cheney_c89_deinit(Uns_GC gc)
{
	struct ctx *ctx = uns_ctx(gc);
	unsigned char *heap = ctx->tospace;

	free(ctx);
	free(heap);
}
|
2024-06-11 23:40:34 -04:00
|
|
|
|
2024-07-08 19:58:22 -04:00
|
|
|
void uns_cheney_c89_set_collect_callback(
|
|
|
|
Uns_GC gc,
|
|
|
|
uns_cheney_c89_collect_callback cb
|
|
|
|
)
|
2024-06-11 23:40:34 -04:00
|
|
|
{
|
2024-07-08 19:58:22 -04:00
|
|
|
struct ctx *ctx = uns_ctx(gc);
|
|
|
|
ctx->cb = cb;
|
2024-06-11 23:40:34 -04:00
|
|
|
}
|
|
|
|
|
2024-07-08 19:58:22 -04:00
|
|
|
/** Record `l` as the heap size to use for the next collection. */
void uns_cheney_c89_set_new_heap_size(Uns_GC gc, size_t l)
{
	struct ctx *state = uns_ctx(gc);

	state->new_heap_size = l;
}
|
2024-06-11 23:40:34 -04:00
|
|
|
|
2024-07-08 19:58:22 -04:00
|
|
|
/** Report the heap size that the next collection will allocate. */
size_t uns_cheney_c89_get_new_heap_size(Uns_GC gc)
{
	struct ctx *state = uns_ctx(gc);

	return state->new_heap_size;
}
|
|
|
|
|
|
|
|
/** Header of an allocated region is a single uns_sword.
 *
 * 0: Relocated. What follows is a pointer to the relocated region.
 * Positive: allocated bytes.
 * Negative: allocated record of pointers with this many bytes.
 *
 * All lengths must be representable in positive and negative.
 * Hence UNS_SWORD_MIN and UNS_SWORD_MAX are disallowed length
 * values.
 *
 * Relocated pointers only exist during copying and are not present
 * during normal execution.
 */

/** Destructured header. "len" is always the readable length of the
 * region in bytes (it is always positive).
 */
struct hdr {
	/* Region kind, decoded from the sign of the stored word. */
	enum {
		RELO,	/* forwarded; region body holds the new address */
		REGION,	/* plain bytes; not scanned by the collector */
		RECORD	/* array of void* fields; scanned during copy */
	} typ;
	uns_sword len;
};
|
|
|
|
|
|
|
|
/** Number of fields in a record of `n` bytes. */
#define REC_FIELDS(n) ((n)/sizeof(void*))

/** Size in bytes of the header. */
#define HDR_LEN sizeof(uns_sword)

/** Extract a header from a pointer to a region.
 * The argument is parenthesized so that expressions such as
 * HDR_PTR(base + off) expand correctly (the original expansion
 * applied the cast to `base` alone).
 */
#define HDR_PTR(p) ((unsigned char *)(p) - HDR_LEN)

/** Minimum size of a region. */
#define MIN_REG_LEN sizeof(void*)
|
|
|
|
|
|
|
|
/** Destructure header pointed to by p
 *
 * # Parameters
 * - `out notnull hdr`: Destructured header.
 * - `notnull p`: Pointer to a header. This is not a pointer to a region.
 *   For a pointer to a region, use `hdr_extract`.
 */
static void hdr_read(struct hdr *hdr, unsigned char *p)
{
	assert(hdr);
	assert(p);

	/* Under valgrind, abort unless the first payload byte is
	 * addressable; a failure means `p` is not a live header. */
	if (VALGRIND_CHECK_MEM_IS_ADDRESSABLE(p + HDR_LEN, 1) != 0)
		abort();

	/* The header word is kept NOACCESS outside allocator calls;
	 * open a window just long enough to copy it out. */
	VALGRIND_MAKE_MEM_DEFINED(p, HDR_LEN);
	memcpy(&hdr->len, p, HDR_LEN);
	VALGRIND_MAKE_MEM_NOACCESS(p, HDR_LEN);

	/* Decode the sign: negative = record, zero = relocation
	 * pointer, positive = plain byte region.  `len` always ends
	 * up as the positive payload length in bytes. */
	if (hdr->len < 0) {
		hdr->typ = RECORD;
		hdr->len = -hdr->len;
	} else if (hdr->len == 0) {
		hdr->typ = RELO;
		hdr->len = sizeof(void*);
	} else {
		hdr->typ = REGION;
	}

	/* The whole payload must be addressable. */
	if (VALGRIND_CHECK_MEM_IS_ADDRESSABLE(p + HDR_LEN, hdr->len) != 0)
		abort();
}
|
|
|
|
|
|
|
|
/** Destructure header from a pointer to a region.
 *
 * # Parameters
 * - `out notnull hdr`: Destructured header.
 * - `notnull p`: Pointer to a region. This is not a pointer to the
 *   header; the macro steps back over the header word itself.
 */
#define hdr_extract(h,p) hdr_read(h, HDR_PTR(p))
|
|
|
|
|
|
|
|
/** Write a header to a location.
|
|
|
|
*
|
|
|
|
* # Parameters
|
|
|
|
* - `notnull hdr`: Header description with all fields filled out. This
|
|
|
|
* function does not do sanity checking and may overflow if given
|
|
|
|
* bad data.
|
|
|
|
* - `notnull p`: Header to where the header should be written to.
|
|
|
|
* This will overwrite data at the pointer location. The pointer
|
|
|
|
* becomes a pointer to a header, not a pointer to a region.
|
|
|
|
*/
|
|
|
|
static void hdr_write_direct(struct hdr *hdr, unsigned char *p)
|
|
|
|
{
|
|
|
|
uns_sword s;
|
|
|
|
|
|
|
|
assert(hdr);
|
|
|
|
assert(p);
|
|
|
|
|
|
|
|
switch (hdr->typ) {
|
|
|
|
case REGION: s = hdr->len; break;
|
|
|
|
case RELO: s = 0; break;
|
|
|
|
case RECORD: s = -hdr->len; break;
|
|
|
|
}
|
|
|
|
|
2024-07-14 01:43:56 -04:00
|
|
|
if (VALGRIND_CHECK_MEM_IS_ADDRESSABLE(p + HDR_LEN, hdr->len) != 0)
|
|
|
|
abort();
|
|
|
|
|
|
|
|
VALGRIND_MAKE_MEM_DEFINED(p, HDR_LEN);
|
2024-07-08 19:58:22 -04:00
|
|
|
memcpy(p, &s, HDR_LEN);
|
2024-07-14 01:43:56 -04:00
|
|
|
VALGRIND_MAKE_MEM_NOACCESS(p, HDR_LEN);
|
2024-07-08 19:58:22 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
/** Write to the header of a region.
 *
 * # Parameters
 * - `notnull hdr`: See `hdr_write_direct`.
 * - `notnull p`: Pointer to a region. The call will do pointer arithmetic
 *   to get the pointer to the header, and write to the header. It will
 *   not overwrite region data.
 */
#define hdr_write(h, p) hdr_write_direct(h, HDR_PTR(p))
|
|
|
|
|
|
|
|
/** Returns true if there is enough space in the heap for a region with
|
|
|
|
* length `bytes`.
|
|
|
|
*/
|
|
|
|
static int enough_space(struct ctx *ctx, uns_sword bytes)
|
|
|
|
{
|
2024-07-14 01:20:50 -04:00
|
|
|
return ctx->tospace_end - ctx->tospace_alloc >= bytes + HDR_LEN + REDZONE;
|
2024-06-11 23:40:34 -04:00
|
|
|
}
|
|
|
|
|
2024-07-08 19:58:22 -04:00
|
|
|
/** Allocate region without bounds checking.
|
|
|
|
*
|
|
|
|
* # Parameters
|
|
|
|
* - `len`: Length in bytes of the entire record. This includes the header
|
|
|
|
* region. The length should have been adjusted by the caller to include
|
|
|
|
* the minimum region length.
|
|
|
|
* # Returns
|
|
|
|
* A pointer to a region.
|
|
|
|
*/
|
|
|
|
static void *raw_alloc(struct ctx *ctx, uns_sword len, int is_record)
|
2024-06-11 23:40:34 -04:00
|
|
|
{
|
|
|
|
unsigned char *p;
|
2024-07-08 19:58:22 -04:00
|
|
|
struct hdr hdr = {0};
|
2024-06-11 23:40:34 -04:00
|
|
|
|
2024-07-08 19:58:22 -04:00
|
|
|
assert(len >= HDR_LEN + MIN_REG_LEN);
|
2024-06-11 23:40:34 -04:00
|
|
|
|
2024-07-08 19:58:22 -04:00
|
|
|
hdr.len = len - HDR_LEN;
|
|
|
|
if (is_record) {
|
|
|
|
hdr.typ = RECORD;
|
|
|
|
} else {
|
|
|
|
hdr.typ = REGION;
|
|
|
|
}
|
|
|
|
|
|
|
|
assert(enough_space(ctx, len));
|
|
|
|
|
|
|
|
p = ctx->tospace_alloc;
|
2024-07-14 01:43:56 -04:00
|
|
|
VALGRIND_MEMPOOL_ALLOC(ctx->tospace, p + HDR_LEN, len - HDR_LEN);
|
2024-07-14 01:20:50 -04:00
|
|
|
|
2024-07-08 19:58:22 -04:00
|
|
|
hdr_write_direct(&hdr, p);
|
2024-07-14 01:20:50 -04:00
|
|
|
ctx->tospace_alloc += len + REDZONE;
|
2024-07-08 19:58:22 -04:00
|
|
|
|
|
|
|
return p + HDR_LEN;
|
2024-06-11 23:40:34 -04:00
|
|
|
}
|
|
|
|
|
2024-07-08 19:58:22 -04:00
|
|
|
/** Move an entire object to the new heap during collection.
 *
 * This function does nothing if `p` is NULL. Otherwise, `p` is a pointer
 * to a region.
 * If `p` was relocated (typ == `RELO`), then this function returns the
 * forwarding pointer to the relocated region in tospace.
 * Otherwise, it allocates memory in the tospace, copies the entire region
 * with its header to the tospace, and modifies the region in the fromspace
 * to be a region of type `RELO`.
 */
static unsigned char *relocate(struct ctx *ctx, unsigned char *p)
{
	void *res;
	struct hdr hdr = {0};

	if (!p)
		return NULL;
	hdr_extract(&hdr, p);

	/* Already moved: the old region body holds the forwarding
	 * pointer into tospace. */
	if (hdr.typ == RELO) {
		memcpy(&res, p, sizeof(void*));
		return res;
	}

	assert(hdr.len >= MIN_REG_LEN);

	/* Write entire region to memory */
	res = raw_alloc(ctx, HDR_LEN + hdr.len, hdr.typ == RECORD);
	memcpy(res, p, hdr.len);
	hdr_write(&hdr, res);

	/* Change old pointer to relocation pointer: first rewrite the
	 * fromspace header as RELO, then store the forwarding address
	 * in the (now dead) region body. */
	hdr.typ = RELO;
	hdr_write(&hdr, p);
	memcpy(p, &res, sizeof(void*));
	return res;
}
|
|
|
|
|
2024-07-08 19:58:22 -04:00
|
|
|
/** Calculate the starting byte index of an element in a record.
 *
 * # Parameters
 * - `notnull p`: Pointer to a region.
 * - `loc`: Index into the region.
 */
static size_t record_index(Uns_ptr p, size_t loc)
{
	struct hdr hdr = {0};

	assert(p);
	hdr_extract(&hdr, p);
	assert(hdr.typ == RECORD);

	/* hdr.len / sizeof(void*) is the number of fields in the
	 * record; the index must fall inside it. */
	assert(loc < hdr.len / sizeof(void*));
	/* Guard the multiplication below against size_t overflow.
	 * NOTE(review): SIZE_MAX is C99 <stdint.h>; this file only
	 * includes <limits.h> — presumably uns.h supplies it. Verify. */
	assert(loc < SIZE_MAX/sizeof(void*));
	return loc * sizeof(void*);
}
|
|
|
|
|
|
|
|
/** Read pointer field `loc` of record `p`.  When `typ` is non-NULL it
 * is set to UNS_POINTER (the only field type this collector stores).
 */
void *uns_cheney_c89_get(Uns_ptr p, size_t loc,
	enum uns_fld_type *typ)
{
	void *value;

	if (typ != NULL)
		*typ = UNS_POINTER;
	loc = record_index(p, loc);
	memcpy(&value, (unsigned char *)p + loc, sizeof(void*));
	return value;
}
|
|
|
|
|
|
|
|
/** Store `newp` into pointer field `loc` of record `p`.  Only
 * UNS_POINTER fields are supported.
 */
void uns_cheney_c89_set(Uns_ptr p, size_t loc,
	enum uns_fld_type typ, void *newp)
{
	unsigned char *dst;

	assert(typ == UNS_POINTER);
	dst = (unsigned char *)p + record_index(p, loc);
	memcpy(dst, &newp, sizeof(void*));
}
|
|
|
|
|
|
|
|
/** Relocate each pointer in a record and record its new pointer.
|
|
|
|
*
|
|
|
|
* # Parameters
|
|
|
|
* - `notnull p`: Pointer to a record.
|
|
|
|
* - `len`: Number of elements in the record.
|
|
|
|
*/
|
|
|
|
static void scan_record(struct ctx *ctx, void *p, size_t len)
|
2024-06-11 23:40:34 -04:00
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
void *newp;
|
|
|
|
|
2024-07-08 19:58:22 -04:00
|
|
|
for (i = 0; i < len; i++) {
|
|
|
|
newp = relocate(ctx,
|
|
|
|
uns_cheney_c89_get(p, i, NULL));
|
|
|
|
uns_cheney_c89_set(p, i, UNS_POINTER, newp);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Main section of the copying algorithm (Cheney scan). */
static void relocate_everything(Uns_GC gc)
{
	unsigned char *scanptr;
	struct uns_ctr *root;
	struct ctx *ctx = uns_ctx(gc);
	struct hdr hdr = {0};

	/* Relocate roots */
	for (root = uns_roots(gc); root; root = root->next)
		root->p = relocate(ctx, root->p);

	/* Scan the heap until the end of allocated space. If there
	 * is a record at this location, read the record and relocate
	 * all values, and then change each field to the relocated
	 * record pointer.
	 * Note that relocate() bumps tospace_alloc as it copies, so
	 * the loop keeps running until the scan pointer catches up
	 * with the allocation pointer. */
	scanptr = ctx->tospace;
	while (scanptr != ctx->tospace_alloc) {
		/* scanptr currently points to the header data. */
		hdr_read(&hdr, scanptr);
		scanptr += HDR_LEN;

		if (hdr.typ == RECORD)
			scan_record(ctx, scanptr, (size_t)hdr.len/sizeof(void*));
		/* Skip the payload and the valgrind redzone. */
		scanptr += hdr.len + REDZONE;
	}

}
|
|
|
|
|
2024-07-08 19:58:22 -04:00
|
|
|
/** Run a full copying collection.
 *
 * Allocates a fresh tospace of `new_heap_size` bytes, relocates all
 * live objects into it, frees the old heap, updates the statistics and
 * invokes the user callback (if any).
 *
 * # Returns
 * 1 always; when the new heap cannot be allocated the old heap is kept
 * intact and no objects move.
 */
int uns_cheney_c89_collect(Uns_GC gc)
{
	/* Save fromspace */
	struct ctx *ctx = uns_ctx(gc);
	unsigned char *fromspace = ctx->tospace;
	unsigned char *fromspace_lim = ctx->tospace_alloc;

	size_t newlen = ctx->new_heap_size;

	ctx->stats.usage_before = fromspace_lim - fromspace;

	/* Bail out immediately if allocation fails. This preserves
	 * the objects as they were.
	 */
	assert(newlen >= fromspace_lim - fromspace);
	ctx->tospace = malloc(newlen);
	if (!ctx->tospace) {
		ctx->tospace = fromspace;
		return 1;
	}
	/* BUGFIX: register the mempool only after malloc is known to
	 * have succeeded; the original registered it for NULL on the
	 * OOM path above. */
	VALGRIND_CREATE_MEMPOOL(ctx->tospace, REDZONE, 0);

	/* Setup context to be valid for the allocator */
	ctx->tospace_end = ctx->tospace + newlen;
	ctx->tospace_alloc = ctx->tospace;

	relocate_everything(gc);

	VALGRIND_DESTROY_MEMPOOL(fromspace);
	free(fromspace);

	ctx->stats.usage_after = ctx->tospace_alloc - ctx->tospace;
	ctx->stats.collection_number += 1;
	if (ctx->cb)
		ctx->cb(gc, &ctx->stats);

	return 1;
}
|
|
|
|
|
2024-07-08 19:58:22 -04:00
|
|
|
/** Bounds-checked allocation shared by the byte and record allocators.
 *
 * Collects when the heap is full; calls `uns_on_oom` and returns NULL
 * when the request still cannot be satisfied or is too large to
 * encode in a header.
 */
static void *alloc(Uns_GC gc, size_t bytes, int is_record)
{
	struct ctx *ctx = uns_ctx(gc);
	uns_sword bytes_as_sword;

	/* BUGFIX: the original check `bytes >= UNS_SWORD_MAX` still
	 * allowed `(uns_sword)bytes + HDR_LEN` below to overflow the
	 * signed type (undefined behavior).  Reject anything whose
	 * header-adjusted size does not fit. */
	if (bytes >= UNS_SWORD_MAX - HDR_LEN) {
		uns_on_oom(gc);
		return NULL;
	} else if (bytes < MIN_REG_LEN) {
		bytes = MIN_REG_LEN;
	}

	bytes_as_sword = (uns_sword)bytes + HDR_LEN;

	/* Make sure to check for header space when allocating */
	if (!enough_space(ctx, bytes_as_sword)) {
		uns_cheney_c89_collect(gc);
		if (!enough_space(ctx, bytes_as_sword)) {
			uns_on_oom(gc);
			return NULL;
		}
	}

	return raw_alloc(ctx, HDR_LEN + bytes, is_record);
}
|
|
|
|
|
2024-07-08 19:58:22 -04:00
|
|
|
/** Allocate `bytes` of raw (non-pointer) storage.  Only UNS_NOEXEC
 * regions are supported by this collector.
 */
void *uns_cheney_c89_alloc(Uns_GC gc, size_t bytes, enum uns_bytes_type typ)
{
	assert(typ == UNS_NOEXEC);

	return alloc(gc, bytes, /* is_record = */ 0);
}
|
|
|
|
|
2024-07-08 19:58:22 -04:00
|
|
|
/** Allocate a record of `len` pointer fields, every field initialized
 * to NULL.  Only UNS_POINTERS_ONLY records are supported; a request
 * for zero fields is rounded up to one.
 */
void *uns_cheney_c89_alloc_rec(Uns_GC gc, size_t len, enum uns_record_type typ)
{
	void *rec;
	size_t field;

	assert(typ == UNS_POINTERS_ONLY);

	if (len >= SIZE_MAX/sizeof(void*)) {
		uns_on_oom(gc);
		return NULL;
	}
	if (len == 0)
		len = 1;

	rec = alloc(gc, len*sizeof(void*), 1);
	if (rec == NULL)
		return NULL;

	/* NULL may not be all-bits-zero, so write each field through
	 * the setter instead of memset. */
	for (field = 0; field < len; field++)
		uns_cheney_c89_set(rec, field, UNS_POINTER, NULL);
	return rec;
}
|
|
|
|
|
2024-07-08 19:58:22 -04:00
|
|
|
/** Initialize `gc` as a Cheney copying collector with a heap of
 * `heap_size` bytes, installing all collector entry points.
 *
 * # Returns
 * 1 on success; 0 when the context or the heap cannot be allocated.
 * Note that `uns_deinit` has already torn down the previous collector
 * by the time a heap-allocation failure is reported.
 */
int uns_cheney_c89_init(Uns_GC gc, size_t heap_size)
{
	struct ctx *ctx = malloc(sizeof(struct ctx));
	if (!ctx)
		return 0;

	uns_deinit(gc);
	ctx->tospace_alloc = ctx->tospace = malloc(heap_size);
	if (!ctx->tospace) {
		free(ctx);
		return 0;
	}
	/* BUGFIX: register the mempool only for a successfully
	 * allocated heap; the original registered it before the NULL
	 * check, i.e. possibly for a NULL pool. */
	VALGRIND_CREATE_MEMPOOL(ctx->tospace, REDZONE, 0);

	ctx->stats.usage_before = ctx->stats.usage_after
		= ctx->stats.collection_number = 0;

	ctx->tospace_end = ctx->tospace + heap_size;
	ctx->new_heap_size = heap_size;
	ctx->cb = NULL;

	uns_set_ctx(gc, ctx);
	uns_set_deinit(gc, uns_cheney_c89_deinit);
	uns_set_collect(gc, uns_cheney_c89_collect);
	uns_set_alloc(gc, uns_cheney_c89_alloc);
	uns_set_alloc_rec(gc, uns_cheney_c89_alloc_rec);
	uns_set_set(gc, uns_cheney_c89_set);
	uns_set_get(gc, uns_cheney_c89_get);

	return 1;
}
|