/*
  Source:       https://github.com/JuliaLang/julia
  Tip revision: e909ff57426d4c273c8191f168849f570d4992b4 ("fix test"),
                authored by Jeff Bezanson on 27 May 2021, 21:36:18 UTC
  File:         datatype.c
*/
// This file is a part of Julia. License is MIT: https://julialang.org/license
/*
defining DataTypes
basic operations on struct and bits values
*/
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include "julia.h"
#include "julia_internal.h"
#include "julia_assert.h"
#include "julia_gcext.h"
#ifdef __cplusplus
extern "C" {
#endif
// allocating TypeNames -----------------------------------------------------------
// Return nonzero iff `c` is an ASCII decimal digit ('0'..'9').
static int is10digit(char c) JL_NOTSAFEPOINT
{
    if (c < '0')
        return 0;
    return c <= '9';
}
// Map a mangled generated-function name back toward its user-facing form:
// `#f#...` yields `f`; names that don't start with '#' are returned unchanged.
// Generated names whose first character after '#' is a digit keep the '#'
// prefix so they remain distinct from plain identifiers.
static jl_sym_t *jl_demangle_typename(jl_sym_t *s) JL_NOTSAFEPOINT
{
    char *n = jl_symbol_name(s);
    if (n[0] != '#')
        return s;
    char *end = strrchr(n, '#');
    int32_t len;
    if (end == n || end == n+1)
        len = strlen(n) - 1;    // no trailing `#...` part: drop only the leading '#'
    else
        len = (end-n) - 1; // extract `f` from `#f#...`
    if (is10digit(n[1]))
        return _jl_symbol(n, len+1);    // keep the '#' prefix for `#123...` style names
    return _jl_symbol(&n[1], len);
}
// Allocate and initialize an empty method table named `name` in `module`.
// The stored name is demangled so e.g. closures display with a readable name.
JL_DLLEXPORT jl_methtable_t *jl_new_method_table(jl_sym_t *name, jl_module_t *module)
{
    jl_task_t *ct = jl_current_task;
    jl_methtable_t *mt =
        (jl_methtable_t*)jl_gc_alloc(ct->ptls, sizeof(jl_methtable_t),
                                     jl_methtable_type);
    mt->name = jl_demangle_typename(name);
    mt->module = module;
    mt->defs = jl_nothing;                              // no method definitions yet
    mt->leafcache = (jl_array_t*)jl_an_empty_vec_any;
    mt->cache = jl_nothing;
    mt->max_args = 0;
    mt->kwsorter = NULL;                                // filled in lazily (elsewhere)
    mt->backedges = NULL;
    JL_MUTEX_INIT(&mt->writelock);
    mt->offs = 0;       // argument position used for cache indexing (see jl_new_datatype)
    mt->frozen = 0;
    return mt;
}
// Allocate a new jl_typename_t for `name` in `module`.
// Most reference fields start NULL/empty and are filled in by the caller
// (see jl_new_datatype below).
JL_DLLEXPORT jl_typename_t *jl_new_typename_in(jl_sym_t *name, jl_module_t *module, int abstract, int mutabl)
{
    jl_task_t *ct = jl_current_task;
    jl_typename_t *tn =
        (jl_typename_t*)jl_gc_alloc(ct->ptls, sizeof(jl_typename_t),
                                    jl_typename_type);
    tn->name = name;
    tn->module = module;
    tn->wrapper = NULL;
    tn->cache = jl_emptysvec;
    tn->linearcache = jl_emptysvec;
    tn->names = NULL;
    // hash mixes the defining module's build_id with the name's own hash,
    // salted with an arbitrary constant
    tn->hash = bitmix(bitmix(module ? module->build_id : 0, name->hash), 0xa1ada1da);
    tn->abstract = abstract;
    tn->mutabl = mutabl;
    tn->mayinlinealloc = 0;
    tn->mt = NULL;
    tn->partial = NULL;
    tn->atomicfields = NULL;
    return tn;
}
// allocating DataTypes -----------------------------------------------------------
// Convenience wrapper: create an abstract type (no fields, abstract=1, mutable=0).
jl_datatype_t *jl_new_abstracttype(jl_value_t *name, jl_module_t *module, jl_datatype_t *super, jl_svec_t *parameters)
{
    return jl_new_datatype((jl_sym_t*)name, module, super, parameters, jl_emptysvec, jl_emptysvec, jl_emptysvec, 1, 0, 0);
}
// Allocate a jl_datatype_t with all flags cleared and all reference fields NULL.
// has_concrete_subtype defaults to 1 (assumed true until proven otherwise).
jl_datatype_t *jl_new_uninitialized_datatype(void)
{
    jl_task_t *ct = jl_current_task;
    jl_datatype_t *t = (jl_datatype_t*)jl_gc_alloc(ct->ptls, sizeof(jl_datatype_t), jl_datatype_type);
    t->hash = 0;
    t->hasfreetypevars = 0;
    t->isdispatchtuple = 0;
    t->isbitstype = 0;
    t->zeroinit = 0;
    t->has_concrete_subtype = 1;
    t->cached_by_hash = 0;
    t->name = NULL;
    t->super = NULL;
    t->parameters = NULL;
    t->layout = NULL;
    t->types = NULL;
    t->instance = NULL;
    return t;
}
// Permanently allocate and fill a jl_datatype_layout_t describing `nfields`
// field descriptors (`desc`) and `npointers` pointer-slot offsets (`pointers`,
// in units of sizeof(void*)). Field descriptors are stored in the narrowest of
// the 8/16/32-bit encodings that can represent every size and offset.
static jl_datatype_layout_t *jl_get_layout(uint32_t nfields,
                                           uint32_t npointers,
                                           uint32_t alignment,
                                           int haspadding,
                                           jl_fielddesc32_t desc[],
                                           uint32_t pointers[]) JL_NOTSAFEPOINT
{
    assert(alignment); // should have been verified by caller

    // compute the smallest fielddesc type that can hold the layout description
    int fielddesc_type = 0;
    if (nfields > 0) {
        uint32_t max_size = 0;
        uint32_t max_offset = desc[nfields - 1].offset;
        if (npointers > 0 && pointers[npointers - 1] > max_offset)
            max_offset = pointers[npointers - 1];
        for (size_t i = 0; i < nfields; i++) {
            if (desc[i].size > max_size)
                max_size = desc[i].size;
        }
        // Probe each encoding by round-tripping the extreme values through its
        // bitfields: if they survive unchanged, that encoding is wide enough.
        jl_fielddesc8_t maxdesc8 = { 0, max_size, max_offset };
        jl_fielddesc16_t maxdesc16 = { 0, max_size, max_offset };
        jl_fielddesc32_t maxdesc32 = { 0, max_size, max_offset };
        if (maxdesc8.size != max_size || maxdesc8.offset != max_offset) {
            fielddesc_type = 1;
            if (maxdesc16.size != max_size || maxdesc16.offset != max_offset) {
                fielddesc_type = 2;
                if (maxdesc32.size != max_size || maxdesc32.offset != max_offset) {
                    assert(0); // should have been verified by caller
                }
            }
        }
    }

    // allocate a new descriptor
    // TODO: lots of these are the same--take advantage of the fact these are immutable to combine them
    uint32_t fielddesc_size = jl_fielddesc_size(fielddesc_type);
    // trailing storage: nfields descriptors, then npointers offsets whose
    // element width is 1<<fielddesc_type bytes
    jl_datatype_layout_t *flddesc = (jl_datatype_layout_t*)jl_gc_perm_alloc(
            sizeof(jl_datatype_layout_t) + nfields * fielddesc_size + (npointers << fielddesc_type),
            0, 4, 0);
    flddesc->nfields = nfields;
    flddesc->alignment = alignment;
    flddesc->haspadding = haspadding;
    flddesc->fielddesc_type = fielddesc_type;
    flddesc->npointers = npointers;
    flddesc->first_ptr = (npointers > 0 ? pointers[0] : -1);

    // fill out the fields of the new descriptor
    jl_fielddesc8_t* desc8 = (jl_fielddesc8_t*)jl_dt_layout_fields(flddesc);
    jl_fielddesc16_t* desc16 = (jl_fielddesc16_t*)jl_dt_layout_fields(flddesc);
    jl_fielddesc32_t* desc32 = (jl_fielddesc32_t*)jl_dt_layout_fields(flddesc);
    for (size_t i = 0; i < nfields; i++) {
        if (fielddesc_type == 0) {
            desc8[i].offset = desc[i].offset;
            desc8[i].size = desc[i].size;
            desc8[i].isptr = desc[i].isptr;
        }
        else if (fielddesc_type == 1) {
            desc16[i].offset = desc[i].offset;
            desc16[i].size = desc[i].size;
            desc16[i].isptr = desc[i].isptr;
        }
        else {
            desc32[i].offset = desc[i].offset;
            desc32[i].size = desc[i].size;
            desc32[i].isptr = desc[i].isptr;
        }
    }
    // ... and the pointer offsets, in the matching width
    uint8_t* ptrs8 = (uint8_t*)jl_dt_layout_ptrs(flddesc);
    uint16_t* ptrs16 = (uint16_t*)jl_dt_layout_ptrs(flddesc);
    uint32_t* ptrs32 = (uint32_t*)jl_dt_layout_ptrs(flddesc);
    for (size_t i = 0; i < npointers; i++) {
        if (fielddesc_type == 0) {
            ptrs8[i] = pointers[i];
        }
        else if (fielddesc_type == 1) {
            ptrs16[i] = pointers[i];
        }
        else {
            ptrs32[i] = pointers[i];
        }
    }
    return flddesc;
}
// Determine if homogeneous tuple with fields of type t will have
// a special alignment beyond normal Julia rules.
// Return special alignment if one exists, 0 if normal alignment rules hold.
// A non-zero result *must* match the LLVM rules for a vector type <nfields x t>.
// For sake of Ahead-Of-Time (AOT) compilation, this routine has to work
// without LLVM being available.
unsigned jl_special_vector_alignment(size_t nfields, jl_value_t *t)
{
    if (!jl_is_vecelement_type(t))
        return 0;
    // VecElement wraps exactly one field
    assert(jl_datatype_nfields(t) == 1);
    jl_value_t *ty = jl_field_type((jl_datatype_t*)t, 0);
    if (!jl_is_primitivetype(ty))
        // LLVM requires that a vector element be a primitive type.
        // LLVM allows pointer types as vector elements, but until a
        // motivating use case comes up for Julia, we reject pointers.
        return 0;
    size_t elsz = jl_datatype_size(ty);
    if (elsz != 1 && elsz != 2 && elsz != 4 && elsz != 8)
        // Only handle power-of-two-sized elements (for now)
        return 0;
    size_t size = nfields * elsz;
    // Use natural alignment for this vector: this matches LLVM and clang.
    return next_power_of_two(size);
}
// A type gets a shared singleton instance when it is a concrete, immutable,
// non-abstract type of size zero -- with special carve-outs for Symbol and
// Array, which are size 0 here but have opaque heap-allocated payloads.
STATIC_INLINE int jl_is_datatype_make_singleton(jl_datatype_t *d)
{
    return (!d->name->abstract && jl_datatype_size(d) == 0 && d != jl_symbol_type && d->name != jl_array_typename &&
            d->isconcretetype && !d->name->mutabl);
}
// If `st` qualifies as a singleton type, allocate its unique (zero-size)
// instance and store it in st->instance.
STATIC_INLINE void jl_maybe_allocate_singleton_instance(jl_datatype_t *st)
{
    if (jl_is_datatype_make_singleton(st)) {
        // It's possible for st to already have an ->instance if it was redefined
        if (!st->instance) {
            jl_task_t *ct = jl_current_task;
            st->instance = jl_gc_alloc(ct->ptls, 0, st);
            jl_gc_wb(st, st->instance);
        }
    }
}
// Can instances of `ty` be stored inline (unboxed) inside a parent object or
// array element? If `pointerfree` is set, additionally require that `ty`
// contain no GC-tracked pointers.
int jl_datatype_isinlinealloc(jl_datatype_t *ty, int pointerfree) JL_NOTSAFEPOINT
{
    if (ty->name->mayinlinealloc && ty->layout) {
        if (ty->layout->npointers > 0) {
            if (pointerfree)
                return 0;
            // NOTE(review): presumably rejected because a partially-initialized
            // inline value would expose invalid pointer slots -- confirm
            if (ty->name->n_uninitialized != 0)
                return 0;
            if (ty->layout->fielddesc_type > 1) // GC only implements support for 8 and 16 (not array32)
                return 0;
        }
        return 1;
    }
    return 0;
}
// Recursive worker for the inline-layout queries below. Returns the number of
// inlineable leaf components of `ty` (0 if `ty` is not inlineable), widening
// *nbytes / *align to the largest size / alignment seen over all components.
// `asfield` indicates the value will be stored in a struct field slot.
static unsigned union_isinlinable(jl_value_t *ty, int pointerfree, size_t *nbytes, size_t *align, int asfield) JL_NOTSAFEPOINT
{
    if (jl_is_uniontype(ty)) {
        // both halves must be inlineable; union components must be pointer-free
        // (note the hard-coded pointerfree=1 in the recursive calls)
        unsigned na = union_isinlinable(((jl_uniontype_t*)ty)->a, 1, nbytes, align, asfield);
        if (na == 0)
            return 0;
        unsigned nb = union_isinlinable(((jl_uniontype_t*)ty)->b, 1, nbytes, align, asfield);
        if (nb == 0)
            return 0;
        return na + nb;
    }
    if (jl_is_datatype(ty) && jl_datatype_isinlinealloc((jl_datatype_t*)ty, pointerfree)) {
        size_t sz = jl_datatype_size(ty);
        size_t al = jl_datatype_align(ty);
        // primitive types in struct slots need their sizes aligned. issue #37974
        if (asfield && jl_is_primitivetype(ty))
            sz = LLT_ALIGN(sz, al);
        if (*nbytes < sz)
            *nbytes = sz;
        if (*align < al)
            *align = al;
        return 1;
    }
    return 0;
}
// Compute the inline storage size of `ty` into `*sz`; returns whether `ty`
// is representable inline at all. The alignment result is discarded.
int jl_uniontype_size(jl_value_t *ty, size_t *sz) JL_NOTSAFEPOINT
{
    size_t ignored_align = 0;
    unsigned count = union_isinlinable(ty, 0, sz, &ignored_align, 0);
    return count != 0;
}
// If `eltype` can be stored inline, return its number of union components
// (1 for a plain type) and fill *fsz / *al with its size and alignment;
// otherwise return 0. Counts of 127 or more are treated as not inlineable.
JL_DLLEXPORT int jl_islayout_inline(jl_value_t *eltype, size_t *fsz, size_t *al) JL_NOTSAFEPOINT
{
    unsigned count = union_isinlinable(eltype, 0, fsz, al, 1);
    if (count == 0 || count >= 127)
        return 0;
    return count;
}
// Boolean query: can values of `eltype` be stored inline (e.g. in an array)?
// The size/alignment results are computed but discarded.
JL_DLLEXPORT int jl_stored_inline(jl_value_t *eltype) JL_NOTSAFEPOINT
{
    size_t size = 0;
    size_t align = 0;
    return jl_islayout_inline(eltype, &size, &align);
}
// whether instances of this type can use pointer comparison for `===`
int jl_pointer_egal(jl_value_t *t)
{
    if (t == (jl_value_t*)jl_any_type)
        return 0; // when setting up the initial types, jl_is_type_type gets confused about this
    if (t == (jl_value_t*)jl_symbol_type)
        return 1; // symbols are interned
    if (t == (jl_value_t*)jl_bool_type)
        return 1; // Bool has exactly two (cached) instances
    if (jl_is_mutable_datatype(t) && // excludes abstract types
        t != (jl_value_t*)jl_string_type && // technically mutable, but compared by contents
        t != (jl_value_t*)jl_simplevector_type &&
        !jl_is_kind(t))
        return 1; // mutable objects have identity semantics
    if ((jl_is_datatype(t) && jl_is_datatype_singleton((jl_datatype_t*)t)) ||
        t == (jl_value_t*)jl_typeofbottom_type->super)
        return 1; // singletons have a unique instance
    if (jl_is_type_type(t) && jl_is_datatype(jl_tparam0(t))) {
        // need to use typeseq for most types
        // but can compare some types by pointer
        jl_datatype_t *dt = (jl_datatype_t*)jl_tparam0(t);
        // `Core.TypeofBottom` and `Type{Union{}}` are used interchangeably
        // with different pointer values even though `Core.TypeofBottom` is a concrete type.
        // See `Core.Compiler.hasuniquerep`
        if (dt != jl_typeofbottom_type &&
            (dt->isconcretetype || jl_svec_len(dt->parameters) == 0)) {
            // Concrete types have unique pointer values
            // If the type has zero type parameters it'll also have only one possible
            // pointer value.
            return 1;
        }
    }
    return 0;
}
// Report a field-offset overflow for type `st`. Frees the field-descriptor
// scratch buffer first when it was heap-allocated (should_malloc), since
// jl_errorf throws and would otherwise leak it.
static void throw_ovf(int should_malloc, void *desc, jl_datatype_t* st, int offset)
{
    if (should_malloc)
        free(desc);
    jl_errorf("type %s has field offset %d that exceeds the page size", jl_symbol_name(st->name->name), offset);
}
// Compute and install the memory layout for datatype `st`: field offsets and
// sizes, the GC pointer map, alignment, and total size; also derives the
// zeroinit / isbitstype / has_concrete_subtype properties and may allocate a
// singleton instance. Returns early (without a layout) when called too early
// in bootstrap or when the layout cannot yet be determined (free type vars).
void jl_compute_field_offsets(jl_datatype_t *st)
{
    const uint64_t max_offset = (((uint64_t)1) << 32) - 1;  // offsets stored as uint32
    const uint64_t max_size = max_offset >> 1;
    if (st->name->wrapper == NULL)
        return; // we got called too early--we'll be back
    jl_datatype_t *w = (jl_datatype_t*)jl_unwrap_unionall(st->name->wrapper);
    assert(st->types && w->types);
    size_t i, nfields = jl_svec_len(st->types);
    assert(st->name->n_uninitialized <= nfields);
    if (st == w && st->layout) {
        // this check allows us to force re-computation of the layout for some types during init
        st->layout = NULL;
        st->size = 0;
        st->zeroinit = 0;
        st->has_concrete_subtype = 1;
    }
    // If layout doesn't depend on type parameters, it's stored in st->name->wrapper
    // and reused by all subtypes.
    if (w->layout) {
        st->layout = w->layout;
        st->size = w->size;
        st->zeroinit = w->zeroinit;
        st->has_concrete_subtype = w->has_concrete_subtype;
        if (jl_is_layout_opaque(st->layout)) { // e.g. jl_array_typename
            return;
        }
    }
    else if (nfields == 0) {
        // if we have no fields, we can trivially skip the rest
        if (st == jl_symbol_type || st == jl_string_type) {
            // opaque layout - heap-allocated blob
            static const jl_datatype_layout_t opaque_byte_layout = {0, 1, -1, 1, 0, 0};
            st->layout = &opaque_byte_layout;
            return;
        }
        else if (st == jl_simplevector_type || st == jl_module_type || st->name == jl_array_typename) {
            static const jl_datatype_layout_t opaque_ptr_layout = {0, 1, -1, sizeof(void*), 0, 0};
            st->layout = &opaque_ptr_layout;
            return;
        }
        else {
            // reuse the same layout for all singletons
            static const jl_datatype_layout_t singleton_layout = {0, 0, -1, 1, 0, 0};
            st->layout = &singleton_layout;
        }
    }
    else {
        // compute a conservative estimate of whether there could exist an instance of a subtype of this
        for (i = 0; st->has_concrete_subtype && i < nfields - st->name->n_uninitialized; i++) {
            jl_value_t *fld = jl_svecref(st->types, i);
            if (fld == jl_bottom_type)
                st->has_concrete_subtype = 0;
            else
                st->has_concrete_subtype = !jl_is_datatype(fld) || ((jl_datatype_t *)fld)->has_concrete_subtype;
        }
        // compute layout for the wrapper object if the field types have no free variables
        if (!st->isconcretetype && !jl_has_fixed_layout(st)) {
            assert(st == w); // otherwise caller should not have requested this layout
            return;
        }
    }

    // isbits requires concrete + inline-allocatable + all fields isbits
    int isbitstype = st->isconcretetype && st->name->mayinlinealloc;
    for (i = 0; isbitstype && i < nfields; i++) {
        jl_value_t *fld = jl_field_type(st, i);
        isbitstype = jl_isbits(fld);
    }

    // if we didn't reuse the layout above, compute it now
    if (st->layout == NULL) {
        // field-descriptor scratch space: heap for very large structs, stack otherwise
        size_t descsz = nfields * sizeof(jl_fielddesc32_t);
        jl_fielddesc32_t *desc;
        uint32_t *pointers;
        int should_malloc = descsz >= jl_page_size;
        if (should_malloc)
            desc = (jl_fielddesc32_t*)malloc_s(descsz);
        else
            desc = (jl_fielddesc32_t*)alloca(descsz);
        size_t sz = 0;
        size_t alignm = 1;
        int zeroinit = 0;
        int haspadding = 0;
        int homogeneous = 1;
        int needlock = 0;
        uint32_t npointers = 0;
        jl_value_t *firstty = jl_field_type(st, 0);
        for (i = 0; i < nfields; i++) {
            jl_value_t *fld = jl_field_type(st, i);
            int isatomic = jl_field_isatomic(st, i);
            size_t fsz = 0, al = 1;
            if (jl_islayout_inline(fld, &fsz, &al) && (!isatomic || jl_is_datatype(fld))) { // aka jl_datatype_isinlinealloc
                if (__unlikely(fsz > max_size))
                    // Should never happen
                    throw_ovf(should_malloc, desc, st, fsz);
                desc[i].isptr = 0;
                if (jl_is_uniontype(fld)) {
                    haspadding = 1;
                    fsz += 1; // selector byte
                    zeroinit = 1;
                }
                else {
                    // inlined struct field: inherit its padding/zeroinit/pointer info
                    uint32_t fld_npointers = ((jl_datatype_t*)fld)->layout->npointers;
                    if (((jl_datatype_t*)fld)->layout->haspadding)
                        haspadding = 1;
                    if (i >= nfields - st->name->n_uninitialized && fld_npointers &&
                        fld_npointers * sizeof(void*) != fsz) {
                        // field may be undef (may be uninitialized and contains pointer),
                        // and contains non-pointer fields of non-zero sizes.
                        haspadding = 1;
                    }
                    if (!zeroinit)
                        zeroinit = ((jl_datatype_t*)fld)->zeroinit;
                    npointers += fld_npointers;
                }
            }
            else {
                // stored as a boxed reference: one GC-tracked pointer slot
                fsz = sizeof(void*);
                if (fsz > MAX_ALIGN)
                    fsz = MAX_ALIGN;
                al = fsz;
                desc[i].isptr = 1;
                zeroinit = 1;
                npointers++;
                if (!jl_pointer_egal(fld)) {
                    // this somewhat poorly named flag says whether some of the bits can be non-unique
                    haspadding = 1;
                }
            }
            if (isatomic && fsz > MAX_ATOMIC_SIZE)
                needlock = 1;   // too large for hardware atomics: guard the object with a lock
            if (isatomic && fsz <= MAX_ATOMIC_SIZE)
                al = fsz = next_power_of_two(fsz);  // atomics need power-of-two size/alignment
            if (al != 0) {
                // pad up to this field's alignment
                size_t alsz = LLT_ALIGN(sz, al);
                if (alsz != sz)
                    haspadding = 1;
                sz = alsz;
                if (al > alignm)
                    alignm = al;
            }
            homogeneous &= firstty == fld;
            desc[i].offset = sz;
            desc[i].size = fsz;
            if (__unlikely(max_offset - sz < fsz))
                throw_ovf(should_malloc, desc, st, sz);
            sz += fsz;
        }
        if (needlock) {
            // reserve space for the lock at the front of the object,
            // shifting every field by the (aligned) lock size
            size_t offset = LLT_ALIGN(sizeof(jl_mutex_t), alignm);
            for (i = 0; i < nfields; i++) {
                desc[i].offset += offset;
            }
            if (__unlikely(max_offset - sz < offset))
                throw_ovf(should_malloc, desc, st, sz);
            sz += offset;
            haspadding = 1;
        }
        if (homogeneous && jl_is_tuple_type(st)) {
            // Some tuples become LLVM vectors with stronger alignment than what was calculated above.
            unsigned al = jl_special_vector_alignment(nfields, firstty);
            assert(al % alignm == 0);
            if (al > alignm)
                alignm = al;
        }
        st->size = LLT_ALIGN(sz, alignm);
        if (st->size > sz)
            haspadding = 1;
        // collect word-offsets of all pointer slots, including those nested
        // inside inlined struct fields
        if (should_malloc && npointers)
            pointers = (uint32_t*)malloc_s(npointers * sizeof(uint32_t));
        else
            pointers = (uint32_t*)alloca(npointers * sizeof(uint32_t));
        size_t ptr_i = 0;
        for (i = 0; i < nfields; i++) {
            jl_value_t *fld = jl_field_type(st, i);
            uint32_t offset = desc[i].offset / sizeof(jl_value_t**);
            if (desc[i].isptr)
                pointers[ptr_i++] = offset;
            else if (jl_is_datatype(fld)) {
                int j, npointers = ((jl_datatype_t*)fld)->layout->npointers;
                for (j = 0; j < npointers; j++) {
                    pointers[ptr_i++] = offset + jl_ptr_offset((jl_datatype_t*)fld, j);
                }
            }
        }
        assert(ptr_i == npointers);
        st->layout = jl_get_layout(nfields, npointers, alignm, haspadding, desc, pointers);
        if (should_malloc) {
            free(desc);
            if (npointers)
                free(pointers);
        }
        st->zeroinit = zeroinit;
    }
    // now finish deciding if this instantiation qualifies for special properties
    assert(!isbitstype || st->layout->npointers == 0); // the definition of isbits
    st->isbitstype = isbitstype;
    jl_maybe_allocate_singleton_instance(st);
    return;
}
// An anonymous-function typename looks like "#...#<digit>...": it starts with
// exactly one '#', and its last '#' (beyond position 0) is followed by a digit.
static int is_anonfn_typename(char *name)
{
    if (name[0] != '#' || name[1] == '#')
        return 0;
    char *last_hash = strrchr(name, '#');   // never NULL: name[0] is '#'
    if (last_hash <= &name[1])
        return 0;
    return last_hash[1] >= '0' && last_hash[1] <= '9';
}
// Construct a new datatype `name` in `module` with supertype `super`.
// `fnames`/`ftypes` give field names and types; `fattrs` is a flat svec of
// (field-index, attribute-symbol) pairs (currently only `atomic` is accepted);
// `ninitialized` is the number of leading fields guaranteed to be assigned.
// If `name` is already a jl_typename_t, it is reused (Serialization bypass).
JL_DLLEXPORT jl_datatype_t *jl_new_datatype(
        jl_sym_t *name,
        jl_module_t *module,
        jl_datatype_t *super,
        jl_svec_t *parameters,
        jl_svec_t *fnames,
        jl_svec_t *ftypes,
        jl_svec_t *fattrs,
        int abstract, int mutabl,
        int ninitialized)
{
    jl_datatype_t *t = NULL;
    jl_typename_t *tn = NULL;
    JL_GC_PUSH2(&t, &tn);

    assert(parameters);

    // init enough before possibly calling jl_new_typename_in
    t = jl_new_uninitialized_datatype();
    t->super = super;
    if (super != NULL) jl_gc_wb(t, t->super);
    t->parameters = parameters;
    jl_gc_wb(t, t->parameters);
    t->types = ftypes;
    if (ftypes != NULL) jl_gc_wb(t, t->types);
    t->size = 0;

    t->name = NULL;
    if (jl_is_typename(name)) {
        // This code-path is used by the Serialization module to by-pass normal expectations
        tn = (jl_typename_t*)name;
        tn->abstract = abstract;
        tn->mutabl = mutabl;
    }
    else {
        tn = jl_new_typename_in((jl_sym_t*)name, module, abstract, mutabl);
        if (super == jl_function_type || super == jl_builtin_type || is_anonfn_typename(jl_symbol_name(name))) {
            // Callable objects (including compiler-generated closures) get independent method tables
            // as an optimization
            tn->mt = jl_new_method_table(name, module);
            jl_gc_wb(tn, tn->mt);
            // NOTE(review): offs=1 appears to shift the cache-indexed argument
            // past the function itself -- confirm against gf.c
            if (jl_svec_len(parameters) == 0 && !abstract)
                tn->mt->offs = 1;
        }
        else {
            // Everything else, gets to use the unified table
            tn->mt = jl_nonfunction_mt;
        }
    }
    t->name = tn;
    jl_gc_wb(t, t->name);
    t->name->names = fnames;
    jl_gc_wb(t->name, t->name->names);
    tn->n_uninitialized = jl_svec_len(fnames) - ninitialized;

    // parse field attributes into the typename's atomicfields bitmask;
    // `volatile` so the JL_CATCH path sees the up-to-date pointer after longjmp
    uint32_t *volatile atomicfields = NULL;
    int i;
    JL_TRY {
        for (i = 0; i + 1 < jl_svec_len(fattrs); i += 2) {
            jl_value_t *fldi = jl_svecref(fattrs, i);
            jl_sym_t *attr = (jl_sym_t*)jl_svecref(fattrs, i + 1);
            JL_TYPECHK(typeassert, long, fldi);
            JL_TYPECHK(typeassert, symbol, (jl_value_t*)attr);
            size_t fldn = jl_unbox_long(fldi);
            if (fldn < 1 || fldn > jl_svec_len(fnames))
                jl_errorf("invalid field attribute %lld", (long long)fldn);
            fldn--;     // 1-based in the svec, 0-based in the bitmask
            if (attr == atomic_sym) {
                if (!mutabl)
                    jl_errorf("invalid field attribute atomic for immutable struct");
                if (atomicfields == NULL) {
                    // one bit per field, rounded up to whole uint32 words
                    size_t nb = (jl_svec_len(fnames) + 31) / 32 * sizeof(uint32_t);
                    atomicfields = (uint32_t*)malloc_s(nb);
                    memset(atomicfields, 0, nb);
                }
                atomicfields[fldn / 32] |= 1 << (fldn % 32);
            }
            else {
                jl_errorf("invalid field attribute %s", jl_symbol_name(attr));
            }
        }
    }
    JL_CATCH {
        // don't leak the bitmask when attribute validation throws
        if (atomicfields)
            free(atomicfields);
        jl_rethrow();
    }
    tn->atomicfields = atomicfields;

    if (t->name->wrapper == NULL) {
        // build the UnionAll wrapper `Name{T1,T2,...}` around this type,
        // innermost parameter first
        t->name->wrapper = (jl_value_t*)t;
        jl_gc_wb(t->name, t);
        int i, np = jl_svec_len(parameters);
        for (i = np - 1; i >= 0; i--) {
            t->name->wrapper = jl_new_struct(jl_unionall_type, jl_svecref(parameters, i), t->name->wrapper);
            jl_gc_wb(t->name, t->name->wrapper);
        }
        if (!mutabl && !abstract && ftypes != NULL)
            tn->mayinlinealloc = 1;
    }
    jl_precompute_memoized_dt(t, 0);

    if (!abstract && t->types != NULL)
        jl_compute_field_offsets(t);

    JL_GC_POP();
    return t;
}
// Create a primitive (bits) type carrying `nbits` bits of data and no fields.
// Alignment is the byte size rounded up to a power of two, capped at MAX_ALIGN.
JL_DLLEXPORT jl_datatype_t *jl_new_primitivetype(jl_value_t *name, jl_module_t *module,
                                                 jl_datatype_t *super,
                                                 jl_svec_t *parameters, size_t nbits)
{
    jl_datatype_t *bt = jl_new_datatype((jl_sym_t*)name, module, super, parameters,
                                        jl_emptysvec, jl_emptysvec, jl_emptysvec, 0, 0, 0);
    uint32_t nbytes = (nbits + 7) / 8;      // round bits up to whole bytes
    uint32_t alignm = next_power_of_two(nbytes);
    if (alignm > MAX_ALIGN)
        alignm = MAX_ALIGN;
    // only non-parameterized primitive types are marked isbits here
    bt->isbitstype = (parameters == jl_emptysvec);
    bt->size = nbytes;
    bt->layout = jl_get_layout(0, 0, alignm, 0, NULL, NULL);
    bt->instance = NULL;
    return bt;
}
// Create a "foreign" type for GC extensions (see julia_gcext.h): instances are
// traced by the supplied mark/sweep callbacks instead of a field layout.
// fielddesc_type == 3 marks the layout as foreign (see jl_is_foreign_type).
// `large` sets size past GC_MAX_SZCLASS -- NOTE(review): presumably so
// instances take the large-object allocation path; confirm against gc.c.
JL_DLLEXPORT jl_datatype_t * jl_new_foreign_type(jl_sym_t *name,
                                                 jl_module_t *module,
                                                 jl_datatype_t *super,
                                                 jl_markfunc_t markfunc,
                                                 jl_sweepfunc_t sweepfunc,
                                                 int haspointers,
                                                 int large)
{
    jl_datatype_t *bt = jl_new_datatype(name, module, super,
                                        jl_emptysvec, jl_emptysvec, jl_emptysvec, jl_emptysvec, 0, 1, 0);
    bt->size = large ? GC_MAX_SZCLASS+1 : 0;
    jl_datatype_layout_t *layout = (jl_datatype_layout_t *)
        jl_gc_perm_alloc(sizeof(jl_datatype_layout_t) + sizeof(jl_fielddescdyn_t),
                         0, 4, 0);
    layout->nfields = 0;
    layout->alignment = sizeof(void *);
    layout->haspadding = 1;
    layout->npointers = haspointers;
    layout->fielddesc_type = 3;
    // the mark/sweep callbacks live in a jl_fielddescdyn_t right after the layout
    jl_fielddescdyn_t * desc =
        (jl_fielddescdyn_t *) ((char *)layout + sizeof(*layout));
    desc->markfunc = markfunc;
    desc->sweepfunc = sweepfunc;
    bt->layout = layout;
    bt->instance = NULL;
    return bt;
}
// A foreign (GC-extension) type is recognized by its sentinel layout kind:
// fielddesc_type == 3, as installed by jl_new_foreign_type above.
JL_DLLEXPORT int jl_is_foreign_type(jl_datatype_t *dt)
{
    if (!jl_is_datatype(dt))
        return 0;
    const jl_datatype_layout_t *layout = dt->layout;
    return layout != NULL && layout->fielddesc_type == 3;
}
// bits constructors ----------------------------------------------------------
// sanity-check the configured atomic size limits at compile time
#if MAX_ATOMIC_SIZE > MAX_POINTERATOMIC_SIZE
#error MAX_ATOMIC_SIZE too large
#endif
#if MAX_POINTERATOMIC_SIZE > 16
#error MAX_POINTERATOMIC_SIZE too large
#endif
#if MAX_POINTERATOMIC_SIZE >= 16
typedef __uint128_t uint128_t;
#endif

// Construct a boxed value of bits type `dt` from raw bytes at `data`.
JL_DLLEXPORT jl_value_t *jl_new_bits(jl_value_t *dt, const void *data)
{
    // data may not have the alignment required by the size
    // but will always have the alignment required by the datatype
    assert(jl_is_datatype(dt));
    jl_datatype_t *bt = (jl_datatype_t*)dt;
    size_t nb = jl_datatype_size(bt);
    // some types have special pools to minimize allocations
    if (nb == 0) return jl_new_struct_uninit(bt); // returns bt->instance
    if (bt == jl_bool_type)   return (1 & *(int8_t*)data) ? jl_true : jl_false;
    if (bt == jl_uint8_type)  return jl_box_uint8(*(uint8_t*)data);
    if (bt == jl_int64_type)  return jl_box_int64(*(int64_t*)data);
    if (bt == jl_int32_type)  return jl_box_int32(*(int32_t*)data);
    if (bt == jl_int8_type)   return jl_box_int8(*(int8_t*)data);
    if (bt == jl_int16_type)  return jl_box_int16(*(int16_t*)data);
    if (bt == jl_uint64_type) return jl_box_uint64(*(uint64_t*)data);
    if (bt == jl_uint32_type) return jl_box_uint32(*(uint32_t*)data);
    if (bt == jl_uint16_type) return jl_box_uint16(*(uint16_t*)data);
    if (bt == jl_char_type)   return jl_box_char(*(uint32_t*)data);

    jl_task_t *ct = jl_current_task;
    jl_value_t *v = jl_gc_alloc(ct->ptls, nb, bt);
    // copy with the widest load the size allows (source may be under-aligned)
    switch (nb) {
    case  1: *(uint8_t*) v = *(uint8_t*)data;            break;
    case  2: *(uint16_t*)v = jl_load_unaligned_i16(data); break;
    case  4: *(uint32_t*)v = jl_load_unaligned_i32(data); break;
    case  8: *(uint64_t*)v = jl_load_unaligned_i64(data); break;
    case 16:
        memcpy(jl_assume_aligned(v, 16), data, 16);
        break;
    default: memcpy(v, data, nb);
    }
    return v;
}
// Like jl_new_bits, but reads the source value with a single atomic load.
JL_DLLEXPORT jl_value_t *jl_atomic_new_bits(jl_value_t *dt, const char *data)
{
    // data must have the required alignment for an atomic of the given size
    assert(jl_is_datatype(dt));
    jl_datatype_t *bt = (jl_datatype_t*)dt;
    size_t nb = jl_datatype_size(bt);
    // some types have special pools to minimize allocations
    if (nb == 0) return jl_new_struct_uninit(bt); // returns bt->instance
    if (bt == jl_bool_type)   return (1 & jl_atomic_load((int8_t*)data)) ? jl_true : jl_false;
    if (bt == jl_uint8_type)  return jl_box_uint8(jl_atomic_load((uint8_t*)data));
    if (bt == jl_int64_type)  return jl_box_int64(jl_atomic_load((int64_t*)data));
    if (bt == jl_int32_type)  return jl_box_int32(jl_atomic_load((int32_t*)data));
    if (bt == jl_int8_type)   return jl_box_int8(jl_atomic_load((int8_t*)data));
    if (bt == jl_int16_type)  return jl_box_int16(jl_atomic_load((int16_t*)data));
    if (bt == jl_uint64_type) return jl_box_uint64(jl_atomic_load((uint64_t*)data));
    if (bt == jl_uint32_type) return jl_box_uint32(jl_atomic_load((uint32_t*)data));
    if (bt == jl_uint16_type) return jl_box_uint16(jl_atomic_load((uint16_t*)data));
    if (bt == jl_char_type)   return jl_box_char(jl_atomic_load((uint32_t*)data));

    jl_task_t *ct = jl_current_task;
    jl_value_t *v = jl_gc_alloc(ct->ptls, nb, bt);
    // sizes beyond the platform's atomic limit are unreachable here (callers
    // use a lock instead -- see needlock in jl_compute_field_offsets)
    switch (nb) {
    case  1: *(uint8_t*) v = jl_atomic_load((uint8_t*)data);  break;
    case  2: *(uint16_t*)v = jl_atomic_load((uint16_t*)data); break;
    case  4: *(uint32_t*)v = jl_atomic_load((uint32_t*)data); break;
#if MAX_POINTERATOMIC_SIZE >= 8
    case  8: *(uint64_t*)v = jl_atomic_load((uint64_t*)data); break;
#endif
#if MAX_POINTERATOMIC_SIZE >= 16
    case 16: *(uint128_t*)v = jl_atomic_load((uint128_t*)data); break;
#endif
    default:
        abort();
    }
    return v;
}
// Atomically store the first `nb` bytes of boxed value `src` into `dst`.
JL_DLLEXPORT void jl_atomic_store_bits(char *dst, const jl_value_t *src, int nb)
{
    // dst must have the required alignment for an atomic of the given size
    // src must be aligned by the GC
    switch (nb) {
    case  0: break;     // zero-size value: nothing to store
    case  1: jl_atomic_store((uint8_t*)dst, *(uint8_t*)src);   break;
    case  2: jl_atomic_store((uint16_t*)dst, *(uint16_t*)src); break;
    case  4: jl_atomic_store((uint32_t*)dst, *(uint32_t*)src); break;
#if MAX_POINTERATOMIC_SIZE >= 8
    case  8: jl_atomic_store((uint64_t*)dst, *(uint64_t*)src); break;
#endif
#if MAX_POINTERATOMIC_SIZE >= 16
    case 16: jl_atomic_store((uint128_t*)dst, *(uint128_t*)src); break;
#endif
    default:
        abort();    // non-power-of-two / oversized atomics should use a lock
    }
}
// Atomically exchange `nb` bytes at `dst` with the value in `src`,
// returning the previous contents boxed as a value of type `dt`.
JL_DLLEXPORT jl_value_t *jl_atomic_swap_bits(jl_value_t *dt, char *dst, const jl_value_t *src, int nb)
{
    // dst must have the required alignment for an atomic of the given size
    assert(jl_is_datatype(dt));
    jl_datatype_t *bt = (jl_datatype_t*)dt;
    // some types have special pools to minimize allocations
    if (nb == 0) return jl_new_struct_uninit(bt); // returns bt->instance
    if (bt == jl_bool_type)   return (1 & jl_atomic_exchange((int8_t*)dst, 1 & *(int8_t*)src)) ? jl_true : jl_false;
    // (int8_t read below converts implicitly to uint8_t -- same bit pattern)
    if (bt == jl_uint8_type)  return jl_box_uint8(jl_atomic_exchange((uint8_t*)dst, *(int8_t*)src));
    if (bt == jl_int64_type)  return jl_box_int64(jl_atomic_exchange((int64_t*)dst, *(int64_t*)src));
    if (bt == jl_int32_type)  return jl_box_int32(jl_atomic_exchange((int32_t*)dst, *(int32_t*)src));
    if (bt == jl_int8_type)   return jl_box_int8(jl_atomic_exchange((int8_t*)dst, *(int8_t*)src));
    if (bt == jl_int16_type)  return jl_box_int16(jl_atomic_exchange((int16_t*)dst, *(int16_t*)src));
    if (bt == jl_uint64_type) return jl_box_uint64(jl_atomic_exchange((uint64_t*)dst, *(uint64_t*)src));
    if (bt == jl_uint32_type) return jl_box_uint32(jl_atomic_exchange((uint32_t*)dst, *(uint32_t*)src));
    if (bt == jl_uint16_type) return jl_box_uint16(jl_atomic_exchange((uint16_t*)dst, *(uint16_t*)src));
    if (bt == jl_char_type)   return jl_box_char(jl_atomic_exchange((uint32_t*)dst, *(uint32_t*)src));

    jl_task_t *ct = jl_current_task;
    jl_value_t *v = jl_gc_alloc(ct->ptls, jl_datatype_size(bt), bt);
    switch (nb) {
    case  1: *(uint8_t*) v = jl_atomic_exchange((uint8_t*)dst, *(uint8_t*)src);   break;
    case  2: *(uint16_t*)v = jl_atomic_exchange((uint16_t*)dst, *(uint16_t*)src); break;
    case  4: *(uint32_t*)v = jl_atomic_exchange((uint32_t*)dst, *(uint32_t*)src); break;
#if MAX_POINTERATOMIC_SIZE >= 8
    case  8: *(uint64_t*)v = jl_atomic_exchange((uint64_t*)dst, *(uint64_t*)src); break;
#endif
#if MAX_POINTERATOMIC_SIZE >= 16
    case 16: *(uint128_t*)v = jl_atomic_exchange((uint128_t*)dst, *(uint128_t*)src); break;
#endif
    default:
        abort();
    }
    return v;
}
// Atomically compare-and-swap `nb` bytes at `dst` from `expected` to `src`,
// returning only the success flag (the observed old value is discarded).
JL_DLLEXPORT int jl_atomic_bool_cmpswap_bits(char *dst, const jl_value_t *expected, const jl_value_t *src, int nb)
{
    // dst must have the required alignment for an atomic of the given size
    // n.b.: this can spuriously fail if there are padding bits, the caller should deal with that
    int success;
    switch (nb) {
    case 0: {
        success = 1;    // zero-size values always compare equal
        break;
    }
    case 1: {
        uint8_t y = *(uint8_t*)expected;
        success = jl_atomic_cmpswap((uint8_t*)dst, &y, *(uint8_t*)src);
        break;
    }
    case 2: {
        uint16_t y = *(uint16_t*)expected;
        success = jl_atomic_cmpswap((uint16_t*)dst, &y, *(uint16_t*)src);
        break;
    }
    case 4: {
        uint32_t y = *(uint32_t*)expected;
        success = jl_atomic_cmpswap((uint32_t*)dst, &y, *(uint32_t*)src);
        break;
    }
#if MAX_POINTERATOMIC_SIZE >= 8
    case 8: {
        uint64_t y = *(uint64_t*)expected;
        success = jl_atomic_cmpswap((uint64_t*)dst, &y, *(uint64_t*)src);
        break;
    }
#endif
#if MAX_POINTERATOMIC_SIZE >= 16
    case 16: {
        uint128_t y = *(uint128_t*)expected;
        success = jl_atomic_cmpswap((uint128_t*)dst, &y, *(uint128_t*)src);
        break;
    }
#endif
    default:
        abort();
    }
    return success;
}
// Atomically compare-and-swap `nb` bytes at `dst` against `expected`,
// storing `src` on success. Returns a freshly allocated Tuple{dt, Bool} of
// (value previously at dst, success flag). If `expected` is not even of type
// `dt`, the swap cannot succeed: just load the current value.
// Where padding bits exist, a failed hardware CAS is retried as long as the
// observed value is still jl_egal to `expected` (padding-only differences
// must not cause failure).
JL_DLLEXPORT jl_value_t *jl_atomic_cmpswap_bits(jl_datatype_t *dt, char *dst, const jl_value_t *expected, const jl_value_t *src, int nb)
{
    // dst must have the required alignment for an atomic of the given size
    // n.b.: this does not spuriously fail if there are padding bits
    jl_value_t *params[2];
    params[0] = (jl_value_t*)dt;
    params[1] = (jl_value_t*)jl_bool_type;
    jl_datatype_t *tuptyp = jl_apply_tuple_type_v(params, 2);
    JL_GC_PROMISE_ROOTED(tuptyp); // (JL_ALWAYS_LEAFTYPE)
    int isptr = jl_field_isptr(tuptyp, 0);
    jl_task_t *ct = jl_current_task;
    // if dt is stored boxed in the tuple, first box the old value alone;
    // the tuple itself is built afterward (see the `isptr` branch at the end)
    jl_value_t *y = jl_gc_alloc(ct->ptls, isptr ? nb : tuptyp->size, isptr ? dt : tuptyp);
    int success;
    jl_datatype_t *et = (jl_datatype_t*)jl_typeof(expected);
    switch (nb) {
    case 0: {
        success = (dt == et);
        break;
    }
    case 1: {
        uint8_t *y8 = (uint8_t*)y;
        if (dt == et) {
            *y8 = *(uint8_t*)expected;
            // no retry loop: a 1-byte value has no padding bits
            success = jl_atomic_cmpswap((uint8_t*)dst, y8, *(uint8_t*)src);
        }
        else {
            *y8 = jl_atomic_load((uint8_t*)dst);
            success = 0;
        }
        break;
    }
    case 2: {
        uint16_t *y16 = (uint16_t*)y;
        if (dt == et) {
            *y16 = *(uint16_t*)expected;
            while (1) {
                success = jl_atomic_cmpswap((uint16_t*)dst, y16, *(uint16_t*)src);
                // retry only when the CAS failed purely due to padding bits:
                // i.e. the loaded value (now in *y16/y) is still egal to expected
                if (success || !dt->layout->haspadding || !jl_egal__bits(y, expected, dt))
                    break;
            }
        }
        else {
            *y16 = jl_atomic_load((uint16_t*)dst);
            success = 0;
        }
        break;
    }
    case 4: {
        uint32_t *y32 = (uint32_t*)y;
        if (dt == et) {
            *y32 = *(uint32_t*)expected;
            while (1) {
                success = jl_atomic_cmpswap((uint32_t*)dst, y32, *(uint32_t*)src);
                if (success || !dt->layout->haspadding || !jl_egal__bits(y, expected, dt))
                    break;
            }
        }
        else {
            *y32 = jl_atomic_load((uint32_t*)dst);
            success = 0;
        }
        break;
    }
#if MAX_POINTERATOMIC_SIZE >= 8
    case 8: {
        uint64_t *y64 = (uint64_t*)y;
        if (dt == et) {
            *y64 = *(uint64_t*)expected;
            while (1) {
                success = jl_atomic_cmpswap((uint64_t*)dst, y64, *(uint64_t*)src);
                if (success || !dt->layout->haspadding || !jl_egal__bits(y, expected, dt))
                    break;
            }
        }
        else {
            *y64 = jl_atomic_load((uint64_t*)dst);
            success = 0;
        }
        break;
    }
#endif
#if MAX_POINTERATOMIC_SIZE >= 16
    case 16: {
        uint128_t *y128 = (uint128_t*)y;
        if (dt == et) {
            *y128 = *(uint128_t*)expected;
            while (1) {
                success = jl_atomic_cmpswap((uint128_t*)dst, y128, *(uint128_t*)src);
                if (success || !dt->layout->haspadding || !jl_egal__bits(y, expected, dt))
                    break;
            }
        }
        else {
            *y128 = jl_atomic_load((uint128_t*)dst);
            success = 0;
        }
        break;
    }
#endif
    default:
        abort();
    }
    if (isptr) {
        // wrap the boxed old value into the result tuple
        JL_GC_PUSH1(&y);
        jl_value_t *z = jl_gc_alloc(ct->ptls, tuptyp->size, tuptyp);
        *(jl_value_t**)z = y;
        JL_GC_POP();
        y = z;
        nb = sizeof(jl_value_t*);
    }
    // the Bool success flag sits right after the first tuple field
    *((uint8_t*)y + nb) = success ? 1 : 0;
    return y;
}
// used by boot.jl
// Construct the all-ones (maximum unsigned) value for primitive type `bt`.
// NOTE(review): always allocates and copies exactly sizeof(size_t) bytes,
// so this assumes jl_datatype_size(bt) == sizeof(size_t) -- confirm callers.
JL_DLLEXPORT jl_value_t *jl_typemax_uint(jl_value_t *bt)
{
    uint64_t data = 0xffffffffffffffffULL;
    jl_task_t *ct = jl_current_task;
    jl_value_t *v = jl_gc_alloc(ct->ptls, sizeof(size_t), bt);
    memcpy(v, &data, sizeof(size_t));
    return v;
}
// Define jl_permbox{8,16,32,64}: box a small integer value into an object
// allocated with jl_gc_permobj (i.e. outside the ordinary collected heap).
// `nb` is the payload width in bits, `nw` the object size in words.
#define PERMBOXN_FUNC(nb,nw) \
    jl_value_t *jl_permbox##nb(jl_datatype_t *t, int##nb##_t x) \
    { /* NOTE: t must be a concrete isbits datatype */ \
        assert(jl_datatype_size(t) == sizeof(x)); \
        jl_value_t *v = jl_gc_permobj(nw * sizeof(void*), t); \
        *(int##nb##_t*)jl_data_ptr(v) = x; \
        return v; \
    }
PERMBOXN_FUNC(8, 1)
PERMBOXN_FUNC(16, 1)
PERMBOXN_FUNC(32, 1)
#ifdef _P64
PERMBOXN_FUNC(64, 1)    // a 64-bit payload fits in one word on 64-bit targets
#else
PERMBOXN_FUNC(64, 2)    // ...but needs two words on 32-bit targets
#endif
/*
 * UNBOX_FUNC(j_type, c_type): defines jl_unbox_<j_type>, which extracts
 * the raw c_type payload from a boxed primitive value. The asserts check
 * that `v` really is a primitive of the matching size.
 */
#define UNBOX_FUNC(j_type,c_type)                                       \
    JL_DLLEXPORT c_type jl_unbox_##j_type(jl_value_t *v)                \
    {                                                                   \
        assert(jl_is_primitivetype(jl_typeof(v)));                      \
        assert(jl_datatype_size(jl_typeof(v)) == sizeof(c_type));       \
        return *(c_type*)jl_data_ptr(v);                                \
    }
UNBOX_FUNC(int8,   int8_t)
UNBOX_FUNC(uint8,  uint8_t)
UNBOX_FUNC(int16,  int16_t)
UNBOX_FUNC(uint16, uint16_t)
UNBOX_FUNC(int32,  int32_t)
UNBOX_FUNC(uint32, uint32_t)
UNBOX_FUNC(int64,  int64_t)
UNBOX_FUNC(uint64, uint64_t)
UNBOX_FUNC(bool,   int8_t)
UNBOX_FUNC(float32, float)
UNBOX_FUNC(float64, double)
UNBOX_FUNC(voidpointer, void*)
UNBOX_FUNC(uint8pointer, uint8_t*)
/*
 * BOX_FUNC(typ, c_type, pfx, nw): defines pfx_<typ>, which heap-allocates
 * a fresh box of jl_<typ>_type (nw pointer-words of payload space) and
 * stores `x` into it. No caching — every call allocates.
 */
#define BOX_FUNC(typ,c_type,pfx,nw)                                     \
    JL_DLLEXPORT jl_value_t *pfx##_##typ(c_type x)                      \
    {                                                                   \
        jl_task_t *ct = jl_current_task;                                \
        jl_value_t *v = jl_gc_alloc(ct->ptls, nw * sizeof(void*),       \
                                    jl_##typ##_type);                   \
        *(c_type*)jl_data_ptr(v) = x;                                   \
        return v;                                                       \
    }
BOX_FUNC(float32, float,  jl_box, 1)
BOX_FUNC(voidpointer, void*,  jl_box, 1)
BOX_FUNC(uint8pointer, uint8_t*,  jl_box, 1)
#ifdef _P64
BOX_FUNC(float64, double, jl_box, 1)
#else
// on 32-bit platforms a double needs two pointer-words of storage
BOX_FUNC(float64, double, jl_box, 2)
#endif
// Number of entries in each small-integer box cache below.
#define NBOX_C 1024

/*
 * SIBOX_FUNC(typ, c_type, nw): boxing function for signed integers with a
 * permanent cache covering -NBOX_C/2 .. NBOX_C/2-1; the cache index is the
 * value shifted up by NBOX_C/2, and the unsigned comparison after the shift
 * performs the range check in one test. Out-of-range values allocate.
 */
#define SIBOX_FUNC(typ,c_type,nw)\
    static jl_value_t *boxed_##typ##_cache[NBOX_C];                     \
    JL_DLLEXPORT jl_value_t *jl_box_##typ(c_type x)                     \
    {                                                                   \
        jl_task_t *ct = jl_current_task;                                \
        c_type idx = x+NBOX_C/2;                                        \
        if ((u##c_type)idx < (u##c_type)NBOX_C)                         \
            return boxed_##typ##_cache[idx];                            \
        jl_value_t *v = jl_gc_alloc(ct->ptls, nw * sizeof(void*),       \
                                    jl_##typ##_type);                   \
        *(c_type*)jl_data_ptr(v) = x;                                   \
        return v;                                                       \
    }
/*
 * UIBOX_FUNC(typ, c_type, nw): boxing function for unsigned integers with
 * a permanent cache covering 0 .. NBOX_C-1. Out-of-range values allocate.
 */
#define UIBOX_FUNC(typ,c_type,nw)                                       \
    static jl_value_t *boxed_##typ##_cache[NBOX_C];                     \
    JL_DLLEXPORT jl_value_t *jl_box_##typ(c_type x)                     \
    {                                                                   \
        jl_task_t *ct = jl_current_task;                                \
        if (x < NBOX_C)                                                 \
            return boxed_##typ##_cache[x];                              \
        jl_value_t *v = jl_gc_alloc(ct->ptls, nw * sizeof(void*),       \
                                    jl_##typ##_type);                   \
        *(c_type*)jl_data_ptr(v) = x;                                   \
        return v;                                                       \
    }
SIBOX_FUNC(int16,  int16_t, 1)
SIBOX_FUNC(int32,  int32_t, 1)
UIBOX_FUNC(uint16, uint16_t, 1)
UIBOX_FUNC(uint32, uint32_t, 1)
UIBOX_FUNC(ssavalue, size_t, 1)
UIBOX_FUNC(slotnumber, size_t, 1)
#ifdef _P64
SIBOX_FUNC(int64,  int64_t, 1)
UIBOX_FUNC(uint64, uint64_t, 1)
#else
// on 32-bit platforms a 64-bit payload needs two pointer-words of storage
SIBOX_FUNC(int64,  int64_t, 2)
UIBOX_FUNC(uint64, uint64_t, 2)
#endif
// Permanent boxes for the 128 ASCII Chars (filled by jl_init_box_caches,
// which stores `i << 24` in each entry).
static jl_value_t *boxed_char_cache[128];
JL_DLLEXPORT jl_value_t *jl_box_char(uint32_t x)
{
    jl_task_t *ct = jl_current_task;
    // Char keeps its payload in the most-significant byte(s); after a byte
    // swap an ASCII character's codepoint lands in the low byte, so it can
    // be served from the permanent cache without allocating.
    uint32_t u = bswap_32(x);
    if (u < 128)
        return boxed_char_cache[(uint8_t)u];
    jl_value_t *v = jl_gc_alloc(ct->ptls, sizeof(void*), jl_char_type);
    *(uint32_t*)jl_data_ptr(v) = x;
    return v;
}
// Int8/UInt8 have only 256 bit patterns, all permanently cached, so boxing
// them never allocates.
JL_DLLEXPORT jl_value_t *jl_boxed_int8_cache[256];
JL_DLLEXPORT jl_value_t *jl_box_int8(int8_t x)
{
    return jl_boxed_int8_cache[(uint8_t)x];
}
JL_DLLEXPORT jl_value_t *jl_boxed_uint8_cache[256];
JL_DLLEXPORT jl_value_t *jl_box_uint8(uint8_t x)
{
    return jl_boxed_uint8_cache[x];
}
// Populate the permanent boxes that must exist very early in bootstrap:
// small Int32/Int64 values (-NBOX_C/2 .. NBOX_C/2-1), SSAValue/SlotNumber
// indices (0 .. NBOX_C-1), and all 256 UInt8 values.
void jl_init_int32_int64_cache(void)
{
    for (int64_t i = 0; i < NBOX_C; i++) {
        int64_t val = i - NBOX_C / 2;
        boxed_int32_cache[i] = jl_permbox32(jl_int32_type, val);
        boxed_int64_cache[i] = jl_permbox64(jl_int64_type, val);
#ifdef _P64
        boxed_ssavalue_cache[i] = jl_permbox64(jl_ssavalue_type, i);
        boxed_slotnumber_cache[i] = jl_permbox64(jl_slotnumber_type, i);
#else
        boxed_ssavalue_cache[i] = jl_permbox32(jl_ssavalue_type, i);
        boxed_slotnumber_cache[i] = jl_permbox32(jl_slotnumber_type, i);
#endif
    }
    for (int64_t i = 0; i < 256; i++)
        jl_boxed_uint8_cache[i] = jl_permbox8(jl_uint8_type, i);
}
// Populate the remaining permanent box caches used by the jl_box_* fast
// paths above (Char, Int8, Int16, UInt16, UInt32, UInt64).
void jl_init_box_caches(void)
{
    for (int64_t i = 0; i < 128; i++)
        boxed_char_cache[i] = jl_permbox32(jl_char_type, i << 24);
    for (int64_t i = 0; i < 256; i++)
        jl_boxed_int8_cache[i] = jl_permbox8(jl_int8_type, i);
    for (int64_t i = 0; i < NBOX_C; i++) {
        boxed_int16_cache[i] = jl_permbox16(jl_int16_type, i - NBOX_C / 2);
        boxed_uint16_cache[i] = jl_permbox16(jl_uint16_type, i);
        boxed_uint32_cache[i] = jl_permbox32(jl_uint32_type, i);
        boxed_uint64_cache[i] = jl_permbox64(jl_uint64_type, i);
    }
}
// Bool has exactly two singleton instances, so boxing never allocates.
JL_DLLEXPORT jl_value_t *jl_box_bool(int8_t x)
{
    return x ? jl_true : jl_false;
}
// struct constructors --------------------------------------------------------
// Construct an instance of `type` from varargs field values.
// NOTE: performs no type checking of the arguments — callers must supply
// values matching the field types. Singleton types return their unique
// instance without consuming the varargs.
JL_DLLEXPORT jl_value_t *jl_new_struct(jl_datatype_t *type, ...)
{
    jl_task_t *ct = jl_current_task;
    if (type->instance != NULL) return type->instance;
    va_list args;
    size_t i, nf = jl_datatype_nfields(type);
    va_start(args, type);
    jl_value_t *jv = jl_gc_alloc(ct->ptls, jl_datatype_size(type), type);
    // zero any padding bytes preceding the first field
    if (nf > 0 && jl_field_offset(type, 0) != 0) {
        memset(jv, 0, jl_field_offset(type, 0));
    }
    for (i = 0; i < nf; i++) {
        set_nth_field(type, jv, i, va_arg(args, jl_value_t*), 0);
    }
    va_end(args);
    return jv;
}
// Construct an instance of `type` from `na` positional arguments, with full
// type checking. Trailing fields (up to n_uninitialized) may be omitted;
// their storage is zeroed.
JL_DLLEXPORT jl_value_t *jl_new_structv(jl_datatype_t *type, jl_value_t **args, uint32_t na)
{
    jl_task_t *ct = jl_current_task;
    if (!jl_is_datatype(type) || type->layout == NULL) {
        jl_type_error("new", (jl_value_t*)jl_datatype_type, (jl_value_t*)type);
    }
    size_t nf = jl_datatype_nfields(type);
    if (nf - type->name->n_uninitialized > na || na > nf)
        jl_error("invalid struct allocation");
    // check all argument types before allocating, so we never publish a
    // half-built object on error
    for (size_t i = 0; i < na; i++) {
        jl_value_t *ft = jl_field_type_concrete(type, i);
        if (!jl_isa(args[i], ft))
            jl_type_error("new", ft, args[i]);
    }
    if (type->instance != NULL)
        return type->instance;
    jl_value_t *jv = jl_gc_alloc(ct->ptls, jl_datatype_size(type), type);
    if (jl_datatype_nfields(type) > 0) {
        // zero any padding bytes preceding the first field
        if (jl_field_offset(type, 0) != 0) {
            memset(jl_data_ptr(jv), 0, jl_field_offset(type, 0));
        }
        JL_GC_PUSH1(&jv);
        for (size_t i = 0; i < na; i++) {
            set_nth_field(type, jv, i, args[i], 0);
        }
        // zero the storage of any omitted trailing fields
        if (na < jl_datatype_nfields(type)) {
            char *data = (char*)jl_data_ptr(jv);
            size_t offs = jl_field_offset(type, na);
            memset(data + offs, 0, jl_datatype_size(type) - offs);
        }
        JL_GC_POP();
    }
    return jv;
}
// Construct an instance of `type` from a tuple of field values (the
// splatted form of `new`). The tuple must supply exactly nf values.
JL_DLLEXPORT jl_value_t *jl_new_structt(jl_datatype_t *type, jl_value_t *tup)
{
    jl_task_t *ct = jl_current_task;
    if (!jl_is_tuple(tup))
        jl_type_error("new", (jl_value_t*)jl_tuple_type, tup);
    if (!jl_is_datatype(type) || type->layout == NULL)
        jl_type_error("new", (jl_value_t *)jl_datatype_type, (jl_value_t *)type);
    size_t nargs = jl_nfields(tup);
    size_t nf = jl_datatype_nfields(type);
    JL_NARGS(new, nf, nf);
    if (type->instance != NULL) {
        // singleton: still validate the tuple's element types before
        // returning the unique instance
        jl_datatype_t *tupt = (jl_datatype_t*)jl_typeof(tup);
        for (size_t i = 0; i < nargs; i++) {
            jl_value_t *ft = jl_field_type_concrete(type, i);
            jl_value_t *et = jl_field_type_concrete(tupt, i);
            assert(jl_is_concrete_type(ft) && jl_is_concrete_type(et));
            if (et != ft)
                jl_type_error("new", ft, jl_get_nth_field(tup, i));
        }
        return type->instance;
    }
    size_t size = jl_datatype_size(type);
    jl_value_t *jv = jl_gc_alloc(ct->ptls, size, type);
    if (nf == 0)
        return jv;
    jl_value_t *fi = NULL;
    if (type->zeroinit) {
        // if there are references, zero the space first to prevent the GC
        // from seeing uninitialized references during jl_get_nth_field and jl_isa,
        // which can allocate.
        memset(jl_data_ptr(jv), 0, size);
    }
    else if (jl_field_offset(type, 0) != 0) {
        // zero any padding bytes preceding the first field
        memset(jl_data_ptr(jv), 0, jl_field_offset(type, 0));
    }
    JL_GC_PUSH2(&jv, &fi);
    for (size_t i = 0; i < nargs; i++) {
        jl_value_t *ft = jl_field_type_concrete(type, i);
        fi = jl_get_nth_field(tup, i);
        if (!jl_isa(fi, ft))
            jl_type_error("new", ft, fi);
        set_nth_field(type, jv, i, fi, 0);
    }
    JL_GC_POP();
    return jv;
}
// Allocate an instance of `type` with all payload bytes zeroed and no
// field initialization performed. Singleton types return their unique
// instance instead of allocating.
JL_DLLEXPORT jl_value_t *jl_new_struct_uninit(jl_datatype_t *type)
{
    jl_task_t *ct = jl_current_task;
    if (type->instance != NULL)
        return type->instance;
    size_t nbytes = jl_datatype_size(type);
    jl_value_t *obj = jl_gc_alloc(ct->ptls, nbytes, type);
    if (nbytes > 0)
        memset(jl_data_ptr(obj), 0, nbytes);
    return obj;
}
// field access ---------------------------------------------------------------
// Acquire/release the lock stored at the front of `v` (the object is cast
// directly to jl_mutex_t*). Used below to guard atomic fields larger than
// MAX_ATOMIC_SIZE; the critical sections are kept allocation-free.
JL_DLLEXPORT void jl_lock_value(jl_value_t *v) JL_NOTSAFEPOINT
{
    JL_LOCK_NOGC((jl_mutex_t*)v);
}
JL_DLLEXPORT void jl_unlock_value(jl_value_t *v) JL_NOTSAFEPOINT
{
    JL_UNLOCK_NOGC((jl_mutex_t*)v);
}
// Look up the 0-based index of field name `fld` in datatype `t`.
// Returns -1 when not found, unless `err` is nonzero, in which case an
// error is raised instead.
JL_DLLEXPORT int jl_field_index(jl_datatype_t *t, jl_sym_t *fld, int err)
{
    jl_value_t *target = (jl_value_t*)fld;
    if (jl_is_namedtuple_type(t)) {
        // NamedTuple field names live in the first type parameter
        jl_value_t *names = jl_tparam0(t);
        if (jl_is_tuple(names)) {
            size_t n = jl_nfields(names);
            for (size_t idx = 0; idx < n; idx++) {
                if (jl_get_nth_field(names, idx) == target)
                    return (int)idx;
            }
        }
    }
    else {
        jl_svec_t *names = jl_field_names(t);
        size_t n = jl_svec_len(names);
        for (size_t idx = 0; idx < n; idx++) {
            if (jl_svecref(names, idx) == target)
                return (int)idx;
        }
    }
    if (err)
        jl_errorf("type %s has no field %s", jl_symbol_name(t->name->name),
                  jl_symbol_name(fld));
    return -1;
}
// Return field i (0-based) of struct value v, boxing bits fields into
// fresh heap objects. Returns NULL (rather than throwing) when a pointer
// field is unassigned; see jl_get_nth_field_checked for the throwing form.
JL_DLLEXPORT jl_value_t *jl_get_nth_field(jl_value_t *v, size_t i)
{
    jl_datatype_t *st = (jl_datatype_t*)jl_typeof(v);
    if (i >= jl_datatype_nfields(st))
        jl_bounds_error_int(v, i + 1);
    size_t offs = jl_field_offset(st, i);
    if (jl_field_isptr(st, i)) {
        // pointer field: load the reference directly (relaxed ordering;
        // callers needing stronger ordering add fences themselves)
        return jl_atomic_load_relaxed((jl_value_t**)((char*)v + offs));
    }
    jl_value_t *ty = jl_field_type_concrete(st, i);
    int isatomic = jl_field_isatomic(st, i);
    if (jl_is_uniontype(ty)) {
        // isbits-union field: the active member is recorded in a selector
        // byte stored after the data bytes
        assert(!isatomic);
        size_t fsz = jl_field_size(st, i);
        uint8_t sel = ((uint8_t*)v)[offs + fsz - 1];
        ty = jl_nth_union_component(ty, sel);
        if (jl_is_datatype_singleton((jl_datatype_t*)ty))
            return ((jl_datatype_t*)ty)->instance;
    }
    jl_value_t *r;
    size_t fsz = jl_datatype_size(ty);
    int needlock = (isatomic && fsz > MAX_ATOMIC_SIZE);
    if (isatomic && !needlock) {
        // small atomic field: use a hardware atomic load
        r = jl_atomic_new_bits(ty, (char*)v + offs);
    }
    else if (needlock) {
        // atomic field too large for hardware atomics: copy under the
        // per-object lock; allocate the box first so the locked section
        // stays allocation-free
        jl_task_t *ct = jl_current_task;
        r = jl_gc_alloc(ct->ptls, fsz, ty);
        jl_lock_value(v);
        memcpy((char*)r, (char*)v + offs, fsz);
        jl_unlock_value(v);
    }
    else {
        r = jl_new_bits(ty, (char*)v + offs);
    }
    // map an all-NULL first-pointer pattern back to "undefined" (NULL)
    return undefref_check((jl_datatype_t*)ty, r);
}
// Like jl_get_nth_field, but restricted (by assertion) to in-bounds
// pointer fields, so it can never allocate and is safe at non-GC points.
JL_DLLEXPORT jl_value_t *jl_get_nth_field_noalloc(jl_value_t *v JL_PROPAGATES_ROOT, size_t i) JL_NOTSAFEPOINT
{
    jl_datatype_t *st = (jl_datatype_t*)jl_typeof(v);
    assert(i < jl_datatype_nfields(st));
    size_t offs = jl_field_offset(st,i);
    assert(jl_field_isptr(st,i));
    return jl_atomic_load_relaxed((jl_value_t**)((char*)v + offs));
}
// Like jl_get_nth_field, but throws UndefRefError instead of returning
// NULL for an unassigned field.
JL_DLLEXPORT jl_value_t *jl_get_nth_field_checked(jl_value_t *v, size_t i)
{
    jl_value_t *fld = jl_get_nth_field(v, i);
    if (__unlikely(fld == NULL))
        jl_throw(jl_undefref_exception);
    return fld;
}
// Copy the isbits value boxed in `src` into the (possibly unaligned)
// destination `dst` inside `parent`. When the value embeds references
// (hasptr), the pointer-word prefix is copied with reference-safe moves
// and GC write barriers are issued for `parent`.
static inline void memassign_safe(int hasptr, jl_value_t *parent, char *dst, const jl_value_t *src, size_t nb) JL_NOTSAFEPOINT
{
    if (hasptr) {
        // assert that although dst might have some undefined bits, the src heap box should be okay with that
        assert(LLT_ALIGN(nb, sizeof(void*)) == LLT_ALIGN(jl_datatype_size(jl_typeof(src)), sizeof(void*)));
        // copy the pointer-word prefix reference-safely, then advance so
        // the switch below copies only the remaining non-pointer tail
        size_t nptr = nb / sizeof(void*);
        memmove_refs((void**)dst, (void**)src, nptr);
        jl_gc_multi_wb(parent, src);
        src = (jl_value_t*)((char*)src + nptr * sizeof(void*));
        nb -= nptr * sizeof(void*);
    }
    else {
        // src must be a heap box.
        assert(nb == jl_datatype_size(jl_typeof(src)));
    }
    // dst may be unaligned, so the common small sizes go through the
    // unaligned-store helpers
    switch (nb) {
    case  0: break;
    case  1: *(uint8_t*)dst = *(uint8_t*)src; break;
    case  2: jl_store_unaligned_i16(dst, *(uint16_t*)src); break;
    case  4: jl_store_unaligned_i32(dst, *(uint32_t*)src); break;
    case  8: jl_store_unaligned_i64(dst, *(uint64_t*)src); break;
    case 16: memcpy(dst, jl_assume_aligned(src, 16), 16); break;
    default: memcpy(dst, src, nb); break;
    }
}
// Store `rhs` into field i of `v`. No type checking is performed here —
// callers are responsible. `isatomic` selects the atomic store path for
// fields declared @atomic. A NULL rhs is tolerated only for pointer
// fields (see the TODO below).
void set_nth_field(jl_datatype_t *st, jl_value_t *v, size_t i, jl_value_t *rhs, int isatomic) JL_NOTSAFEPOINT
{
    size_t offs = jl_field_offset(st, i);
    if (rhs == NULL) { // TODO: this should be invalid, but it happens frequently in ircode.c
        assert(jl_field_isptr(st, i) && *(jl_value_t**)((char*)v + offs) == NULL);
        return;
    }
    if (jl_field_isptr(st, i)) {
        jl_atomic_store_relaxed((jl_value_t**)((char*)v + offs), rhs);
        jl_gc_wb(v, rhs);
    }
    else {
        jl_value_t *ty = jl_field_type_concrete(st, i);
        jl_value_t *rty = jl_typeof(rhs);
        int hasptr;
        int isunion = jl_is_uniontype(ty);
        if (isunion) {
            assert(!isatomic);
            // record the active union member in the trailing selector byte
            size_t fsz = jl_field_size(st, i);
            uint8_t *psel = &((uint8_t*)v)[offs + fsz - 1];
            unsigned nth = 0;
            if (!jl_find_union_component(ty, rty, &nth))
                assert(0 && "invalid field assignment to isbits union");
            *psel = nth;
            if (jl_is_datatype_singleton((jl_datatype_t*)rty))
                return; // singletons carry no data bytes
            hasptr = 0;
        }
        else {
            hasptr = ((jl_datatype_t*)ty)->layout->npointers > 0;
        }
        size_t fsz = jl_datatype_size((jl_datatype_t*)rty); // need to shrink-wrap the final copy
        int needlock = (isatomic && fsz > MAX_ATOMIC_SIZE);
        if (isatomic && !needlock) {
            jl_atomic_store_bits((char*)v + offs, rhs, fsz);
            if (hasptr)
                jl_gc_multi_wb(v, rhs); // rhs is immutable
        }
        else if (needlock) {
            // oversized atomic field: serialize through the per-object lock
            jl_lock_value(v);
            memcpy((char*)v + offs, (char*)rhs, fsz);
            jl_unlock_value(v);
        }
        else {
            memassign_safe(hasptr, v, (char*)v + offs, rhs, fsz);
        }
    }
}
// Implements swapfield!: store `rhs` into field i of `v` and return the
// previous value. Throws TypeError if rhs does not match the declared
// field type, and UndefRefError if the field was unassigned.
jl_value_t *swap_nth_field(jl_datatype_t *st, jl_value_t *v, size_t i, jl_value_t *rhs, int isatomic)
{
    jl_value_t *ty = jl_field_type_concrete(st, i);
    if (!jl_isa(rhs, ty))
        jl_type_error("swapfield!", ty, rhs);
    size_t offs = jl_field_offset(st, i);
    jl_value_t *r;
    if (jl_field_isptr(st, i)) {
        if (isatomic)
            r = jl_atomic_exchange((jl_value_t**)((char*)v + offs), rhs);
        else
            r = jl_atomic_exchange_relaxed((jl_value_t**)((char*)v + offs), rhs);
        jl_gc_wb(v, rhs);
    }
    else {
        jl_value_t *rty = jl_typeof(rhs);
        int hasptr;
        int isunion = jl_is_uniontype(ty);
        if (isunion) {
            assert(!isatomic);
            // read the old value before the selector byte is overwritten
            r = jl_get_nth_field(v, i);
            size_t fsz = jl_field_size(st, i);
            uint8_t *psel = &((uint8_t*)v)[offs + fsz - 1];
            unsigned nth = 0;
            if (!jl_find_union_component(ty, rty, &nth))
                assert(0 && "invalid field assignment to isbits union");
            *psel = nth;
            if (jl_is_datatype_singleton((jl_datatype_t*)rty))
                return r;
            hasptr = 0;
        }
        else {
            hasptr = ((jl_datatype_t*)ty)->layout->npointers > 0;
        }
        size_t fsz = jl_datatype_size((jl_datatype_t*)rty); // need to shrink-wrap the final copy
        int needlock = (isatomic && fsz > MAX_ATOMIC_SIZE);
        if (isatomic && !needlock) {
            r = jl_atomic_swap_bits(rty, (char*)v + offs, rhs, fsz);
            if (hasptr)
                jl_gc_multi_wb(v, rhs); // rhs is immutable
        }
        else {
            if (needlock) {
                // oversized atomic field: allocate the result box before
                // taking the lock so the critical section cannot allocate
                jl_task_t *ct = jl_current_task;
                r = jl_gc_alloc(ct->ptls, fsz, ty);
                jl_lock_value(v);
                memcpy((char*)r, (char*)v + offs, fsz);
                memcpy((char*)v + offs, (char*)rhs, fsz);
                jl_unlock_value(v);
            }
            else {
                if (!isunion)
                    r = jl_new_bits(ty, (char*)v + offs);
                memassign_safe(hasptr, v, (char*)v + offs, rhs, fsz);
            }
            if (needlock || !isunion)
                r = undefref_check((jl_datatype_t*)ty, r);
        }
    }
    if (__unlikely(r == NULL))
        jl_throw(jl_undefref_exception);
    return r;
}
// Implements modifyfield!: replace field i of `v` with op(old, rhs),
// retrying with a CAS loop until the field did not change underneath us.
// Returns the tuple (old, new).
jl_value_t *modify_nth_field(jl_datatype_t *st, jl_value_t *v, size_t i, jl_value_t *op, jl_value_t *rhs, int isatomic)
{
    size_t offs = jl_field_offset(st, i);
    jl_value_t *ty = jl_field_type_concrete(st, i);
    jl_value_t *r = jl_get_nth_field_checked(v, i);
    if (isatomic && jl_field_isptr(st, i))
        jl_fence(); // load was previously only relaxed
    jl_value_t **args;
    JL_GC_PUSHARGS(args, 2);
    args[0] = r;
    while (1) {
        // compute the candidate new value; op may allocate and run
        // arbitrary code, so everything it sees is GC-rooted via args
        args[1] = rhs;
        jl_value_t *y = jl_apply_generic(op, args, 2);
        args[1] = y;
        if (!jl_isa(y, ty))
            jl_type_error("modifyfield!", ty, y);
        if (jl_field_isptr(st, i)) {
            jl_value_t **p = (jl_value_t**)((char*)v + offs);
            if (isatomic ? jl_atomic_cmpswap(p, &r, y) : jl_atomic_cmpswap_relaxed(p, &r, y))
                break;
        }
        else {
            jl_value_t *yty = jl_typeof(y);
            jl_value_t *rty = jl_typeof(r);
            int hasptr;
            int isunion = jl_is_uniontype(ty);
            if (isunion) {
                assert(!isatomic);
                hasptr = 0;
            }
            else {
                hasptr = ((jl_datatype_t*)ty)->layout->npointers > 0;
            }
            size_t fsz = jl_datatype_size((jl_datatype_t*)rty); // need to shrink-wrap the final copy
            int needlock = (isatomic && fsz > MAX_ATOMIC_SIZE);
            if (isatomic && !needlock) {
                if (jl_atomic_bool_cmpswap_bits((char*)v + offs, r, y, fsz)) {
                    if (hasptr)
                        jl_gc_multi_wb(v, y); // y is immutable
                    break;
                }
                // CAS failed: reload the current value and retry
                r = jl_atomic_new_bits(ty, (char*)v + offs);
            }
            else {
                if (needlock)
                    jl_lock_value(v);
                // non-hardware path: compare the stored bytes manually
                int success = memcmp((char*)v + offs, r, fsz) == 0;
                if (success) {
                    if (isunion) {
                        // also require the active union member to match r
                        size_t fsz = jl_field_size(st, i);
                        uint8_t *psel = &((uint8_t*)v)[offs + fsz - 1];
                        success = (jl_typeof(r) == jl_nth_union_component(ty, *psel));
                        if (success) {
                            unsigned nth = 0;
                            if (!jl_find_union_component(ty, yty, &nth))
                                assert(0 && "invalid field assignment to isbits union");
                            *psel = nth;
                            if (jl_is_datatype_singleton((jl_datatype_t*)yty))
                                break;
                        }
                        fsz = jl_datatype_size((jl_datatype_t*)yty); // need to shrink-wrap the final copy
                    }
                    else {
                        assert(yty == ty && rty == ty);
                    }
                    memassign_safe(hasptr, v, (char*)v + offs, y, fsz);
                }
                if (needlock)
                    jl_unlock_value(v);
                if (success)
                    break;
                r = jl_get_nth_field(v, i);
            }
        }
        args[0] = r;
        jl_gc_safepoint();
    }
    // args[0] == r (old); args[1] == y (new)
    args[0] = jl_f_tuple(NULL, args, 2);
    JL_GC_POP();
    return args[0];
}
// Implements replacefield!: compare-and-swap field i of `v` from
// `expected` to `rhs`. Returns a tuple of (old_value, success::Bool).
jl_value_t *replace_nth_field(jl_datatype_t *st, jl_value_t *v, size_t i, jl_value_t *expected, jl_value_t *rhs, int isatomic)
{
    jl_value_t *ty = jl_field_type_concrete(st, i);
    if (!jl_isa(rhs, ty))
        jl_type_error("replacefield!", ty, rhs);
    size_t offs = jl_field_offset(st, i);
    jl_value_t *r = expected;
    if (jl_field_isptr(st, i)) {
        jl_value_t **p = (jl_value_t**)((char*)v + offs);
        int success;
        while (1) {
            success = isatomic ? jl_atomic_cmpswap(p, &r, rhs) : jl_atomic_cmpswap_relaxed(p, &r, rhs);
            if (success)
                jl_gc_wb(v, rhs);
            if (__unlikely(r == NULL))
                jl_throw(jl_undefref_exception);
            // retry only when the CAS failed on pointer identity but the
            // loaded value is still egal to `expected`
            if (success || !jl_egal(r, expected))
                break;
        }
        jl_value_t **args;
        JL_GC_PUSHARGS(args, 2);
        args[0] = r;
        args[1] = success ? jl_true : jl_false;
        r = jl_f_tuple(NULL, args, 2);
        JL_GC_POP();
    }
    else {
        int hasptr;
        int isunion = jl_is_uniontype(ty);
        int needlock;
        jl_value_t *rty = ty;
        size_t fsz;
        if (isunion) {
            assert(!isatomic);
            hasptr = 0;
            needlock = 0;
            isatomic = 0; // this makes GCC happy
        }
        else {
            hasptr = ((jl_datatype_t*)ty)->layout->npointers > 0;
            fsz = jl_datatype_size((jl_datatype_t*)rty); // need to shrink-wrap the final copy
            needlock = (isatomic && fsz > MAX_ATOMIC_SIZE);
        }
        if (isatomic && !needlock) {
            // hardware CAS; the returned box is a (value, Bool) tuple with
            // the success flag stored at offset fsz
            r = jl_atomic_cmpswap_bits((jl_datatype_t*)rty, (char*)v + offs, r, rhs, fsz);
            int success = *((uint8_t*)r + fsz);
            if (success && hasptr)
                jl_gc_multi_wb(v, rhs); // rhs is immutable
        }
        else {
            jl_task_t *ct = jl_current_task;
            uint8_t *psel;
            if (isunion) {
                // resolve the currently-active union member via the
                // trailing selector byte
                size_t fsz = jl_field_size(st, i);
                psel = &((uint8_t*)v)[offs + fsz - 1];
                rty = jl_nth_union_component(rty, *psel);
            }
            // build the Tuple{rty, Bool} result box up front so the locked
            // section below cannot allocate
            jl_value_t *params[2];
            params[0] = rty;
            params[1] = (jl_value_t*)jl_bool_type;
            jl_datatype_t *tuptyp = jl_apply_tuple_type_v(params, 2);
            JL_GC_PROMISE_ROOTED(tuptyp); // (JL_ALWAYS_LEAFTYPE)
            assert(!jl_field_isptr(tuptyp, 0));
            r = jl_gc_alloc(ct->ptls, tuptyp->size, (jl_value_t*)tuptyp);
            int success = (rty == jl_typeof(expected));
            if (needlock)
                jl_lock_value(v);
            size_t fsz = jl_datatype_size((jl_datatype_t*)rty); // need to shrink-wrap the final copy
            memcpy((char*)r, (char*)v + offs, fsz);
            if (success) {
                // types match; now compare the bytes (egal-wise when the
                // layout has padding that memcmp would misjudge)
                if (((jl_datatype_t*)rty)->layout->haspadding)
                    success = jl_egal__bits(r, expected, (jl_datatype_t*)rty);
                else
                    success = memcmp((char*)r, (char*)expected, fsz) == 0;
            }
            *((uint8_t*)r + fsz) = success ? 1 : 0;
            if (success) {
                jl_value_t *rty = jl_typeof(rhs);
                size_t fsz = jl_datatype_size((jl_datatype_t*)rty); // need to shrink-wrap the final copy
                if (isunion) {
                    unsigned nth = 0;
                    if (!jl_find_union_component(ty, rty, &nth))
                        assert(0 && "invalid field assignment to isbits union");
                    *psel = nth;
                    if (jl_is_datatype_singleton((jl_datatype_t*)rty))
                        return r;
                }
                memassign_safe(hasptr, v, (char*)v + offs, rhs, fsz);
            }
            if (needlock)
                jl_unlock_value(v);
        }
        r = undefref_check((jl_datatype_t*)rty, r);
        if (__unlikely(r == NULL))
            jl_throw(jl_undefref_exception);
    }
    return r;
}
// Return whether field i of `v` is assigned: 0 if unassigned, 1 if
// assigned, 2 if the field is isbits (and therefore always defined).
JL_DLLEXPORT int jl_field_isdefined(jl_value_t *v, size_t i) JL_NOTSAFEPOINT
{
    jl_datatype_t *st = (jl_datatype_t*)jl_typeof(v);
    size_t offs = jl_field_offset(st, i);
    jl_value_t **fld = (jl_value_t**)((char*)v + offs);
    if (!jl_field_isptr(st, i)) {
        jl_datatype_t *ft = (jl_datatype_t*)jl_field_type_concrete(st, i);
        if (!jl_is_datatype(ft) || ft->layout->first_ptr < 0)
            return 2; // isbits are always defined
        // inline-allocated struct: definedness is tracked by its first
        // embedded reference slot
        fld += ft->layout->first_ptr;
    }
    jl_value_t *fval = jl_atomic_load_relaxed(fld);
    return fval != NULL ? 1 : 0;
}
// Return the byte offset of field `field` (1-based, as exposed to Julia)
// within `ty`. Throws a BoundsError for out-of-range indices or types
// without a computed layout.
JL_DLLEXPORT size_t jl_get_field_offset(jl_datatype_t *ty, int field) JL_NOTSAFEPOINT
{
    int valid = ty->layout != NULL && field >= 1 && field <= jl_datatype_nfields(ty);
    if (!valid)
        jl_bounds_error_int((jl_value_t*)ty, field);
    return jl_field_offset(ty, field - 1);
}
#ifdef __cplusplus
}
#endif