dnl kcore/type.h.m →
dnl ~ lexi hale
dnl this file gathers information on the environment it's
dnl being compiled in, defining types that our code
dnl needs. it will be emitted as .
dnl vim: ft=c
include(`typesize.m')
changequote(`“',`”')
#ifndef KItype
#define KItype

/* we define 64-bit types first due to an oddity in how
 * 128-bit types are handled: we want kc_?big to reference
 * the absolute largest type available to the compiler,
 * but in some cases, 128-bits may not be among the
 * standard C types despite being supported by the
 * compiler. to work around this, we first check whether
 * 64-bit types are available (__int128_t only works on
 * 64-bit systems) and then whether the compiler is one
 * that supports the 128-bit extension - but only if a
 * native 128-bit type is not available.
 *
 * once this is done, we can be certain that u128 will
 * reference the largest available integer type and can
 * safely define kc_?big by reference to it. */
ifdef(“type_bit64”,“
typedef unsigned type_bit64 u64;
typedef signed type_bit64 s64;
ifdef(“type_bit128”,,“
/* even if no native type is 128 bits long, clang and
 * gcc have extensions to support 128 bit arithmetic
 * on some 64-bit hardware */
#	ifdef __SIZEOF_INT128__
		typedef __uint128_t u128;
		typedef __int128_t s128;
#	else
		/* if we don`'t have access to that extension
		 * or native 128-bit types, then we just use
		 * the largest native type specified in the
		 * C standard */
		typedef unsigned long long u128;
		typedef signed long long s128;
#	endif
”)”,“dnl
typedef unsigned long long u64;
typedef signed long long s64;
typedef u64 u128;
typedef s64 s128;
”)dnl
ifdef(“type_bit128”,“dnl
typedef unsigned type_bit128 u128;
typedef signed type_bit128 s128;
”)dnl

typedef unsigned char ubyte;
typedef signed char sbyte;
typedef u128 ubig;
typedef s128 sbig;

ifdef(“type_bit8”,“dnl
typedef unsigned type_bit8 u8;
typedef signed type_bit8 s8;
”,“dnl
typedef ubyte u8;
typedef sbyte s8;
”)dnl
ifdef(“type_bit16”,“
typedef unsigned type_bit16 u16;
typedef signed type_bit16 s16;
”,“
typedef ubig u16;
typedef sbig s16;
”)dnl
ifdef(“type_bit32”,“
typedef unsigned type_bit32 u32;
typedef signed type_bit32 s32;
”,“
typedef ubig u32;
typedef sbig s32;
”)dnl

define(“cat”, “$1$2”)
typedef enum kcendian {
	kcendian_high,
	kcendian_low,
	kcendian_system = cat(kcendian_,prop_endian),
} kcendian;

define(“defval”,“#define $1 $2
”)
defval(byte_bits,arch_byte_bits)

/* this section was originally defined with enums so
 * as to avoid creating extraneous macros; however,
 * enum in C is limited to values of type int, when
 * we need values that are considerably longer. the
 * section has been commented out until someone has
 * the energy to convert it to #defines.
enum {
	u8_min = 0,    u8_max = ((u8)-1),
	u16_min = 0,   u16_max = ((u16)-1),
	u32_min = 0,   u32_max = ((u32)-1),
	u64_min = 0,   u64_max = ((u64)-1),
	u128_min = 0,  u128_max = ((u128)-1),

	// assuming two's complement.
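	// (an overflow-free alternative, sketched here only as a note and
	//  assuming two's complement: derive each bound from the unsigned
	//  all-ones pattern rather than a shift, since the shift in sspec
	//  below is evaluated in plain int and overflows for the wider
	//  types. e.g. for s64:
	//      s64_max = (s64)(((u64)-1) >> 1),
	//      s64_min = (-s64_max - 1)
	//  the same shape works for s8 through s128.)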
	// TODO: check math
	define(“sspec”,“
	$1_min = 0 - ((1 << sizeof($1) * byte_bits) / 2),
	$1_max = (1 << sizeof($1) * byte_bits) / 2 - 1”)dnl
	sspec(s8), sspec(s16), sspec(s32),
	sspec(s64), sspec(s128),

	kc_uchar_min = 0,   kc_uchar_max = type_max_u_char,
	kc_ushort_min = 0,  kc_ushort_max = type_max_u_short,
	kc_uint_min = 0,    kc_uint_max = type_max_u_int,
	kc_ulong_min = 0,   kc_ulong_max = type_max_u_long,
	kc_ullong_min = 0,  kc_ullong_max = type_max_u_llong,

	kc_schar_min = type_min_s_char,    kc_schar_max = type_max_s_char,
	kc_sshort_min = type_min_s_short,  kc_sshort_max = type_max_s_short,
	kc_sint_min = type_min_s_int,      kc_sint_max = type_max_s_int,
	kc_slong_min = type_min_s_long,    kc_slong_max = type_max_s_long,
	kc_sllong_min = type_min_s_llong,  kc_sllong_max = type_max_s_llong,

	ubig_min = u128_min,       ubig_max = u128_max,
	sbig_min = s128_min,       sbig_max = s128_max,
	ubyte_min = kc_uchar_min,  ubyte_max = kc_uchar_max,
	sbyte_min = kc_schar_min,  sbyte_max = kc_schar_max,
}; */

ifdef(“type_sz”,“
typedef type_sz sz;
”,“dnl
#	ifdef __cplusplus
		/* C++ gives us a clean, standardized way to do this */
		typedef decltype (sizeof(char)) sz;
#	else
#		if defined(__GNUC__) || defined(__clang__)
			typedef __typeof__ (sizeof(char)) sz;
#		else
			/* we're stumped - set sz to the safest possible value under
			 * the circumstances, and warn the user. */
#			warning no authoritative sz (size_t) type definition \
				available; defaulting to largest unsigned integer type
			typedef ubig sz;
#		endif
#	endif
”)dnl
ifdef(“type_offset”,“
typedef type_offset offset;
”,“dnl
#	ifdef __cplusplus
		/* C++ gives us a clean, standardized way to do this */
		typedef decltype (((char*)10) - ((char*)5)) offset;
#	else
#		if defined(__GNUC__) || defined(__clang__)
			typedef __typeof__ (((void*)10) - ((void*)5)) offset;
#		else
			/* no dice - set offset to the safest possible value under
			 * the circumstances, and warn the user. */
#			warning no authoritative offset (ptrdiff_t) type definition \
				available; defaulting to largest signed integer type
			typedef sbig offset;
#		endif
#	endif
”)dnl

// exit status integer types - pls use kbad in instead
ifelse(target_posix,“yes”,“
/* by convention, posix return values are 8-bit,
 * but note that many modern UNIXes do technically
 * support higher-bit values. for this reason,
 * stat_long is defined differently under posix. */
typedef u8 stat;
typedef u32 stat_long;
”,“dnl
ifelse(atom_target_os,“win”,“
typedef u32 stat;
”,“dnl
ifelse(atom_target_os,“vms”,“
typedef u32 stat;
”,“dnl
typedef u8 stat;
/* we don't know a specific exit status type
 * for your arch so we're going with a sane
 * default. if this is wrong, help us fix it! */
”)”)dnl
typedef stat stat_long;
”)dnl

/* unicode types */
typedef u32 codepoint;  /* a codepoint is a UTF-32 character */
typedef u8 rune [5];    /* a rune is a valid UTF-8 character terminated with a nul. */

#endif
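
dnl usage sketch (dnl lines are discarded by m4 and never reach the
dnl emitted header; reading kcendian_low as little-endian is an
dnl assumption, not something this file states):
dnl
dnl     rune r = { 0xC3, 0xA9, 0, 0, 0 }; /* U+00E9 as UTF-8, nul-terminated */
dnl     codepoint c = 0x00E9;             /* the same character as UTF-32 */
dnl     if (kcendian_system == kcendian_low) {
dnl         /* byte-order-dependent code for little-endian builds */
dnl     }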