diff --git a/ruby_atomic.h b/ruby_atomic.h
index 085c307ac6..5c9049e001 100644
--- a/ruby_atomic.h
+++ b/ruby_atomic.h
@@ -36,4 +36,36 @@ rbimpl_atomic_load_relaxed(rb_atomic_t *ptr)
 }
 #define ATOMIC_LOAD_RELAXED(var) rbimpl_atomic_load_relaxed(&(var))
 
+static inline uint64_t
+rbimpl_atomic_u64_load_relaxed(const uint64_t *value)
+{
+#if defined(HAVE_GCC_ATOMIC_BUILTINS)
+    return __atomic_load_n(value, __ATOMIC_RELAXED);
+#elif defined(_WIN32)
+    uint64_t val = *value;
+    return InterlockedCompareExchange64(value, val, val);
+#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
+    uint64_t val = *value;
+    return atomic_cas_64(value, val, val);
+#else
+    return *value;
+#endif
+}
+#define ATOMIC_U64_LOAD_RELAXED(var) rbimpl_atomic_u64_load_relaxed(&(var))
+
+static inline void
+rbimpl_atomic_u64_set_relaxed(uint64_t *address, uint64_t value)
+{
+#if defined(HAVE_GCC_ATOMIC_BUILTINS)
+    __atomic_store_n(address, value, __ATOMIC_RELAXED);
+#elif defined(_WIN32)
+    InterlockedExchange64(address, value);
+#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
+    atomic_swap_64(address, value);
+#else
+    *address = value;
+#endif
+}
+#define ATOMIC_U64_SET_RELAXED(var, val) rbimpl_atomic_u64_set_relaxed(&(var), val)
+
 #endif
diff --git a/shape.h b/shape.h
index b9809c4010..50b39062ec 100644
--- a/shape.h
+++ b/shape.h
@@ -3,34 +3,22 @@
 
 #include "internal/gc.h"
 
-#if (SIZEOF_UINT64_T <= SIZEOF_VALUE)
-
-#define SIZEOF_SHAPE_T 4
-typedef uint32_t attr_index_t;
-typedef uint32_t shape_id_t;
-# define SHAPE_ID_NUM_BITS 32
-
-#else
-
-#define SIZEOF_SHAPE_T 2
 typedef uint16_t attr_index_t;
-typedef uint16_t shape_id_t;
-# define SHAPE_ID_NUM_BITS 16
-
-#endif
+typedef uint32_t shape_id_t;
+#define SHAPE_ID_NUM_BITS 32
 
 typedef uint32_t redblack_id_t;
 
 #define SHAPE_MAX_FIELDS (attr_index_t)(-1)
 
-# define SHAPE_FLAG_MASK (((VALUE)-1) >> SHAPE_ID_NUM_BITS)
+#define SHAPE_FLAG_MASK (((VALUE)-1) >> SHAPE_ID_NUM_BITS)
+#define SHAPE_FLAG_SHIFT ((SIZEOF_VALUE * 8) - SHAPE_ID_NUM_BITS)
 
-# define SHAPE_FLAG_SHIFT ((SIZEOF_VALUE * 8) - SHAPE_ID_NUM_BITS)
+#define SHAPE_MAX_VARIATIONS 8
 
-# define SHAPE_MAX_VARIATIONS 8
-
-# define INVALID_SHAPE_ID (((uintptr_t)1 << SHAPE_ID_NUM_BITS) - 1)
-#define ATTR_INDEX_NOT_SET (attr_index_t)-1
+#define INVALID_SHAPE_ID ((shape_id_t)-1)
+#define ATTR_INDEX_NOT_SET ((attr_index_t)-1)
 
 #define ROOT_SHAPE_ID 0x0
 #define SPECIAL_CONST_SHAPE_ID 0x1
@@ -44,13 +32,13 @@ typedef struct redblack_node redblack_node_t;
 struct rb_shape {
     VALUE edges; // id_table from ID (ivar) to next shape
     ID edge_name; // ID (ivar) for transition from parent to rb_shape
+    redblack_node_t *ancestor_index;
+    shape_id_t parent_id;
     attr_index_t next_field_index; // Fields are either ivars or internal properties like `object_id`
     attr_index_t capacity; // Total capacity of the object with this shape
     uint8_t type;
     uint8_t heap_index;
     uint8_t flags;
-    shape_id_t parent_id;
-    redblack_node_t *ancestor_index;
 };
 
 typedef struct rb_shape rb_shape_t;
@@ -82,6 +70,14 @@ typedef struct {
 } rb_shape_tree_t;
 RUBY_EXTERN rb_shape_tree_t *rb_shape_tree_ptr;
 
+union rb_attr_index_cache {
+    uint64_t pack;
+    struct {
+        shape_id_t shape_id;
+        attr_index_t index;
+    } unpack;
+};
+
 static inline rb_shape_tree_t *
 rb_current_shape_tree(void)
 {
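For context, the GCC/Clang builtin branch above is the one most builds take; the Windows and Solaris branches emulate a plain 64-bit load with a no-op compare-exchange/swap. Below is a standalone sketch, not part of the patch, of what the relaxed 64-bit accessors guarantee: the 8-byte word is always read or written whole (no torn half-updates), with no memory ordering implied. The `cache_word` variable and thread bodies are made up for illustration; only the `__atomic_*` builtins are real.

/* cc -O2 -pthread u64_relaxed_demo.c */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t cache_word; /* shared cache slot, like ic->value */

static void *
writer(void *arg)
{
    (void)arg;
    for (uint64_t i = 0; i < 1000000; i++) {
        /* Same pattern as ATOMIC_U64_SET_RELAXED: all 8 bytes are
         * published in one access, so readers never observe a
         * half-written value. Both halves are kept equal on purpose. */
        uint64_t packed = (i << 32) | (uint32_t)i;
        __atomic_store_n(&cache_word, packed, __ATOMIC_RELAXED);
    }
    return NULL;
}

int
main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, writer, NULL);
    for (int i = 0; i < 1000000; i++) {
        /* Same pattern as ATOMIC_U64_LOAD_RELAXED. */
        uint64_t v = __atomic_load_n(&cache_word, __ATOMIC_RELAXED);
        if ((uint32_t)(v >> 32) != (uint32_t)v) {
            puts("torn read!"); /* cannot fire: the load is untearable */
            return 1;
        }
    }
    pthread_join(t, NULL);
    puts("no torn reads observed");
    return 0;
}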
diff --git a/vm_callinfo.h b/vm_callinfo.h
index 6813c1cc94..d4dc3b655e 100644
--- a/vm_callinfo.h
+++ b/vm_callinfo.h
@@ -289,7 +289,7 @@ struct rb_callcache {
 
     union {
         struct {
-            uintptr_t value; // Shape ID in upper bits, index in lower bits
+            uint64_t value; // Shape ID in upper bits, index in lower bits
         } attr;
         const enum method_missing_reason method_missing_reason; /* used by method_missing */
         VALUE v;
@@ -416,24 +416,25 @@ vm_cc_call(const struct rb_callcache *cc)
 }
 
 static inline void
-vm_unpack_shape_and_index(uintptr_t cache_value, shape_id_t *shape_id, attr_index_t *index)
+vm_unpack_shape_and_index(const uint64_t cache_value, shape_id_t *shape_id, attr_index_t *index)
 {
-    *shape_id = (shape_id_t)(cache_value >> SHAPE_FLAG_SHIFT);
-    *index = (attr_index_t)(cache_value & SHAPE_FLAG_MASK) - 1;
+    union rb_attr_index_cache cache = {
+        .pack = cache_value,
+    };
+    *shape_id = cache.unpack.shape_id;
+    *index = cache.unpack.index - 1;
 }
 
 static inline void
 vm_cc_atomic_shape_and_index(const struct rb_callcache *cc, shape_id_t *shape_id, attr_index_t *index)
 {
-    // Atomically read uintptr_t
-    vm_unpack_shape_and_index(cc->aux_.attr.value, shape_id, index);
+    vm_unpack_shape_and_index(ATOMIC_U64_LOAD_RELAXED(cc->aux_.attr.value), shape_id, index);
 }
 
 static inline void
 vm_ic_atomic_shape_and_index(const struct iseq_inline_iv_cache_entry *ic, shape_id_t *shape_id, attr_index_t *index)
 {
-    // Atomically read uintptr_t
-    vm_unpack_shape_and_index(ic->value, shape_id, index);
+    vm_unpack_shape_and_index(ATOMIC_U64_LOAD_RELAXED(ic->value), shape_id, index);
 }
 
 static inline unsigned int
@@ -470,16 +471,22 @@ set_vm_cc_ivar(const struct rb_callcache *cc)
     *(VALUE *)&cc->flags |= VM_CALLCACHE_IVAR;
 }
 
-static inline uintptr_t
+static inline uint64_t
 vm_pack_shape_and_index(shape_id_t shape_id, attr_index_t index)
 {
-    return (attr_index_t)(index + 1) | ((uintptr_t)(shape_id) << SHAPE_FLAG_SHIFT);
+    union rb_attr_index_cache cache = {
+        .unpack = {
+            .shape_id = shape_id,
+            .index = index + 1,
+        }
+    };
+    return cache.pack;
 }
 
 static inline void
 vm_cc_attr_index_set(const struct rb_callcache *cc, attr_index_t index, shape_id_t dest_shape_id)
 {
-    uintptr_t *attr_value = (uintptr_t *)&cc->aux_.attr.value;
+    uint64_t *attr_value = (uint64_t *)&cc->aux_.attr.value;
     if (!vm_cc_markable(cc)) {
         *attr_value = vm_pack_shape_and_index(INVALID_SHAPE_ID, ATTR_INDEX_NOT_SET);
         return;
@@ -497,15 +504,15 @@ vm_cc_ivar_p(const struct rb_callcache *cc)
 }
 
 static inline void
-vm_ic_attr_index_set(const rb_iseq_t *iseq, const struct iseq_inline_iv_cache_entry *ic, attr_index_t index, shape_id_t dest_shape_id)
+vm_ic_attr_index_set(const rb_iseq_t *iseq, struct iseq_inline_iv_cache_entry *ic, attr_index_t index, shape_id_t dest_shape_id)
 {
-    *(uintptr_t *)&ic->value = vm_pack_shape_and_index(dest_shape_id, index);
+    ATOMIC_U64_SET_RELAXED(ic->value, vm_pack_shape_and_index(dest_shape_id, index));
 }
 
 static inline void
-vm_ic_attr_index_initialize(const struct iseq_inline_iv_cache_entry *ic, shape_id_t shape_id)
+vm_ic_attr_index_initialize(struct iseq_inline_iv_cache_entry *ic, shape_id_t shape_id)
 {
-    *(uintptr_t *)&ic->value = vm_pack_shape_and_index(shape_id, ATTR_INDEX_NOT_SET);
+    ATOMIC_U64_SET_RELAXED(ic->value, vm_pack_shape_and_index(shape_id, ATTR_INDEX_NOT_SET));
 }
 
 static inline void
diff --git a/vm_core.h b/vm_core.h
index e4aea59e3f..af2a4f85e3 100644
--- a/vm_core.h
+++ b/vm_core.h
@@ -288,7 +288,7 @@ struct iseq_inline_constant_cache {
 };
 
 struct iseq_inline_iv_cache_entry {
-    uintptr_t value; // attr_index in lower bits, dest_shape_id in upper bits
+    uint64_t value; // attr_index in lower bits, dest_shape_id in upper bits
     ID iv_set_name;
 };
 
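The `rb_attr_index_cache` union introduced in shape.h replaces the old shift-and-mask packing above. Note that `index + 1` is what gets stored: a zero-initialized cache word then decodes to ATTR_INDEX_NOT_SET under unsigned wraparound. A standalone round-trip sketch, not part of the patch, with the typedefs inlined from shape.h so it compiles on its own:

#include <assert.h>
#include <stdint.h>

typedef uint16_t attr_index_t;
typedef uint32_t shape_id_t;
#define ATTR_INDEX_NOT_SET ((attr_index_t)-1)

union rb_attr_index_cache {
    uint64_t pack;                /* the whole 64-bit cache word */
    struct {
        shape_id_t shape_id;
        attr_index_t index;
    } unpack;
};

static uint64_t
pack_shape_and_index(shape_id_t shape_id, attr_index_t index)
{
    /* Mirrors vm_pack_shape_and_index(): index is biased by +1. */
    union rb_attr_index_cache cache = {
        .unpack = { .shape_id = shape_id, .index = index + 1 }
    };
    return cache.pack;
}

static void
unpack_shape_and_index(uint64_t value, shape_id_t *shape_id, attr_index_t *index)
{
    /* Mirrors vm_unpack_shape_and_index(): the bias is removed here. */
    union rb_attr_index_cache cache = { .pack = value };
    *shape_id = cache.unpack.shape_id;
    *index = cache.unpack.index - 1;
}

int
main(void)
{
    shape_id_t sid;
    attr_index_t idx;

    unpack_shape_and_index(pack_shape_and_index(42, 3), &sid, &idx);
    assert(sid == 42 && idx == 3);

    /* An all-zero word means "no index cached": 0 - 1 wraps to
     * ATTR_INDEX_NOT_SET in unsigned 16-bit arithmetic. */
    unpack_shape_and_index(0, &sid, &idx);
    assert(idx == ATTR_INDEX_NOT_SET);
    return 0;
}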
diff --git a/vm_insnhelper.c b/vm_insnhelper.c
index e638ac1e82..78d845405a 100644
--- a/vm_insnhelper.c
+++ b/vm_insnhelper.c
@@ -4774,7 +4774,7 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st
                 .call_ = cc->call_,
                 .aux_ = {
                     .attr = {
-                        .value = INVALID_SHAPE_ID << SHAPE_FLAG_SHIFT,
+                        .value = vm_pack_shape_and_index(INVALID_SHAPE_ID, ATTR_INDEX_NOT_SET),
                     }
                 },
             });
diff --git a/yjit/src/codegen.rs b/yjit/src/codegen.rs
index 0f6385bada..0925cdb993 100644
--- a/yjit/src/codegen.rs
+++ b/yjit/src/codegen.rs
@@ -2904,7 +2904,7 @@ fn gen_get_ivar(
 
     let ivar_index = unsafe {
         let shape_id = comptime_receiver.shape_id_of();
-        let mut ivar_index: u32 = 0;
+        let mut ivar_index: u16 = 0;
         if rb_shape_get_iv_index(shape_id, ivar_name, &mut ivar_index) {
             Some(ivar_index as usize)
         } else {
@@ -3106,7 +3106,7 @@ fn gen_set_ivar(
     let shape_too_complex = comptime_receiver.shape_too_complex();
     let ivar_index = if !shape_too_complex {
         let shape_id = comptime_receiver.shape_id_of();
-        let mut ivar_index: u32 = 0;
+        let mut ivar_index: u16 = 0;
         if unsafe { rb_shape_get_iv_index(shape_id, ivar_name, &mut ivar_index) } {
             Some(ivar_index as usize)
         } else {
@@ -3395,7 +3395,7 @@ fn gen_definedivar(
 
     let shape_id = comptime_receiver.shape_id_of();
     let ivar_exists = unsafe {
-        let mut ivar_index: u32 = 0;
+        let mut ivar_index: u16 = 0;
         rb_shape_get_iv_index(shape_id, ivar_name, &mut ivar_index)
     };
 
diff --git a/yjit/src/cruby_bindings.inc.rs b/yjit/src/cruby_bindings.inc.rs
index 0829317cff..004864d75b 100644
--- a/yjit/src/cruby_bindings.inc.rs
+++ b/yjit/src/cruby_bindings.inc.rs
@@ -515,7 +515,7 @@ pub struct iseq_inline_constant_cache {
 #[repr(C)]
 #[derive(Debug, Copy, Clone)]
 pub struct iseq_inline_iv_cache_entry {
-    pub value: usize,
+    pub value: u64,
     pub iv_set_name: ID,
 }
 #[repr(C)]
@@ -685,7 +685,7 @@ pub const VM_ENV_FLAG_ESCAPED: vm_frame_env_flags = 4;
 pub const VM_ENV_FLAG_WB_REQUIRED: vm_frame_env_flags = 8;
 pub const VM_ENV_FLAG_ISOLATED: vm_frame_env_flags = 16;
 pub type vm_frame_env_flags = u32;
-pub type attr_index_t = u32;
+pub type attr_index_t = u16;
 pub type shape_id_t = u32;
 pub type redblack_id_t = u32;
 pub type redblack_node_t = redblack_node;
@@ -693,13 +693,13 @@ pub type redblack_node_t = redblack_node;
 pub struct rb_shape {
     pub edges: VALUE,
     pub edge_name: ID,
+    pub ancestor_index: *mut redblack_node_t,
+    pub parent_id: shape_id_t,
     pub next_field_index: attr_index_t,
     pub capacity: attr_index_t,
     pub type_: u8,
     pub heap_index: u8,
     pub flags: u8,
-    pub parent_id: shape_id_t,
-    pub ancestor_index: *mut redblack_node_t,
 }
 pub type rb_shape_t = rb_shape;
 #[repr(C)]
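The rb_shape reordering (the pointer and shape_id_t moved ahead of the now-16-bit attr_index_t fields) keeps wider members first so the narrow u16/u8 members pack together at the tail, and the #[repr(C)] Rust mirrors must match the C struct field-for-field. A standalone sketch, not part of the patch, of how such a layout can be sanity-checked from C; VALUE and ID are stubbed as plain integers here, and the printed numbers are platform- and ABI-dependent:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t VALUE; /* stand-ins for the real Ruby typedefs */
typedef uint64_t ID;
typedef uint16_t attr_index_t;
typedef uint32_t shape_id_t;
typedef struct redblack_node redblack_node_t; /* opaque, pointer only */

/* Field order from the patched shape.h. */
struct rb_shape {
    VALUE edges;
    ID edge_name;
    redblack_node_t *ancestor_index;
    shape_id_t parent_id;
    attr_index_t next_field_index;
    attr_index_t capacity;
    uint8_t type;
    uint8_t heap_index;
    uint8_t flags;
};

int
main(void)
{
    /* Compare these against the Rust side (e.g. a const assertion on
     * std::mem::size_of::<rb_shape>()) to catch layout drift. */
    printf("sizeof(struct rb_shape) = %zu\n", sizeof(struct rb_shape));
    printf("parent_id at offset %zu, flags at offset %zu\n",
           offsetof(struct rb_shape, parent_id),
           offsetof(struct rb_shape, flags));
    return 0;
}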
diff --git a/zjit/src/cruby_bindings.inc.rs b/zjit/src/cruby_bindings.inc.rs
index e8fc3d3759..95fb8c5213 100644
--- a/zjit/src/cruby_bindings.inc.rs
+++ b/zjit/src/cruby_bindings.inc.rs
@@ -317,7 +317,7 @@ pub struct iseq_inline_constant_cache {
 }
 #[repr(C)]
 pub struct iseq_inline_iv_cache_entry {
-    pub value: usize,
+    pub value: u64,
     pub iv_set_name: ID,
 }
 #[repr(C)]
@@ -393,7 +393,7 @@ pub const VM_ENV_FLAG_ESCAPED: vm_frame_env_flags = 4;
 pub const VM_ENV_FLAG_WB_REQUIRED: vm_frame_env_flags = 8;
 pub const VM_ENV_FLAG_ISOLATED: vm_frame_env_flags = 16;
 pub type vm_frame_env_flags = u32;
-pub type attr_index_t = u32;
+pub type attr_index_t = u16;
 pub type shape_id_t = u32;
 pub type redblack_id_t = u32;
 pub type redblack_node_t = redblack_node;
@@ -401,13 +401,13 @@ pub type redblack_node_t = redblack_node;
 pub struct rb_shape {
     pub edges: VALUE,
    pub edge_name: ID,
+    pub ancestor_index: *mut redblack_node_t,
+    pub parent_id: shape_id_t,
     pub next_field_index: attr_index_t,
     pub capacity: attr_index_t,
     pub type_: u8,
     pub heap_index: u8,
     pub flags: u8,
-    pub parent_id: shape_id_t,
-    pub ancestor_index: *mut redblack_node_t,
 }
 pub type rb_shape_t = rb_shape;
 #[repr(C)]