This commit implements the Object Shapes technique in CRuby.

Object Shapes is used for accessing instance variables and representing the
"frozenness" of objects.  Object instances have a "shape" and the shape
represents some attributes of the object (currently which instance variables are
set and the "frozenness").  Shapes form a tree data structure, and when a new
instance variable is set on an object, that object "transitions" to a new shape
in the shape tree.  Each shape has an ID that is used for caching. The shape
structure is independent of class, so objects of different types can have the
same shape.

For example:

```ruby
class Foo
  def initialize
    # Starts with shape id 0
    @a = 1 # transitions to shape id 1
    @b = 1 # transitions to shape id 2
  end
end

class Bar
  def initialize
    # Starts with shape id 0
    @a = 1 # transitions to shape id 1
    @b = 1 # transitions to shape id 2
  end
end

foo = Foo.new # `foo` has shape id 2
bar = Bar.new # `bar` has shape id 2
```

Both `foo` and `bar` instances have the same shape because they both set
instance variables of the same name in the same order.
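
Since "frozenness" lives in the shape as well, freezing an object is modeled as
one more shape transition.  A sketch (the exact ids are illustrative, not taken
from a real run):

```ruby
foo.freeze  # `foo` transitions to a frozen child of shape id 2 (say, id 3)
bar.freeze  # `bar` takes the same edge and also ends up with shape id 3
```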

This technique can improve inline cache hit rates and helps JIT compilers
generate more efficient machine code: validating a cached instance variable
index only requires comparing the object's shape ID.
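
To illustrate the caching win, here is a self-contained toy model (plain Ruby,
invented for this sketch; `ToyObj`, `ToyIVC`, and `toy_getivar` are not part of
the implementation).  Because the cache is keyed on a shape id rather than a
class, the same cache entry hits for `Foo` and `Bar` instances alike:

```ruby
# Toy model of a shape-keyed inline cache (illustrative only).
ToyObj = Struct.new(:shape_id, :ivars) # mirrors ROBJECT_SHAPE_ID / ROBJECT_IVPTR
ToyIVC = Struct.new(:cached_shape_id, :index)

def toy_getivar(obj, ic)
  if obj.shape_id == ic.cached_shape_id
    obj.ivars[ic.index] # hit: one compare + an indexed read, no hash lookup
  else
    :miss               # would walk the shape tree and refill the cache
  end
end

foo = ToyObj.new(2, [1, 1])  # a Foo instance
bar = ToyObj.new(2, [1, 1])  # a Bar instance: different class, same shape
ic  = ToyIVC.new(2, 1)       # cache entry for `@b`
p toy_getivar(foo, ic), toy_getivar(bar, ic) # => 1, 1
```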

This commit also adds some methods for debugging shapes on objects.  See
`RubyVM::Shape` for more details.
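
The only part of that API visible in this excerpt is the
`RubyVM::Shape::SHAPE_BITS` constant, which the MJIT bindings below read; a
minimal, hedged example:

```ruby
# SHAPE_BITS is the width in bits of a shape id (the ractor hunk below asserts
# it is 16 under RACTOR_CHECK_MODE); the other debugging helpers added on
# RubyVM::Shape are not shown in this excerpt.
p RubyVM::Shape::SHAPE_BITS
```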

For more context on Object Shapes, see [Feature #18776]

Co-Authored-By: Aaron Patterson <tenderlove@ruby-lang.org>
Co-Authored-By: Eileen M. Uchitelle <eileencodes@gmail.com>
Co-Authored-By: John Hawthorn <john@hawthorn.email>
Authored by Jemma Issroff, 2022-09-23 13:54:42 -04:00; committed by Aaron Patterson
parent 2e88bca24f
commit 9ddfd2ca00
Notes: git 2022-09-27 01:22:15 +09:00
45 changed files with 2561 additions and 959 deletions


@@ -34,3 +34,19 @@ assert_equal %{ok}, %{
     print "ok"
   end
 }, '[ruby-core:15120]'
+assert_equal %{ok}, %{
+  class Big
+    attr_reader :foo
+    def initialize
+      @foo = "ok"
+    end
+  end
+
+  obj = Big.new
+  100.times do |i|
+    obj.instance_variable_set(:"@ivar_\#{i}", i)
+  end
+
+  Big.new.foo
+}

common.mk: 322 changes (diff too large to display)

@@ -2058,20 +2058,7 @@ cdhash_set_label_i(VALUE key, VALUE val, VALUE ptr)
 static inline VALUE
 get_ivar_ic_value(rb_iseq_t *iseq,ID id)
 {
-    VALUE val;
-    struct rb_id_table *tbl = ISEQ_COMPILE_DATA(iseq)->ivar_cache_table;
-    if (tbl) {
-        if (rb_id_table_lookup(tbl,id,&val)) {
-            return val;
-        }
-    }
-    else {
-        tbl = rb_id_table_create(1);
-        ISEQ_COMPILE_DATA(iseq)->ivar_cache_table = tbl;
-    }
-    val = INT2FIX(ISEQ_BODY(iseq)->ivc_size++);
-    rb_id_table_insert(tbl,id,val);
-    return val;
+    return INT2FIX(ISEQ_BODY(iseq)->ivc_size++);
 }

 static inline VALUE
@@ -2472,9 +2459,13 @@ iseq_set_sequence(rb_iseq_t *iseq, LINK_ANCHOR *const anchor)
                     generated_iseq[code_index + 1 + j] = (VALUE)ic;
                 }
                 break;
+              case TS_IVC: /* inline ivar cache */
+                {
+                    unsigned int ic_index = FIX2UINT(operands[j]);
+                    vm_ic_attr_index_initialize(((IVC)&body->is_entries[ic_index]), INVALID_SHAPE_ID);
+                }
               case TS_ISE: /* inline storage entry: `once` insn */
               case TS_ICVARC: /* inline cvar cache */
-              case TS_IVC: /* inline ivar cache */
                 {
                     unsigned int ic_index = FIX2UINT(operands[j]);
                     IC ic = &ISEQ_IS_ENTRY_START(body, type)[ic_index].ic_cache;
@@ -11514,6 +11505,11 @@ ibf_load_code(const struct ibf_load *load, rb_iseq_t *iseq, ibf_offset_t bytecod
                 ISE ic = ISEQ_IS_ENTRY_START(load_body, operand_type) + op;
                 code[code_index] = (VALUE)ic;
+
+                if (operand_type == TS_IVC) {
+                    vm_ic_attr_index_initialize(((IVC)code[code_index]), INVALID_SHAPE_ID);
+                }
             }
             break;
           case TS_CALLDATA:


@@ -130,7 +130,6 @@ RB_DEBUG_COUNTER(frame_C2R)
 /* instance variable counts
  *
  * * ivar_get_ic_hit/miss: ivar_get inline cache (ic) hit/miss counts (VM insn)
- * * ivar_get_ic_miss_serial: ivar_get ic miss reason by serial (VM insn)
  * * ivar_get_ic_miss_unset: ... by unset (VM insn)
  * * ivar_get_ic_miss_noobject: ... by "not T_OBJECT" (VM insn)
  * * ivar_set_...: same counts with ivar_set (VM insn)
@@ -140,17 +139,17 @@ RB_DEBUG_COUNTER(frame_C2R)
  */
 RB_DEBUG_COUNTER(ivar_get_ic_hit)
 RB_DEBUG_COUNTER(ivar_get_ic_miss)
-RB_DEBUG_COUNTER(ivar_get_ic_miss_serial)
-RB_DEBUG_COUNTER(ivar_get_ic_miss_unset)
 RB_DEBUG_COUNTER(ivar_get_ic_miss_noobject)
 RB_DEBUG_COUNTER(ivar_set_ic_hit)
 RB_DEBUG_COUNTER(ivar_set_ic_miss)
-RB_DEBUG_COUNTER(ivar_set_ic_miss_serial)
-RB_DEBUG_COUNTER(ivar_set_ic_miss_unset)
 RB_DEBUG_COUNTER(ivar_set_ic_miss_iv_hit)
 RB_DEBUG_COUNTER(ivar_set_ic_miss_noobject)
 RB_DEBUG_COUNTER(ivar_get_base)
 RB_DEBUG_COUNTER(ivar_set_base)
+RB_DEBUG_COUNTER(ivar_get_ic_miss_set)
+RB_DEBUG_COUNTER(ivar_get_cc_miss_set)
+RB_DEBUG_COUNTER(ivar_get_ic_miss_unset)
+RB_DEBUG_COUNTER(ivar_get_cc_miss_unset)

 /* local variable counts
  *
@@ -321,6 +320,7 @@ RB_DEBUG_COUNTER(obj_imemo_parser_strterm)
 RB_DEBUG_COUNTER(obj_imemo_callinfo)
 RB_DEBUG_COUNTER(obj_imemo_callcache)
 RB_DEBUG_COUNTER(obj_imemo_constcache)
+RB_DEBUG_COUNTER(obj_imemo_shape)

 /* ar_table */
 RB_DEBUG_COUNTER(artable_hint_hit)


@@ -165,7 +165,9 @@ coverage.o: $(top_srcdir)/ccan/check_type/check_type.h
 coverage.o: $(top_srcdir)/ccan/container_of/container_of.h
 coverage.o: $(top_srcdir)/ccan/list/list.h
 coverage.o: $(top_srcdir)/ccan/str/str.h
+coverage.o: $(top_srcdir)/constant.h
 coverage.o: $(top_srcdir)/gc.h
+coverage.o: $(top_srcdir)/id_table.h
 coverage.o: $(top_srcdir)/internal.h
 coverage.o: $(top_srcdir)/internal/array.h
 coverage.o: $(top_srcdir)/internal/compilers.h
@@ -176,12 +178,14 @@ coverage.o: $(top_srcdir)/internal/sanitizers.h
 coverage.o: $(top_srcdir)/internal/serial.h
 coverage.o: $(top_srcdir)/internal/static_assert.h
 coverage.o: $(top_srcdir)/internal/thread.h
+coverage.o: $(top_srcdir)/internal/variable.h
 coverage.o: $(top_srcdir)/internal/vm.h
 coverage.o: $(top_srcdir)/internal/warnings.h
 coverage.o: $(top_srcdir)/method.h
 coverage.o: $(top_srcdir)/node.h
 coverage.o: $(top_srcdir)/ruby_assert.h
 coverage.o: $(top_srcdir)/ruby_atomic.h
+coverage.o: $(top_srcdir)/shape.h
 coverage.o: $(top_srcdir)/thread_pthread.h
 coverage.o: $(top_srcdir)/vm_core.h
 coverage.o: $(top_srcdir)/vm_opts.h


@@ -350,6 +350,7 @@ objspace.o: $(top_srcdir)/internal/serial.h
 objspace.o: $(top_srcdir)/internal/static_assert.h
 objspace.o: $(top_srcdir)/internal/warnings.h
 objspace.o: $(top_srcdir)/node.h
+objspace.o: $(top_srcdir)/shape.h
 objspace.o: $(top_srcdir)/symbol.h
 objspace.o: objspace.c
 objspace.o: {$(VPATH)}id.h
@@ -533,7 +534,9 @@ objspace_dump.o: $(top_srcdir)/ccan/check_type/check_type.h
 objspace_dump.o: $(top_srcdir)/ccan/container_of/container_of.h
 objspace_dump.o: $(top_srcdir)/ccan/list/list.h
 objspace_dump.o: $(top_srcdir)/ccan/str/str.h
+objspace_dump.o: $(top_srcdir)/constant.h
 objspace_dump.o: $(top_srcdir)/gc.h
+objspace_dump.o: $(top_srcdir)/id_table.h
 objspace_dump.o: $(top_srcdir)/internal.h
 objspace_dump.o: $(top_srcdir)/internal/array.h
 objspace_dump.o: $(top_srcdir)/internal/compilers.h
@@ -544,12 +547,14 @@ objspace_dump.o: $(top_srcdir)/internal/sanitizers.h
 objspace_dump.o: $(top_srcdir)/internal/serial.h
 objspace_dump.o: $(top_srcdir)/internal/static_assert.h
 objspace_dump.o: $(top_srcdir)/internal/string.h
+objspace_dump.o: $(top_srcdir)/internal/variable.h
 objspace_dump.o: $(top_srcdir)/internal/vm.h
 objspace_dump.o: $(top_srcdir)/internal/warnings.h
 objspace_dump.o: $(top_srcdir)/method.h
 objspace_dump.o: $(top_srcdir)/node.h
 objspace_dump.o: $(top_srcdir)/ruby_assert.h
 objspace_dump.o: $(top_srcdir)/ruby_atomic.h
+objspace_dump.o: $(top_srcdir)/shape.h
 objspace_dump.o: $(top_srcdir)/thread_pthread.h
 objspace_dump.o: $(top_srcdir)/vm_core.h
 objspace_dump.o: $(top_srcdir)/vm_opts.h


@@ -644,6 +644,7 @@ count_imemo_objects(int argc, VALUE *argv, VALUE self)
     INIT_IMEMO_TYPE_ID(imemo_callinfo);
     INIT_IMEMO_TYPE_ID(imemo_callcache);
     INIT_IMEMO_TYPE_ID(imemo_constcache);
+    INIT_IMEMO_TYPE_ID(imemo_shape);
 #undef INIT_IMEMO_TYPE_ID
 }

gc.c: 215 changes

@@ -2895,8 +2895,7 @@ rb_class_instance_allocate_internal(VALUE klass, VALUE flags, bool wb_protected)
     GC_ASSERT((flags & RUBY_T_MASK) == T_OBJECT);
     GC_ASSERT(flags & ROBJECT_EMBED);

-    st_table *index_tbl = RCLASS_IV_INDEX_TBL(klass);
-    uint32_t index_tbl_num_entries = index_tbl == NULL ? 0 : (uint32_t)index_tbl->num_entries;
+    uint32_t index_tbl_num_entries = RCLASS_EXT(klass)->max_iv_count;

     size_t size;
     bool embed = true;
@@ -2931,7 +2930,7 @@ rb_class_instance_allocate_internal(VALUE klass, VALUE flags, bool wb_protected)
 #endif
     }
     else {
-        rb_init_iv_list(obj);
+        rb_ensure_iv_list_size(obj, 0, index_tbl_num_entries);
     }

     return obj;
@@ -2972,6 +2971,7 @@ rb_imemo_name(enum imemo_type type)
         IMEMO_NAME(callinfo);
         IMEMO_NAME(callcache);
         IMEMO_NAME(constcache);
+        IMEMO_NAME(shape);
 #undef IMEMO_NAME
     }
     return "unknown";
@@ -3018,6 +3018,14 @@ imemo_memsize(VALUE obj)
       case imemo_iseq:
         size += rb_iseq_memsize((rb_iseq_t *)obj);
         break;
+      case imemo_shape:
+        {
+            struct rb_id_table* edges = ((rb_shape_t *) obj)->edges;
+            if (edges) {
+                size += rb_id_table_memsize(edges);
+            }
+            break;
+        }
       case imemo_env:
         size += RANY(obj)->as.imemo.env.env_size * sizeof(VALUE);
         break;
@@ -3206,20 +3214,6 @@ rb_free_const_table(struct rb_id_table *tbl)
     rb_id_table_free(tbl);
 }

-static int
-free_iv_index_tbl_free_i(st_data_t key, st_data_t value, st_data_t data)
-{
-    xfree((void *)value);
-    return ST_CONTINUE;
-}
-
-static void
-iv_index_tbl_free(struct st_table *tbl)
-{
-    st_foreach(tbl, free_iv_index_tbl_free_i, 0);
-    st_free_table(tbl);
-}
-
 // alive: if false, target pointers can be freed already.
 // To check it, we need objspace parameter.
 static void
@@ -3387,6 +3381,22 @@ obj_free_object_id(rb_objspace_t *objspace, VALUE obj)
     }
 }

+static enum rb_id_table_iterator_result
+remove_child_shapes_parent(VALUE value, void *ref)
+{
+    rb_shape_t * shape = (rb_shape_t *) value;
+    GC_ASSERT(IMEMO_TYPE_P(shape, imemo_shape));
+
+    // If both objects live on the same page and we're currently
+    // sweeping that page, then we need to assert that neither are marked
+    if (GET_HEAP_PAGE(shape) == GET_HEAP_PAGE(shape->parent)) {
+        GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(shape), shape));
+    }
+
+    shape->parent = NULL;
+    return ID_TABLE_CONTINUE;
+}
+
 static int
 obj_free(rb_objspace_t *objspace, VALUE obj)
 {
@@ -3435,6 +3445,19 @@ obj_free(rb_objspace_t *objspace, VALUE obj)
             RB_DEBUG_COUNTER_INC(obj_obj_transient);
         }
         else {
+            // A shape can be collected before an object is collected (if both
+            // happened to be garbage at the same time), so when we look up the shape, _do not_
+            // assert that the shape is an IMEMO because it could be null
+            rb_shape_t *shape = rb_shape_get_shape_by_id_without_assertion(ROBJECT_SHAPE_ID(obj));
+            if (shape) {
+                VALUE klass = RBASIC_CLASS(obj);
+
+                // Increment max_iv_count if applicable, used to determine size pool allocation
+                uint32_t num_of_ivs = shape->iv_count;
+                if (RCLASS_EXT(klass)->max_iv_count < num_of_ivs) {
+                    RCLASS_EXT(klass)->max_iv_count = num_of_ivs;
+                }
+            }
             xfree(RANY(obj)->as.object.as.heap.ivptr);
             RB_DEBUG_COUNTER_INC(obj_obj_ptr);
         }
@@ -3449,9 +3472,6 @@ obj_free(rb_objspace_t *objspace, VALUE obj)
         if (RCLASS_CONST_TBL(obj)) {
            rb_free_const_table(RCLASS_CONST_TBL(obj));
         }
-        if (RCLASS_IV_INDEX_TBL(obj)) {
-            iv_index_tbl_free(RCLASS_IV_INDEX_TBL(obj));
-        }
         if (RCLASS_CVC_TBL(obj)) {
             rb_id_table_foreach_values(RCLASS_CVC_TBL(obj), cvar_table_free_i, NULL);
             rb_id_table_free(RCLASS_CVC_TBL(obj));
@@ -3728,6 +3748,37 @@ obj_free(rb_objspace_t *objspace, VALUE obj)
           case imemo_constcache:
             RB_DEBUG_COUNTER_INC(obj_imemo_constcache);
             break;
+          case imemo_shape:
+            {
+                rb_shape_t *shape = (rb_shape_t *)obj;
+                rb_shape_t *parent = shape->parent;
+
+                if (parent) {
+                    RUBY_ASSERT(IMEMO_TYPE_P(parent, imemo_shape));
+                    RUBY_ASSERT(parent->edges);
+                    VALUE res; // Only used to temporarily store lookup value
+                    if (rb_id_table_lookup(parent->edges, shape->edge_name, &res)) {
+                        if ((rb_shape_t *)res == shape) {
+                            rb_id_table_delete(parent->edges, shape->edge_name);
+                        }
+                    }
+                    else {
+                        rb_bug("Edge %s should exist", rb_id2name(shape->edge_name));
+                    }
+                }
+                if (shape->edges) {
+                    rb_id_table_foreach_values(shape->edges, remove_child_shapes_parent, NULL);
+                    rb_id_table_free(shape->edges);
+                    shape->edges = NULL;
+                }
+
+                shape->parent = NULL;
+
+                rb_shape_set_shape_by_id(SHAPE_ID(shape), NULL);
+                RB_DEBUG_COUNTER_INC(obj_imemo_shape);
+                break;
+            }
         }
         return TRUE;
@@ -4873,10 +4924,6 @@ obj_memsize_of(VALUE obj, int use_all_types)
         if (RCLASS_CVC_TBL(obj)) {
             size += rb_id_table_memsize(RCLASS_CVC_TBL(obj));
         }
-        if (RCLASS_IV_INDEX_TBL(obj)) {
-            // TODO: more correct value
-            size += st_memsize(RCLASS_IV_INDEX_TBL(obj));
-        }
         if (RCLASS_EXT(obj)->iv_tbl) {
             size += st_memsize(RCLASS_EXT(obj)->iv_tbl);
         }
@@ -7154,6 +7201,21 @@ gc_mark_imemo(rb_objspace_t *objspace, VALUE obj)
             const struct rb_callcache *cc = (const struct rb_callcache *)obj;
             // should not mark klass here
             gc_mark(objspace, (VALUE)vm_cc_cme(cc));
+
+            // Check it's an attr_(reader|writer)
+            if (cc->cme_ && (cc->cme_->def->type == VM_METHOD_TYPE_ATTRSET ||
+                        cc->cme_->def->type == VM_METHOD_TYPE_IVAR)) {
+                shape_id_t source_shape_id = vm_cc_attr_index_source_shape_id(cc);
+                shape_id_t dest_shape_id = vm_cc_attr_index_dest_shape_id(cc);
+                if (source_shape_id != INVALID_SHAPE_ID) {
+                    rb_shape_t *shape = rb_shape_get_shape_by_id(source_shape_id);
+                    rb_gc_mark((VALUE)shape);
+                }
+                if (dest_shape_id != INVALID_SHAPE_ID) {
+                    rb_shape_t *shape = rb_shape_get_shape_by_id(dest_shape_id);
+                    rb_gc_mark((VALUE)shape);
+                }
+            }
         }
         return;
       case imemo_constcache:
@@ -7162,6 +7224,14 @@ gc_mark_imemo(rb_objspace_t *objspace, VALUE obj)
             gc_mark(objspace, ice->value);
         }
         return;
+      case imemo_shape:
+        {
+            rb_shape_t *shape = (rb_shape_t *)obj;
+            if (shape->edges) {
+                mark_m_tbl(objspace, shape->edges);
+            }
+        }
+        return;
 #if VM_CHECK_MODE > 0
       default:
         VM_UNREACHABLE(gc_mark_imemo);
@@ -9765,6 +9835,10 @@ gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj)
     GC_ASSERT(!SPECIAL_CONST_P(obj));

     switch (BUILTIN_TYPE(obj)) {
+      case T_IMEMO:
+        if (IMEMO_TYPE_P(obj, imemo_shape)) {
+            return FALSE;
+        }
       case T_NONE:
       case T_NIL:
       case T_MOVED:
@@ -9778,7 +9852,6 @@ gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj)
       case T_STRING:
       case T_OBJECT:
      case T_FLOAT:
-      case T_IMEMO:
       case T_ARRAY:
       case T_BIGNUM:
       case T_ICLASS:
@@ -10178,6 +10251,38 @@ gc_update_values(rb_objspace_t *objspace, long n, VALUE *values)
     }
 }

+static enum rb_id_table_iterator_result
+check_id_table_move(VALUE value, void *data)
+{
+    rb_objspace_t *objspace = (rb_objspace_t *)data;
+
+    if (gc_object_moved_p(objspace, (VALUE)value)) {
+        return ID_TABLE_REPLACE;
+    }
+
+    return ID_TABLE_CONTINUE;
+}
+
+static enum rb_id_table_iterator_result
+update_id_table(VALUE *value, void *data, int existing)
+{
+    rb_objspace_t *objspace = (rb_objspace_t *)data;
+
+    if (gc_object_moved_p(objspace, (VALUE)*value)) {
+        *value = rb_gc_location((VALUE)*value);
+    }
+
+    return ID_TABLE_CONTINUE;
+}
+
+static void
+update_m_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
+{
+    if (tbl) {
+        rb_id_table_foreach_values_with_replace(tbl, check_id_table_move, update_id_table, objspace);
+    }
+}
+
 static void
 gc_ref_update_imemo(rb_objspace_t *objspace, VALUE obj)
 {
@@ -10250,24 +10355,23 @@ gc_ref_update_imemo(rb_objspace_t *objspace, VALUE obj)
       case imemo_tmpbuf:
       case imemo_callinfo:
         break;
+      case imemo_shape:
+        {
+            rb_shape_t * shape = (rb_shape_t *)obj;
+            if(shape->edges) {
+                update_m_tbl(objspace, shape->edges);
+            }
+
+            if (shape->parent) {
+                shape->parent = (rb_shape_t *)rb_gc_location((VALUE)shape->parent);
+            }
+        }
+        break;
       default:
         rb_bug("not reachable %d", imemo_type(obj));
         break;
     }
 }

-static enum rb_id_table_iterator_result
-check_id_table_move(VALUE value, void *data)
-{
-    rb_objspace_t *objspace = (rb_objspace_t *)data;
-
-    if (gc_object_moved_p(objspace, (VALUE)value)) {
-        return ID_TABLE_REPLACE;
-    }
-
-    return ID_TABLE_CONTINUE;
-}
-
 /* Returns the new location of an object, if it moved.  Otherwise returns
  * the existing location. */
 VALUE
@@ -10300,26 +10404,6 @@ rb_gc_location(VALUE value)
     return destination;
 }

-static enum rb_id_table_iterator_result
-update_id_table(VALUE *value, void *data, int existing)
-{
-    rb_objspace_t *objspace = (rb_objspace_t *)data;
-
-    if (gc_object_moved_p(objspace, (VALUE)*value)) {
-        *value = rb_gc_location((VALUE)*value);
-    }
-
-    return ID_TABLE_CONTINUE;
-}
-
-static void
-update_m_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
-{
-    if (tbl) {
-        rb_id_table_foreach_values_with_replace(tbl, check_id_table_move, update_id_table, objspace);
-    }
-}
-
 static enum rb_id_table_iterator_result
 update_cc_tbl_i(VALUE ccs_ptr, void *data)
 {
@@ -10407,15 +10491,6 @@ update_subclass_entries(rb_objspace_t *objspace, rb_subclass_entry_t *entry)
     }
 }

-static int
-update_iv_index_tbl_i(st_data_t key, st_data_t value, st_data_t arg)
-{
-    rb_objspace_t *objspace = (rb_objspace_t *)arg;
-    struct rb_iv_index_tbl_entry *ent = (struct rb_iv_index_tbl_entry *)value;
-    UPDATE_IF_MOVED(objspace, ent->class_value);
-    return ST_CONTINUE;
-}
-
 static void
 update_class_ext(rb_objspace_t *objspace, rb_classext_t *ext)
 {
@@ -10423,11 +10498,6 @@ update_class_ext(rb_objspace_t *objspace, rb_classext_t *ext)
     UPDATE_IF_MOVED(objspace, ext->includer);
     UPDATE_IF_MOVED(objspace, ext->refined_class);
     update_subclass_entries(objspace, ext->subclasses);
-
-    // ext->iv_index_tbl
-    if (ext->iv_index_tbl) {
-        st_foreach(ext->iv_index_tbl, update_iv_index_tbl_i, (st_data_t)objspace);
-    }
 }

 static void
@@ -10669,6 +10739,8 @@ gc_update_references(rb_objspace_t *objspace)

     struct heap_page *page = NULL;

+    rb_vm_update_references(vm);
+
     for (int i = 0; i < SIZE_POOL_COUNT; i++) {
         bool should_set_mark_bits = TRUE;
         rb_size_pool_t *size_pool = &size_pools[i];
@@ -10687,7 +10759,6 @@ gc_update_references(rb_objspace_t *objspace)
             }
         }
     }
-    rb_vm_update_references(vm);
     rb_transient_heap_update_references();
     rb_gc_update_global_tbl();
     global_symbols.ids = rb_gc_location(global_symbols.ids);


@@ -46,7 +46,6 @@
 #define ROBJECT_EMBED ROBJECT_EMBED
 #define ROBJECT_NUMIV ROBJECT_NUMIV
 #define ROBJECT_IVPTR ROBJECT_IVPTR
-#define ROBJECT_IV_INDEX_TBL ROBJECT_IV_INDEX_TBL
 /** @endcond */

 /**
@@ -132,7 +131,7 @@ struct RObject {
              *
              * This is a shortcut for `RCLASS_IV_INDEX_TBL(rb_obj_class(obj))`.
              */
-            struct st_table *iv_index_tbl;
+            struct rb_id_table *iv_index_tbl;
         } heap;

 #if USE_RVARGC


@@ -941,21 +941,8 @@ RB_OBJ_FREEZE_RAW(VALUE obj)
     RB_FL_SET_RAW(obj, RUBY_FL_FREEZE);
 }

-/**
- * Prevents further modifications to the given object.  ::rb_eFrozenError shall
- * be raised if modification is attempted.
- *
- * @param[out]  x  Object in question.
- */
-static inline void
-rb_obj_freeze_inline(VALUE x)
-{
-    if (RB_FL_ABLE(x)) {
-        RB_OBJ_FREEZE_RAW(x);
-        if (RBASIC_CLASS(x) && !(RBASIC(x)->flags & RUBY_FL_SINGLETON)) {
-            rb_freeze_singleton_class(x);
-        }
-    }
-}
+RUBY_SYMBOL_EXPORT_BEGIN
+void rb_obj_freeze_inline(VALUE obj);
+RUBY_SYMBOL_EXPORT_END

 #endif /* RBIMPL_FL_TYPE_H */


@@ -77,6 +77,7 @@ rb_call_inits(void)
     CALL(vm_stack_canary);
     CALL(ast);
     CALL(gc_stress);
+    CALL(shape);

     // enable builtin loading
     CALL(builtin);


@@ -48,9 +48,6 @@
 #undef RHASH_TBL
 #undef RHASH_EMPTY_P

-/* internal/object.h */
-#undef ROBJECT_IV_INDEX_TBL
-
 /* internal/struct.h */
 #undef RSTRUCT_LEN
 #undef RSTRUCT_PTR


@@ -14,6 +14,7 @@
 #include "ruby/internal/stdbool.h"     /* for bool */
 #include "ruby/intern.h"          /* for rb_alloc_func_t */
 #include "ruby/ruby.h"            /* for struct RBasic */
+#include "shape.h"

 #ifdef RCLASS_SUPER
 # undef RCLASS_SUPER
@@ -26,9 +27,9 @@ struct rb_subclass_entry {
 };

 struct rb_iv_index_tbl_entry {
-    uint32_t index;
-    rb_serial_t class_serial;
-    VALUE class_value;
+    uint32_t idx;
+    shape_id_t source_shape_id;
+    shape_id_t dest_shape_id;
 };

 struct rb_cvar_class_tbl_entry {
@@ -38,7 +39,6 @@ struct rb_cvar_class_tbl_entry {
 };

 struct rb_classext_struct {
-    struct st_table *iv_index_tbl; // ID -> struct rb_iv_index_tbl_entry
     struct st_table *iv_tbl;
 #if SIZEOF_SERIAL_T == SIZEOF_VALUE /* otherwise m_tbl is in struct RClass */
     struct rb_id_table *m_tbl;
@@ -64,6 +64,8 @@ struct rb_classext_struct {
     const VALUE refined_class;
     rb_alloc_func_t allocator;
     const VALUE includer;
+    uint32_t max_iv_count;
+    uint16_t shape_id;
 };

 struct RClass {
@@ -102,7 +104,6 @@ typedef struct rb_classext_struct rb_classext_t;
 #define RCLASS_CALLABLE_M_TBL(c) (RCLASS_EXT(c)->callable_m_tbl)
 #define RCLASS_CC_TBL(c) (RCLASS_EXT(c)->cc_tbl)
 #define RCLASS_CVC_TBL(c) (RCLASS_EXT(c)->cvc_tbl)
-#define RCLASS_IV_INDEX_TBL(c) (RCLASS_EXT(c)->iv_index_tbl)
 #define RCLASS_ORIGIN(c) (RCLASS_EXT(c)->origin_)
 #define RCLASS_REFINED_CLASS(c) (RCLASS_EXT(c)->refined_class)
 #if SIZEOF_SERIAL_T == SIZEOF_VALUE


@@ -45,6 +45,7 @@ enum imemo_type {
     imemo_callinfo      = 11,
     imemo_callcache     = 12,
     imemo_constcache    = 13,
+    imemo_shape         = 14,
 };

 /* CREF (Class REFerence) is defined in method.h */


@@ -9,11 +9,6 @@
  * @brief      Internal header for Object.
  */
 #include "ruby/ruby.h"          /* for VALUE */
-#include "internal/class.h"     /* for RCLASS_IV_INDEX_TBL */
-
-#ifdef ROBJECT_IV_INDEX_TBL
-# undef ROBJECT_IV_INDEX_TBL
-#endif

 /* object.c */
 VALUE rb_class_search_ancestor(VALUE klass, VALUE super);
@@ -26,7 +21,6 @@ int rb_bool_expected(VALUE, const char *, int raise);
 static inline void RBASIC_CLEAR_CLASS(VALUE obj);
 static inline void RBASIC_SET_CLASS_RAW(VALUE obj, VALUE klass);
 static inline void RBASIC_SET_CLASS(VALUE obj, VALUE klass);
-static inline struct st_table *ROBJECT_IV_INDEX_TBL_inline(VALUE obj);

 RUBY_SYMBOL_EXPORT_BEGIN
 /* object.c (export) */
@@ -64,20 +58,4 @@ RBASIC_SET_CLASS(VALUE obj, VALUE klass)
     RBASIC_SET_CLASS_RAW(obj, klass);
     RB_OBJ_WRITTEN(obj, oldv, klass);
 }
-
-RBIMPL_ATTR_PURE()
-static inline struct st_table *
-ROBJECT_IV_INDEX_TBL_inline(VALUE obj)
-{
-    if (RB_FL_ANY_RAW(obj, ROBJECT_EMBED)) {
-        VALUE klass = rb_obj_class(obj);
-        return RCLASS_IV_INDEX_TBL(klass);
-    }
-    else {
-        const struct RObject *const ptr = ROBJECT(obj);
-        return ptr->as.heap.iv_index_tbl;
-    }
-}
-#define ROBJECT_IV_INDEX_TBL ROBJECT_IV_INDEX_TBL_inline
-
 #endif /* INTERNAL_OBJECT_H */


@@ -37,6 +37,9 @@ static inline void ROBJ_TRANSIENT_SET(VALUE obj);
 static inline void ROBJ_TRANSIENT_UNSET(VALUE obj);
 uint32_t rb_obj_ensure_iv_index_mapping(VALUE obj, ID id);

+struct gen_ivtbl;
+int rb_gen_ivtbl_get(VALUE obj, ID id, struct gen_ivtbl **ivtbl);
+
 RUBY_SYMBOL_EXPORT_BEGIN
 /* variable.c (export) */
 void rb_mark_generic_ivar(VALUE);
@@ -52,6 +55,8 @@ VALUE rb_gvar_set(ID, VALUE);
 VALUE rb_gvar_defined(ID);
 void rb_const_warn_if_deprecated(const rb_const_entry_t *, VALUE, ID);
 void rb_init_iv_list(VALUE obj);
+void rb_ensure_iv_list_size(VALUE obj, uint32_t len, uint32_t newsize);
+struct gen_ivtbl * rb_ensure_generic_iv_list_size(VALUE obj, uint32_t newsize);
 MJIT_SYMBOL_EXPORT_END

 static inline bool

iseq.c: 14 changes

@@ -233,13 +233,15 @@ rb_iseq_each_value(const rb_iseq_t *iseq, iseq_value_itr_t * func, void *data)
     // IVC entries
     for (unsigned int i = 0; i < body->ivc_size; i++, is_entries++) {
         IVC ivc = (IVC)is_entries;
-        if (ivc->entry) {
-            RUBY_ASSERT(!RB_TYPE_P(ivc->entry->class_value, T_NONE));
-
-            VALUE nv = func(data, ivc->entry->class_value);
-            if (ivc->entry->class_value != nv) {
-                ivc->entry->class_value = nv;
-            }
+        shape_id_t source_shape_id = vm_ic_attr_index_source_shape_id(ivc);
+        shape_id_t dest_shape_id = vm_ic_attr_index_dest_shape_id(ivc);
+        if (source_shape_id != INVALID_SHAPE_ID) {
+            rb_shape_t *shape = rb_shape_get_shape_by_id(source_shape_id);
+            func(data, (VALUE)shape);
+        }
+        if (dest_shape_id != INVALID_SHAPE_ID) {
+            rb_shape_t *shape = rb_shape_get_shape_by_id(dest_shape_id);
+            func(data, (VALUE)shape);
         }
     }


@@ -73,23 +73,6 @@ module RubyVM::MJIT
       src << "#undef GET_SELF\n"
       src << "#define GET_SELF() cfp_self\n"

-      # Generate merged ivar guards first if needed
-      if !status.compile_info.disable_ivar_cache && status.merge_ivar_guards_p
-        src << "    if (UNLIKELY(!(RB_TYPE_P(GET_SELF(), T_OBJECT) && (rb_serial_t)#{status.ivar_serial} == RCLASS_SERIAL(RBASIC(GET_SELF())->klass) &&"
-        if USE_RVARGC
-          src << "#{status.max_ivar_index} < ROBJECT_NUMIV(GET_SELF())" # index < ROBJECT_NUMIV(obj)
-        else
-          if status.max_ivar_index >= ROBJECT_EMBED_LEN_MAX
-            src << "#{status.max_ivar_index} < ROBJECT_NUMIV(GET_SELF())" # index < ROBJECT_NUMIV(obj) && !RB_FL_ANY_RAW(obj, ROBJECT_EMBED)
-          else
-            src << "ROBJECT_EMBED_LEN_MAX == ROBJECT_NUMIV(GET_SELF())" # index < ROBJECT_NUMIV(obj) && RB_FL_ANY_RAW(obj, ROBJECT_EMBED)
-          end
-        end
-        src << "))) {\n"
-        src << "      goto ivar_cancel;\n"
-        src << "    }\n"
-      end
-
       # Simulate `opt_pc` in setup_parameters_complex. Other PCs which may be passed by catch tables
       # are not considered since vm_exec doesn't call jit_exec for catch tables.
       if iseq.body.param.flags.has_opt
@@ -103,6 +86,13 @@ module RubyVM::MJIT
         src << "  }\n"
       end

+      # Generate merged ivar guards first if needed
+      if !status.compile_info.disable_ivar_cache && status.merge_ivar_guards_p
+        src << "  if (UNLIKELY(!(RB_TYPE_P(GET_SELF(), T_OBJECT)))) {"
+        src << "    goto ivar_cancel;\n"
+        src << "  }\n"
+      end
+
       C.fprintf(f, src)
       compile_insns(0, 0, status, iseq.body, f)
       compile_cancel_handler(f, iseq.body, status)
@@ -363,52 +353,37 @@ module RubyVM::MJIT
       ic_copy = (status.is_entries + (C.iseq_inline_storage_entry.new(operands[1]) - body.is_entries)).iv_cache
       src = +''
-      if !status.compile_info.disable_ivar_cache && ic_copy.entry
+      if !status.compile_info.disable_ivar_cache && ic_copy.source_shape_id != C.INVALID_SHAPE_ID
         # JIT: optimize away motion of sp and pc. This path does not call rb_warning() and so it's always leaf and not `handles_sp`.
         # compile_pc_and_sp(src, insn, stack_size, sp_inc, local_stack_p, next_pos)

         # JIT: prepare vm_getivar/vm_setivar arguments and variables
         src << "{\n"
         src << "    VALUE obj = GET_SELF();\n"
-        src << "    const uint32_t index = #{ic_copy.entry.index};\n"
-        if status.merge_ivar_guards_p
-          # JIT: Access ivar without checking these VM_ASSERTed prerequisites as we checked them in the beginning of `mjit_compile_body`
-          src << "    VM_ASSERT(RB_TYPE_P(obj, T_OBJECT));\n"
-          src << "    VM_ASSERT((rb_serial_t)#{ic_copy.entry.class_serial} == RCLASS_SERIAL(RBASIC(obj)->klass));\n"
-          src << "    VM_ASSERT(index < ROBJECT_NUMIV(obj));\n"
-          if insn_name == :setinstancevariable
-            if USE_RVARGC
-              src << "    if (LIKELY(!RB_OBJ_FROZEN_RAW(obj) && index < ROBJECT_NUMIV(obj))) {\n"
-              src << "        RB_OBJ_WRITE(obj, &ROBJECT_IVPTR(obj)[index], stack[#{stack_size - 1}]);\n"
-            else
-              heap_ivar_p = status.max_ivar_index >= ROBJECT_EMBED_LEN_MAX
-              src << "    if (LIKELY(!RB_OBJ_FROZEN_RAW(obj) && #{heap_ivar_p ? 'true' : 'RB_FL_ANY_RAW(obj, ROBJECT_EMBED)'})) {\n"
-              src << "        RB_OBJ_WRITE(obj, &ROBJECT(obj)->as.#{heap_ivar_p ? 'heap.ivptr[index]' : 'ary[index]'}, stack[#{stack_size - 1}]);\n"
-            end
-            src << "    }\n"
-          else
-            src << "    VALUE val;\n"
-            if USE_RVARGC
-              src << "    if (LIKELY(index < ROBJECT_NUMIV(obj) && (val = ROBJECT_IVPTR(obj)[index]) != Qundef)) {\n"
-            else
-              heap_ivar_p = status.max_ivar_index >= ROBJECT_EMBED_LEN_MAX
-              src << "    if (LIKELY(#{heap_ivar_p ? 'true' : 'RB_FL_ANY_RAW(obj, ROBJECT_EMBED)'} && (val = ROBJECT(obj)->as.#{heap_ivar_p ? 'heap.ivptr[index]' : 'ary[index]'}) != Qundef)) {\n"
-            end
-            src << "        stack[#{stack_size}] = val;\n"
-            src << "    }\n"
-          end
-        else
-          src << "    const rb_serial_t ic_serial = (rb_serial_t)#{ic_copy.entry.class_serial};\n"
-          # JIT: cache hit path of vm_getivar/vm_setivar, or cancel JIT (recompile it with exivar)
-          if insn_name == :setinstancevariable
-            src << "    if (LIKELY(RB_TYPE_P(obj, T_OBJECT) && ic_serial == RCLASS_SERIAL(RBASIC(obj)->klass) && index < ROBJECT_NUMIV(obj) && !RB_OBJ_FROZEN_RAW(obj))) {\n"
-            src << "        VALUE *ptr = ROBJECT_IVPTR(obj);\n"
-            src << "        RB_OBJ_WRITE(obj, &ptr[index], stack[#{stack_size - 1}]);\n"
-            src << "    }\n"
-          else
-            src << "    VALUE val;\n"
-            src << "    if (LIKELY(RB_TYPE_P(obj, T_OBJECT) && ic_serial == RCLASS_SERIAL(RBASIC(obj)->klass) && index < ROBJECT_NUMIV(obj) && (val = ROBJECT_IVPTR(obj)[index]) != Qundef)) {\n"
-            src << "        stack[#{stack_size}] = val;\n"
-            src << "    }\n"
-          end
-        end
+        src << "    const shape_id_t source_shape_id = (rb_serial_t)#{ic_copy.source_shape_id};\n"
+        # JIT: cache hit path of vm_getivar/vm_setivar, or cancel JIT (recompile it with exivar)
+        if insn_name == :setinstancevariable
+          src << "    const uint32_t index = #{ic_copy.attr_index - 1};\n"
+          src << "    const shape_id_t dest_shape_id = (rb_serial_t)#{ic_copy.dest_shape_id};\n"
+          src << "    if (source_shape_id == ROBJECT_SHAPE_ID(obj) && \n"
+          src << "        dest_shape_id != ROBJECT_SHAPE_ID(obj)) {\n"
+          src << "        if (UNLIKELY(index >= ROBJECT_NUMIV(obj))) {\n"
+          src << "            rb_init_iv_list(obj);\n"
+          src << "        }\n"
+          src << "        ROBJECT_SET_SHAPE_ID(obj, dest_shape_id);\n"
+          src << "        VALUE *ptr = ROBJECT_IVPTR(obj);\n"
+          src << "        RB_OBJ_WRITE(obj, &ptr[index], stack[#{stack_size - 1}]);\n"
+          src << "    }\n"
+        else
+          if ic_copy.attr_index == 0 # cache hit, but uninitialized iv
+            src << "    /* Uninitialized instance variable */\n"
+            src << "    if (source_shape_id == ROBJECT_SHAPE_ID(obj)) {\n"
+            src << "        stack[#{stack_size}] = Qnil;\n"
+            src << "    }\n"
+          else
+            src << "    const uint32_t index = #{ic_copy.attr_index - 1};\n"
+            src << "    if (source_shape_id == ROBJECT_SHAPE_ID(obj)) {\n"
+            src << "        stack[#{stack_size}] = ROBJECT_IVPTR(obj)[index];\n"
+            src << "    }\n"
+          end
+        end
@@ -419,20 +394,19 @@ module RubyVM::MJIT
           src << "  }\n"
           src << "}\n"
           return src
-        elsif insn_name == :getinstancevariable && !status.compile_info.disable_exivar_cache && ic_copy.entry
+        elsif insn_name == :getinstancevariable && !status.compile_info.disable_exivar_cache && ic_copy.source_shape_id != C.INVALID_SHAPE_ID
           # JIT: optimize away motion of sp and pc. This path does not call rb_warning() and so it's always leaf and not `handles_sp`.
           # compile_pc_and_sp(src, insn, stack_size, sp_inc, local_stack_p, next_pos)

           # JIT: prepare vm_getivar's arguments and variables
           src << "{\n"
           src << "    VALUE obj = GET_SELF();\n"
-          src << "    const rb_serial_t ic_serial = (rb_serial_t)#{ic_copy.entry.class_serial};\n"
-          src << "    const uint32_t index = #{ic_copy.entry.index};\n"
+          src << "    const shape_id_t source_shape_id = (rb_serial_t)#{ic_copy.source_shape_id};\n"
+          src << "    const uint32_t index = #{ic_copy.attr_index - 1};\n"
           # JIT: cache hit path of vm_getivar, or cancel JIT (recompile it without any ivar optimization)
           src << "    struct gen_ivtbl *ivtbl;\n"
-          src << "    VALUE val;\n"
-          src << "    if (LIKELY(FL_TEST_RAW(obj, FL_EXIVAR) && ic_serial == RCLASS_SERIAL(RBASIC(obj)->klass) && rb_ivar_generic_ivtbl_lookup(obj, &ivtbl) && index < ivtbl->numiv && (val = ivtbl->ivptr[index]) != Qundef)) {\n"
-          src << "      stack[#{stack_size}] = val;\n"
+          src << "    if (LIKELY(FL_TEST_RAW(obj, FL_EXIVAR) && source_shape_id == rb_shape_get_shape_id(obj) && rb_ivar_generic_ivtbl_lookup(obj, &ivtbl))) {\n"
+          src << "      stack[#{stack_size}] = ivtbl->ivptr[index];\n"
           src << "    }\n"
           src << "    else {\n"
           src << "      reg_cfp->pc = original_body_iseq + #{pos};\n"
@@ -832,35 +806,16 @@ module RubyVM::MJIT
     def init_ivar_compile_status(body, status)
       C.mjit_capture_is_entries(body, status.is_entries)

-      num_ivars = 0
       pos = 0
-      status.max_ivar_index = 0
-      status.ivar_serial = 0

       while pos < body.iseq_size
         insn = INSNS.fetch(C.rb_vm_insn_decode(body.iseq_encoded[pos]))
         if insn.name == :getinstancevariable || insn.name == :setinstancevariable
-          ic = body.iseq_encoded[pos+2]
-          ic_copy = (status.is_entries + (C.iseq_inline_storage_entry.new(ic) - body.is_entries)).iv_cache
-          if ic_copy.entry # Only initialized (ic_serial > 0) IVCs are optimized
-            num_ivars += 1
-
-            if status.max_ivar_index < ic_copy.entry.index
-              status.max_ivar_index = ic_copy.entry.index
-            end
-
-            if status.ivar_serial == 0
-              status.ivar_serial = ic_copy.entry.class_serial
-            elsif status.ivar_serial != ic_copy.entry.class_serial
-              # Multiple classes have used this ISeq. Give up assuming one serial.
-              status.merge_ivar_guards_p = false
-              return
-            end
-          end
+          status.merge_ivar_guards_p = true
+          return
         end
         pos += insn.len
       end
-      status.merge_ivar_guards_p = status.ivar_serial > 0 && num_ivars >= 2
     end

     # Expand simple macro that doesn't require dynamic C code.


@@ -39,6 +39,7 @@
 #include "ruby/st.h"
 #include "ruby/util.h"
 #include "builtin.h"
+#include "shape.h"

 #define BITSPERSHORT (2*CHAR_BIT)
 #define SHORTMASK ((1<<BITSPERSHORT)-1)
@@ -622,10 +623,6 @@ w_obj_each(st_data_t key, st_data_t val, st_data_t a)
         }
         return ST_CONTINUE;
     }
-    if (!ivarg->num_ivar) {
-        rb_raise(rb_eRuntimeError, "instance variable added to %"PRIsVALUE" instance",
-                 CLASS_OF(arg->obj));
-    }
     --ivarg->num_ivar;
     w_symbol(ID2SYM(id), arg->arg);
     w_object(value, arg->arg, arg->limit);
@@ -720,6 +717,7 @@ has_ivars(VALUE obj, VALUE encname, VALUE *ivobj)
 static void
 w_ivar_each(VALUE obj, st_index_t num, struct dump_call_arg *arg)
 {
+    shape_id_t shape_id = rb_shape_get_shape_id(arg->obj);
     struct w_ivar_arg ivarg = {arg, num};
     if (!num) return;
     rb_ivar_foreach(obj, w_obj_each, (st_data_t)&ivarg);
@@ -727,6 +725,10 @@ w_ivar_each(VALUE obj, st_index_t num, struct dump_call_arg *arg)
         rb_raise(rb_eRuntimeError, "instance variable removed from %"PRIsVALUE" instance",
                  CLASS_OF(arg->obj));
     }
+    if (shape_id != rb_shape_get_shape_id(arg->obj)) {
+        rb_raise(rb_eRuntimeError, "instance variable added to %"PRIsVALUE" instance",
+                 CLASS_OF(arg->obj));
+    }
 }

 static void


@@ -418,8 +418,12 @@ def lldb_inspect(debugger, target, result, val):
     elif flType == RUBY_T_IMEMO:
         # I'm not sure how to get IMEMO_MASK out of lldb. It's not in globals()
         imemo_type = (flags >> RUBY_FL_USHIFT) & 0x0F # IMEMO_MASK
         print("T_IMEMO: ", file=result)
         append_command_output(debugger, "p (enum imemo_type) %d" % imemo_type, result)
-        append_command_output(debugger, "p *(struct MEMO *) %0#x" % val.GetValueAsUnsigned(), result)
+        if imemo_type == imemo_shape:
+            append_command_output(debugger, "p *(rb_shape_t *) %0#x" % val.GetValueAsUnsigned(), result)
+        else:
+            append_command_output(debugger, "p *(struct MEMO *) %0#x" % val.GetValueAsUnsigned(), result)
     elif flType == RUBY_T_STRUCT:
         tRTypedData = target.FindFirstType("struct RStruct").GetPointerType()


@@ -5,6 +5,10 @@ module RubyVM::MJIT
   C = Object.new

   class << C
+    def SHAPE_BITS
+      RubyVM::Shape::SHAPE_BITS
+    end
+
     def ROBJECT_EMBED_LEN_MAX
       Primitive.cexpr! 'INT2NUM(RBIMPL_EMBED_LEN_MAX_OF(VALUE))'
     end
@@ -165,6 +169,14 @@ module RubyVM::MJIT
       Primitive.cexpr! %q{ INT2NUM(VM_METHOD_TYPE_ISEQ) }
     end

+    def C.INVALID_SHAPE_ID
+      Primitive.cexpr! %q{ ULONG2NUM(INVALID_SHAPE_ID) }
+    end
+
+    def C.SHAPE_MASK
+      Primitive.cexpr! %q{ ULONG2NUM(SHAPE_MASK) }
+    end
+
     def C.CALL_DATA
       @CALL_DATA ||= self.rb_call_data
     end
@@ -181,6 +193,10 @@ module RubyVM::MJIT
       @RB_BUILTIN ||= self.rb_builtin_function
     end

+    def C.attr_index_t
+      @attr_index_t ||= CType::Immediate.parse("uint32_t")
+    end
+
     def C.compile_branch
       @compile_branch ||= CType::Struct.new(
         "compile_branch", Primitive.cexpr!("SIZEOF(struct compile_branch)"),
@@ -201,7 +217,6 @@ module RubyVM::MJIT
         compiled_id: [CType::Immediate.parse("int"), Primitive.cexpr!("OFFSETOF((*((struct compile_status *)NULL)), compiled_id)")],
         compile_info: [CType::Pointer.new { self.rb_mjit_compile_info }, Primitive.cexpr!("OFFSETOF((*((struct compile_status *)NULL)), compile_info)")],
         merge_ivar_guards_p: [self._Bool, Primitive.cexpr!("OFFSETOF((*((struct compile_status *)NULL)), merge_ivar_guards_p)")],
-        ivar_serial: [self.rb_serial_t, Primitive.cexpr!("OFFSETOF((*((struct compile_status *)NULL)), ivar_serial)")],
         max_ivar_index: [CType::Immediate.parse("size_t"), Primitive.cexpr!("OFFSETOF((*((struct compile_status *)NULL)), max_ivar_index)")],
         inlined_iseqs: [CType::Pointer.new { CType::Pointer.new { self.rb_iseq_constant_body } }, Primitive.cexpr!("OFFSETOF((*((struct compile_status *)NULL)), inlined_iseqs)")],
         inline_context: [self.inlined_call_context, Primitive.cexpr!("OFFSETOF((*((struct compile_status *)NULL)), inline_context)")],
@@ -240,7 +255,9 @@ module RubyVM::MJIT
     def C.iseq_inline_iv_cache_entry
       @iseq_inline_iv_cache_entry ||= CType::Struct.new(
         "iseq_inline_iv_cache_entry", Primitive.cexpr!("SIZEOF(struct iseq_inline_iv_cache_entry)"),
-        entry: [CType::Pointer.new { self.rb_iv_index_tbl_entry }, Primitive.cexpr!("OFFSETOF((*((struct iseq_inline_iv_cache_entry *)NULL)), entry)")],
+        source_shape_id: [self.shape_id_t, Primitive.cexpr!("OFFSETOF((*((struct iseq_inline_iv_cache_entry *)NULL)), source_shape_id)")],
+        dest_shape_id: [self.shape_id_t, Primitive.cexpr!("OFFSETOF((*((struct iseq_inline_iv_cache_entry *)NULL)), dest_shape_id)")],
+        attr_index: [self.attr_index_t, Primitive.cexpr!("OFFSETOF((*((struct iseq_inline_iv_cache_entry *)NULL)), attr_index)")],
       )
     end
@@ -313,7 +330,11 @@ module RubyVM::MJIT
         call_: [self.vm_call_handler, Primitive.cexpr!("OFFSETOF((*((struct rb_callcache *)NULL)), call_)")],
         aux_: [CType::Union.new(
           "", Primitive.cexpr!("SIZEOF(((struct rb_callcache *)NULL)->aux_)"),
-          attr_index: CType::Immediate.parse("unsigned int"),
+          attr: CType::Struct.new(
+            "", Primitive.cexpr!("SIZEOF(((struct rb_callcache *)NULL)->aux_.attr)"),
+            index: [self.attr_index_t, Primitive.cexpr!("OFFSETOF(((struct rb_callcache *)NULL)->aux_.attr, index)")],
+            dest_shape_id: [self.shape_id_t, Primitive.cexpr!("OFFSETOF(((struct rb_callcache *)NULL)->aux_.attr, dest_shape_id)")],
+          ),
           method_missing_reason: self.method_missing_reason,
           v: self.VALUE,
         ), Primitive.cexpr!("OFFSETOF((*((struct rb_callcache *)NULL)), aux_)")],
@@ -502,9 +523,9 @@ module RubyVM::MJIT
     def C.rb_iv_index_tbl_entry
       @rb_iv_index_tbl_entry ||= CType::Struct.new(
         "rb_iv_index_tbl_entry", Primitive.cexpr!("SIZEOF(struct rb_iv_index_tbl_entry)"),
-        index: [CType::Immediate.parse("uint32_t"), Primitive.cexpr!("OFFSETOF((*((struct rb_iv_index_tbl_entry *)NULL)), index)")],
-        class_serial: [self.rb_serial_t, Primitive.cexpr!("OFFSETOF((*((struct rb_iv_index_tbl_entry *)NULL)), class_serial)")],
-        class_value: [self.VALUE, Primitive.cexpr!("OFFSETOF((*((struct rb_iv_index_tbl_entry *)NULL)), class_value)")],
+        idx: [CType::Immediate.parse("uint32_t"), Primitive.cexpr!("OFFSETOF((*((struct rb_iv_index_tbl_entry *)NULL)), idx)")],
+        source_shape_id: [self.shape_id_t, Primitive.cexpr!("OFFSETOF((*((struct rb_iv_index_tbl_entry *)NULL)), source_shape_id)")],
+        dest_shape_id: [self.shape_id_t, Primitive.cexpr!("OFFSETOF((*((struct rb_iv_index_tbl_entry *)NULL)), dest_shape_id)")],
       )
     end
@@ -577,6 +598,10 @@ module RubyVM::MJIT
       @VALUE ||= CType::Immediate.find(Primitive.cexpr!("SIZEOF(VALUE)"), Primitive.cexpr!("SIGNED_TYPE_P(VALUE)"))
     end

+    def C.shape_id_t
+      @shape_id_t ||= CType::Immediate.find(Primitive.cexpr!("SIZEOF(shape_id_t)"), Primitive.cexpr!("SIGNED_TYPE_P(shape_id_t)"))
+    end
+
     def C._Bool
       CType::Bool.new
     end


@@ -8,6 +8,7 @@
 #include "builtin.h"
 #include "mjit.h"
 #include "mjit_unit.h"
+#include "shape.h"

 // Macros to check if a position is already compiled using compile_status.stack_size_for_pos
 #define NOT_COMPILED_STACK_SIZE -1
@@ -48,7 +49,6 @@ struct compile_status {
     // Mutated optimization levels
     struct rb_mjit_compile_info *compile_info;
     bool merge_ivar_guards_p; // If true, merge guards of ivar accesses
-    rb_serial_t ivar_serial; // ic_serial of IVC in is_entries (used only when merge_ivar_guards_p)
     size_t max_ivar_index; // Max IVC index in is_entries (used only when merge_ivar_guards_p)
     // If `inlined_iseqs[pos]` is not NULL, `mjit_compile_body` tries to inline ISeq there.
     const struct rb_iseq_constant_body **inlined_iseqs;


@@ -39,6 +39,7 @@
 #include "ruby/util.h"
 #include "ruby/assert.h"
 #include "builtin.h"
+#include "shape.h"

 /*!
  * \addtogroup object
@@ -271,9 +272,33 @@ rb_obj_copy_ivar(VALUE dest, VALUE obj)
     VALUE *src_buf = ROBJECT_IVPTR(obj);
     uint32_t dest_len = ROBJECT_NUMIV(dest);
     uint32_t src_len = ROBJECT_NUMIV(obj);
-    uint32_t len = dest_len < src_len ? dest_len : src_len;
-
-    MEMCPY(dest_buf, src_buf, VALUE, len);
+    uint32_t max_len = dest_len < src_len ? src_len : dest_len;
+
+    rb_ensure_iv_list_size(dest, dest_len, max_len);
+
+    dest_len = ROBJECT_NUMIV(dest);
+    uint32_t min_len = dest_len > src_len ? src_len : dest_len;
+
+    if (RBASIC(obj)->flags & ROBJECT_EMBED) {
+        src_buf = ROBJECT(obj)->as.ary;
+
+        // embedded -> embedded
+        if (RBASIC(dest)->flags & ROBJECT_EMBED) {
+            dest_buf = ROBJECT(dest)->as.ary;
+        }
+        // embedded -> extended
+        else {
+            dest_buf = ROBJECT(dest)->as.heap.ivptr;
+        }
+    }
+    // extended -> extended
+    else {
+        RUBY_ASSERT(!(RBASIC(dest)->flags & ROBJECT_EMBED));
+        dest_buf = ROBJECT(dest)->as.heap.ivptr;
+        src_buf = ROBJECT(obj)->as.heap.ivptr;
+    }
+
+    MEMCPY(dest_buf, src_buf, VALUE, min_len);
 }

 static void
@@ -283,10 +308,23 @@ init_copy(VALUE dest, VALUE obj)
         rb_raise(rb_eTypeError, "[bug] frozen object (%s) allocated", rb_obj_classname(dest));
     }
     RBASIC(dest)->flags &= ~(T_MASK|FL_EXIVAR);
+    // Copies the shape id from obj to dest
     RBASIC(dest)->flags |= RBASIC(obj)->flags & (T_MASK|FL_EXIVAR);
     rb_copy_wb_protected_attribute(dest, obj);
     rb_copy_generic_ivar(dest, obj);
     rb_gc_copy_finalizer(dest, obj);
+
+    rb_shape_t *shape_to_set = rb_shape_get_shape(obj);
+
+    // If the object is frozen, the "dup"'d object will *not* be frozen,
+    // so we need to copy the frozen shape's parent to the new object.
+    if (rb_shape_frozen_shape_p(shape_to_set)) {
+        shape_to_set = shape_to_set->parent;
+    }
+
+    // shape ids are different
+    rb_shape_set_shape(dest, shape_to_set);
+
     if (RB_TYPE_P(obj, T_OBJECT)) {
         rb_obj_copy_ivar(dest, obj);
     }
@@ -392,6 +430,9 @@ mutable_obj_clone(VALUE obj, VALUE kwfreeze)
       case Qnil:
         rb_funcall(clone, id_init_clone, 1, obj);
         RBASIC(clone)->flags |= RBASIC(obj)->flags & FL_FREEZE;
+        if (RB_OBJ_FROZEN(obj)) {
+            rb_shape_transition_shape_frozen(clone);
+        }
         break;
       case Qtrue:
         {
@@ -407,6 +448,7 @@ mutable_obj_clone(VALUE obj, VALUE kwfreeze)
         argv[1] = freeze_true_hash;
         rb_funcallv_kw(clone, id_init_clone, 2, argv, RB_PASS_KEYWORDS);
         RBASIC(clone)->flags |= FL_FREEZE;
+        rb_shape_transition_shape_frozen(clone);
         break;
     }
   case Qfalse:


@@ -289,11 +289,13 @@ rb_ractor_id(const rb_ractor_t *r)
 #if RACTOR_CHECK_MODE > 0
 uint32_t rb_ractor_current_id(void);

+// If ractor check mode is enabled, shape bits needs to be smaller
+STATIC_ASSERT(shape_bits, SHAPE_BITS == 16);
+
 static inline void
 rb_ractor_setup_belonging_to(VALUE obj, uint32_t rid)
 {
-    VALUE flags = RBASIC(obj)->flags & 0xffffffff; // 4B
+    VALUE flags = RBASIC(obj)->flags & 0xffff0000ffffffff; // 4B
     RBASIC(obj)->flags = flags | ((VALUE)rid << 32);
 }

@@ -310,7 +312,7 @@ rb_ractor_belonging(VALUE obj)
         return 0;
     }
     else {
-        return RBASIC(obj)->flags >> 32;
+        return RBASIC(obj)->flags >> 32 & 0xFFFF;
     }
 }

shape.c: 571 lines (new file)

@@ -0,0 +1,571 @@
#include "vm_core.h"
#include "vm_sync.h"
#include "shape.h"
#include "internal/class.h"
#include "internal/symbol.h"
#include "internal/variable.h"
#include <stdbool.h>
/*
* Shape getters
*/
static rb_shape_t*
rb_shape_get_root_shape(void) {
return GET_VM()->root_shape;
}
static rb_shape_t*
rb_shape_get_frozen_root_shape(void) {
return GET_VM()->frozen_root_shape;
}
bool
rb_shape_root_shape_p(rb_shape_t* shape) {
return shape == rb_shape_get_root_shape();
}
rb_shape_t*
rb_shape_get_shape_by_id(shape_id_t shape_id)
{
RUBY_ASSERT(shape_id != INVALID_SHAPE_ID);
rb_vm_t *vm = GET_VM();
rb_shape_t *shape = vm->shape_list[shape_id];
RUBY_ASSERT(IMEMO_TYPE_P(shape, imemo_shape));
return shape;
}
rb_shape_t*
rb_shape_get_shape_by_id_without_assertion(shape_id_t shape_id)
{
RUBY_ASSERT(shape_id != INVALID_SHAPE_ID);
rb_vm_t *vm = GET_VM();
rb_shape_t *shape = vm->shape_list[shape_id];
return shape;
}
static inline shape_id_t
shape_set_shape_id(rb_shape_t *shape, shape_id_t id) {
VALUE flags = shape->flags & ~((uint64_t)SHAPE_MASK << 16);
return (shape_id_t)(shape->flags = (flags | ((VALUE)id << SHAPE_FLAG_SHIFT)));
}
#if !SHAPE_IN_BASIC_FLAGS
static inline shape_id_t
RCLASS_SHAPE_ID(VALUE obj)
{
return RCLASS_EXT(obj)->shape_id;
}
shape_id_t rb_generic_shape_id(VALUE obj);
#endif
shape_id_t
rb_shape_get_shape_id(VALUE obj)
{
if (RB_SPECIAL_CONST_P(obj)) {
return SHAPE_ID(rb_shape_get_frozen_root_shape());
}
#if SHAPE_IN_BASIC_FLAGS
return RBASIC_SHAPE_ID(obj);
#else
switch (BUILTIN_TYPE(obj)) {
case T_OBJECT:
return ROBJECT_SHAPE_ID(obj);
break;
case T_CLASS:
case T_MODULE:
return RCLASS_SHAPE_ID(obj);
default:
return rb_generic_shape_id(obj);
}
#endif
}
rb_shape_t*
rb_shape_get_shape(VALUE obj)
{
return rb_shape_get_shape_by_id(rb_shape_get_shape_id(obj));
}
static shape_id_t
get_next_shape_id(void)
{
rb_vm_t *vm = GET_VM();
vm->max_shape_count++;
return vm->max_shape_count;
}
static rb_shape_t *
rb_shape_lookup_id(rb_shape_t* shape, ID id, enum shape_type shape_type) {
while (shape->parent) {
if (shape->edge_name == id) {
// If the shape type is different, we don't
// want this to count as a "found" ID
if (shape_type == (enum shape_type)shape->type) {
return shape;
}
else {
return NULL;
}
}
shape = shape->parent;
}
return NULL;
}
static rb_shape_t*
get_next_shape_internal(rb_shape_t* shape, ID id, VALUE obj, enum shape_type shape_type)
{
rb_shape_t *res = NULL;
RUBY_ASSERT(SHAPE_FROZEN != (enum shape_type)shape->type);
RB_VM_LOCK_ENTER();
{
if (rb_shape_lookup_id(shape, id, shape_type)) {
// If shape already contains the ivar that is being set, we'll return shape
res = shape;
}
else {
if (!shape->edges) {
shape->edges = rb_id_table_create(0);
}
// Look up the shape in edges - if there's already an edge and a
// corresponding shape for it, we can return that. Otherwise we need a new shape.
if (!rb_id_table_lookup(shape->edges, id, (VALUE *)&res) || rb_objspace_garbage_object_p((VALUE)res)) {
// If an edge exists but its shape has been collected as garbage, detach it so it can be recreated below
if (res) {
rb_id_table_delete(shape->edges, id);
res->parent = NULL;
}
shape_id_t next_shape_id = get_next_shape_id();
if (next_shape_id == MAX_SHAPE_ID) {
// TODO: Make an OutOfShapesError ??
rb_bug("Out of shapes\n");
}
else {
RUBY_ASSERT(next_shape_id < MAX_SHAPE_ID);
rb_shape_t * new_shape = rb_shape_alloc(next_shape_id,
id,
shape);
new_shape->type = (uint8_t)shape_type;
switch(shape_type) {
case SHAPE_FROZEN:
RB_OBJ_FREEZE_RAW((VALUE)new_shape);
break;
case SHAPE_IVAR:
new_shape->iv_count = new_shape->parent->iv_count + 1;
// Check if we should update max_iv_count on the object's class
if (BUILTIN_TYPE(obj) == T_OBJECT) {
VALUE klass = rb_obj_class(obj);
if (new_shape->iv_count > RCLASS_EXT(klass)->max_iv_count) {
RCLASS_EXT(klass)->max_iv_count = new_shape->iv_count;
}
}
break;
case SHAPE_IVAR_UNDEF:
new_shape->iv_count = new_shape->parent->iv_count;
break;
case SHAPE_ROOT:
rb_bug("Unreachable");
break;
}
rb_id_table_insert(shape->edges, id, (VALUE)new_shape);
RB_OBJ_WRITTEN((VALUE)shape, Qundef, (VALUE)new_shape);
rb_shape_set_shape_by_id(next_shape_id, new_shape);
res = new_shape;
}
}
}
}
RB_VM_LOCK_LEAVE();
return res;
}
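In Ruby pseudocode, the transition logic above amounts to: walk the ancestor chain for an existing edge of the same type, otherwise reuse or create a child shape. A self-contained sketch (illustrative names, not the C API):

```ruby
Shape = Struct.new(:id, :parent, :edge_name, :type, :iv_count, :edges)

$next_shape_id = 0
def next_shape(shape, edge_name, type)
  # Already present in this shape's chain (same edge, same type)? Stay put.
  s = shape
  while s.parent
    return shape if s.edge_name == edge_name && s.type == type
    s = s.parent
  end
  # Otherwise reuse the existing edge, or allocate a child shape for it.
  shape.edges[edge_name] ||= begin
    child = Shape.new($next_shape_id += 1, shape, edge_name, type, 0, {})
    child.iv_count = shape.iv_count + (type == :ivar ? 1 : 0)
    child
  end
end

root = Shape.new(0, nil, nil, :root, 0, {})
a  = next_shape(root, :@a, :ivar)  # new shape, iv_count == 1
a2 = next_shape(a, :@a, :ivar)     # a2.equal?(a) => true, no new shape
```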
MJIT_FUNC_EXPORTED int
rb_shape_frozen_shape_p(rb_shape_t* shape)
{
return SHAPE_FROZEN == (enum shape_type)shape->type;
}
void
rb_shape_transition_shape_remove_ivar(VALUE obj, ID id, rb_shape_t *shape)
{
rb_shape_t* next_shape = get_next_shape_internal(shape, id, obj, SHAPE_IVAR_UNDEF);
if (shape == next_shape) {
return;
}
RUBY_ASSERT(!rb_objspace_garbage_object_p((VALUE)next_shape));
rb_shape_set_shape(obj, next_shape);
}
void
rb_shape_transition_shape_frozen(VALUE obj)
{
rb_shape_t* shape = rb_shape_get_shape(obj);
RUBY_ASSERT(shape);
RUBY_ASSERT(RB_OBJ_FROZEN(obj));
if (rb_shape_frozen_shape_p(shape)) {
return;
}
rb_shape_t* next_shape;
if (shape == rb_shape_get_root_shape()) {
switch(BUILTIN_TYPE(obj)) {
case T_OBJECT:
case T_CLASS:
case T_MODULE:
break;
default:
return;
}
next_shape = rb_shape_get_frozen_root_shape();
}
else {
static ID id_frozen;
if (!id_frozen) {
id_frozen = rb_make_internal_id();
}
next_shape = get_next_shape_internal(shape, (ID)id_frozen, obj, SHAPE_FROZEN);
}
RUBY_ASSERT(next_shape);
rb_shape_set_shape(obj, next_shape);
}
void
rb_shape_transition_shape(VALUE obj, ID id, rb_shape_t *shape)
{
rb_shape_t* next_shape = rb_shape_get_next(shape, obj, id);
if (shape == next_shape) {
return;
}
RUBY_ASSERT(!rb_objspace_garbage_object_p((VALUE)next_shape));
rb_shape_set_shape(obj, next_shape);
}
rb_shape_t*
rb_shape_get_next(rb_shape_t* shape, VALUE obj, ID id)
{
return get_next_shape_internal(shape, id, obj, SHAPE_IVAR);
}
bool
rb_shape_get_iv_index(rb_shape_t * shape, ID id, attr_index_t *value) {
while (shape->parent) {
if (shape->edge_name == id) {
enum shape_type shape_type;
shape_type = (enum shape_type)shape->type;
switch(shape_type) {
case SHAPE_IVAR:
RUBY_ASSERT(shape->iv_count > 0);
*value = shape->iv_count - 1;
return true;
case SHAPE_IVAR_UNDEF:
case SHAPE_ROOT:
return false;
case SHAPE_FROZEN:
rb_bug("Ivar should not exist on frozen transition\n");
}
}
shape = shape->parent;
}
return false;
}
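Since each SHAPE_IVAR transition bumps `iv_count`, an ivar's array index is `iv_count - 1` of the shape that introduced it, and walking parents recovers every index. This is observable through the debug API this commit adds:

```ruby
obj = Object.new
obj.instance_variable_set(:@a, 0)
obj.instance_variable_set(:@b, 0)
shape = RubyVM.debug_shape(obj)
shape.edge_name         # => :@b
shape.iv_count          # => 2, so @b is stored at index 1
shape.parent.edge_name  # => :@a
shape.parent.iv_count   # => 1, so @a is stored at index 0
```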
static rb_shape_t *
shape_alloc(void)
{
rb_shape_t *shape = (rb_shape_t *)rb_imemo_new(imemo_shape, 0, 0, 0, 0);
FL_SET_RAW((VALUE)shape, RUBY_FL_SHAREABLE);
FL_SET_RAW((VALUE)shape, RUBY_FL_PROMOTED1);
return shape;
}
rb_shape_t *
rb_shape_alloc(shape_id_t shape_id, ID edge_name, rb_shape_t * parent)
{
rb_shape_t * shape = shape_alloc();
shape_set_shape_id(shape, shape_id);
shape->edge_name = edge_name;
shape->iv_count = 0;
RB_OBJ_WRITE(shape, &shape->parent, parent);
RUBY_ASSERT(!parent || IMEMO_TYPE_P(parent, imemo_shape));
return shape;
}
MJIT_FUNC_EXPORTED void
rb_shape_set_shape(VALUE obj, rb_shape_t* shape)
{
RUBY_ASSERT(IMEMO_TYPE_P(shape, imemo_shape));
RUBY_ASSERT(SHAPE_FROZEN == shape->type ? RB_OBJ_FROZEN(obj) : 1);
if(rb_shape_set_shape_id(obj, SHAPE_ID(shape))) {
if (shape != rb_shape_get_frozen_root_shape()) {
RB_OBJ_WRITTEN(obj, Qundef, (VALUE)shape);
}
}
}
void
rb_shape_set_shape_by_id(shape_id_t shape_id, rb_shape_t *shape)
{
rb_vm_t *vm = GET_VM();
RUBY_ASSERT(shape == NULL || IMEMO_TYPE_P(shape, imemo_shape));
vm->shape_list[shape_id] = shape;
}
VALUE rb_cShape;
static void
shape_mark(void *ptr)
{
rb_gc_mark((VALUE)ptr);
}
/*
* Exposing Shape to Ruby via RubyVM.debug_shape
*/
static const rb_data_type_t shape_data_type = {
"Shape",
{shape_mark, NULL, NULL,},
0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED
};
static VALUE
rb_shape_id(VALUE self) {
rb_shape_t * shape;
TypedData_Get_Struct(self, rb_shape_t, &shape_data_type, shape);
return INT2NUM(SHAPE_ID(shape));
}
static VALUE
rb_shape_type(VALUE self) {
rb_shape_t * shape;
TypedData_Get_Struct(self, rb_shape_t, &shape_data_type, shape);
return INT2NUM(shape->type);
}
static VALUE
rb_shape_parent_id(VALUE self)
{
rb_shape_t * shape;
TypedData_Get_Struct(self, rb_shape_t, &shape_data_type, shape);
if (shape->parent) {
return INT2NUM(SHAPE_ID(shape->parent));
}
else {
return Qnil;
}
}
static VALUE parse_key(ID key) {
if ((key & RUBY_ID_INTERNAL) == RUBY_ID_INTERNAL) {
return LONG2NUM(key);
} else {
return ID2SYM(key);
}
}
static VALUE
rb_shape_t_to_rb_cShape(rb_shape_t *shape) {
union { const rb_shape_t *in; void *out; } deconst;
VALUE res;
deconst.in = shape;
res = TypedData_Wrap_Struct(rb_cShape, &shape_data_type, deconst.out);
RB_OBJ_WRITTEN(res, Qundef, shape);
return res;
}
static enum rb_id_table_iterator_result rb_edges_to_hash(ID key, VALUE value, void *ref)
{
rb_hash_aset(*(VALUE *)ref, parse_key(key), rb_shape_t_to_rb_cShape((rb_shape_t*)value));
return ID_TABLE_CONTINUE;
}
static VALUE
rb_shape_edges(VALUE self)
{
rb_shape_t* shape;
TypedData_Get_Struct(self, rb_shape_t, &shape_data_type, shape);
VALUE hash = rb_hash_new();
if (shape->edges) {
rb_id_table_foreach(shape->edges, rb_edges_to_hash, &hash);
}
return hash;
}
static VALUE
rb_shape_edge_name(VALUE self)
{
rb_shape_t* shape;
TypedData_Get_Struct(self, rb_shape_t, &shape_data_type, shape);
if (shape->edge_name) {
return ID2SYM(shape->edge_name);
}
else {
return Qnil;
}
}
static VALUE
rb_shape_iv_count(VALUE self)
{
rb_shape_t* shape;
TypedData_Get_Struct(self, rb_shape_t, &shape_data_type, shape);
return INT2NUM(shape->iv_count);
}
static VALUE
rb_shape_export_depth(VALUE self)
{
rb_shape_t* shape;
TypedData_Get_Struct(self, rb_shape_t, &shape_data_type, shape);
unsigned int depth = 0;
while (shape->parent) {
depth++;
shape = shape->parent;
}
return INT2NUM(depth);
}
static VALUE
rb_shape_parent(VALUE self)
{
rb_shape_t * shape;
TypedData_Get_Struct(self, rb_shape_t, &shape_data_type, shape);
if (shape->parent) {
return rb_shape_t_to_rb_cShape(shape->parent);
}
else {
return Qnil;
}
}
VALUE rb_shape_debug_shape(VALUE self, VALUE obj) {
return rb_shape_t_to_rb_cShape(rb_shape_get_shape(obj));
}
VALUE rb_shape_debug_root_shape(VALUE self) {
return rb_shape_t_to_rb_cShape(rb_shape_get_root_shape());
}
VALUE rb_shape_debug_frozen_root_shape(VALUE self) {
return rb_shape_t_to_rb_cShape(rb_shape_get_frozen_root_shape());
}
VALUE rb_obj_shape(rb_shape_t* shape);
static enum rb_id_table_iterator_result collect_keys_and_values(ID key, VALUE value, void *ref)
{
rb_hash_aset(*(VALUE *)ref, parse_key(key), rb_obj_shape((rb_shape_t*)value));
return ID_TABLE_CONTINUE;
}
static VALUE edges(struct rb_id_table* edges)
{
VALUE hash = rb_hash_new();
if (edges)
rb_id_table_foreach(edges, collect_keys_and_values, &hash);
return hash;
}
VALUE rb_obj_shape(rb_shape_t* shape) {
VALUE rb_shape = rb_hash_new();
rb_hash_aset(rb_shape, ID2SYM(rb_intern("id")), INT2NUM(SHAPE_ID(shape)));
rb_hash_aset(rb_shape, ID2SYM(rb_intern("edges")), edges(shape->edges));
if (shape == rb_shape_get_root_shape()) {
rb_hash_aset(rb_shape, ID2SYM(rb_intern("parent_id")), INT2NUM(ROOT_SHAPE_ID));
}
else {
rb_hash_aset(rb_shape, ID2SYM(rb_intern("parent_id")), INT2NUM(SHAPE_ID(shape->parent)));
}
rb_hash_aset(rb_shape, ID2SYM(rb_intern("edge_name")), rb_id2str(shape->edge_name));
return rb_shape;
}
static VALUE shape_transition_tree(VALUE self) {
return rb_obj_shape(rb_shape_get_root_shape());
}
static VALUE shape_count(VALUE self) {
int shape_count = 0;
rb_vm_t *vm = GET_VM();
for(shape_id_t i = 0; i < vm->max_shape_count; i++) {
if(rb_shape_get_shape_by_id_without_assertion(i)) {
shape_count++;
}
}
return INT2NUM(shape_count);
}
static VALUE
shape_max_shape_count(VALUE self)
{
return INT2NUM(GET_VM()->max_shape_count);
}
VALUE
rb_shape_flags_mask(void)
{
return SHAPE_FLAG_MASK;
}
void
Init_shape(void)
{
rb_cShape = rb_define_class_under(rb_cRubyVM, "Shape", rb_cObject);
rb_undef_alloc_func(rb_cShape);
rb_define_method(rb_cShape, "parent_id", rb_shape_parent_id, 0);
rb_define_method(rb_cShape, "parent", rb_shape_parent, 0);
rb_define_method(rb_cShape, "edges", rb_shape_edges, 0);
rb_define_method(rb_cShape, "edge_name", rb_shape_edge_name, 0);
rb_define_method(rb_cShape, "iv_count", rb_shape_iv_count, 0);
rb_define_method(rb_cShape, "depth", rb_shape_export_depth, 0);
rb_define_method(rb_cShape, "id", rb_shape_id, 0);
rb_define_method(rb_cShape, "type", rb_shape_type, 0);
rb_define_const(rb_cShape, "SHAPE_ROOT", INT2NUM(SHAPE_ROOT));
rb_define_const(rb_cShape, "SHAPE_IVAR", INT2NUM(SHAPE_IVAR));
rb_define_const(rb_cShape, "SHAPE_IVAR_UNDEF", INT2NUM(SHAPE_IVAR_UNDEF));
rb_define_const(rb_cShape, "SHAPE_FROZEN", INT2NUM(SHAPE_FROZEN));
rb_define_const(rb_cShape, "SHAPE_BITS", INT2NUM(SHAPE_BITS));
rb_define_module_function(rb_cRubyVM, "debug_shape_transition_tree", shape_transition_tree, 0);
rb_define_module_function(rb_cRubyVM, "debug_shape_count", shape_count, 0);
rb_define_singleton_method(rb_cRubyVM, "debug_shape", rb_shape_debug_shape, 1);
rb_define_singleton_method(rb_cRubyVM, "debug_max_shape_count", shape_max_shape_count, 0);
rb_define_singleton_method(rb_cRubyVM, "debug_root_shape", rb_shape_debug_root_shape, 0);
rb_define_singleton_method(rb_cRubyVM, "debug_frozen_root_shape", rb_shape_debug_frozen_root_shape, 0);
}
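A short tour of the debug surface registered above (exact edges and counts vary with whatever the VM has already loaded):

```ruby
RubyVM.debug_root_shape.type            # => RubyVM::Shape::SHAPE_ROOT
RubyVM.debug_frozen_root_shape.type     # => RubyVM::Shape::SHAPE_FROZEN

obj = Object.new
obj.instance_variable_set(:@a, 1)
RubyVM.debug_shape(obj).edges           # children hang off this shape (may be {})
RubyVM.debug_root_shape.edges.key?(:@a) # => true, obj transitioned along :@a
```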

shape.h: new file (+153 lines)

@ -0,0 +1,153 @@
#ifndef RUBY_SHAPE_H
#define RUBY_SHAPE_H
#if (SIZEOF_UINT64_T == SIZEOF_VALUE)
#define SIZEOF_SHAPE_T 4
#define SHAPE_IN_BASIC_FLAGS 1
typedef uint32_t attr_index_t;
#else
#define SIZEOF_SHAPE_T 2
#define SHAPE_IN_BASIC_FLAGS 0
typedef uint16_t attr_index_t;
#endif
#define MAX_IVARS (attr_index_t)(-1)
#if RUBY_DEBUG || (defined(VM_CHECK_MODE) && VM_CHECK_MODE > 0)
# if SIZEOF_SHAPE_T == 4
typedef uint32_t shape_id_t;
# define SHAPE_BITS 16
# else
typedef uint16_t shape_id_t;
# define SHAPE_BITS 16
# endif
#else
# if SIZEOF_SHAPE_T == 4
typedef uint32_t shape_id_t;
# define SHAPE_BITS 32
# else
typedef uint16_t shape_id_t;
# define SHAPE_BITS 16
# endif
#endif
# define SHAPE_MASK (((uintptr_t)1 << SHAPE_BITS) - 1)
# define SHAPE_FLAG_MASK (((VALUE)-1) >> SHAPE_BITS)
# define SHAPE_FLAG_SHIFT ((SIZEOF_VALUE * 8) - SHAPE_BITS)
# define SHAPE_BITMAP_SIZE 16384
# define MAX_SHAPE_ID (SHAPE_MASK - 1)
# define INVALID_SHAPE_ID SHAPE_MASK
# define ROOT_SHAPE_ID 0x0
# define FROZEN_ROOT_SHAPE_ID 0x1
#define SHAPE_ID(shape) ((((rb_shape_t *)shape)->flags >> SHAPE_FLAG_SHIFT) & SHAPE_MASK)
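A quick sanity check of the bit arithmetic above, assuming a 64-bit `VALUE` and `SHAPE_BITS == 16`:

```ruby
SHAPE_BITS       = 16
SHAPE_FLAG_SHIFT = 64 - SHAPE_BITS        # 48
SHAPE_MASK       = (1 << SHAPE_BITS) - 1  # 0xffff
SHAPE_FLAG_MASK  = (2**64 - 1) >> SHAPE_BITS

flags  = 0x104005                         # arbitrary non-shape flag bits
flags |= 42 << SHAPE_FLAG_SHIFT           # store shape id 42
(flags >> SHAPE_FLAG_SHIFT) & SHAPE_MASK  # => 42 (SHAPE_ID round-trips)
flags & SHAPE_FLAG_MASK                   # => 0x104005 (payload untouched)
```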
struct rb_shape {
VALUE flags; // Shape ID and frozen status encoded within flags
struct rb_shape * parent; // Pointer to the parent
struct rb_id_table * edges; // id_table from ID (ivar) to next shape
ID edge_name; // ID (ivar) for transition from parent to rb_shape
attr_index_t iv_count;
uint8_t type;
};
typedef struct rb_shape rb_shape_t;
enum shape_type {
SHAPE_ROOT,
SHAPE_IVAR,
SHAPE_FROZEN,
SHAPE_IVAR_UNDEF,
};
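Each transition type corresponds to a Ruby-level event:

```ruby
obj = Object.new                   # starts on the root shape (SHAPE_ROOT)
obj.instance_variable_set(:@a, 1)  # SHAPE_IVAR transition
obj.remove_instance_variable(:@a)  # SHAPE_IVAR_UNDEF transition
obj.freeze                         # SHAPE_FROZEN transition
```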
static inline shape_id_t
IMEMO_CACHED_SHAPE_ID(VALUE cc)
{
RBIMPL_ASSERT_TYPE((VALUE)cc, RUBY_T_IMEMO);
return (shape_id_t)(SHAPE_MASK & (RBASIC(cc)->flags >> SHAPE_FLAG_SHIFT));
}
static inline void
IMEMO_SET_CACHED_SHAPE_ID(VALUE cc, shape_id_t shape_id)
{
RBIMPL_ASSERT_TYPE((VALUE)cc, RUBY_T_IMEMO);
RBASIC(cc)->flags &= SHAPE_FLAG_MASK;
RBASIC(cc)->flags |= ((VALUE)(shape_id) << SHAPE_FLAG_SHIFT);
}
#if SHAPE_IN_BASIC_FLAGS
static inline shape_id_t
RBASIC_SHAPE_ID(VALUE obj)
{
RUBY_ASSERT(!RB_SPECIAL_CONST_P(obj));
return (shape_id_t)(SHAPE_MASK & ((RBASIC(obj)->flags) >> SHAPE_FLAG_SHIFT));
}
static inline void
RBASIC_SET_SHAPE_ID(VALUE obj, shape_id_t shape_id)
{
// Under RACTOR_CHECK_MODE, ractor IDs occupy bits 32..47 of flags
// while object shape IDs occupy the top SHAPE_BITS bits
RBASIC(obj)->flags &= SHAPE_FLAG_MASK;
RBASIC(obj)->flags |= ((VALUE)(shape_id) << SHAPE_FLAG_SHIFT);
}
static inline shape_id_t
ROBJECT_SHAPE_ID(VALUE obj)
{
RBIMPL_ASSERT_TYPE(obj, RUBY_T_OBJECT);
return RBASIC_SHAPE_ID(obj);
}
static inline void
ROBJECT_SET_SHAPE_ID(VALUE obj, shape_id_t shape_id)
{
RBIMPL_ASSERT_TYPE(obj, RUBY_T_OBJECT);
RBASIC_SET_SHAPE_ID(obj, shape_id);
}
#else
static inline shape_id_t
ROBJECT_SHAPE_ID(VALUE obj)
{
RBIMPL_ASSERT_TYPE(obj, RUBY_T_OBJECT);
return (shape_id_t)(SHAPE_MASK & (RBASIC(obj)->flags >> SHAPE_FLAG_SHIFT));
}
static inline void
ROBJECT_SET_SHAPE_ID(VALUE obj, shape_id_t shape_id)
{
RBASIC(obj)->flags &= SHAPE_FLAG_MASK;
RBASIC(obj)->flags |= ((VALUE)(shape_id) << SHAPE_FLAG_SHIFT);
}
#endif
bool rb_shape_root_shape_p(rb_shape_t* shape);
rb_shape_t* rb_shape_get_shape_by_id_without_assertion(shape_id_t shape_id);
MJIT_SYMBOL_EXPORT_BEGIN
rb_shape_t* rb_shape_get_shape_by_id(shape_id_t shape_id);
void rb_shape_set_shape(VALUE obj, rb_shape_t* shape);
shape_id_t rb_shape_get_shape_id(VALUE obj);
rb_shape_t* rb_shape_get_shape(VALUE obj);
int rb_shape_frozen_shape_p(rb_shape_t* shape);
void rb_shape_transition_shape_frozen(VALUE obj);
void rb_shape_transition_shape_remove_ivar(VALUE obj, ID id, rb_shape_t *shape);
void rb_shape_transition_shape(VALUE obj, ID id, rb_shape_t *shape);
rb_shape_t* rb_shape_get_next(rb_shape_t* shape, VALUE obj, ID id);
bool rb_shape_get_iv_index(rb_shape_t * shape, ID id, attr_index_t * value);
MJIT_SYMBOL_EXPORT_END
rb_shape_t * rb_shape_alloc(shape_id_t shape_id, ID edge_name, rb_shape_t * parent);
bool rb_shape_set_shape_id(VALUE obj, shape_id_t shape_id);
void rb_shape_set_shape_by_id(shape_id_t, rb_shape_t *);
VALUE rb_obj_debug_shape(VALUE self, VALUE obj);
VALUE rb_shape_flags_mask(void);
#endif


@ -17,7 +17,7 @@ describe "ObjectSpace.reachable_objects_from" do
it "enumerates objects directly reachable from a given object" do it "enumerates objects directly reachable from a given object" do
ObjectSpace.reachable_objects_from(['a', 'b', 'c']).should include(Array, 'a', 'b', 'c') ObjectSpace.reachable_objects_from(['a', 'b', 'c']).should include(Array, 'a', 'b', 'c')
ObjectSpace.reachable_objects_from(Object.new).should == [Object] ObjectSpace.reachable_objects_from(Object.new).should include(Object)
end end
it "finds an object stored in an Array" do it "finds an object stored in an Array" do


@ -8,16 +8,9 @@ describe :rbasic, shared: true do
it "reports the appropriate FREEZE flag for the object when reading" do it "reports the appropriate FREEZE flag for the object when reading" do
obj, _ = @data.call obj, _ = @data.call
initial = @specs.get_flags(obj) (@specs.get_flags(obj) & @freeze).should == 0
obj.freeze obj.freeze
@specs.get_flags(obj).should == @freeze | initial (@specs.get_flags(obj) & @freeze).should == @freeze
end
it "supports setting the FREEZE flag" do
obj, _ = @data.call
initial = @specs.get_flags(obj)
@specs.set_flags(obj, @freeze | initial).should == @freeze | initial
obj.should.frozen?
end end
it "supports retrieving the (meta)class" do it "supports retrieving the (meta)class" do


@ -7,6 +7,7 @@ module Bug end
module Bug::Marshal module Bug::Marshal
class TestInternalIVar < Test::Unit::TestCase class TestInternalIVar < Test::Unit::TestCase
def test_marshal def test_marshal
pend "We don't support IVs with ID of 0"
v = InternalIVar.new("hello", "world", "bye") v = InternalIVar.new("hello", "world", "bye")
assert_equal("hello", v.normal) assert_equal("hello", v.normal)
assert_equal("world", v.internal) assert_equal("world", v.internal)


@ -116,12 +116,16 @@ class TestObjSpace < Test::Unit::TestCase
opts = %w[--disable-gem --disable=frozen-string-literal -robjspace] opts = %w[--disable-gem --disable=frozen-string-literal -robjspace]
assert_separately opts, "#{<<-"begin;"}\n#{<<-'end;'}" assert_separately opts, "#{<<-"begin;"}\n#{<<-'end;'}"
begin; begin;
assert_equal(nil, ObjectSpace.reachable_objects_from(nil)) def assert_reachable_object_as_expected(expectation, reachable_objects_from_array)
assert_equal([Array, 'a', 'b', 'c'], ObjectSpace.reachable_objects_from(['a', 'b', 'c'])) reachable_objects = ObjectSpace.reachable_objects_from(reachable_objects_from_array)
assert_equal(expectation, reachable_objects)
end
assert_equal([Array, 'a', 'a', 'a'], ObjectSpace.reachable_objects_from(['a', 'a', 'a'])) assert_equal(nil, ObjectSpace.reachable_objects_from(nil))
assert_equal([Array, 'a', 'a'], ObjectSpace.reachable_objects_from(['a', v = 'a', v])) assert_reachable_object_as_expected([Array, 'a', 'b', 'c'], ['a', 'b', 'c'])
assert_equal([Array, 'a'], ObjectSpace.reachable_objects_from([v = 'a', v, v])) assert_reachable_object_as_expected([Array, 'a', 'a', 'a'], ['a', 'a', 'a'])
assert_reachable_object_as_expected([Array, 'a', 'a'], ['a', v = 'a', v])
assert_reachable_object_as_expected([Array, 'a'], [v = 'a', v, v])
long_ary = Array.new(1_000){''} long_ary = Array.new(1_000){''}
max = 0 max = 0


@ -831,7 +831,7 @@ class TestMJIT < Test::Unit::TestCase
end end
def test_inlined_exivar def test_inlined_exivar
assert_eval_with_jit("#{<<~"begin;"}\n#{<<~"end;"}", stdout: "aaa", success_count: 3, recompile_count: 1, min_calls: 2) assert_eval_with_jit("#{<<~"begin;"}\n#{<<~"end;"}", stdout: "aaa", success_count: 4, recompile_count: 2, min_calls: 2)
begin; begin;
class Foo < Hash class Foo < Hash
def initialize def initialize
@ -850,7 +850,7 @@ class TestMJIT < Test::Unit::TestCase
end end
def test_inlined_undefined_ivar def test_inlined_undefined_ivar
assert_eval_with_jit("#{<<~"begin;"}\n#{<<~"end;"}", stdout: "bbb", success_count: 3, min_calls: 3) assert_eval_with_jit("#{<<~"begin;"}\n#{<<~"end;"}", stdout: "bbb", success_count: 2, min_calls: 2)
begin; begin;
class Foo class Foo
def initialize def initialize

test/ruby/test_shapes.rb: new file (+171 lines)

@ -0,0 +1,171 @@
# frozen_string_literal: false
require 'test/unit'
# These test the functionality of object shapes
class TestShapes < Test::Unit::TestCase
class Example
def initialize
@a = 1
end
end
class RemoveAndAdd
def add_foo
@foo = 1
end
def remove
remove_instance_variable(:@foo)
end
def add_bar
@bar = 1
end
end
# RubyVM.debug_shape returns new instances of shape objects for
# each call. This helper method allows us to define equality for
# shapes
def assert_shape_equal(shape1, shape2)
assert_equal(shape1.id, shape2.id)
assert_equal(shape1.parent_id, shape2.parent_id)
assert_equal(shape1.depth, shape2.depth)
assert_equal(shape1.type, shape2.type)
end
def refute_shape_equal(shape1, shape2)
refute_equal(shape1.id, shape2.id)
end
def test_iv_index
example = RemoveAndAdd.new
shape = RubyVM.debug_shape(example)
assert_equal 0, shape.iv_count
example.add_foo # makes a transition
new_shape = RubyVM.debug_shape(example)
assert_equal([:@foo], example.instance_variables)
assert_equal(shape.id, new_shape.parent.id)
assert_equal(1, new_shape.iv_count)
example.remove # makes a transition
remove_shape = RubyVM.debug_shape(example)
assert_equal([], example.instance_variables)
assert_equal(new_shape.id, remove_shape.parent.id)
assert_equal(1, remove_shape.iv_count)
example.add_bar # makes a transition
bar_shape = RubyVM.debug_shape(example)
assert_equal([:@bar], example.instance_variables)
assert_equal(remove_shape.id, bar_shape.parent.id)
assert_equal(2, bar_shape.iv_count)
end
def test_new_obj_has_root_shape
assert_shape_equal(RubyVM.debug_root_shape, RubyVM.debug_shape(Object.new))
end
def test_frozen_new_obj_has_frozen_root_shape
assert_shape_equal(
RubyVM.debug_frozen_root_shape,
RubyVM.debug_shape(Object.new.freeze)
)
end
def test_str_has_root_shape
assert_shape_equal(RubyVM.debug_root_shape, RubyVM.debug_shape(""))
end
def test_array_has_root_shape
assert_shape_equal(RubyVM.debug_root_shape, RubyVM.debug_shape([]))
end
def test_hash_has_root_shape
assert_shape_equal(RubyVM.debug_root_shape, RubyVM.debug_shape({}))
end
def test_true_has_frozen_root_shape
assert_shape_equal(RubyVM.debug_frozen_root_shape, RubyVM.debug_shape(true))
end
def test_nil_has_frozen_root_shape
assert_shape_equal(RubyVM.debug_frozen_root_shape, RubyVM.debug_shape(nil))
end
def test_basic_shape_transition
obj = Example.new
refute_equal(RubyVM.debug_root_shape, RubyVM.debug_shape(obj))
assert_shape_equal(RubyVM.debug_root_shape.edges[:@a], RubyVM.debug_shape(obj))
assert_equal(obj.instance_variable_get(:@a), 1)
end
def test_different_objects_make_same_transition
obj = Example.new
obj2 = ""
obj2.instance_variable_set(:@a, 1)
assert_shape_equal(RubyVM.debug_shape(obj), RubyVM.debug_shape(obj2))
end
def test_duplicating_objects
obj = Example.new
obj2 = obj.dup
assert_shape_equal(RubyVM.debug_shape(obj), RubyVM.debug_shape(obj2))
end
def test_freezing_and_duplicating_object
obj = Object.new.freeze
obj2 = obj.dup
refute_predicate(obj2, :frozen?)
refute_equal(RubyVM.debug_shape(obj).id, RubyVM.debug_shape(obj2).id)
end
def test_freezing_and_duplicating_object_with_ivars
obj = Example.new.freeze
obj2 = obj.dup
refute_predicate(obj2, :frozen?)
refute_shape_equal(RubyVM.debug_shape(obj), RubyVM.debug_shape(obj2))
assert_equal(obj2.instance_variable_get(:@a), 1)
end
def test_freezing_and_duplicating_string_with_ivars
str = "str"
str.instance_variable_set(:@a, 1)
str.freeze
str2 = str.dup
refute_predicate(str2, :frozen?)
refute_equal(RubyVM.debug_shape(str).id, RubyVM.debug_shape(str2).id)
assert_equal(str2.instance_variable_get(:@a), 1)
end
def test_freezing_and_cloning_objects
obj = Object.new.freeze
obj2 = obj.clone(freeze: true)
assert_predicate(obj2, :frozen?)
assert_shape_equal(RubyVM.debug_shape(obj), RubyVM.debug_shape(obj2))
end
def test_freezing_and_cloning_object_with_ivars
obj = Example.new.freeze
obj2 = obj.clone(freeze: true)
assert_predicate(obj2, :frozen?)
assert_shape_equal(RubyVM.debug_shape(obj), RubyVM.debug_shape(obj2))
assert_equal(obj2.instance_variable_get(:@a), 1)
end
def test_freezing_and_cloning_string
str = "str".freeze
str2 = str.clone(freeze: true)
assert_predicate(str2, :frozen?)
assert_shape_equal(RubyVM.debug_shape(str), RubyVM.debug_shape(str2))
end
def test_freezing_and_cloning_string_with_ivars
str = "str"
str.instance_variable_set(:@a, 1)
str.freeze
str2 = str.clone(freeze: true)
assert_predicate(str2, :frozen?)
assert_shape_equal(RubyVM.debug_shape(str), RubyVM.debug_shape(str2))
assert_equal(str2.instance_variable_get(:@a), 1)
end
end


@ -341,12 +341,17 @@ generator = BindingGenerator.new(
VM_METHOD_TYPE_CFUNC VM_METHOD_TYPE_CFUNC
VM_METHOD_TYPE_ISEQ VM_METHOD_TYPE_ISEQ
], ],
ULONG: %w[
INVALID_SHAPE_ID
SHAPE_MASK
],
}, },
types: %w[ types: %w[
CALL_DATA CALL_DATA
IC IC
IVC IVC
RB_BUILTIN RB_BUILTIN
attr_index_t
compile_branch compile_branch
compile_status compile_status
inlined_call_context inlined_call_context
@ -360,10 +365,10 @@ generator = BindingGenerator.new(
rb_callable_method_entry_struct rb_callable_method_entry_struct
rb_callcache rb_callcache
rb_callinfo rb_callinfo
rb_cref_t
rb_control_frame_t rb_control_frame_t
rb_execution_context_t rb_cref_t
rb_execution_context_struct rb_execution_context_struct
rb_execution_context_t
rb_iseq_constant_body rb_iseq_constant_body
rb_iseq_location_t rb_iseq_location_t
rb_iseq_struct rb_iseq_struct
@ -378,6 +383,7 @@ generator = BindingGenerator.new(
], ],
dynamic_types: %w[ dynamic_types: %w[
VALUE VALUE
shape_id_t
], ],
skip_fields: { skip_fields: {
'rb_execution_context_struct.machine': %w[regs], # differs between macOS and Linux 'rb_execution_context_struct.machine': %w[regs], # differs between macOS and Linux

File diff suppressed because it is too large


@ -11,11 +11,19 @@
/* per-object */ /* per-object */
struct gen_ivtbl { struct gen_ivtbl {
#if !SHAPE_IN_BASIC_FLAGS
uint16_t shape_id;
#endif
uint32_t numiv; uint32_t numiv;
VALUE ivptr[FLEX_ARY_LEN]; VALUE ivptr[FLEX_ARY_LEN];
}; };
int rb_ivar_generic_ivtbl_lookup(VALUE obj, struct gen_ivtbl **); int rb_ivar_generic_ivtbl_lookup(VALUE obj, struct gen_ivtbl **);
VALUE rb_ivar_generic_lookup_with_index(VALUE obj, ID id, uint32_t index);
#include "shape.h"
#if !SHAPE_IN_BASIC_FLAGS
shape_id_t rb_generic_shape_id(VALUE obj);
#endif
#endif /* RUBY_TOPLEVEL_VARIABLE_H */ #endif /* RUBY_TOPLEVEL_VARIABLE_H */

vm.c (45 lines changed)

@ -26,6 +26,7 @@
#include "internal/thread.h" #include "internal/thread.h"
#include "internal/vm.h" #include "internal/vm.h"
#include "internal/sanitizers.h" #include "internal/sanitizers.h"
#include "internal/variable.h"
#include "iseq.h" #include "iseq.h"
#include "mjit.h" #include "mjit.h"
#include "yjit.h" #include "yjit.h"
@ -2720,6 +2721,12 @@ rb_vm_update_references(void *ptr)
vm->top_self = rb_gc_location(vm->top_self); vm->top_self = rb_gc_location(vm->top_self);
vm->orig_progname = rb_gc_location(vm->orig_progname); vm->orig_progname = rb_gc_location(vm->orig_progname);
for (shape_id_t i = 0; i <= vm->max_shape_count; i++) {
if (vm->shape_list[i]) {
vm->shape_list[i] = (rb_shape_t *)rb_gc_location((VALUE)vm->shape_list[i]);
}
}
rb_gc_update_tbl_refs(vm->overloaded_cme_table); rb_gc_update_tbl_refs(vm->overloaded_cme_table);
if (vm->coverages) { if (vm->coverages) {
@ -2801,6 +2808,8 @@ rb_vm_mark(void *ptr)
obj_ary++; obj_ary++;
} }
rb_gc_mark((VALUE)vm->root_shape);
rb_gc_mark((VALUE)vm->frozen_root_shape);
rb_gc_mark_movable(vm->load_path); rb_gc_mark_movable(vm->load_path);
rb_gc_mark_movable(vm->load_path_snapshot); rb_gc_mark_movable(vm->load_path_snapshot);
RUBY_MARK_MOVABLE_UNLESS_NULL(vm->load_path_check_cache); RUBY_MARK_MOVABLE_UNLESS_NULL(vm->load_path_check_cache);
@ -4021,6 +4030,11 @@ Init_BareVM(void)
rb_native_cond_initialize(&vm->ractor.sync.terminate_cond); rb_native_cond_initialize(&vm->ractor.sync.terminate_cond);
} }
#ifndef _WIN32
#include <unistd.h>
#include <sys/mman.h>
#endif
void void
Init_vm_objects(void) Init_vm_objects(void)
{ {
@ -4032,6 +4046,37 @@ Init_vm_objects(void)
vm->mark_object_ary = rb_ary_hidden_new(128); vm->mark_object_ary = rb_ary_hidden_new(128);
vm->loading_table = st_init_strtable(); vm->loading_table = st_init_strtable();
vm->frozen_strings = st_init_table_with_size(&rb_fstring_hash_type, 10000); vm->frozen_strings = st_init_table_with_size(&rb_fstring_hash_type, 10000);
#if HAVE_MMAP
vm->shape_list = (rb_shape_t **)mmap(NULL, rb_size_mul_or_raise(SHAPE_BITMAP_SIZE * 32, sizeof(rb_shape_t *), rb_eRuntimeError),
PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (vm->shape_list == MAP_FAILED) {
vm->shape_list = 0;
}
#else
vm->shape_list = xcalloc(SHAPE_BITMAP_SIZE * 32, sizeof(rb_shape_t *));
#endif
if (!vm->shape_list) {
rb_memerror();
}
// Root shape
vm->root_shape = rb_shape_alloc(ROOT_SHAPE_ID,
0,
0);
rb_shape_set_shape_by_id(ROOT_SHAPE_ID, vm->root_shape);
RB_OBJ_WRITTEN(vm->root_shape, Qundef, (VALUE)vm);
// Frozen root shape
vm->frozen_root_shape = rb_shape_alloc(FROZEN_ROOT_SHAPE_ID,
rb_make_internal_id(),
vm->root_shape);
vm->frozen_root_shape->type = (uint8_t)SHAPE_FROZEN;
RB_OBJ_FREEZE_RAW((VALUE)vm->frozen_root_shape);
rb_shape_set_shape_by_id(FROZEN_ROOT_SHAPE_ID, vm->frozen_root_shape);
RB_OBJ_WRITTEN(vm->frozen_root_shape, Qundef, (VALUE)vm);
vm->max_shape_count = 1;
} }
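Bootstrapping leaves the VM with two well-known shapes whose IDs are fixed by `ROOT_SHAPE_ID` and `FROZEN_ROOT_SHAPE_ID`:

```ruby
RubyVM.debug_root_shape.id                # => 0
RubyVM.debug_frozen_root_shape.id         # => 1
RubyVM.debug_frozen_root_shape.parent_id  # => 0
```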
/* Stub for builtin function when not building YJIT units*/ /* Stub for builtin function when not building YJIT units*/


@ -10,6 +10,7 @@
#include "debug_counter.h" #include "debug_counter.h"
#include "internal/class.h" #include "internal/class.h"
#include "shape.h"
enum vm_call_flag_bits { enum vm_call_flag_bits {
VM_CALL_ARGS_SPLAT_bit, /* m(*args) */ VM_CALL_ARGS_SPLAT_bit, /* m(*args) */
@ -284,14 +285,32 @@ struct rb_callcache {
const vm_call_handler call_; const vm_call_handler call_;
union { union {
const unsigned int attr_index; struct {
const attr_index_t index;
shape_id_t dest_shape_id;
} attr;
const enum method_missing_reason method_missing_reason; /* used by method_missing */ const enum method_missing_reason method_missing_reason; /* used by method_missing */
VALUE v; VALUE v;
} aux_; } aux_;
}; };
#define VM_CALLCACHE_UNMARKABLE IMEMO_FL_USER0 #define VM_CALLCACHE_UNMARKABLE FL_FREEZE
#define VM_CALLCACHE_ON_STACK IMEMO_FL_USER1 #define VM_CALLCACHE_ON_STACK FL_EXIVAR
extern const struct rb_callcache *rb_vm_empty_cc(void);
extern const struct rb_callcache *rb_vm_empty_cc_for_super(void);
#define vm_cc_empty() rb_vm_empty_cc()
static inline void
vm_cc_attr_index_initialize(const struct rb_callcache *cc, shape_id_t shape_id)
{
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
VM_ASSERT(cc != vm_cc_empty());
IMEMO_SET_CACHED_SHAPE_ID((VALUE)cc, shape_id);
*(attr_index_t *)&cc->aux_.attr.index = 0;
*(shape_id_t *)&cc->aux_.attr.dest_shape_id = shape_id;
}
static inline const struct rb_callcache * static inline const struct rb_callcache *
vm_cc_new(VALUE klass, vm_cc_new(VALUE klass,
@ -299,6 +318,7 @@ vm_cc_new(VALUE klass,
vm_call_handler call) vm_call_handler call)
{ {
const struct rb_callcache *cc = (const struct rb_callcache *)rb_imemo_new(imemo_callcache, (VALUE)cme, (VALUE)call, 0, klass); const struct rb_callcache *cc = (const struct rb_callcache *)rb_imemo_new(imemo_callcache, (VALUE)cme, (VALUE)call, 0, klass);
vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
RB_DEBUG_COUNTER_INC(cc_new); RB_DEBUG_COUNTER_INC(cc_new);
return cc; return cc;
} }
@ -350,30 +370,71 @@ vm_cc_call(const struct rb_callcache *cc)
return cc->call_; return cc->call_;
} }
static inline unsigned int static inline attr_index_t
vm_cc_attr_index(const struct rb_callcache *cc) vm_cc_attr_index(const struct rb_callcache *cc)
{ {
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache)); VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
return cc->aux_.attr_index - 1; return cc->aux_.attr.index - 1;
} }
static inline bool static inline bool
vm_cc_attr_index_p(const struct rb_callcache *cc) vm_cc_attr_index_p(const struct rb_callcache *cc)
{ {
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache)); VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
return cc->aux_.attr_index > 0; return cc->aux_.attr.index != 0;
} }
static inline uint32_t static inline shape_id_t
vm_ic_entry_index(const struct iseq_inline_iv_cache_entry *ic) vm_cc_attr_index_source_shape_id(const struct rb_callcache *cc)
{ {
return ic->entry->index; VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
return IMEMO_CACHED_SHAPE_ID((VALUE)cc);
}
static inline shape_id_t
vm_cc_attr_shape_id(const struct rb_callcache *cc)
{
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
return vm_cc_attr_index_source_shape_id(cc);
}
static inline shape_id_t
vm_cc_attr_index_dest_shape_id(const struct rb_callcache *cc)
{
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
return cc->aux_.attr.dest_shape_id;
}
static inline attr_index_t
vm_ic_attr_index(const struct iseq_inline_iv_cache_entry *ic)
{
return ic->attr_index - 1;
} }
static inline bool static inline bool
vm_ic_entry_p(const struct iseq_inline_iv_cache_entry *ic) vm_ic_attr_index_p(const struct iseq_inline_iv_cache_entry *ic)
{ {
return ic->entry; return ic->attr_index > 0;
}
static inline shape_id_t
vm_ic_attr_shape_id(const struct iseq_inline_iv_cache_entry *ic)
{
return ic->source_shape_id;
}
static inline shape_id_t
vm_ic_attr_index_source_shape_id(const struct iseq_inline_iv_cache_entry *ic)
{
return ic->source_shape_id;
}
static inline shape_id_t
vm_ic_attr_index_dest_shape_id(const struct iseq_inline_iv_cache_entry *ic)
{
return ic->dest_shape_id;
} }
static inline unsigned int static inline unsigned int
@ -407,10 +468,6 @@ vm_cc_valid_p(const struct rb_callcache *cc, const rb_callable_method_entry_t *c
} }
} }
extern const struct rb_callcache *rb_vm_empty_cc(void);
extern const struct rb_callcache *rb_vm_empty_cc_for_super(void);
#define vm_cc_empty() rb_vm_empty_cc()
/* callcache: mutate */ /* callcache: mutate */
static inline void static inline void
@ -422,26 +479,29 @@ vm_cc_call_set(const struct rb_callcache *cc, vm_call_handler call)
} }
static inline void static inline void
vm_cc_attr_index_set(const struct rb_callcache *cc, int index) vm_cc_attr_index_set(const struct rb_callcache *cc, attr_index_t index, shape_id_t source_shape_id, shape_id_t dest_shape_id)
{ {
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache)); VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
VM_ASSERT(cc != vm_cc_empty()); VM_ASSERT(cc != vm_cc_empty());
*(int *)&cc->aux_.attr_index = index + 1; IMEMO_SET_CACHED_SHAPE_ID((VALUE)cc, source_shape_id);
*(attr_index_t *)&cc->aux_.attr.index = (index + 1);
*(shape_id_t *)&cc->aux_.attr.dest_shape_id = dest_shape_id;
} }
static inline void static inline void
vm_ic_entry_set(struct iseq_inline_iv_cache_entry *ic, struct rb_iv_index_tbl_entry *entry, const rb_iseq_t *iseq) vm_ic_attr_index_set(const rb_iseq_t *iseq, const struct iseq_inline_iv_cache_entry *ic, attr_index_t index, shape_id_t source_shape_id, shape_id_t dest_shape_id)
{ {
ic->entry = entry; *(shape_id_t *)&ic->source_shape_id = source_shape_id;
RB_OBJ_WRITTEN(iseq, Qundef, entry->class_value); *(shape_id_t *)&ic->dest_shape_id = dest_shape_id;
*(attr_index_t *)&ic->attr_index = index + 1;
} }
static inline void static inline void
vm_cc_attr_index_initialize(const struct rb_callcache *cc) vm_ic_attr_index_initialize(const struct iseq_inline_iv_cache_entry *ic, shape_id_t shape_id)
{ {
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache)); *(shape_id_t *)&ic->source_shape_id = shape_id;
VM_ASSERT(cc != vm_cc_empty()); *(shape_id_t *)&ic->dest_shape_id = shape_id;
*(int *)&cc->aux_.attr_index = 0; *(attr_index_t *)&ic->attr_index = 0;
} }
static inline void static inline void


@ -99,6 +99,7 @@ extern int ruby_assert_critical_section_entered;
#include "ruby/st.h" #include "ruby/st.h"
#include "ruby_atomic.h" #include "ruby_atomic.h"
#include "vm_opts.h" #include "vm_opts.h"
#include "shape.h"
#include "ruby/thread_native.h" #include "ruby/thread_native.h"
@ -272,7 +273,9 @@ struct iseq_inline_constant_cache {
}; };
struct iseq_inline_iv_cache_entry { struct iseq_inline_iv_cache_entry {
struct rb_iv_index_tbl_entry *entry; shape_id_t source_shape_id;
shape_id_t dest_shape_id;
attr_index_t attr_index;
}; };
struct iseq_inline_cvar_cache_entry { struct iseq_inline_cvar_cache_entry {
@ -687,6 +690,12 @@ typedef struct rb_vm_struct {
VALUE mark_object_ary; VALUE mark_object_ary;
const VALUE special_exceptions[ruby_special_error_count]; const VALUE special_exceptions[ruby_special_error_count];
/* object shapes */
rb_shape_t **shape_list;
rb_shape_t *root_shape;
rb_shape_t *frozen_root_shape;
shape_id_t max_shape_count;
/* load */ /* load */
VALUE top_self; VALUE top_self;
VALUE load_path; VALUE load_path;


@ -47,7 +47,7 @@ rb_vm_call0(rb_execution_context_t *ec, VALUE recv, ID id, int argc, const VALUE
{ {
struct rb_calling_info calling = { struct rb_calling_info calling = {
.ci = &VM_CI_ON_STACK(id, kw_splat ? VM_CALL_KW_SPLAT : 0, argc, NULL), .ci = &VM_CI_ON_STACK(id, kw_splat ? VM_CALL_KW_SPLAT : 0, argc, NULL),
.cc = &VM_CC_ON_STACK(Qfalse, vm_call_general, { 0 }, cme), .cc = &VM_CC_ON_STACK(Qfalse, vm_call_general, {{ 0 }}, cme),
.block_handler = vm_passed_block_handler(ec), .block_handler = vm_passed_block_handler(ec),
.recv = recv, .recv = recv,
.argc = argc, .argc = argc,
@ -89,7 +89,7 @@ vm_call0_cc(rb_execution_context_t *ec, VALUE recv, ID id, int argc, const VALUE
static VALUE static VALUE
vm_call0_cme(rb_execution_context_t *ec, struct rb_calling_info *calling, const VALUE *argv, const rb_callable_method_entry_t *cme) vm_call0_cme(rb_execution_context_t *ec, struct rb_calling_info *calling, const VALUE *argv, const rb_callable_method_entry_t *cme)
{ {
calling->cc = &VM_CC_ON_STACK(Qfalse, vm_call_general, { 0 }, cme); calling->cc = &VM_CC_ON_STACK(Qfalse, vm_call_general, {{ 0 }}, cme);
return vm_call0_body(ec, calling, argv); return vm_call0_body(ec, calling, argv);
} }


@ -50,6 +50,11 @@ MJIT_STATIC VALUE
ruby_vm_special_exception_copy(VALUE exc) ruby_vm_special_exception_copy(VALUE exc)
{ {
VALUE e = rb_obj_alloc(rb_class_real(RBASIC_CLASS(exc))); VALUE e = rb_obj_alloc(rb_class_real(RBASIC_CLASS(exc)));
rb_shape_t * shape = rb_shape_get_shape(exc);
if (rb_shape_frozen_shape_p(shape)) {
shape = shape->parent;
}
rb_shape_set_shape(e, shape);
rb_obj_copy_ivar(e, exc); rb_obj_copy_ivar(e, exc);
return e; return e;
} }
@ -1085,35 +1090,19 @@ vm_get_cvar_base(const rb_cref_t *cref, const rb_control_frame_t *cfp, int top_l
return klass; return klass;
} }
static bool ALWAYS_INLINE(static void fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id));
iv_index_tbl_lookup(struct st_table *iv_index_tbl, ID id, struct rb_iv_index_tbl_entry **ent)
{
int found;
st_data_t ent_data;
if (iv_index_tbl == NULL) return false;
RB_VM_LOCK_ENTER();
{
found = st_lookup(iv_index_tbl, (st_data_t)id, &ent_data);
}
RB_VM_LOCK_LEAVE();
if (found) *ent = (struct rb_iv_index_tbl_entry *)ent_data;
return found ? true : false;
}
ALWAYS_INLINE(static void fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, struct rb_iv_index_tbl_entry *ent));
static inline void static inline void
fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, struct rb_iv_index_tbl_entry *ent) fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id)
{ {
// fill cache if (is_attr) {
if (!is_attr) { if (vm_cc_markable(cc)) {
vm_ic_entry_set(ic, ent, iseq); vm_cc_attr_index_set(cc, index, shape_id, shape_id);
RB_OBJ_WRITTEN(cc, Qundef, rb_shape_get_shape_by_id(shape_id));
}
} }
else { else {
vm_cc_attr_index_set(cc, ent->index); vm_ic_attr_index_set(iseq, ic, index, shape_id, shape_id);
RB_OBJ_WRITTEN(iseq, Qundef, rb_shape_get_shape_by_id(shape_id));
} }
} }
@ -1123,67 +1112,119 @@ vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_call
{ {
#if OPT_IC_FOR_IVAR #if OPT_IC_FOR_IVAR
VALUE val = Qundef; VALUE val = Qundef;
shape_id_t shape_id;
VALUE * ivar_list;
if (SPECIAL_CONST_P(obj)) { if (SPECIAL_CONST_P(obj)) {
// frozen? return Qnil;
} }
else if (LIKELY(is_attr ?
RB_DEBUG_COUNTER_INC_UNLESS(ivar_get_ic_miss_unset, vm_cc_attr_index_p(cc)) :
RB_DEBUG_COUNTER_INC_UNLESS(ivar_get_ic_miss_serial, vm_ic_entry_p(ic) && ic->entry->class_serial == RCLASS_SERIAL(RBASIC(obj)->klass)))) {
uint32_t index = !is_attr ? vm_ic_entry_index(ic): (vm_cc_attr_index(cc));
RB_DEBUG_COUNTER_INC(ivar_get_ic_hit); #if SHAPE_IN_BASIC_FLAGS
shape_id = RBASIC_SHAPE_ID(obj);
if (LIKELY(BUILTIN_TYPE(obj) == T_OBJECT) && #endif
LIKELY(index < ROBJECT_NUMIV(obj))) {
val = ROBJECT_IVPTR(obj)[index];
switch (BUILTIN_TYPE(obj)) {
case T_OBJECT:
ivar_list = ROBJECT_IVPTR(obj);
VM_ASSERT(rb_ractor_shareable_p(obj) ? rb_ractor_shareable_p(val) : true); VM_ASSERT(rb_ractor_shareable_p(obj) ? rb_ractor_shareable_p(val) : true);
}
else if (FL_TEST_RAW(obj, FL_EXIVAR)) {
val = rb_ivar_generic_lookup_with_index(obj, id, index);
}
goto ret; #if !SHAPE_IN_BASIC_FLAGS
} shape_id = ROBJECT_SHAPE_ID(obj);
else { #endif
struct rb_iv_index_tbl_entry *ent; break;
case T_CLASS:
if (BUILTIN_TYPE(obj) == T_OBJECT) { case T_MODULE:
struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj); {
if (iv_index_tbl && iv_index_tbl_lookup(iv_index_tbl, id, &ent)) {
fill_ivar_cache(iseq, ic, cc, is_attr, ent);
// get value
if (ent->index < ROBJECT_NUMIV(obj)) {
val = ROBJECT_IVPTR(obj)[ent->index];
VM_ASSERT(rb_ractor_shareable_p(obj) ? rb_ractor_shareable_p(val) : true);
}
}
}
else if (FL_TEST_RAW(obj, FL_EXIVAR)) {
struct st_table *iv_index_tbl = RCLASS_IV_INDEX_TBL(rb_obj_class(obj));
if (iv_index_tbl && iv_index_tbl_lookup(iv_index_tbl, id, &ent)) {
fill_ivar_cache(iseq, ic, cc, is_attr, ent);
val = rb_ivar_generic_lookup_with_index(obj, id, ent->index);
}
}
else {
// T_CLASS / T_MODULE
goto general_path; goto general_path;
} }
default:
if (FL_TEST_RAW(obj, FL_EXIVAR)) {
struct gen_ivtbl *ivtbl;
rb_gen_ivtbl_get(obj, id, &ivtbl);
#if !SHAPE_IN_BASIC_FLAGS
shape_id = ivtbl->shape_id;
#endif
ivar_list = ivtbl->ivptr;
} else {
return Qnil;
}
}
ret: shape_id_t cached_id;
if (LIKELY(val != Qundef)) {
return val; if (is_attr) {
cached_id = vm_cc_attr_shape_id(cc);
}
else {
cached_id = vm_ic_attr_shape_id(ic);
}
attr_index_t index;
if (LIKELY(cached_id == shape_id)) {
RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);
if (is_attr && vm_cc_attr_index_p(cc)) {
index = vm_cc_attr_index(cc);
}
else if (!is_attr && vm_ic_attr_index_p(ic)) {
index = vm_ic_attr_index(ic);
} }
else { else {
return Qnil; return Qnil;
} }
val = ivar_list[index];
VM_ASSERT(rb_ractor_shareable_p(obj) ? rb_ractor_shareable_p(val) : true);
} }
else { // cache miss case
#if RUBY_DEBUG
if (is_attr) {
if (cached_id != INVALID_SHAPE_ID) {
RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_set);
} else {
RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_unset);
}
}
else {
if (cached_id != INVALID_SHAPE_ID) {
RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_set);
} else {
RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_unset);
}
}
#endif
attr_index_t index;
rb_shape_t *shape = rb_shape_get_shape_by_id(shape_id);
if (rb_shape_get_iv_index(shape, id, &index)) {
// This fills in the inline cache with the ivar index and shape id
fill_ivar_cache(iseq, ic, cc, is_attr, index, shape_id);
// We fetched the ivar list above
val = ivar_list[index];
}
else {
if (is_attr) {
if (vm_cc_markable(cc)) {
vm_cc_attr_index_initialize(cc, shape_id);
}
}
else {
vm_ic_attr_index_initialize(ic, shape_id);
}
val = Qnil;
}
}
RUBY_ASSERT(val != Qundef);
return val;
general_path: general_path:
#endif /* OPT_IC_FOR_IVAR */ #endif /* OPT_IC_FOR_IVAR */
RB_DEBUG_COUNTER_INC(ivar_get_ic_miss); RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);
@ -1196,40 +1237,89 @@ vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_call
} }
} }
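Because the read cache is now keyed on the shape ID rather than the class serial, a single call site can stay hot across classes that lay out their ivars identically. For example:

```ruby
class A
  def initialize
    @x = 1
  end

  def x
    @x
  end
end
class B < A; end

# A.new and B.new share a shape, so the @x read inside #x hits the same
# inline cache for both receivers.
[A.new, B.new].each { |o| o.x }
```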
static void
populate_cache(attr_index_t index, rb_shape_t *shape, rb_shape_t *next_shape, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, bool is_attr)
{
// Cache population code
if (is_attr) {
if (vm_cc_markable(cc)) {
vm_cc_attr_index_set(cc, index, SHAPE_ID(shape), SHAPE_ID(next_shape));
RB_OBJ_WRITTEN(cc, Qundef, (VALUE)shape);
RB_OBJ_WRITTEN(cc, Qundef, (VALUE)next_shape);
}
}
else {
vm_ic_attr_index_set(iseq, ic, index, SHAPE_ID(shape), SHAPE_ID(next_shape));
RB_OBJ_WRITTEN(iseq, Qundef, (VALUE)shape);
RB_OBJ_WRITTEN(iseq, Qundef, (VALUE)next_shape);
}
}
ALWAYS_INLINE(static VALUE vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)); ALWAYS_INLINE(static VALUE vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr));
NOINLINE(static VALUE vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic)); NOINLINE(static VALUE vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic));
NOINLINE(static VALUE vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc)); NOINLINE(static VALUE vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc));
static VALUE static VALUE
vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr) vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
{
#if OPT_IC_FOR_IVAR
switch (BUILTIN_TYPE(obj)) {
case T_OBJECT:
{ {
rb_check_frozen_internal(obj); rb_check_frozen_internal(obj);
#if OPT_IC_FOR_IVAR attr_index_t index;
if (RB_TYPE_P(obj, T_OBJECT)) {
struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj);
struct rb_iv_index_tbl_entry *ent;
if (iv_index_tbl_lookup(iv_index_tbl, id, &ent)) { uint32_t num_iv = ROBJECT_NUMIV(obj);
if (!is_attr) { rb_shape_t* shape = rb_shape_get_shape(obj);
vm_ic_entry_set(ic, ent, iseq); rb_shape_t* next_shape = rb_shape_get_next(shape, obj, id);
if (shape != next_shape) {
rb_shape_set_shape(obj, next_shape);
} }
else if (ent->index >= INT_MAX) {
if (rb_shape_get_iv_index(next_shape, id, &index)) { // index comes from the shape transition tree
if (index >= MAX_IVARS) {
rb_raise(rb_eArgError, "too many instance variables"); rb_raise(rb_eArgError, "too many instance variables");
} }
populate_cache(index, shape, next_shape, id, iseq, ic, cc, is_attr);
}
else { else {
vm_cc_attr_index_set(cc, (int)(ent->index)); rb_bug("Didn't find instance variable %s\n", rb_id2name(id));
} }
uint32_t index = ent->index; // Ensure the IV buffer is wide enough to store the IV
if (UNLIKELY(index >= num_iv)) {
if (UNLIKELY(index >= ROBJECT_NUMIV(obj))) {
rb_init_iv_list(obj); rb_init_iv_list(obj);
} }
VALUE *ptr = ROBJECT_IVPTR(obj); VALUE *ptr = ROBJECT_IVPTR(obj);
RB_OBJ_WRITE(obj, &ptr[index], val); RB_OBJ_WRITE(obj, &ptr[index], val);
RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_iv_hit); RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_iv_hit);
return val;
}
case T_CLASS:
case T_MODULE:
break;
default:
{
rb_shape_t * shape = rb_shape_get_shape(obj);
rb_ivar_set(obj, id, val);
rb_shape_t * next_shape = rb_shape_get_shape(obj);
attr_index_t index;
if (rb_shape_get_iv_index(next_shape, id, &index)) { // index comes from the shape transition tree
if (index >= MAX_IVARS) {
rb_raise(rb_eArgError, "too many instance variables");
}
populate_cache(index, shape, next_shape, id, iseq, ic, cc, is_attr);
}
else {
rb_bug("didn't find the id\n");
}
return val; return val;
} }
} }
@ -1250,39 +1340,99 @@ vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache
return vm_setivar_slowpath(obj, id, val, NULL, NULL, cc, true); return vm_setivar_slowpath(obj, id, val, NULL, NULL, cc, true);
} }
NOINLINE(static VALUE vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t source_shape_id, shape_id_t dest_shape_id, attr_index_t index));
static VALUE
vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t source_shape_id, shape_id_t dest_shape_id, attr_index_t index)
{
#if SHAPE_IN_BASIC_FLAGS
shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
#else
shape_id_t shape_id = rb_generic_shape_id(obj);
#endif
// Cache hit case
if (shape_id == source_shape_id) {
RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
struct gen_ivtbl *ivtbl = 0;
if (dest_shape_id != shape_id) {
ivtbl = rb_ensure_generic_iv_list_size(obj, index + 1);
#if SHAPE_IN_BASIC_FLAGS
RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
#else
ivtbl->shape_id = dest_shape_id;
#endif
RB_OBJ_WRITTEN(obj, Qundef, rb_shape_get_shape_by_id(dest_shape_id));
}
else {
// Just get the IV table
RUBY_ASSERT(GET_VM()->shape_list[dest_shape_id]);
rb_gen_ivtbl_get(obj, 0, &ivtbl);
}
VALUE *ptr = ivtbl->ivptr;
RB_OBJ_WRITE(obj, &ptr[index], val);
RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
return val;
}
return Qundef;
}
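`vm_setivar_default` covers receivers whose ivars live in the generic ivar table (anything that is not a T_OBJECT, T_CLASS, or T_MODULE); those objects still get shape transitions and cache hits:

```ruby
str = +"str"                       # T_STRING: ivars live in the generic ivtbl
str.instance_variable_set(:@a, 1)  # still transitions the shape
RubyVM.debug_shape(str).edge_name  # => :@a
```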
static inline VALUE static inline VALUE
vm_setivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr) vm_setivar(VALUE obj, ID id, VALUE val, shape_id_t source_shape_id, shape_id_t dest_shape_id, attr_index_t index)
{ {
#if OPT_IC_FOR_IVAR #if OPT_IC_FOR_IVAR
if (LIKELY(RB_TYPE_P(obj, T_OBJECT)) && switch (BUILTIN_TYPE(obj)) {
LIKELY(!RB_OBJ_FROZEN_RAW(obj))) { case T_OBJECT:
{
VM_ASSERT(!rb_ractor_shareable_p(obj) || rb_obj_frozen_p(obj));
// If object's shape id is the same as the source
// then do the shape transition and write the ivar
// If object's shape id is the same as the dest
// then write the ivar
shape_id_t shape_id = ROBJECT_SHAPE_ID(obj);
// Do we have a cache hit *and* is the CC initialized?
if (shape_id == source_shape_id) {
RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
VM_ASSERT(!rb_ractor_shareable_p(obj)); VM_ASSERT(!rb_ractor_shareable_p(obj));
if (LIKELY( if (dest_shape_id != shape_id) {
(!is_attr && RB_DEBUG_COUNTER_INC_UNLESS(ivar_set_ic_miss_serial, vm_ic_entry_p(ic) && ic->entry->class_serial == RCLASS_SERIAL(RBASIC(obj)->klass))) ||
( is_attr && RB_DEBUG_COUNTER_INC_UNLESS(ivar_set_ic_miss_unset, vm_cc_attr_index_p(cc))))) {
uint32_t index = !is_attr ? vm_ic_entry_index(ic) : vm_cc_attr_index(cc);
if (UNLIKELY(index >= ROBJECT_NUMIV(obj))) { if (UNLIKELY(index >= ROBJECT_NUMIV(obj))) {
rb_init_iv_list(obj); rb_init_iv_list(obj);
} }
ROBJECT_SET_SHAPE_ID(obj, dest_shape_id);
}
else {
RUBY_ASSERT(GET_VM()->shape_list[dest_shape_id]);
}
RUBY_ASSERT(index < ROBJECT_NUMIV(obj));
VALUE *ptr = ROBJECT_IVPTR(obj); VALUE *ptr = ROBJECT_IVPTR(obj);
RB_OBJ_WRITE(obj, &ptr[index], val); RB_OBJ_WRITE(obj, &ptr[index], val);
RB_DEBUG_COUNTER_INC(ivar_set_ic_hit); RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
return val; /* inline cache hit */
return val;
} }
} }
else { break;
case T_CLASS:
case T_MODULE:
RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_noobject); RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_noobject);
default:
break;
} }
return Qundef;
#endif /* OPT_IC_FOR_IVAR */ #endif /* OPT_IC_FOR_IVAR */
if (is_attr) {
return vm_setivar_slowpath_attr(obj, id, val, cc);
}
else {
return vm_setivar_slowpath_ivar(obj, id, val, iseq, ic);
}
} }
static VALUE static VALUE
@ -1377,7 +1527,22 @@ vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic)
static inline void static inline void
vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic) vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
{ {
vm_setivar(obj, id, val, iseq, ic, 0, 0); shape_id_t source_shape_id = vm_ic_attr_index_source_shape_id(ic);
attr_index_t index = vm_ic_attr_index(ic);
shape_id_t dest_shape_id = vm_ic_attr_index_dest_shape_id(ic);
if (UNLIKELY(vm_setivar(obj, id, val, source_shape_id, dest_shape_id, index) == Qundef)) {
switch (BUILTIN_TYPE(obj)) {
case T_OBJECT:
case T_CLASS:
case T_MODULE:
break;
default:
if (vm_setivar_default(obj, id, val, source_shape_id, dest_shape_id, index) != Qundef) {
return;
}
}
vm_setivar_slowpath_ivar(obj, id, val, iseq, ic);
}
} }
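The set-site cache stores a (source, dest) shape-ID pair: on a hit against the source shape, the VM performs the cached transition and write without consulting the shape tree. For example:

```ruby
class Foo
  def set_a
    @a = 1
  end
end

a = Foo.new
a.set_a  # miss: walks the shape tree, caches source/dest shape ids and index
b = Foo.new
b.set_a  # hit: b is on the cached source shape, so transition + write directly
```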
void void
@ -1386,28 +1551,6 @@ rb_vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IV
vm_setinstancevariable(iseq, obj, id, val, ic); vm_setinstancevariable(iseq, obj, id, val, ic);
} }
/* Set the instance variable +val+ on object +obj+ at the +index+.
* This function only works with T_OBJECT objects, so make sure
* +obj+ is of type T_OBJECT before using this function.
*/
VALUE
rb_vm_set_ivar_idx(VALUE obj, uint32_t index, VALUE val)
{
RUBY_ASSERT(RB_TYPE_P(obj, T_OBJECT));
rb_check_frozen_internal(obj);
VM_ASSERT(!rb_ractor_shareable_p(obj));
if (UNLIKELY(index >= ROBJECT_NUMIV(obj))) {
rb_init_iv_list(obj);
}
VALUE *ptr = ROBJECT_IVPTR(obj);
RB_OBJ_WRITE(obj, &ptr[index], val);
return val;
}
static VALUE static VALUE
vm_throw_continue(const rb_execution_context_t *ec, VALUE err) vm_throw_continue(const rb_execution_context_t *ec, VALUE err)
{ {
@ -3100,17 +3243,45 @@ vm_call_ivar(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_call
const struct rb_callcache *cc = calling->cc; const struct rb_callcache *cc = calling->cc;
RB_DEBUG_COUNTER_INC(ccf_ivar); RB_DEBUG_COUNTER_INC(ccf_ivar);
cfp->sp -= 1; cfp->sp -= 1;
return vm_getivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, NULL, NULL, cc, TRUE); VALUE ivar = vm_getivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, NULL, NULL, cc, TRUE);
return ivar;
}
static VALUE
vm_call_attrset_direct(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_callcache *cc, VALUE obj)
{
RB_DEBUG_COUNTER_INC(ccf_attrset);
VALUE val = *(cfp->sp - 1);
cfp->sp -= 2;
shape_id_t source_shape_id = vm_cc_attr_index_source_shape_id(cc);
attr_index_t index = vm_cc_attr_index(cc);
shape_id_t dest_shape_id = vm_cc_attr_index_dest_shape_id(cc);
ID id = vm_cc_cme(cc)->def->body.attr.id;
rb_check_frozen_internal(obj);
VALUE res = vm_setivar(obj, id, val, source_shape_id, dest_shape_id, index);
if (res == Qundef) {
switch (BUILTIN_TYPE(obj)) {
case T_OBJECT:
case T_CLASS:
case T_MODULE:
break;
default:
{
res = vm_setivar_default(obj, id, val, source_shape_id, dest_shape_id, index);
if (res != Qundef) {
return res;
}
}
}
res = vm_setivar_slowpath_attr(obj, id, val, cc);
}
return res;
} }
static VALUE static VALUE
vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling) vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{ {
const struct rb_callcache *cc = calling->cc; return vm_call_attrset_direct(ec, cfp, calling->cc, calling->recv);
RB_DEBUG_COUNTER_INC(ccf_attrset);
VALUE val = *(cfp->sp - 1);
cfp->sp -= 2;
return vm_setivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, val, NULL, NULL, cc, 1);
} }
 bool

@@ -3219,7 +3390,7 @@ vm_call_alias(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_cal
 {
     calling->cc = &VM_CC_ON_STACK(Qundef,
                                   vm_call_general,
-                                  { 0 },
+                                  {{0}},
                                   aliased_callable_method_entry(vm_cc_cme(calling->cc)));

     return vm_call_method_each_type(ec, cfp, calling);

@@ -3389,7 +3560,7 @@ vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_
     ec->method_missing_reason = reason;
     calling->ci = &VM_CI_ON_STACK(idMethodMissing, flag, argc, vm_ci_kwarg(orig_ci));
-    calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, { 0 },
+    calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }},
                                   rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv), idMethodMissing, NULL));
     return vm_call_method(ec, reg_cfp, calling);
 }

@@ -3415,7 +3586,7 @@ vm_call_zsuper(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_ca
         cme = refined_method_callable_without_refinement(cme);
     }
-    calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, { 0 }, cme);
+    calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, cme);
     return vm_call_method_each_type(ec, cfp, calling);
 }

@@ -3522,7 +3693,7 @@ search_refined_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struc
 static VALUE
 vm_call_refined(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
 {
-    struct rb_callcache *ref_cc = &VM_CC_ON_STACK(Qundef, vm_call_general, { 0 },
+    struct rb_callcache *ref_cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }},
                                                   search_refined_method(ec, cfp, calling));

     if (vm_cc_cme(ref_cc)) {
@@ -3702,18 +3873,45 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st
        CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);

        rb_check_arity(calling->argc, 1, 1);
-       vm_cc_attr_index_initialize(cc);

        const unsigned int aset_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_KWARG);

+       if (vm_cc_markable(cc)) {
+           vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
            VM_CALL_METHOD_ATTR(v,
-                               vm_call_attrset(ec, cfp, calling),
+                               vm_call_attrset_direct(ec, cfp, cc, calling->recv),
                                CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
+       } else {
+           cc = &((struct rb_callcache) {
+               .flags = T_IMEMO |
+                   (imemo_callcache << FL_USHIFT) |
+                   VM_CALLCACHE_UNMARKABLE |
+                   ((VALUE)INVALID_SHAPE_ID << SHAPE_FLAG_SHIFT) |
+                   VM_CALLCACHE_ON_STACK,
+               .klass = cc->klass,
+               .cme_  = cc->cme_,
+               .call_ = cc->call_,
+               .aux_  = {
+                   .attr = {
+                       .index = 0,
+                       .dest_shape_id = INVALID_SHAPE_ID,
+                   }
+               },
+           });
+
+           VM_CALL_METHOD_ATTR(v,
+                               vm_call_attrset_direct(ec, cfp, cc, calling->recv),
+                               CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
+       }
        return v;

      case VM_METHOD_TYPE_IVAR:
        CALLER_SETUP_ARG(cfp, calling, ci);
        CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
        rb_check_arity(calling->argc, 0, 0);
-       vm_cc_attr_index_initialize(cc);
+       if (vm_cc_markable(cc)) {
+           vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
+       }
        const unsigned int ivar_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT);
        VM_CALL_METHOD_ATTR(v,
                            vm_call_ivar(ec, cfp, calling),
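Worth noting in the hunk above: the source shape id travels inside the call cache's flags word (the `(VALUE)INVALID_SHAPE_ID << SHAPE_FLAG_SHIFT` term), which is why an unmarkable on-stack copy is built when the real cache cannot safely be mutated. A rough sketch of that packing, with a hypothetical shift value and `demo_` stand-in names (the real accessors are vm_cc_attr_index_source_shape_id and friends):

```c
#include <stdint.h>

#define DEMO_SHAPE_FLAG_SHIFT 32  /* illustrative; CRuby uses SHAPE_FLAG_SHIFT */

/* Read the shape id back out of the high bits of the flags word. */
static uint32_t
demo_cc_source_shape_id(uint64_t cc_flags)
{
    return (uint32_t)(cc_flags >> DEMO_SHAPE_FLAG_SHIFT);
}

/* Replace the packed shape id, leaving the low imemo/type bits intact. */
static uint64_t
demo_cc_set_source_shape_id(uint64_t cc_flags, uint32_t shape_id)
{
    cc_flags &= (((uint64_t)1 << DEMO_SHAPE_FLAG_SHIFT) - 1); /* clear old id */
    return cc_flags | ((uint64_t)shape_id << DEMO_SHAPE_FLAG_SHIFT);
}
```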
View File
@@ -40,6 +40,7 @@ fn main() {
         .header("internal.h")
         .header("internal/re.h")
         .header("include/ruby/ruby.h")
+        .header("shape.h")
         .header("vm_core.h")
         .header("vm_callinfo.h")

@@ -81,6 +82,12 @@ fn main() {
         // This function prints info about a value and is useful for debugging
         .allowlist_function("rb_obj_info_dump")

+        // From shape.h
+        .allowlist_function("rb_shape_get_shape_id")
+        .allowlist_function("rb_shape_get_shape_by_id")
+        .allowlist_function("rb_shape_flags_mask")
+        .allowlist_function("rb_shape_get_iv_index")
+
         // From ruby/internal/intern/object.h
         .allowlist_function("rb_obj_is_kind_of")
View File
@@ -617,7 +617,7 @@ fn write_rm_multi(cb: &mut CodeBlock, op_mem_reg8: u8, op_mem_reg_pref: u8, op_r
                 write_rm(cb, sz_pref, rex_w, X86Opnd::None, opnd0, op_ext_imm, &[op_mem_imm_lrg]);
                 cb.write_int(uimm.value, if opnd_size > 32 { 32 } else { opnd_size.into() });
             } else {
-                panic!("immediate value too large");
+                panic!("immediate value too large (num_bits={})", num_bits);
             }
         },
         _ => unreachable!()
View File
@@ -1938,14 +1938,12 @@ fn gen_set_ivar(
     let val_opnd = ctx.stack_pop(1);
     let recv_opnd = ctx.stack_pop(1);

-    let ivar_index: u32 = unsafe { rb_obj_ensure_iv_index_mapping(recv, ivar_name) };
-
-    // Call rb_vm_set_ivar_idx with the receiver, the index of the ivar, and the value
+    // Call rb_vm_set_ivar_id with the receiver, the ivar name, and the value
     let val = asm.ccall(
-        rb_vm_set_ivar_idx as *const u8,
+        rb_vm_set_ivar_id as *const u8,
         vec![
             recv_opnd,
-            Opnd::Imm(ivar_index.into()),
+            Opnd::UImm(ivar_name.into()),
             val_opnd,
         ],
     );
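gen_set_ivar no longer pre-resolves an ivar index at compile time; it calls a C helper keyed by the ivar name. The helper's exact body lives on the C side of this commit; assuming it essentially wraps the public rb_ivar_set API behind a frozen check, it would look roughly like this (a plausible outline, not the definitive implementation):

```c
#include "ruby/ruby.h"  /* VALUE, ID, rb_check_frozen, rb_ivar_set */

VALUE
rb_vm_set_ivar_id(VALUE obj, ID id, VALUE val)
{
    rb_check_frozen(obj);        /* raise FrozenError before mutating */
    return rb_ivar_set(obj, id, val); /* slow-path set; fills shape caches */
}
```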
@@ -2023,38 +2021,50 @@ fn gen_get_ivar(
         return EndBlock;
     }

-    // FIXME: Mapping the index could fail when there is too many ivar names. If we're
-    // compiling for a branch stub that can cause the exception to be thrown from the
-    // wrong PC.
-    let ivar_index =
-        unsafe { rb_obj_ensure_iv_index_mapping(comptime_receiver, ivar_name) }.as_usize();
+    let ivar_index = unsafe {
+        let shape_id = comptime_receiver.shape_of();
+        let shape = rb_shape_get_shape_by_id(shape_id);
+        let mut ivar_index: u32 = 0;
+        if rb_shape_get_iv_index(shape, ivar_name, &mut ivar_index) {
+            Some(ivar_index as usize)
+        } else {
+            None
+        }
+    };
+
+    // must be before stack_pop
+    let recv_type = ctx.get_opnd_type(recv_opnd);
+
+    // Upgrade type
+    if !recv_type.is_heap() {
+        ctx.upgrade_opnd_type(recv_opnd, Type::UnknownHeap);
+    }

     // Pop receiver if it's on the temp stack
     if recv_opnd != SelfOpnd {
         ctx.stack_pop(1);
     }

-    if USE_RVARGC != 0 {
-        // Check that the ivar table is big enough
-        // Check that the slot is inside the ivar table (num_slots > index)
-        let num_slots = Opnd::mem(32, recv, ROBJECT_OFFSET_NUMIV);
-        asm.cmp(num_slots, Opnd::UImm(ivar_index as u64));
-        asm.jbe(counted_exit!(ocb, side_exit, getivar_idx_out_of_range).into());
+    // Guard heap object
+    if !recv_type.is_heap() {
+        guard_object_is_heap(asm, recv, side_exit);
     }

     // Compile time self is embedded and the ivar index lands within the object
-    let test_result = unsafe { FL_TEST_RAW(comptime_receiver, VALUE(ROBJECT_EMBED.as_usize())) != VALUE(0) };
-    if test_result {
-        // See ROBJECT_IVPTR() from include/ruby/internal/core/robject.h
-
-        // Guard that self is embedded
-        // TODO: BT and JC is shorter
-        asm.comment("guard embedded getivar");
-        let flags_opnd = Opnd::mem(64, recv, RUBY_OFFSET_RBASIC_FLAGS);
-        asm.test(flags_opnd, Opnd::UImm(ROBJECT_EMBED as u64));
-        let side_exit = counted_exit!(ocb, side_exit, getivar_megamorphic);
+    let embed_test_result = unsafe { FL_TEST_RAW(comptime_receiver, VALUE(ROBJECT_EMBED.as_usize())) != VALUE(0) };
+
+    let flags_mask: usize = unsafe { rb_shape_flags_mask() }.as_usize();
+    let expected_flags_mask: usize = (RUBY_T_MASK as usize) | !flags_mask | (ROBJECT_EMBED as usize);
+    let expected_flags = comptime_receiver.builtin_flags() & expected_flags_mask;
+
+    // Combined guard for all flags: shape, embeddedness, and T_OBJECT
+    let flags_opnd = Opnd::mem(64, recv, RUBY_OFFSET_RBASIC_FLAGS);
+
+    asm.comment("guard shape, embedded, and T_OBJECT");
+    let flags_opnd = asm.and(flags_opnd, Opnd::UImm(expected_flags_mask as u64));
+    asm.cmp(flags_opnd, Opnd::UImm(expected_flags as u64));
     jit_chain_guard(
-        JCC_JZ,
+        JCC_JNE,
         jit,
         &starting_context,
         asm,

@@ -2063,41 +2073,30 @@ fn gen_get_ivar(
         side_exit,
     );

-    // Load the variable
-    let offs = ROBJECT_OFFSET_AS_ARY + (ivar_index * SIZEOF_VALUE) as i32;
-    let ivar_opnd = Opnd::mem(64, recv, offs);
-
-    // Guard that the variable is not Qundef
-    asm.cmp(ivar_opnd, Qundef.into());
-    let out_val = asm.csel_e(Qnil.into(), ivar_opnd);
-
-    // Push the ivar on the stack
-    let out_opnd = ctx.stack_push(Type::Unknown);
-    asm.mov(out_opnd, out_val);
+    // If there is no IVAR index, then the ivar was undefined
+    // when we entered the compiler.  That means we can just return
+    // nil for this shape + iv name
+    if ivar_index.is_none() {
+        let out_opnd = ctx.stack_push(Type::Nil);
+        asm.mov(out_opnd, Qnil.into());
+    } else if embed_test_result {
+        // See ROBJECT_IVPTR() from include/ruby/internal/core/robject.h
+
+        // Load the variable
+        let offs = ROBJECT_OFFSET_AS_ARY + (ivar_index.unwrap() * SIZEOF_VALUE) as i32;
+        let ivar_opnd = Opnd::mem(64, recv, offs);
+
+        // Push the ivar on the stack
+        let out_opnd = ctx.stack_push(Type::Unknown);
+        asm.mov(out_opnd, ivar_opnd);
     } else {
         // Compile time value is *not* embedded.

-        // Guard that value is *not* embedded
-        // See ROBJECT_IVPTR() from include/ruby/internal/core/robject.h
-        asm.comment("guard extended getivar");
-        let flags_opnd = Opnd::mem(64, recv, RUBY_OFFSET_RBASIC_FLAGS);
-        asm.test(flags_opnd, Opnd::UImm(ROBJECT_EMBED as u64));
-        let megamorphic_side_exit = counted_exit!(ocb, side_exit, getivar_megamorphic);
-        jit_chain_guard(
-            JCC_JNZ,
-            jit,
-            &starting_context,
-            asm,
-            ocb,
-            max_chain_depth,
-            megamorphic_side_exit,
-        );
-
         if USE_RVARGC == 0 {
             // Check that the extended table is big enough
             // Check that the slot is inside the extended table (num_slots > index)
             let num_slots = Opnd::mem(32, recv, ROBJECT_OFFSET_NUMIV);
-            asm.cmp(num_slots, Opnd::UImm(ivar_index as u64));
+            asm.cmp(num_slots, Opnd::UImm(ivar_index.unwrap() as u64));
             asm.jbe(counted_exit!(ocb, side_exit, getivar_idx_out_of_range).into());
         }

@@ -2105,15 +2104,10 @@ fn gen_get_ivar(
         let tbl_opnd = asm.load(Opnd::mem(64, recv, ROBJECT_OFFSET_AS_HEAP_IVPTR));

         // Read the ivar from the extended table
-        let ivar_opnd = Opnd::mem(64, tbl_opnd, (SIZEOF_VALUE * ivar_index) as i32);
-
-        // Check that the ivar is not Qundef
-        asm.cmp(ivar_opnd, Qundef.into());
-        let out_val = asm.csel_ne(ivar_opnd, Qnil.into());
-
-        // Push the ivar on the stack
+        let ivar_opnd = Opnd::mem(64, tbl_opnd, (SIZEOF_VALUE * ivar_index.unwrap()) as i32);
+
         let out_opnd = ctx.stack_push(Type::Unknown);
-        asm.mov(out_opnd, out_val);
+        asm.mov(out_opnd, ivar_opnd);
     }

     // Jump to next instruction. This allows guard chains to share the same successor.
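The payoff of storing the shape id in the object's flags shows up here: a single AND plus CMP now guards the builtin type, the embed bit, and the shape id at once, replacing the separate embedded/extended guards above and the class guard deleted in the next hunk. A standalone model of the check the JIT emits, with illustrative bit positions (in CRuby the real masks come from RUBY_T_MASK, ROBJECT_EMBED, and the complement of rb_shape_flags_mask()):

```c
#include <stdbool.h>
#include <stdint.h>

#define DEMO_T_MASK     0x1fULL            /* low bits: builtin type */
#define DEMO_EMBED_BIT  (1ULL << 13)       /* embedded-ivars flag */
#define DEMO_SHAPE_BITS (0xffffULL << 48)  /* shape id in the high bits */

static bool
demo_getivar_guard(uint64_t recv_flags, uint64_t comptime_flags)
{
    uint64_t mask = DEMO_T_MASK | DEMO_EMBED_BIT | DEMO_SHAPE_BITS;
    /* one AND + one CMP covers T_OBJECT, embeddedness, and the shape */
    return (recv_flags & mask) == (comptime_flags & mask);
}
```

On a mismatch, the generated code takes jit_chain_guard's JCC_JNE path and recompiles for the newly seen shape, up to the chain depth limit.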
@@ -2136,25 +2130,12 @@ fn gen_getinstancevariable(
     let ivar_name = jit_get_arg(jit, 0).as_u64();

     let comptime_val = jit_peek_at_self(jit);
-    let comptime_val_klass = comptime_val.class_of();

     // Generate a side exit
     let side_exit = get_side_exit(jit, ocb, ctx);

     // Guard that the receiver has the same class as the one from compile time.
     let self_asm_opnd = Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SELF);
-    jit_guard_known_klass(
-        jit,
-        ctx,
-        asm,
-        ocb,
-        comptime_val_klass,
-        self_asm_opnd,
-        SelfOpnd,
-        comptime_val,
-        GET_IVAR_MAX_DEPTH,
-        side_exit,
-    );

     gen_get_ivar(
         jit,
View File
@ -120,7 +120,7 @@ extern "C" {
obj: VALUE, obj: VALUE,
v: VALUE, v: VALUE,
) -> bool; ) -> bool;
pub fn rb_vm_set_ivar_idx(obj: VALUE, idx: u32, val: VALUE) -> VALUE; pub fn rb_vm_set_ivar_id(obj: VALUE, idx: u32, val: VALUE) -> VALUE;
pub fn rb_vm_setinstancevariable(iseq: IseqPtr, obj: VALUE, id: ID, val: VALUE, ic: IVC); pub fn rb_vm_setinstancevariable(iseq: IseqPtr, obj: VALUE, id: ID, val: VALUE, ic: IVC);
pub fn rb_aliased_callable_method_entry( pub fn rb_aliased_callable_method_entry(
me: *const rb_callable_method_entry_t, me: *const rb_callable_method_entry_t,
@ -354,18 +354,26 @@ impl VALUE {
/// Read the flags bits from the RBasic object, then return a Ruby type enum (e.g. RUBY_T_ARRAY) /// Read the flags bits from the RBasic object, then return a Ruby type enum (e.g. RUBY_T_ARRAY)
pub fn builtin_type(self) -> ruby_value_type { pub fn builtin_type(self) -> ruby_value_type {
(self.builtin_flags() & (RUBY_T_MASK as usize)) as ruby_value_type
}
pub fn builtin_flags(self) -> usize {
assert!(!self.special_const_p()); assert!(!self.special_const_p());
let VALUE(cval) = self; let VALUE(cval) = self;
let rbasic_ptr = cval as *const RBasic; let rbasic_ptr = cval as *const RBasic;
let flags_bits: usize = unsafe { (*rbasic_ptr).flags }.as_usize(); let flags_bits: usize = unsafe { (*rbasic_ptr).flags }.as_usize();
(flags_bits & (RUBY_T_MASK as usize)) as ruby_value_type return flags_bits;
} }
pub fn class_of(self) -> VALUE { pub fn class_of(self) -> VALUE {
unsafe { CLASS_OF(self) } unsafe { CLASS_OF(self) }
} }
pub fn shape_of(self) -> u32 {
unsafe { rb_shape_get_shape_id(self) }
}
pub fn as_isize(self) -> isize { pub fn as_isize(self) -> isize {
let VALUE(is) = self; let VALUE(is) = self;
is as isize is as isize
View File
@ -269,6 +269,30 @@ extern "C" {
extern "C" { extern "C" {
pub fn rb_reg_new_ary(ary: VALUE, options: ::std::os::raw::c_int) -> VALUE; pub fn rb_reg_new_ary(ary: VALUE, options: ::std::os::raw::c_int) -> VALUE;
} }
pub type attr_index_t = u32;
pub type shape_id_t = u32;
#[repr(C)]
pub struct rb_shape {
pub flags: VALUE,
pub parent: *mut rb_shape,
pub edges: *mut rb_id_table,
pub edge_name: ID,
pub iv_count: attr_index_t,
pub type_: u8,
}
pub type rb_shape_t = rb_shape;
extern "C" {
pub fn rb_shape_get_shape_by_id(shape_id: shape_id_t) -> *mut rb_shape_t;
}
extern "C" {
pub fn rb_shape_get_shape_id(obj: VALUE) -> shape_id_t;
}
extern "C" {
pub fn rb_shape_get_iv_index(shape: *mut rb_shape_t, id: ID, value: *mut attr_index_t) -> bool;
}
extern "C" {
pub fn rb_shape_flags_mask() -> VALUE;
}
pub const idDot2: ruby_method_ids = 128; pub const idDot2: ruby_method_ids = 128;
pub const idDot3: ruby_method_ids = 129; pub const idDot3: ruby_method_ids = 129;
pub const idUPlus: ruby_method_ids = 132; pub const idUPlus: ruby_method_ids = 132;
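These bindings mirror the C declarations this commit adds in shape.h. From C, resolving an ivar's storage slot through the shape tree looks roughly like the following (the wrapper name is hypothetical; the rb_shape_* calls are exactly the ones allowlisted for bindgen above):

```c
#include "ruby/ruby.h"  /* VALUE, ID */
#include "shape.h"      /* shape_id_t, attr_index_t, rb_shape_* */

/* Sketch: find the storage index of an ivar on obj via its shape.
 * Returns false when the shape (and its parent chain) never set it. */
static bool
demo_iv_index(VALUE obj, ID ivar_name, attr_index_t *index_out)
{
    shape_id_t shape_id = rb_shape_get_shape_id(obj);
    rb_shape_t *shape = rb_shape_get_shape_by_id(shape_id);
    return rb_shape_get_iv_index(shape, ivar_name, index_out);
}
```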
@@ -513,6 +537,7 @@ pub const imemo_parser_strterm: imemo_type = 10;
 pub const imemo_callinfo: imemo_type = 11;
 pub const imemo_callcache: imemo_type = 12;
 pub const imemo_constcache: imemo_type = 13;
+pub const imemo_shape: imemo_type = 14;
 pub type imemo_type = u32;
 pub const METHOD_VISI_UNDEF: rb_method_visibility_t = 0;
 pub const METHOD_VISI_PUBLIC: rb_method_visibility_t = 1;
@@ -572,6 +597,11 @@ pub const OPTIMIZED_METHOD_TYPE_STRUCT_AREF: method_optimized_type = 3;
 pub const OPTIMIZED_METHOD_TYPE_STRUCT_ASET: method_optimized_type = 4;
 pub const OPTIMIZED_METHOD_TYPE__MAX: method_optimized_type = 5;
 pub type method_optimized_type = u32;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct rb_id_table {
+    _unused: [u8; 0],
+}
 extern "C" {
     pub fn rb_method_entry_at(obj: VALUE, id: ID) -> *const rb_method_entry_t;
 }
@@ -600,9 +630,10 @@ pub struct iseq_inline_constant_cache {
     pub segments: *const ID,
 }
 #[repr(C)]
-#[derive(Debug, Copy, Clone)]
 pub struct iseq_inline_iv_cache_entry {
-    pub entry: *mut rb_iv_index_tbl_entry,
+    pub source_shape_id: shape_id_t,
+    pub dest_shape_id: shape_id_t,
+    pub attr_index: attr_index_t,
 }
 #[repr(C)]
 #[derive(Debug, Copy, Clone)]
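The inline cache entry shrinks from a pointer into the old per-class iv-index table to three plain scalars, so validating it is a shape-id compare rather than a class-serial check. A sketch of a read-side hit against this layout (stand-in types and names, same caveats as the earlier write-path sketch):

```c
#include <stdbool.h>
#include <stdint.h>

typedef uint32_t shape_id_t;
typedef uint32_t attr_index_t;

struct demo_obj {
    shape_id_t shape_id;
    long ivars[8];
};

struct demo_iv_cache {
    shape_id_t   source_shape_id; /* shape the receiver had when cached */
    shape_id_t   dest_shape_id;   /* shape after a set; equals source on reads */
    attr_index_t attr_index;      /* slot in the ivar storage */
};

/* Returns false on a miss; the interpreter then takes the slow path
 * and refills the cache for the shape it just saw. */
static bool
demo_cached_getivar(const struct demo_obj *obj,
                    const struct demo_iv_cache *ic, long *out)
{
    if (obj->shape_id != ic->source_shape_id) {
        return false;
    }
    *out = obj->ivars[ic->attr_index];
    return true;
}
```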
@ -698,12 +729,6 @@ extern "C" {
) -> *const rb_callable_method_entry_t; ) -> *const rb_callable_method_entry_t;
} }
#[repr(C)] #[repr(C)]
pub struct rb_iv_index_tbl_entry {
pub index: u32,
pub class_serial: rb_serial_t,
pub class_value: VALUE,
}
#[repr(C)]
pub struct rb_cvar_class_tbl_entry { pub struct rb_cvar_class_tbl_entry {
pub index: u32, pub index: u32,
pub global_cvar_state: rb_serial_t, pub global_cvar_state: rb_serial_t,