Fix Object Movement allocation in GC
When moving objects between size pools we have to assign them a new shape. Previously this happened while updating references: we tried to create a new shape tree that mirrored the existing tree, but rooted at the root shape of the new size pool. If that tree didn't already exist, creating it allocated, which could trigger a GC in the middle of a GC. This commit changes object movement to look for a pre-existing mirrored tree instead; if no such tree exists, the object is simply not moved to the new pool. This allows us to remove the shape allocation from update references entirely.

Co-Authored-By: Peter Zhu <peter@peterzhu.ca>
commit 9c54466e29
parent 723cca6d82
2022-12-15 14:04:51 +00:00
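To make the idea concrete, here is a minimal standalone C model of the lookup (hypothetical, simplified structs and names — not CRuby's actual shape implementation): shapes form a trie keyed by ivar name, and moving an object to a new pool means replaying its shape path under that pool's root. The old approach allocated any missing nodes during the replay; the fix only follows edges that already exist and reports a miss instead.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define MAX_EDGES 4

typedef struct shape {
    const char *edge_name;          /* ivar this node adds; NULL for a root */
    struct shape *parent;
    struct shape *edges[MAX_EDGES]; /* child shapes, keyed by edge_name */
    size_t n_edges;
} shape_t;

/* Read-only child lookup; unlike the old rebuild path, never allocates. */
static shape_t *
find_edge(shape_t *shape, const char *edge_name)
{
    for (size_t i = 0; i < shape->n_edges; i++) {
        if (strcmp(shape->edges[i]->edge_name, edge_name) == 0) {
            return shape->edges[i];
        }
    }
    return NULL;
}

/* Replay dest_shape's path (root first, then each ivar edge) under new_root.
 * Returns NULL at the first missing edge, signalling "don't move the object". */
static shape_t *
traverse_from_new_root(shape_t *new_root, shape_t *dest_shape)
{
    shape_t *next = new_root;
    if (dest_shape->parent) {
        next = traverse_from_new_root(new_root, dest_shape->parent);
        if (!next) return NULL;
    }
    if (!dest_shape->edge_name) return next; /* a root adds no edge */
    return find_edge(next, dest_shape->edge_name);
}

int
main(void)
{
    /* Root for the current size pool, with one ivar edge "@a". */
    shape_t root_40 = { NULL, NULL, { NULL }, 0 };
    shape_t ivar_a  = { "@a", &root_40, { NULL }, 0 };
    root_40.edges[root_40.n_edges++] = &ivar_a;

    /* Root for the destination pool: the "@a" edge was never created there. */
    shape_t root_80 = { NULL, NULL, { NULL }, 0 };

    printf("same pool: %s\n", traverse_from_new_root(&root_40, &ivar_a) ? "found" : "missing");
    printf("new pool:  %s\n", traverse_from_new_root(&root_80, &ivar_a) ? "found" : "missing");
    return 0;
}

Any C99 compiler will build this; it prints "found" for the pool whose root already has the "@a" edge and "missing" for the fresh root, which in the real collector translates to "leave the object in its current size pool".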
gc.c | 41
@@ -574,6 +574,7 @@ struct RMoved {
     VALUE flags;
     VALUE dummy;
     VALUE destination;
+    shape_id_t original_shape_id;
 };
 
 #define RMOVED(obj) ((struct RMoved *)(obj))
@@ -6081,10 +6082,19 @@ invalidate_moved_plane(rb_objspace_t *objspace, struct heap_page *page, uintptr_
 
             object = rb_gc_location(forwarding_object);
 
+            shape_id_t old_shape_id = 0;
+            if (RB_TYPE_P(object, T_OBJECT)) {
+                old_shape_id = RMOVED(forwarding_object)->original_shape_id;
+            }
+
             gc_move(objspace, object, forwarding_object, GET_HEAP_PAGE(object)->slot_size, page->slot_size);
             /* forwarding_object is now our actual object, and "object"
              * is the free slot for the original page */
 
+            if (old_shape_id) {
+                ROBJECT_SET_SHAPE_ID(forwarding_object, old_shape_id);
+            }
+
             struct heap_page *orig_page = GET_HEAP_PAGE(object);
             orig_page->free_slots++;
             heap_page_add_freeobj(objspace, orig_page, object);
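This invalidation path consumes the shape id that gc_compact_move (below) stashes on the T_MOVED forwarding object: when a speculative move is rolled back, the object's original shape id is written back directly, with no shape-tree lookup or allocation.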
@@ -8448,12 +8458,28 @@ gc_compact_move(rb_objspace_t *objspace, rb_heap_t *heap, rb_size_pool_t *size_p
 {
     GC_ASSERT(BUILTIN_TYPE(src) != T_MOVED);
 
-    rb_heap_t *dheap = SIZE_POOL_EDEN_HEAP(gc_compact_destination_pool(objspace, size_pool, src));
+    rb_size_pool_t *dest_pool = gc_compact_destination_pool(objspace, size_pool, src);
+    rb_heap_t *dheap = SIZE_POOL_EDEN_HEAP(dest_pool);
+    rb_shape_t *new_shape = NULL;
+    rb_shape_t *orig_shape = NULL;
 
     if (gc_compact_heap_cursors_met_p(dheap)) {
         return dheap != heap;
     }
 
+    if (RB_TYPE_P(src, T_OBJECT)) {
+        orig_shape = rb_shape_get_shape(src);
+        if (dheap != heap) {
+            rb_shape_t *initial_shape = rb_shape_get_shape_by_id((shape_id_t)((dest_pool - size_pools) + SIZE_POOL_COUNT));
+            new_shape = rb_shape_traverse_from_new_root(initial_shape, orig_shape);
+
+            if (!new_shape) {
+                dest_pool = size_pool;
+                dheap = heap;
+            }
+        }
+    }
+
     while (!try_move(objspace, dheap, dheap->free_pages, src)) {
         struct gc_sweep_context ctx = {
             .page = dheap->sweeping_page,
@@ -8478,6 +8504,15 @@ gc_compact_move(rb_objspace_t *objspace, rb_heap_t *heap, rb_size_pool_t *size_p
             return false;
         }
     }
 
+    if (orig_shape) {
+        if (new_shape) {
+            VALUE dest = rb_gc_location(src);
+            rb_shape_set_shape(dest, new_shape);
+        }
+        RMOVED(src)->original_shape_id = rb_shape_id(orig_shape);
+    }
+
     return true;
 }
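Note the ordering in gc_compact_move: the destination shape is resolved read-only before try_move runs, and the fallback to dest_pool = size_pool keeps compaction working within the current pool whenever the mirrored tree is absent. Only after a successful move is the new shape applied to the destination copy, and the original shape id is recorded on the now-T_MOVED src so the move can still be invalidated later.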
@@ -10059,10 +10094,6 @@ gc_ref_update_object(rb_objspace_t *objspace, VALUE v)
             xfree(ptr);
         }
         ptr = ROBJECT(v)->as.ary;
 
-        size_t size_pool_shape_id = size_pool_idx_for_size(embed_size);
-        rb_shape_t * initial_shape = rb_shape_get_shape_by_id((shape_id_t)size_pool_shape_id + SIZE_POOL_COUNT);
-        rb_shape_t * new_shape = rb_shape_rebuild_shape(initial_shape, rb_shape_get_shape(v));
-        rb_shape_set_shape(v, new_shape);
     }
 #endif
shape.c | 33
@@ -403,6 +403,39 @@ rb_shape_id_offset(void)
     return sizeof(uintptr_t) - SHAPE_ID_NUM_BITS / sizeof(uintptr_t);
 }
 
+rb_shape_t *
+rb_shape_traverse_from_new_root(rb_shape_t *initial_shape, rb_shape_t *dest_shape)
+{
+    RUBY_ASSERT(initial_shape->type == SHAPE_T_OBJECT);
+    rb_shape_t *next_shape = initial_shape;
+
+    if (dest_shape->type != initial_shape->type) {
+        next_shape = rb_shape_traverse_from_new_root(initial_shape, rb_shape_get_parent(dest_shape));
+        if (!next_shape) {
+            return NULL;
+        }
+    }
+
+    switch ((enum shape_type)dest_shape->type) {
+      case SHAPE_IVAR:
+        if (!next_shape->edges) {
+            return NULL;
+        }
+        if (!rb_id_table_lookup(next_shape->edges, dest_shape->edge_name, (VALUE *)&next_shape)) {
+            return NULL;
+        }
+        break;
+      case SHAPE_ROOT:
+      case SHAPE_FROZEN:
+      case SHAPE_CAPACITY_CHANGE:
+      case SHAPE_INITIAL_CAPACITY:
+      case SHAPE_T_OBJECT:
+        break;
+    }
+
+    return next_shape;
+}
+
 rb_shape_t *
 rb_shape_rebuild_shape(rb_shape_t * initial_shape, rb_shape_t * dest_shape)
 {
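As a worked example: for an object whose chain is T_OBJECT -> @a -> @b, rb_shape_traverse_from_new_root recurses until it reaches the destination pool's T_OBJECT root, then follows the @a edge and the @b edge in order on the way back down, returning NULL at the first edge that was never created under that root. Non-ivar transitions (SHAPE_FROZEN, capacity changes) fall through the switch without consuming an edge, so only the ivar path has to already exist in the destination tree.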
shape.h | 2
@@ -178,6 +178,8 @@ rb_shape_t * rb_shape_alloc(ID edge_name, rb_shape_t * parent);
 rb_shape_t * rb_shape_alloc_with_size_pool_index(ID edge_name, rb_shape_t * parent, uint8_t size_pool_index);
 rb_shape_t * rb_shape_alloc_with_parent_id(ID edge_name, shape_id_t parent_id);
 
+rb_shape_t *rb_shape_traverse_from_new_root(rb_shape_t *initial_shape, rb_shape_t *orig_shape);
+
 bool rb_shape_set_shape_id(VALUE obj, shape_id_t shape_id);
 
 VALUE rb_obj_debug_shape(VALUE self, VALUE obj);
test/ruby/test_gc_compact.rb
@@ -270,6 +270,9 @@ class TestGCCompact < Test::Unit::TestCase
       ary = OBJ_COUNT.times.map { Foo.new }
       ary.each(&:add_ivars)
 
+      GC.start
+      Foo.new.add_ivars
+
       stats = GC.verify_compaction_references(expand_heap: true, toward: :empty)
 
       assert_operator(stats[:moved_up][:T_OBJECT], :>=, OBJ_COUNT)