Don't directly read the SIZE_POOL_COUNT in shapes

This removes the shapes code's assumption about SIZE_POOL_COUNT by having the GC expose the size pool slot sizes through rb_gc_size_pool_sizes() and the size-to-pool mapping through rb_gc_size_pool_id_for_size().
Author: Peter Zhu
Date:   2024-03-12 13:50:50 -04:00
Parent: d1eaa97ec3
Commit: 6ad347a105

4 changed files with 48 additions and 32 deletions

gc.c

@@ -2665,6 +2665,44 @@ rb_gc_size_allocatable_p(size_t size)
     return size <= size_pool_slot_size(SIZE_POOL_COUNT - 1);
 }
 
+static size_t size_pool_sizes[SIZE_POOL_COUNT + 1] = { 0 };
+
+size_t *
+rb_gc_size_pool_sizes(void)
+{
+    if (size_pool_sizes[0] == 0) {
+        for (unsigned char i = 0; i < SIZE_POOL_COUNT; i++) {
+            size_pool_sizes[i] = rb_size_pool_slot_size(i);
+        }
+    }
+
+    return size_pool_sizes;
+}
+
+size_t
+rb_gc_size_pool_id_for_size(size_t size)
+{
+    size += RVALUE_OVERHEAD;
+
+    size_t slot_count = CEILDIV(size, BASE_SLOT_SIZE);
+
+    /* size_pool_idx is ceil(log2(slot_count)) */
+    size_t size_pool_idx = 64 - nlz_int64(slot_count - 1);
+
+    if (size_pool_idx >= SIZE_POOL_COUNT) {
+        rb_bug("rb_gc_size_pool_id_for_size: allocation size too large "
+               "(size=%"PRIuSIZE"u, size_pool_idx=%"PRIuSIZE"u)", size, size_pool_idx);
+    }
+
+#if RGENGC_CHECK_MODE
+    rb_objspace_t *objspace = &rb_objspace;
+    GC_ASSERT(size <= (size_t)size_pools[size_pool_idx].slot_size);
+    if (size_pool_idx > 0) GC_ASSERT(size > (size_t)size_pools[size_pool_idx - 1].slot_size);
+#endif
+
+    return size_pool_idx;
+}
+
 static inline VALUE
 ractor_cache_allocate_slot(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache,
                            size_t size_pool_idx)
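
[Editor's note: rb_gc_size_pool_id_for_size() computes ceil(log2(slot_count)) with a count-leading-zeros trick, because pool i holds slots of BASE_SLOT_SIZE << i bytes. Below is a minimal standalone sketch of that arithmetic, assuming a 40-byte BASE_SLOT_SIZE (typical of 64-bit builds), ignoring RVALUE_OVERHEAD, and substituting GCC/Clang's __builtin_clzll for Ruby's nlz_int64.]

#include <stdio.h>
#include <stdint.h>

#define BASE_SLOT_SIZE 40                        /* assumed slot size of pool 0 */
#define CEILDIV(i, mod) (((i) + (mod) - 1) / (mod))

/* Stand-in for Ruby's nlz_int64(): leading-zero count of a 64-bit value. */
static unsigned int
nlz(uint64_t x)
{
    return x == 0 ? 64 : (unsigned int)__builtin_clzll(x);
}

int
main(void)
{
    /* Pool i holds slots of BASE_SLOT_SIZE << i bytes, so the right pool for
     * a request needing slot_count base slots is ceil(log2(slot_count)). */
    size_t requests[] = { 40, 41, 80, 81, 160, 320, 640 };

    for (size_t i = 0; i < sizeof(requests) / sizeof(requests[0]); i++) {
        size_t slot_count = CEILDIV(requests[i], BASE_SLOT_SIZE);
        size_t pool_idx = 64 - nlz(slot_count - 1);
        printf("size=%3zu -> slot_count=%2zu -> pool %zu\n",
               requests[i], slot_count, pool_idx);
    }
    return 0;
}

[Sizes up to one base slot map to pool 0, up to two slots to pool 1, up to four slots to pool 2, and so on, doubling each time.]
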
@@ -2754,30 +2792,6 @@ newobj_fill(VALUE obj, VALUE v1, VALUE v2, VALUE v3)
     return obj;
 }
 
-static inline size_t
-size_pool_idx_for_size(size_t size)
-{
-    size += RVALUE_OVERHEAD;
-
-    size_t slot_count = CEILDIV(size, BASE_SLOT_SIZE);
-
-    /* size_pool_idx is ceil(log2(slot_count)) */
-    size_t size_pool_idx = 64 - nlz_int64(slot_count - 1);
-
-    if (size_pool_idx >= SIZE_POOL_COUNT) {
-        rb_bug("size_pool_idx_for_size: allocation size too large "
-               "(size=%"PRIuSIZE"u, size_pool_idx=%"PRIuSIZE"u)", size, size_pool_idx);
-    }
-
-#if RGENGC_CHECK_MODE
-    rb_objspace_t *objspace = &rb_objspace;
-    GC_ASSERT(size <= (size_t)size_pools[size_pool_idx].slot_size);
-    if (size_pool_idx > 0) GC_ASSERT(size > (size_t)size_pools[size_pool_idx - 1].slot_size);
-#endif
-
-    return size_pool_idx;
-}
-
 static VALUE
 newobj_alloc(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t size_pool_idx, bool vm_locked)
 {
@@ -2902,7 +2916,7 @@ newobj_of(rb_ractor_t *cr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v
         }
     }
 
-    size_t size_pool_idx = size_pool_idx_for_size(alloc_size);
+    size_t size_pool_idx = rb_gc_size_pool_id_for_size(alloc_size);
 
     if (SHAPE_IN_BASIC_FLAGS || (flags & RUBY_T_MASK) == T_OBJECT) {
         flags |= (VALUE)size_pool_idx << SHAPE_FLAG_SHIFT;
@@ -8086,7 +8100,7 @@ gc_compact_destination_pool(rb_objspace_t *objspace, rb_size_pool_t *src_pool, V
     }
 
     if (rb_gc_size_allocatable_p(obj_size)) {
-        idx = size_pool_idx_for_size(obj_size);
+        idx = rb_gc_size_pool_id_for_size(obj_size);
     }
 
     return &size_pools[idx];
 }
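
[Editor's note: size_pool_sizes is declared with SIZE_POOL_COUNT + 1 entries and zero-initialized, so the array rb_gc_size_pool_sizes() returns is terminated by a 0. That sentinel is what lets code outside the GC walk the pools without reading SIZE_POOL_COUNT, as Init_default_shapes does below. A hedged sketch of such a caller; the loop body is illustrative, not from the commit.]

#include <stdio.h>

size_t *rb_gc_size_pool_sizes(void);    /* declared in the GC's internal header */

static void
print_size_pools(void)
{
    /* The trailing zero entry acts as a sentinel, so no pool count is needed. */
    size_t *sizes = rb_gc_size_pool_sizes();
    for (size_t i = 0; sizes[i] > 0; i++) {
        printf("size pool %zu: %zu-byte slots\n", i, sizes[i]);
    }
}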

internal/gc.h

@@ -223,6 +223,8 @@ static inline void ruby_sized_xfree_inlined(void *ptr, size_t size);
 void rb_gc_ractor_newobj_cache_clear(rb_ractor_newobj_cache_t *newobj_cache);
 size_t rb_gc_obj_slot_size(VALUE obj);
 bool rb_gc_size_allocatable_p(size_t size);
+size_t *rb_gc_size_pool_sizes(void);
+size_t rb_gc_size_pool_id_for_size(size_t size);
 int rb_objspace_garbage_object_p(VALUE obj);
 bool rb_gc_is_ptr_to_obj(const void *ptr);

object.c

@@ -134,9 +134,8 @@ rb_class_allocate_instance(VALUE klass)
 
     RUBY_ASSERT(rb_shape_get_shape(obj)->type == SHAPE_ROOT);
 
-    // Set the shape to the specific T_OBJECT shape which is always
-    // SIZE_POOL_COUNT away from the root shape.
-    ROBJECT_SET_SHAPE_ID(obj, ROBJECT_SHAPE_ID(obj) + SIZE_POOL_COUNT);
+    // Set the shape to the specific T_OBJECT shape.
+    ROBJECT_SET_SHAPE_ID(obj, SIZE_POOL_COUNT + rb_gc_size_pool_id_for_size(size));
 
 #if RUBY_DEBUG
     RUBY_ASSERT(!rb_shape_obj_too_complex(obj));
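
[Editor's note: the old and new expressions pick the same shape id; the new one just derives the pool index from the requested size instead of reading the root shape id back off the freshly allocated object. A sketch of the boot-time shape id layout this relies on, consistent with the RUBY_ASSERT in shape.c below; the summary is the editor's, not code from the commit.]

/*
 * ids 0 .. SIZE_POOL_COUNT-1                  root shape of size pool i
 * ids SIZE_POOL_COUNT .. 2*SIZE_POOL_COUNT-1  T_OBJECT shape of size pool i
 *
 * A new T_OBJECT in pool i starts with ROBJECT_SHAPE_ID(obj) == i, so
 * ROBJECT_SHAPE_ID(obj) + SIZE_POOL_COUNT (old) and
 * SIZE_POOL_COUNT + rb_gc_size_pool_id_for_size(size) (new) agree.
 */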

shape.c

@@ -1266,12 +1266,13 @@ Init_default_shapes(void)
     }
 
     // Make shapes for T_OBJECT
-    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+    size_t *sizes = rb_gc_size_pool_sizes();
+    for (int i = 0; sizes[i] > 0; i++) {
         rb_shape_t * shape = rb_shape_get_shape_by_id(i);
         bool dont_care;
-        rb_shape_t * t_object_shape =
+        rb_shape_t *t_object_shape =
             get_next_shape_internal(shape, id_t_object, SHAPE_T_OBJECT, &dont_care, true);
-        t_object_shape->capacity = (uint32_t)((rb_size_pool_slot_size(i) - offsetof(struct RObject, as.ary)) / sizeof(VALUE));
+        t_object_shape->capacity = (uint32_t)((sizes[i] - offsetof(struct RObject, as.ary)) / sizeof(VALUE));
         t_object_shape->edges = rb_id_table_create(0);
         t_object_shape->ancestor_index = LEAF;
         RUBY_ASSERT(rb_shape_id(t_object_shape) == (shape_id_t)(i + SIZE_POOL_COUNT));
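
[Editor's note: each T_OBJECT shape's capacity is simply the number of embedded ivars that fit in one slot of its pool. A worked sketch with assumed 64-bit layout numbers: 8-byte VALUE, embedded ivar array at byte offset 16 inside struct RObject, 40-byte base slot. Both stand-in macros are illustrative, not from the commit.]

#include <stdio.h>

#define ROBJECT_ARY_OFFSET 16   /* assumed offsetof(struct RObject, as.ary) on 64-bit */
#define SIZEOF_VALUE 8          /* assumed sizeof(VALUE) on 64-bit */

int
main(void)
{
    /* Zero-terminated, mirroring what rb_gc_size_pool_sizes() returns. */
    size_t sizes[] = { 40, 80, 160, 320, 640, 0 };

    for (size_t i = 0; sizes[i] > 0; i++) {
        size_t capacity = (sizes[i] - ROBJECT_ARY_OFFSET) / SIZEOF_VALUE;
        printf("slot %3zu bytes -> capacity %2zu ivars\n", sizes[i], capacity);
    }
    return 0;
}

[With these numbers the capacities come out to 3, 8, 18, 38, and 78 embedded ivars for the five pools.]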