Implement dsize function for fstring_table_type

The fstring table size used to be reported as part of the VM
size, but since the table was refactored to be lock-less, its
size is no longer reported.

Since it's now wrapped in a `T_DATA` object, we can implement
its `dsize` function and gain valuable insight into how much
memory the table uses.

```
{"address":"0x100ebff18", "type":"DATA", "shape_id":0, "slot_size":80,
"struct":"VM/fstring_table", "memsize":131176, ...
```
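
For readers who want to reproduce a dump line like the one above, here is a minimal sketch using the `objspace` extension (the output path and search string are illustrative, and it assumes a Ruby built with this change):

```ruby
require "objspace"

# Dump the entire heap; internal VM objects such as the fstring table are
# only visible in a full dump, since they cannot be referenced from Ruby code.
File.open("heap.json", "w") do |io|
  ObjectSpace.dump_all(output: io)
end

# The table appears as a DATA object whose "struct" field matches the
# wrap_struct_name set in the diff below ("VM/fstring_table"); "memsize"
# is the value returned by the new dsize callback.
entry = File.foreach("heap.json").find { |line| line.include?("VM/fstring_table") }
puts entry
```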
Author: Jean Boussier, 2025-04-19 11:45:02 +09:00
Parent: 52487705d0
Commit: 0f25886fac
Notes: git, 2025-04-19 03:42:28 +00:00


@@ -546,13 +546,20 @@ fstring_table_free(void *ptr)
     xfree(table->entries);
 }
 
+static size_t
+fstring_table_size(const void *ptr)
+{
+    const struct fstring_table_struct *table = ptr;
+    return sizeof(struct fstring_table_struct) + sizeof(struct fstring_table_entry) * table->capacity;
+}
+
 // We declare a type for the table so that we can lean on Ruby's GC for deferred reclamation
 static const rb_data_type_t fstring_table_type = {
-    .wrap_struct_name = "fstring_table",
+    .wrap_struct_name = "VM/fstring_table",
     .function = {
         .dmark = NULL,
         .dfree = fstring_table_free,
-        .dsize = NULL,
+        .dsize = fstring_table_size,
     },
     .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
 };
@@ -609,7 +616,9 @@ struct fstring_table_probe {
     int mask;
 };
 
-static int fstring_table_probe_start(struct fstring_table_probe *probe, struct fstring_table_struct *table, VALUE hash_code) {
+static int
+fstring_table_probe_start(struct fstring_table_probe *probe, struct fstring_table_struct *table, VALUE hash_code)
+{
     RUBY_ASSERT((table->capacity & (table->capacity - 1)) == 0);
     probe->d = 0;
     probe->mask = table->capacity - 1;
@@ -617,7 +626,9 @@ static int fstring_table_probe_start(struct fstring_table_probe *probe, struct f
     return probe->idx;
 }
 
-static int fstring_table_probe_next(struct fstring_table_probe *probe) {
+static int
+fstring_table_probe_next(struct fstring_table_probe *probe)
+{
     probe->d++;
     probe->idx = (probe->idx + probe->d) & probe->mask;
     return probe->idx;
@@ -626,7 +637,9 @@ static int fstring_table_probe_next(struct fstring_table_probe *probe) {
 
 #define RUBY_ATOMIC_VALUE_LOAD(x) (VALUE)(RUBY_ATOMIC_PTR_LOAD(x))
 
-static void fstring_insert_on_resize(struct fstring_table_struct *table, VALUE hash_code, VALUE value) {
+static void
+fstring_insert_on_resize(struct fstring_table_struct *table, VALUE hash_code, VALUE value)
+{
     struct fstring_table_probe probe;
     int idx = fstring_table_probe_start(&probe, table, hash_code);
@@ -653,7 +666,9 @@ static void fstring_insert_on_resize(struct fstring_table_struct *table, VALUE h
 }
 
 // Rebuilds the table
-static void fstring_try_resize(VALUE old_table_obj) {
+static void
+fstring_try_resize(VALUE old_table_obj)
+{
     RB_VM_LOCK_ENTER();
 
     // Check if another thread has already resized
@@ -710,7 +725,9 @@ end:
     RB_VM_LOCK_LEAVE();
 }
 
-static VALUE fstring_find_or_insert(VALUE hash_code, VALUE value, struct fstr_update_arg *arg) {
+static VALUE
+fstring_find_or_insert(VALUE hash_code, VALUE value, struct fstr_update_arg *arg)
+{
     struct fstring_table_probe probe;
     bool inserting = false;
     int idx;
@@ -790,7 +807,9 @@ static VALUE fstring_find_or_insert(VALUE hash_code, VALUE value, struct fstr_up
 // Removes an fstring from the table. Compares by identity
-static void fstring_delete(VALUE hash_code, VALUE value) {
+static void
+fstring_delete(VALUE hash_code, VALUE value)
+{
     // Delete is never called concurrently, so atomic operations are unnecessary
     VALUE table_obj = RUBY_ATOMIC_VALUE_LOAD(fstring_table_obj);
     RUBY_ASSERT_ALWAYS(table_obj);
@@ -849,7 +868,9 @@ register_fstring(VALUE str, bool copy, bool force_precompute_hash)
     return result;
 }
 
-void rb_fstring_foreach_with_replace(st_foreach_check_callback_func *func, st_update_callback_func *replace, st_data_t arg) {
+void
+rb_fstring_foreach_with_replace(st_foreach_check_callback_func *func, st_update_callback_func *replace, st_data_t arg)
+{
     // Assume locking and barrier (which there is no assert for)
     ASSERT_vm_locking();
@@ -888,13 +909,17 @@ void rb_fstring_foreach_with_replace(st_foreach_check_callback_func *func, st_up
     }
 }
 
-bool rb_obj_is_fstring_table(VALUE obj) {
+bool
+rb_obj_is_fstring_table(VALUE obj)
+{
     ASSERT_vm_locking();
     return obj == fstring_table_obj;
 }
 
-void rb_gc_free_fstring(VALUE obj) {
+void
+rb_gc_free_fstring(VALUE obj)
+{
     // Assume locking and barrier (which there is no assert for)
     ASSERT_vm_locking();