Take VM lock around manipulation of fiber pool for vacant stacks

When fibers were created in multiple ractors at the same time, this structure was
manipulated concurrently without synchronization, which corrupted it and caused segfaults.

I didn't add any tests for this because I'm preparing a more general
PR in the near future that will allow running test methods (the test-all suite)
inside multiple ractors at the same time. That is how this bug was
caught: by running test/ruby/test_fiber.rb inside 10 ractors at once.
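
As an illustration of the failure mode (a hypothetical sketch, not the actual test
run), the following Ruby loop makes several ractors create and resume fibers at the
same time, so stack acquisition and release against the shared fiber pool happen
concurrently; the ractor and fiber counts are arbitrary:

    # Hypothetical reproduction sketch: several ractors allocating fiber stacks at once.
    # Counts are illustrative; without the VM lock this kind of loop could segfault.
    ractors = 10.times.map do
      Ractor.new do
        100.times { Fiber.new { :noop }.resume }  # each resume acquires a pool stack
      end
    end
    ractors.each(&:take)  # wait for every ractor to finish

Each Fiber.new/resume pulls a stack from the pool and returns it when the fiber
finishes, which is the path now guarded by RB_VM_LOCK_ENTER/RB_VM_LOCK_LEAVE.
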
Author:    Luke Gruber, 2025-05-29 11:40:57 -04:00
Committer: John Hawthorn
Parent:    38ecaca155
Commit:    5b3f1c4c51
Notes:     git 2025-05-29 16:51:56 +00:00

cont.c (19 lines changed)

@@ -508,6 +508,9 @@ fiber_pool_allocate_memory(size_t * count, size_t stride)
 // @sa fiber_pool_allocation_free
 static struct fiber_pool_allocation *
 fiber_pool_expand(struct fiber_pool * fiber_pool, size_t count)
 {
+    struct fiber_pool_allocation * allocation;
+    RB_VM_LOCK_ENTER();
+    {
     STACK_GROW_DIR_DETECTION;
@@ -522,7 +525,7 @@ fiber_pool_expand(struct fiber_pool * fiber_pool, size_t count)
     }
     struct fiber_pool_vacancy * vacancies = fiber_pool->vacancies;
-    struct fiber_pool_allocation * allocation = RB_ALLOC(struct fiber_pool_allocation);
+    allocation = RB_ALLOC(struct fiber_pool_allocation);
     // Initialize fiber pool allocation:
     allocation->base = base;
@@ -543,7 +546,6 @@ fiber_pool_expand(struct fiber_pool * fiber_pool, size_t count)
     for (size_t i = 0; i < count; i += 1) {
         void * base = (char*)allocation->base + (stride * i);
         void * page = (char*)base + STACK_DIR_UPPER(size, 0);
 #if defined(_WIN32)
         DWORD old_protect;
@@ -586,6 +588,8 @@ fiber_pool_expand(struct fiber_pool * fiber_pool, size_t count)
     fiber_pool->allocations = allocation;
     fiber_pool->vacancies = vacancies;
     fiber_pool->count += count;
+    }
+    RB_VM_LOCK_LEAVE();
     return allocation;
 }
@@ -659,7 +663,10 @@ fiber_pool_allocation_free(struct fiber_pool_allocation * allocation)
 static struct fiber_pool_stack
 fiber_pool_stack_acquire(struct fiber_pool * fiber_pool)
 {
-    struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pop(fiber_pool);
+    struct fiber_pool_vacancy * vacancy;
+    RB_VM_LOCK_ENTER();
+    {
+    vacancy = fiber_pool_vacancy_pop(fiber_pool);
     if (DEBUG) fprintf(stderr, "fiber_pool_stack_acquire: %p used=%"PRIuSIZE"\n", (void*)fiber_pool->vacancies, fiber_pool->used);
@@ -694,6 +701,8 @@ fiber_pool_stack_acquire(struct fiber_pool * fiber_pool)
 #endif
     fiber_pool_stack_reset(&vacancy->stack);
+    }
+    RB_VM_LOCK_LEAVE();
     return vacancy->stack;
 }
@@ -764,6 +773,8 @@ static void
 fiber_pool_stack_release(struct fiber_pool_stack * stack)
 {
     struct fiber_pool * pool = stack->pool;
+    RB_VM_LOCK_ENTER();
+    {
     struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pointer(stack->base, stack->size);
     if (DEBUG) fprintf(stderr, "fiber_pool_stack_release: %p used=%"PRIuSIZE"\n", stack->base, stack->pool->used);
@@ -799,6 +810,8 @@ fiber_pool_stack_release(struct fiber_pool_stack * stack)
     }
 #endif
+    }
+    RB_VM_LOCK_LEAVE();
 }
 static inline void
 ec_switch(rb_thread_t *th, rb_fiber_t *fiber)