diff --git a/cont.c b/cont.c
index 095cc86767..a43950c6cb 100644
--- a/cont.c
+++ b/cont.c
@@ -1288,6 +1288,7 @@ root_fiber_alloc(rb_thread_t *th)
     rb_fiber_t *fib;
     /* no need to allocate vm stack */
     fib = fiber_t_alloc(fiber_alloc(rb_cFiber));
+    fib->cont.saved_thread.ec.stack = NULL;
     fib->cont.type = ROOT_FIBER_CONTEXT;
 #if FIBER_USE_NATIVE
 #ifdef _WIN32
diff --git a/thread.c b/thread.c
index b7ee1d8d9b..0fa37b70f0 100644
--- a/thread.c
+++ b/thread.c
@@ -694,10 +694,8 @@ thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_start)
 	rb_threadptr_unlock_all_locking_mutexes(th);
 	rb_check_deadlock(th->vm);
 
-	if (!th->root_fiber) {
-	    rb_thread_recycle_stack_release(th->ec.stack);
-	    th->ec.stack = 0;
-	}
+	rb_thread_recycle_stack_release(th->ec.stack);
+	th->ec.stack = NULL;
     }
     native_mutex_lock(&th->vm->thread_destruct_lock);
     /* make sure vm->running_thread never point me after this point.*/
diff --git a/vm.c b/vm.c
index 6815e84f76..266b251485 100644
--- a/vm.c
+++ b/vm.c
@@ -2330,7 +2330,7 @@ static int thread_recycle_stack_count = 0;
 static VALUE *
 thread_recycle_stack(size_t size)
 {
-    if (thread_recycle_stack_count) {
+    if (thread_recycle_stack_count > 0) {
 	/* TODO: check stack size if stack sizes are variable */
 	return thread_recycle_stack_slot[--thread_recycle_stack_count];
     }
@@ -2346,6 +2346,8 @@ thread_recycle_stack(size_t size)
 void
 rb_thread_recycle_stack_release(VALUE *stack)
 {
+    VM_ASSERT(stack != NULL);
+
 #if USE_THREAD_DATA_RECYCLE
     if (thread_recycle_stack_count < RECYCLE_MAX) {
 	thread_recycle_stack_slot[thread_recycle_stack_count++] = stack;
@@ -2429,8 +2431,9 @@ thread_free(void *ptr)
     rb_thread_t *th = ptr;
     RUBY_FREE_ENTER("thread");
 
-    if (!th->root_fiber) {
-	RUBY_FREE_UNLESS_NULL(th->ec.stack);
+    if (th->ec.stack != NULL) {
+	rb_thread_recycle_stack_release(th->ec.stack);
+	th->ec.stack = NULL;
     }
 
     if (th->locking_mutex != Qfalse) {