fix `native_thread_destroy()` timing
With the M:N thread scheduler, the native thread (NT) related resources should be freed when the NT is no longer needed. So calling `native_thread_destroy()` at the end of `thread_cleanup_func()` (i.e. at the end of the Ruby thread) is not the correct timing. Call it when the corresponding Ruby thread is collected instead.
parent 2794a8fef6
commit cdb36dfe7d
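As a lifetime illustration of the rule the message describes, here is a minimal standalone C sketch. It is not the Ruby sources: `ruby_thread`, `native_thread`, `thread_cleanup`, and `thread_sched_free` are illustrative stand-ins for `rb_thread_t`, `struct rb_native_thread`, `thread_cleanup_func()`, and `rb_threadptr_sched_free()`. Per-thread locks die with the thread body; the NT struct dies only when the Thread object is collected.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct native_thread {
    pthread_cond_t readyq;          /* condvar owned by the NT */
};

struct ruby_thread {
    struct native_thread *nt;       /* NT may outlive the thread body under M:N */
    pthread_mutex_t interrupt_lock; /* per-Ruby-thread lock */
};

/* NULL-tolerant, free(3)-style destructor, mirroring the shape of the
   new native_thread_destroy(struct rb_native_thread *nt). */
static void native_thread_destroy(struct native_thread *nt)
{
    if (nt) {
        pthread_cond_destroy(&nt->readyq);
        free(nt);
    }
}

/* Runs when the thread body finishes (thread_cleanup_func analog):
   only per-thread resources go away; the NT is left for the scheduler. */
static void thread_cleanup(struct ruby_thread *th)
{
    pthread_mutex_destroy(&th->interrupt_lock);
}

/* Runs when the Thread object is collected (rb_threadptr_sched_free
   analog): the NT is provably unreachable now, so free it here. */
static void thread_sched_free(struct ruby_thread *th)
{
    native_thread_destroy(th->nt);
    th->nt = NULL;
}

int main(void)
{
    struct ruby_thread th;
    th.nt = malloc(sizeof(*th.nt));
    if (!th.nt) return 1;
    pthread_cond_init(&th.nt->readyq, NULL);
    pthread_mutex_init(&th.interrupt_lock, NULL);

    thread_cleanup(&th);    /* thread body finished: NT survives */
    thread_sched_free(&th); /* Thread object collected: NT freed */
    printf("nt after collection: %p\n", (void *)th.nt);
    return 0;
}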
thread.c
@@ -508,16 +508,13 @@ thread_cleanup_func(void *th_ptr, int atfork)
      * Unfortunately, we can't release native threading resource at fork
      * because libc may have unstable locking state therefore touching
      * a threading resource may cause a deadlock.
-     *
-     * FIXME: Skipping native_mutex_destroy(pthread_mutex_destroy) is safe
-     * with NPTL, but native_thread_destroy calls pthread_cond_destroy
-     * which calls free(3), so there is a small memory leak atfork, here.
      */
-    if (atfork)
+    if (atfork) {
+        th->nt = NULL;
         return;
+    }
 
     rb_native_mutex_destroy(&th->interrupt_lock);
-    native_thread_destroy(th);
 }
 
 static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
thread_pthread.c
@@ -139,11 +139,6 @@ ruby_mn_threads_params(void)
 {
 }
 
-static void
-native_thread_destroy(rb_thread_t *th)
-{
-}
-
 void
 ruby_init_stack(volatile VALUE *addr)
 {
@@ -1717,16 +1717,21 @@ native_thread_assign(struct rb_native_thread *nt, rb_thread_t *th)
 }
 
 static void
-native_thread_destroy(rb_thread_t *th)
+native_thread_destroy(struct rb_native_thread *nt)
 {
-    struct rb_native_thread *nt = th->nt;
-
-    rb_native_cond_destroy(&nt->cond.readyq);
+    if (nt) {
+        rb_native_cond_destroy(&nt->cond.readyq);
 
-    if (&nt->cond.readyq != &nt->cond.intr)
-        rb_native_cond_destroy(&nt->cond.intr);
+        if (&nt->cond.readyq != &nt->cond.intr) {
+            rb_native_cond_destroy(&nt->cond.intr);
+        }
+
+        RB_ALTSTACK_FREE(nt->altstack);
+        ruby_xfree(nt->nt_context);
+        ruby_xfree(nt);
+    }
 }
 
 #if defined HAVE_PTHREAD_GETATTR_NP || defined HAVE_PTHREAD_ATTR_GET_NP
 #define STACKADDR_AVAILABLE 1
 #elif defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP
@@ -2109,6 +2114,7 @@ static struct rb_native_thread *
 native_thread_alloc(void)
 {
     struct rb_native_thread *nt = ZALLOC(struct rb_native_thread);
+    native_thread_setup(nt);
 
 #if USE_MN_THREADS
     nt->nt_context = ruby_xmalloc(sizeof(struct coroutine_context));
@@ -2128,7 +2134,6 @@ native_thread_create_dedicated(rb_thread_t *th)
     th->nt->vm = th->vm;
     th->nt->running_thread = th;
     th->nt->dedicated = 1;
-    native_thread_setup(th->nt);
 
     // vm stack
     size_t vm_stack_word_size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
@@ -2265,10 +2270,9 @@ rb_threadptr_sched_free(rb_thread_t *th)
 {
 #if USE_MN_THREADS
     if (th->sched.malloc_stack) {
+        // has dedicated
         ruby_xfree(th->sched.context_stack);
-        RB_ALTSTACK_FREE(th->nt->altstack);
-        ruby_xfree(th->nt->nt_context);
-        ruby_xfree(th->nt);
+        native_thread_destroy(th->nt);
     }
     else {
         nt_free_stack(th->sched.context_stack);
@@ -2279,17 +2283,12 @@ rb_threadptr_sched_free(rb_thread_t *th)
         ruby_xfree(th->sched.context);
         VM_ASSERT((th->sched.context = NULL) == NULL);
     }
-
-    th->nt = NULL;
 #else
     ruby_xfree(th->sched.context_stack);
-
-    struct rb_native_thread *nt = th->nt;
-    if (nt) { // TODO: not sure why nt is NULL
-        RB_ALTSTACK_FREE(nt->altstack);
-        ruby_xfree(nt);
-    }
+    native_thread_destroy(th->nt);
 #endif
+
+    th->nt = NULL;
 }
 
 void
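A side note on the guard kept in the new `native_thread_destroy()`: the address comparison `&nt->cond.readyq != &nt->cond.intr` prevents destroying the same condvar twice when the two members can name one object. Below is a small self-contained sketch of that pattern, assuming purely for illustration that the aliasing comes from a union; the real layout in thread_pthread.h may differ.

#include <pthread.h>
#include <stdio.h>

/* Assumed layout for illustration: readyq and intr share storage. */
union shared_cond {
    pthread_cond_t readyq;
    pthread_cond_t intr;
};

int main(void)
{
    union shared_cond cond;
    pthread_cond_init(&cond.readyq, NULL);

    pthread_cond_destroy(&cond.readyq);
    if (&cond.readyq != &cond.intr) {
        /* distinct objects in this build: destroy the second one too */
        pthread_cond_destroy(&cond.intr);
    }
    else {
        puts("readyq and intr alias: destroyed exactly once");
    }
    return 0;
}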
thread_win32.c
@@ -623,12 +623,14 @@ native_thread_init_stack(rb_thread_t *th)
     (void *)InterlockedExchange((long *)(t), (long)(v))
 #endif
 static void
-native_thread_destroy(rb_thread_t *th)
+native_thread_destroy(struct rb_native_thread *nt)
 {
-    HANDLE intr = InterlockedExchangePointer(&th->nt->interrupt_event, 0);
-    RUBY_DEBUG_LOG("close handle intr:%p, thid:%p\n", intr, th->nt->thread_id);
-    w32_close_handle(intr);
+    if (nt) {
+        HANDLE intr = InterlockedExchangePointer(&nt->interrupt_event, 0);
+        RUBY_DEBUG_LOG("close handle intr:%p, thid:%p\n", intr, nt->thread_id);
+        w32_close_handle(intr);
+    }
 }
 
 static unsigned long __stdcall
 thread_start_func_1(void *th_ptr)
@@ -893,6 +895,7 @@ th_has_dedicated_nt(const rb_thread_t *th)
 void
 rb_threadptr_sched_free(rb_thread_t *th)
 {
+    native_thread_destroy(th->nt);
     ruby_xfree(th->nt);
     ruby_xfree(th->sched.vm_stack);
 }
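On the Windows side, `InterlockedExchangePointer(&nt->interrupt_event, 0)` swaps the stored handle with 0, so at most one caller observes the old value and closes it, which makes the destroy step idempotent. Here is a portable C11 sketch of the same swap-to-zero idiom; `close_handle` is a hypothetical stand-in for `w32_close_handle`.

#include <stdatomic.h>
#include <stdio.h>

static _Atomic(void *) interrupt_event;

static void close_handle(void *h) { printf("closing %p\n", h); }

static void destroy_once(void)
{
    /* atomically take ownership: only the first caller gets non-NULL */
    void *h = atomic_exchange(&interrupt_event, NULL);
    if (h) close_handle(h);
}

int main(void)
{
    static int dummy;
    atomic_store(&interrupt_event, &dummy);
    destroy_once();   /* closes the handle */
    destroy_once();   /* no-op: already taken */
    return 0;
}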