notify ASAN about M:N threading stack switches
In a similar way to how we do it for fibers in cont.c, we need to call __sanitizer_start_switch_fiber and __sanitizer_finish_switch_fiber around the call to coroutine_transfer, so that ASAN can save and restore the fake stack pointer. When an M:N thread is exiting, we pass `to_dead` to the new coroutine_transfer0 function, so that we pass NULL for the fake-stack save pointer. This signals to ASAN that the fake stack can be freed (otherwise it would be leaked).

[Bug #20220]
commit 719db18b50
parent 19f615521d
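
For context, here is a minimal standalone sketch of the sanitizer fiber-switch protocol that the diff below wires into coroutine_transfer. The struct coro and switch_to names are hypothetical stand-ins, not Ruby internals; only the two __sanitizer_* calls (declared in <sanitizer/common_interface_defs.h>) are the real API.

    /* Build with: cc -fsanitize=address -o demo demo.c */
    #include <sanitizer/common_interface_defs.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <ucontext.h>

    struct coro {
        ucontext_t ctx;
        void *fake_stack;  /* ASAN's fake-stack handle, saved across switches */
        void *stack_base;  /* bottom of this coroutine's real stack */
        size_t stack_size;
    };

    static void
    switch_to(struct coro *from, struct coro *to, bool from_is_dead)
    {
        /* Pass NULL instead of &from->fake_stack when `from` will never run
         * again; ASAN then frees the fake stack instead of leaking it. */
        void **save = from_is_dead ? NULL : &from->fake_stack;
        __sanitizer_start_switch_fiber(save, to->stack_base, to->stack_size);

        swapcontext(&from->ctx, &to->ctx);  /* the actual stack switch */

        /* Control returns here only if some other coroutine resumed `from`,
         * so `from` was alive and its fake-stack handle was saved above. */
        __sanitizer_finish_switch_fiber(from->fake_stack, NULL, NULL);
    }
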
--- a/thread_pthread.c
+++ b/thread_pthread.c
@@ -333,6 +333,8 @@ static void timer_thread_wakeup(void);
 static void timer_thread_wakeup_locked(rb_vm_t *vm);
 static void timer_thread_wakeup_force(void);
 static void thread_sched_switch(rb_thread_t *cth, rb_thread_t *next_th);
+static void coroutine_transfer0(struct coroutine_context *transfer_from,
+                                struct coroutine_context *transfer_to, bool to_dead);
 
 #define thread_sched_dump(s) thread_sched_dump_(__FILE__, __LINE__, s)
 
@@ -892,7 +894,7 @@ thread_sched_wait_running_turn(struct rb_thread_sched *sched, rb_thread_t *th, b
         thread_sched_set_lock_owner(sched, NULL);
         {
             rb_ractor_set_current_ec(th->ractor, NULL);
-            coroutine_transfer(th->sched.context, nt->nt_context);
+            coroutine_transfer0(th->sched.context, nt->nt_context, false);
         }
         thread_sched_set_lock_owner(sched, th);
     }
@@ -1151,7 +1153,28 @@ rb_thread_sched_init(struct rb_thread_sched *sched, bool atfork)
 }
 
 static void
-thread_sched_switch0(struct coroutine_context *current_cont, rb_thread_t *next_th, struct rb_native_thread *nt)
+coroutine_transfer0(struct coroutine_context *transfer_from, struct coroutine_context *transfer_to, bool to_dead)
+{
+#ifdef RUBY_ASAN_ENABLED
+    void **fake_stack = to_dead ? NULL : &transfer_from->fake_stack;
+    __sanitizer_start_switch_fiber(fake_stack, transfer_to->stack_base, transfer_to->stack_size);
+#endif
+
+    RBIMPL_ATTR_MAYBE_UNUSED()
+    struct coroutine_context *returning_from = coroutine_transfer(transfer_from, transfer_to);
+
+    /* if to_dead was passed, the caller is promising that this coroutine is finished and it should
+     * never be resumed! */
+    VM_ASSERT(!to_dead);
+
+#ifdef RUBY_ASAN_ENABLED
+    __sanitizer_finish_switch_fiber(transfer_from->fake_stack,
+                                    (const void**)&returning_from->stack_base, &returning_from->stack_size);
+#endif
+}
+
+static void
+thread_sched_switch0(struct coroutine_context *current_cont, rb_thread_t *next_th, struct rb_native_thread *nt, bool to_dead)
 {
     VM_ASSERT(!nt->dedicated);
     VM_ASSERT(next_th->nt == NULL);
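
Note on the design: passing NULL as the fake-stack save pointer is how ASAN is told the departing stack is dead, so its fake stack is destroyed rather than saved. The VM_ASSERT(!to_dead) after the transfer holds because a coroutine switched away from with to_dead is never resumed; control can only return here for live coroutines. RBIMPL_ATTR_MAYBE_UNUSED() keeps returning_from from triggering an unused-variable warning in builds where RUBY_ASAN_ENABLED is not defined.
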
@@ -1160,7 +1183,8 @@ thread_sched_switch0(struct coroutine_context *current_cont, rb_thread_t *next_t
 
     ruby_thread_set_native(next_th);
     native_thread_assign(nt, next_th);
-    coroutine_transfer(current_cont, next_th->sched.context);
+
+    coroutine_transfer0(current_cont, next_th->sched.context, to_dead);
 }
 
 static void
@@ -1169,7 +1193,7 @@ thread_sched_switch(rb_thread_t *cth, rb_thread_t *next_th)
     struct rb_native_thread *nt = cth->nt;
    native_thread_assign(NULL, cth);
    RUBY_DEBUG_LOG("th:%u->%u on nt:%d", rb_th_serial(cth), rb_th_serial(next_th), nt->serial);
-    thread_sched_switch0(cth->sched.context, next_th, nt);
+    thread_sched_switch0(cth->sched.context, next_th, nt, cth->status == THREAD_KILLED);
 }
 
 #if VM_CHECK_MODE > 0
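
Here to_dead is true exactly when the outgoing thread has been killed: a THREAD_KILLED thread's coroutine will never be resumed, so its fake stack can be released at this switch.
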
@@ -2268,7 +2292,7 @@ nt_start(void *ptr)
 
             if (next_th && next_th->nt == NULL) {
                 RUBY_DEBUG_LOG("nt:%d next_th:%d", (int)nt->serial, (int)next_th->serial);
-                thread_sched_switch0(nt->nt_context, next_th, nt);
+                thread_sched_switch0(nt->nt_context, next_th, nt, false);
             }
             else {
                 RUBY_DEBUG_LOG("no schedulable threads -- next_th:%p", next_th);
--- a/thread_pthread_mn.c
+++ b/thread_pthread_mn.c
@@ -413,6 +413,11 @@ native_thread_check_and_create_shared(rb_vm_t *vm)
 static COROUTINE
 co_start(struct coroutine_context *from, struct coroutine_context *self)
 {
+#ifdef RUBY_ASAN_ENABLED
+    __sanitizer_finish_switch_fiber(self->fake_stack,
+                                    (const void**)&from->stack_base, &from->stack_size);
+#endif
+
     rb_thread_t *th = (rb_thread_t *)self->argument;
     struct rb_thread_sched *sched = TH_SCHED(th);
     VM_ASSERT(th->nt != NULL);
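
This is the receiving side of the protocol: a coroutine entered for the first time must call __sanitizer_finish_switch_fiber before touching anything on its new stack, mirroring what cont.c already does for fiber entry points.
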
@@ -447,13 +452,13 @@ co_start(struct coroutine_context *from, struct coroutine_context *self)
         if (!has_ready_ractor && next_th && !next_th->nt) {
             // switch to the next thread
             thread_sched_set_lock_owner(sched, NULL);
-            thread_sched_switch0(th->sched.context, next_th, nt);
+            thread_sched_switch0(th->sched.context, next_th, nt, true);
             th->sched.finished = true;
         }
         else {
             // switch to the next Ractor
             th->sched.finished = true;
-            coroutine_transfer(self, nt->nt_context);
+            coroutine_transfer0(self, nt->nt_context, true);
         }
         rb_bug("unreachable");
     }
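
Both exit paths out of co_start pass true for to_dead: the M:N thread is finished either way, so the fake stack backing its coroutine is freed at this final switch rather than saved, which is exactly the leak [Bug #20220] reported.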