Pass down "stack start" variables from closer to the top of the stack
This commit changes how stack extents are calculated for both the main
thread and other threads. Ruby uses the address of a local variable as
part of the calculation for machine stack extents:

* pthreads uses it as a lower-bound on the start of the stack, because
  glibc (and maybe other libcs) can store its own data on the stack
  before calling into user code on thread creation.
* win32 uses it as an argument to VirtualQuery, which gets the extent
  of the memory mapping which contains the variable.

However, the local being used for this is actually too low (too close
to the leaf function call) in both the main thread case and the new
thread case.

In the main thread case, we have the `INIT_STACK` macro, which is used
for pthreads to set the `native_main_thread->stack_start` value. This
value is correctly captured at the very top level of the program (in
main.c). However, this is _not_ what's used to set the execution
context machine stack (`th->ec->machine.stack_start`); that gets set as
part of a call to `ruby_thread_init_stack` in `Init_BareVM`, using the
address of a local variable allocated _inside_ `Init_BareVM`. This is
too low; we need to use a local allocated closer to the top of the
program.

In the new thread case, the local is allocated inside
`native_thread_init_stack`, which is, again, too low.

In both cases, this means that we might have VALUEs lying outside the
bounds of `th->ec->machine.stack_{start,end}`, which won't be marked
correctly by the GC machinery.

To fix this,

* In the main thread case: We already have `INIT_STACK` at the right
  level, so just pass that local var to `ruby_thread_init_stack`.
* In the new thread case: Allocate the local one level above the call
  to `native_thread_init_stack` in `call_thread_start_func_2`.

[Bug #20001]
commit 807714447e
parent 08edad31a6
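To illustrate the marking gap the message describes, here is a minimal, standalone C sketch (not part of the commit; every name in it is hypothetical). It assumes a downward-growing stack, as in the pthreads case: if the scan's upper bound is captured from a local in a frame deep in the call chain, values living in frames closer to main() sit above that bound and are never visited.

#include <stdint.h>
#include <stdio.h>

/* Analogue of th->ec->machine.stack_start: the upper bound of the scanned region. */
static uintptr_t stack_start;

/* Captures the bound from a local inside a deep callee, the way the old code
 * effectively did inside Init_BareVM / native_thread_init_stack. */
static void init_stack_too_low(void)
{
    int local;
    stack_start = (uintptr_t)&local;   /* too close to the leaf call */
}

/* A conservative scan on a downward-growing stack covers [current SP, stack_start). */
static int covered_by_scan(const void *addr)
{
    int here;   /* rough stand-in for the current stack pointer */
    return (uintptr_t)addr >= (uintptr_t)&here && (uintptr_t)addr < stack_start;
}

int main(void)
{
    long important_value = 42;   /* lives in main's frame, above the deep callee */
    init_stack_too_low();
    /* Typically prints "covered: 0": the value sits above stack_start, so a GC
     * bounded by it would never mark it. (The exact result depends on stack
     * growth direction and frame layout, which the C standard does not fix.) */
    printf("covered: %d\n", covered_by_scan(&important_value));
    return 0;
}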
eval.c (6 changed lines)

@@ -70,8 +70,6 @@ ruby_setup(void)
     if (GET_VM())
         return 0;
 
-    ruby_init_stack((void *)&state);
-
     /*
      * Disable THP early before mallocs happen because we want this to
      * affect as many future pages as possible for CoW-friendliness
@@ -115,7 +113,6 @@ ruby_options(int argc, char **argv)
     enum ruby_tag_type state;
     void *volatile iseq = 0;
 
-    ruby_init_stack((void *)&iseq);
     EC_PUSH_TAG(ec);
     if ((state = EC_EXEC_TAG()) == TAG_NONE) {
         SAVE_ROOT_JMPBUF(GET_THREAD(), iseq = ruby_process_options(argc, argv));
@@ -205,7 +202,6 @@ rb_ec_cleanup(rb_execution_context_t *ec, enum ruby_tag_type ex)
   step_0: step++;
     save_error = ec->errinfo;
     if (THROW_DATA_P(ec->errinfo)) ec->errinfo = Qnil;
-    ruby_init_stack(&message);
 
     /* exits with failure but silently when an exception raised
      * here */
@@ -324,14 +320,12 @@ ruby_run_node(void *n)
         rb_ec_cleanup(ec, (NIL_P(ec->errinfo) ? TAG_NONE : TAG_RAISE));
         return status;
     }
-    ruby_init_stack((void *)&status);
     return rb_ec_cleanup(ec, rb_ec_exec_node(ec, n));
 }
 
 int
 ruby_exec_node(void *n)
 {
-    ruby_init_stack((void *)&n);
     return rb_ec_exec_node(GET_EC(), n);
 }
 
include/ruby/internal/interpreter.h

@@ -141,7 +141,7 @@ void ruby_show_copyright(void);
  *
  * @param[in] addr A pointer somewhere on the stack, near its bottom.
  */
-void ruby_init_stack(volatile VALUE *addr);
+void ruby_init_stack(void *addr);
 
 /**
  * Initializes the VM and builtin libraries.
thread.c (4 changed lines)

@@ -522,9 +522,9 @@ static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
 static VALUE rb_thread_to_s(VALUE thread);
 
 void
-ruby_thread_init_stack(rb_thread_t *th)
+ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame)
 {
-    native_thread_init_stack(th);
+    native_thread_init_stack(th, local_in_parent_frame);
 }
 
 const VALUE *
thread_none.c

@@ -139,13 +139,8 @@ ruby_mn_threads_params(void)
 {
 }
 
-void
-ruby_init_stack(volatile VALUE *addr)
-{
-}
-
 static int
-native_thread_init_stack(rb_thread_t *th)
+native_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame)
 {
 #if defined(__wasm__) && !defined(__EMSCRIPTEN__)
     th->ec->machine.stack_start = (VALUE *)rb_wasm_stack_get_base();
thread_pthread.c

@@ -1962,9 +1962,8 @@ reserve_stack(volatile char *limit, size_t size)
 # define reserve_stack(limit, size) ((void)(limit), (void)(size))
 #endif
 
-#undef ruby_init_stack
-void
-ruby_init_stack(volatile VALUE *addr)
+static void
+native_thread_init_main_thread_stack(void *addr)
 {
     native_main_thread.id = pthread_self();
 
@@ -1987,7 +1986,7 @@ ruby_init_stack(volatile VALUE *addr)
     if (!native_main_thread.stack_start ||
         STACK_UPPER((VALUE *)(void *)&addr,
-                    native_main_thread.stack_start > addr,
-                    native_main_thread.stack_start < addr)) {
+                    native_main_thread.stack_start > (VALUE *)addr,
+                    native_main_thread.stack_start < (VALUE *)addr)) {
         native_main_thread.stack_start = (VALUE *)addr;
     }
 #endif
@@ -2049,10 +2048,16 @@ ruby_init_stack(volatile VALUE *addr)
     {int err = (expr); if (err) {rb_bug_errno(#expr, err);}}
 
 static int
-native_thread_init_stack(rb_thread_t *th)
+native_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame)
 {
     rb_nativethread_id_t curr = pthread_self();
 
+    if (!native_main_thread.id) {
+        /* This thread is the first thread, must be the main thread -
+         * configure the native_main_thread object */
+        native_thread_init_main_thread_stack(local_in_parent_frame);
+    }
+
     if (pthread_equal(curr, native_main_thread.id)) {
         th->ec->machine.stack_start = native_main_thread.stack_start;
         th->ec->machine.stack_maxsize = native_main_thread.stack_maxsize;
@@ -2064,8 +2069,8 @@ native_thread_init_stack(rb_thread_t *th)
             size_t size;
 
             if (get_stack(&start, &size) == 0) {
-                uintptr_t diff = (uintptr_t)start - (uintptr_t)&curr;
-                th->ec->machine.stack_start = (VALUE *)&curr;
+                uintptr_t diff = (uintptr_t)start - (uintptr_t)local_in_parent_frame;
+                th->ec->machine.stack_start = local_in_parent_frame;
                 th->ec->machine.stack_maxsize = size - diff;
             }
         }
@@ -2185,8 +2190,19 @@ native_thread_create_dedicated(rb_thread_t *th)
 static void
 call_thread_start_func_2(rb_thread_t *th)
 {
-    native_thread_init_stack(th);
+    /* Capture the address of a local in this stack frame to mark the beginning of the
+       machine stack for this thread. This is required even if we can tell the real
+       stack beginning from the pthread API in native_thread_init_stack, because
+       glibc stores some of its own data on the stack before calling into user code
+       on a new thread, and replacing that data on fiber-switch would break it (see
+       bug #13887) */
+    VALUE stack_start = 0;
+    VALUE *stack_start_addr = &stack_start;
+    native_thread_init_stack(th, stack_start_addr);
     thread_start_func_2(th, th->ec->machine.stack_start);
+
+    /* Ensure that stack_start really was spilled to the stack */
+    RB_GC_GUARD(stack_start);
 }
 
 static void *
thread_win32.c

@@ -581,10 +581,6 @@ rb_native_cond_destroy(rb_nativethread_cond_t *cond)
     /* */
 }
 
-void
-ruby_init_stack(volatile VALUE *addr)
-{
-}
 
 #define CHECK_ERR(expr) \
     {if (!(expr)) {rb_bug("err: %lu - %s", GetLastError(), #expr);}}
@@ -594,20 +590,20 @@ COMPILER_WARNING_PUSH
 COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
 #endif
 static inline SIZE_T
-query_memory_basic_info(PMEMORY_BASIC_INFORMATION mi)
+query_memory_basic_info(PMEMORY_BASIC_INFORMATION mi, void *local_in_parent_frame)
 {
-    return VirtualQuery(mi, mi, sizeof(*mi));
+    return VirtualQuery(local_in_parent_frame, mi, sizeof(*mi));
 }
 COMPILER_WARNING_POP
 
 static void
-native_thread_init_stack(rb_thread_t *th)
+native_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame)
 {
     MEMORY_BASIC_INFORMATION mi;
     char *base, *end;
     DWORD size, space;
 
-    CHECK_ERR(query_memory_basic_info(&mi));
+    CHECK_ERR(query_memory_basic_info(&mi, local_in_parent_frame));
     base = mi.AllocationBase;
     end = mi.BaseAddress;
     end += mi.RegionSize;
@@ -638,7 +634,7 @@ thread_start_func_1(void *th_ptr)
     rb_thread_t *th = th_ptr;
     volatile HANDLE thread_id = th->nt->thread_id;
 
-    native_thread_init_stack(th);
+    native_thread_init_stack(th, &th);
     th->nt->interrupt_event = CreateEvent(0, TRUE, FALSE, 0);
 
     /* run */
vm.c (11 changed lines)

@@ -54,6 +54,8 @@
 int ruby_assert_critical_section_entered = 0;
 #endif
 
+static void *native_main_thread_stack_top;
+
 VALUE rb_str_concat_literals(size_t, const VALUE*);
 
 VALUE vm_exec(rb_execution_context_t *);
@@ -4206,7 +4208,8 @@ Init_BareVM(void)
     th_init(th, 0, vm);
 
     rb_ractor_set_current_ec(th->ractor, th->ec);
-    ruby_thread_init_stack(th);
+    /* n.b. native_main_thread_stack_top is set by the INIT_STACK macro */
+    ruby_thread_init_stack(th, native_main_thread_stack_top);
 
     // setup ractor system
     rb_native_mutex_initialize(&vm->ractor.sync.lock);
@@ -4217,6 +4220,12 @@ Init_BareVM(void)
 #endif
 }
 
+void
+ruby_init_stack(void *addr)
+{
+    native_main_thread_stack_top = addr;
+}
+
 #ifndef _WIN32
 #include <unistd.h>
 #include <sys/mman.h>
vm_core.h

@@ -1840,7 +1840,7 @@ rb_control_frame_t *rb_vm_get_binding_creatable_next_cfp(const rb_execution_cont
 VALUE *rb_vm_svar_lep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
 int rb_vm_get_sourceline(const rb_control_frame_t *);
 void rb_vm_stack_to_heap(rb_execution_context_t *ec);
-void ruby_thread_init_stack(rb_thread_t *th);
+void ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame);
 rb_thread_t * ruby_thread_from_native(void);
 int ruby_thread_set_native(rb_thread_t *th);
 int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp);